| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
xeBuz/Flask-Validator | setup.py | 1 | 1530 | import re
import os
from setuptools import setup, find_packages
try:
import pypandoc
LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
LONG_DESCRIPTION = ''
setup(
name='Flask-Validator',
version='1.4.2',
license='Mozilla Public License',
author='Jesus Roldan',
author_email='jesus.roldan@gmail.com',
description="Data validator for Flask and SQLAlchemy, working at the model level with events",
long_description=LONG_DESCRIPTION,
url='https://github.com/xeBuz/Flask-Validator',
packages=find_packages(),
platforms='any',
test_suite='nose.collector',
install_requires=[
'Flask-SQLAlchemy==2.4.4',
'email_validator==1.1.2',
'iso3166==1.0.1',
'pytz==2020.4',
'isbnlib==3.10.4',
'py-moneyed==0.8.0',
'schwifty==2020.11.0'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
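# Typical installation of the published package (illustrative; assumes the
# pinned version above is available on PyPI):
#   pip install Flask-Validator==1.4.2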
| mpl-2.0 |
DarkArtek/FFXIVITAFC | allauth/socialaccount/providers/eveonline/provider.py | 10 | 1461 | from allauth.socialaccount.app_settings import STORE_TOKENS
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class EveOnlineAccount(ProviderAccount):
def get_profile_url(self):
return 'https://gate.eveonline.com/Profile/{char_name}'.format(
char_name=self.account.extra_data.get('CharacterName'))
def get_avatar_url(self):
return ('https://image.eveonline.com/Character/'
'{char_id}_128.jpg').format(
char_id=self.account.extra_data.get('CharacterID', 1))
def to_str(self):
dflt = super(EveOnlineAccount, self).to_str()
return next(
value
for value in (
self.account.extra_data.get('CharacterName', None),
self.account.extra_data.get('CharacterID', None),
dflt
)
if value is not None
)
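# Illustrative behaviour of to_str() above (values are made up): with
# extra_data {'CharacterName': 'Some Pilot'} it returns 'Some Pilot'; with
# only {'CharacterID': 12345} it returns 12345; with neither, it falls back
# to the superclass default.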
class EveOnlineProvider(OAuth2Provider):
id = 'eveonline'
name = 'EVE Online'
account_class = EveOnlineAccount
def get_default_scope(self):
scopes = []
if STORE_TOKENS:
scopes.append('publicData')
return scopes
def extract_uid(self, data):
return str(data['CharacterOwnerHash'])
def extract_common_fields(self, data):
return dict(name=data.get('CharacterName'))
provider_classes = [EveOnlineProvider]
| unlicense |
jjmleiro/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Cipher/PKCS1_OAEP.py | 123 | 9373 | # -*- coding: utf-8 -*-
#
# Cipher/PKCS1_OAEP.py : PKCS#1 OAEP
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA encryption protocol according to PKCS#1 OAEP
See RFC3447__ or the `original RSA Labs specification`__ .
This scheme is more properly called ``RSAES-OAEP``.
As an example, a sender may encrypt a message in this way:
>>> from Crypto.Cipher import PKCS1_OAEP
>>> from Crypto.PublicKey import RSA
>>>
>>> message = 'To be encrypted'
>>> key = RSA.importKey(open('pubkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> ciphertext = cipher.encrypt(message)
At the receiver side, decryption can be done using the private part of
the RSA key:
>>> key = RSA.importKey(open('privkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> message = cipher.decrypt(ciphertext)
:undocumented: __revision__, __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125
"""
from __future__ import nested_scopes
__revision__ = "$Id$"
__all__ = [ 'new', 'PKCS1OAEP_Cipher' ]
import Crypto.Signature.PKCS1_PSS
import Crypto.Hash.SHA
from Crypto.Util.py3compat import *
import Crypto.Util.number
from Crypto.Util.number import ceil_div
from Crypto.Util.strxor import strxor
class PKCS1OAEP_Cipher:
"""This cipher can perform PKCS#1 v1.5 OAEP encryption or decryption."""
def __init__(self, key, hashAlgo, mgfunc, label):
"""Initialize this PKCS#1 OAEP cipher object.
:Parameters:
key : an RSA key object
If a private half is given, both encryption and decryption are possible.
If a public half is given, only encryption is possible.
hashAlgo : hash object
The hash function to use. This can be a module under `Crypto.Hash`
or an existing hash object created from any of such modules. If not specified,
`Crypto.Hash.SHA` (that is, SHA-1) is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
self._key = key
if hashAlgo:
self._hashObj = hashAlgo
else:
self._hashObj = Crypto.Hash.SHA
if mgfunc:
self._mgf = mgfunc
else:
self._mgf = lambda x,y: Crypto.Signature.PKCS1_PSS.MGF1(x,y,self._hashObj)
self._label = label
def can_encrypt(self):
"""Return True/1 if this cipher object can be used for encryption."""
return self._key.can_encrypt()
def can_decrypt(self):
"""Return True/1 if this cipher object can be used for decryption."""
return self._key.can_decrypt()
def encrypt(self, message):
"""Produce the PKCS#1 OAEP encryption of a message.
This function is named ``RSAES-OAEP-ENCRYPT``, and is specified in
section 7.1.1 of RFC3447.
:Parameters:
message : string
The message to encrypt, also known as plaintext. It can be of
variable length, but not longer than the RSA modulus (in bytes)
minus 2, minus twice the hash output size.
:Return: A string, the ciphertext in which the message is encrypted.
It is as long as the RSA modulus (in bytes).
:Raise ValueError:
If the RSA key length is not sufficiently long to deal with the given
message.
"""
# TODO: Verify the key is RSA
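# Worked sizing example (illustrative, not part of the original source):
# with a 2048-bit RSA key and the default SHA-1 hash, k = 2048/8 = 256
# bytes and hLen = 20 bytes, so the longest accepted plaintext is
# k - 2*hLen - 2 = 256 - 40 - 2 = 214 bytes.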
randFunc = self._key._randfunc
# See 7.1.1 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
mLen = len(message)
# Step 1b
ps_len = k-mLen-2*hLen-2
if ps_len<0:
raise ValueError("Plaintext is too long.")
# Step 2a
lHash = self._hashObj.new(self._label).digest()
# Step 2b
ps = bchr(0x00)*ps_len
# Step 2c
db = lHash + ps + bchr(0x01) + message
# Step 2d
ros = randFunc(hLen)
# Step 2e
dbMask = self._mgf(ros, k-hLen-1)
# Step 2f
maskedDB = strxor(db, dbMask)
# Step 2g
seedMask = self._mgf(maskedDB, hLen)
# Step 2h
maskedSeed = strxor(ros, seedMask)
# Step 2i
em = bchr(0x00) + maskedSeed + maskedDB
# Step 3a (OS2IP), step 3b (RSAEP), part of step 3c (I2OSP)
m = self._key.encrypt(em, 0)[0]
# Complete step 3c (I2OSP)
c = bchr(0x00)*(k-len(m)) + m
return c
def decrypt(self, ct):
"""Decrypt a PKCS#1 OAEP ciphertext.
This function is named ``RSAES-OAEP-DECRYPT``, and is specified in
section 7.1.2 of RFC3447.
:Parameters:
ct : string
The ciphertext that contains the message to recover.
:Return: A string, the original message.
:Raise ValueError:
If the ciphertext length is incorrect, or if the decryption does not
succeed.
:Raise TypeError:
If the RSA key has no private half.
"""
# TODO: Verify the key is RSA
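# Illustrative length check (matching steps 1b and 1c below): for a
# 2048-bit key the ciphertext must be exactly k = 256 bytes long, and
# k >= hLen + 2 holds for any supported hash (e.g. 256 >= 22 for SHA-1).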
# See 7.1.2 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
# Step 1b and 1c
if len(ct) != k or k<hLen+2:
raise ValueError("Ciphertext with incorrect length.")
# Step 2a (O2SIP), 2b (RSADP), and part of 2c (I2OSP)
m = self._key.decrypt(ct)
# Complete step 2c (I2OSP)
em = bchr(0x00)*(k-len(m)) + m
# Step 3a
lHash = self._hashObj.new(self._label).digest()
# Step 3b
y = em[0]
# y must be 0, but we MUST NOT check it here in order not to
# allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
maskedSeed = em[1:hLen+1]
maskedDB = em[hLen+1:]
# Step 3c
seedMask = self._mgf(maskedDB, hLen)
# Step 3d
seed = strxor(maskedSeed, seedMask)
# Step 3e
dbMask = self._mgf(seed, k-hLen-1)
# Step 3f
db = strxor(maskedDB, dbMask)
# Step 3g
valid = 1
one = db[hLen:].find(bchr(0x01))
lHash1 = db[:hLen]
if lHash1!=lHash:
valid = 0
if one<0:
valid = 0
if bord(y)!=0:
valid = 0
if not valid:
raise ValueError("Incorrect decryption.")
# Step 4
return db[hLen+one+1:]
def new(key, hashAlgo=None, mgfunc=None, label=b('')):
"""Return a cipher object `PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption.
:Parameters:
key : RSA key object
The key to use to encrypt or decrypt the message. This is a `Crypto.PublicKey.RSA` object.
Decryption is only possible if *key* is a private RSA key.
hashAlgo : hash object
The hash function to use. This can be a module under `Crypto.Hash`
or an existing hash object created from any of such modules. If not specified,
`Crypto.Hash.SHA` (that is, SHA-1) is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label)
| apache-2.0 |
PaulSchrum/CogoPy | BestFit/CogoPointAnalyst.py | 1 | 12847 | # Paul Schrum Unity ID: ptschrum
# Course Project for GIS 540
"""
Given one or more input files (required to be Polylines),
an output directory, an optional LoadCheck boolean, and an optional
spatial reference, generate one or more csv files showing the
radius (and other attributes) of each point triplet in the
polylines. The only function you need to call is analyzePolylines;
it does all the work, and the other functions and classes
exist to support it.
"""
__author__ = ['Paul Schrum']
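# Minimal usage sketch (the paths below are illustrative, not shipped with
# this module):
#   import CogoPointAnalyst
#   CogoPointAnalyst.analyzePolylines([r"C:\data\roads.gdb\SomeRoad"],
#                                     r"C:\data\output",
#                                     loadCSVtoFeatureClass=False)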
print 'starting imports'
import sys
import os
from arcpy.arcobjects.arcobjects import Point as arcPoint
import arcpy
import collections
from ExtendedPoint import ExtendedPoint
from ExtendedPoint import any_in_point_equals_any_in_other
from ExtendedPoint import compute_arc_parameters
print 'finished imports'
successList = []
def analyzePolylines(fcs, outDir, loadCSVtoFeatureClass=False,spatialRef=None):
"""
This is the only function you need to call.
Given a list of Polyline Feature classes, compute the curve data for each
curve (circular arc segment) that passes through each triplet of points.
:param fcs: Name of feature class to be processed (or list of feature classes)
:param outDir: Directory to put the resulting csv files. (CSV names are autogenerated)
:param loadCSVtoFeatureClass: Optional. Load the csv file back into ArcMap as a confidence check
:param spatialRef: Coordinate system to project point coordinates into; it also determines the length units shown
:return: None
"""
try:
validate_or_create_outDir(outDir)
except:
arcPrint("Unable to create output directory. No files processed.")
return
for fc in fcs:
try:
arcPrint("Now processing {0}".format(fc))
csvNames = processFCforCogoAnalysis(fc, outDir, spatialRef=spatialRef)
successList.extend(csvNames)
arcPrint("File(s) created: {0}".format(csvNames))
arcPrint(" ")
except NotPolylineError:
arcPrint("{0} not processed because it "
"is not a Polyline Feature Class.".format(fc))
except arcpy.ExecuteError:
arcPrint("Arc Error while processing Feature Class: {0}".format(fc))
except Exception as e:
arcPrint("Unexpected error: {0}".format(e.message))
raise
if loadCSVtoFeatureClass and len(successList) > 0:
tempPoints = 'tempPoints___'
mxd = arcpy.mapping.MapDocument('CURRENT')
dataFrame = mxd.activeDataFrame
try:
for csv in successList:
baseName = os.path.basename(csv)[:-4] + "_check"
newLayerName = arcpy.env.workspace + '/' + baseName
arcpy.AddMessage('Workspace: {0}'.format(arcpy.env.workspace))
arcpy.MakeXYEventLayer_management(csv, 'X', 'Y',
tempPoints,
spatial_reference=spatialRef)
arcpy.AddMessage('Attempting to Add Layer: {0}'.format(baseName))
arcpy.PointsToLine_management(tempPoints, newLayerName)
layerObj = arcpy.mapping.Layer(newLayerName)
arcpy.mapping.AddLayer(dataFrame, layerObj, 'BOTTOM')
arcpy.AddMessage('Added Layer: {0}'.format(baseName))
finally:
arcpy.Delete_management(tempPoints)
del mxd
else:
arcpy.AddMessage('Loading check layers was not requested.')
arcpy.AddMessage(' ')
def processFCforCogoAnalysis(fc, outputDir, spatialRef=None):
"""
Process a Polyline file to analyze its points, generating a csv file of
the same name, but saved to the output Directory.
:param fc: Feature Class to be processed.
:param outputDir: Output directory to put the resulting csv file in.
:return: list of filename(s) of the csv file that was saved (str)
"""
confirmFCisPolyline(fc)
returnList = []
alignmentsList = getListOfAlignmentsAsPoints(fc, spatialRef=spatialRef)
for num, alignment in enumerate(alignmentsList):
outputFile = _generateOutputFileName(fc, num, outputDir)
returnList.append(outputFile)
processPointsForCogo(alignment)
writeToCSV(alignment, outputFile)
return returnList
def processPointsForCogo(listOfPoints):
"""
For each triplet of points in the list of points, compute the
attribute data for the arc (circular curve segment) that starts
at point 1, passes through point 2, and ends at point 3. Then
assign the curve data to point 2 for safe keeping.
:param listOfPoints: A list of points to be analyzed. These must be ordered spatially or the results are meaningless.
:return: None
"""
for pt1, pt2, pt3 in zip(listOfPoints[:-2],
listOfPoints[1:-1],
listOfPoints[2:]):
compute_arc_parameters(pt1, pt2, pt3)
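# Illustrative trace of the triplet window above: for points [A, B, C, D]
# the zip over the three offset slices yields (A, B, C) then (B, C, D), so
# every interior point (here B and C) receives curve data.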
def writeToCSV(pointList, fileName):
"""
Write all points in the point list to the indicated file, expecting
the points to be of type ExtendedPoint.
:param pointList:
:return: None
"""
with open(fileName, 'w') as f:
headerStr = ExtendedPoint.header_list()
f.write(headerStr + '\n')
for i, point in enumerate(pointList):
writeStr = str(point)
f.write(writeStr + '\n')
def getListOfAlignmentsAsPoints(fc, spatialRef=None):
"""
Given a feature class (believed to be Polyline), convert each
contiguous line segment into an spatially ordered list of points.
:param fc: Feature Class to extract points from
:return: List of List of Points. Each List of Points represents a single
alignment.
:rtype: List of list of ExtendedPoints.
"""
# Extract all of the segments into a list of segments.
# Note: A key assumption is that points within a given segment are
# already spatially ordered
segmentList = _breakPolylinesIntoSegments(fc, spatialRef=spatialRef)
# _writeToCSV(segmentList, 'segmentListDump.csv')
alignmentList = []
while len(segmentList) > 0:
pointList = getPointListFromSegmentList(segmentList)
alignmentList.append(pointList)
return alignmentList
def getPointListFromSegmentList(segmentDeque):
"""
Gets a point list (spatially ordered) from a Deque of Polyline Segments.
(The Polyline Segments have already been reduced to just points.)
As a Polyline Segment is added to the return list, it is removed from the
segmentDeque. If more than one alignment has been passed to this function,
it will only remove the segments that are contiguous with the first segment.
Thus len(segmentDeque) will not be 0 on return.
:param segmentDeque: Deque containing all of the Polyline Segments
:return: List of Points that are spatially ordered from beginning to end.
"""
# Check for adjacency going to the right
currentSegment = segmentDeque.popleft()
orderedSegments = collections.deque()
orderedSegments.append(currentSegment)
firstSegment = currentSegment
matchingSegment = True
while matchingSegment: # Search right from firstSegment
matchingSegment = False
for i in xrange(len(segmentDeque)):
matches = any_in_point_equals_any_in_other(currentSegment.endPoints,
segmentDeque[0].endPoints)
if matches:
testSegment = segmentDeque.popleft()
if matches[0] == 0: # if current's begin point is the match
currentSegment.reverse()
if matches[1] == 1: # if test's end point is the match
testSegment.reverse()
testSegment.popleft() # eliminates duplicate point
orderedSegments.append(testSegment)
currentSegment = testSegment
matchingSegment = True
break
segmentDeque.rotate(-1) # for performance since deque is a linked list
matchingSegment = True
currentSegment = firstSegment
while matchingSegment: # Search left from firstSegment
matchingSegment = False
for i in xrange(len(segmentDeque)):
matches = any_in_point_equals_any_in_other(currentSegment.endPoints,
segmentDeque[0].endPoints)
if matches:
testSegment = segmentDeque.pop()
if matches[0] == 1: # if current's end point is the match
currentSegment.reverse()
if matches[1] == 0: # if test's begin point is the match
testSegment.reverse()
testSegment.pop() # eliminates duplicate point
orderedSegments.appendleft(testSegment)
currentSegment = testSegment
matchingSegment = True
break
segmentDeque.rotate(1) # for performance since deque is a linked list
# flatten all points to a single list
# orderPoints = [pt for seg in orderedSegments for pt in seg]
orderedPoints = []
for seg in orderedSegments:
for pt in seg:
orderedPoints.append(pt)
return orderedPoints
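# Illustrative stitching (assuming endpoints of touching segments compare
# equal): a deque holding segments [B-C], [A-B] and [C-D] is reassembled by
# the function above into the ordered point list [A, B, C, D], with the
# duplicated join points dropped.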
class _PolylineSegment(collections.deque):
"""
Convenience class to make picking the start point and
end point a little easier.
"""
@property
def endPoints(self):
return self[0], self[-1]
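# Illustrative use of the property above: for a segment holding points
# [p0, p1, p2], segment.endPoints returns (p0, p2).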
def _breakPolylinesIntoSegments(fc, spatialRef=None):
"""
Given a feature class (Polyline), returns all segments
broken out as ExtendedPoints.
:param fc: Feature Class to break into segments.
:param spatialRef: Optional spatial reference used when reading the geometry
:return: deque of all segments in the feature class
:rtype: deque (of list of segments)
"""
segmentDeque = collections.deque()
lines_cursor = arcpy.da.SearchCursor(fc, ["SHAPE@", "OBJECTID"], spatial_reference=spatialRef)
try:
for lines_row in lines_cursor:
oid = lines_row[1]
aPolylineSegment = _PolylineSegment()
geom = lines_row[0]
for partIndex in range(geom.partCount):
geomPart = geom.getPart(partIndex)
for aPoint in geomPart:
aPolylineSegment.append(ExtendedPoint(aPoint, parentPK=oid))
segmentDeque.append(aPolylineSegment)
finally:
del lines_cursor
return segmentDeque
def _generateOutputFileName(seedName, fileNumber, outDir):
"""
Takes a feature class name and generates a .csv filename from it
with the outDir path (instead of the original path).
:param seedName: Name of the feature class to base the output file name on
:param fileNumber: Number appended to the file name when greater than zero
:param outDir: Directory to prepend to the seedName
:rtype: str
"""
seedName_ = seedName
if seedName.endswith('.shp'):
seedName_ = seedName[:-4]
if fileNumber > 0:
fn = str(fileNumber)
else:
fn = ""
return outDir + '/' + os.path.basename(seedName_) + fn + '.csv'
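# Worked examples (illustrative inputs): with seedName 'roads.shp',
# fileNumber 0 and outDir 'C:/out' the result is 'C:/out/roads.csv';
# with fileNumber 2 it is 'C:/out/roads2.csv'.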
class NotPolylineError(TypeError):
"""
Indicates that the given file or feature class is not a Polyline type.
"""
pass
def confirmFCisPolyline(fc):
"""
If the parameter fc is an ArcGIS polyline, the function does nothing.
If it is not a polyline, it raises NotPolylineError
:param fc: Feature class or shapefile to check
:return: None
:raises: NotPolylineError
"""
desc = arcpy.Describe(fc)
if not (desc.dataType == 'ShapeFile' or desc.dataType == 'FeatureClass'):
raise NotPolylineError
if desc.shapeType != 'Polyline':
raise NotPolylineError
def validate_or_create_outDir(outDir):
"""
If the output directory does not already exist, create it.
:param outDir: Path of the output directory to validate or create
:return: None
"""
if not os.path.exists(outDir):
os.makedirs(outDir)
if __name__ == '__main__':
"""
Code for testing outside of ArcMap.
"""
arcpy.env.workspace = r"C:\GISdata\SelectedRoads.gdb"
featureClasses = [r'C:\GISdata\SelectedRoads.gdb\LeesvilleRoadRaleigh',
r'C:\GISdata\SelectedRoads.gdb\CatesAvenue',
r'C:\GISdata\SelectedRoads.gdb\DanAllenDrive',
r'C:\GISdata\SelectedRoads.gdb\FaucetteDrive',
r'C:\GISdata\SelectedRoads.gdb\MorrillDrive',
]
neuseRiver = [r"C:\SourceModules\CogoPy\data\other\Neuse401.shp"]
outputDir = r"C:\GISdata\testOutput"
analyzePolylines(neuseRiver,
# analyzePolylines(featureClasses,
outputDir,
loadCSVtoFeatureClass=False,
spatialRef=None)
def arcPrint(aString):
print aString
arcpy.AddMessage(aString)
| lgpl-3.0 |
jdubs/cloud-custodian | c7n/resources/sns.py | 1 | 1600 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.filters import CrossAccountAccessFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('sns')
class SNS(QueryResourceManager):
class resource_type(object):
service = 'sns'
type = 'topic'
enum_spec = ('list_topics', 'Topics', None)
detail_spec = (
'get_topic_attributes', 'TopicArn', 'TopicArn', 'Attributes')
id = 'TopicArn'
filter_name = None
filter_type = None
name = 'DisplayName'
date = None
dimension = 'TopicName'
default_report_fields = (
'TopicArn',
'DisplayName',
'SubscriptionsConfirmed',
'SubscriptionsPending',
'SubscriptionsDeleted'
)
@SNS.filter_registry.register('cross-account')
class SNSCrossAccount(CrossAccountAccessFilter):
permissions = ('sns:GetTopicAttributes',)
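# Example policy exercising this resource and filter (a sketch; the policy
# name is illustrative):
#
#   policies:
#     - name: sns-cross-account-access
#       resource: sns
#       filters:
#         - type: cross-account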
| apache-2.0 |
2014cdbg4/2015finalexam | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_loader.py | 738 | 49593 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
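            # (x > y) - (x < y) reproduces Python 2's cmp(x, y); negating it
            # yields a reverse sort and still works on Python 3, where cmp()
            # was removed.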
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
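    # Editor's note (illustrative): any callable that accepts an iterable of
    # tests can stand in for suiteClass -- the `list` examples above rely on
    # exactly that -- so the container type of every loadTestsFrom* result
    # can be swapped without subclassing the loader.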
| gpl-3.0 |
zeroc0d3/docker-lab | application/rootfs/usr/lib/python2.7/dist-packages/powerline/lint/markedjson/error.py | 33 | 6948 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import re
from powerline.lib.encoding import get_preferred_output_encoding
NON_PRINTABLE_STR = (
'[^'
# ASCII control characters: 0x00-0x19
+ '\t\n' # Tab, newline: allowed ASCII control characters
+ '\x20-\x7E' # ASCII printable characters
# Unicode control characters: 0x7F-0x9F
+ '\u0085' # Allowed unicode control character: next line character
+ '\u00A0-\uD7FF'
# Surrogate escapes: 0xD800-0xDFFF
+ '\uE000-\uFFFD'
+ ((
'\uD800-\uDFFF'
) if sys.maxunicode < 0x10FFFF else (
'\U00010000-\U0010FFFF'
))
+ ']'
+ ((
# Paired surrogate escapes: allowed in UCS-2 builds as the only way to
# represent characters above 0xFFFF. Only paired variant is allowed.
'|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]'
+ '|[\uD800-\uDBFF](?![\uDC00-\uDFFF])'
) if sys.maxunicode < 0x10FFFF else (
''
))
)
NON_PRINTABLE_RE = re.compile(NON_PRINTABLE_STR)
def repl(s):
return '<x%04x>' % ord(s.group())
def strtrans(s):
return NON_PRINTABLE_RE.sub(repl, s.replace('\t', '>---'))
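# Illustrative only (not part of the original module): given the rules above,
# strtrans('a\tb\x07') yields 'a>---b<x0007>' -- tabs become '>---' and every
# other non-printable character becomes an '<xNNNN>' escape.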
class Mark:
def __init__(self, name, line, column, buffer, pointer, old_mark=None, merged_marks=None):
self.name = name
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
self.old_mark = old_mark
self.merged_marks = merged_marks or []
def copy(self):
return Mark(self.name, self.line, self.column, self.buffer, self.pointer, self.old_mark, self.merged_marks[:])
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start - 1] not in '\0\n':
start -= 1
if self.pointer - start > max_length / 2 - 1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\n':
end += 1
if end - self.pointer > max_length / 2 - 1:
tail = ' ... '
end -= 5
break
snippet = [self.buffer[start:self.pointer], self.buffer[self.pointer], self.buffer[self.pointer + 1:end]]
snippet = [strtrans(s) for s in snippet]
return (
' ' * indent + head + ''.join(snippet) + tail + '\n'
+ ' ' * (indent + len(head) + len(snippet[0])) + '^'
)
def advance_string(self, diff):
ret = self.copy()
# FIXME Currently does not work properly with escaped strings.
ret.column += diff
ret.pointer += diff
return ret
def set_old_mark(self, old_mark):
if self is old_mark:
return
checked_marks = set([id(self)])
older_mark = old_mark
while True:
if id(older_mark) in checked_marks:
raise ValueError('Trying to set recursive marks')
checked_marks.add(id(older_mark))
older_mark = older_mark.old_mark
if not older_mark:
break
self.old_mark = old_mark
def set_merged_mark(self, merged_mark):
self.merged_marks.append(merged_mark)
def to_string(self, indent=0, head_text='in ', add_snippet=True):
mark = self
where = ''
processed_marks = set()
while mark:
indentstr = ' ' * indent
where += ('%s %s"%s", line %d, column %d' % (
indentstr, head_text, mark.name, mark.line + 1, mark.column + 1))
if add_snippet:
snippet = mark.get_snippet(indent=(indent + 4))
if snippet:
where += ':\n' + snippet
if mark.merged_marks:
where += '\n' + indentstr + ' with additionally merged\n'
where += mark.merged_marks[0].to_string(indent + 4, head_text='', add_snippet=False)
for mmark in mark.merged_marks[1:]:
where += '\n' + indentstr + ' and\n'
where += mmark.to_string(indent + 4, head_text='', add_snippet=False)
if add_snippet:
processed_marks.add(id(mark))
if mark.old_mark:
where += '\n' + indentstr + ' which replaced value\n'
indent += 4
mark = mark.old_mark
if id(mark) in processed_marks:
raise ValueError('Trying to dump recursive mark')
return where
if sys.version_info < (3,):
def __str__(self):
return self.to_string().encode('utf-8')
def __unicode__(self):
return self.to_string()
else:
def __str__(self):
return self.to_string()
def __eq__(self, other):
return self is other or (
self.name == other.name
and self.line == other.line
and self.column == other.column
)
if sys.version_info < (3,):
def echoerr(**kwargs):
stream = kwargs.pop('stream', sys.stderr)
stream.write('\n')
stream.write((format_error(**kwargs) + '\n').encode(get_preferred_output_encoding()))
else:
def echoerr(**kwargs):
stream = kwargs.pop('stream', sys.stderr)
stream.write('\n')
stream.write(format_error(**kwargs) + '\n')
def format_error(context=None, context_mark=None, problem=None, problem_mark=None, note=None, indent=0):
lines = []
indentstr = ' ' * indent
if context is not None:
lines.append(indentstr + context)
if (
context_mark is not None
and (
problem is None or problem_mark is None
or context_mark != problem_mark
)
):
lines.append(context_mark.to_string(indent=indent))
if problem is not None:
lines.append(indentstr + problem)
if problem_mark is not None:
lines.append(problem_mark.to_string(indent=indent))
if note is not None:
lines.append(indentstr + note)
return '\n'.join(lines)
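# Sketch of the layout format_error() produces (hypothetical input values,
# for illustration only):
#
#   while parsing a mapping
#    in "theme.json", line 3, column 5:
#           "segments": [,]
#                      ^
#   expected a key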
class MarkedError(Exception):
def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None):
Exception.__init__(self, format_error(context, context_mark, problem, problem_mark, note))
class EchoErr(object):
__slots__ = ('echoerr', 'logger', 'indent')
def __init__(self, echoerr, logger, indent=0):
self.echoerr = echoerr
self.logger = logger
self.indent = indent
def __call__(self, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault('indent', self.indent)
self.echoerr(**kwargs)
class DelayedEchoErr(EchoErr):
__slots__ = ('echoerr', 'logger', 'errs', 'message', 'separator_message', 'indent', 'indent_shift')
def __init__(self, echoerr, message='', separator_message=''):
super(DelayedEchoErr, self).__init__(echoerr, echoerr.logger)
self.errs = [[]]
self.message = message
self.separator_message = separator_message
self.indent_shift = (4 if message or separator_message else 0)
self.indent = echoerr.indent + self.indent_shift
def __call__(self, **kwargs):
kwargs = kwargs.copy()
kwargs['indent'] = kwargs.get('indent', 0) + self.indent
self.errs[-1].append(kwargs)
def next_variant(self):
self.errs.append([])
def echo_all(self):
if self.message:
self.echoerr(problem=self.message, indent=(self.indent - self.indent_shift))
for variant in self.errs:
if not variant:
continue
if self.separator_message and variant is not self.errs[0]:
self.echoerr(problem=self.separator_message, indent=(self.indent - self.indent_shift))
for kwargs in variant:
self.echoerr(**kwargs)
def __nonzero__(self):
return not not self.errs
__bool__ = __nonzero__
| mit |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/cherrypy/cherrypy/test/test_json.py | 42 | 2541 | import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import json
class JsonTest(helper.CPWebCase):
def setup_server():
class Root(object):
def plain(self):
return 'hello'
plain.exposed = True
def json_string(self):
return 'hello'
json_string.exposed = True
json_string._cp_config = {'tools.json_out.on': True}
def json_list(self):
return ['a', 'b', 42]
json_list.exposed = True
json_list._cp_config = {'tools.json_out.on': True}
def json_dict(self):
return {'answer': 42}
json_dict.exposed = True
json_dict._cp_config = {'tools.json_out.on': True}
def json_post(self):
if cherrypy.request.json == [13, 'c']:
return 'ok'
else:
return 'nok'
json_post.exposed = True
json_post._cp_config = {'tools.json_in.on': True}
root = Root()
cherrypy.tree.mount(root)
setup_server = staticmethod(setup_server)
def test_json_output(self):
if json is None:
self.skip("json not found ")
return
self.getPage("/plain")
self.assertBody("hello")
self.getPage("/json_string")
self.assertBody('"hello"')
self.getPage("/json_list")
self.assertBody('["a", "b", 42]')
self.getPage("/json_dict")
self.assertBody('{"answer": 42}')
def test_json_input(self):
if json is None:
self.skip("json not found ")
return
body = '[13, "c"]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertBody('ok')
body = '[13, "c"]'
headers = [('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(415, 'Expected an application/json content type')
body = '[13, -]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(400, 'Invalid JSON document')
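# A minimal sketch (editor's addition; mounting details are illustrative) of
# enabling the same tools through a config dict instead of _cp_config:
#
#   conf = {'/': {'tools.json_in.on': True, 'tools.json_out.on': True}}
#   cherrypy.quickstart(Root(), '/', conf)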
| lgpl-3.0 |
bthirion/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
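# Typical invocation (illustrative): this configuration is normally consumed
# by the top-level scikit-learn setup.py, e.g. via
# `python setup.py build_ext --inplace` run from the repository root, rather
# than by executing this file directly.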
| bsd-3-clause |
sungkim11/mhargadh | django/contrib/gis/sitemaps/views.py | 250 | 4342 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils.encoding import smart_str
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
def index(request, sitemaps):
"""
This view generates a sitemap index that uses the proper view
for resolving geographic section sitemap URLs.
"""
current_site = get_current_site(request)
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None):
"""
This view generates a sitemap with additional geographic
elements defined by Google.
"""
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
current_site = get_current_site(request)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page=page, site=current_site))
else:
urls.extend(site.get_urls(page=page, site=current_site))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
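# Illustrative URLconf wiring for the two views above (editor's sketch; the
# `sitemaps` dict is an assumption). In a urls.py of this Django era:
#
#   from django.conf.urls.defaults import patterns
#
#   urlpatterns = patterns('django.contrib.gis.sitemaps.views',
#       (r'^sitemap\.xml$', 'index', {'sitemaps': sitemaps}),
#       (r'^sitemap-(?P<section>\w+)\.xml$', 'sitemap', {'sitemaps': sitemaps}),
#   )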
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
klass = get_model(label, model)
if not klass:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
info = klass._meta.get_field_by_name(field_name)
if not isinstance(info[0], GeometryField):
raise Exception
except:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.ops.postgis:
# PostGIS will take care of transformation.
placemarks = klass._default_manager.using(using).kml(field_name=field_name)
else:
# There's no KML method on Oracle or MySQL, so we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.ops.oracle:
qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
mod.kml = getattr(mod, field_name).kml
placemarks.append(mod)
    # Choose the render function (plain KML or compressed KMZ) and build the response.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places' : placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
| bsd-3-clause |
HWal/paparazzi | sw/ground_segment/python/real_time_plot/plotframe.py | 95 | 11217 | #Boa:Frame:PlotFrame
from __future__ import division
import wx
import plotpanel
_INITIAL_TIME_VALUE_ = 0.2 # initial refresh rate in seconds
def create(parent):
return PlotFrame(parent)
[wxID_PLOTFRAME, wxID_PLOTFRAMECHECKAUTOSCALE, wxID_PLOTFRAMEEDITMAX, wxID_PLOTFRAMEEDITMIN, wxID_PLOTFRAMEEDITTIME, wxID_PLOTFRAMEPANEL1, wxID_PLOTFRAMESLIDERTIME, wxID_PLOTFRAMESTATICTEXT1, wxID_PLOTFRAMESTATICTEXT2, wxID_PLOTFRAMESTATICTEXT3] = [wx.NewId() for _init_ctrls in range(10)]
[wxID_PLOTFRAMEMENU1ITEM_ADD, wxID_PLOTFRAMEMENU1ITEM_PAUSE, wxID_PLOTFRAMEMENU1ITEM_RESET] = [wx.NewId() for _init_coll_menuPlot_Items in range(3)]
class PlotFrame(wx.Frame):
def _init_coll_boxSizer1_Items(self, parent):
# generated method, don't edit
parent.AddSizer(self.boxSizer2, 0, border=0, flag=0)
parent.AddWindow(self.panel1, 1, border=0, flag=wx.EXPAND)
def _init_coll_boxSizer2_Items(self, parent):
# generated method, don't edit
parent.AddWindow(self.checkAutoScale, 0, border=0, flag=wx.ALIGN_CENTER_VERTICAL)
parent.AddWindow(self.staticText1, 0, border=0, flag=wx.ALIGN_CENTER_VERTICAL)
parent.AddWindow(self.editMin, 0, border=0, flag=0)
parent.AddWindow(self.staticText2, 0, border=0, flag=wx.ALIGN_CENTER_VERTICAL)
parent.AddWindow(self.editMax, 0, border=0, flag=0)
parent.AddWindow(self.staticText3, 0, border=0, flag=wx.ALIGN_CENTER_VERTICAL)
parent.AddWindow(self.sliderTime, 0, border=0, flag=wx.ALIGN_CENTER_VERTICAL)
parent.AddWindow(self.editTime, 0, border=0, flag=0)
def _init_coll_menuBar1_Menus(self, parent):
# generated method, don't edit
parent.Append(menu=self.menuPlot, title=u'Plot')
parent.Append(menu=self.menuCurves, title=u'Curves')
def _init_coll_menuPlot_Items(self, parent):
# generated method, don't edit
parent.Append(help=u'Add plots', id=wxID_PLOTFRAMEMENU1ITEM_ADD, kind=wx.ITEM_NORMAL, text=u'&Add\tCtrl+A')
parent.Append(help=u'Reset plot scale', id=wxID_PLOTFRAMEMENU1ITEM_RESET, kind=wx.ITEM_NORMAL, text=u'&Reset\tCtrl+L')
parent.Append(help=u'Pause the plot', id=wxID_PLOTFRAMEMENU1ITEM_PAUSE, kind=wx.ITEM_CHECK, text=u'&Pause\tCtrl+P')
self.Bind(wx.EVT_MENU, self.OnMenu1Item_addMenu, id=wxID_PLOTFRAMEMENU1ITEM_ADD)
self.Bind(wx.EVT_MENU, self.OnMenu1Item_resetMenu, id=wxID_PLOTFRAMEMENU1ITEM_RESET)
self.Bind(wx.EVT_MENU, self.OnMenu1Item_pauseMenu, id=wxID_PLOTFRAMEMENU1ITEM_PAUSE)
def _init_sizers(self):
# generated method, don't edit
self.boxSizer1 = wx.BoxSizer(orient=wx.VERTICAL)
self.boxSizer2 = wx.BoxSizer(orient=wx.HORIZONTAL)
self._init_coll_boxSizer1_Items(self.boxSizer1)
self._init_coll_boxSizer2_Items(self.boxSizer2)
self.SetSizer(self.boxSizer1)
def _init_utils(self):
# generated method, don't edit
self.menuPlot = wx.Menu(title='')
self.menuCurves = wx.Menu(title='')
self.menuBar1 = wx.MenuBar()
self._init_coll_menuPlot_Items(self.menuPlot)
self._init_coll_menuBar1_Menus(self.menuBar1)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_PLOTFRAME, name=u'PlotFrame', parent=prnt, pos=wx.Point(476, 365), size=wx.Size(800, 225), style=wx.DEFAULT_FRAME_STYLE, title=u'Real Time Plot')
self._init_utils()
self.SetMenuBar(self.menuBar1)
self.SetClientSize(wx.Size(800, 225))
self.checkAutoScale = wx.CheckBox(id=wxID_PLOTFRAMECHECKAUTOSCALE, label=u'Auto scale', name=u'checkAutoScale', parent=self, pos=wx.Point(0, 2), size=wx.Size(93, 22), style=0)
self.checkAutoScale.SetValue(True)
self.checkAutoScale.Bind(wx.EVT_CHECKBOX, self.OnCheckAutoScaleCheckbox, id=wxID_PLOTFRAMECHECKAUTOSCALE)
self.staticText1 = wx.StaticText(id=wxID_PLOTFRAMESTATICTEXT1, label=u'min', name='staticText1', parent=self, pos=wx.Point(93, 5), size=wx.Size(68, 17), style=wx.ALIGN_RIGHT)
self.editMin = wx.TextCtrl(id=wxID_PLOTFRAMEEDITMIN, name=u'editMin', parent=self, pos=wx.Point(161, 0), size=wx.Size(80, 27), style=0, value=u'')
self.editMin.Enable(False)
self.editMin.Bind(wx.EVT_TEXT, self.OnEditMinText, id=wxID_PLOTFRAMEEDITMIN)
self.staticText2 = wx.StaticText(id=wxID_PLOTFRAMESTATICTEXT2, label=u'max', name='staticText2', parent=self, pos=wx.Point(241, 5), size=wx.Size(68, 17), style=wx.ALIGN_RIGHT)
self.editMax = wx.TextCtrl(id=wxID_PLOTFRAMEEDITMAX, name=u'editMax', parent=self, pos=wx.Point(309, 0), size=wx.Size(80, 27), style=0, value=u'')
self.editMax.Enable(False)
self.editMax.Bind(wx.EVT_TEXT, self.OnEditMaxText, id=wxID_PLOTFRAMEEDITMAX)
self.staticText3 = wx.StaticText(id=wxID_PLOTFRAMESTATICTEXT3, label=u'interval', name='staticText3', parent=self, pos=wx.Point(389, 5), size=wx.Size(68, 17), style=wx.ALIGN_RIGHT)
        self.sliderTime = wx.Slider(id=wxID_PLOTFRAMESLIDERTIME, maxValue=1000, minValue=1, name=u'sliderTime', parent=self, pos=wx.Point(457, 4), size=wx.Size(200, 19), style=wx.SL_HORIZONTAL, value=int(_INITIAL_TIME_VALUE_ * 1000))
self.sliderTime.SetLabel(u'')
self.sliderTime.Bind(wx.EVT_COMMAND_SCROLL, self.OnSliderTimeCommandScroll, id=wxID_PLOTFRAMESLIDERTIME)
self.editTime = wx.TextCtrl(id=wxID_PLOTFRAMEEDITTIME, name=u'editTime', parent=self, pos=wx.Point(657, 0), size=wx.Size(80, 27), style=wx.TE_PROCESS_ENTER, value="%0.2f" % _INITIAL_TIME_VALUE_)
self.editTime.Bind(wx.EVT_TEXT_ENTER, self.OnEditTimeTextEnter, id=wxID_PLOTFRAMEEDITTIME)
self.panel1 = wx.Panel(id=wxID_PLOTFRAMEPANEL1, name='panel1', parent=self, pos=wx.Point(0, 27), size=wx.Size(800, 200), style=wx.TAB_TRAVERSAL)
self._init_sizers()
def __init__(self, parent):
self._init_ctrls(parent)
self.canvas = plotpanel.create(self.panel1, self)
self.dynamic_menus = {}
self.Bind( wx.EVT_CLOSE, self.OnClose)
self.Bind( wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.panel1.Bind( wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.panel1.Bind( wx.EVT_SIZE, self.OnSize)
def OnRightDown(self, event):
self.PopupMenu(self.menuPlot, event.GetPosition())
def AddPlot(self, ac_id, message, field, color = None, x_axis = False):
self.canvas.BindCurve(ac_id, message, field, color, x_axis)
def SetMinMax(self, min_, max_):
self.editMin.SetValue(str(min_))
self.editMax.SetValue(str(max_))
def OnClose(self, event):
# need to forward close to canvas so that ivy is shut down, otherwise ivy hangs the shutdown
self.canvas.OnClose()
self.Destroy()
def OnErase(self, event):
pass
def OnSize(self, event):
self.canvas.OnSize( event.GetSize())
def OnSliderTimeCommandScroll(self, event):
value = event.GetPosition()
self.canvas.SetPlotInterval(value)
self.editTime.SetValue( '%.3f' % (value/1000.0))
def OnEditTimeTextEnter(self, event):
try:
value = int(float(event.GetString()) * 1000.0)
except:
value = 0
if value < 1 or value > 1000:
value = '%.3f' % (self.sliderTime.GetValue() / 1000.0)
self.editTime.SetValue( value)
return
self.canvas.SetPlotInterval(value)
self.sliderTime.SetValue(value)
def OnCheckAutoScaleCheckbox(self, event):
value = self.checkAutoScale.GetValue()
self.editMin.Enable( not value)
self.editMax.Enable( not value)
self.canvas.SetAutoScale(value)
def OnMenu1Item_addMenu(self, event):
self.canvas.ShowMessagePicker(self)
def OnMenu1Item_resetMenu(self, event):
self.canvas.ResetScale()
def OnMenu1Item_pauseMenu(self, event):
self.canvas.Pause(event.IsChecked())
def AddCurve(self, menu_id, title, use_as_x = False):
curveMenu = wx.Menu(title='')
curveMenu.Append(help=u'Delete plot', id=menu_id*10, kind=wx.ITEM_NORMAL, text=u'&Delete')
curveMenu.Append(help=u'Offset plot', id=menu_id*10+1, kind=wx.ITEM_NORMAL, text=u'&Offset')
curveMenu.Append(help=u'Scale plot', id=menu_id*10+2, kind=wx.ITEM_NORMAL, text=u'&Scale')
curveMenu.Append(help=u'Plot data as messages are received rather than async', id=menu_id*10+3, kind=wx.ITEM_CHECK, text=u'&Real time plot')
curveMenu.Append(help=u'Use this curve as the X-axis rather than a time based scale', id=menu_id*10+4, kind=wx.ITEM_CHECK, text=u'&Use as X-axis')
curveMenu.Check(id=menu_id*10+4, check=bool(use_as_x))
self.Bind(wx.EVT_MENU, self.OnMenuDeleteCurve, id=menu_id*10)
self.Bind(wx.EVT_MENU, self.OnMenuOffsetCurve, id=menu_id*10+1)
self.Bind(wx.EVT_MENU, self.OnMenuScaleCurve, id=menu_id*10+2)
self.Bind(wx.EVT_MENU, self.OnMenuRealTime, id=menu_id*10+3)
self.Bind(wx.EVT_MENU, self.OnMenuUseAsXAxis, id=menu_id*10+4)
self.dynamic_menus[menu_id] = self.menuCurves.AppendSubMenu(submenu=curveMenu, text=title)
def OnMenuDeleteCurve(self, event):
menu_id = event.GetId() // 10
item = self.dynamic_menus[menu_id]
self.canvas.RemovePlot(menu_id)
self.menuCurves.DestroyItem(item)
del self.dynamic_menus[menu_id]
def OnMenuOffsetCurve(self, event):
menu_id = (event.GetId()-1) // 10
default_value = str(self.canvas.FindPlot(menu_id).offset)
value = wx.GetTextFromUser("Enter a value to offset the plot", "Offset", default_value)
try:
value = float(value)
self.canvas.OffsetPlot(menu_id, value)
except:
pass
def OnMenuScaleCurve(self, event):
menu_id = (event.GetId()-2) // 10
default_value = str(self.canvas.FindPlot(menu_id).scale)
value = wx.GetTextFromUser("Enter a factor to scale the plot", "Scale", default_value)
try:
value = float(value)
self.canvas.ScalePlot(menu_id, value)
except:
pass
def OnMenuRealTime(self,event):
menu_id = (event.GetId()-3) // 10
self.canvas.SetRealTime(menu_id, event.IsChecked())
def OnMenuUseAsXAxis(self,event):
event_id = event.GetId()
menu_id = (event_id-4) // 10
value = event.IsChecked()
if value:
# go through and clear the checks from any other curves
for i in self.dynamic_menus:
for item in self.dynamic_menus[i].GetSubMenu().GetMenuItems():
                    if item.GetLabel() == u'Use as X-axis' and event_id != item.GetId():  # label text without the '&' mnemonic
item.Check(False)
self.canvas.SetXAxis(menu_id)
else:
self.canvas.ClearXAxis()
def OnEditMinText(self, event):
try:
value = float(event.GetString())
self.canvas.SetMin(value)
except:
pass
def OnEditMaxText(self, event):
try:
value = float(event.GetString())
self.canvas.SetMax(value)
except:
pass
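# Minimal manual launch sketch (editor's addition, not part of the original
# module; in Paparazzi the frame is normally created from the application's
# entry script):
#
#   if __name__ == '__main__':
#       app = wx.PySimpleApp()
#       create(None).Show()
#       app.MainLoop()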
| gpl-2.0 |
noba3/KoTos | addons/script.module.youtube.dl/lib/youtube_dl/extractor/prosiebensat1.py | 16 | 13998 | # encoding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
unified_strdate,
)
class ProSiebenSat1IE(InfoExtractor):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)'
_TESTS = [
{
# Tests changes introduced in https://github.com/rg3/youtube-dl/pull/6242
# in response to fixing https://github.com/rg3/youtube-dl/issues/6215:
# - malformed f4m manifest support
# - proper handling of URLs starting with `https?://` in 2.0 manifests
# - recursive child f4m manifests extraction
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'mp4',
'title': 'Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
'info_dict': {
'id': '2429369',
'ext': 'mp4',
'title': 'Countdown für die Autowerkstatt',
'description': 'md5:809fc051a457b5d8666013bc40698817',
'upload_date': '20140223',
'duration': 2595.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
'upload_date': '20140122',
'duration': 245.32,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
'info_dict': {
'id': '2906572',
'ext': 'mp4',
'title': 'Im Interview: Kai Wiesinger',
'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
'upload_date': '20140203',
'duration': 522.56,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
'info_dict': {
'id': '2992323',
'ext': 'mp4',
'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
'description': 'md5:2669cde3febe9bce13904f701e774eb6',
'upload_date': '20141014',
'duration': 2410.44,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
'info_dict': {
'id': '3004256',
'ext': 'mp4',
'title': 'Schalke: Tönnies möchte Raul zurück',
'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
'upload_date': '20140226',
'duration': 228.96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
'ext': 'mp4',
'title': 'Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
'info_dict': {
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
'info_dict': {
'id': '439664',
'title': 'Episode 8 - Ganze Folge - Playlist',
'description': 'md5:63b8963e71f481782aeea877658dec84',
},
'playlist_count': 2,
},
]
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
r'clip[iI]d=(\d+)',
r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<h1 class="att-name">\s*(.+?)</h1>',
r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p class="att-description">\s*(.+?)\s*</p>',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
]
_PAGE_TYPE_REGEXES = [
r'<meta name="page_type" content="([^"]+)">',
r"'itemType'\s*:\s*'([^']*)'",
]
_PLAYLIST_ID_REGEXES = [
r'content[iI]d=(\d+)',
r"'itemId'\s*:\s*'([^']*)'",
]
_PLAYLIST_CLIP_REGEXES = [
r'(?s)data-qvt=.+?<a href="([^"]+)"',
]
def _extract_clip(self, url, webpage):
clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
access_token = 'prosieben'
client_name = 'kolibri-2.0.19-splec4'
client_location = url
videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
'access_token': access_token,
'client_location': client_location,
'client_name': client_name,
'ids': clip_id,
})
video = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')[0]
if video.get('is_protected') is True:
raise ExtractorError('This video is DRM protected.', expected=True)
duration = float_or_none(video.get('duration'))
source_ids = [source['id'] for source in video['sources']]
source_ids_str = ','.join(map(str, source_ids))
g = '01!8d8F_)r9]4s[qeuXfP%'
client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
.encode('utf-8')).hexdigest()
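        # The API expects a client_id derived from the salt `g`: its first
        # two characters followed by the SHA1 hex digest of the concatenated,
        # salt-interleaved request parameters.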
sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
}))
sources = self._download_json(sources_api_url, clip_id, 'Downloading sources JSON')
server_id = sources['server_id']
client_id = g[:2] + sha1(''.join([g, clip_id, access_token, server_id,
client_location, source_ids_str, g, client_name])
.encode('utf-8')).hexdigest()
url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
'server_id': server_id,
'source_ids': source_ids_str,
}))
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_regex(
self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
formats = []
urls_sources = urls['sources']
if isinstance(urls_sources, dict):
urls_sources = urls_sources.values()
def fix_bitrate(bitrate):
bitrate = int_or_none(bitrate)
if not bitrate:
return None
return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
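        # e.g. fix_bitrate(2000000) -> 2000 (kbit/s); a value that is not a
        # whole multiple of 1000, such as 1500, is passed through unchanged.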
for source in urls_sources:
protocol = source['protocol']
source_url = source['url']
if protocol == 'rtmp' or protocol == 'rtmpe':
mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
if not mobj:
continue
path = mobj.group('path')
mp4colon_index = path.rfind('mp4:')
app = path[:mp4colon_index]
play_path = path[mp4colon_index:]
formats.append({
'url': '%s/%s' % (mobj.group('url'), app),
'app': app,
'play_path': play_path,
'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
'page_url': 'http://www.prosieben.de',
'vbr': fix_bitrate(source['bitrate']),
'ext': 'mp4',
'format_id': '%s_%s' % (source['cdn'], source['bitrate']),
})
elif 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
formats.extend(self._extract_f4m_formats(source_url, clip_id))
else:
formats.append({
'url': source_url,
'vbr': fix_bitrate(source['bitrate']),
})
self._sort_formats(formats)
return {
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
def _extract_playlist(self, url, webpage):
playlist_id = self._html_search_regex(
self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
for regex in self._PLAYLIST_CLIP_REGEXES:
playlist_clips = re.findall(regex, webpage)
if playlist_clips:
title = self._html_search_regex(
self._TITLE_REGEXES, webpage, 'title')
description = self._html_search_regex(
self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
entries = [
self.url_result(
re.match('(.+?//.+?)/', url).group(1) + clip_path,
'ProSiebenSat1')
for clip_path in playlist_clips]
return self.playlist_result(entries, playlist_id, title, description)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page_type = self._search_regex(
self._PAGE_TYPE_REGEXES, webpage,
'page type', default='clip').lower()
if page_type == 'clip':
return self._extract_clip(url, webpage)
elif page_type == 'playlist':
return self._extract_playlist(url, webpage)
| gpl-2.0 |
djvoleur/V_920P_BOF7 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
chokribr/invenioold | modules/miscutil/lib/sequtils_texkey.py | 13 | 12225 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.sequtils import SequenceGenerator
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, create_record
# Imports related to the texkey generation daemon
from invenio.search_engine import perform_request_search, get_record
from invenio.bibrecord import field_get_subfield_values, \
record_get_field_instances, \
record_add_field, print_rec
from invenio.config import CFG_TMPSHAREDDIR, CFG_VERSION
from invenio.bibtask import task_init, write_message, \
task_low_level_submission, task_update_progress, \
task_sleep_now_if_required
from invenio.dbquery import run_sql
import string
import random
import time
import os
from unidecode import unidecode
from tempfile import mkstemp
DESCRIPTION = """
Generate TexKeys in records without one
"""
HELP_MESSAGE = """
Examples:
(run a daemon job every hour)
bibtex -s 1h
"""
PREFIX = "bibtex"
TEXKEY_MAXTRIES = 10
class TexkeyNoAuthorError(Exception):
""" Error raised when the record does not have a main author or a
collaboration field
"""
pass
class TexkeyNoYearError(Exception):
""" Error raised when the record does not have a year field
"""
pass
def _texkey_random_chars(recid, use_random=False):
""" Generate the three random chars for the end of the texkey """
if recid and not use_random:
# Legacy random char generation from Spires
texkey_third_part = chr((recid % 26) + 97) + \
chr(((recid / 26) % 26) + 97) + \
                            chr(((recid / 676) % 26) + 97)
else:
letters = string.letters.lower()
texkey_third_part = ""
for _ in range(3):
texkey_third_part += random.choice(letters)
return texkey_third_part
class TexkeySeq(SequenceGenerator):
"""
texkey sequence generator
"""
seq_name = 'texkey'
def _next_value(self, recid=None, xml_record=None, bibrecord=None):
"""
Returns the next texkey for the given recid
@param recid: id of the record where the texkey will be generated
@type recid: int
@param xml_record: record in xml format
@type xml_record: string
@return: next texkey for the given recid.
@rtype: string
@raises TexkeyNoAuthorError: No main author (100__a) or collaboration
(710__g) in the given recid
"""
if recid is None and xml_record is not None:
bibrecord = create_record(xml_record)[0]
elif bibrecord is None:
bibrecord = get_bibrecord(recid)
main_author = record_get_field_value(bibrecord,
tag="100",
ind1="",
ind2="",
code="a")
if not main_author:
# Try with collaboration name
main_author = record_get_field_value(bibrecord,
tag="710",
ind1="",
ind2="",
code="g")
main_author = "".join([p for p in main_author.split()
if p.lower() != "collaboration"])
if not main_author:
# Try with corporate author
main_author = record_get_field_value(bibrecord,
tag="100",
ind1="",
ind2="",
code="a")
if not main_author:
raise TexkeyNoAuthorError
# Remove utf-8 special characters
main_author = unidecode(main_author.decode('utf-8'))
try:
texkey_first_part = main_author.split(',')[0].replace(" ", "")
        except IndexError:
texkey_first_part = ""
year = record_get_field_value(bibrecord,
tag="269",
ind1="",
ind2="",
code="c")
if not year:
year = record_get_field_value(bibrecord,
tag="260",
ind1="",
ind2="",
code="c")
if not year:
year = record_get_field_value(bibrecord,
tag="773",
ind1="",
ind2="",
code="y")
if not year:
year = record_get_field_value(bibrecord,
tag="502",
ind1="",
ind2="",
code="d")
if not year:
raise TexkeyNoYearError
try:
texkey_second_part = year.split("-")[0]
        except IndexError:
texkey_second_part = ""
texkey_third_part = _texkey_random_chars(recid)
texkey = texkey_first_part + ":" + texkey_second_part + texkey_third_part
tries = 0
while self._value_exists(texkey) and tries < TEXKEY_MAXTRIES:
# Key is already in the DB, generate a new one
texkey_third_part = _texkey_random_chars(recid, use_random=True)
texkey = texkey_first_part + ":" + texkey_second_part + texkey_third_part
tries += 1
return texkey
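# Illustrative usage (editor's sketch; the recid is an assumption):
#
#   texkey = TexkeySeq().next_value(12345)   # e.g. 'Smith:2012abc'
#
# next_value() is inherited from SequenceGenerator; it delegates to
# _next_value() above and records the result so that _value_exists()
# can detect collisions with previously issued keys.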
### Functions related to texkey generator daemon ###
def submit_task(to_submit, mode, sequence_id):
""" calls bibupload with all records to be modified
@param to_submit: list of xml snippets to be submitted
@type: list
@param mode: mode to be used in bibupload
@type: list
@param sequence_id: sequence id to be included in the task_id
@type: str
@return: id of the submitted task
@rtype: int
"""
(temp_fd, temp_path) = mkstemp(prefix=PREFIX,
dir=CFG_TMPSHAREDDIR)
temp_file = os.fdopen(temp_fd, 'w')
temp_file.write('<?xml version="1.0" encoding="UTF-8"?>')
temp_file.write('<collection>')
for el in to_submit:
temp_file.write(el)
temp_file.write('</collection>')
temp_file.close()
return task_low_level_submission('bibupload', PREFIX, '-P', '3', '-I',
sequence_id, '-%s' % mode,
temp_path)
def submit_bibindex_task(to_update, sequence_id):
""" submits a bibindex task for a set of records
@param to_update: list of recids to be updated by bibindex
@type: list
@param sequence_id: sequence id to be included in the task_id
@type: str
@return: id of bibindex task
@rtype: int
"""
recids = [str(r) for r in to_update]
return task_low_level_submission('bibindex', PREFIX, '-I',
sequence_id, '-P', '2', '-w', 'global',
'-i', ','.join(recids))
def wait_for_task(task_id):
sql = 'select status from schTASK where id = %s'
while run_sql(sql, [task_id])[0][0] not in ('DONE', 'ACK', 'ACK DONE'):
task_sleep_now_if_required(True)
time.sleep(5)
def process_chunk(to_process, sequence_id):
""" submit bibupload task and wait for it to finish
@param to_process: list of marcxml snippets
@type: list
"""
task_id = submit_task(to_process, 'a', sequence_id)
return wait_for_task(task_id)
def create_xml(recid, texkey):
""" Create the marcxml snippet with the new texkey
@param recid: recid of the record to be updated
@type: int
@param texkey: texkey that has been generated
@type: str
@return: marcxml with the fields to be record_add_field
@rtype: str
"""
record = {}
record_add_field(record, '001', controlfield_value=str(recid))
subfields_toadd = [('a', texkey), ('9', 'INSPIRETeX')]
record_add_field(record, tag='035', subfields=subfields_toadd)
return print_rec(record)
def task_run_core():
""" Performs a search to find records without a texkey, generates a new
one and uploads the changes in chunks """
recids = perform_request_search(p='-035:spirestex -035:inspiretex', cc='HEP')
write_message("Found %s records to assign texkeys" % len(recids))
processed_recids = []
xml_to_process = []
for count, recid in enumerate(recids):
write_message("processing recid %s" % recid)
# Check that the record does not have already a texkey
has_texkey = False
recstruct = get_record(recid)
for instance in record_get_field_instances(recstruct, tag="035", ind1="", ind2=""):
try:
provenance = field_get_subfield_values(instance, "9")[0]
except IndexError:
provenance = ""
try:
value = field_get_subfield_values(instance, "z")[0]
except IndexError:
try:
value = field_get_subfield_values(instance, "a")[0]
except IndexError:
value = ""
provenances = ["SPIRESTeX", "INSPIRETeX"]
if provenance in provenances and value:
has_texkey = True
write_message("INFO: Record %s has already texkey %s" % (recid, value))
if not has_texkey:
TexKeySeq = TexkeySeq()
new_texkey = ""
try:
new_texkey = TexKeySeq.next_value(recid)
except TexkeyNoAuthorError:
write_message("WARNING: Record %s has no first author or collaboration" % recid)
continue
except TexkeyNoYearError:
write_message("WARNING: Record %s has no year" % recid)
continue
write_message("Created texkey %s for record %d" % (new_texkey, recid))
xml = create_xml(recid, new_texkey)
processed_recids.append(recid)
xml_to_process.append(xml)
task_update_progress("Done %d out of %d." % (count, len(recids)))
task_sleep_now_if_required()
# sequence ID to be used in all subsequent tasks
sequence_id = str(random.randrange(1, 4294967296))
if xml_to_process:
process_chunk(xml_to_process, sequence_id)
# Finally, index all the records processed
#FIXME: Waiting for sequence id to be fixed
# if processed_recids:
# submit_bibindex_task(processed_recids, sequence_id)
return True
def main():
"""Constructs the bibtask."""
# Build and submit the task
task_init(authorization_action='runtexkeygeneration',
authorization_msg="Texkey generator task submission",
description=DESCRIPTION,
help_specific_usage=HELP_MESSAGE,
version="Invenio v%s" % CFG_VERSION,
specific_params=("", []),
# task_submit_elaborate_specific_parameter_fnc=parse_option,
# task_submit_check_options_fnc=check_options,
task_run_fnc=task_run_core
)
| gpl-2.0 |
ksrajkumar/openerp-6.1 | openerp/addons/base_module_quality/workflow_test/__init__.py | 63 | 1055 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
procangroup/edx-platform | common/lib/xmodule/xmodule/poll_module.py | 17 | 7885 | """Poll module is an ungraded xmodule that students use
to answer sets of poll questions.
On the client side we show:
If the student has not yet answered - the question with its set of choices.
If the student has answered - the question with statistics for each answer.
"""
import cgi
import json
import logging
from collections import OrderedDict
from copy import deepcopy
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import Boolean, Dict, List, Scope, String
from openedx.core.djangolib.markup import Text
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.stringify import stringify_children
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor
log = logging.getLogger(__name__)
_ = lambda text: text
class PollFields(object):
# Name of poll to use in links to this poll
display_name = String(
help=_("The display name for this component."),
scope=Scope.settings
)
voted = Boolean(
help=_("Whether this student has voted on the poll"),
scope=Scope.user_state,
default=False
)
poll_answer = String(
help=_("Student answer"),
scope=Scope.user_state,
default=''
)
poll_answers = Dict(
help=_("Poll answers from all students"),
scope=Scope.user_state_summary
)
# List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
answers = List(
help=_("Poll answers from xml"),
scope=Scope.content,
default=[]
)
question = String(
help=_("Poll question"),
scope=Scope.content,
default=''
)
class PollModule(PollFields, XModule):
"""Poll Module"""
js = {
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/poll/poll.js'),
resource_string(__name__, 'js/src/poll/poll_main.js')
]
}
css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
js_module_name = "Poll"
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request data parameters
Returns:
json string
"""
if dispatch in self.poll_answers and not self.voted:
            # FIXME: replace this copy-and-reassign workaround once XBlock
            # supports mutable field types natively.
temp_poll_answers = self.poll_answers
temp_poll_answers[dispatch] += 1
self.poll_answers = temp_poll_answers
self.voted = True
self.poll_answer = dispatch
return json.dumps({'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values()),
'callback': {'objectName': 'Conditional'}
})
elif dispatch == 'get_state':
return json.dumps({'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values())
})
elif dispatch == 'reset_poll' and self.voted and \
self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
self.voted = False
            # FIXME: fix this when XBlock supports mutable types.
            # For now we use this workaround.
temp_poll_answers = self.poll_answers
temp_poll_answers[self.poll_answer] -= 1
self.poll_answers = temp_poll_answers
self.poll_answer = ''
return json.dumps({'status': 'success'})
else: # return error message
return json.dumps({'error': 'Unknown Command!'})
def get_html(self):
"""Renders parameters to template."""
params = {
'element_id': self.location.html_id(),
'element_class': self.location.block_type,
'ajax_url': self.system.ajax_url,
'configuration_json': self.dump_poll(),
}
self.content = self.system.render_template('poll.html', params)
return self.content
def dump_poll(self):
"""Dump poll information.
Returns:
string - Serialize json.
"""
# FIXME: hack for resolving caching `default={}` during definition
# poll_answers field
if self.poll_answers is None:
self.poll_answers = {}
answers_to_json = OrderedDict()
        # FIXME: fix this when XBlock supports mutable types.
        # For now we use this workaround.
temp_poll_answers = self.poll_answers
# Fill self.poll_answers, prepare data for template context.
for answer in self.answers:
# Set default count for answer = 0.
if answer['id'] not in temp_poll_answers:
temp_poll_answers[answer['id']] = 0
answers_to_json[answer['id']] = cgi.escape(answer['text'])
self.poll_answers = temp_poll_answers
return json.dumps({
'answers': answers_to_json,
'question': cgi.escape(self.question),
# to show answered poll after reload:
'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers if self.voted else {},
'total': sum(self.poll_answers.values()) if self.voted else 0,
'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()
})
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
_tag_name = 'poll_question'
_child_tag_name = 'answer'
module_class = PollModule
resources_dir = None
@classmethod
def definition_from_xml(cls, xml_object, system):
"""Pull out the data into dictionary.
Args:
xml_object: xml from file.
system: `system` object.
Returns:
(definition, children) - tuple
definition - dict:
{
'answers': <List of answers>,
'question': <Question string>
}
"""
        # Check for presence of required tags in xml.
if len(xml_object.xpath(cls._child_tag_name)) == 0:
raise ValueError("Poll_question definition must include \
at least one 'answer' tag")
xml_object_copy = deepcopy(xml_object)
answers = []
for element_answer in xml_object_copy.findall(cls._child_tag_name):
answer_id = element_answer.get('id', None)
if answer_id:
answers.append({
'id': answer_id,
'text': stringify_children(element_answer)
})
xml_object_copy.remove(element_answer)
definition = {
'answers': answers,
'question': stringify_children(xml_object_copy)
}
children = []
return (definition, children)
def definition_to_xml(self, resource_fs):
"""Return an xml element representing to this definition."""
poll_str = u'<{tag_name}>{text}</{tag_name}>'.format(
tag_name=self._tag_name, text=self.question)
xml_object = etree.fromstring(poll_str)
xml_object.set('display_name', self.display_name)
def add_child(xml_obj, answer):
# Escape answer text before adding to xml tree.
answer_text = unicode(Text(answer['text']))
child_str = u'<{tag_name} id="{id}">{text}</{tag_name}>'.format(
tag_name=self._child_tag_name, id=answer['id'],
text=answer_text)
child_node = etree.fromstring(child_str)
xml_object.append(child_node)
for answer in self.answers:
add_child(xml_object, answer)
return xml_object
| agpl-3.0 |
anzev/mingus | unittest/test_NoteContainers.py | 1 | 6071 | import sys
sys.path += ["../"]
from mingus.containers.NoteContainer import NoteContainer
from mingus.containers.Note import Note
import unittest
class test_NoteContainers(unittest.TestCase):
def setUp(self):
self.n1 = NoteContainer()
self.n2 = NoteContainer("A")
self.n3 = NoteContainer(["A", "C", "E"])
self.n4 = NoteContainer(["A", "C", "E", "F", "G"])
self.n5 = NoteContainer(["A", "C", "E", "F", "G", "A"])
def test_add_note(self):
self.assertEqual(self.n2 , self.n2.add_note("A"))
self.assertEqual(NoteContainer("A"), self.n1.add_note("A"))
self.n1 - "A"
self.assertEqual(self.n3 + ["F", "G"], self.n4)
self.assertEqual(self.n2 + ["C", "E"], self.n3 - ["F", "G"])
self.n2 - ["C", "E"]
def test_add_notes(self):
self.assertEqual(self.n3, self.n1.add_notes(["A", "C", "E"]))
self.n1.empty()
self.assertEqual(self.n3, self.n1.add_notes([["A", 4], ["C", 5], ["E", 5]]))
self.n1.empty()
self.assertEqual(self.n2, self.n1.add_notes(Note("A")))
self.n1.empty()
self.assertEqual(self.n2, self.n1.add_notes([Note("A")]))
self.n1.empty()
self.assertEqual(self.n2, self.n1.add_notes("A"))
self.n1.empty()
self.assertEqual(self.n3, self.n2 + NoteContainer([["C", 5], ["E", 5]]))
self.n2 = NoteContainer("A")
def test_remove_note(self):
n = NoteContainer(["C", "E", "G"])
n.remove_note("C")
self.assertEqual(NoteContainer(["E", "G"]), n)
n.remove_note("E")
self.assertEqual(NoteContainer(["G"]), n)
n.remove_note("G")
self.assertEqual(NoteContainer([]), n)
def test_determine(self):
n = NoteContainer(["C", "E", "G"])
self.assertEqual(["C major triad"], n.determine())
n.transpose("3")
self.assertEqual(["E major triad"], n.determine())
def test_remove_notes(self):
pass
def test_sort(self):
n1 = NoteContainer(["Eb", "Gb", "C"])
n2 = NoteContainer(["Eb", "Gb", "Cb"])
n1.sort()
n2.sort()
self.assertEqual(Note("Eb"), n1[0])
self.assertEqual(Note("Gb"), n2[1])
def test_getitem(self):
self.assertEqual(self.n2[0], Note("A"))
self.assertEqual(self.n3[0], Note("A"))
self.assertEqual(self.n4[0], Note("A"))
self.assertEqual(self.n4[1], Note("C", 5))
self.assertEqual(self.n4[2], Note("E", 5))
def test_transpose(self):
n = NoteContainer(["C", "E", "G"])
self.assertEqual(NoteContainer(["E", "G#", "B"]), n.transpose("3"))
n = NoteContainer(["C-6", "E-4", "G-2"])
self.assertEqual(NoteContainer(["E-6", "G#-4", "B-2"]), n.transpose("3"))
def test_get_note_names(self):
self.assertEqual(['A', 'C', 'E'], self.n3.get_note_names())
self.assertEqual(['A', 'C', 'E', 'F', 'G'], self.n4.get_note_names())
self.assertEqual(['A', 'C', 'E', 'F', 'G'], self.n5.get_note_names())
def test_from_chord_shorthand(self):
self.assertEqual(self.n3, NoteContainer().from_chord_shorthand("Am"))
def test_from_progression_shorthand(self):
self.assertEqual(self.n3, NoteContainer().from_progression_shorthand("VI"))
def test_from_interval_shorthand(self):
self.assertEqual(NoteContainer(['C-4', 'G-4']), NoteContainer().from_interval_shorthand("C", "5"))
self.assertEqual(NoteContainer(['F-3', 'C-4']), NoteContainer().from_interval_shorthand("C", "5", False))
def test_is_consonant(self):
self.assert_(NoteContainer().from_chord("Am").is_consonant())
self.assert_(NoteContainer().from_chord("C").is_consonant())
self.assert_(NoteContainer().from_chord("G").is_consonant())
self.assert_(NoteContainer().from_chord("Dm").is_consonant())
self.assert_(NoteContainer().from_chord("E").is_consonant())
self.assert_(not NoteContainer().from_chord("E7").is_consonant())
self.assert_(not NoteContainer().from_chord("Am7").is_consonant())
self.assert_(not NoteContainer().from_chord("Gdim").is_consonant())
def test_is_perfect_consonant(self):
self.assert_(NoteContainer(['A', 'E']).is_perfect_consonant())
self.assert_(NoteContainer(['A-4', 'A-6']).is_perfect_consonant())
self.assert_(NoteContainer(['A', 'D']).is_perfect_consonant())
self.assert_(not NoteContainer(['A', 'D']).is_perfect_consonant(False))
self.assert_(not NoteContainer().from_chord("Am").is_perfect_consonant())
self.assert_(not NoteContainer().from_chord("C").is_perfect_consonant())
self.assert_(not NoteContainer().from_chord("G").is_perfect_consonant())
self.assert_(not NoteContainer().from_chord("Dm").is_perfect_consonant())
self.assert_(not NoteContainer().from_chord("E").is_perfect_consonant())
def test_is_imperfect_consonant(self):
self.assert_(NoteContainer(['A', 'C']).is_imperfect_consonant())
self.assert_(NoteContainer(['A', 'C#']).is_imperfect_consonant())
self.assert_(NoteContainer(['A', 'F']).is_imperfect_consonant())
self.assert_(NoteContainer(['A', 'F#']).is_imperfect_consonant())
self.assert_(not NoteContainer(['A', 'B']).is_imperfect_consonant())
self.assert_(not NoteContainer(['A', 'E']).is_imperfect_consonant())
self.assert_(not NoteContainer(['A-4', 'A-5']).is_imperfect_consonant())
def test_is_dissonant(self):
self.assert_(NoteContainer().from_chord("E7").is_dissonant())
self.assert_(NoteContainer().from_chord("Am7").is_dissonant())
self.assert_(NoteContainer().from_chord("Gdim").is_dissonant())
self.assert_(not NoteContainer().from_chord("Am").is_dissonant())
self.assert_(not NoteContainer().from_chord("C").is_dissonant())
self.assert_(not NoteContainer().from_chord("G").is_dissonant())
self.assert_(not NoteContainer().from_chord("Dm").is_dissonant())
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_NoteContainers)
| gpl-3.0 |
qk4l/Flexget | flexget/tests/test_digest.py | 9 | 2954 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
class TestDigest(object):
config = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
digest multi run:
mock:
- title: entry 1
- title: entry 2
accept_all: yes
limit_new: 1
digest: aoeu
many entries:
mock:
- title: entry 1
- title: entry 2
- title: entry 3
- title: entry 4
- title: entry 5
accept_all: yes
digest: aoeu
different states:
mock:
- title: accepted
- title: rejected
regexp:
accept:
- accepted
reject:
- rejected
digest:
list: aoeu
state: ['accepted', 'rejected']
emit digest:
from_digest:
list: aoeu
seen: local
emit state:
from_digest:
list: aoeu
restore_state: yes
seen: local
emit limit:
from_digest:
list: aoeu
limit: 3
seen: local
"""
def test_multiple_task_merging(self, execute_task):
execute_task('digest 1')
execute_task('digest 2')
task = execute_task('emit digest')
assert len(task.all_entries) == 2
def test_same_task_merging(self, execute_task):
execute_task('digest multi run')
execute_task('digest multi run')
task = execute_task('emit digest')
assert len(task.all_entries) == 2
def test_expire(self, execute_task):
execute_task('digest 1')
task = execute_task('emit digest')
assert len(task.all_entries) == 1
task = execute_task('emit digest')
assert len(task.all_entries) == 0
def test_limit(self, execute_task):
execute_task('many entries')
task = execute_task('emit limit')
assert len(task.all_entries) == 3
def test_different_states(self, execute_task):
execute_task('different states')
task = execute_task('emit digest')
assert len(task.all_entries) == 2
for entry in task.all_entries:
assert entry.undecided, 'Should have been emitted in undecided state'
def test_restore_state(self, execute_task):
execute_task('different states')
task = execute_task('emit state')
for entry in task.all_entries:
assert entry.state == entry['title'], 'Should have been emitted in same state as when digested'
| mit |
ahmedaljazzar/edx-platform | common/djangoapps/third_party_auth/migrations/0009_auto_20170415_1144.py | 25 | 1933 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0008_auto_20170413_1455'),
]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='max_session_length',
field=models.PositiveIntegerField(default=None, help_text='If this option is set, then users logging in using this SSO provider will have their session length limited to no longer than this value. If set to 0 (zero), the session will expire upon the user closing their browser. If left blank, the Django platform session default length will be used.', null=True, verbose_name=b'Max session length (seconds)', blank=True),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='max_session_length',
field=models.PositiveIntegerField(default=None, help_text='If this option is set, then users logging in using this SSO provider will have their session length limited to no longer than this value. If set to 0 (zero), the session will expire upon the user closing their browser. If left blank, the Django platform session default length will be used.', null=True, verbose_name=b'Max session length (seconds)', blank=True),
),
migrations.AddField(
model_name='samlproviderconfig',
name='max_session_length',
field=models.PositiveIntegerField(default=None, help_text='If this option is set, then users logging in using this SSO provider will have their session length limited to no longer than this value. If set to 0 (zero), the session will expire upon the user closing their browser. If left blank, the Django platform session default length will be used.', null=True, verbose_name=b'Max session length (seconds)', blank=True),
),
]
| agpl-3.0 |
gimli-org/gimli | pygimli/physics/SIP/importData.py | 1 | 15351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Import/Export for SIP data."""
import codecs
from datetime import datetime
import numpy as np
import re
import pygimli as pg
def load(fileName, verbose=False, **kwargs):
"""Shortcut to load SIP spectral data.
    Import data and try to detect the file format.
Parameters
----------
fileName: str
Returns
-------
freqs, amp, phi : np.array
        Frequencies, amplitudes and phases phi in negative radians
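    Examples
    --------
    >>> # illustrative only; 'sip_spectrum.txt' is a hypothetical file name
    >>> f, amp, phi = load('sip_spectrum.txt')  # doctest: +SKIP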
"""
firstLine = None
with codecs.open(fileName, 'r', encoding='iso-8859-15',
errors='replace') as fi:
firstLine = fi.readline()
f, amp, phi = None, None, None
fnLow = fileName.lower()
if 'SIP Fuchs III' in firstLine:
if verbose:
pg.info("Reading SIP Fuchs III file")
f, amp, phi, header = readFuchs3File(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
# print(header) # not used?
elif 'SIP-Quad' in firstLine:
if verbose:
pg.info("Reading SIP Quad file")
f, amp, phi, header = readFuchs3File(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
elif 'SIP-Fuchs' in firstLine:
if verbose:
pg.info("Reading SIP Fuchs file")
f, amp, phi, drhoa, dphi = readRadicSIPFuchs(fileName,
verbose=verbose, **kwargs)
phi *= -np.pi/180.
elif fnLow.endswith('.txt') or fnLow.endswith('.csv'):
        f, amp, phi = readTXTSpectrum(fileName)
amp *= 1.0 # scale it with k if available
else:
raise Exception("Don't know how to read data.")
return f, amp, phi
def fstring(fri):
"""Format frequency to human-readable (mHz or kHz)."""
if fri > 1e3:
fstr = '{:d} kHz'.format(int(np.round(fri/1e3)))
elif fri < 1.:
fstr = '{:d} mHz'.format(int(np.round(fri*1e3)))
elif fri < 10.:
fstr = '{:3.1f} Hz'.format(fri)
elif fri < 100.:
fstr = '{:4.1f} Hz'.format(fri)
else:
fstr = '{:d} Hz'.format(int(np.round(fri)))
return fstr
def readTXTSpectrum(filename):
"""Read spectrum from ZEL device output (txt) data file."""
fid = open(filename)
lines = fid.readlines()
fid.close()
f, amp, phi = [], [], []
for line in lines[1:]:
snums = line.replace(';', ' ').split()
if len(snums) > 3:
f.append(float(snums[0]))
amp.append(float(snums[1]))
phi.append(-float(snums[3]))
else:
break
return np.asarray(f), np.asarray(amp), np.asarray(phi)
def readFuchs3File(resfile, k=1.0, verbose=False):
"""Read Fuchs III (SIP spectrum) data file.
Parameters
----------
    resfile : str
        File name of the Fuchs III output file.
    k : float
        Overwrite the internal geometric factor from the device.
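    Returns
    -------
    f, amp, phi, header : np.array, np.array, np.array, dict
        Frequencies, amplitudes (impedance divided by the device geometric
        factor and scaled by k), phases and the parsed header blocks.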
"""
activeBlock = ''
header = {}
LINE = []
dataAct = False
with codecs.open(resfile, 'r', encoding='iso-8859-15', errors='replace') as f:
for line in f:
line = line.replace('\r\n', '\n') # correct for carriage return
if dataAct:
LINE.append(line)
if len(line) < 2:
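                    # NOTE: 'f' below shadows the open file handle; the
                    # running 'for line in f' loop keeps its own iterator,
                    # so iteration continues unaffected.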
f, amp, phi, kIn = [], [], [], []
for li in LINE:
sline = li.split()
if len(sline) > 12:
fi = float(sline[11])
if np.isfinite(fi):
f.append(fi)
amp.append(float(sline[12]))
phi.append(float(sline[13]))
kIn.append(float(sline[9]))
if k != 1.0 and verbose is True:
pg.info("Geometric value changed to:", k)
return np.array(f), np.array(amp)/np.array(kIn) * k, \
np.array(phi), header
elif len(line):
if line.rfind('Current') >= 0:
if dataAct:
break
else:
dataAct = True
if line[0] == '[':
token = line[1:line.rfind(']')].replace(' ', '_')
if token[:3] == 'End':
header[activeBlock] = np.array(header[activeBlock])
activeBlock = ''
elif token[:5] == 'Begin':
activeBlock = token[6:]
header[activeBlock] = []
else:
value = line[line.rfind(']') + 1:]
try: # direct line information
if '.' in value:
num = float(value)
else:
num = int(value)
header[token] = num
except BaseException as e:
# maybe beginning or end of a block
#print(e)
pass
else:
if activeBlock:
nums = np.array(line.split(), dtype=float)
header[activeBlock].append(nums)
def readRadicSIPFuchs(filename, readSecond=False, delLast=True, verbose=False):
"""Read SIP-Fuchs Software rev.: 070903
Read Radic instrument res file containing a single spectrum.
Please note the apparent resistivity value might be scaled with the
real geometric factor. Default is 1.0.
Parameters
----------
filename : string
    readSecond: bool [False]
        Read the first data block [default] or the second block
        contained in the file.
    delLast : bool [True]
        If True, drop the first reading of each returned array
        (note: despite its name, the implementation pops index 0).
    verbose : bool [False]
        Be verbose (accepted for API symmetry with the other readers).
Returns
-------
fr : array [float]
Measured frequencies
    rhoa : array [float]
        Measured apparent resistivities
    phi : array [float]
        Measured phases
    drhoa : array [float]
        Apparent resistivity errors
    dphi : array [float]
        Phase errors
"""
    f = codecs.open(filename, 'r', encoding='iso-8859-15', errors='replace')
    line = f.readline()
fr = []
rhoa = []
phi = []
drhoa = []
dphi = []
    while True:
        line = f.readline()
        if line.rfind('Freq') > -1:
            break
        if not line:  # EOF reached before the 'Freq' header was found
            return
if readSecond:
while True:
if f.readline().rfind('Freq') > -1:
break
while True:
line = f.readline()
b = line.split('\t')
if len(b) < 5:
break
fr.append(float(b[0]))
rhoa.append(float(b[1]))
phi.append(-float(b[2]) * np.pi / 180.)
drhoa.append(float(b[3]))
dphi.append(float(b[4]) * np.pi / 180.)
f.close()
if delLast:
fr.pop(0)
rhoa.pop(0)
phi.pop(0)
drhoa.pop(0)
dphi.pop(0)
return np.array(fr), np.array(rhoa), np.array(phi), np.array(drhoa), np.array(dphi)
def toTime(t, d):
""" convert time format into timestamp
11:08:02, 21/02/2019
"""
tim = [int(_t) for _t in t.split(':')]
if '/' in d: # 03/02/1975
day = [int(_t) for _t in d.split('/')]
dt = datetime(year=day[2], month=day[1], day=day[0],
hour=tim[0], minute=tim[1], second=tim[2])
elif '.' in d: # 03.02.1975
day = [int(_t) for _t in d.split('.')]
dt = datetime(year=day[2], month=day[1], day=day[0],
hour=tim[0], minute=tim[1], second=tim[2])
else: # 1975-02-03
day = [int(_t) for _t in d.split('-')]
dt = datetime(year=day[0], month=day[1], day=day[2],
hour=tim[0], minute=tim[1], second=tim[2])
return dt.timestamp()
def readSIP256file(resfile, verbose=False):
"""Read SIP256 file (RES format) - mostly used for 2d SIP by pybert.sip.
Read SIP256 file (RES format) - mostly used for 2d SIP by pybert.sip.
Parameters
----------
filename: str
*.RES file (SIP256 raw output file)
verbose: bool
do some output [False]
Returns
-------
header - dictionary of measuring setup
DATA - data AB-list of MN-list of matrices with f, amp, phi, dAmp, dPhi
AB - list of current injection
RU - list of remote units
Examples
--------
header, DATA, AB, RU = readSIP256file('myfile.res', True)
"""
activeBlock = ''
header = {}
LINE = []
dataAct = False
with codecs.open(resfile, 'r', encoding='iso-8859-15',
errors='replace') as fi:
content = fi.readlines()
for line in content:
if dataAct:
LINE.append(line)
elif len(line):
if line[0] == '[':
token = line[1:line.rfind(']')].replace(' ', '_')
# handle early 256D software bug
if 'FrequencyParameterBegin' in token:
token = token.replace('FrequencyParameterBegin',
'Begin_FrequencyParameter')
if 'FrequencyParameterEnd' in token:
token = token.replace('FrequencyParameterEnd',
'End_FrequencyParameter')
if token.replace(' ', '_') == 'Messdaten_SIP256':
dataAct = True
elif 'Messdaten' in token:
# res format changed into SIP256D .. so we are a
# little bit more flexible with this.
dataAct = True
elif token[:3] == 'End':
header[activeBlock] = np.array(header[activeBlock])
activeBlock = ''
elif token[:5] == 'Begin':
activeBlock = token[6:]
header[activeBlock] = []
else:
value = line[line.rfind(']') + 1:]
try: # direct line information
if '.' in value:
num = float(value)
else:
                            try:
                                num = int(value)
                            except:
                                num = 0
header[token] = num
except BaseException as e:
# maybe beginning or end of a block
print(e)
else:
if activeBlock:
nums = np.array(line.split(), dtype=float)
header[activeBlock].append(nums)
DATA, dReading, dFreq, AB, RU, ru = [], [], [], [], [], []
tMeas = []
for i, line in enumerate(LINE):
# print(i, line)
        line = line.replace(' nc ', ' 0 ')  # 'nc' (not calibrated) becomes 0
        line = line.replace(' c ', ' 1 ')  # 'c' (calibrated) becomes 1
# sline = line.split()
sline = line.rstrip('\r\n').split()
if line.find('Reading') == 0:
rdno = int(sline[1])
if rdno > 0:
AB.append((int(sline[4]), int(sline[6])))
if ru:
RU.append(ru)
ru = []
if rdno > 1 and dReading:
dReading.append(np.array(dFreq))
DATA.append(dReading)
pg.verbose('Reading {0}:{1} RUs'.format(rdno-1, len(dReading)))
dReading, dFreq = [], []
elif line.find('Remote Unit') == 0:
ru.append(int(sline[2]))
if dFreq:
dReading.append(np.array(dFreq))
dFreq = []
elif line.find('Freq') >= 0:
pass
elif len(sline) > 1 and rdno > 0: # some data present
            # search for two numbers (with .) without a space in between
# variant 1: do it for every part
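            # e.g. a fused token '0.4609-6.72598' is split into
            # '0.4609' and '-6.72598'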
for i, ss in enumerate(sline):
if re.search('\.20[01][0-9]', ss) is None: # no date
fd = re.search('\.[0-9-]*\.', ss)
if fd:
if '-' in ss[1:]:
bpos = ss[1:].find('-') + 1
else:
bpos = fd.start() + 4
# print(ss[:bpos], ss[bpos:])
sline.insert(i, ss[:bpos])
sline[i+1] = ss[bpos:]
# print(sline)
fd = re.search('NaN[0-9-]*\.', ss)
if fd:
if '-' in ss[1:]:
bpos = ss.find('-')
else:
bpos = fd.start() + 3
# print(ss[:bpos], ss[bpos:])
sline.insert(i, ss[:bpos])
sline[i+1] = ss[bpos:]
# print(sline)
# variant 2: do it on whole line
# cdate = re.search('\.20[01][0-9]', line)
# if cdate:
# n2000 = cdate.start()
# else:
# n2000 = len(line)
# print(sline)
# concnums = re.search('\.[0-9-]*\.', line[:n2000])
# while concnums:
# bpos = concnums.span()[0] + 4
# line = line[:bpos] + ' ' + line[bpos:]
# n2000 += 1
# concnums = re.search('\.[0-9-]*\.', line[:n2000])
# sline = line.rstrip('\r\n').split()
# print(sline)
# if re.search('[0-9]-', line[:85]): # missing whitespace before -
# sline = re.sub('[0-9]-', '5 -', line).split()
# not a good idea for dates
for c in range(7): # this is expensive .. do we really need this?
if len(sline[c]) > 15: # too long line / missing space
if c == 0:
part1 = sline[c][:-15]
part2 = sline[c][-15:] # [10:]
else:
part1 = sline[c][:-10]
part2 = sline[c][-10:] # [11:]
sline = sline[:c] + [part1] + [part2] + sline[c + 1:]
if sline[c].find('c') >= 0:
sline[c] = '1.0'
            #Frequency /Hz RA/Ohmm PA/° ERA/% EPA/° Cal? IA/mA K.-F./m Gains Time/h:m:s Date/d.m.y
            #20000.00000000 0.4609 -6.72598 0.02234 0.01280 1 20.067 1.00 0 11:08:02 21/02/2019
try:
dFreq.append(
np.array(sline[:8] + [toTime(sline[-2], sline[-1])],
dtype=float))
except:
# dFreq.append(np.array(sline[:8], dtype=float))
print(i, line, sline)
raise ImportError()
dReading.append(np.array(dFreq))
DATA.append(dReading)
pg.verbose('Reading {0}:{1} RUs'.format(rdno, len(dReading)))
return header, DATA, AB, RU
if __name__ == "__main__":
pass
| apache-2.0 |
brewster76/fuzzy-archer | bin/user/historygenerator.py | 1 | 17207 | #
# Copyright (c) 2013-2016 Nick Dajda <nick.dajda@gmail.com>
#
# Distributed under the terms of the GNU GENERAL PUBLIC LICENSE
#
"""Extends the Cheetah generator search list to add html historic data tables in a nice colour scheme.
Tested on Weewx release 4.0.0.
Works with all databases.
Observes the units of measure and display formats specified in skin.conf.
WILL NOT WORK with Weewx prior to release 3.0.
-- Use this version for 2.4 - 2.7: https://github.com/brewster76/fuzzy-archer/releases/tag/v2.0
To use it, add this generator to search_list_extensions in skin.conf:
[CheetahGenerator]
search_list_extensions = user.historygenerator.MyXSearch
1) The $alltime tag:
Allows tags such as $alltime.outTemp.max for the all-time max
temperature, or $seven_day.rain.sum for the total rainfall in the last
seven days.
2) Nice colourful tables summarising history data by month and year:
Adding the section below to your skins.conf file will create these new tags:
$min_temp_table
$max_temp_table
$avg_temp_table
$rain_table
############################################################################################
#
# HTML month/year colour coded summary table generator
#
[HistoryReport]
# minvalues, maxvalues and colours should contain the same number of elements.
#
# For example, the [min_temp] example below, if the minimum temperature measured in
# a month is between -50 and -10 (degC) then the cell will be shaded in html colour code #0029E5.
#
# colours = background colour
# fontColours = foreground colour [optional, defaults to black if omitted]
# Default is temperature scale
minvalues = -50, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35
maxvalues = -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 60
colours = "#0029E5", "#0186E7", "#02E3EA", "#04EC97", "#05EF3D2, "#2BF207", "#8AF408", "#E9F70A", "#F9A90B", "#FC4D0D", "#FF0F2D"
fontColours = "#FFFFFF", "#FFFFFF", "#000000", "#000000", "#000000", "#000000", "#000000", "#000000", "#FFFFFF", "#FFFFFF", "#FFFFFF"
monthnames = Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec
    # The Raspberry Pi typically takes 15+ seconds to calculate all the summaries with a few years of weather data.
# refresh_interval is how often in minutes the tables are calculated.
refresh_interval = 60
[[min_temp]] # Create a new Cheetah tag which will have a _table suffix: $min_temp_table
obs_type = outTemp # obs_type can be any weewx observation, e.g. outTemp, barometer, wind, ...
aggregate_type = min # Any of these: 'sum', 'count', 'avg', 'max', 'min'
[[max_temp]]
obs_type = outTemp
aggregate_type = max
[[avg_temp]]
obs_type = outTemp
aggregate_type = avg
[[rain]]
obs_type = rain
aggregate_type = sum
data_binding = alternative_binding
# Override default temperature colour scheme with rain specific scale
minvalues = 0, 25, 50, 75, 100, 150
maxvalues = 25, 50, 75, 100, 150, 1000
colours = "#E0F8E0", "#A9F5A9", "#58FA58", "#2EFE2E", "#01DF01", "#01DF01"
fontColours = "#000000", "#000000", "#000000", "#000000", "#000000", "#000000"
"""
from datetime import datetime
import time
import syslog
import os.path
from weewx.cheetahgenerator import SearchList
from weewx.tags import TimespanBinder
import weeutil.weeutil
class MyXSearch(SearchList):
def __init__(self, generator):
SearchList.__init__(self, generator)
self.table_dict = generator.skin_dict['HistoryReport']
# Calculate the tables once every refresh_interval mins
self.refresh_interval = int(self.table_dict.get('refresh_interval', 5))
self.cache_time = 0
self.search_list_extension = {}
        # Make bootstrap-specific labels in the config file available to templates
if 'BootstrapLabels' in generator.skin_dict:
self.search_list_extension['BootstrapLabels'] = generator.skin_dict['BootstrapLabels']
else:
syslog.syslog(syslog.LOG_DEBUG, "%s: No bootstrap specific labels found" % os.path.basename(__file__))
# Make observation labels available to templates
if 'Labels' in generator.skin_dict:
self.search_list_extension['Labels'] = generator.skin_dict['Labels']
else:
syslog.syslog(syslog.LOG_DEBUG, "%s: No observation labels found" % os.path.basename(__file__))
def get_extension_list(self, valid_timespan, db_lookup):
"""For weewx V3.x extensions. Should return a list
of objects whose attributes or keys define the extension.
valid_timespan: An instance of weeutil.weeutil.TimeSpan. This will hold the
start and stop times of the domain of valid times.
db_lookup: A function with call signature db_lookup(data_binding), which
returns a database manager and where data_binding is an optional binding
name. If not given, then a default binding will be used.
"""
# Time to recalculate?
if (time.time() - (self.refresh_interval * 60)) > self.cache_time:
self.cache_time = time.time()
#
# The html history tables
#
t1 = time.time()
ngen = 0
for table in self.table_dict.sections:
noaa = True if table == 'NOAA' else False
table_options = weeutil.weeutil.accumulateLeaves(self.table_dict[table])
# Get the binding where the data is allocated
binding = table_options.get('data_binding', 'wx_binding')
#
# The all time statistics
#
# If this generator has been called in the [SummaryByMonth] or [SummaryByYear]
# section in skin.conf then valid_timespan won't contain enough history data for
# the colourful summary tables. Use the data binding provided as table option.
alltime_timespan = weeutil.weeutil.TimeSpan(db_lookup(data_binding=binding).first_timestamp, db_lookup(data_binding=binding).last_timestamp)
# First, get a TimeSpanStats object for all time. This one is easy
# because the object valid_timespan already holds all valid times to be
# used in the report. se the data binding provided as table option.
all_stats = TimespanBinder(alltime_timespan, db_lookup, data_binding=binding, formatter=self.generator.formatter,
converter=self.generator.converter)
# Now create a small dictionary with keys 'alltime' and 'seven_day':
self.search_list_extension['alltime'] = all_stats
# Show all time unless starting date specified
startdate = table_options.get('startdate', None)
if startdate is not None:
table_timespan = weeutil.weeutil.TimeSpan(int(startdate), db_lookup(binding).last_timestamp)
table_stats = TimespanBinder(table_timespan, db_lookup, data_binding=binding, formatter=self.generator.formatter,
converter=self.generator.converter)
else:
table_stats = all_stats
table_name = table + '_table'
self.search_list_extension[table_name] = self._statsHTMLTable(table_options, table_stats, table_name, binding, NOAA=noaa)
ngen += 1
t2 = time.time()
syslog.syslog(syslog.LOG_INFO, "%s: Generated %d tables in %.2f seconds" %
(os.path.basename(__file__), ngen, t2 - t1))
return [self.search_list_extension]
def _parseTableOptions(self, table_options, table_name):
"""Create an orderly list containing lower and upper thresholds, cell background and foreground colors
"""
# Check everything's the same length
l = len(table_options['minvalues'])
for i in [table_options['maxvalues'], table_options['colours']]:
if len(i) != l:
syslog.syslog(syslog.LOG_INFO, "%s: minvalues, maxvalues and colours must have the same number of elements in table: %s"
% (os.path.basename(__file__), table_name))
return None
font_color_list = table_options['fontColours'] if 'fontColours' in table_options else ['#000000'] * l
return list(zip(table_options['minvalues'], table_options['maxvalues'], table_options['colours'], font_color_list))
def _statsHTMLTable(self, table_options, table_stats, table_name, binding, NOAA=False):
"""
        table_options: Dictionary containing skin.conf options for a particular table
        table_stats: TimespanBinder for the period covered by the table
"""
cellColours = self._parseTableOptions(table_options, table_name)
summary_column = weeutil.weeutil.to_bool(table_options.get("summary_column", False))
        if cellColours is None:
# Give up
return None
if NOAA is True:
unit_formatted = ""
else:
obs_type = table_options['obs_type']
aggregate_type = table_options['aggregate_type']
converter = table_stats.converter
# obs_type
readingBinder = getattr(table_stats, obs_type)
# Some aggregate come with an argument
if aggregate_type in ['max_ge', 'max_le', 'min_le', 'sum_ge']:
try:
threshold_value = float(table_options['aggregate_threshold'][0])
except KeyError:
syslog.syslog(syslog.LOG_INFO, "%s: Problem with aggregate_threshold. Should be in the format: [value], [units]" %
(os.path.basename(__file__)))
return "Could not generate table %s" % table_name
threshold_units = table_options['aggregate_threshold'][1]
try:
reading = getattr(readingBinder, aggregate_type)((threshold_value, threshold_units))
except IndexError:
syslog.syslog(syslog.LOG_INFO, "%s: Problem with aggregate_threshold units: %s" % (os.path.basename(__file__),
str(threshold_units)))
return "Could not generate table %s" % table_name
else:
try:
reading = getattr(readingBinder, aggregate_type)
except KeyError:
syslog.syslog(syslog.LOG_INFO, "%s: aggregate_type %s not found" % (os.path.basename(__file__),
aggregate_type))
return "Could not generate table %s" % table_name
try:
unit_type = reading.converter.group_unit_dict[reading.value_t[2]]
except KeyError:
syslog.syslog(syslog.LOG_INFO, "%s: obs_type %s no unit found" % (os.path.basename(__file__),
obs_type))
unit_formatted = ''
# 'units' option in skin.conf?
if 'units' in table_options:
unit_formatted = table_options['units']
else:
if (unit_type == 'count'):
unit_formatted = "Days"
else:
if unit_type in reading.formatter.unit_label_dict:
unit_formatted = reading.formatter.unit_label_dict[unit_type]
        # For aggregate types which return number of occurrences (e.g. max_ge), set format to integer
# Don't catch error here - we absolutely need the string format
if unit_type == 'count':
format_string = '%d'
else:
format_string = reading.formatter.unit_format_dict[unit_type]
htmlText = '<table class="table">'
htmlText += " <thead>"
htmlText += " <tr>"
htmlText += " <th>%s</th>" % unit_formatted
for mon in table_options.get('monthnames', ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']):
htmlText += " <th>%s</th>" % mon
if summary_column:
if 'summary_heading' in table_options:
htmlText += " <th></th>"
htmlText += " <th align=\"center\">%s</th>\n" % table_options['summary_heading']
htmlText += " </tr>"
htmlText += " </thead>"
htmlText += " <tbody>"
for year in table_stats.years():
year_number = datetime.fromtimestamp(year.timespan[0]).year
htmlLine = (' ' * 8) + "<tr>\n"
if NOAA is True:
htmlLine += (' ' * 12) + "%s\n" % \
self._NoaaYear(datetime.fromtimestamp(year.timespan[0]), table_options)
else:
htmlLine += (' ' * 12) + "<td>%d</td>\n" % year_number
for month in year.months():
if NOAA is True:
#for property, value in vars(month.dateTime.value_t[0]).iteritems():
# print property, ": ", value
if (month.timespan[1] < table_stats.timespan.start) or (month.timespan[0] > table_stats.timespan.stop):
# print "No data for... %d, %d" % (year_number, datetime.fromtimestamp(month.timespan[0]).month)
htmlLine += "<td>-</td>\n"
else:
htmlLine += self._NoaaCell(datetime.fromtimestamp(month.timespan[0]), table_options)
else:
# update the binding to access the right DB
obsMonth = getattr(month, obs_type)
                    obsMonth.data_binding = binding
if unit_type == 'count':
try:
value = getattr(obsMonth, aggregate_type)((threshold_value, threshold_units)).value_t
except:
value = [0, 'count']
else:
value = converter.convert(getattr(obsMonth, aggregate_type).value_t)
htmlLine += (' ' * 12) + self._colorCell(value[0], format_string, cellColours)
if summary_column:
obsYear = getattr(year, obs_type)
                obsYear.data_binding = binding
if unit_type == 'count':
try:
value = getattr(obsYear, aggregate_type)((threshold_value, threshold_units)).value_t
except:
value = [0, 'count']
else:
value = converter.convert(getattr(obsYear, aggregate_type).value_t)
htmlLine += (' ' * 12) + "<td></td>\n"
htmlLine += (' ' * 12) + self._colorCell(value[0], format_string, cellColours, center=True)
htmlLine += (' ' * 8) + "</tr>\n"
htmlText += htmlLine
htmlText += (' ' * 8) + "</tr>\n"
htmlText += (' ' * 4) + "</tbody>\n"
htmlText += "</table>\n"
return htmlText
def _colorCell(self, value, format_string, cellColours, center=False):
"""Returns a '<td style= background-color: XX; color: YY"> z.zz </td>' html table entry string.
value: Numeric value for the observation
format_string: How the numberic value should be represented in the table cell.
cellColours: An array containing 4 lists. [minvalues], [maxvalues], [background color], [foreground color]
"""
cellText = "<td"
if center:
cellText += " align=\"center\""
if value is not None:
for c in cellColours:
if (value >= float(c[0])) and (value <= float(c[1])):
cellText += " style=\"background-color:%s; color:%s\"" % (c[2], c[3])
formatted_value = format_string % value
cellText += "> %s </td>\n" % formatted_value
else:
cellText += ">-</td>\n"
return cellText
def _NoaaCell(self, dt, table_options):
cellText = '<td> <a href="%s" class="btn btn-default btn-xs active" role="button"> %s </a> </td>' % \
(dt.strftime(table_options['month_filename']), dt.strftime("%m-%y"))
return cellText
def _NoaaYear(self, dt, table_options):
cellText = '<td> <a href="%s" class="btn btn-primary btn-xs active" role="button"> %s </a> </td>' % \
(dt.strftime(table_options['year_filename']), dt.strftime("%Y"))
return cellText
| gpl-2.0 |
PeterWangIntel/chromium-crosswalk | third_party/google_input_tools/third_party/closure_library/closure/bin/build/depstree_test.py | 354 | 3692 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for depstree."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest
import depstree
def _GetProvides(sources):
"""Get all namespaces provided by a collection of sources."""
provides = set()
for source in sources:
provides.update(source.provides)
return provides
class MockSource(object):
"""Mock Source file."""
def __init__(self, provides, requires):
self.provides = set(provides)
self.requires = set(requires)
def __repr__(self):
return 'MockSource %s' % self.provides
class DepsTreeTestCase(unittest.TestCase):
"""Unit test for DepsTree. Tests several common situations and errors."""
def AssertValidDependencies(self, deps_list):
"""Validates a dependency list.
Asserts that a dependency list is valid: For every source in the list,
ensure that every require is provided by a source earlier in the list.
Args:
deps_list: A list of sources that should be in dependency order.
"""
for i in range(len(deps_list)):
source = deps_list[i]
previous_provides = _GetProvides(deps_list[:i])
for require in source.requires:
self.assertTrue(
require in previous_provides,
'Namespace "%s" not provided before required by %s' % (
require, source))
def testSimpleDepsTree(self):
a = MockSource(['A'], ['B', 'C'])
b = MockSource(['B'], [])
c = MockSource(['C'], ['D'])
d = MockSource(['D'], ['E'])
e = MockSource(['E'], [])
tree = depstree.DepsTree([a, b, c, d, e])
self.AssertValidDependencies(tree.GetDependencies('A'))
self.AssertValidDependencies(tree.GetDependencies('B'))
self.AssertValidDependencies(tree.GetDependencies('C'))
self.AssertValidDependencies(tree.GetDependencies('D'))
self.AssertValidDependencies(tree.GetDependencies('E'))
def testCircularDependency(self):
# Circular deps
a = MockSource(['A'], ['B'])
b = MockSource(['B'], ['C'])
c = MockSource(['C'], ['A'])
tree = depstree.DepsTree([a, b, c])
self.assertRaises(depstree.CircularDependencyError,
tree.GetDependencies, 'A')
def testRequiresUndefinedNamespace(self):
a = MockSource(['A'], ['B'])
b = MockSource(['B'], ['C'])
c = MockSource(['C'], ['D']) # But there is no D.
def MakeDepsTree():
return depstree.DepsTree([a, b, c])
self.assertRaises(depstree.NamespaceNotFoundError, MakeDepsTree)
def testDepsForMissingNamespace(self):
a = MockSource(['A'], ['B'])
b = MockSource(['B'], [])
tree = depstree.DepsTree([a, b])
# There is no C.
self.assertRaises(depstree.NamespaceNotFoundError,
tree.GetDependencies, 'C')
def testMultipleRequires(self):
a = MockSource(['A'], ['B'])
b = MockSource(['B'], ['C'])
c = MockSource(['C'], [])
d = MockSource(['D'], ['B'])
tree = depstree.DepsTree([a, b, c, d])
self.AssertValidDependencies(tree.GetDependencies(['D', 'A']))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
cswiercz/sympy | sympy/physics/vector/functions.py | 74 | 23381 | from __future__ import print_function, division
from sympy import (sympify, diff, sin, cos, Matrix, Symbol, integrate,
trigsimp, Function, symbols)
from sympy.core.basic import S
from sympy.core.compatibility import reduce
from .vector import Vector, _check_vector
from .frame import CoordinateSym, _check_frame
from .dyadic import Dyadic
from .printing import vprint, vsprint, vpprint, vlatex, init_vprinting
from sympy.utilities.iterables import iterable
__all__ = ['cross', 'dot', 'express', 'time_derivative', 'outer',
'kinematic_equations', 'get_motion_params', 'partial_velocity',
'dynamicsymbols', 'vprint', 'vsprint', 'vpprint', 'vlatex',
'init_vprinting']
def cross(vec1, vec2):
"""Cross product convenience wrapper for Vector.cross(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Cross product is between two vectors')
return vec1 ^ vec2
cross.__doc__ += Vector.cross.__doc__
def dot(vec1, vec2):
"""Dot product convenience wrapper for Vector.dot(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Dot product is between two vectors')
return vec1 & vec2
dot.__doc__ += Vector.dot.__doc__
def express(expr, frame, frame2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, scalar(sympyfiable) or Dyadic in given frame.
Refer to the local methods of Vector and Dyadic for details.
If 'variables' is True, then the coordinate variables (CoordinateSym
instances) of other frames present in the vector/scalar field or
dyadic expression are also substituted in terms of the base scalars of
this frame.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in ReferenceFrame 'frame'
frame: ReferenceFrame
The reference frame to express expr in
frame2 : ReferenceFrame
        The other frame required for re-expression (only for Dyadic expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of frame
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> from sympy.physics.vector import express
>>> express(d, B, N)
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
>>> express(B.x, N)
cos(q)*N.x + sin(q)*N.y
>>> express(N[0], B, variables=True)
B_x*cos(q(t)) - B_y*sin(q(t))
"""
_check_frame(frame)
if expr == 0:
return expr
if isinstance(expr, Vector):
#Given expr is a Vector
if variables:
#If variables attribute is True, substitute
#the coordinate variables in the Vector
frame_list = [x[-1] for x in expr.args]
subs_dict = {}
for f in frame_list:
subs_dict.update(f.variable_map(frame))
expr = expr.subs(subs_dict)
#Re-express in this frame
outvec = Vector([])
for i, v in enumerate(expr.args):
if v[1] != frame:
temp = frame.dcm(v[1]) * v[0]
if Vector.simp:
temp = temp.applyfunc(lambda x:
trigsimp(x, method='fu'))
outvec += Vector([(temp, frame)])
else:
outvec += Vector([v])
return outvec
if isinstance(expr, Dyadic):
if frame2 is None:
frame2 = frame
_check_frame(frame2)
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += express(v[0], frame, variables=variables) * \
(express(v[1], frame, variables=variables) |
express(v[2], frame2, variables=variables))
return ol
else:
if variables:
#Given expr is a scalar field
frame_set = set([])
expr = sympify(expr)
            #Substitute all the coordinate variables
for x in expr.free_symbols:
                if isinstance(x, CoordinateSym) and x.frame != frame:
frame_set.add(x.frame)
subs_dict = {}
for f in frame_set:
subs_dict.update(f.variable_map(frame))
return expr.subs(subs_dict)
return expr
def time_derivative(expr, frame, order=1):
"""
Calculate the time derivative of a vector/scalar field function
or dyadic expression in given frame.
References
==========
http://en.wikipedia.org/wiki/Rotating_reference_frame#Time_derivatives_in_the_two_frames
Parameters
==========
expr : Vector/Dyadic/sympifyable
The expression whose time derivative is to be calculated
frame : ReferenceFrame
The reference frame to calculate the time derivative in
order : integer
The order of the derivative to be calculated
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> from sympy import Symbol
>>> q1 = Symbol('q1')
>>> u1 = dynamicsymbols('u1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> v = u1 * N.x
>>> A.set_ang_vel(N, 10*A.x)
>>> from sympy.physics.vector import time_derivative
>>> time_derivative(v, N)
u1'*N.x
>>> time_derivative(u1*A[0], N)
N_x*Derivative(u1(t), t)
>>> B = N.orientnew('B', 'Axis', [u1, N.z])
>>> from sympy.physics.vector import outer
>>> d = outer(N.x, N.x)
>>> time_derivative(d, B)
- u1'*(N.y|N.x) - u1'*(N.x|N.y)
"""
t = dynamicsymbols._t
_check_frame(frame)
if order == 0:
return expr
if order % 1 != 0 or order < 0:
raise ValueError("Unsupported value of order entered")
if isinstance(expr, Vector):
outvec = Vector(0)
for i, v in enumerate(expr.args):
if v[1] == frame:
outvec += Vector([(express(v[0], frame,
variables=True).diff(t), frame)])
else:
outvec += time_derivative(Vector([v]), v[1]) + \
(v[1].ang_vel_in(frame) ^ Vector([v]))
return time_derivative(outvec, frame, order - 1)
if isinstance(expr, Dyadic):
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += (v[0].diff(t) * (v[1] | v[2]))
ol += (v[0] * (time_derivative(v[1], frame) | v[2]))
ol += (v[0] * (v[1] | time_derivative(v[2], frame)))
return time_derivative(ol, frame, order - 1)
else:
return diff(express(expr, frame, variables=True), t, order)
def outer(vec1, vec2):
"""Outer product convenience wrapper for Vector.outer():\n"""
if not isinstance(vec1, Vector):
raise TypeError('Outer product is between two Vectors')
return vec1 | vec2
outer.__doc__ += Vector.outer.__doc__
def kinematic_equations(speeds, coords, rot_type, rot_order=''):
"""Gives equations relating the qdot's to u's for a rotation type.
Supply rotation type and order as in orient. Speeds are assumed to be
body-fixed; if we are defining the orientation of B in A using by rot_type,
the angular velocity of B in A is assumed to be in the form: speed[0]*B.x +
speed[1]*B.y + speed[2]*B.z
Parameters
==========
speeds : list of length 3
The body fixed angular velocity measure numbers.
coords : list of length 3 or 4
The coordinates used to define the orientation of the two frames.
rot_type : str
The type of rotation used to create the equations. Body, Space, or
Quaternion only
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import kinematic_equations, vprint
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
>>> vprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'),
... order=None)
[-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3']
"""
# Code below is checking and sanitizing input
approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131',
'212', '232', '313', '323', '1', '2', '3', '')
rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
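    # e.g. rot_order 'XYZ' becomes '123' and 'ZXZ' becomes '313'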
if not isinstance(speeds, (list, tuple)):
raise TypeError('Need to supply speeds in a list')
if len(speeds) != 3:
raise TypeError('Need to supply 3 body-fixed speeds')
if not isinstance(coords, (list, tuple)):
raise TypeError('Need to supply coordinates in a list')
if rot_type.lower() in ['body', 'space']:
if rot_order not in approved_orders:
raise ValueError('Not an acceptable rotation order')
if len(coords) != 3:
raise ValueError('Need 3 coordinates for body or space')
# Actual hard-coded kinematic differential equations
q1, q2, q3 = coords
q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords]
w1, w2, w3 = speeds
s1, s2, s3 = [sin(q1), sin(q2), sin(q3)]
c1, c2, c3 = [cos(q1), cos(q2), cos(q3)]
if rot_type.lower() == 'body':
if rot_order == '123':
return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 *
c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3]
if rot_order == '231':
return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2]
if rot_order == '312':
return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 *
s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2]
if rot_order == '132':
return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 *
c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2]
if rot_order == '213':
return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 *
s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3]
if rot_order == '321':
return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 *
s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2]
if rot_order == '121':
return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 *
s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2]
if rot_order == '131':
return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2]
if rot_order == '212':
return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 *
s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2]
if rot_order == '232':
return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 *
c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2]
if rot_order == '313':
return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 *
s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3]
if rot_order == '323':
return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 *
c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3]
if rot_type.lower() == 'space':
if rot_order == '123':
return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2]
if rot_order == '231':
return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2]
if rot_order == '312':
return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2]
if rot_order == '132':
return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 *
s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2]
if rot_order == '213':
return [q1d - (w1 * s1 - w3 * c1) * s2 / c2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2]
if rot_order == '321':
return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2]
if rot_order == '121':
return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2]
if rot_order == '131':
return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 *
s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2]
if rot_order == '212':
return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2]
if rot_order == '232':
return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2]
if rot_order == '313':
return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2]
if rot_order == '323':
return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2]
elif rot_type.lower() == 'quaternion':
if rot_order != '':
raise ValueError('Cannot have rotation order for quaternion')
if len(coords) != 4:
raise ValueError('Need 4 coordinates for quaternion')
# Actual hard-coded kinematic differential equations
e0, e1, e2, e3 = coords
w = Matrix(speeds + [0])
E = Matrix([[e0, -e3, e2, e1], [e3, e0, -e1, e2], [-e2, e1, e0, e3],
[-e1, -e2, -e3, e0]])
edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]])
return list(edots.T - 0.5 * w.T * E.T)
else:
raise ValueError('Not an approved rotation type for this function')
def get_motion_params(frame, **kwargs):
"""
Returns the three motion parameters - (acceleration, velocity, and
position) as vectorial functions of time in the given frame.
If a higher order differential function is provided, the lower order
functions are used as boundary conditions. For example, given the
acceleration, the velocity and position parameters are taken as
boundary conditions.
The values of time at which the boundary conditions are specified
are taken from timevalue1(for position boundary condition) and
timevalue2(for velocity boundary condition).
If any of the boundary conditions are not provided, they are taken
to be zero by default (zero vectors, in case of vectorial inputs). If
the boundary conditions are also functions of time, they are converted
to constants by substituting the time values in the dynamicsymbols._t
time Symbol.
This function can also be used for calculating rotational motion
parameters. Have a look at the Parameters and Examples for more clarity.
Parameters
==========
frame : ReferenceFrame
The frame to express the motion parameters in
acceleration : Vector
Acceleration of the object/frame as a function of time
    velocity : Vector
        Velocity as function of time or as boundary condition
        of velocity at time = timevalue2
    position : Vector
        Position as function of time or as boundary condition
        of position at time = timevalue1
timevalue1 : sympyfiable
Value of time for position boundary condition
timevalue2 : sympyfiable
Value of time for velocity boundary condition
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
>>> from sympy import symbols
>>> R = ReferenceFrame('R')
>>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
>>> v = v1*R.x + v2*R.y + v3*R.z
>>> get_motion_params(R, position = v)
(v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
>>> a, b, c = symbols('a b c')
>>> v = a*R.x + b*R.y + c*R.z
>>> get_motion_params(R, velocity = v)
(0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
>>> parameters = get_motion_params(R, acceleration = v)
>>> parameters[1]
a*t*R.x + b*t*R.y + c*t*R.z
>>> parameters[2]
a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
"""
##Helper functions
def _process_vector_differential(vectdiff, condition, \
variable, ordinate, frame):
"""
Helper function for get_motion methods. Finds derivative of vectdiff wrt
variable, and its integral using the specified boundary condition at
value of variable = ordinate.
Returns a tuple of - (derivative, function and integral) wrt vectdiff
"""
#Make sure boundary condition is independent of 'variable'
if condition != 0:
condition = express(condition, frame, variables=True)
#Special case of vectdiff == 0
if vectdiff == Vector(0):
return (0, 0, condition)
#Express vectdiff completely in condition's frame to give vectdiff1
vectdiff1 = express(vectdiff, frame)
#Find derivative of vectdiff
vectdiff2 = time_derivative(vectdiff, frame)
#Integrate and use boundary condition
vectdiff0 = Vector(0)
lims = (variable, ordinate, variable)
for dim in frame:
function1 = vectdiff1.dot(dim)
abscissa = dim.dot(condition).subs({variable : ordinate})
# Indefinite integral of 'function1' wrt 'variable', using
# the given initial condition (ordinate, abscissa).
vectdiff0 += (integrate(function1, lims) + abscissa) * dim
#Return tuple
return (vectdiff2, vectdiff, vectdiff0)
##Function body
_check_frame(frame)
#Decide mode of operation based on user's input
if 'acceleration' in kwargs:
mode = 2
elif 'velocity' in kwargs:
mode = 1
else:
mode = 0
#All the possible parameters in kwargs
#Not all are required for every case
#If not specified, set to default values(may or may not be used in
#calculations)
conditions = ['acceleration', 'velocity', 'position',
'timevalue', 'timevalue1', 'timevalue2']
for i, x in enumerate(conditions):
if x not in kwargs:
if i < 3:
kwargs[x] = Vector(0)
else:
kwargs[x] = S(0)
elif i < 3:
_check_vector(kwargs[x])
else:
kwargs[x] = sympify(kwargs[x])
if mode == 2:
vel = _process_vector_differential(kwargs['acceleration'],
kwargs['velocity'],
dynamicsymbols._t,
kwargs['timevalue2'], frame)[2]
pos = _process_vector_differential(vel, kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)[2]
return (kwargs['acceleration'], vel, pos)
elif mode == 1:
return _process_vector_differential(kwargs['velocity'],
kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)
else:
vel = time_derivative(kwargs['position'], frame)
acc = time_derivative(vel, frame)
return (acc, vel, kwargs['position'])
def partial_velocity(vel_list, u_list, frame):
"""Returns a list of partial velocities.
For a list of velocity or angular velocity vectors the partial derivatives
with respect to the supplied generalized speeds are computed, in the
specified ReferenceFrame.
The output is a list of lists. The outer list has a number of elements
equal to the number of supplied velocity vectors. The inner lists are, for
each velocity vector, the partial derivatives of that velocity vector with
respect to the generalized speeds supplied.
Parameters
==========
vel_list : list
List of velocities of Point's and angular velocities of ReferenceFrame's
u_list : list
List of independent generalized speeds.
frame : ReferenceFrame
The ReferenceFrame the partial derivatives are going to be taken in.
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import partial_velocity
>>> u = dynamicsymbols('u')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
>>> vel_list = [P.vel(N)]
>>> u_list = [u]
>>> partial_velocity(vel_list, u_list, N)
[[N.x]]
"""
if not iterable(vel_list):
raise TypeError('Provide velocities in an iterable')
if not iterable(u_list):
raise TypeError('Provide speeds in an iterable')
list_of_pvlists = []
for i in vel_list:
pvlist = []
for j in u_list:
vel = i.diff(j, frame)
pvlist += [vel]
list_of_pvlists += [pvlist]
return list_of_pvlists
def dynamicsymbols(names, level=0):
"""Uses symbols and Function for functions of time.
Creates a SymPy UndefinedFunction, which is then initialized as a function
of a variable, the default being Symbol('t').
Parameters
==========
names : str
Names of the dynamic symbols you want to create; works the same way as
inputs to symbols
level : int
Level of differentiation of the returned function; d/dt once of t,
twice of t, etc.
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy import diff, Symbol
>>> q1 = dynamicsymbols('q1')
>>> q1
q1(t)
>>> diff(q1, Symbol('t'))
Derivative(q1(t), t)
"""
esses = symbols(names, cls=Function)
t = dynamicsymbols._t
if iterable(esses):
esses = [reduce(diff, [t] * level, e(t)) for e in esses]
return esses
else:
return reduce(diff, [t] * level, esses(t))
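# Illustrative note (added for exposition): a nonzero level returns the
# symbol pre-differentiated, e.g. dynamicsymbols('q1', level=1) yields
# Derivative(q1(t), t).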
dynamicsymbols._t = Symbol('t')
dynamicsymbols._str = '\''
| bsd-3-clause |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/datasets/base.py | 41 | 8304 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from os import path
import random
import time
import numpy as np
from six.moves import urllib
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
@deprecated(None, 'Use tf.data instead.')
def load_csv_with_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file with a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
data[i] = np.asarray(row, dtype=features_dtype)
return Dataset(data=data, target=target)
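# Illustrative header convention (added for exposition; the numbers are
# hypothetical): the first CSV row carries the sample and feature counts,
# so three two-feature rows labelled in the last column look like
#
#   3,2,label
#   5.1,3.5,0
#   4.9,3.0,0
#   6.2,3.4,1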
@deprecated(None, 'Use tf.data instead.')
def load_csv_without_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file without a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
data, target = [], []
for row in data_file:
target.append(row.pop(target_column))
data.append(np.asarray(row, dtype=features_dtype))
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=data, target=target)
@deprecated(None, 'Use tf.data instead.')
def shrink_csv(filename, ratio):
"""Create a smaller dataset of only 1/ratio of original data."""
filename_small = filename.replace('.', '_small.')
with gfile.Open(filename_small, 'w') as csv_file_small:
writer = csv.writer(csv_file_small)
with gfile.Open(filename) as csv_file:
reader = csv.reader(csv_file)
i = 0
for row in reader:
if i % ratio == 0:
writer.writerow(row)
i += 1
@deprecated(None, 'Use scikits.learn.datasets.')
def load_iris(data_path=None):
"""Load Iris dataset.
Args:
data_path: string, path to iris dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'iris.csv')
return load_csv_with_header(
data_path, target_dtype=np.int, features_dtype=np.float)
@deprecated(None, 'Use scikits.learn.datasets.')
def load_boston(data_path=None):
"""Load Boston housing dataset.
Args:
data_path: string, path to boston dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'boston_house_prices.csv')
return load_csv_with_header(
data_path, target_dtype=np.float, features_dtype=np.float)
@deprecated(None, 'Use the retry module or similar alternatives.')
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
max_delay * (1 + jitter)).
factor: on each subsequent retry, the delay is multiplied by this value
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
Returns:
A function that wraps another function to automatically retry it.
"""
return _internal_retry(
initial_delay=initial_delay,
max_delay=max_delay,
factor=factor,
jitter=jitter,
is_retriable=is_retriable)
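# Illustrative usage sketch (added for exposition; `_example_fetch` and its
# argument are hypothetical, not part of the original module). `retry`
# returns a decorator, so any callable that raises retriable errors can be
# wrapped; here connection-related IOErrors are retried with backoff.
@retry(initial_delay=1.0, max_delay=16.0,
       is_retriable=lambda e: isinstance(e, IOError))
def _example_fetch(url):
  """Downloads `url`, retrying transient IOErrors with backoff."""
  return urllib.request.urlretrieve(url)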
def _internal_retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions, for internal use only.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
max_delay * (1 + jitter)).
factor: on each subsequent retry, the delay is multiplied by this value
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
Returns:
A function that wraps another function to automatically retry it.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@deprecated(None, 'Please use urllib or similar directly.')
@_internal_retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
@deprecated(None, 'Please write your own downloading logic.')
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
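def _example_maybe_download():
  """Illustrative sketch (added for exposition; the file name and URL are
  hypothetical). The helper is idempotent: a second call finds the cached
  file and skips the download."""
  return maybe_download('iris.csv', '/tmp/data',
                        'https://example.com/iris.csv')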
| mit |
apark263/tensorflow | tensorflow/contrib/text/python/ops/__init__.py | 102 | 1026 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various contrib ops related to text-processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.text.python.ops.skip_gram_ops import skip_gram_sample
from tensorflow.contrib.text.python.ops.skip_gram_ops import skip_gram_sample_with_text_vocab
| apache-2.0 |
colevscode/quickkvs | quickkvs/backends.py | 1 | 4939 | import json
import time
class ExpirationMixin(object):
def _calc_ttl(self, item):
# Returns -1 for entries that never expire, 0 for expired entries, and
# the remaining wall-clock seconds otherwise.
if item['expires'] < 0: return -1 # never expires
ttl = item['expires'] - time.time()
return 0 if ttl < 0 else ttl
def _set_expires(self, item, expires):
# Wall-clock time; time.clock() measures CPU time on Unix, which would
# make TTLs elapse at the wrong moment.
item['expires'] = time.time() + expires if expires > 0 else -1
class MemoryBackend(ExpirationMixin):
def __init__(self, dictionary=None):
self._dict = dictionary or {}
def get_item(self, key):
item = self._dict.get(key)
if not item:
raise KeyError
ttl = self._calc_ttl(item)
if ttl == 0:
self.del_item(key)
raise KeyError
return item['value'], ttl
def set_item(self, key, value, expires):
item = {'value': value}
self._set_expires(item, expires)
self._dict[key] = item
def del_item(self, key):
del self._dict[key]
def contains_item(self, key):
self.cleanup()
return key in self._dict.keys()
def expire(self, key, seconds):
item, exp = self.get_item(key)
self._set_expires(item, seconds)
def cleanup(self):
# Drop expired entries: _calc_ttl returns 0 only once an entry's TTL
# has elapsed (-1 means the entry never expires).
live = lambda item: self._calc_ttl(item) != 0
self._dict = dict((k, v) for k, v in self._dict.iteritems() if live(v))
def search(self, **kwargs):
raise NotImplementedError
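def _example_usage():
    # Illustrative sketch (added for exposition; the key and value are
    # hypothetical). Every backend exposes the same set_item/get_item/
    # del_item surface, so callers can swap storage engines unchanged.
    backend = MemoryBackend()
    backend.set_item('session:42', {'user': 'alice'}, expires=60)
    value, ttl = backend.get_item('session:42')
    assert value == {'user': 'alice'} and ttl > 0
    backend.del_item('session:42')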
class MongoBackend(ExpirationMixin):
def __init__(self, mongo=None, host="localhost:27017", user='', passwd='',
db="_mongo_token_store", col="default_token_store"):
from pymongo import MongoClient
if not mongo:
auth = '%s:%s@' % (user, passwd) if user else ''
uri = 'mongodb://' + auth + host
self._mongo = MongoClient(host=uri)
else:
self._mongo = mongo
self._db = db
self._col = col
def get_item(self, key):
col = self._mongo[self._db][self._col]
item = col.find_one({'_id':key})
if not item:
raise KeyError
ttl = self._calc_ttl(item)
if ttl == 0:
self.del_item(key)
raise KeyError
return item['value'], ttl
def set_item(self, key, value, expires):
col = self._mongo[self._db][self._col]
item = {'_id':key, 'value': value}
self._set_expires(item, expires)
col.update({'_id':key}, item, upsert=True)
def del_item(self, key):
col = self._mongo[self._db][self._col]
col.remove({'_id':key})
def contains_item(self, key):
col = self._mongo[self._db][self._col]
self.cleanup(col=col)
return col.count({'_id':key}) > 0
def expire(self, key, seconds):
col = self._mongo[self._db][self._col]
col.update({'_id':key}, {'$set': {
'expires': time.time() + seconds if seconds > 0 else -1
}})
def cleanup(self, col=None):
col = col or self._mongo[self._db][self._col]
# Only non-negative timestamps can lapse; -1 marks entries that never expire.
col.remove({'expires': {'$gte': 0, '$lt': time.time()}})
def search(self, *args, **kwargs):
'''
Three possible ways to invoke:
search(arg) - looks for values that match the arg
search(*args) - looks for values that match any arg
search(**kwargs) -
assumes that the values are objects. Finds all objects
for which each kwarg matches a property
If both args and kwargs are passed, kwargs will be ignored
'''
col = self._mongo[self._db][self._col]
self.cleanup(col=col)
if len(args) == 1:
query = {'value': args[0]}
elif len(args) > 1:
query = {'value': {'$in': args}}
else:
query = dict((u'value.' + unicode(k), unicode(v)) for k, v in kwargs.iteritems())
results = col.find(query)
return dict((item['_id'], item['value']) for item in results)
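# Illustrative search sketch (added for exposition; the values are
# hypothetical):
#   backend.search('alice')        -> entries whose value == 'alice'
#   backend.search('alice', 'bob') -> entries whose value is either name
#   backend.search(role='admin')   -> object values whose role == 'admin'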
class RedisBackend(object):
def __init__(self, redis=None, host="localhost", port=6379, pw=None, db=0):
from redis import StrictRedis
self._redis = redis or StrictRedis(host=host, port=port, password=pw, db=db)
def _calc_ttl(self, key):
# Redis tracks expiry itself; the TTL is queried by key, not by value.
return self._redis.ttl(key)
def get_item(self, key):
item = self._redis.get(key)
if not item:
raise KeyError
ttl = self._calc_ttl(key)
return json.loads(unicode(item, "utf-8")), ttl
def set_item(self, key, value, expires):
data = unicode(json.dumps(value)).encode("utf-8")
self._redis.set(key, data)
if expires >= 0:
self._redis.expire(key, expires)
def del_item(self, key):
self._redis.delete(key)
def contains_item(self, key):
return self._redis.exists(key)
def expire(self, key, seconds):
self._redis.expire(key, seconds)
def cleanup(self):
pass
def search(self, **kwargs):
raise NotImplementedError | mit |
kangxu/crosswalk-test-suite | usecase/usecase-apptools-tests/inst.deb.py | 14 | 3130 | #!/usr/bin/env python
import os
import sys
import subprocess
from optparse import OptionParser
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
def doCMD(cmd):
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
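# Illustrative sketch (added for exposition; the command is hypothetical):
# doCMD streams the command's combined stdout/stderr live and also returns
# it, e.g.
#   code, lines = doCMD("dpkg -s crosswalk")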
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".deb"):
debName = os.path.basename(os.path.splitext(file)[0])
pkg_id = debName.split("_")[0]
if doCMD("which %s" % pkg_id)[0] == 0:
(return_code, output) = doCMD(
"sudo dpkg -P %s" % pkg_id)
if return_code != 0:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".deb") and file.find("upgrade") == -1:
cmd = "sudo dpkg -i %s/%s" % (root, file)
(return_code, output) = doCMD(cmd)
if return_code != 0:
action_status = False
break
return action_status
def initEnv():
action_status = True
xwalk_dir = "/usr/bin"
cmd = "which xwalk"
(return_code, xwalk_path) = doCMD(cmd)
if return_code == 0:
xwalk_dir = os.path.dirname(xwalk_path[0])
cmdList = ["sudo rm -rf %s/xwalk", "sudo cp -rf xwalk.sh %s/", "sudo ln /usr/bin/xwalk.sh %s/xwalk"]
for cmdstr in cmdList:
cmd = cmdstr % xwalk_dir
(return_code, xwalk_path) = doCMD(cmd)
if return_code != 0:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
#if not initEnv():
#sys.exit(1)
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
hradec/gaffer | python/GafferDelightTest/InteractiveDelightRenderTest.py | 6 | 4100 | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
import GafferOSL
import GafferDelight
@unittest.skipIf( GafferTest.inCI(), "No license available in cloud" )
class InteractiveDelightRenderTest( GafferSceneTest.InteractiveRenderTest ) :
interactiveRenderNodeClass = GafferDelight.InteractiveDelightRender
# Temporarily disable this test (which is implemented in the
# base class) because it fails. The issue is that we're automatically
# instancing the geometry for the two lights, and that appears to
# trigger a bug in 3delight where the sampling goes awry.
@unittest.skip( "Awaiting feedback from 3delight developers" )
def testAddLight( self ) :
pass
# Disable this test for now as we don't have light linking support in
# 3Delight, yet.
@unittest.skip( "No light linking support just yet" )
def testLightLinking( self ) :
pass
# Disable this test for now as we don't have light linking support in
# 3Delight, yet.
@unittest.skip( "No light linking support just yet" )
def testHideLinkedLight( self ) :
pass
# Disable this test for now as we don't have light filter support in
# 3Delight, yet.
@unittest.skip( "No light filter support just yet" )
def testLightFilters( self ) :
pass
# Disable this test for now as we don't have light filter support in
# 3Delight, yet.
@unittest.skip( "No light filter support just yet" )
def testLightFiltersAndSetEdits( self ) :
pass
def _createConstantShader( self ) :
shader = GafferOSL.OSLShader()
shader.loadShader( "Surface/Constant" )
return shader, shader["parameters"]["Cs"]
def _createTraceSetShader( self ) :
return None, None
def _cameraVisibilityAttribute( self ) :
return "dl:visibility.camera"
def _createMatteShader( self ) :
shader = GafferOSL.OSLShader()
shader.loadShader( "maya/osl/lambert" )
return shader, shader["parameters"]["i_color"]
def _createPointLight( self ) :
light = GafferOSL.OSLLight()
light["shape"].setValue( light.Shape.Sphere )
light["radius"].setValue( 0.01 )
light.loadShader( "maya/osl/pointLight" )
light["attributes"].addChild( Gaffer.NameValuePlug( "dl:visibility.camera", False ) )
return light, light["parameters"]["i_color"]
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
joernhees/git-hg-remote-bug_gae-init | main/lib/flask/module.py | 850 | 1363 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name of the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
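# Illustrative migration sketch (added for exposition; the names are
# hypothetical): new code should construct a Blueprint directly instead of
# a Module, e.g.
#   admin = Blueprint('admin', __name__, url_prefix='/admin',
#                     template_folder='templates')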
| mit |
Collegeville/Homework | bin/get-dashboard.py | 5 | 3397 | #!/usr/bin/env python3
'''Create YAML for dashboard page by querying GitHub repositories.'''
import sys
import time
import yaml
CONTROLS = (
('swcarpentry/shell-novice', 'Unix Shell'),
('swcarpentry/git-novice', 'Git'),
('swcarpentry/hg-novice', 'Mercurial'),
('swcarpentry/sql-novice-survey', 'SQL'),
('swcarpentry/python-novice-inflammation', 'Python'),
('swcarpentry/r-novice-inflammation', 'R (Inflammation Data)'),
('swcarpentry/r-novice-gapminder', 'R (Gapminder Data)'),
('swcarpentry/matlab-novice-inflammation', 'MATLAB'),
('swcarpentry/make-novice', 'Make'),
('swcarpentry/capstone-novice-spreadsheet-biblio', 'From Excel to a Database via Python'),
('katyhuff/python-testing', 'Testing and Continuous Integration with Python'),
('DamienIrving/capstone-oceanography', 'Data Management in the Ocean, Weather and Climate Sciences'),
('swcarpentry/matlab-novice-capstone-biomed', 'Controlling a Quadcoptor With Your Mind'),
('swcarpentry/web-data-python', 'Working With Data on the Web'),
('swcarpentry/amy', 'Workshop Administration Tool'),
('swcarpentry/website', 'Software Carpentry Website'),
)
def get_connection(token_file):
'''Get a connection to GitHub if the library and token file are available.'''
try:
from github import Github
except:
print('Unable to import github library', file=sys.stderr)
sys.exit(1)
try:
with open(token_file, 'r') as reader:
token = reader.read().strip()
except:
print('Unable to open token file "{0}"'.format(token_file), file=sys.stderr)
sys.exit(1)
return Github(token)
def process(cnx):
'''Gather information.'''
if not cnx:
return []
all_records = []
dashboard = {
'records' : all_records,
'num_repos' : 0,
'num_issues' : 0,
'timestamp' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
}
for (ident, description) in CONTROLS:
print('+', ident)
dashboard['num_repos'] += 1
r = cnx.get_repo(ident)
record = {'ident' : ident,
'description' : description,
'url' : str(r.html_url)}
record['pulls'], record['issues'] = filter(r.get_issues(state='open'))
dashboard['num_issues'] += len(record['issues'])
all_records.append(record)
return dashboard
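# Illustrative shape of the generated YAML (added for exposition; the
# values are hypothetical):
#
#   num_repos: 16
#   num_issues: 42
#   timestamp: '2016-01-01T00:00:00Z'
#   records:
#   - ident: swcarpentry/shell-novice
#     description: Unix Shell
#     url: https://github.com/swcarpentry/shell-novice
#     pulls: [...]
#     issues: [...]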
def filter(raw):
'''Separate pull requests from issues.'''
issues = []
pulls = []
for i in raw:
try:
entry = {'number' : i.number,
'title' : str(i.title),
'url' : str(i.html_url),
'updated' : i.updated_at.strftime('%Y-%m-%d')}
if '/issues/' in entry['url']:
issues.append(entry)
elif '/pull/' in entry['url']:
pulls.append(entry)
except Exception as e:
print('failed with', i.number, i.title, i.html_url, i.updated_at, file=sys.stderr)
issues.sort(key=lambda x: x['updated'])
pulls.sort(key=lambda x: x['updated'])
return pulls, issues
def main():
'''Main driver.'''
token_file = sys.argv[1]
output_file = sys.argv[2]
cnx = get_connection(token_file)
dashboard = process(cnx)
with open(output_file, 'w') as writer:
yaml.dump(dashboard, writer, encoding='utf-8', allow_unicode=True)
if __name__ == '__main__':
main()
| mit |
marcoarruda/MissionPlanner | Lib/site-packages/numpy/oldnumeric/typeconv.py | 101 | 1622 | __all__ = ['oldtype2dtype', 'convtypecode', 'convtypecode2', 'oldtypecodes']
import numpy as np
oldtype2dtype = {'1': np.dtype(np.byte),
's': np.dtype(np.short),
# 'i': np.dtype(np.intc),
# 'l': np.dtype(int),
# 'b': np.dtype(np.ubyte),
'w': np.dtype(np.ushort),
'u': np.dtype(np.uintc),
# 'f': np.dtype(np.single),
# 'd': np.dtype(float),
# 'F': np.dtype(np.csingle),
# 'D': np.dtype(complex),
# 'O': np.dtype(object),
# 'c': np.dtype('c'),
None: np.dtype(int)
}
# converts typecode=None to int
def convtypecode(typecode, dtype=None):
if dtype is None:
try:
return oldtype2dtype[typecode]
except:
return np.dtype(typecode)
else:
return dtype
#if both typecode and dtype are None
# return None
def convtypecode2(typecode, dtype=None):
if dtype is None:
if typecode is None:
return None
else:
try:
return oldtype2dtype[typecode]
except:
return np.dtype(typecode)
else:
return dtype
_changedtypes = {'B': 'b',
'b': '1',
'h': 's',
'H': 'w',
'I': 'u'}
class _oldtypecodes(dict):
def __getitem__(self, obj):
char = np.dtype(obj).char
try:
return _changedtypes[char]
except KeyError:
return char
oldtypecodes = _oldtypecodes()
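# Illustrative round trip (added for exposition): the legacy one-character
# typecodes map to dtypes and back, e.g. convtypecode('s') gives
# np.dtype(np.short), while oldtypecodes[np.ushort] gives 'w'.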
| gpl-3.0 |
suutari/shoop | shuup_tests/core/test_telemetry.py | 1 | 8497 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import requests
from django.test.utils import override_settings
from django.utils.timezone import now
from requests.models import Response
import pytest
from mock import patch
from shuup.admin.modules.system import SystemModule
from shuup.admin.views.dashboard import DashboardView
from shuup.core.models import PersistentCacheEntry
from shuup.core.telemetry import (
get_daily_data, get_daily_data_for_day, get_installation_key,
get_last_submission_time, get_telemetry_data, INSTALLATION_KEY_KWARGS,
is_opt_out, LAST_DATA_KWARGS, set_opt_out, TelemetryNotSent,
try_send_telemetry
)
from shuup.testing.factories import (
create_empty_order, create_order_with_product,
create_product, create_random_company,
get_default_shop, get_default_supplier,
UserFactory)
from shuup.testing.utils import apply_request_middleware
from shuup_tests.utils import SmartClient
class MockResponse(Response):
def __init__(self, content):
self.content = content
super(MockResponse, self).__init__()
def content(self):
return self.content
def _backdate_installation_key(days=24):
get_installation_key()
PersistentCacheEntry.objects.filter(**INSTALLATION_KEY_KWARGS).update(time=now() - datetime.timedelta(days=days))
def _backdate_telemetry_submission(days=24):
PersistentCacheEntry.objects.filter(**LAST_DATA_KWARGS).update(time=now() - datetime.timedelta(days=days))
def _clear_telemetry_submission():
PersistentCacheEntry.objects.filter(**LAST_DATA_KWARGS).delete()
@pytest.mark.django_db
def test_get_telemetry_data(rf, admin_user):
data = json.loads(get_telemetry_data(rf.get("/")))
assert data.get("host")
assert data.get("admin_user") == admin_user.email
assert not json.loads(get_telemetry_data(None)).get("host")
@pytest.mark.django_db
def test_get_telemetry_data_after_login(rf, admin_user):
get_default_shop()
# create users to ensure correct admin is found
UserFactory()
UserFactory()
data = json.loads(get_telemetry_data(rf.get("/")))
assert data.get("admin_user") == admin_user.email
assert not data.get("last_login")
client = SmartClient()
client.login(username="admin", password="password")
data = json.loads(get_telemetry_data(rf.get("/")))
assert data.get("admin_user") == admin_user.email
last_login = data.get("last_login", None)
assert last_login
last_login_datetime = datetime.datetime.strptime(last_login, "%Y-%m-%dT%H:%M:%S.%fZ")
today = datetime.datetime.now()
assert last_login_datetime.year == today.year
assert last_login_datetime.month == today.month
assert last_login_datetime.day == today.day
@pytest.mark.django_db
def test_optin_optout(rf, admin_user):
with override_settings(SHUUP_TELEMETRY_ENABLED=True, DEBUG=True):
with patch.object(requests, "post", return_value=MockResponse("test")) as requestor:
_clear_telemetry_submission()
assert not set_opt_out(False) # Not opted out
assert not is_opt_out()
try_send_telemetry()
with pytest.raises(TelemetryNotSent) as ei:
try_send_telemetry(raise_on_error=True) # Still within the grace period
assert ei.value.code == "grace"
_backdate_installation_key()
try_send_telemetry(max_age_hours=72)
try_send_telemetry(max_age_hours=None) # Forcibly re-send for the hell of it
with pytest.raises(TelemetryNotSent) as ei:
try_send_telemetry(raise_on_error=True) # Don't ignore last-send; shouldn't send anyway
assert ei.value.code == "age"
assert len(requestor.mock_calls) == 2
assert set_opt_out(True)
assert is_opt_out()
with pytest.raises(TelemetryNotSent) as ei:
try_send_telemetry(max_age_hours=0, raise_on_error=True)
assert ei.value.code == "optout"
assert len(requestor.mock_calls) == 2
@pytest.mark.django_db
def test_disable(rf, admin_user):
with override_settings(SHUUP_TELEMETRY_ENABLED=False):
_clear_telemetry_submission()
_backdate_installation_key()
set_opt_out(False)
with pytest.raises(TelemetryNotSent) as ei:
try_send_telemetry(raise_on_error=True, max_age_hours=None) # Should re-send (if we weren't disabled)
assert ei.value.code == "disabled"
@pytest.mark.django_db
def test_graceful_error(admin_user):
def thrower(*args, **kwargs):
raise ValueError("aaaagh")
with override_settings(SHUUP_TELEMETRY_ENABLED=True):
with patch.object(requests, "post", thrower) as requestor:
_clear_telemetry_submission()
_backdate_installation_key()
set_opt_out(False)
assert try_send_telemetry(raise_on_error=True).get("error")
def test_disabling_telemetry_hides_menu_item(rf):
request = rf.get("/")
with override_settings(SHUUP_TELEMETRY_ENABLED=True):
assert any(me.original_url == "shuup_admin:telemetry" for me in SystemModule().get_menu_entries(request))
with override_settings(SHUUP_TELEMETRY_ENABLED=False):
assert not any(me.original_url == "shuup_admin:telemetry" for me in SystemModule().get_menu_entries(request))
@pytest.mark.django_db
def test_telemetry_is_sent_on_login(rf, admin_user):
shop = get_default_shop()
with patch.object(requests, "post", return_value=MockResponse("test")) as requestor:
with override_settings(SHUUP_TELEMETRY_ENABLED=True):
_backdate_installation_key(days=0) # instance was created today
request = apply_request_middleware(rf.get("/"), user=admin_user)
view_func = DashboardView.as_view()
response = view_func(request)
sent = get_last_submission_time()
response = view_func(request)
assert get_last_submission_time() == sent
response = view_func(request)
assert get_last_submission_time() == sent
assert len(requestor.mock_calls) == 1
def _create_order_for_day(shop, day):
order = create_empty_order(shop)
order.order_date = day
order.save()
def _create_product_for_day(shop, day):
product = create_product("test_product")
product.created_on = day
product.save()
def _create_customer_for_day(shop, day):
company = create_random_company()
company.created_on = day
company.save()
def _create_total_sales(shop, day):
product = create_product("test", shop=shop)
supplier = get_default_supplier()
order = create_order_with_product(product, supplier, 1, 10, shop=shop)
order.order_date = day
order.save()
def _create_total_paid_sales(shop, day):
product = create_product("test", shop=shop)
supplier = get_default_supplier()
order = create_order_with_product(product, supplier, 1, 10, shop=shop)
order.order_date = day
order.save()
order.create_payment(order.taxful_total_price)
assert order.is_paid()
@pytest.mark.parametrize("data_key, data_value, create_object", [
("orders", 1, _create_order_for_day),
("products", 1, _create_product_for_day),
("contacts", 1, _create_customer_for_day),
("total_sales", 10, _create_total_sales),
("total_paid_sales", 10, _create_total_paid_sales),
])
@pytest.mark.django_db
def test_telemetry_daily_data_components(data_key, data_value, create_object):
shop = get_default_shop()
datetime_now = now()
today = datetime.date(datetime_now.year, datetime_now.month, datetime_now.day)
create_object(shop, today)
assert get_daily_data_for_day(today)[data_key] == data_value
@pytest.mark.django_db
def test_telemetry_multiple_days(rf, admin_user):
with override_settings(SHUUP_TELEMETRY_ENABLED=True, DEBUG=True):
with patch.object(requests, "post", return_value=MockResponse("test")) as requestor:
try_send_telemetry()
day = now()
_backdate_telemetry_submission(days=0)
assert not get_daily_data(day)
_backdate_telemetry_submission(days=20)
assert len(get_daily_data(now())) == 19 # Since current day is not added to telemetry
| agpl-3.0 |
163gal/Time-Line | libs/wx/lib/pubsub/core/weakmethod.py | 9 | 4103 | '''
This module provides a basic "weak method" implementation. It is necessary
because the weakref module does not support weak methods (in the sense that,
counter-intuitively, a user who creates a weakref.ref(obj.method), a reasonable
action, gets a weak ref that is dead on creation).
:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
# for function and method parameter counting:
from inspect import ismethod
# for weakly bound methods:
from new import instancemethod as InstanceMethod
from weakref import ref as WeakRef
class WeakMethod:
"""Represent a weak bound method, i.e. a method which doesn't keep alive the
object that it is bound to. It uses WeakRef which, used on its own,
produces weak methods that are dead on creation, not very useful.
Typically, you will use the getWeakRef() module function instead of using
this class directly. """
def __init__(self, method, notifyDead = None):
"""The method must be bound. notifyDead will be called when
object that method is bound to dies. """
assert ismethod(method)
if method.im_self is None:
raise ValueError('Unbound methods cannot be weak-referenced.')
self.notifyDead = None
if notifyDead is None:
self.objRef = WeakRef(method.im_self)
else:
self.notifyDead = notifyDead
self.objRef = WeakRef(method.im_self, self.__onNotifyDeadObj)
self.fun = method.im_func
self.cls = method.im_class
def __onNotifyDeadObj(self, ref):
if self.notifyDead:
try:
self.notifyDead(self)
except Exception:
import traceback
traceback.print_exc()
def __call__(self):
"""Returns a new.instancemethod if object for method still alive.
Otherwise return None. Note that instancemethod causes a
strong reference to object to be created, so shouldn't save
the return value of this call. Note also that this __call__
is required only for compatibility with WeakRef.ref(), otherwise
there would be more efficient ways of providing this functionality."""
if self.objRef() is None:
return None
else:
return InstanceMethod(self.fun, self.objRef(), self.cls)
def __eq__(self, method2):
"""Two WeakMethod objects compare equal if they refer to the same method
of the same instance. Thanks to Josiah Carlson for patch and clarifications
on how dict uses eq/cmp and hashing. """
if not isinstance(method2, WeakMethod):
return False
return ( self.fun is method2.fun
and self.objRef() is method2.objRef()
and self.objRef() is not None )
def __hash__(self):
"""Hash is an optimization for dict searches, it need not
return different numbers for every different object. Some objects
are not hashable (eg objects of classes derived from dict) so no
hash(objRef()) in there, and hash(self.cls) would only be useful
in the rare case where instance method was rebound. """
return hash(self.fun)
def __repr__(self):
dead = ''
if self.objRef() is None:
dead = '; DEAD'
obj = '<%s at %s%s>' % (self.__class__, id(self), dead)
return obj
def refs(self, weakRef):
"""Return true if we are storing same object referred to by weakRef."""
return self.objRef == weakRef
def getWeakRef(obj, notifyDead=None):
"""Get a weak reference to obj. If obj is a bound method, a WeakMethod
object, that behaves like a WeakRef, is returned; if it is
anything else a WeakRef is returned. If obj is an unbound method,
a ValueError will be raised."""
if ismethod(obj):
createRef = WeakMethod
else:
createRef = WeakRef
return createRef(obj, notifyDead)
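def _exampleUsage():
    # Illustrative sketch (added for exposition, not part of the original
    # module). A plain WeakRef to a bound method dies immediately, because
    # the bound-method object is a temporary; getWeakRef() keeps the method
    # callable for as long as the underlying instance is alive.
    class _Greeter:
        def hello(self):
            return 'hello'
    greeter = _Greeter()
    assert WeakRef(greeter.hello)() is None    # dead on creation
    weakHello = getWeakRef(greeter.hello)
    assert weakHello()() == 'hello'            # resolves while greeter lives
    del greeter
    assert weakHello() is None                 # dead once the instance dies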
| gpl-3.0 |
jarshwah/django | tests/queryset_pickle/models.py | 281 | 1904 | import datetime
from django.db import DJANGO_VERSION_PICKLE_KEY, models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
def standalone_number():
return 1
class Numbers(object):
@staticmethod
def get_static_number():
return 2
class PreviousDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(PreviousDjangoVersionQuerySet, self).__getstate__()
state[DJANGO_VERSION_PICKLE_KEY] = '1.0'
return state
class MissingDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(MissingDjangoVersionQuerySet, self).__getstate__()
del state[DJANGO_VERSION_PICKLE_KEY]
return state
class Group(models.Model):
name = models.CharField(_('name'), max_length=100)
objects = models.Manager()
previous_django_version_objects = PreviousDjangoVersionQuerySet.as_manager()
missing_django_version_objects = MissingDjangoVersionQuerySet.as_manager()
class Event(models.Model):
title = models.CharField(max_length=100)
group = models.ForeignKey(Group, models.CASCADE)
class Happening(models.Model):
when = models.DateTimeField(blank=True, default=datetime.datetime.now)
name = models.CharField(blank=True, max_length=100, default="test")
number1 = models.IntegerField(blank=True, default=standalone_number)
if six.PY3:
# default serializable on Python 3 only
number2 = models.IntegerField(blank=True, default=Numbers.get_static_number)
class Container(object):
# To test pickling we need a class that isn't defined on module, but
# is still available from app-cache. So, the Container class moves
# SomeModel outside of module level
class SomeModel(models.Model):
somefield = models.IntegerField()
class M2MModel(models.Model):
groups = models.ManyToManyField(Group)
| bsd-3-clause |
chauhanmohit/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/prettypatch.py | 140 | 2966 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
class PrettyPatch(object):
# FIXME: PrettyPatch should not require checkout_root.
def __init__(self, executive, checkout_root):
self._executive = executive
self._checkout_root = checkout_root
def pretty_diff_file(self, diff):
# Diffs can contain multiple text files of different encodings
# so we always deal with them as byte arrays, not unicode strings.
assert(isinstance(diff, str))
pretty_diff = self.pretty_diff(diff)
diff_file = tempfile.NamedTemporaryFile(suffix=".html")
diff_file.write(pretty_diff)
diff_file.flush()
return diff_file
def pretty_diff(self, diff):
# pretify.rb will hang forever if given no input.
# Avoid the hang by returning an empty string.
if not diff:
return ""
pretty_patch_path = os.path.join(self._checkout_root,
"Websites", "bugs.webkit.org",
"PrettyPatch")
prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
args = [
"ruby",
"-I",
pretty_patch_path,
prettify_path,
]
# PrettyPatch does not modify the encoding of the diff output
# so we can't expect it to be utf-8.
return self._executive.run_command(args, input=diff, decode_output=False)
| bsd-3-clause |
kyvinh/home-assistant | homeassistant/components/notify/pushbullet.py | 9 | 5058 | """
PushBullet platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushbullet/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pushbullet.py==0.10.0']
ATTR_URL = 'url'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
})
# pylint: disable=unused-argument
def get_service(hass, config, discovery_info=None):
"""Get the PushBullet notification service."""
from pushbullet import PushBullet
from pushbullet import InvalidKeyError
try:
pushbullet = PushBullet(config[CONF_API_KEY])
except InvalidKeyError:
_LOGGER.error(
"Wrong API key supplied. "
"Get it at https://www.pushbullet.com/account")
return None
return PushBulletNotificationService(pushbullet)
class PushBulletNotificationService(BaseNotificationService):
"""Implement the notification service for Pushbullet."""
def __init__(self, pb):
"""Initialize the service."""
self.pushbullet = pb
self.pbtargets = {}
self.refresh()
def refresh(self):
"""Refresh devices, contacts, etc.
pbtargets stores all targets available from this pushbullet instance
into a dict. These are PB objects! It sacrifices a bit of memory
for faster processing at send_message.
As of Sept 2015, contacts were replaced by chats. This is not
implemented in the module yet.
"""
self.pushbullet.refresh()
self.pbtargets = {
'device': {
tgt.nickname.lower(): tgt for tgt in self.pushbullet.devices},
'channel': {
tgt.channel_tag.lower(): tgt for
tgt in self.pushbullet.channels},
}
def send_message(self, message=None, **kwargs):
"""Send a message to a specified target.
If no target specified, a 'normal' push will be sent to all devices
linked to the PB account.
Email is special, these are assumed to always exist. We use a special
call which doesn't require a push object.
"""
targets = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA)
url = None
if data:
url = data.get(ATTR_URL, None)
refreshed = False
if not targets:
# Backward compatibility: notify all devices in own account
if url:
self.pushbullet.push_link(title, url, body=message)
else:
self.pushbullet.push_note(title, message)
_LOGGER.info('Sent notification to self')
return
# Main loop, Process all targets specified
for target in targets:
try:
ttype, tname = target.split('/', 1)
except ValueError:
_LOGGER.error('Invalid target syntax: %s', target)
continue
# Target is email, send directly, don't use a target object
# This also seems works to send to all devices in own account
if ttype == 'email':
if url:
self.pushbullet.push_link(title, url,
body=message, email=tname)
else:
self.pushbullet.push_note(title, message, email=tname)
_LOGGER.info('Sent notification to email %s', tname)
continue
# Refresh if name not found. While awaiting a periodic refresh
# solution in the component, poor man's refresh ;)
if ttype not in self.pbtargets:
_LOGGER.error('Invalid target syntax: %s', target)
continue
tname = tname.lower()
if tname not in self.pbtargets[ttype] and not refreshed:
self.refresh()
refreshed = True
# Attempt push_note on a dict value. Keys are types & target
# name. Dict pbtargets has all *actual* targets.
try:
if url:
self.pbtargets[ttype][tname].push_link(title, url,
body=message)
else:
self.pbtargets[ttype][tname].push_note(title, message)
_LOGGER.info('Sent notification to %s/%s', ttype, tname)
except KeyError:
_LOGGER.error('No such target: %s/%s', ttype, tname)
continue
except self.pushbullet.errors.PushError:
_LOGGER.error('Notify failed to: %s/%s', ttype, tname)
continue
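# Illustrative target syntax (added for exposition; the names are
# hypothetical). Targets take the form "<type>/<name>":
#   "device/pixel"            -> push to a named device
#   "channel/my_home"         -> push to a channel tag
#   "email/user@example.org"  -> direct email push, no target object needed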
| apache-2.0 |
linjunzhe/p2pool | SOAPpy/Utilities.py | 294 | 5112 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Utilities.py 1298 2006-11-07 00:54:15Z sanxiyn $'
from version import __version__
import re
import string
import sys
from types import *
# SOAPpy modules
from Errors import *
################################################################################
# Utility infielders
################################################################################
def collapseWhiteSpace(s):
return re.sub('\s+', ' ', s).strip()
def decodeHexString(data):
conv = {
'0': 0x0, '1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4,
'5': 0x5, '6': 0x6, '7': 0x7, '8': 0x8, '9': 0x9,
'a': 0xa, 'b': 0xb, 'c': 0xc, 'd': 0xd, 'e': 0xe,
'f': 0xf,
'A': 0xa, 'B': 0xb, 'C': 0xc, 'D': 0xd, 'E': 0xe,
'F': 0xf,
}
ws = string.whitespace
bin = ''
i = 0
while i < len(data):
if data[i] not in ws:
break
i += 1
low = 0
while i < len(data):
c = data[i]
if c in string.whitespace:
break
try:
c = conv[c]
except KeyError:
raise ValueError, \
"invalid hex string character `%s'" % c
if low:
bin += chr(high * 16 + c)
low = 0
else:
high = c
low = 1
i += 1
if low:
raise ValueError, "invalid hex string length"
while i < len(data):
if data[i] not in string.whitespace:
raise ValueError, \
"invalid hex string character `%s'" % data[i]
i += 1
return bin
def encodeHexString(data):
h = ''
for i in data:
h += "%02X" % ord(i)
return h
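def _exampleHexRoundTrip():
    # Illustrative sketch (added for exposition): the two helpers are
    # inverses, and the decoder accepts whitespace around a single
    # contiguous run of hex digits.
    assert encodeHexString('SOAP') == '534F4150'
    assert decodeHexString('  534F4150  ') == 'SOAP'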
def leapMonth(year, month):
return month == 2 and \
year % 4 == 0 and \
(year % 100 != 0 or year % 400 == 0)
def cleanDate(d, first = 0):
ranges = (None, (1, 12), (1, 31), (0, 23), (0, 59), (0, 61))
months = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
names = ('year', 'month', 'day', 'hours', 'minutes', 'seconds')
if len(d) != 6:
raise ValueError, "date must have 6 elements"
for i in range(first, 6):
s = d[i]
if type(s) == FloatType:
if i < 5:
try:
s = int(s)
except OverflowError:
if i > 0:
raise
s = long(s)
if s != d[i]:
raise ValueError, "%s must be integral" % names[i]
d[i] = s
elif type(s) == LongType:
try: s = int(s)
except: pass
elif type(s) != IntType:
raise TypeError, "%s isn't a valid type" % names[i]
if i == first and s < 0:
continue
if ranges[i] != None and \
(s < ranges[i][0] or ranges[i][1] < s):
raise ValueError, "%s out of range" % names[i]
if first < 6 and d[5] >= 61:
raise ValueError, "seconds out of range"
if first < 2:
leap = first < 1 and leapMonth(d[0], d[1])
if d[2] > months[d[1]] + leap:
raise ValueError, "day out of range"
def debugHeader(title):
s = '*** ' + title + ' '
print s + ('*' * (72 - len(s)))
def debugFooter(title):
print '*' * 72
sys.stdout.flush()
| gpl-3.0 |
ecederstrand/django | tests/m2m_regress/models.py | 358 | 2826 | from django.contrib.auth import models as auth
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
@python_2_unicode_compatible
class SelfRefer(models.Model):
name = models.CharField(max_length=10)
references = models.ManyToManyField('self')
related = models.ManyToManyField('self')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
# Regression for #11956 -- a many to many to the base class
@python_2_unicode_compatible
class TagCollection(Tag):
tags = models.ManyToManyField(Tag, related_name='tag_collections')
def __str__(self):
return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
@python_2_unicode_compatible
class Entry(models.Model):
name = models.CharField(max_length=10)
topics = models.ManyToManyField(Tag)
related = models.ManyToManyField(Tag, related_name="similar")
def __str__(self):
return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
pass
class SelfReferChildSibling(SelfRefer):
pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
@python_2_unicode_compatible
class Line(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Worksheet(models.Model):
id = models.CharField(primary_key=True, max_length=100)
lines = models.ManyToManyField(Line, blank=True)
# Regression for #11226 -- A model with the same name that another one to
# which it has a m2m relation. This shouldn't cause a name clash between
# the automatically created m2m intermediary table FK field names when
# running migrate
class User(models.Model):
name = models.CharField(max_length=30)
friends = models.ManyToManyField(auth.User)
class BadModelWithSplit(models.Model):
name = models.CharField(max_length=1)
def split(self):
raise RuntimeError('split should not be called')
class Meta:
abstract = True
class RegressionModelSplit(BadModelWithSplit):
"""
Model with a split method should not cause an error in add_lazy_relation
"""
others = models.ManyToManyField('self')
# Regression for #24505 -- Two ManyToManyFields with the same "to" model
# and related_name set to '+'.
class Post(models.Model):
primary_lines = models.ManyToManyField(Line, related_name='+')
secondary_lines = models.ManyToManyField(Line, related_name='+')
| bsd-3-clause |
TridevGuha/django | tests/select_related_regress/tests.py | 206 | 9404 | from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (
A, B, C, Building, Chick, Child, Class, Client, ClientStatus, Connection,
Country, Device, Enrollment, Hen, Item, Organizer, Person, Port,
SpecialClient, State, Student, TUser,
)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertNotIn('LEFT OUTER', str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertIn('LEFT OUTER', str(qs.query))
def test_regression_19870(self):
hen = Hen.objects.create(name='Hen')
Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_regression_10733(self):
a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
c_a=a, c_b=b)
results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
'c_a__name', 'c_b__name').select_related()
self.assertQuerysetEqual(results, [c], lambda x: x)
with self.assertNumQueries(0):
qs_c = results[0]
self.assertEqual(qs_c.name, 'c')
self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
self.assertEqual(qs_c.c_a.name, 'a')
self.assertEqual(qs_c.c_b.name, 'b')
def test_regression_22508(self):
building = Building.objects.create(name='101')
device = Device.objects.create(name="router", building=building)
Port.objects.create(port_number='1', device=device)
device = Device.objects.get()
port = device.port_set.select_related('device__building').get()
with self.assertNumQueries(0):
port.device.building
| bsd-3-clause |
tjandy/work | excelToCode/csv_to_csharp.py | 1 | 3837 | #coding:utf-8
import glob
from xml.etree import ElementTree
desc=""
def print_node(node):
    '''Print basic information about a node.'''
print node.text
def read_xml(text):
    '''Read an XML document from a string.'''
    # Load the XML (two ways: parse a string with fromstring, or a file with parse)
# root = ElementTree.parse(r"D:/test.xml")
root = ElementTree.fromstring(text)
    # Ways to fetch elements:
    # 1. via getiterator
lst_node = root.getiterator("descript")
for node in lst_node:
print_node(node)
lst_node = root.getiterator("import")
for node in lst_node:
print_node(node)
lst_node = root.getiterator("classhead")
for node in lst_node:
print_node(node)
    # 2. via getchildren
# lst_node_child = lst_node[0].getchildren()[0]
# print_node(lst_node_child)
    # # 3. the .find method
# node_find = root.find('plist')
# print_node(node_find)
#
    # # 4. the findall method
# node_findall = root.findall("plist")[1]
# print_node(node_findall)
def buildCode(filePath):
    # C# class template
classTemplate = """
using System;
namespace Data
{
/// <summary>
/// 静态表数据类
/// </summary>
[Serializable]
public class $className
{
$paramFied
}
}
"""
    # C# field template
paramTemplate = """
/// <summary>
/// $paramNote
/// </summary>
public $paramType $paramName;"""
className = filePath.split("\\")[-1].split(".")[0] + "Info"
code = "";
try:
with open(filePath) as dataFile:
notes = dataFile.readline().strip('\n').split(",")
params = dataFile.readline().strip('\n').split(",")
for i,paramName in enumerate(params):
try:
paramNote = notes[i]
                    # default type is string
paramType = "string"
                    # look for an explicit type marker in the note
findIndex = paramNote.rfind("[int]")
if(findIndex != -1):
paramType = "int"
paramNote = paramNote[0:findIndex]
findIndex = paramNote.rfind("[float]")
if(findIndex != -1):
paramType = "float"
paramNote = paramNote[0:findIndex]
                    # generate the field declaration
code += paramTemplate.replace("$paramNote", paramNote).replace("$paramName", paramName).replace("$paramType",paramType)
except ValueError:
pass
            # generate the class
            code = classTemplate.replace("$className", className).replace("$paramFied", code)
except IOError as err:
print("The data file is missing:" + str(err))
try:
with open("csv\\" + className + ".cs", "w") as outFile:
outFile.write(code)
except IOError:
print("write file error")
def build_tableManger():
return
# Export every file under the csv directory
csvFiles = glob.glob("csv\\*.csv")
# for eachFile in csvFiles:
# buildCode(eachFile)
# print("csv_to_csharp ok.")
read_xml(open("CSharpCodeTemplate.xml").read())
#input()
| mit |
streamlink/streamlink | docs/conf.py | 3 | 6502 | #!/usr/bin/env python3
import os
import sys
from streamlink import __version__ as streamlink_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'ext_argparse',
'ext_github',
'ext_releaseref',
'recommonmark'
]
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Streamlink'
copyright = '2021, Streamlink'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = streamlink_version.split('+')[0]
# The full version, including alpha/beta/rc tags.
release = streamlink_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_applications.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
github_project = 'streamlink/streamlink'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../icon.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'styles/custom.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/fontawesome.min.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/solid.min.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/brands.min.css',
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'sidebar/scroll-start.html',
'sidebar/brand.html',
'sidebar/search.html',
'sidebar/navigation.html',
'sidebar/github-buttons.html',
'sidebar/scroll-end.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'streamlinkdoc'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('_man', 'streamlink', 'extracts streams from various services and pipes them into a video player of choice', ['Streamlink Contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# If true, make a section directory on build man page.
# Always set this to false to fix inconsistencies between recent sphinx releases
man_make_section_directory = False
| bsd-2-clause |
rajat1994/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
homework/nox | src/nox/coreapps/pyrt/componentws.py | 10 | 4528 | from nox.webapps.webservice import webservice
from nox.lib.config import version
from nox.lib.core import *
import simplejson
#
# Verifies that component name exists
#
class WSPathExistingComponent(webservice.WSPathComponent):
def __init__(self, component):
webservice.WSPathComponent.__init__(self)
self._component = component
def __str__(self):
return "<component name>"
def extract(self, pc, data):
if pc == None:
return webservice.WSPathExtractResult(error="End of requested URI")
components = self._component.ctxt.get_kernel().get_all()
for component in components:
if component.get_name() == pc:
return webservice.WSPathExtractResult(pc)
e = "Unknown component '%s'" % pc
return webservice.WSPathExtractResult(error=e)
class componentws(Component):
"""Web service interface to component runtime"""
cstate = {}
cstate[0] = 'NOT_INSTALLED'
cstate[1] = 'DESCRIBED'
cstate[2] = 'LOADED'
cstate[3] = 'FACTORY_INSTANTIATED'
cstate[4] = 'INSTANTIATED'
cstate[5] = 'CONFIGURED'
cstate[6] = 'INSTALLED'
cstate[7] = 'ERROR'
def __init__(self, ctxt):
Component.__init__(self, ctxt)
def _get_nox_components(self, request, arg):
components = self.ctxt.get_kernel().get_all()
cdict = {}
cdict['identifier'] = 'name'
cdict['items'] = []
for component in components:
comp = {}
comp['name'] = component.get_name()
comp['version'] = version
comp['uptime'] = self.ctxt.get_kernel().uptime()
comp['state'] = componentws.cstate[component.get_state()]
comp['required_state'] = componentws.cstate[component.get_required_state()]
cdict['items'].append(comp)
return simplejson.dumps(cdict)
# not implemented yet
def _get_component_uptime(self,request,arg):
return simplejson.dumps("")
# not implemented yet
def _get_component_version(self,request,arg):
return simplejson.dumps("")
def _get_nox_component_status(self, request, arg):
components = self.ctxt.get_kernel().get_all()
cname = arg['<component name>']
for component in components:
if component.get_name() == cname:
cdict = {}
cdict['name'] = component.get_name()
cdict['state'] = component.get_state()
cdict['required_state'] = component.get_required_state()
return simplejson.dumps(cdict)
def install(self):
ws = self.resolve(str(webservice.webservice))
v1 = ws.get_version("1")
reg = v1.register_request
# /ws.v1/nox
noxpath = ( webservice.WSPathStaticString("nox"), )
# /ws.v1/nox/components
noxcomponentspath = noxpath + \
( webservice.WSPathStaticString("components"), )
reg(self._get_nox_components, "GET", noxcomponentspath,
"""Get list of nox components and their status""")
# /ws.v1/component/<component name>
noxcomponentnamepath = noxpath + \
( webservice.WSPathStaticString("component"),
WSPathExistingComponent(self))
# /ws.v1/component/<component name>/status
noxcomponentstatus = noxcomponentnamepath + \
( webservice.WSPathStaticString("status"), )
reg(self._get_nox_component_status, "GET", noxcomponentstatus,
"""Get status for given nox component""")
# /ws.v1/component/<component name>/version
# XXX Currently just return nox version
noxcomponentversion = noxcomponentnamepath + \
( webservice.WSPathStaticString("version"), )
reg(self._get_component_version, "GET", noxcomponentversion,
"""Get version for given nox component""")
# /ws.v1/component/<component name>/uptime
# XXX Currently just return nox uptime
noxcomponentuptime = noxcomponentnamepath + \
( webservice.WSPathStaticString("uptime"), )
reg(self._get_component_uptime, "GET", noxcomponentuptime,
"""Get uptime for given nox component""")
def getInterface(self):
return str(componentws)
def getFactory():
class Factory:
def instance(self, ctxt):
return componentws(ctxt)
return Factory()
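# Illustrative sketch (not part of the original module; host and port are
# deployment-specific): the paths registered in install() answer HTTP GETs:
#   GET /ws.v1/nox/components              -> all components and their states
#   GET /ws.v1/component/<name>/status     -> state of a single component
#   GET /ws.v1/component/<name>/version    -> currently just the NOX version
#   GET /ws.v1/component/<name>/uptime     -> currently just the NOX uptime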
| gpl-3.0 |
40223236/2015cd_midterm_1 | pybean.py | 562 | 8617 | #coding: utf-8
import sqlite3
from pkg_resources import parse_version
__version__ = "0.2.1"
__author__ = "Mickael Desfrenes"
__email__ = "desfrenes@gmail.com"
# Yen 2013.04.08: changed Python 2's .next() to next() so the module runs on Python 3
class SQLiteWriter(object):
"""
In frozen mode (the default), the writer will not alter db schema.
Just add frozen=False to enable column creation (or just add False
as second parameter):
query_writer = SQLiteWriter(":memory:", False)
"""
def __init__(self, db_path=":memory:", frozen=True):
self.db = sqlite3.connect(db_path)
self.db.isolation_level = None
self.db.row_factory = sqlite3.Row
self.frozen = frozen
self.cursor = self.db.cursor()
self.cursor.execute("PRAGMA foreign_keys=ON;")
self.cursor.execute('PRAGMA encoding = "UTF-8";')
self.cursor.execute('BEGIN;')
def __del__(self):
self.db.close()
def replace(self, bean):
keys = []
values = []
write_operation = "replace"
if "id" not in bean.__dict__:
write_operation = "insert"
keys.append("id")
values.append(None)
self.__create_table(bean.__class__.__name__)
columns = self.__get_columns(bean.__class__.__name__)
for key in bean.__dict__:
keys.append(key)
if key not in columns:
self.__create_column(bean.__class__.__name__, key,
type(bean.__dict__[key]))
values.append(bean.__dict__[key])
sql = write_operation + " into " + bean.__class__.__name__ + "("
sql += ",".join(keys) + ") values ("
sql += ",".join(["?" for i in keys]) + ")"
self.cursor.execute(sql, values)
if write_operation == "insert":
bean.id = self.cursor.lastrowid
return bean.id
def __create_column(self, table, column, sqltype):
if self.frozen:
return
if sqltype in [float, int, bool]:
sqltype = "NUMERIC"
else:
sqltype = "TEXT"
sql = "alter table " + table + " add " + column + " " + sqltype
self.cursor.execute(sql)
def __get_columns(self, table):
columns = []
if self.frozen:
return columns
self.cursor.execute("PRAGMA table_info(" + table + ")")
for row in self.cursor:
columns.append(row["name"])
return columns
def __create_table(self, table):
if self.frozen:
return
sql = "create table if not exists " + table + "(id INTEGER PRIMARY KEY AUTOINCREMENT)"
self.cursor.execute(sql)
def get_rows(self, table_name, sql = "1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "SELECT * FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
for row in self.cursor:
yield row
except sqlite3.OperationalError:
return
def get_count(self, table_name, sql="1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "SELECT count(*) AS cnt FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
except sqlite3.OperationalError:
return 0
for row in self.cursor:
return row["cnt"]
def delete(self, bean):
self.__create_table(bean.__class__.__name__)
sql = "delete from " + bean.__class__.__name__ + " where id=?"
self.cursor.execute(sql,[bean.id])
def link(self, bean_a, bean_b):
self.replace(bean_a)
self.replace(bean_b)
table_a = bean_a.__class__.__name__
table_b = bean_b.__class__.__name__
assoc_table = self.__create_assoc_table(table_a, table_b)
sql = "replace into " + assoc_table + "(" + table_a + "_id," + table_b
sql += "_id) values(?,?)"
self.cursor.execute(sql,
[bean_a.id, bean_b.id])
def unlink(self, bean_a, bean_b):
table_a = bean_a.__class__.__name__
table_b = bean_b.__class__.__name__
assoc_table = self.__create_assoc_table(table_a, table_b)
sql = "delete from " + assoc_table + " where " + table_a
sql += "_id=? and " + table_b + "_id=?"
self.cursor.execute(sql,
[bean_a.id, bean_b.id])
def get_linked_rows(self, bean, table_name):
bean_table = bean.__class__.__name__
assoc_table = self.__create_assoc_table(bean_table, table_name)
sql = "select t.* from " + table_name + " t inner join " + assoc_table
sql += " a on a." + table_name + "_id = t.id where a."
sql += bean_table + "_id=?"
self.cursor.execute(sql,[bean.id])
for row in self.cursor:
yield row
def __create_assoc_table(self, table_a, table_b):
assoc_table = "_".join(sorted([table_a, table_b]))
if not self.frozen:
sql = "create table if not exists " + assoc_table + "("
sql+= table_a + "_id NOT NULL REFERENCES " + table_a + "(id) ON DELETE cascade,"
sql+= table_b + "_id NOT NULL REFERENCES " + table_b + "(id) ON DELETE cascade,"
sql+= " PRIMARY KEY (" + table_a + "_id," + table_b + "_id));"
self.cursor.execute(sql)
# no real support for foreign keys until sqlite3 v3.6.19
# so here's the hack
            if parse_version(sqlite3.sqlite_version) < parse_version("3.6.19"):
sql = "create trigger if not exists fk_" + table_a + "_" + assoc_table
sql+= " before delete on " + table_a
sql+= " for each row begin delete from " + assoc_table + " where " + table_a + "_id = OLD.id;end;"
self.cursor.execute(sql)
sql = "create trigger if not exists fk_" + table_b + "_" + assoc_table
sql+= " before delete on " + table_b
sql+= " for each row begin delete from " + assoc_table + " where " + table_b + "_id = OLD.id;end;"
self.cursor.execute(sql)
return assoc_table
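    # Illustrative note (not from the original source): the association table
    # name is the two table names sorted and joined with "_", so linking a
    # "book" bean to an "author" bean creates a table named "author_book"
    # with columns author_id and book_id (plus delete-cascade triggers on
    # old sqlite3 versions).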
def delete_all(self, table_name, sql = "1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "DELETE FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
return True
except sqlite3.OperationalError:
return False
def commit(self):
self.db.commit()
class Store(object):
"""
A SQL writer should be passed to the constructor:
beans_save = Store(SQLiteWriter(":memory"), frozen=False)
"""
def __init__(self, SQLWriter):
self.writer = SQLWriter
def new(self, table_name):
new_object = type(table_name,(object,),{})()
return new_object
def save(self, bean):
self.writer.replace(bean)
def load(self, table_name, id):
for row in self.writer.get_rows(table_name, "id=?", [id]):
return self.row_to_object(table_name, row)
def count(self, table_name, sql = "1", replace=None):
return self.writer.get_count(table_name, sql, replace if replace is not None else [])
def find(self, table_name, sql = "1", replace=None):
for row in self.writer.get_rows(table_name, sql, replace if replace is not None else []):
yield self.row_to_object(table_name, row)
def find_one(self, table_name, sql = "1", replace=None):
try:
return next(self.find(table_name, sql, replace))
except StopIteration:
return None
def delete(self, bean):
self.writer.delete(bean)
def link(self, bean_a, bean_b):
self.writer.link(bean_a, bean_b)
def unlink(self, bean_a, bean_b):
self.writer.unlink(bean_a, bean_b)
def get_linked(self, bean, table_name):
for row in self.writer.get_linked_rows(bean, table_name):
yield self.row_to_object(table_name, row)
def delete_all(self, table_name, sql = "1", replace=None):
return self.writer.delete_all(table_name, sql, replace if replace is not None else [])
def row_to_object(self, table_name, row):
new_object = type(table_name,(object,),{})()
for key in row.keys():
new_object.__dict__[key] = row[key]
return new_object
def commit(self):
self.writer.commit()
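# Minimal usage sketch (illustrative, not part of the original library):
# exercises the documented Store/SQLiteWriter API against an in-memory db.
if __name__ == "__main__":
    store = Store(SQLiteWriter(":memory:", frozen=False))
    book = store.new("book")
    book.title = "pybean demo"
    store.save(book)                  # creates the table, inserts the row
    print(store.count("book"))        # -> 1
    for bean in store.find("book", "title=?", ["pybean demo"]):
        print(bean.title)
    store.commit()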
| gpl-3.0 |
ksrajkumar/openerp-6.1 | openerp/pychart/afm/Helvetica_Light.py | 15 | 1514 | # -*- coding: utf-8 -*-
# AFM font Helvetica-Light (path: /usr/share/fonts/afms/adobe/phvl8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["Helvetica-Light"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 333, 278, 556, 556, 889, 667, 222, 333, 333, 389, 660, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 278, 278, 660, 660, 660, 500, 800, 667, 667, 722, 722, 611, 556, 778, 722, 278, 500, 667, 556, 833, 722, 778, 611, 778, 667, 611, 556, 722, 611, 889, 611, 611, 611, 333, 278, 333, 660, 500, 222, 556, 611, 556, 611, 556, 278, 611, 556, 222, 222, 500, 222, 833, 556, 556, 611, 611, 333, 500, 278, 556, 500, 722, 500, 500, 500, 333, 222, 333, 660, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 333, 556, 556, 167, 556, 556, 556, 556, 222, 389, 556, 389, 389, 500, 500, 500, 500, 556, 556, 278, 500, 650, 500, 222, 389, 389, 556, 1000, 1000, 500, 500, 500, 333, 333, 333, 333, 333, 333, 333, 333, 500, 333, 333, 500, 333, 333, 333, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 1000, 500, 334, 500, 500, 500, 500, 556, 778, 1000, 334, 500, 500, 500, 500, 500, 889, 500, 500, 500, 222, 500, 500, 222, 556, 944, 500, )
| agpl-3.0 |
nitinitprof/odoo | openerp/pooler.py | 374 | 2561 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Functions kept for backward compatibility.
They are simple wrappers around a global RegistryManager methods.
"""
import logging
import openerp.conf.deprecation
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False):
"""Create and return a database connection and a newly initialized registry."""
assert openerp.conf.deprecation.openerp_pooler
_logger.warning('openerp.pooler.get_db_and_pool() is deprecated.')
registry = RegistryManager.get(db_name, force_demo, status, update_module)
return registry._db, registry
def restart_pool(db_name, force_demo=False, status=None, update_module=False):
"""Delete an existing registry and return a database connection and a newly initialized registry."""
_logger.warning('openerp.pooler.restart_pool() is deprecated.')
assert openerp.conf.deprecation.openerp_pooler
registry = RegistryManager.new(db_name, force_demo, status, update_module)
return registry._db, registry
def get_db(db_name):
"""Return a database connection. The corresponding registry is initialized."""
assert openerp.conf.deprecation.openerp_pooler
return get_db_and_pool(db_name)[0]
def get_pool(db_name, force_demo=False, status=None, update_module=False):
"""Return a model registry."""
assert openerp.conf.deprecation.openerp_pooler
return get_db_and_pool(db_name, force_demo, status, update_module)[1]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ShassAro/ShassAro | DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/django/core/handlers/base.py | 49 | 10871 | from __future__ import unicode_literals
import logging
import sys
import types
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.core import signals
from django.core.exceptions import MiddlewareNotUsed, PermissionDenied, SuspiciousOperation
from django.db import connections, transaction
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.utils import six
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if (db.settings_dict['ATOMIC_REQUESTS']
and db.alias not in non_atomic_requests):
view = transaction.atomic(using=db.alias)(view)
return view
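    # Illustrative note (not in the original source): a view opts out of the
    # ATOMIC_REQUESTS wrapping above with django.db.transaction's
    # non_atomic_requests decorator, which populates the _non_atomic_requests
    # set checked in make_view_atomic, e.g.:
    #
    #   from django.db import transaction
    #
    #   @transaction.non_atomic_requests
    #   def my_view(request):
    #       ...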
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, 'urlconf'):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
% (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except PermissionDenied:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SuspiciousOperation as e:
# The request logger receives events for any problematic request
# The security logger receives events for all SuspiciousOperations
security_logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
security_logger.error(
force_text(e),
extra={
'status_code': 400,
'request': request
})
try:
callback, param_dict = resolver.resolve400()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
response._closable_objects.append(request)
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
| gpl-2.0 |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/test/test_xml_etree_c.py | 49 | 1980 | # xml.etree test for cElementTree
from test import support
from test.support import precisionbigmemtest, _2G
import unittest
cET = support.import_module('xml.etree.cElementTree')
# cElementTree specific tests
def sanity():
r"""
Import sanity.
>>> from xml.etree import cElementTree
Issue #6697.
>>> e = cElementTree.Element('a')
>>> getattr(e, '\uD800') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
UnicodeEncodeError: ...
>>> p = cElementTree.XMLParser()
>>> p.version.split()[0]
'Expat'
>>> getattr(p, '\uD800')
Traceback (most recent call last):
...
AttributeError: 'XMLParser' object has no attribute '\ud800'
"""
class MiscTests(unittest.TestCase):
# Issue #8651.
@support.precisionbigmemtest(size=support._2G + 100, memuse=1)
def test_length_overflow(self, size):
if size < support._2G + 100:
self.skipTest("not enough free memory, need at least 2 GB")
data = b'x' * size
parser = cET.XMLParser()
try:
self.assertRaises(OverflowError, parser.feed, data)
finally:
data = None
def test_main():
from test import test_xml_etree, test_xml_etree_c
# Run the tests specific to the C implementation
support.run_doctest(test_xml_etree_c, verbosity=True)
support.run_unittest(MiscTests)
# Assign the C implementation before running the doctests
# Patch the __name__, to prevent confusion with the pure Python test
pyET = test_xml_etree.ET
py__name__ = test_xml_etree.__name__
test_xml_etree.ET = cET
if __name__ != '__main__':
test_xml_etree.__name__ = __name__
try:
# Run the same test suite as xml.etree.ElementTree
test_xml_etree.test_main(module_name='xml.etree.cElementTree')
finally:
test_xml_etree.ET = pyET
test_xml_etree.__name__ = py__name__
if __name__ == '__main__':
test_main()
| apache-2.0 |
hkawasaki/kawasaki-aio8-1 | lms/djangoapps/instructor/views/api.py | 3 | 49296 | """
Instructor Dashboard API views
JSON views which the instructor dashboard requests.
Many of these GETs may become PUTs in the future.
"""
import json
import logging
import re
import requests
from django.conf import settings
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.utils.translation import ugettext as _
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.utils.html import strip_tags
from util.json_request import JsonResponse
from courseware.access import has_access
from courseware.courses import get_course_with_access, get_course_by_id
from django.contrib.auth.models import User
from django_comment_client.utils import has_forum_access
from django_comment_common.models import (
Role,
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
)
from courseware.models import StudentModule
from student.models import unique_id_for_user, CourseEnrollment, UserProfile, UserStanding
import instructor_task.api
from instructor_task.api_helper import AlreadyRunningError
from instructor_task.views import get_task_completion_info
from instructor_task.models import ReportStore
import instructor.enrollment as enrollment
from instructor.enrollment import (
enroll_email,
get_email_params,
send_beta_role_email,
unenroll_email
)
from instructor.access import list_with_level, allow_access, revoke_access, update_forum_role
import analytics.basic
import analytics.distributions
import analytics.csvs
import csv
# Submissions is a Django app that is currently installed
# from the edx-ora2 repo, although it will likely move in the future.
from submissions import api as sub_api
from bulk_email.models import CourseEmail
from survey.models import SurveySubmission
from .tools import (
dump_student_extensions,
dump_module_extensions,
find_unit,
get_student_from_identifier,
handle_dashboard_error,
parse_datetime,
set_due_date_extension,
strip_if_string,
bulk_email_is_enabled_for_course,
)
from xmodule.modulestore import Location
log = logging.getLogger(__name__)
def common_exceptions_400(func):
"""
Catches common exceptions and renders matching 400 errors.
(decorator without arguments)
"""
def wrapped(request, *args, **kwargs): # pylint: disable=C0111
use_json = (request.is_ajax() or
request.META.get("HTTP_ACCEPT", "").startswith("application/json"))
try:
return func(request, *args, **kwargs)
except User.DoesNotExist:
message = _("User does not exist.")
if use_json:
return JsonResponse({"error": message}, 400)
else:
return HttpResponseBadRequest(message)
except AlreadyRunningError:
message = _("Task is already running.")
if use_json:
return JsonResponse({"error": message}, 400)
else:
return HttpResponseBadRequest(message)
return wrapped
def require_query_params(*args, **kwargs):
"""
    Checks for required parameters or renders a 400 error.
(decorator with arguments)
`args` is a *list of required GET parameter names.
`kwargs` is a **dict of required GET parameter names
to string explanations of the parameter
"""
required_params = []
required_params += [(arg, None) for arg in args]
required_params += [(key, kwargs[key]) for key in kwargs]
    # required_params = e.g. [('action', 'enroll or unenroll'), ('emails', None)]
def decorator(func): # pylint: disable=C0111
def wrapped(*args, **kwargs): # pylint: disable=C0111
request = args[0]
error_response_data = {
'error': 'Missing required query parameter(s)',
'parameters': [],
'info': {},
}
for (param, extra) in required_params:
default = object()
if request.GET.get(param, default) == default:
error_response_data['parameters'].append(param)
error_response_data['info'][param] = extra
if len(error_response_data['parameters']) > 0:
return JsonResponse(error_response_data, status=400)
else:
return func(*args, **kwargs)
return wrapped
return decorator
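# Illustrative sketch (assumed view name, not from this module): the decorator
# takes GET parameter names, optionally mapped to explanations that are echoed
# back in the 400 response, e.g.:
#
#   @require_query_params('course_id', action="enroll or unenroll")
#   def my_view(request, course_id):
#       ...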
def require_post_params(*args, **kwargs):
"""
Checks for required parameters or renders a 400 error.
(decorator with arguments)
Functions like 'require_query_params', but checks for
POST parameters rather than GET parameters.
"""
required_params = []
required_params += [(arg, None) for arg in args]
required_params += [(key, kwargs[key]) for key in kwargs]
    # required_params = e.g. [('action', 'enroll or unenroll'), ('emails', None)]
def decorator(func): # pylint: disable=C0111
def wrapped(*args, **kwargs): # pylint: disable=C0111
request = args[0]
error_response_data = {
'error': 'Missing required query parameter(s)',
'parameters': [],
'info': {},
}
for (param, extra) in required_params:
default = object()
if request.POST.get(param, default) == default:
error_response_data['parameters'].append(param)
error_response_data['info'][param] = extra
if len(error_response_data['parameters']) > 0:
return JsonResponse(error_response_data, status=400)
else:
return func(*args, **kwargs)
return wrapped
return decorator
def require_level(level):
"""
Decorator with argument that requires an access level of the requesting
user. If the requirement is not satisfied, returns an
HttpResponseForbidden (403).
Assumes that request is in args[0].
Assumes that course_id is in kwargs['course_id'].
`level` is in ['instructor', 'staff']
if `level` is 'staff', instructors will also be allowed, even
if they are not in the staff group.
"""
if level not in ['instructor', 'staff']:
raise ValueError("unrecognized level '{}'".format(level))
def decorator(func): # pylint: disable=C0111
def wrapped(*args, **kwargs): # pylint: disable=C0111
request = args[0]
course = get_course_by_id(kwargs['course_id'])
if has_access(request.user, course, level):
return func(*args, **kwargs)
else:
return HttpResponseForbidden()
return wrapped
return decorator
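# Illustrative sketch (assumed view name): require_level reads course_id from
# kwargs, so decorated views must take course_id as a keyword argument; it
# composes with the other decorators in this module, e.g.:
#
#   @ensure_csrf_cookie
#   @require_level('staff')
#   def my_view(request, course_id):
#       ...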
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params(action="enroll or unenroll", identifiers="stringified list of emails and/or usernames")
def students_update_enrollment(request, course_id):
"""
Enroll or unenroll students by email.
Requires staff access.
Query Parameters:
- action in ['enroll', 'unenroll']
- identifiers is string containing a list of emails and/or usernames separated by anything split_input_list can handle.
- auto_enroll is a boolean (defaults to false)
If auto_enroll is false, students will be allowed to enroll.
If auto_enroll is true, students will be enrolled as soon as they register.
- email_students is a boolean (defaults to false)
If email_students is true, students will be sent email notification
If email_students is false, students will not be sent email notification
Returns an analog to this JSON structure: {
"action": "enroll",
"auto_enroll": false,
"results": [
{
"email": "testemail@test.org",
"before": {
"enrollment": false,
"auto_enroll": false,
"user": true,
"allowed": false
},
"after": {
"enrollment": true,
"auto_enroll": false,
"user": true,
"allowed": false
}
}
]
}
"""
action = request.GET.get('action')
identifiers_raw = request.GET.get('identifiers')
identifiers = _split_input_list(identifiers_raw)
auto_enroll = request.GET.get('auto_enroll') in ['true', 'True', True]
email_students = request.GET.get('email_students') in ['true', 'True', True]
email_params = {}
if email_students:
course = get_course_by_id(course_id)
email_params = get_email_params(course, auto_enroll)
results = []
for identifier in identifiers:
        # First try to get a user object from the identifier
user = None
email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
# Use django.core.validators.validate_email to check email address
# validity (obviously, cannot check if email actually /exists/,
# simply that it is plausibly valid)
validate_email(email) # Raises ValidationError if invalid
if action == 'enroll':
before, after = enroll_email(course_id, email, auto_enroll, email_students, email_params)
elif action == 'unenroll':
before, after = unenroll_email(course_id, email, email_students, email_params)
else:
return HttpResponseBadRequest(strip_tags(
"Unrecognized action '{}'".format(action)
))
except ValidationError:
# Flag this email as an error if invalid, but continue checking
# the remaining in the list
results.append({
'identifier': identifier,
'invalidIdentifier': True,
})
except Exception as exc: # pylint: disable=W0703
# catch and log any exceptions
# so that one error doesn't cause a 500.
log.exception("Error while #{}ing student")
log.exception(exc)
results.append({
'identifier': identifier,
'error': True,
})
else:
results.append({
'identifier': identifier,
'before': before.to_dict(),
'after': after.to_dict(),
})
response_payload = {
'action': action,
'results': results,
'auto_enroll': auto_enroll,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('instructor')
@common_exceptions_400
@require_query_params(
identifiers="stringified list of emails and/or usernames",
action="add or remove",
)
def bulk_beta_modify_access(request, course_id):
"""
Enroll or unenroll users in beta testing program.
Query parameters:
- identifiers is string containing a list of emails and/or usernames separated by
anything split_input_list can handle.
- action is one of ['add', 'remove']
"""
action = request.GET.get('action')
identifiers_raw = request.GET.get('identifiers')
identifiers = _split_input_list(identifiers_raw)
email_students = request.GET.get('email_students') in ['true', 'True', True]
auto_enroll = request.GET.get('auto_enroll') in ['true', 'True', True]
results = []
rolename = 'beta'
course = get_course_by_id(course_id)
email_params = {}
if email_students:
email_params = get_email_params(course, auto_enroll=auto_enroll)
for identifier in identifiers:
try:
error = False
user_does_not_exist = False
user = get_student_from_identifier(identifier)
if action == 'add':
allow_access(course, user, rolename)
elif action == 'remove':
revoke_access(course, user, rolename)
else:
return HttpResponseBadRequest(strip_tags(
"Unrecognized action '{}'".format(action)
))
except User.DoesNotExist:
error = True
user_does_not_exist = True
# catch and log any unexpected exceptions
# so that one error doesn't cause a 500.
except Exception as exc: # pylint: disable=broad-except
log.exception("Error while #{}ing student")
log.exception(exc)
error = True
else:
# If no exception thrown, see if we should send an email
if email_students:
send_beta_role_email(action, user, email_params)
# See if we should autoenroll the student
if auto_enroll:
# Check if student is already enrolled
if not CourseEnrollment.is_enrolled(user, course_id):
CourseEnrollment.enroll(user, course_id)
finally:
# Tabulate the action result of this email address
results.append({
'identifier': identifier,
'error': error,
'userDoesNotExist': user_does_not_exist
})
response_payload = {
'action': action,
'results': results,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('instructor')
@common_exceptions_400
@require_query_params(
unique_student_identifier="email or username of user to change access",
rolename="'instructor', 'staff', or 'beta'",
action="'allow' or 'revoke'"
)
def modify_access(request, course_id):
"""
Modify staff/instructor access of other user.
Requires instructor access.
NOTE: instructors cannot remove their own instructor access.
Query parameters:
    unique_student_identifier is the target user's username or email
rolename is one of ['instructor', 'staff', 'beta']
action is one of ['allow', 'revoke']
"""
course = get_course_with_access(
request.user, course_id, 'instructor', depth=None
)
try:
user = get_student_from_identifier(request.GET.get('unique_student_identifier'))
except User.DoesNotExist:
response_payload = {
'unique_student_identifier': request.GET.get('unique_student_identifier'),
'userDoesNotExist': True,
}
return JsonResponse(response_payload)
# Check that user is active, because add_users
# in common/djangoapps/student/roles.py fails
# silently when we try to add an inactive user.
if not user.is_active:
response_payload = {
'unique_student_identifier': user.username,
'inactiveUser': True,
}
return JsonResponse(response_payload)
rolename = request.GET.get('rolename')
action = request.GET.get('action')
    if rolename not in ['instructor', 'staff', 'beta']:
return HttpResponseBadRequest(strip_tags(
"unknown rolename '{}'".format(rolename)
))
# disallow instructors from removing their own instructor access.
if rolename == 'instructor' and user == request.user and action != 'allow':
response_payload = {
'unique_student_identifier': user.username,
'rolename': rolename,
'action': action,
'removingSelfAsInstructor': True,
}
return JsonResponse(response_payload)
if action == 'allow':
allow_access(course, user, rolename)
elif action == 'revoke':
revoke_access(course, user, rolename)
else:
return HttpResponseBadRequest(strip_tags(
"unrecognized action '{}'".format(action)
))
response_payload = {
'unique_student_identifier': user.username,
'rolename': rolename,
'action': action,
'success': 'yes',
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('instructor')
@require_query_params(rolename="'instructor', 'staff', or 'beta'")
def list_course_role_members(request, course_id):
"""
List instructors and staff.
Requires instructor access.
rolename is one of ['instructor', 'staff', 'beta']
Returns JSON of the form {
"course_id": "some/course/id",
"staff": [
{
"username": "staff1",
"email": "staff1@example.org",
"first_name": "Joe",
"last_name": "Shmoe",
}
]
}
"""
course = get_course_with_access(
request.user, course_id, 'instructor', depth=None
)
rolename = request.GET.get('rolename')
    if rolename not in ['instructor', 'staff', 'beta']:
return HttpResponseBadRequest()
def extract_user_info(user):
""" convert user into dicts for json view """
return {
'username': user.username,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
}
response_payload = {
'course_id': course_id,
rolename: map(extract_user_info, list_with_level(
course, rolename
)),
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_grading_config(request, course_id):
"""
Respond with json which contains a html formatted grade summary.
"""
course = get_course_with_access(
request.user, course_id, 'staff', depth=None
)
grading_config_summary = analytics.basic.dump_grading_context(course)
response_payload = {
'course_id': course_id,
'grading_config_summary': grading_config_summary,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_students_features(request, course_id, csv=False): # pylint: disable=W0613, W0621
"""
Respond with json which contains a summary of all enrolled students profile information.
Responds with JSON
{"students": [{-student-info-}, ...]}
    TODO: accept requests for different attribute sets.
"""
available_features = analytics.basic.AVAILABLE_FEATURES
query_features = [
'username', 'name', 'email', 'language', 'location', 'year_of_birth',
'gender', 'level_of_education', 'mailing_address', 'goals'
]
student_data = analytics.basic.enrolled_students_features(course_id, query_features)
# Provide human-friendly and translatable names for these features. These names
# will be displayed in the table generated in data_download.coffee. It is not (yet)
# used as the header row in the CSV, but could be in the future.
query_features_names = {
'username': _('Username'),
'name': _('Name'),
'email': _('Email'),
'language': _('Language'),
'location': _('Location'),
'year_of_birth': _('Birth Year'),
'gender': _('Gender'),
'level_of_education': _('Level of Education'),
'mailing_address': _('Mailing Address'),
'goals': _('Goals'),
}
if not csv:
response_payload = {
'course_id': course_id,
'students': student_data,
'students_count': len(student_data),
'queried_features': query_features,
'feature_names': query_features_names,
'available_features': available_features,
}
return JsonResponse(response_payload)
else:
header, datarows = analytics.csvs.format_dictlist(student_data, query_features)
return analytics.csvs.create_csv_response("enrolled_profiles.csv", header, datarows)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_anon_ids(request, course_id): # pylint: disable=W0613
"""
Respond with 2-column CSV output of user-id, anonymized-user-id
"""
# TODO: the User.objects query and CSV generation here could be
# centralized into analytics. Currently analytics has similar functionality
# but not quite what's needed.
def csv_response(filename, header, rows):
"""Returns a CSV http response for the given header and rows (excel/utf-8)."""
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
# In practice, there should not be non-ascii data in this query,
# but trying to do the right thing anyway.
encoded = [unicode(s).encode('utf-8') for s in header]
writer.writerow(encoded)
for row in rows:
encoded = [unicode(s).encode('utf-8') for s in row]
writer.writerow(encoded)
return response
students = User.objects.filter(
courseenrollment__course_id=course_id,
).order_by('id')
header = ['User ID', 'Anonymized user ID']
rows = [[s.id, unique_id_for_user(s)] for s in students]
return csv_response(course_id.replace('/', '-') + '-anon-ids.csv', header, rows)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_distribution(request, course_id):
"""
Respond with json of the distribution of students over selected features which have choices.
Ask for a feature through the `feature` query parameter.
If no `feature` is supplied, the response will contain an
empty response['feature_results'] object.
A list of available features will be in response['available_features'].
"""
feature = request.GET.get('feature')
# alternate notations of None
if feature in (None, 'null', ''):
feature = None
else:
feature = str(feature)
available_features = analytics.distributions.AVAILABLE_PROFILE_FEATURES
# allow None so that requests for no feature can list available features
if feature not in available_features + (None,):
return HttpResponseBadRequest(strip_tags(
"feature '{}' not available.".format(feature)
))
response_payload = {
'course_id': course_id,
'queried_feature': feature,
'available_features': available_features,
'feature_display_names': analytics.distributions.DISPLAY_NAMES,
}
p_dist = None
if feature is not None:
p_dist = analytics.distributions.profile_distribution(course_id, feature)
response_payload['feature_results'] = {
'feature': p_dist.feature,
'feature_display_name': p_dist.feature_display_name,
'data': p_dist.data,
'type': p_dist.type,
}
if p_dist.type == 'EASY_CHOICE':
response_payload['feature_results']['choices_display_names'] = p_dist.choices_display_names
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@common_exceptions_400
@require_level('staff')
@require_query_params(
unique_student_identifier="email or username of student for whom to get progress url"
)
def get_student_progress_url(request, course_id):
"""
Get the progress url of a student.
Limited to staff access.
Takes query parameter unique_student_identifier and, if the student exists,
returns e.g. {
'progress_url': '/../...'
}
"""
user = get_student_from_identifier(request.GET.get('unique_student_identifier'))
progress_url = reverse('student_progress', kwargs={'course_id': course_id, 'student_id': user.id})
response_payload = {
'course_id': course_id,
'progress_url': progress_url,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params(
problem_to_reset="problem urlname to reset"
)
@common_exceptions_400
def reset_student_attempts(request, course_id):
"""
Resets a student's attempts counter or starts a task to reset all students'
attempts counters. Optionally deletes student state for a problem. Limited
to staff access. Some sub-methods limited to instructor access.
Takes some of the following query parameters
- problem_to_reset is a urlname of a problem
- unique_student_identifier is an email or username
- all_students is a boolean
requires instructor access
mutually exclusive with unique_student_identifier
mutually exclusive with delete_module
- delete_module is a boolean
requires instructor access
mutually exclusive with all_students
"""
course = get_course_with_access(
request.user, course_id, 'staff', depth=None
)
problem_to_reset = strip_if_string(request.GET.get('problem_to_reset'))
student_identifier = request.GET.get('unique_student_identifier', None)
student = None
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
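# Querystring values arrive as strings, so accept the common truthy
# spellings here; anything else falls through to False.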
all_students = request.GET.get('all_students', False) in ['true', 'True', True]
delete_module = request.GET.get('delete_module', False) in ['true', 'True', True]
# parameter combinations
if all_students and student:
return HttpResponseBadRequest(
"all_students and unique_student_identifier are mutually exclusive."
)
if all_students and delete_module:
return HttpResponseBadRequest(
"all_students and delete_module are mutually exclusive."
)
# instructor authorization
if all_students or delete_module:
if not has_access(request.user, course, 'instructor'):
return HttpResponseForbidden("Requires instructor access.")
module_state_key = _msk_from_problem_urlname(course_id, problem_to_reset)
response_payload = {}
response_payload['problem_to_reset'] = problem_to_reset
if student:
try:
enrollment.reset_student_attempts(course_id, student, module_state_key, delete_module=delete_module)
except StudentModule.DoesNotExist:
return HttpResponseBadRequest(_("Module does not exist."))
except sub_api.SubmissionError:
# Trust the submissions API to log the error
error_msg = _("An error occurred while deleting the score.")
return HttpResponse(error_msg, status=500)
response_payload['student'] = student_identifier
elif all_students:
instructor_task.api.submit_reset_problem_attempts_for_all_students(request, course_id, module_state_key)
response_payload['task'] = 'created'
response_payload['student'] = 'All Students'
else:
return HttpResponseBadRequest()
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('instructor')
@require_query_params(problem_to_reset="problem urlname to reset")
@common_exceptions_400
def rescore_problem(request, course_id):
"""
Starts a background process to rescore a problem for one student or for
all students. Limited to instructor access.
Takes the following query parameters
- problem_to_reset is a urlname of a problem
- unique_student_identifier is an email or username
- all_students is a boolean
all_students and unique_student_identifier cannot both be present.
"""
problem_to_reset = strip_if_string(request.GET.get('problem_to_reset'))
student_identifier = request.GET.get('unique_student_identifier', None)
student = None
if student_identifier is not None:
student = get_student_from_identifier(student_identifier)
all_students = request.GET.get('all_students') in ['true', 'True', True]
if not (problem_to_reset and (all_students or student)):
return HttpResponseBadRequest("Missing query parameters.")
if all_students and student:
return HttpResponseBadRequest(
"Cannot rescore with all_students and unique_student_identifier."
)
module_state_key = _msk_from_problem_urlname(course_id, problem_to_reset)
response_payload = {}
response_payload['problem_to_reset'] = problem_to_reset
if student:
response_payload['student'] = student_identifier
instructor_task.api.submit_rescore_problem_for_student(request, course_id, module_state_key, student)
response_payload['task'] = 'created'
elif all_students:
instructor_task.api.submit_rescore_problem_for_all_students(request, course_id, module_state_key)
response_payload['task'] = 'created'
else:
return HttpResponseBadRequest()
return JsonResponse(response_payload)
def extract_task_features(task):
"""
Convert task to dict for json rendering.
Expects tasks to have the following features:
* task_type (str, type of task)
* task_input (dict, input(s) to the task)
* task_id (str, celery id of the task)
* requester (str, username who submitted the task)
* task_state (str, state of task e.g. PROGRESS, COMPLETED)
* created (datetime, when the task was created)
* task_output (optional)
"""
# Pull out information from the task
features = ['task_type', 'task_input', 'task_id', 'requester', 'task_state']
task_feature_dict = {feature: str(getattr(task, feature)) for feature in features}
# Some information (created, duration, status, task message) require additional formatting
task_feature_dict['created'] = task.created.isoformat()
# Get duration info, if known
duration_sec = 'unknown'
if hasattr(task, 'task_output') and task.task_output is not None:
try:
task_output = json.loads(task.task_output)
except ValueError:
log.error("Could not parse task output as valid json; task output: %s", task.task_output)
else:
if 'duration_ms' in task_output:
duration_sec = int(task_output['duration_ms'] / 1000.0)
task_feature_dict['duration_sec'] = duration_sec
# Get progress status message & success information
success, task_message = get_task_completion_info(task)
status = _("Complete") if success else _("Incomplete")
task_feature_dict['status'] = status
task_feature_dict['task_message'] = task_message
return task_feature_dict
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def list_background_email_tasks(request, course_id): # pylint: disable=unused-argument
"""
List background email tasks.
"""
task_type = 'bulk_course_email'
# Specifying for the history of a single task type
tasks = instructor_task.api.get_instructor_task_history(course_id, task_type=task_type)
response_payload = {
'tasks': map(extract_task_features, tasks),
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def list_instructor_tasks(request, course_id):
"""
List instructor tasks.
Takes optional query parameters.
- With no arguments, lists running tasks.
- `problem_urlname` lists task history for problem
- `problem_urlname` and `unique_student_identifier` lists task
history for problem AND student (intersection)
"""
problem_urlname = strip_if_string(request.GET.get('problem_urlname', False))
student = request.GET.get('unique_student_identifier', None)
if student is not None:
student = get_student_from_identifier(student)
if student and not problem_urlname:
return HttpResponseBadRequest(
"unique_student_identifier must accompany problem_urlname"
)
if problem_urlname:
module_state_key = _msk_from_problem_urlname(course_id, problem_urlname)
if student:
# Specifying for a single student's history on this problem
tasks = instructor_task.api.get_instructor_task_history(course_id, module_state_key, student)
else:
# Specifying for single problem's history
tasks = instructor_task.api.get_instructor_task_history(course_id, module_state_key)
else:
# If no problem or student, just get currently running tasks
tasks = instructor_task.api.get_running_instructor_tasks(course_id)
response_payload = {
'tasks': map(extract_task_features, tasks),
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def list_report_downloads(_request, course_id):
"""
List grade CSV files that are available for download for this course.
"""
report_store = ReportStore.from_config()
response_payload = {
'downloads': [
dict(name=name, url=url, link='<a href="{}">{}</a>'.format(url, name))
for name, url in report_store.links_for(course_id)
]
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def calculate_grades_csv(request, course_id):
"""
AlreadyRunningError is raised if the course's grades are already being updated.
"""
try:
instructor_task.api.submit_calculate_grades_csv(request, course_id)
success_status = _("Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section.")
return JsonResponse({"status": success_status})
except AlreadyRunningError:
already_running_status = _("A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below.")
return JsonResponse({
"status": already_running_status
})
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params('rolename')
def list_forum_members(request, course_id):
"""
Lists forum members of a certain rolename.
Limited to staff access.
The requesting user must be at least staff.
Staff forum admins can access all roles EXCEPT for FORUM_ROLE_ADMINISTRATOR
which is limited to instructors.
Takes query parameter `rolename`.
"""
course = get_course_by_id(course_id)
has_instructor_access = has_access(request.user, course, 'instructor')
has_forum_admin = has_forum_access(
request.user, course_id, FORUM_ROLE_ADMINISTRATOR
)
rolename = request.GET.get('rolename')
# default roles require either (staff & forum admin) or (instructor)
if not (has_forum_admin or has_instructor_access):
return HttpResponseBadRequest(
"Operation requires staff & forum admin or instructor access"
)
# EXCEPT FORUM_ROLE_ADMINISTRATOR requires (instructor)
if rolename == FORUM_ROLE_ADMINISTRATOR and not has_instructor_access:
return HttpResponseBadRequest("Operation requires instructor access.")
# filter out unsupported rolenames
if rolename not in [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]:
return HttpResponseBadRequest(strip_tags(
"Unrecognized rolename '{}'.".format(rolename)
))
try:
role = Role.objects.get(name=rolename, course_id=course_id)
users = role.users.all().order_by('username')
except Role.DoesNotExist:
users = []
def extract_user_info(user):
""" Convert user to dict for json rendering. """
return {
'username': user.username,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
}
response_payload = {
'course_id': course_id,
rolename: map(extract_user_info, users),
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_post_params(send_to="sending to whom", subject="subject line", message="message text")
def send_email(request, course_id):
"""
Send an email to self, staff, or everyone involved in a course.
Query Parameters:
- 'send_to' specifies what group the email should be sent to
Options are defined by the CourseEmail model in
lms/djangoapps/bulk_email/models.py
- 'subject' specifies email's subject
- 'message' specifies email's content
"""
if not bulk_email_is_enabled_for_course(course_id):
return HttpResponseForbidden("Email is not enabled for this course.")
send_to = request.POST.get("send_to")
subject = request.POST.get("subject")
message = request.POST.get("message")
# Create the CourseEmail object. This is saved immediately, so that
# any transaction that has been pending up to this point will also be
# committed.
email = CourseEmail.create(course_id, request.user, send_to, subject, message)
# Submit the task, so that the correct InstructorTask object gets created (for monitoring purposes)
instructor_task.api.submit_bulk_course_email(request, course_id, email.id) # pylint: disable=E1101
response_payload = {
'course_id': course_id,
'success': True,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params(
unique_student_identifier="email or username of user to change access",
rolename="the forum role",
action="'allow' or 'revoke'",
)
@common_exceptions_400
def update_forum_role_membership(request, course_id):
"""
Modify user's forum role.
The requesting user must be at least staff.
Staff forum admins can access all roles EXCEPT for FORUM_ROLE_ADMINISTRATOR
which is limited to instructors.
No one can revoke an instructor's FORUM_ROLE_ADMINISTRATOR status.
Query parameters:
- `unique_student_identifier` is the target user's email or username
- `rolename` is one of [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]
- `action` is one of ['allow', 'revoke']
"""
course = get_course_by_id(course_id)
has_instructor_access = has_access(request.user, course, 'instructor')
has_forum_admin = has_forum_access(
request.user, course_id, FORUM_ROLE_ADMINISTRATOR
)
unique_student_identifier = request.GET.get('unique_student_identifier')
rolename = request.GET.get('rolename')
action = request.GET.get('action')
# default roles require either (staff & forum admin) or (instructor)
if not (has_forum_admin or has_instructor_access):
return HttpResponseBadRequest(
"Operation requires staff & forum admin or instructor access"
)
# EXCEPT FORUM_ROLE_ADMINISTRATOR requires (instructor)
if rolename == FORUM_ROLE_ADMINISTRATOR and not has_instructor_access:
return HttpResponseBadRequest("Operation requires instructor access.")
if rolename not in [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]:
return HttpResponseBadRequest(strip_tags(
"Unrecognized rolename '{}'.".format(rolename)
))
user = get_student_from_identifier(unique_student_identifier)
target_is_instructor = has_access(user, course, 'instructor')
# cannot revoke instructor
if target_is_instructor and action == 'revoke' and rolename == FORUM_ROLE_ADMINISTRATOR:
return HttpResponseBadRequest("Cannot revoke instructor forum admin privileges.")
try:
update_forum_role(course_id, user, rolename, action)
except Role.DoesNotExist:
return HttpResponseBadRequest("Role does not exist.")
response_payload = {
'course_id': course_id,
'action': action,
}
return JsonResponse(response_payload)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params(
aname="name of analytic to query",
)
@common_exceptions_400
def proxy_legacy_analytics(request, course_id):
"""
Proxies to the analytics cron job server.
`aname` is a query parameter specifying which analytic to query.
"""
analytics_name = request.GET.get('aname')
# abort if misconfigured
if not (hasattr(settings, 'ANALYTICS_SERVER_URL') and hasattr(settings, 'ANALYTICS_API_KEY')):
return HttpResponse("Analytics service not configured.", status=501)
url = "{}get?aname={}&course_id={}&apikey={}".format(
settings.ANALYTICS_SERVER_URL,
analytics_name,
course_id,
settings.ANALYTICS_API_KEY,
)
try:
res = requests.get(url)
except Exception: # pylint: disable=broad-except
log.exception("Error requesting from analytics server at %s", url)
return HttpResponse("Error requesting from analytics server.", status=500)
if res.status_code == 200:
# return the successful request content
return HttpResponse(res.content, content_type="application/json")
elif res.status_code == 404:
# forward the 404 and content
return HttpResponse(res.content, content_type="application/json", status=404)
else:
# 500 on all other unexpected status codes.
log.error(
"Error fetching {}, code: {}, msg: {}".format(
url, res.status_code, res.content
)
)
return HttpResponse(
"Error from analytics server ({}).".format(res.status_code),
status=500
)
def _display_unit(unit):
"""
Gets string for displaying unit to user.
"""
name = getattr(unit, 'display_name', None)
if name:
return u'{0} ({1})'.format(name, unit.location.url())
else:
return unit.location.url()
@handle_dashboard_error
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params('student', 'url', 'due_datetime')
def change_due_date(request, course_id):
"""
Grants a due date extension to a student for a particular unit.
"""
course = get_course_by_id(course_id)
student = get_student_from_identifier(request.GET.get('student'))
unit = find_unit(course, request.GET.get('url'))
due_date = parse_datetime(request.GET.get('due_datetime'))
set_due_date_extension(course, unit, student, due_date)
return JsonResponse(_(
'Successfully changed due date for student {0} for {1} '
'to {2}').format(student.profile.name, _display_unit(unit),
due_date.strftime('%Y-%m-%d %H:%M')))
@handle_dashboard_error
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params('student', 'url')
def reset_due_date(request, course_id):
"""
Rescinds a due date extension for a student on a particular unit.
"""
course = get_course_by_id(course_id)
student = get_student_from_identifier(request.GET.get('student'))
unit = find_unit(course, request.GET.get('url'))
set_due_date_extension(course, unit, student, None)
return JsonResponse(_(
'Successfully reset due date for student {0} for {1} '
'to {2}').format(student.profile.name, _display_unit(unit),
unit.due.strftime('%Y-%m-%d %H:%M')))
@handle_dashboard_error
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params('url')
def show_unit_extensions(request, course_id):
"""
Shows all of the students which have due date extensions for the given unit.
"""
course = get_course_by_id(course_id)
unit = find_unit(course, request.GET.get('url'))
return JsonResponse(dump_module_extensions(course, unit))
@handle_dashboard_error
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
@require_query_params('student')
def show_student_extensions(request, course_id):
"""
Shows all of the due date extensions granted to a particular student in a
particular course.
"""
student = get_student_from_identifier(request.GET.get('student'))
course = get_course_by_id(course_id)
return JsonResponse(dump_student_extensions(course, student))
def _split_input_list(str_list):
"""
Separate out individual student emails from a comma- or space-separated string.
e.g.
in: "Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed"
out: ['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed']
`str_list` is a string coming from an input text area
returns a list of separated values
"""
new_list = re.split(r'[\n\r\s,]', str_list)
new_list = [s.strip() for s in new_list]
new_list = [s for s in new_list if s != '']
return new_list
def _msk_from_problem_urlname(course_id, urlname):
"""
Convert a 'problem urlname' (the name instructors input into the dashboard)
to a module state key (db field)
"""
if urlname.endswith(".xml"):
urlname = urlname[:-4]
# Combined open ended problems also have state that can be deleted. However,
# prepending "problem" will only allow capa problems to be reset.
# Get around this for xblock problems.
if "/" not in urlname:
urlname = "problem/" + urlname
parts = Location.parse_course_id(course_id)
parts['urlname'] = urlname
module_state_key = u"i4x://{org}/{course}/{urlname}".format(**parts)
return module_state_key
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_survey(request, course_id): # pylint: disable=W0613
"""
Gets survey result as a CSV file.
"""
def csv_response(filename, header, rows):
"""Returns a CSV http response for the given header and rows (excel/cp932)."""
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
encoded = [unicode(s).encode('cp932') for s in header]
writer.writerow(encoded)
for row in rows:
# NOTE: this data is mostly Japanese, so encode cp932
encoded = [unicode(s).encode('cp932') for s in row]
writer.writerow(encoded)
return response
header = ['Unit ID', 'Survey Name', 'Created', 'User Name', 'Gender', 'Year of Birth',
'Level of Education', 'Disabled']
rows = []
submissions = list(SurveySubmission.objects.raw(
'''SELECT s.*, u.*, p.*, t.account_status
FROM survey_surveysubmission s
LEFT OUTER JOIN auth_user u
ON s.user_id = u.id
LEFT OUTER JOIN auth_userprofile p
ON s.user_id = p.user_id
LEFT OUTER JOIN student_userstanding t
ON s.user_id = t.user_id
WHERE course_id = %s
ORDER BY s.unit_id, s.created''',
[course_id]
))
if len(submissions) > 0:
keys = sorted(submissions[0].get_survey_answer().keys())
header.extend(keys)
for s in submissions:
row = [s.unit_id, s.survey_name, s.created, s.username]
row.append(dict(UserProfile.GENDER_CHOICES).get(s.gender, s.gender) or '')
row.append(s.year_of_birth or '')
row.append(dict(UserProfile.LEVEL_OF_EDUCATION_CHOICES).get(s.level_of_education, s.level_of_education) or '')
row.append(s.account_status if s.account_status == UserStanding.ACCOUNT_DISABLED else '')
for key in keys:
value = s.get_survey_answer().get(key)
# NOTE: join list values into a comma-separated string
if isinstance(value, list):
value = ','.join(value)
row.append(value)
rows.append(row)
return csv_response(course_id.replace('/', '-') + '-survey.csv', header, rows)
| agpl-3.0 |
hanvo/MusicCloud | Crawler/Install Files/pygame/test/image_test.py | 4 | 15119 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils \
import test_not_implemented, example_path, unittest, png
else:
from test.test_utils \
import test_not_implemented, example_path, unittest, png
import pygame, pygame.image, pygame.pkgdata
from pygame.compat import xrange_, ord_
import os
import array
import tempfile
def test_magic(f, magic_hex):
""" tests a given file to see if the magic hex matches.
"""
data = f.read(len(magic_hex))
if len(data) != len(magic_hex):
return 0
for i in range(len(magic_hex)):
if magic_hex[i] != ord_(data[i]):
return 0
return 1
class ImageModuleTest( unittest.TestCase ):
def testLoadIcon(self):
""" see if we can load the pygame icon.
"""
f = pygame.pkgdata.getResource("pygame_icon.bmp")
self.assertEqual(f.mode, "rb")
surf = pygame.image.load_basic(f)
self.assertEqual(surf.get_at((0,0)),(5, 4, 5, 255))
self.assertEqual(surf.get_height(),32)
self.assertEqual(surf.get_width(),32)
def testLoadPNG(self):
""" see if we can load a png with color values in the proper channels.
"""
# Create a PNG file with known colors
reddish_pixel = (210, 0, 0, 255)
greenish_pixel = (0, 220, 0, 255)
bluish_pixel = (0, 0, 230, 255)
greyish_pixel = (110, 120, 130, 140)
pixel_array = [reddish_pixel + greenish_pixel,
bluish_pixel + greyish_pixel]
f_descriptor, f_path = tempfile.mkstemp(suffix='.png')
f = os.fdopen(f_descriptor, 'wb')
w = png.Writer(2, 2, alpha=True)
w.write(f, pixel_array)
f.close()
# Read the PNG file and verify that pygame interprets it correctly
surf = pygame.image.load(f_path)
pixel_x0_y0 = surf.get_at((0, 0))
pixel_x1_y0 = surf.get_at((1, 0))
pixel_x0_y1 = surf.get_at((0, 1))
pixel_x1_y1 = surf.get_at((1, 1))
self.assertEquals(pixel_x0_y0, reddish_pixel)
self.assertEquals(pixel_x1_y0, greenish_pixel)
self.assertEquals(pixel_x0_y1, bluish_pixel)
self.assertEquals(pixel_x1_y1, greyish_pixel)
# Read the PNG file obj. and verify that pygame interprets it correctly
f = open(f_path, 'rb')
surf = pygame.image.load(f)
f.close()
pixel_x0_y0 = surf.get_at((0, 0))
pixel_x1_y0 = surf.get_at((1, 0))
pixel_x0_y1 = surf.get_at((0, 1))
pixel_x1_y1 = surf.get_at((1, 1))
self.assertEquals(pixel_x0_y0, reddish_pixel)
self.assertEquals(pixel_x1_y0, greenish_pixel)
self.assertEquals(pixel_x0_y1, bluish_pixel)
self.assertEquals(pixel_x1_y1, greyish_pixel)
os.remove(f_path)
def testLoadJPG(self):
""" see if we can load a jpg.
"""
f = example_path('data/alien1.jpg') # normalized
# f = os.path.join("examples", "data", "alien1.jpg")
surf = pygame.image.load(f)
f = open(f, "rb")
# f = open(os.path.join("examples", "data", "alien1.jpg"), "rb")
surf = pygame.image.load(f)
# surf = pygame.image.load(open(os.path.join("examples", "data", "alien1.jpg"), "rb"))
def testSavePNG(self):
""" see if we can save a png with color values in the proper channels.
"""
# Create a PNG file with known colors
reddish_pixel = (215, 0, 0, 255)
greenish_pixel = (0, 225, 0, 255)
bluish_pixel = (0, 0, 235, 255)
greyish_pixel = (115, 125, 135, 145)
surf = pygame.Surface((1, 4), pygame.SRCALPHA, 32)
surf.set_at((0, 0), reddish_pixel)
surf.set_at((0, 1), greenish_pixel)
surf.set_at((0, 2), bluish_pixel)
surf.set_at((0, 3), greyish_pixel)
f_path = tempfile.mktemp(suffix='.png')
pygame.image.save(surf, f_path)
# Read the PNG file and verify that pygame saved it correctly
width, height, pixels, metadata = png.Reader(filename=f_path).asRGBA8()
pixels_as_tuples = []
for pixel in pixels:
pixels_as_tuples.append(tuple(pixel))
self.assertEquals(pixels_as_tuples[0], reddish_pixel)
self.assertEquals(pixels_as_tuples[1], greenish_pixel)
self.assertEquals(pixels_as_tuples[2], bluish_pixel)
self.assertEquals(pixels_as_tuples[3], greyish_pixel)
os.remove(f_path)
def test_save(self):
s = pygame.Surface((10,10))
s.fill((23,23,23))
magic_hex = {}
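# Leading identification bytes for each format (e.g. PNG's \x89PNG
# signature, BMP's "BM" header).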
magic_hex['jpg'] = [0xff, 0xd8, 0xff, 0xe0]
magic_hex['png'] = [0x89 ,0x50 ,0x4e ,0x47]
magic_hex['tga'] = [0x0, 0x0, 0xa]
magic_hex['bmp'] = [0x42, 0x4d]
formats = ["jpg", "png", "tga", "bmp"]
# uppercase too... JPG
formats = formats + [x.upper() for x in formats]
for fmt in formats:
try:
temp_filename = "%s.%s" % ("tmpimg", fmt)
pygame.image.save(s, temp_filename)
# test the magic numbers at the start of the file to ensure they are saved
# as the correct file type.
self.assertEqual((1, fmt), (test_magic(open(temp_filename, "rb"), magic_hex[fmt.lower()]), fmt))
# load the file to make sure it was saved correctly.
# Note load can load a jpg saved with a .png file name.
s2 = pygame.image.load(temp_filename)
#compare contents, might only work reliably for png...
# but because it's all one color it seems to work with jpg.
self.assertEquals(s2.get_at((0,0)), s.get_at((0,0)))
finally:
#clean up the temp file, comment out to leave tmp file after run.
os.remove(temp_filename)
pass
def test_save_colorkey(self):
""" make sure the color key is not changed when saving.
"""
s = pygame.Surface((10,10), pygame.SRCALPHA, 32)
s.fill((23,23,23))
s.set_colorkey((0,0,0))
colorkey1 = s.get_colorkey()
p1 = s.get_at((0,0))
temp_filename = "tmpimg.png"
try:
pygame.image.save(s, temp_filename)
s2 = pygame.image.load(temp_filename)
finally:
os.remove(temp_filename)
colorkey2 = s.get_colorkey()
# check that the pixel and the colorkey is correct.
self.assertEqual(colorkey1, colorkey2)
self.assertEqual(p1, s2.get_at((0,0)))
def assertPremultipliedAreEqual(self, string1, string2, source_string):
self.assertEqual(len(string1), len(string2))
block_size = 20
if string1 != string2:
for block_start in xrange_(0, len(string1), block_size):
block_end = min(block_start + block_size, len(string1))
block1 = string1[block_start:block_end]
block2 = string2[block_start:block_end]
if block1 != block2:
source_block = source_string[block_start:block_end]
msg = "string difference in %d to %d of %d:\n%s\n%s\nsource:\n%s" % (block_start, block_end, len(string1), block1.encode("hex"), block2.encode("hex"), source_block.encode("hex"))
self.fail(msg)
def test_to_string__premultiplied(self):
""" test to make sure we can export a surface to a premultiplied alpha string
"""
def convertRGBAtoPremultiplied(surface_to_modify):
for x in xrange_(surface_to_modify.get_width()):
for y in xrange_(surface_to_modify.get_height()):
color = surface_to_modify.get_at((x, y))
premult_color = (color[0]*color[3]/255,
color[1]*color[3]/255,
color[2]*color[3]/255,
color[3])
surface_to_modify.set_at((x, y), premult_color)
test_surface = pygame.Surface((256, 256), pygame.SRCALPHA, 32)
for x in xrange_(test_surface.get_width()):
for y in xrange_(test_surface.get_height()):
i = x + y*test_surface.get_width()
test_surface.set_at((x,y), ((i*7) % 256, (i*13) % 256, (i*27) % 256, y))
premultiplied_copy = test_surface.copy()
convertRGBAtoPremultiplied(premultiplied_copy)
self.assertPremultipliedAreEqual(pygame.image.tostring(test_surface, "RGBA_PREMULT"),
pygame.image.tostring(premultiplied_copy, "RGBA"),
pygame.image.tostring(test_surface, "RGBA"))
self.assertPremultipliedAreEqual(pygame.image.tostring(test_surface, "ARGB_PREMULT"),
pygame.image.tostring(premultiplied_copy, "ARGB"),
pygame.image.tostring(test_surface, "ARGB"))
no_alpha_surface = pygame.Surface((256, 256), 0, 24)
self.assertRaises(ValueError, pygame.image.tostring, no_alpha_surface, "RGBA_PREMULT")
def test_fromstring__and_tostring(self):
""" see if fromstring, and tostring methods are symmetric.
"""
def AreSurfacesIdentical(surf_a, surf_b):
if surf_a.get_width() != surf_b.get_width() or surf_a.get_height() != surf_b.get_height():
return False
for y in xrange_(surf_a.get_height()):
for x in xrange_(surf_b.get_width()):
if surf_a.get_at((x,y)) != surf_b.get_at((x,y)):
return False
return True
####################################################################
def RotateRGBAtoARGB(str_buf):
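# Rotate each 4-byte pixel right by one byte: R,G,B,A -> A,R,G,B.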
byte_buf = array.array("B", str_buf)
num_quads = len(byte_buf)//4
for i in xrange_(num_quads):
alpha = byte_buf[i*4 + 3]
byte_buf[i*4 + 3] = byte_buf[i*4 + 2]
byte_buf[i*4 + 2] = byte_buf[i*4 + 1]
byte_buf[i*4 + 1] = byte_buf[i*4 + 0]
byte_buf[i*4 + 0] = alpha
return byte_buf.tostring()
####################################################################
def RotateARGBtoRGBA(str_buf):
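# Rotate each 4-byte pixel left by one byte: A,R,G,B -> R,G,B,A.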
byte_buf = array.array("B", str_buf)
num_quads = len(byte_buf)//4
for i in xrange_(num_quads):
alpha = byte_buf[i*4 + 0]
byte_buf[i*4 + 0] = byte_buf[i*4 + 1]
byte_buf[i*4 + 1] = byte_buf[i*4 + 2]
byte_buf[i*4 + 2] = byte_buf[i*4 + 3]
byte_buf[i*4 + 3] = alpha
return byte_buf.tostring()
####################################################################
test_surface = pygame.Surface((64, 256), flags=pygame.SRCALPHA, depth=32)
for i in xrange_(256):
for j in xrange_(16):
intensity = j*16 + 15
test_surface.set_at((j + 0, i), (intensity, i, i, i))
test_surface.set_at((j + 16, i), (i, intensity, i, i))
test_surface.set_at((j + 32, i), (i, i, intensity, i))
test_surface.set_at((j + 48, i), (i, i, i, intensity))
self.assert_(AreSurfacesIdentical(test_surface, test_surface))
rgba_buf = pygame.image.tostring(test_surface, "RGBA")
rgba_buf = RotateARGBtoRGBA(RotateRGBAtoARGB(rgba_buf))
test_rotate_functions = pygame.image.fromstring(rgba_buf, test_surface.get_size(), "RGBA")
self.assert_(AreSurfacesIdentical(test_surface, test_rotate_functions))
rgba_buf = pygame.image.tostring(test_surface, "RGBA")
argb_buf = RotateRGBAtoARGB(rgba_buf)
test_from_argb_string = pygame.image.fromstring(argb_buf, test_surface.get_size(), "ARGB")
self.assert_(AreSurfacesIdentical(test_surface, test_from_argb_string))
#"ERROR: image.fromstring with ARGB failed"
argb_buf = pygame.image.tostring(test_surface, "ARGB")
rgba_buf = RotateARGBtoRGBA(argb_buf)
test_to_argb_string = pygame.image.fromstring(rgba_buf, test_surface.get_size(), "RGBA")
self.assert_(AreSurfacesIdentical(test_surface, test_to_argb_string))
#"ERROR: image.tostring with ARGB failed"
argb_buf = pygame.image.tostring(test_surface, "ARGB")
test_to_from_argb_string = pygame.image.fromstring(argb_buf, test_surface.get_size(), "ARGB")
self.assert_(AreSurfacesIdentical(test_surface, test_to_from_argb_string))
#"ERROR: image.fromstring and image.tostring with ARGB are not symmetric"
def todo_test_frombuffer(self):
# __doc__ (as of 2008-08-02) for pygame.image.frombuffer:
# pygame.image.frombuffer(string, size, format): return Surface
# create a new Surface that shares data inside a string buffer
#
# Create a new Surface that shares pixel data directly from the string
# buffer. This method takes the same arguments as
# pygame.image.fromstring(), but is unable to vertically flip the
# source data.
#
# This will run much faster than pygame.image.fromstring, since no
# pixel data must be allocated and copied.
self.fail()
def todo_test_get_extended(self):
# __doc__ (as of 2008-08-02) for pygame.image.get_extended:
# pygame.image.get_extended(): return bool
# test if extended image formats can be loaded
#
# If pygame is built with extended image formats this function will
# return True. It is still not possible to determine which formats
# will be available, but generally you will be able to load them all.
self.fail()
def todo_test_load_basic(self):
# __doc__ (as of 2008-08-02) for pygame.image.load_basic:
# pygame.image.load(filename): return Surface
# pygame.image.load(fileobj, namehint=): return Surface
# load new image from a file
self.fail()
def todo_test_load_extended(self):
# __doc__ (as of 2008-08-02) for pygame.image.load_extended:
# pygame module for image transfer
self.fail()
def todo_test_save_extended(self):
# __doc__ (as of 2008-08-02) for pygame.image.save_extended:
# pygame module for image transfer
self.fail()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hsoft/moneyguru | core/gui/completable_edit.py | 2 | 4063 | # Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from core.util import nonone, dedupe
from .base import DocumentGUIObject
from ..model.completion import CompletionList
class CompletableEdit(DocumentGUIObject):
def __init__(self, mainwindow):
super().__init__(mainwindow.document)
self.mainwindow = mainwindow
self._attrname = ''
self._completions = None
self._complete_completion = ''
self.completion = ''
self._text = ''
# If doing completion for an entry table, set the account attribute
self.account = None
# --- Private
def _revalidate(self):
if self.mainwindow is None or not self.attrname:
return
doc = self.mainwindow.document
attrname = self.attrname
if attrname == 'description':
self._candidates = doc.transactions.descriptions
elif attrname == 'payee':
self._candidates = doc.transactions.payees
elif attrname in {'from', 'to', 'account', 'transfer'}:
result = doc.transactions.account_names
# `result` doesn't contain empty accounts' names, so we add them.
result += [a.name for a in doc.accounts if not a.inactive]
if attrname == 'transfer' and self.account is not None:
result = [name for name in result if name != self.account.name]
self._candidates = result
self._candidates = dedupe([name for name in self._candidates if name.strip()])
def _set_completion(self, completion):
completion = nonone(completion, '')
self._complete_completion = completion
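# Keep only the suffix that extends past what the user has typed.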
self.completion = completion[len(self._text):]
if self.completion:
self.view.refresh()
# --- Override
# We override view to allow it to be set multiple times because
# CompletableEdit, as a special case, can have more than one view swapping
# each other out
@property
def view(self):
return self._view
@view.setter
def view(self, value):
self._view = value
# --- Public
def commit(self):
"""Accepts current completion and updates the text with it.
If the text is a substring of the completion, completion's case will prevail. If, however,
the completion is the same length as the text, it means that the user completly types the
string. In this case, we assume that the user wants his own case to prevail.
"""
if len(self._text) < len(self._complete_completion):
self._text = self._complete_completion
self.completion = ''
self.view.refresh()
def down(self):
if self._completions:
self._set_completion(self._completions.prev())
def up(self):
if self._completions:
self._set_completion(self._completions.next())
def lookup(self):
self.mainwindow.completion_lookup.show(self)
def set_lookup_choice(self, text):
self._text = text
self.completion = ''
self.view.refresh()
# --- Properties
@property
def attrname(self):
return self._attrname
@attrname.setter
def attrname(self, value):
if self._attrname == value:
return
self._attrname = value
self.invalidate()
self._text = ''
self._set_completion('')
self._completions = None
@property
def candidates(self):
self.revalidate()
return self._candidates
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
if self.candidates:
self._completions = CompletionList(value, self._candidates)
self._set_completion(self._completions.current())
else:
self._completions = None
| gpl-3.0 |
pgoeser/gnuradio | gnuradio-core/src/python/gnuradio/gr/qa_interleave.py | 11 | 2859 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_interleave (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_int_001 (self):
lenx = 64
src0 = gr.vector_source_f (range (0, lenx, 4))
src1 = gr.vector_source_f (range (1, lenx, 4))
src2 = gr.vector_source_f (range (2, lenx, 4))
src3 = gr.vector_source_f (range (3, lenx, 4))
op = gr.interleave (gr.sizeof_float)
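# gr.interleave takes one item from each input in turn, so four stride-4
# sources reassemble the full 0..lenx-1 sequence on the single output.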
dst = gr.vector_sink_f ()
self.tb.connect (src0, (op, 0))
self.tb.connect (src1, (op, 1))
self.tb.connect (src2, (op, 2))
self.tb.connect (src3, (op, 3))
self.tb.connect (op, dst)
self.tb.run ()
expected_result = tuple (range (lenx))
result_data = dst.data ()
self.assertFloatTuplesAlmostEqual (expected_result, result_data)
def test_deint_001 (self):
lenx = 64
src = gr.vector_source_f (range (lenx))
op = gr.deinterleave (gr.sizeof_float)
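# gr.deinterleave distributes successive input items round-robin across
# the four outputs.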
dst0 = gr.vector_sink_f ()
dst1 = gr.vector_sink_f ()
dst2 = gr.vector_sink_f ()
dst3 = gr.vector_sink_f ()
self.tb.connect (src, op)
self.tb.connect ((op, 0), dst0)
self.tb.connect ((op, 1), dst1)
self.tb.connect ((op, 2), dst2)
self.tb.connect ((op, 3), dst3)
self.tb.run ()
expected_result0 = tuple (range (0, lenx, 4))
expected_result1 = tuple (range (1, lenx, 4))
expected_result2 = tuple (range (2, lenx, 4))
expected_result3 = tuple (range (3, lenx, 4))
self.assertFloatTuplesAlmostEqual (expected_result0, dst0.data ())
self.assertFloatTuplesAlmostEqual (expected_result1, dst1.data ())
self.assertFloatTuplesAlmostEqual (expected_result2, dst2.data ())
self.assertFloatTuplesAlmostEqual (expected_result3, dst3.data ())
if __name__ == '__main__':
gr_unittest.run(test_interleave, "test_interleave.xml")
| gpl-3.0 |
zq317157782/Narukami | external/googletest/googletest/scripts/upload.py | 32 | 51025 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
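# The ClientLogin response body is newline-separated key=value pairs;
# the "Auth" value is the token we need.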
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
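# Illustrative usage (names assumed): after construction, something like
# server.Send("/api/upload", payload=body, content_type=ctype, issue=42)
# performs an authenticated POST and returns the response body.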
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
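# Illustrative sketch (not part of the original flow; the field values are
# hypothetical) of how the encoder is driven elsewhere in this script:
#
#   ctype, body = EncodeMultipartFormData(
#       fields=[("subject", "Fix typo")],
#       files=[("data", "data.diff", "Index: foo.py\n...")])
#   response_body = rpc_server.Send("/upload", body, content_type=ctype)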
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
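# For example, GetContentType("patch.png") yields "image/png", while an
# unrecognized extension such as "patch.xyz" falls back to
# "application/octet-stream".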
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
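# Usage sketch (hypothetical command): RunShell aborts via ErrorExit on a
# non-zero exit status, whereas RunShellWithReturnCode leaves error handling
# to the caller:
#
#   info = RunShell(["svn", "info"])                      # dies on failure
#   out, code = RunShellWithReturnCode(["svn", "info"])   # inspect code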
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
for patch_key, filename in patch_list:
  patches.setdefault(filename, patch_key)
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content is not None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content is not None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
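# Example of the collapsing performed above (assuming "Id" is among the
# enabled keywords; padding shown schematically):
#
#   "$Id: upload.py 123 author $"  ->  "$Id$"
#   "$Id:: upload.py 123 author $" ->  "$Id::                       $"
#
# Expanded values are stripped; the fixed-width "::" form keeps its length
# by padding with spaces.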
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = mimetype and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = mimetype and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> hash of base file.
self.base_hashes = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the base files, so we can upload them along with our diff.
if self.options.revision:
extra_args = [self.options.revision] + extra_args
gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/.*$", line)
if match:
filecount += 1
filename = match.group(1)
svndiff.append("Index: %s\n" % filename)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.", line)
if match:
self.base_hashes[filename] = match.group(1)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetBaseFile(self, filename):
hash = self.base_hashes[filename]
base_content = None
new_content = None
is_binary = False
if hash == "0" * 40: # All-zero hash indicates no base file.
status = "A"
base_content = ""
else:
status = "M"
base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
if returncode:
ErrorExit("Got error status from 'git show %s'" % hash)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), filename
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if status != "A":
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same,
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
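# Example (hypothetical diff): given data containing
#
#   Index: a.py
#   <diff lines for a.py>
#   Index: b.py
#   <diff lines for b.py>
#
# SplitPatch returns [("a.py", <text for a.py>), ("b.py", <text for b.py>)].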
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = False
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
# Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| mit |
dermoth/gramps | gramps/plugins/mapservices/eniroswedenmap.py | 3 | 7555 | # Gramps - a GTK+/GNOME based genealogy program
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Peter Landgren
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# plugins/mapservices/eniroswedenmap.py
"""
Eniro Sweden (Denmark) map service plugin. Opens place in kartor.eniro.se
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.plugins.lib.libmapservice import MapService
from gramps.gui.dialog import WarningDialog
from gramps.gen.utils.location import get_main_location
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.lib import PlaceType
# Make upper case of translated country so string search works later
MAP_NAMES_SWEDEN = [_("Sweden").upper(),
"SVERIGE",
"SWEDEN",
"SUEDOIS",
"ROUTSI",
"SCHWEDEN", ]
MAP_NAMES_DENMARK = [_("Denmark").upper(),
"DANMARK",
"DENMARK",
"DANOIS",
"TANSKA",
"DÄNEMARK", ]
def _strip_leading_comma(descr):
""" Strips leading comma
and leading and trailing spaces
"""
if len(descr) > 0 and descr.strip()[0] == ",":
descr = descr.strip()[1:]
return descr.strip()
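# For example (illustrative values):
#   _strip_leading_comma(", Uppsala ") -> "Uppsala"
#   _strip_leading_comma(" Uppsala")   -> "Uppsala"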
def _build_title(db, place):
""" Builds descrition string for title parameter in url """
descr = place_displayer.display(db, place)
location = get_main_location(db, place)
parish = location.get(PlaceType.PARISH)
city = location.get(PlaceType.CITY)
state = location.get(PlaceType.STATE)
title_descr = ""
if descr:
title_descr += descr.strip()
if parish:
# TODO for Arabic, should the next line's comma be translated?
title_descr += ', ' + parish.strip() + _(" parish")
if city:
# TODO for Arabic, should the next line's comma be translated?
title_descr += ', ' + city.strip()
if state:
# TODO for Arabic, should the next line's comma be translated?
title_descr += ', ' + state.strip() + _(" state")
return _strip_leading_comma(title_descr)
def _build_city(db, place):
""" Builds description string for city parameter in url """
location = get_main_location(db, place)
county = location.get(PlaceType.COUNTY)
# Build a title description string that will work for Eniro
city_descr = _build_area(db, place)
if county:
# TODO for Arabic, should the next line's comma be translated?
city_descr += ', ' + county
return _strip_leading_comma(city_descr)
def _build_area(db, place):
""" Builds string for area parameter in url """
location = get_main_location(db, place)
street = location.get(PlaceType.STREET)
city = location.get(PlaceType.CITY)
# Build a title description string that will work for Eniro
area_descr = ""
if street:
area_descr += street.strip()
if city:
# TODO for Arabic, should the next line's comma be translated?
area_descr += ', ' + city
return _strip_leading_comma(area_descr)
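# Illustrative result (hypothetical place): street "Storgatan 1" and city
# "Stockholm" yield the area string "Storgatan 1, Stockholm".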
class EniroSVMapService(MapService):
"""Map service using http://kartor.eniro.se"""
def __init__(self):
MapService.__init__(self)
def calc_url(self):
""" Determine the url to use on maps.google.com
Logic: valid for places within Sweden and
Denmark, only if lat lon avalible
use lat lon if present
otherwise use city and country if present
otherwise use description of the place
"""
place = self._get_first_place()[0]
path = ""
# First see if we are in or near Sweden or Denmark
# Change country to upper case
location = get_main_location(self.database, place)
country = location.get(PlaceType.COUNTRY, '').upper().strip()
country_given = (country in MAP_NAMES_SWEDEN or \
country in MAP_NAMES_DENMARK) and (country != "")
# if no country given, check if we might be in the vicinity defined by
# 54 33' 0" < lat < 69 3' 0", 54.55 and 69.05
# 8 3' 0" < long < 24 9' 0", 8.05 and 24.15
latitude, longitude = self._lat_lon(place)
if latitude is None or longitude is None:
coord_ok = False
else:
latitude = float(latitude)
longitude = float(longitude)
# Check if coordinates are inside Sweden and Denmark
if (54.55 < latitude < 69.05) and (8.05 < longitude < 24.15):
coord_ok = True
else:
msg2 = _("Latitude not within '54.55' to '69.05'\n") + \
_("Longitude not within '8.05' to '24.15'")
WarningDialog(_("Eniro map not available"), msg2,
parent=self.uistate.window)
return
if coord_ok:
place_title = _build_title(self.database, place)
place_city = _build_city(self.database, place)
x_coord, y_coord = self._lat_lon(place, format="RT90")
# Set zoom level to 5 if Sweden/Denmark, others 3
zoom = 5
if not country_given:
zoom = 3
path = "http://www.eniro.se/partner.fcgi?pis=1&x=%s&y=%s" \
"&zoom_level=%i&map_size=0&title=%s&city=%s&partner=gramps"
# Note x and y are swapped!
path = path % (y_coord, x_coord, zoom, place_title, place_city)
self.url = path.replace(" ", "%20")
return
place_area = _build_area(self.database, place)
if country_given and place_area:
if country in MAP_NAMES_SWEDEN:
path = "http://kartor.eniro.se/query?&what=map_adr&mop=aq" \
"&geo_area=%s&partner=gramps"
path = path % (place_area)
self.url = path.replace(" ", "%20")
return
else:
WarningDialog(_("Eniro map not available"),
_("Coordinates needed in Denmark"),
parent=self.uistate.window)
self.url = ""
return
WarningDialog(_("Eniro map not available"),
_("Latitude and longitude,\n"
"or street and city needed"),
parent=self.uistate.window)
return
| gpl-2.0 |
whosyourban/pyspider | tests/test_response.py | 66 | 2738 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2015-01-18 11:10:27
import os
import copy
import time
import httpbin
import unittest2 as unittest
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response
from pyspider.fetcher.tornado_fetcher import Fetcher
class TestResponse(unittest.TestCase):
sample_task_http = {
'taskid': 'taskid',
'project': 'project',
'url': '',
}
@classmethod
def setUpClass(self):
self.fetcher = Fetcher(None, None, async=False)
self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
self.httpbin = 'http://127.0.0.1:14887'
time.sleep(0.5)
@classmethod
def tearDownClass(self):
self.httpbin_thread.terminate()
def get(self, url, **kwargs):
if not url.startswith('http://'):
url = self.httpbin + url
request = copy.deepcopy(self.sample_task_http)
request['url'] = url
request.update(kwargs)
task, result = self.fetcher.fetch(request)
response = rebuild_response(result)
return response
def test_10_html(self):
response = self.get('/html')
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.doc('h1'))
def test_20_xml(self):
response = self.get('/xml')
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.doc('item'))
def test_30_gzip(self):
response = self.get('/gzip')
self.assertEqual(response.status_code, 200)
self.assertIn('gzipped', response.text)
def test_40_deflate(self):
response = self.get('/deflate')
self.assertEqual(response.status_code, 200)
self.assertIn('deflated', response.text)
def test_50_ok(self):
response = self.get('/status/200')
self.assertTrue(response.ok)
self.assertTrue(response)
response = self.get('/status/302')
self.assertTrue(response.ok)
self.assertTrue(response)
with self.assertRaises(Exception):
response.raise_for_status(allow_redirects=False)
def test_60_not_ok(self):
response = self.get('/status/400')
self.assertFalse(response.ok)
self.assertFalse(response)
response = self.get('/status/500')
self.assertFalse(response.ok)
self.assertFalse(response)
response = self.get('/status/600')
self.assertFalse(response.ok)
self.assertFalse(response)
| apache-2.0 |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/pip/_vendor/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
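# Minimal usage sketch (assumes the public html5lib API; ContentHandler is a
# no-op stub from the standard library):
#
#   import html5lib
#   from xml.sax.handler import ContentHandler
#
#   doc = html5lib.parse("<p>hello</p>")
#   walker = html5lib.getTreeWalker("etree")
#   to_sax(walker(doc), ContentHandler())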
| mit |
fding/llama | analyze_fof.py | 1 | 4633 | import argparse
import math
import re
class Experiment(object):
def __init__(self, commit, params):
self.commit = commit
self.params = dict([
tuple(kv.split('=')) for kv in params.split(',')
])
self.outputs = []
class Results(dict):
def __init__(self, *args, **kwargs):
super(Results, self).__init__(*args, **kwargs)
class LogReader(object):
def __init__(self, fname):
with open(fname, 'r') as f:
self.lines = f.read().split('\n')
self.current_line = 0
def wait_for_line(self, pattern):
p = re.compile(pattern)
n = len(self.lines)
i = self.current_line
while i < n and not p.match(self.lines[i].strip()):
i = i + 1
return i
def parse_trial(self, max_i):
output = Results()
n = len(self.lines)
j = self.wait_for_line('(NO_MADVISE)|(WITH_MADVISE)')
if j >= max_i:
return None
else:
self.current_line = j
if self.lines[self.current_line].strip() == 'WITH_MADVISE':
output.with_madvise = True
else:
output.with_madvise = False
self.current_line = self.wait_for_line(r'TRIAL \d+')
if self.current_line >= n: return None
self.current_line = self.wait_for_line('==========BEFORE VM STAT==========')
if self.current_line >= n: return None
j = self.wait_for_line('==========LLAMA OUTPUT==========')
if j >= n: return None
for line in self.lines[self.current_line+1: j]:
parts = line.split(':')
if len(parts) == 2:
output['before ' + parts[0].strip()] = parts[1].strip()
self.current_line = j
j = self.wait_for_line('==========AFTER VM STAT==========')
if j >= n: return None
for line in self.lines[self.current_line+1: j]:
parts = line.split(':')
if len(parts) == 2:
output[parts[0].strip()] = parts[1].strip()
self.current_line = j+1
return output
def parse(self):
experiments = []
n = len(self.lines)
while self.current_line < n:
self.current_line = self.wait_for_line('==========START EXPERIMENT==========')
if self.current_line >= n:
break
a = Experiment(self.lines[self.current_line+1],
self.lines[self.current_line+2])
self.current_line += 3
next_expt = self.wait_for_line('==========START EXPERIMENT==========')
while True:
if self.current_line >= next_expt:
break
output = self.parse_trial(next_expt)
if output:
a.outputs.append(output)
else:
break
experiments.append(a)
return experiments
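# Shape of the log this parser expects, reconstructed from the patterns
# above (field values are illustrative):
#
#   ==========START EXPERIMENT==========
#   <commit id>
#   PARAM_NUM_VERTICES=1000,<key>=<value>,...
#   NO_MADVISE                 (or WITH_MADVISE)
#   TRIAL 0
#   ==========BEFORE VM STAT==========
#   <key>: <value> lines       (stored as "before <key>")
#   ==========LLAMA OUTPUT==========
#   <key>: <value> lines       (e.g. "Time: 1.23 s", "Warmup Time: 456 ms")
#   ==========AFTER VM STAT==========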
def floatify(s):
return float(s.split()[0])
def mean(ls):
if ls:
return sum(ls)/len(ls)
return 0
def stddev(ls):
if ls:
return math.sqrt(mean([l**2 for l in ls])-mean(ls)**2)
return 0
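# Note: stddev computes the population standard deviation via the identity
# sqrt(E[x^2] - E[x]^2); a sample estimate would use Bessel's (n - 1)
# correction instead.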
def main():
parser = argparse.ArgumentParser(description='Analyze LLAMA benchmark log files')
parser.add_argument('logfile', type=str,
help='Log file to analyze')
parser.add_argument('-o', '--output', type=str,
default='output.csv',
help='Output csv file')
args = parser.parse_args()
log_reader = LogReader(args.logfile)
experiments = log_reader.parse()
with open(args.output, 'w') as output:
output.write('Number of queries'
',Time (no madvise; s),Time (madvise; s),'
'Warmup Time (no madvise; s),Warmup Time (madvise; s)\n'
)
for e in experiments:
nm_time = [floatify(k['Time']) for k in e.outputs
if not k.with_madvise]
wm_time = [floatify(k['Time']) for k in e.outputs
if k.with_madvise]
nm_warmup = [floatify(k['Warmup Time']) for k in e.outputs
if not k.with_madvise]
wm_warmup = [floatify(k['Warmup Time']) for k in e.outputs
if k.with_madvise]
output.write('%s,%s,%s,%s,%s\n' % (
e.params['PARAM_NUM_VERTICES'],
mean(nm_time),
mean(wm_time),
mean(nm_warmup) / 1000.,
mean(wm_warmup) / 1000.,
))
if __name__=='__main__':
main()
| bsd-3-clause |
turbokongen/home-assistant | tests/components/humidifier/test_device_trigger.py | 3 | 12065 | """The tests for Humidifier device triggers."""
import datetime
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.humidifier import DOMAIN, const, device_trigger
from homeassistant.const import ATTR_SUPPORTED_FEATURES, STATE_OFF, STATE_ON
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_fire_time_changed,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a humidifier device."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
entity_id = f"{DOMAIN}.test_5678"
hass.states.async_set(
entity_id,
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "target_humidity_changed",
"device_id": device_entry.id,
"entity_id": entity_id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"below": 20,
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_below"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"above": 30,
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_above"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"above": 30,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_above_for"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 7})
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "target_humidity_changed_below"
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "target_humidity_changed_above"
# Wait 6 minutes
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(minutes=6))
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "target_humidity_changed_above_for"
# Fake turn off
hass.states.async_set("humidifier.entity", STATE_OFF, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 4
assert (
calls[3].data["some"] == "turn_off device - humidifier.entity - on - off - None"
)
# Fake turn on
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 5
assert (
calls[4].data["some"] == "turn_on device - humidifier.entity - off - on - None"
)
async def test_invalid_config(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"below": 20,
"invalid": "invalid",
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed"},
},
},
]
},
)
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 7})
await hass.async_block_till_done()
# Should not trigger for invalid config
assert len(calls) == 0
async def test_get_trigger_capabilities_on(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "turned_on",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "for", "optional": True, "type": "positive_time_period_dict"}]
async def test_get_trigger_capabilities_off(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "turned_off",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "for", "optional": True, "type": "positive_time_period_dict"}]
async def test_get_trigger_capabilities_humidity(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "target_humidity_changed",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"description": {"suffix": "%"},
"name": "above",
"optional": True,
"type": "integer",
},
{
"description": {"suffix": "%"},
"name": "below",
"optional": True,
"type": "integer",
},
{"name": "for", "optional": True, "type": "positive_time_period_dict"},
]
| apache-2.0 |
varunarya10/posix_ipc | tests/test_module.py | 5 | 4122 | # Python imports
# Don't add any from __future__ imports here. This code should execute
# against standard Python.
import unittest
import datetime
import mmap
import os
import resource
# Project imports
import posix_ipc
# Hack -- add tests directory to sys.path so Python 3 can find base.py.
import sys
sys.path.insert(0, os.path.join(os.getcwd(), 'tests'))
import base as tests_base
ONE_MILLION = 1000000
class TestModule(tests_base.Base):
"""Exercise the posix_ipc module-level functions and constants"""
def test_constant_values(self):
"""test that constants are what I expect"""
self.assertEqual(posix_ipc.O_CREAT, os.O_CREAT)
self.assertEqual(posix_ipc.O_EXCL, os.O_EXCL)
self.assertEqual(posix_ipc.O_CREX, posix_ipc.O_CREAT | posix_ipc.O_EXCL)
self.assertEqual(posix_ipc.O_TRUNC, os.O_TRUNC)
self.assertEqual(posix_ipc.PAGE_SIZE, resource.getpagesize())
self.assertIn(posix_ipc.SEMAPHORE_TIMEOUT_SUPPORTED, (True, False))
self.assertIn(posix_ipc.SEMAPHORE_VALUE_SUPPORTED, (True, False))
self.assertGreaterEqual(posix_ipc.SEMAPHORE_VALUE_MAX, 1)
self.assertIn(posix_ipc.MESSAGE_QUEUES_SUPPORTED, (True, False))
if posix_ipc.MESSAGE_QUEUES_SUPPORTED:
self.assertGreaterEqual(posix_ipc.QUEUE_MESSAGES_MAX_DEFAULT, 1)
self.assertGreaterEqual(posix_ipc.QUEUE_MESSAGE_SIZE_MAX_DEFAULT, 1)
self.assertGreaterEqual(posix_ipc.QUEUE_PRIORITY_MAX, 0)
if hasattr(posix_ipc, 'USER_SIGNAL_MIN'):
self.assertGreaterEqual(posix_ipc.USER_SIGNAL_MIN, 1)
if hasattr(posix_ipc, 'USER_SIGNAL_MAX'):
self.assertGreaterEqual(posix_ipc.USER_SIGNAL_MAX, 1)
self.assertTrue(isinstance(posix_ipc.VERSION, str))
def test_unlink_semaphore(self):
"""Exercise unlink_semaphore"""
sem = posix_ipc.Semaphore(None, posix_ipc.O_CREX)
posix_ipc.unlink_semaphore(sem.name)
sem.close()
self.assertRaises(posix_ipc.ExistentialError, posix_ipc.Semaphore,
sem.name)
def test_unlink_shared_memory(self):
"""Exercise unlink_shared_memory"""
mem = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=1024)
mem.close_fd()
posix_ipc.unlink_shared_memory(mem.name)
self.assertRaises(posix_ipc.ExistentialError, posix_ipc.SharedMemory,
mem.name)
if posix_ipc.MESSAGE_QUEUES_SUPPORTED:
def test_unlink_message_queue(self):
"""Exercise unlink_message_queue"""
mq = posix_ipc.MessageQueue(None, posix_ipc.O_CREX)
posix_ipc.unlink_message_queue(mq.name)
mq.close()
self.assertRaises(posix_ipc.ExistentialError,
posix_ipc.MessageQueue, mq.name)
def test_constant_queue_priority_max(self):
"""Test that QUEUE_PRIORITY_MAX is reported correctly"""
mq = posix_ipc.MessageQueue(None, posix_ipc.O_CREX)
if posix_ipc.QUEUE_PRIORITY_MAX < ONE_MILLION:
for sent_priority in range(posix_ipc.QUEUE_PRIORITY_MAX + 1):
mq.send('', priority=sent_priority)
msg, received_priority = mq.receive()
self.assertEqual(sent_priority, received_priority)
# else:
# QUEUE_PRIORITY_MAX is probably LONG_MAX or larger and
# testing every value will take too long.
self.assertRaises(ValueError, mq.send, '',
priority=posix_ipc.QUEUE_PRIORITY_MAX + 1)
mq.unlink()
mq.close()
def test_errors(self):
self.assertTrue(issubclass(posix_ipc.Error, Exception))
self.assertTrue(issubclass(posix_ipc.SignalError, posix_ipc.Error))
self.assertTrue(issubclass(posix_ipc.PermissionsError, posix_ipc.Error))
self.assertTrue(issubclass(posix_ipc.ExistentialError, posix_ipc.Error))
self.assertTrue(issubclass(posix_ipc.BusyError, posix_ipc.Error))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
dparks1134/DBB | dbb/merge.py | 1 | 7637 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import logging
from collections import defaultdict
from common import checkFileExists
from defaultValues import DefaultValues
from seqUtils import readSeqStats
from greedy import Greedy, Contig, Core, readContigIdtoBinId
from genomicSignatures import GenomicSignatures
from distributions import Distributions, readDistributions
class Merge(object):
def __init__(self, bDebug=False):
self.logger = logging.getLogger()
def run(self, preprocessDir, binningFile, gcDistPer, tdDistPer, covDistPer, outputFile):
# verify inputs
contigStatsFile = os.path.join(preprocessDir, 'contigs.seq_stats.tsv')
checkFileExists(contigStatsFile)
contigTetraFile = os.path.join(preprocessDir, 'contigs.tetra.tsv')
checkFileExists(contigTetraFile)
# read contig stats
self.logger.info(' Reading contig statistics.')
contigStats = readSeqStats(contigStatsFile)
# read tetranucleotide signatures
self.logger.info(' Reading contig tetranucleotide signatures.')
genomicSig = GenomicSignatures(DefaultValues.DEFAULT_KMER_SIZE, 1)
tetraSigs = genomicSig.read(contigTetraFile)
# read bin assignments
self.logger.info(' Reading core bin assignments.')
contigIdToBinId = readContigIdtoBinId(binningFile)
# read distributions
self.logger.info(' Reading GC, TD, and coverage distributions.')
gcDist, tdDist, covDist = readDistributions(preprocessDir, gcDistPer, tdDistPer, covDistPer)
distributions = Distributions(gcDist, gcDistPer, tdDist, tdDistPer, covDist, covDistPer)
# calculate statistics of core bins and unbinned contigs
self.logger.info('')
self.logger.info(' Calculating statistics of bins.')
bins = {}
binnedContigs = defaultdict(list)
for contigId, contigStat in contigStats.iteritems():
binId = contigIdToBinId.get(contigId, Greedy.UNBINNED)
if binId != Greedy.UNBINNED:
contigLen, contigGC, contigCov = contigStat
contigTetraSig = tetraSigs[contigId]
contig = Contig(contigId, contigLen, contigGC, contigCov, contigTetraSig)
contig.binId = binId
binnedContigs[binId].append(contig)
# build statistics for core bins
if binId not in bins:
bins[binId] = Core(binId, 0, 0, 0, [0]*genomicSig.numKmers())
bin = bins[binId]
bin.length += contigLen
weight = float(contigLen)/bin.length
bin.GC = contigGC*weight + bin.GC*(1.0-weight)
bin.coverage = contigCov*weight + bin.coverage*(1.0-weight)
for i in xrange(0, len(bin.tetraSig)):
bin.tetraSig[i] = contigTetraSig[i]*weight + bin.tetraSig[i]*(1.0-weight)
# compare contigs to bin
fout = open(outputFile, 'w')
fout.write('Src Bin Id\tGC\tCoverage\tDest Bin Id\tGC\tCoverage\tContigs in Src Bin\tWithin GC\tWithin coverage\tWithin tetra\tClosest GC\tClosest coverage\tClosest tetra\tMerging score\n')
for binId1, bin1 in bins.iteritems():
contigs = binnedContigs[binId1]
for binId2, bin2 in bins.iteritems():
if binId1 == binId2:
continue
fout.write('%s\t%.1f\t%.1f\t%s\t%.1f\t%.1f\t%d' % (binId1, bin1.GC*100, bin1.coverage, binId2, bin2.GC*100, bin2.coverage, len(contigs)))
numWithinGC = 0
numWithinCov = 0
numWithinTetra = 0
closestGC = 0
closestCov = 0
closestTetra = 0
for contig in contigs:
tdDistanceQuery = genomicSig.distance(contig.tetraSig, bin2.tetraSig)
if distributions.withinDistGC(contig, bin2):
numWithinGC += 1
if distributions.withinDistCov(contig, bin2):
numWithinCov += 1
if distributions.witinDistTD(tdDistanceQuery, contig):
numWithinTetra += 1
closestBin = [None]*3
minDistances = [1e9]*3
for binId, bin in bins.iteritems():
if binId == binId1:
continue
if not distributions.withinDistGC(contig, bin):
continue
if not distributions.withinDistCov(contig, bin):
continue
tdDistance = genomicSig.distance(contig.tetraSig, bin.tetraSig)
if not distributions.witinDistTD(tdDistance, contig):
continue
gcDistance = distributions.gcDistance(contig, bin)
covDistance = distributions.covDistance(contig, bin)
                        # order must mirror the closest* counters below:
                        # index 0 = GC, index 1 = coverage, index 2 = tetra
                        distances = [gcDistance, covDistance, tdDistance]
for i, distance in enumerate(distances):
if distance < minDistances[i]:
minDistances[i] = distance
closestBin[i] = binId
if closestBin[0] == binId2:
closestGC += 1
if closestBin[1] == binId2:
closestCov += 1
if closestBin[2] == binId2:
closestTetra += 1
fout.write('\t%d\t%d\t%d\t%d\t%d\t%d' % (numWithinGC, numWithinCov, numWithinTetra, closestGC, closestCov, closestTetra))
fout.write('\t%.2f\n' % (float(closestGC + closestCov + closestTetra) / (3*len(contigs))))
fout.close()
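# --- Editor's sketch (not part of DBB): the running weighted mean used in
# run() above. After bin.length is incremented, weight = len_i / total, so
#     mean_new = x_i * weight + mean_old * (1 - weight)
# reproduces the length-weighted average exactly; the merge score written at
# the end is simply the fraction of closest-bin hits over 3 * len(contigs).
def _weighted_mean_self_check():
    lengths, gcs = [100, 300, 600], [0.40, 0.50, 0.60]
    total, mean = 0, 0.0
    for n, gc in zip(lengths, gcs):
        total += n
        w = float(n) / total
        mean = gc * w + mean * (1.0 - w)
    direct = sum(n * g for n, g in zip(lengths, gcs)) / float(sum(lengths))
    assert abs(mean - direct) < 1e-12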
| gpl-3.0 |
Jamlum/pytomo | pytomo/kaa_metadata/image/png.py | 3 | 4354 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# png.py - png file parsing
# -----------------------------------------------------------------------------
# $Id: png.py 3079 2008-02-13 20:10:40Z dmeyer $
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Thomas Schueppel <stain@acm.org>
# Maintainer: Dirk Meyer <dischi@freevo.org>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
from __future__ import absolute_import
__all__ = ['Parser']
# python imports
import struct
import zlib
import logging
# kaa imports
#import kaa
# use strutils instead of kaa
from .. import strutils as kaa
# import kaa_metadata.image core
from . import core
# get logging object
log = logging.getLogger('metadata')
# interesting file format info:
# http://www.libpng.org/pub/png/png-sitemap.html#programming
# http://pmt.sourceforge.net/pngmeta/
# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html
PNGSIGNATURE = "\211PNG\r\n\032\n"
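# --- Editor's sketch (not part of kaa-metadata): the generic chunk framing
# that _readChunk() below walks through. Every PNG chunk is a 4-byte
# big-endian length, a 4-byte type, <length> bytes of data and a 4-byte CRC;
# the file object is assumed to be positioned just past the 8-byte signature.
def _iter_png_chunks(fileobj):
    while True:
        header = fileobj.read(8)
        if len(header) < 8:
            return
        length, ctype = struct.unpack('>I4s', header)
        data = fileobj.read(length)
        crc = fileobj.read(4)
        yield ctype, data, crc
        if ctype == 'IEND':
            return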
class PNG(core.Image):
def __init__(self,file):
core.Image.__init__(self)
self.mime = 'image/png'
self.type = 'PNG image'
signature = file.read(8)
if ( signature != PNGSIGNATURE ):
raise core.ParseError()
self.meta = {}
while self._readChunk(file):
pass
if len(self.meta.keys()):
self._appendtable( 'PNGMETA', self.meta )
for key, value in self.meta.items():
if key.startswith('Thumb:') or key == 'Software':
self._set(key, value)
def _readChunk(self,file):
try:
(length, type) = struct.unpack('>I4s', file.read(8))
except (OSError, IOError, struct.error):
return 0
key = None
if type == 'IEND':
return 0
elif type == 'IHDR':
data = file.read(length+4)
self.width, self.height, self.depth = struct.unpack(">IIb", data[:9])
elif type == 'tEXt':
log.debug('latin-1 Text found.')
(data, crc) = struct.unpack('>%isI' % length,file.read(length+4))
(key, value) = data.split('\0')
self.meta[key] = kaa.str_to_unicode(value)
elif type == 'zTXt':
log.debug('Compressed Text found.')
(data,crc) = struct.unpack('>%isI' % length,file.read(length+4))
split = data.split('\0')
key = split[0]
value = "".join(split[1:])
compression = ord(value[0])
value = value[1:]
            if compression == 0:
                # compression method 0 is zlib/deflate; keep the decompressed
                # text rather than the raw compressed bytes
                value = zlib.decompress(value)
                log.debug("%s (Compressed %i) -> %s" % (key, compression, value))
            else:
                log.debug("%s has unknown Compression %c" % (key, compression))
            self.meta[key] = kaa.str_to_unicode(value)
elif type == 'iTXt':
log.debug('International Text found.')
(data,crc) = struct.unpack('>%isI' % length,file.read(length+4))
            # iTXt payload is keyword\0comp_flag\0comp_method\0language\0
            # translated_keyword\0text; unpacking into exactly two names would
            # raise ValueError, so take the first and last fields (the text is
            # assumed uncompressed here, which is the common case)
            fields = data.split('\0')
            key, value = fields[0], fields[-1]
            self.meta[key] = kaa.str_to_unicode(value)
else:
file.seek(length+4,1)
log.debug("%s of length %d ignored." % (type, length))
if key is not None and key.lower() == "comment":
self.comment = self.meta[key]
return 1
Parser = PNG
| gpl-2.0 |
jjmleiro/hue | desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_isoschematron.py | 17 | 34491 | # -*- coding: utf-8 -*-
"""
Test cases related to ISO-Schematron parsing and validation
"""
import unittest, sys, os.path
from lxml import isoschematron
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, fileInTestDir
from common_imports import doctest, make_doctest
class ETreeISOSchematronTestCase(HelperTestCase):
def test_schematron(self):
tree_valid = self.parse('<AAA><BBB/><CCC/></AAA>')
tree_invalid = self.parse('<AAA><BBB/><CCC/><DDD/></AAA>')
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open Model</title>
<rule context="AAA">
<assert test="BBB"> BBB element is not present</assert>
<assert test="CCC"> CCC element is not present</assert>
</rule>
</pattern>
<pattern id="ClosedModel">
<title>Closed model</title>
<rule context="AAA">
<assert test="BBB"> BBB element is not present</assert>
<assert test="CCC"> CCC element is not present</assert>
<assert test="count(BBB|CCC) = count (*)">There is an extra element</assert>
</rule>
</pattern>
</schema>
''')
schema = isoschematron.Schematron(schema)
self.assertTrue(schema.validate(tree_valid))
self.assertTrue(not schema.validate(tree_invalid))
def test_schematron_elementtree_error(self):
self.assertRaises(ValueError, isoschematron.Schematron, etree.ElementTree())
# an empty pattern is valid in iso schematron
def test_schematron_empty_pattern(self):
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open model</title>
</pattern>
</schema>
''')
schema = isoschematron.Schematron(schema)
self.assertTrue(schema)
def test_schematron_invalid_schema_empty(self):
schema = self.parse('''\
<schema xmlns="http://purl.oclc.org/dsdl/schematron" />
''')
self.assertRaises(etree.SchematronParseError,
isoschematron.Schematron, schema)
def test_schematron_invalid_schema_namespace(self):
schema = self.parse('''\
<schema xmlns="mynamespace" />
''')
self.assertRaises(etree.SchematronParseError,
isoschematron.Schematron, schema)
def test_schematron_from_tree(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_from_element(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema.getroot())
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_from_file(self):
schematron = isoschematron.Schematron(file=fileInTestDir('test.sch'))
self.assertTrue(isinstance(schematron, isoschematron.Schematron))
def test_schematron_call(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
def test_schematron_validate(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron.validate(tree_valid), schematron.error_log)
valid = schematron.validate(tree_invalid)
self.assertTrue(not valid)
def test_schematron_assertValid(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
self.assertRaises(etree.DocumentInvalid, schematron.assertValid,
tree_invalid)
def test_schematron_error_log(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(len(schematron.error_log), 1,
'expected single error: %s (%s errors)' %
(schematron.error_log, len(schematron.error_log)))
def test_schematron_result_report(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
schematron = isoschematron.Schematron(schema, store_report=True)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertTrue(
isinstance(schematron.validation_report, etree._ElementTree),
'expected a validation report result tree, got: %s' %
(schematron.validation_report))
schematron = isoschematron.Schematron(schema, store_report=False)
self.assertTrue(schematron(tree_valid), schematron.error_log)
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertTrue(schematron.validation_report is None,
'validation reporting switched off, still: %s' %
(schematron.validation_report))
def test_schematron_store_schematron(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron.validator_xslt is None)
schematron = isoschematron.Schematron(schema, store_schematron=True)
self.assertTrue(isinstance(schematron.schematron, etree._ElementTree),
'expected schematron schema to be stored')
def test_schematron_store_xslt(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron.validator_xslt is None)
schematron = isoschematron.Schematron(schema, store_xslt=True)
self.assertTrue(isinstance(schematron.validator_xslt, etree._ElementTree),
'expected validator xslt to be stored')
def test_schematron_abstract(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern is-a="abstract.dateTime.tz_utc" id="datetime" >
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern is-a="abstract.dateTime.tz_utc_nillable" id="nillableDatetime">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
valid_trees = [
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
</root>
'''),
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime>2009-12-10T15:21:00Z</nillableDatetime>
</root>
'''),
self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00+00:00</datetime>
<nillableDatetime>2009-12-10T15:21:00-00:00</nillableDatetime>
</root>
'''),
]
schematron = isoschematron.Schematron(schema)
for tree_valid in valid_trees:
self.assertTrue(schematron(tree_valid), schematron.error_log)
tree_invalid = self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
</root>
''')
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
tree_invalid = self.parse('''\
<root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime xsi:nil="true"/>
<nillableDatetime>2009-12-10T16:21:00Z</nillableDatetime>
</root>
''')
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_phases(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<sch:phase id="mandatory">
<sch:active pattern="number_of_entries"/>
</sch:phase>
<sch:phase id="datetime_checks">
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<sch:phase id="full">
<sch:active pattern="number_of_entries"/>
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries test</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="datetime" is-a="abstract.dateTime.tz_utc">
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern id="nillableDatetime" is-a="abstract.dateTime.tz_utc_nillable">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
# check everything (default phase #ALL)
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase mandatory
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'mandatory'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase datetime_checks
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'datetime_checks'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase full
schematron = isoschematron.Schematron(
schema, compile_params={'phase': 'full'})
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_phases_kwarg(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:title>iso schematron validation</sch:title>
<sch:ns uri="http://www.w3.org/2001/XMLSchema-instance" prefix="xsi"/>
<sch:ns uri="http://codespeak.net/lxml/objectify/pytype" prefix="py"/>
<sch:phase id="mandatory">
<sch:active pattern="number_of_entries"/>
</sch:phase>
<sch:phase id="datetime_checks">
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<sch:phase id="full">
<sch:active pattern="number_of_entries"/>
<sch:active pattern="datetime"/>
<sch:active pattern="nillableDatetime"/>
</sch:phase>
<!-- of course, these only really make sense when combined with a schema that
ensures datatype xs:dateTime -->
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="$lastchar='Z' or $tz='00:00'">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern abstract="true" id="abstract.dateTime.tz_utc_nillable">
<sch:rule context="$datetime">
<sch:let name="tz" value="concat(substring-after(substring-after(./text(), 'T'), '+'), substring-after(substring-after(./text(), 'T'), '-'))"/>
<sch:let name="lastchar" value="substring(./text(), string-length(./text()))"/>
<sch:assert test="@xsi:nil='true' or ($lastchar='Z' or $tz='00:00')">[ERROR] element (<sch:value-of select="name(.)"/>) dateTime value (<sch:value-of select="."/>) is not qualified as UTC (tz: <sch:value-of select="$tz"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries test</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<sch:pattern id="datetime" is-a="abstract.dateTime.tz_utc">
<sch:param name="datetime" value="datetime"/>
</sch:pattern>
<sch:pattern id="nillableDatetime" is-a="abstract.dateTime.tz_utc_nillable">
<sch:param name="datetime" value="nillableDatetime"/>
</sch:pattern>
</sch:schema>
''')
tree_valid = self.parse('''\
<message xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<datetime>2009-12-10T15:21:00Z</datetime>
<nillableDatetime xsi:nil="true"/>
<number_of_entries>0</number_of_entries>
<entries>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<datetime>2009-12-10T16:21:00+01:00</datetime>
<nillableDatetime>2009-12-10T16:21:00+01:00</nillableDatetime>
<number_of_entries>3</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
# check everything (default phase #ALL)
schematron = isoschematron.Schematron(schema)
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase mandatory
schematron = isoschematron.Schematron(schema, phase='mandatory')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 1
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase datetime_checks
schematron = isoschematron.Schematron(schema, phase='datetime_checks')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 2
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected,
'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
# check phase full
schematron = isoschematron.Schematron(schema, phase='full')
self.assertTrue(schematron(tree_valid), schematron.error_log)
expected = 3
valid = schematron(tree_invalid)
self.assertTrue(not valid)
self.assertEqual(
len(schematron.error_log), expected, 'expected %s errors: %s (%s errors)' %
(expected, schematron.error_log, len(schematron.error_log)))
def test_schematron_xmlschema_embedded(self):
schema = self.parse('''\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<xs:element name="message">
<xs:complexType>
<xs:sequence>
<xs:element name="number_of_entries" type="xs:positiveInteger">
<xs:annotation>
<xs:appinfo>
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</xs:appinfo>
</xs:annotation>
</xs:element>
<xs:element name="entries">
<xs:complexType>
<xs:sequence>
<xs:element name="entry" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>2</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>1</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
xmlschema = etree.XMLSchema(schema)
schematron = isoschematron.Schematron(schema)
# fwiw, this must also be XMLSchema-valid
self.assertTrue(xmlschema(tree_valid), xmlschema.error_log)
self.assertTrue(schematron(tree_valid))
# still schema-valid
self.assertTrue(xmlschema(tree_invalid), xmlschema.error_log)
self.assertTrue(not schematron(tree_invalid))
def test_schematron_relaxng_embedded(self):
schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:sch="http://purl.oclc.org/dsdl/schematron"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<start>
<ref name="message"/>
</start>
<define name="message">
<element name="message">
<element name="number_of_entries">
<!-- RelaxNG can be mixed freely with stuff from other namespaces -->
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
<data type="positiveInteger"/>
</element>
<element name="entries">
<zeroOrMore>
<element name="entry"><data type="string"/></element>
</zeroOrMore>
</element>
</element>
</define>
</grammar>
''')
tree_valid = self.parse('''\
<message>
<number_of_entries>2</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
tree_invalid = self.parse('''\
<message>
<number_of_entries>1</number_of_entries>
<entries>
<entry>Entry 1</entry>
<entry>Entry 2</entry>
</entries>
</message>
''')
relaxng = etree.RelaxNG(schema)
schematron = isoschematron.Schematron(schema)
# fwiw, this must also be RelaxNG-valid
self.assertTrue(relaxng(tree_valid), relaxng.error_log)
self.assertTrue(schematron(tree_valid))
# still schema-valid
self.assertTrue(relaxng(tree_invalid), relaxng.error_log)
self.assertTrue(not schematron(tree_invalid))
def test_schematron_invalid_args(self):
schema = self.parse('''\
<sch:schema xmlns:sch="http://purl.oclc.org/dsdl/schematron">
<sch:pattern id="number_of_entries">
<sch:title>mandatory number_of_entries tests</sch:title>
<sch:rule context="number_of_entries">
<sch:assert test="text()=count(../entries/entry)">[ERROR] number_of_entries (<sch:value-of select="."/>) must equal the number of entries/entry elements (<sch:value-of select="count(../entries/entry)"/>)</sch:assert>
</sch:rule>
</sch:pattern>
</sch:schema>
''')
# handing phase as keyword arg will *not* raise the type error
self.assertRaises(TypeError, isoschematron.Schematron, schema,
compile_params={'phase': None})
def test_schematron_customization(self):
class MySchematron(isoschematron.Schematron):
def _extract(self, root):
schematron = (root.xpath(
'//sch:schema',
namespaces={'sch': "http://purl.oclc.org/dsdl/schematron"})
or [None])[0]
return schematron
def _include(self, schematron, **kwargs):
raise RuntimeError('inclusion unsupported')
def _expand(self, schematron, **kwargs):
raise RuntimeError('expansion unsupported')
def _validation_errors(self, validationReport):
valid = etree.XPath(
'count(//svrl:successful-report[@flag="critical"])=1',
namespaces={'svrl': isoschematron.SVRL_NS})(
validationReport)
if valid:
return []
error = etree.Element('Error')
error.text = 'missing critical condition report'
return [error]
tree_valid = self.parse('<AAA><BBB/><CCC/></AAA>')
tree_invalid = self.parse('<AAA><BBB/><CCC/><DDD/></AAA>')
schema = self.parse('''\
<schema xmlns="http://www.example.org/yet/another/schema/dialect">
<schema xmlns="http://purl.oclc.org/dsdl/schematron" >
<pattern id="OpenModel">
<title>Open Model</title>
<rule context="AAA">
<report test="BBB" flag="info">BBB element must be present</report>
<report test="CCC" flag="info">CCC element must be present</report>
</rule>
</pattern>
<pattern id="ClosedModel">
<title>Closed model</title>
<rule context="AAA">
<report test="BBB" flag="info">BBB element must be present</report>
<report test="CCC" flag="info">CCC element must be present</report>
<report test="count(BBB|CCC) = count(*)" flag="critical">Only BBB and CCC children must be present</report>
</rule>
</pattern>
</schema>
</schema>
''')
# check if overridden _include is run
self.assertRaises(RuntimeError, MySchematron, schema, store_report=True)
# check if overridden _expand is run
self.assertRaises(RuntimeError, MySchematron, schema, store_report=True,
include=False)
schema = MySchematron(schema, store_report=True, include=False,
expand=False)
self.assertTrue(schema.validate(tree_valid))
self.assertTrue(not schema.validate(tree_invalid))
#TODO: test xslt parameters for inclusion, expand & compile steps (?)
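# --- Editor's sketch (not part of the test suite): the minimal validate
# flow the cases above exercise, with a throwaway schema; the element names
# are illustrative only.
def _quickstart_demo():
    schema = etree.XML(
        '<schema xmlns="http://purl.oclc.org/dsdl/schematron">'
        '<pattern id="demo"><rule context="AAA">'
        '<assert test="BBB">BBB element is not present</assert>'
        '</rule></pattern></schema>')
    validator = isoschematron.Schematron(schema)
    assert validator(etree.XML('<AAA><BBB/></AAA>'))
    assert not validator(etree.XML('<AAA/>'))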
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeISOSchematronTestCase)])
suite.addTests(
[make_doctest('../../../doc/validation.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| apache-2.0 |
jwi078/incubator-airflow | airflow/operators/hive_to_mysql.py | 5 | 3572 | import logging
from airflow.hooks import HiveServer2Hook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from tempfile import NamedTemporaryFile
class HiveToMySqlTransfer(BaseOperator):
"""
    Moves data from Hive to MySQL. Note that for now the data is loaded
    into memory before being pushed to MySQL, so this operator should
    be used for small amounts of data.
    :param sql: SQL query to execute against the Hive database
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
    :param mysql_conn_id: destination mysql connection
    :type mysql_conn_id: str
    :param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param mysql_preoperator: sql statement to run against mysql prior to
        import, typically used to truncate or delete existing rows ahead of
        the incoming data, allowing the task to be idempotent (running the task
twice won't double load data)
:type mysql_preoperator: str
:param mysql_postoperator: sql statement to run against mysql after the
import, typically used to move data from staging to production
and issue cleanup commands.
:type mysql_postoperator: str
:param bulk_load: flag to use bulk_load option. This loads mysql directly
from a tab-delimited text file using the LOAD DATA LOCAL INFILE command.
This option requires an extra connection parameter for the
destination MySQL connection: {'local_infile': true}.
:type bulk_load: bool
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator',
'mysql_postoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
hiveserver2_conn_id='hiveserver2_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
mysql_postoperator=None,
bulk_load=False,
*args, **kwargs):
super(HiveToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.mysql_postoperator = mysql_postoperator
self.hiveserver2_conn_id = hiveserver2_conn_id
self.bulk_load = bulk_load
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
logging.info("Extracting data from Hive")
logging.info(self.sql)
if self.bulk_load:
tmpfile = NamedTemporaryFile()
hive.to_csv(self.sql, tmpfile.name, delimiter='\t',
lineterminator='\n', output_header=False)
else:
results = hive.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
logging.info("Running MySQL preoperator")
mysql.run(self.mysql_preoperator)
logging.info("Inserting rows into MySQL")
if self.bulk_load:
mysql.bulk_load(table=self.mysql_table, tmp_file=tmpfile.name)
tmpfile.close()
else:
mysql.insert_rows(table=self.mysql_table, rows=results)
if self.mysql_postoperator:
logging.info("Running MySQL postoperator")
mysql.run(self.mysql_postoperator)
logging.info("Done.")
| apache-2.0 |
JackDandy/SickGear | lib/hachoir_py3/field/basic_field_set.py | 2 | 4812 | from hachoir_py3.field import Field, FieldError
from hachoir_py3.stream import InputStream
from hachoir_py3.core.endian import BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN
from hachoir_py3.core.event_handler import EventHandler
class ParserError(FieldError):
"""
Error raised by a field set.
@see: L{FieldError}
"""
pass
class MatchError(FieldError):
"""
Error raised by a field set when the stream content doesn't
match to file format.
@see: L{FieldError}
"""
pass
class BasicFieldSet(Field):
_event_handler = None
is_field_set = True
endian = None
def __init__(self, parent, name, stream, description, size):
# Sanity checks (preconditions)
assert not parent or issubclass(parent.__class__, BasicFieldSet)
assert issubclass(stream.__class__, InputStream)
# Set field set size
if size is None and self.static_size:
assert isinstance(self.static_size, int)
size = self.static_size
# Set Field attributes
self._parent = parent
self._name = name
self._size = size
self._description = description
self.stream = stream
self._field_array_count = {}
# Set endian
if not self.endian:
assert parent and parent.endian
self.endian = parent.endian
if parent:
# This field set is one of the root leafs
self._address = parent.nextFieldAddress()
self.root = parent.root
assert id(self.stream) == id(parent.stream)
else:
# This field set is the root
self._address = 0
self.root = self
self._global_event_handler = None
# Sanity checks (post-conditions)
assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN)
if (self._size is not None) and (self._size <= 0):
raise ParserError("Invalid parser '%s' size: %s" %
(self.path, self._size))
def reset(self):
self._field_array_count = {}
def createValue(self):
return None
def connectEvent(self, event_name, handler, local=True):
assert event_name in (
# Callback prototype: def f(field)
# Called when new value is already set
"field-value-changed",
# Callback prototype: def f(field)
# Called when field size is already set
"field-resized",
# A new field has been inserted in the field set
# Callback prototype: def f(index, new_field)
"field-inserted",
# Callback prototype: def f(old_field, new_field)
# Called when new field is already in field set
"field-replaced",
# Callback prototype: def f(field, new_value)
# Called to ask to set new value
"set-field-value"
), "Event name %r is invalid" % event_name
if local:
if self._event_handler is None:
self._event_handler = EventHandler()
self._event_handler.connect(event_name, handler)
else:
if self.root._global_event_handler is None:
self.root._global_event_handler = EventHandler()
self.root._global_event_handler.connect(event_name, handler)
def raiseEvent(self, event_name, *args):
# Transfer event to local listeners
if self._event_handler is not None:
self._event_handler.raiseEvent(event_name, *args)
# Transfer event to global listeners
if self.root._global_event_handler is not None:
self.root._global_event_handler.raiseEvent(event_name, *args)
def setUniqueFieldName(self, field):
key = field._name[:-2]
try:
self._field_array_count[key] += 1
except KeyError:
self._field_array_count[key] = 0
field._name = key + "[%u]" % self._field_array_count[key]
def readFirstFields(self, number):
"""
Read first number fields if they are not read yet.
Returns number of new added fields.
"""
number = number - self.current_length
if 0 < number:
return self.readMoreFields(number)
else:
return 0
def createFields(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def getField(self, key, const=True):
raise NotImplementedError()
def nextFieldAddress(self):
raise NotImplementedError()
def getFieldIndex(self, field):
raise NotImplementedError()
def readMoreFields(self, number):
raise NotImplementedError()
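# --- Editor's sketch (illustrative): registering a listener for one of the
# event names documented in connectEvent() above; the handler name and its
# print body are assumptions, not hachoir API.
def _log_field_value_changed(field):
    # matches the "field-value-changed" prototype: def f(field)
    print("field-value-changed on %s" % field._name)
# a concrete BasicFieldSet subclass instance would register it with:
#     fieldset.connectEvent("field-value-changed", _log_field_value_changed)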
| gpl-3.0 |
leckman/exocode | src/array_processing.py | 1 | 2059 | '''
Copyright (c) 2015, Laura Eckman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import numpy as np
def lingray(x, survey='', a=None, b=None):
"""
Auxiliary function that specifies the linear gray scale.
a and b are the cutoffs : if not specified, min and max are used
Prancer Physics Louisville
"""
if a == None:
a = np.min(x)
if b == None:
b = np.max(x)
if survey == 'DSS':
return (x-float(a))/(b-a)*255.0
return (x-float(a))/(b-a)
def loggray(x, survey='',a=None, b=None):
"""
Auxiliary function that specifies the logarithmic gray scale.
a and b are the cutoffs : if not specified, min and max are used
Prancer Physics Louisville
"""
if a == None:
a = np.min(x)
if b == None:
b = np.max(x)
linval = 10.0 + 990.0 * (x-float(a))/(b-a)
if survey == 'DSS':
return (np.log10(linval)-1.0)*0.5*255.0
return (np.log10(linval)-1.0)*0.5
if __name__ == '__main__':
#example usage
'''
WISE_image = <numpy array>
usable_WISE = loggray(WISE_image,survey='WISE')
    DSS_image = <numpy array>
usable_DSS = lingray(DSS_image,survey='DSS')
'''
pass
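    # --- Editor's runnable sketch (synthetic data; survey flags as above) ---
    demo = np.arange(12, dtype=float).reshape(3, 4)
    print(lingray(demo, survey='DSS'))  # linear stretch scaled onto 0..255
    print(loggray(demo))                # log stretch scaled onto 0..1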
| mit |
vsajip/django | django/utils/unittest/__init__.py | 571 | 3069 | """
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
import sys
# Django hackery to load the appropriate version of unittest
try:
# check the system path first
from unittest2 import *
except ImportError:
if sys.version_info >= (2,7):
# unittest2 features are native in Python 2.7
from unittest import *
else:
# otherwise use our bundled version
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '0.5.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from django.utils.unittest.collector import collector
from django.utils.unittest.result import TestResult
from django.utils.unittest.case import \
TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
skipUnless, expectedFailure
from django.utils.unittest.suite import BaseTestSuite, TestSuite
from django.utils.unittest.loader import \
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
findTestCases
from django.utils.unittest.main import TestProgram, main, main_
from django.utils.unittest.runner import TextTestRunner, TextTestResult
try:
from django.utils.unittest.signals import\
installHandler, registerResult, removeResult, removeHandler
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
__unittest = True
| bsd-3-clause |
pbaesse/Sissens | lib/python2.7/site-packages/eventlet/greenio/base.py | 1 | 17913 | import errno
import os
import socket
import sys
import time
import warnings
import eventlet
from eventlet.hubs import trampoline, notify_opened, IOClosed
from eventlet.support import get_errno, six
__all__ = [
'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
'shutdown_safe', 'SSL',
'socket_timeout',
]
BUFFER_SIZE = 4096
CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
CONNECT_SUCCESS = set((0, errno.EISCONN))
if sys.platform[:3] == "win":
CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67
if six.PY2:
_python2_fileobject = socket._fileobject
_original_socket = eventlet.patcher.original('socket').socket
socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
def socket_connect(descriptor, address):
"""
Attempts to connect to the address, returns the descriptor if it succeeds,
returns None if it needs to trampoline, and raises any exceptions.
"""
err = descriptor.connect_ex(address)
if err in CONNECT_ERR:
return None
if err not in CONNECT_SUCCESS:
raise socket.error(err, errno.errorcode[err])
return descriptor
def socket_checkerr(descriptor):
err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err not in CONNECT_SUCCESS:
raise socket.error(err, errno.errorcode[err])
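# --- Editor's sketch: the nonblocking-connect pattern the two helpers above
# implement, spelled out with plain select() (illustrative reference; the
# socket is assumed to be nonblocking already).
def _reference_nonblocking_connect(sock, address):
    import select
    if socket_connect(sock, address) is None:
        # EINPROGRESS and friends: wait for writability, then check SO_ERROR
        select.select([], [sock], [])
        socket_checkerr(sock)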
def socket_accept(descriptor):
"""
Attempts to accept() on the descriptor, returns a client,address tuple
if it succeeds; returns None if it needs to trampoline, and raises
any exceptions.
"""
try:
return descriptor.accept()
except socket.error as e:
if get_errno(e) == errno.EWOULDBLOCK:
return None
raise
if sys.platform[:3] == "win":
# winsock sometimes throws ENOTCONN
SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK,))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN))
else:
# oddly, on linux/darwin, an unconnected socket is expected to block,
# so we treat ENOTCONN the same as EWOULDBLOCK
SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE))
def set_nonblocking(fd):
"""
Sets the descriptor to be nonblocking. Works on many file-like
objects as well as sockets. Only sockets can be nonblocking on
Windows, however.
"""
try:
setblocking = fd.setblocking
except AttributeError:
# fd has no setblocking() method. It could be that this version of
# Python predates socket.setblocking(). In that case, we can still set
# the flag "by hand" on the underlying OS fileno using the fcntl
# module.
try:
import fcntl
except ImportError:
# Whoops, Windows has no fcntl module. This might not be a socket
# at all, but rather a file-like object with no setblocking()
# method. In particular, on Windows, pipes don't support
# non-blocking I/O and therefore don't have that method. Which
# means fcntl wouldn't help even if we could load it.
raise NotImplementedError("set_nonblocking() on a file object "
"with no setblocking() method "
"(Windows pipes don't support non-blocking I/O)")
# We managed to import fcntl.
fileno = fd.fileno()
orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
new_flags = orig_flags | os.O_NONBLOCK
if new_flags != orig_flags:
fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
else:
# socket supports setblocking()
setblocking(0)
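# --- Editor's sketch: the fcntl fallback above, spelled out for a raw file
# descriptor (POSIX only; illustrative).
def _set_fd_nonblocking(fileno):
    import fcntl
    flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
    fcntl.fcntl(fileno, fcntl.F_SETFL, flags | os.O_NONBLOCK)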
try:
from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
_GLOBAL_DEFAULT_TIMEOUT = object()
class GreenSocket(object):
"""
Green version of socket.socket class, that is intended to be 100%
API-compatible.
It also recognizes the keyword parameter, 'set_nonblocking=True'.
Pass False to indicate that socket is already in non-blocking mode
to save syscalls.
"""
# This placeholder is to prevent __getattr__ from creating an infinite call loop
fd = None
def __init__(self, family=socket.AF_INET, *args, **kwargs):
should_set_nonblocking = kwargs.pop('set_nonblocking', True)
if isinstance(family, six.integer_types):
fd = _original_socket(family, *args, **kwargs)
# Notify the hub that this is a newly-opened socket.
notify_opened(fd.fileno())
else:
fd = family
# import timeout from other socket, if it was there
try:
self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
except AttributeError:
self._timeout = socket.getdefaulttimeout()
if should_set_nonblocking:
set_nonblocking(fd)
self.fd = fd
# when client calls setblocking(0) or settimeout(0) the socket must
# act non-blocking
self.act_non_blocking = False
# Copy some attributes from underlying real socket.
# This is the easiest way that i found to fix
# https://bitbucket.org/eventlet/eventlet/issue/136
# Only `getsockopt` is required to fix that issue, others
# are just premature optimization to save __getattr__ call.
self.bind = fd.bind
self.close = fd.close
self.fileno = fd.fileno
self.getsockname = fd.getsockname
self.getsockopt = fd.getsockopt
self.listen = fd.listen
self.setsockopt = fd.setsockopt
self.shutdown = fd.shutdown
self._closed = False
@property
def _sock(self):
return self
if six.PY3:
def _get_io_refs(self):
return self.fd._io_refs
def _set_io_refs(self, value):
self.fd._io_refs = value
_io_refs = property(_get_io_refs, _set_io_refs)
# Forward unknown attributes to fd, cache the value for future use.
# I do not see any simple attribute which could be changed
# so caching everything in self is fine.
# If we find such attributes - only attributes having __get__ might be cached.
# For now - I do not want to complicate it.
def __getattr__(self, name):
if self.fd is None:
raise AttributeError(name)
attr = getattr(self.fd, name)
setattr(self, name, attr)
return attr
def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
""" We need to trampoline via the event hub.
We catch any signal back from the hub indicating that the operation we
were waiting on was associated with a filehandle that's since been
invalidated.
"""
if self._closed:
# If we did any logging, alerting to a second trampoline attempt on a closed
# socket here would be useful.
raise IOClosed()
try:
return trampoline(fd, read=read, write=write, timeout=timeout,
timeout_exc=timeout_exc,
mark_as_closed=self._mark_as_closed)
except IOClosed:
# This socket's been obsoleted. De-fang it.
self._mark_as_closed()
raise
def accept(self):
if self.act_non_blocking:
res = self.fd.accept()
notify_opened(res[0].fileno())
return res
fd = self.fd
_timeout_exc = socket_timeout('timed out')
while True:
res = socket_accept(fd)
if res is not None:
client, addr = res
notify_opened(client.fileno())
set_nonblocking(client)
return type(self)(client), addr
self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
def _mark_as_closed(self):
""" Mark this socket as being closed """
self._closed = True
def __del__(self):
# This is in case self.close is not assigned yet (currently the constructor does it)
close = getattr(self, 'close', None)
if close is not None:
close()
def connect(self, address):
if self.act_non_blocking:
return self.fd.connect(address)
fd = self.fd
_timeout_exc = socket_timeout('timed out')
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
except IOClosed:
raise socket.error(errno.EBADFD)
socket_checkerr(fd)
else:
end = time.time() + self.gettimeout()
while True:
if socket_connect(fd, address):
return
if time.time() >= end:
raise _timeout_exc
timeout = end - time.time()
try:
self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
except IOClosed:
# ... we need some workable errno here.
raise socket.error(errno.EBADFD)
socket_checkerr(fd)
def connect_ex(self, address):
if self.act_non_blocking:
return self.fd.connect_ex(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
socket_checkerr(fd)
except socket.error as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
else:
end = time.time() + self.gettimeout()
timeout_exc = socket.timeout(errno.EAGAIN)
while True:
try:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise timeout_exc
self._trampoline(fd, write=True, timeout=end - time.time(),
timeout_exc=timeout_exc)
socket_checkerr(fd)
except socket.error as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
def dup(self, *args, **kw):
sock = self.fd.dup(*args, **kw)
newsock = type(self)(sock, set_nonblocking=False)
newsock.settimeout(self.gettimeout())
return newsock
if six.PY3:
def makefile(self, *args, **kwargs):
return _original_socket.makefile(self, *args, **kwargs)
else:
def makefile(self, *args, **kwargs):
dupped = self.dup()
res = _python2_fileobject(dupped, *args, **kwargs)
if hasattr(dupped, "_drop"):
dupped._drop()
# Set dupped's close to None so that when the garbage collector kicks
# in and calls __del__ (which ultimately calls close), _drop doesn't
# get called on dupped a second time; it was already called explicitly
# on the previous line.
dupped.close = None
return res
def makeGreenFile(self, *args, **kw):
warnings.warn("makeGreenFile has been deprecated, please use "
"makefile instead", DeprecationWarning, stacklevel=2)
return self.makefile(*args, **kw)
def _read_trampoline(self):
self._trampoline(
self.fd,
read=True,
timeout=self.gettimeout(),
timeout_exc=socket_timeout('timed out'))
def _recv_loop(self, recv_meth, empty_val, *args):
fd = self.fd
if self.act_non_blocking:
return recv_meth(*args)
while True:
try:
# recv: bufsize=0?
# recv_into: buffer is empty?
# This is needed because behind the scenes we use sockets in
# nonblocking mode and builtin recv* methods. Attempting to read
# 0 bytes from a nonblocking socket using a builtin recv* method
# does not raise a timeout exception. Since we're simulating
# a blocking socket here we need to produce a timeout exception
# if needed, hence the call to trampoline.
if not args[0]:
self._read_trampoline()
return recv_meth(*args)
except socket.error as e:
if get_errno(e) in SOCKET_BLOCKING:
pass
elif get_errno(e) in SOCKET_CLOSED:
return empty_val
else:
raise
try:
self._read_trampoline()
except IOClosed as e:
# Perhaps we should return '' instead?
raise EOFError()
def recv(self, bufsize, flags=0):
return self._recv_loop(self.fd.recv, b'', bufsize, flags)
def recvfrom(self, bufsize, flags=0):
return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)
def recv_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)
def recvfrom_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)
def _send_loop(self, send_method, data, *args):
if self.act_non_blocking:
return send_method(data, *args)
_timeout_exc = socket_timeout('timed out')
while True:
try:
return send_method(data, *args)
except socket.error as e:
eno = get_errno(e)
if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
raise
try:
self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
timeout_exc=_timeout_exc)
except IOClosed:
raise socket.error(errno.ECONNRESET, 'Connection closed by another thread')
def send(self, data, flags=0):
return self._send_loop(self.fd.send, data, flags)
def sendto(self, data, *args):
return self._send_loop(self.fd.sendto, data, *args)
def sendall(self, data, flags=0):
tail = self.send(data, flags)
len_data = len(data)
while tail < len_data:
tail += self.send(data[tail:], flags)
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def settimeout(self, howlong):
if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
self.setblocking(True)
return
try:
f = howlong.__float__
except AttributeError:
raise TypeError('a float is required')
howlong = f()
if howlong < 0.0:
raise ValueError('Timeout value out of range')
if howlong == 0.0:
self.act_non_blocking = True
self._timeout = 0.0
else:
self.act_non_blocking = False
self._timeout = howlong
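# Net effect mirrors the stdlib socket semantics: settimeout(None) ->
# blocking, settimeout(0) -> non-blocking, settimeout(t > 0) -> blocking
# with a t-second deadline enforced via the trampoline.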
def gettimeout(self):
return self._timeout
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
if "__pypy__" in sys.builtin_module_names:
def _reuse(self):
getattr(self.fd, '_sock', self.fd)._reuse()
def _drop(self):
getattr(self.fd, '_sock', self.fd)._drop()
def _operation_on_closed_file(*args, **kwargs):
raise ValueError("I/O operation on closed file")
greenpipe_doc = """
GreenPipe is a cooperative replacement for the file class.
It will cooperate on pipes. It will block on regular files.
Differences from the file class:
- mode is a r/w property. Should be r/o
- encoding property not implemented
- write/writelines will not raise a TypeError exception when non-string data
is written; it will write str(data) instead
- Universal newlines are not supported and the newlines property is not implemented
- file argument can be a descriptor, file name or file object.
"""
# import SSL module here so we can refer to greenio.SSL.exceptionclass
try:
from OpenSSL import SSL
except ImportError:
# pyOpenSSL not installed, define exceptions anyway for convenience
class SSL(object):
class WantWriteError(Exception):
pass
class WantReadError(Exception):
pass
class ZeroReturnError(Exception):
pass
class SysCallError(Exception):
pass
def shutdown_safe(sock):
""" Shuts down the socket. This is a convenience method for
code that wants to gracefully handle regular sockets, SSL.Connection
sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.6
interchangeably. Both types of ssl socket require a shutdown() before
close, but they have different arity on their shutdown method.
Regular sockets don't need a shutdown before close, but it doesn't hurt.
"""
try:
try:
# socket, ssl.SSLSocket
return sock.shutdown(socket.SHUT_RDWR)
except TypeError:
# SSL.Connection
return sock.shutdown()
except socket.error as e:
# we don't care if the socket is already closed;
# this will often be the case in an http server context
if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
raise
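# Illustrative usage; the call shape is the same for plain and wrapped
# sockets (names here are hypothetical):
#   shutdown_safe(tcp_sock)   # socket / ssl.SSLSocket -> shutdown(SHUT_RDWR)
#   shutdown_safe(ssl_conn)   # OpenSSL SSL.Connection -> shutdown(), no args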
| gpl-3.0 |
hbrunn/OpenUpgrade | openerp/report/render/rml2pdf/utils.py | 381 | 7143 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2003, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
return (s or '').replace('&', '&').replace('<', '<').replace('>', '>')
def xml2str(s):
return (s or '').replace('&','&').replace('<','<').replace('>','>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
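# Sketch of the control attributes this generator understands; the values
# shown are hypothetical expressions evaluated against self.localcontext:
#   rml_loop="[{'o': o} for o in objects]"  -> repeat the node once per context
#   rml_except="check(o)"                   -> skip the node if eval raises
#   rml_tag="('td', {'colspan': '2'})"      -> rename/re-attribute the node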
def _process_text(self, txt):
"""Translate ``txt`` according to the language in the local context,
replace dynamic ``[[expr]]`` with their real value, then escape
the result for XML.
:param str txt: original text to translate (must NOT be XML-escaped)
:return: translated text, with dynamic expressions evaluated and
with special XML characters escaped (``&,<,>``).
"""
if not self.localcontext:
return str2xml(txt)
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
to_translate = tools.ustr(sps.pop(0))
result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
if sps:
txt = None
try:
expr = sps.pop(0)
txt = eval(expr, self.localcontext)
if txt and isinstance(txt, basestring):
txt = tools.ustr(txt)
except Exception:
_logger.error("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
if isinstance(txt, basestring):
result += txt
elif txt and (txt is not None) and (txt is not False):
result += ustr(txt)
return str2xml(result)
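# Illustrative call (hypothetical localcontext): with
#   self.localcontext = {'translate': lambda s: s, 'name': 'A & B'}
# _process_text(self, 'Hello [[ name ]]') returns 'Hello A &amp; B' --
# the [[...]] expression is evaluated first, then the whole result is
# XML-escaped.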
def text_get(node):
return ''.join([ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
if size.find('.') == -1:
decimal_point = '.'
try:
decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
except Exception:
decimal_point = locale.localeconv()['decimal_point']
size = size.replace(decimal_point, '.')
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
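# Illustrative conversions (reportlab units are points; inch == 72.0):
#   unit_get('1.5in') -> 108.0
#   unit_get('10mm')  -> ~28.35
#   unit_get('12')    -> 12.0 (bare numbers are taken as points)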
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = tools.ustr(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
elif dict[key] == 'float' :
res[key] = float(node.get(key))
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EraYaN/CouchPotatoServer | couchpotato/core/notifications/core/main.py | 52 | 8645 | from operator import itemgetter
import threading
import time
import traceback
import uuid
from CodernityDB.database import RecordDeleted
from couchpotato import get_db
from couchpotato.api import addApiView, addNonBlockApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from .index import NotificationIndex, NotificationUnreadIndex
from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__)
class CoreNotifier(Notification):
_database = {
'notification': NotificationIndex,
'notification_unread': NotificationUnreadIndex
}
m_lock = None
listen_to = [
'media.available',
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
]
def __init__(self):
super(CoreNotifier, self).__init__()
addEvent('notify', self.notify)
addEvent('notify.frontend', self.frontend)
addApiView('notification.markread', self.markAsRead, docs = {
'desc': 'Mark notifications as read',
'params': {
'ids': {'desc': 'Notification id you want to mark as read. All if ids is empty.', 'type': 'int (comma separated)'},
},
})
addApiView('notification.list', self.listView, docs = {
'desc': 'Get list of notifications',
'params': {
'limit_offset': {'desc': 'Limit and offset the notification list. Examples: "50" or "50,30"'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any notification returned or not,
'notifications': array, notifications found,
}"""}
})
addNonBlockApiView('notification.listener', (self.addListener, self.removeListener))
addApiView('notification.listener', self.listener)
fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True)
fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True)
addEvent('app.load', self.clean)
if not Env.get('dev'):
addEvent('app.load', self.checkMessages)
self.messages = []
self.listeners = []
self.m_lock = threading.Lock()
def clean(self):
try:
db = get_db()
for n in db.all('notification', with_doc = True):
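# Prune notifications older than 4 weeks (2419200 seconds).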
if n['doc'].get('time', 0) <= (int(time.time()) - 2419200):
db.delete(n['doc'])
except:
log.error('Failed cleaning notification: %s', traceback.format_exc())
def markAsRead(self, ids = None, **kwargs):
ids = splitString(ids) if ids else None
try:
db = get_db()
for x in db.all('notification_unread', with_doc = True):
if not ids or x['_id'] in ids:
x['doc']['read'] = True
db.update(x['doc'])
return {
'success': True
}
except:
log.error('Failed mark as read: %s', traceback.format_exc())
return {
'success': False
}
def listView(self, limit_offset = None, **kwargs):
db = get_db()
if limit_offset:
splt = splitString(limit_offset)
limit = tryInt(splt[0])
offset = tryInt(0 if len(splt) == 1 else splt[1])
results = db.all('notification', limit = limit, offset = offset, with_doc = True)
else:
results = db.all('notification', limit = 200, with_doc = True)
notifications = []
for n in results:
notifications.append(n['doc'])
return {
'success': True,
'empty': len(notifications) == 0,
'notifications': notifications
}
def checkMessages(self):
prop_name = 'messages.last_check'
last_check = tryInt(Env.prop(prop_name, default = 0))
messages = fireEvent('cp.messages', last_check = last_check, single = True) or []
for message in messages:
if message.get('time') > last_check:
message['sticky'] = True # Always sticky core messages
message_type = 'core.message.important' if message.get('important') else 'core.message'
fireEvent(message_type, message = message.get('message'), data = message)
if last_check < message.get('time'):
last_check = message.get('time')
Env.prop(prop_name, value = last_check)
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
n = {
'_t': 'notification',
'time': int(time.time()),
}
try:
db = get_db()
n['message'] = toUnicode(message)
if data.get('sticky'):
n['sticky'] = True
if data.get('important'):
n['important'] = True
db.insert(n)
self.frontend(type = listener, data = n)
return True
except:
log.error('Failed notify "%s": %s', (n, traceback.format_exc()))
def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {}
log.debug('Notifying frontend')
self.m_lock.acquire()
notification = {
'message_id': str(uuid.uuid4()),
'time': time.time(),
'type': type,
'data': data,
'message': message,
}
self.messages.append(notification)
while len(self.listeners) > 0 and not self.shuttingDown():
try:
listener, last_id = self.listeners.pop()
IOLoop.current().add_callback(listener, {
'success': True,
'result': [notification],
})
except:
log.debug('Failed sending to listener: %s', traceback.format_exc())
self.listeners = []
self.m_lock.release()
log.debug('Done notifying frontend')
def addListener(self, callback, last_id = None):
if last_id:
messages = self.getMessages(last_id)
if len(messages) > 0:
return callback({
'success': True,
'result': messages,
})
self.m_lock.acquire()
self.listeners.append((callback, last_id))
self.m_lock.release()
def removeListener(self, callback):
self.m_lock.acquire()
new_listeners = []
for list_tuple in self.listeners:
try:
listener, last_id = list_tuple
if listener != callback:
new_listeners.append(list_tuple)
except:
log.debug('Failed removing listener: %s', traceback.format_exc())
self.listeners = new_listeners
self.m_lock.release()
def cleanMessages(self):
if len(self.messages) == 0:
return
log.debug('Cleaning messages')
self.m_lock.acquire()
time_ago = (time.time() - 15)
self.messages[:] = [m for m in self.messages if (m['time'] > time_ago)]
self.m_lock.release()
log.debug('Done cleaning messages')
def getMessages(self, last_id):
log.debug('Getting messages with id: %s', last_id)
self.m_lock.acquire()
recent = []
try:
index = map(itemgetter('message_id'), self.messages).index(last_id)
recent = self.messages[index + 1:]
except:
pass
self.m_lock.release()
log.debug('Returning for %s %s messages', (last_id, len(recent)))
return recent
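# e.g. with queued message_ids [a, b, c] and last_id == b this returns
# just the messages after b ([c]); an unknown last_id raises ValueError
# into the bare except and yields [].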
def listener(self, init = False, **kwargs):
messages = []
# Get last message
if init:
db = get_db()
notifications = db.all('notification')
for n in notifications:
try:
doc = db.get('id', n.get('_id'))
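# Limit the replay window to the last week (604800 seconds).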
if doc.get('time') > (time.time() - 604800):
messages.append(doc)
except RecordDeleted:
pass
return {
'success': True,
'result': messages,
}
| gpl-3.0 |
xingzhou/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 7 | 36222 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication: we check whether the filesize is < 1MB
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify the user that we are in a transient state and the application
is still converging. We may be waiting on a remote unit, or sitting in a
detached wait loop. '''
# During deployment the worker has to start kubelet without cluster dns
# configured. The first unit online in a service pool may need to
# self-host the dns pod and then configure itself to query the dns
# service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
set_state('kubernetes-worker.restart-needed')
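# Example 'docker-logins' config value (hypothetical credentials):
# [{"server": "registry.example.com", "username": "bot", "password": "s3cret"}]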
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
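# Illustrative parse (hypothetical config value): the string
#   "v=4 runtime-config=api/all=true profiling"
# yields {'v': '4', 'runtime-config': 'api/all=true', 'profiling': 'true'};
# bare flags default to 'true', and only the first '=' splits key/value.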
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
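# Illustration (hypothetical args): if the previous run set {'v': '4'} and
# the new args are {'port': '10250'}, the resulting command is roughly
#   snap set kubelet v=null port=10250
# so stale options are cleared in the same atomic 'snap set' call.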
def configure_kubelet(dns):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
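# Typical invocation (hypothetical server address, token and paths):
#   create_kubeconfig('/root/cdk/kubeconfig', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', token='abc123', user='kubelet')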
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend:1.4"
if arch() == 's390x':
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend-s390x:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = \
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
if arch() == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
# The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC.
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if the operator is
upgrading a charm in a deployment that pre-dates the kube-control
relation, it'll be missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class GetNodeNameFailed(Exception):
pass
def get_node_name():
# Get all the nodes in the cluster
cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
try:
raw = check_output(cmd)
break
except CalledProcessError:
hookenv.log('Failed to get node name for node %s.'
' Will retry.' % (gethostname()))
time.sleep(1)
else:
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
result = json.loads(raw.decode('utf-8'))
if 'items' in result:
for node in result['items']:
if 'status' not in node:
continue
if 'addresses' not in node['status']:
continue
# find the hostname
for address in node['status']['addresses']:
if address['type'] == 'Hostname':
if address['address'] == gethostname():
return node['metadata']['name']
# if we didn't match, just bail to the next node
break
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
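# e.g. _parse_labels('gpu=true zone=us-east oops') keeps
# ['gpu=true', 'zone=us-east'] and logs a skip for the malformed 'oops'.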
| apache-2.0 |
redhat-openstack/swift | swift/container/reconciler.py | 33 | 31483 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
import socket
import itertools
import logging
from eventlet import GreenPile, GreenPool, Timeout
from swift.common import constraints
from swift.common.daemon import Daemon
from swift.common.direct_client import (
direct_head_container, direct_delete_container_object,
direct_put_container_object, ClientException)
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, split_path, quorum_size, \
FileLikeIter, Timestamp, last_modified_date_to_timestamp, \
LRUCache
MISPLACED_OBJECTS_ACCOUNT = '.misplaced_objects'
MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600 # 1 hour
CONTAINER_POLICY_TTL = 30
def cmp_policy_info(info, remote_info):
"""
You have to squint to see it, but the general strategy is just:
if either has been recreated:
return the newest (of the recreated)
else
return the oldest
I tried cleaning it up for a while, but settled on just writing a bunch
of tests instead. Once you get an intuitive sense for the nuance here
you can try to find a better way to spell the boolean logic, but it all
ends up looking sorta hairy.
:returns: -1 if info is correct, 1 if remote_info is better
"""
def is_deleted(info):
return (info['delete_timestamp'] > info['put_timestamp'] and
info.get('count', info.get('object_count', 0)) == 0)
deleted = is_deleted(info)
remote_deleted = is_deleted(remote_info)
if any([deleted, remote_deleted]):
if not deleted:
return -1
elif not remote_deleted:
return 1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
def has_been_recreated(info):
return (info['put_timestamp'] > info['delete_timestamp'] >
Timestamp(0))
remote_recreated = has_been_recreated(remote_info)
recreated = has_been_recreated(info)
if any([remote_recreated, recreated]):
if not recreated:
return 1
elif not remote_recreated:
return -1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
return cmp(info['status_changed_at'], remote_info['status_changed_at'])
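# Worked example (hypothetical info dicts): a container deleted on the
# remote but still live locally resolves in favor of the live copy:
#   info = {'put_timestamp': '2', 'delete_timestamp': '1',
#           'status_changed_at': '2', 'count': 1}
#   remote = {'put_timestamp': '1', 'delete_timestamp': '3',
#             'status_changed_at': '3', 'object_count': 0}
#   cmp_policy_info(info, remote) -> -1  (info's policy index is correct)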
def incorrect_policy_index(info, remote_info):
"""
Compare remote_info to info and decide if the remote storage policy index
should be used instead of ours.
"""
if 'storage_policy_index' not in remote_info:
return False
if remote_info['storage_policy_index'] == \
info['storage_policy_index']:
return False
return info['storage_policy_index'] != sorted(
[info, remote_info], cmp=cmp_policy_info)[0]['storage_policy_index']
def translate_container_headers_to_info(headers):
default_timestamp = Timestamp(0).internal
return {
'storage_policy_index': int(headers['X-Backend-Storage-Policy-Index']),
'put_timestamp': headers.get('x-backend-put-timestamp',
default_timestamp),
'delete_timestamp': headers.get('x-backend-delete-timestamp',
default_timestamp),
'status_changed_at': headers.get('x-backend-status-changed-at',
default_timestamp),
}
def best_policy_index(headers):
container_info = map(translate_container_headers_to_info, headers)
container_info.sort(cmp=cmp_policy_info)
return container_info[0]['storage_policy_index']
def get_reconciler_container_name(obj_timestamp):
return str(int(Timestamp(obj_timestamp)) //
MISPLACED_OBJECTS_CONTAINER_DIVISOR *
MISPLACED_OBJECTS_CONTAINER_DIVISOR)
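# e.g. an object timestamp of 1500000001.00000 lands in the hourly bucket
# container named '1499997600' (1500000001 // 3600 * 3600).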
def get_reconciler_obj_name(policy_index, account, container, obj):
return "%(policy_index)d:/%(acc)s/%(con)s/%(obj)s" % {
'policy_index': policy_index, 'acc': account,
'con': container, 'obj': obj}
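# e.g. get_reconciler_obj_name(2, 'AUTH_a', 'c', 'o') -> '2:/AUTH_a/c/o'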
def get_reconciler_content_type(op):
try:
return {
'put': 'application/x-put',
'delete': 'application/x-delete',
}[op.lower()]
except KeyError:
raise ValueError('invalid operation type %r' % op)
def get_row_to_q_entry_translator(broker):
account = broker.account
container = broker.container
op_type = {
0: get_reconciler_content_type('put'),
1: get_reconciler_content_type('delete'),
}
def translator(obj_info):
name = get_reconciler_obj_name(obj_info['storage_policy_index'],
account, container,
obj_info['name'])
return {
'name': name,
'deleted': 0,
'created_at': obj_info['created_at'],
'etag': obj_info['created_at'],
'content_type': op_type[obj_info['deleted']],
'size': 0,
}
return translator
def add_to_reconciler_queue(container_ring, account, container, obj,
obj_policy_index, obj_timestamp, op,
force=False, conn_timeout=5, response_timeout=15):
"""
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
:returns: .misplaced_object container name, False on failure. "Success"
means a quorum of containers got the update.
"""
container_name = get_reconciler_container_name(obj_timestamp)
object_name = get_reconciler_obj_name(obj_policy_index, account,
container, obj)
if force:
# this allows an operator to re-enqueue an object that has
# already been popped from the queue to be reprocessed, but
# could potentially prevent out of order updates from making it
# into the queue
x_timestamp = Timestamp(time.time()).internal
else:
x_timestamp = obj_timestamp
q_op_type = get_reconciler_content_type(op)
headers = {
'X-Size': 0,
'X-Etag': obj_timestamp,
'X-Timestamp': x_timestamp,
'X-Content-Type': q_op_type,
}
def _check_success(*args, **kwargs):
try:
direct_put_container_object(*args, **kwargs)
return 1
except (ClientException, Timeout, socket.error):
return 0
pile = GreenPile()
part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
container_name)
for node in nodes:
pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
container_name, object_name, headers=headers,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
successes = sum(pile)
if successes >= quorum_size(len(nodes)):
return container_name
else:
return False
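# Illustrative sketch (not part of the original module): enqueueing a
# misplaced PUT. The ring is a hypothetical stand-in; the call returns the
# hourly queue container name on success, or False when no quorum of
# container servers accepted the entry.
def _sketch_enqueue(container_ring):
    return add_to_reconciler_queue(
        container_ring, 'AUTH_test', 'mycontainer', 'myobj',
        obj_policy_index=1, obj_timestamp='1419894061.26356', op='PUT')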
def slightly_later_timestamp(ts, offset=1):
return Timestamp(ts, offset=offset).internal
def parse_raw_obj(obj_info):
"""
Translate a reconciler container listing entry to a dictionary
containing the parts of the misplaced object queue entry.
    :param obj_info: an entry in a container listing with the
required keys: name, content_type, and hash
:returns: a queue entry dict with the keys: q_policy_index, account,
container, obj, q_op, q_ts, q_record, and path
"""
raw_obj_name = obj_info['name'].encode('utf-8')
policy_index, obj_name = raw_obj_name.split(':', 1)
q_policy_index = int(policy_index)
account, container, obj = split_path(obj_name, 3, 3, rest_with_last=True)
try:
q_op = {
'application/x-put': 'PUT',
'application/x-delete': 'DELETE',
}[obj_info['content_type']]
except KeyError:
raise ValueError('invalid operation type %r' %
obj_info.get('content_type', None))
return {
'q_policy_index': q_policy_index,
'account': account,
'container': container,
'obj': obj,
'q_op': q_op,
'q_ts': Timestamp(obj_info['hash']),
'q_record': last_modified_date_to_timestamp(
obj_info['last_modified']),
'path': '/%s/%s/%s' % (account, container, obj)
}
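# Illustrative sketch (not part of the original module): a queue listing
# entry, as produced by the enqueue path above, round-trips through
# parse_raw_obj(). The literal values are hypothetical.
def _sketch_parse_queue_entry():
    entry = parse_raw_obj({
        'name': u'1:/AUTH_test/mycontainer/myobj',
        'content_type': 'application/x-put',
        'hash': Timestamp(1419894061.26356).internal,
        'last_modified': '2014-12-29T22:21:01.263560',
    })
    assert entry['q_policy_index'] == 1
    assert entry['q_op'] == 'PUT'
    assert entry['path'] == '/AUTH_test/mycontainer/myobj'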
@LRUCache(maxtime=CONTAINER_POLICY_TTL)
def direct_get_container_policy_index(container_ring, account_name,
container_name):
"""
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a quorum
"""
def _eat_client_exception(*args):
try:
return direct_head_container(*args)
except ClientException as err:
if err.http_status == 404:
return err.http_headers
except (Timeout, socket.error):
pass
pile = GreenPile()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pile.spawn(_eat_client_exception, node, part, account_name,
container_name)
headers = [x for x in pile if x is not None]
if len(headers) < quorum_size(len(nodes)):
return
return best_policy_index(headers)
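# Illustrative sketch (not part of the original module): with three replicas
# quorum_size(3) == 2, so the lookup above tolerates one bad container
# server and returns None only when a majority of HEADs fail. 404s still
# count as answers because even a deleted container's headers carry a
# usable storage policy index. The ring below is a hypothetical stand-in.
def _sketch_policy_lookup(container_ring):
    return direct_get_container_policy_index(
        container_ring, 'AUTH_test', 'mycontainer')  # int index or None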
def direct_delete_container_entry(container_ring, account_name, container_name,
object_name, headers=None):
"""
Talk directly to the primary container servers to delete a particular
object listing. Does not talk to object servers; use this only when a
container entry does not actually have a corresponding object.
"""
pool = GreenPool()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pool.spawn_n(direct_delete_container_object, node, part, account_name,
container_name, object_name, headers=headers)
# This either worked or it didn't; if it didn't, we'll retry on the next
# reconciler loop when we see the queue entry again.
pool.waitall()
class ContainerReconciler(Daemon):
"""
Move objects that are in the wrong storage policy.
"""
def __init__(self, conf):
self.conf = conf
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.interval = int(conf.get('interval', 30))
conf_path = conf.get('__file__') or \
'/etc/swift/container-reconciler.conf'
self.logger = get_logger(conf, log_route='container-reconciler')
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Container Reconciler',
request_tries)
self.stats = defaultdict(int)
self.last_stat_time = time.time()
def stats_log(self, metric, msg, *args, **kwargs):
"""
Update stats tracking for metric and emit log message.
"""
level = kwargs.pop('level', logging.DEBUG)
log_message = '%s: ' % metric + msg
self.logger.log(level, log_message, *args, **kwargs)
self.stats[metric] += 1
def log_stats(self, force=False):
"""
        Dump stats to logger, noop when stats have already been
logged in the last minute.
"""
now = time.time()
should_log = force or (now - self.last_stat_time > 60)
if should_log:
self.last_stat_time = now
self.logger.info('Reconciler Stats: %r', dict(**self.stats))
def pop_queue(self, container, obj, q_ts, q_record):
"""
Issue a delete object request to the container for the misplaced
object queue entry.
:param container: the misplaced objects container
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param q_record: the timestamp of the queue entry
N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
"""
q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
q_path, q_ts, x_timestamp)
headers = {'X-Timestamp': x_timestamp}
direct_delete_container_entry(
self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
container, obj, headers=headers)
def throw_tombstones(self, account, container, obj, timestamp,
policy_index, path):
"""
Issue a delete object request to the given storage_policy.
:param account: the account name
:param container: the container name
:param obj: the object name
:param timestamp: the timestamp of the object to delete
:param policy_index: the policy index to direct the request
:param path: the path to be used for logging
"""
x_timestamp = slightly_later_timestamp(timestamp)
self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
'%s (%s) will be deleted',
path, timestamp, policy_index, x_timestamp)
headers = {
'X-Timestamp': x_timestamp,
'X-Backend-Storage-Policy-Index': policy_index,
}
success = False
try:
self.swift.delete_object(account, container, obj,
acceptable_statuses=(2, 404),
headers=headers)
except UnexpectedResponse as err:
self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
'in storage_policy %s (%s)', path, timestamp,
policy_index, err)
else:
success = True
self.stats_log('cleanup_success', '%r (%f) was successfully '
'removed from policy_index %s', path, timestamp,
policy_index)
return success
def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
q_op, path, **kwargs):
"""
Perform object reconciliation.
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the object name
:param q_policy_index: the policy index of the source indicated by the
queue entry.
:param q_ts: the timestamp of the misplaced object
:param q_op: the operation of the misplaced request
:param path: the full path of the misplaced object for logging
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
container_policy_index = direct_get_container_policy_index(
self.swift.container_ring, account, container)
if container_policy_index is None:
self.stats_log('unavailable_container', '%r (%f) unable to '
'determine the destination policy_index',
path, q_ts)
return False
if container_policy_index == q_policy_index:
self.stats_log('noop_object', '%r (%f) container policy_index '
'%s matches queue policy index %s', path, q_ts,
container_policy_index, q_policy_index)
return True
# check if object exists in the destination already
self.logger.debug('checking for %r (%f) in destination '
'policy_index %s', path, q_ts,
container_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index}
dest_obj = self.swift.get_object_metadata(account, container, obj,
headers=headers,
acceptable_statuses=(2, 4))
dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
if dest_ts >= q_ts:
self.stats_log('found_object', '%r (%f) in policy_index %s '
'is newer than queue (%f)', path, dest_ts,
container_policy_index, q_ts)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
# object is misplaced
self.stats_log('misplaced_object', '%r (%f) in policy_index %s '
'should be in policy_index %s', path, q_ts,
q_policy_index, container_policy_index)
# fetch object from the source location
self.logger.debug('fetching %r (%f) from storage policy %s', path,
q_ts, q_policy_index)
headers = {
'X-Backend-Storage-Policy-Index': q_policy_index}
try:
source_obj_status, source_obj_info, source_obj_iter = \
self.swift.get_object(account, container, obj,
headers=headers,
acceptable_statuses=(2, 4))
except UnexpectedResponse as err:
source_obj_status = err.resp.status_int
source_obj_info = {}
source_obj_iter = None
source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
if source_obj_status == 404 and q_op == 'DELETE':
return self.ensure_tombstone_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts)
else:
return self.ensure_object_in_right_location(
q_policy_index, account, container, obj, q_ts, path,
container_policy_index, source_ts, source_obj_status,
source_obj_info, source_obj_iter)
def ensure_object_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
source_obj_status, source_obj_info,
source_obj_iter, **kwargs):
"""
Validate source object will satisfy the misplaced object queue entry
and move to destination.
:param q_policy_index: the policy_index for the source object
:param account: the account name of the misplaced object
:param container: the container name of the misplaced object
:param obj: the name of the misplaced object
:param q_ts: the timestamp of the misplaced object
:param path: the full path of the misplaced object for logging
:param container_policy_index: the policy_index of the destination
:param source_ts: the timestamp of the source object
:param source_obj_status: the HTTP status source object request
:param source_obj_info: the HTTP headers of the source object request
:param source_obj_iter: the body iter of the source object request
"""
if source_obj_status // 100 != 2 or source_ts < q_ts:
if q_ts < time.time() - self.reclaim_age:
# it's old and there are no tombstones or anything; give up
self.stats_log('lost_source', '%r (%s) was not available in '
'policy_index %s and has expired', path,
q_ts.internal, q_policy_index,
level=logging.CRITICAL)
return True
# the source object is unavailable or older than the queue
# entry; a version that will satisfy the queue entry hopefully
# exists somewhere in the cluster, so wait and try again
self.stats_log('unavailable_source', '%r (%s) in '
'policy_index %s responded %s (%s)', path,
q_ts.internal, q_policy_index, source_obj_status,
source_ts.internal, level=logging.WARNING)
return False
# optimistically move any source with a timestamp >= q_ts
ts = max(Timestamp(source_ts), q_ts)
# move the object
put_timestamp = slightly_later_timestamp(ts, offset=2)
self.stats_log('copy_attempt', '%r (%f) in policy_index %s will be '
'moved to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
headers = source_obj_info.copy()
headers['X-Backend-Storage-Policy-Index'] = container_policy_index
headers['X-Timestamp'] = put_timestamp
try:
self.swift.upload_object(
FileLikeIter(source_obj_iter), account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('copy_failed', 'upload %r (%f) from '
'policy_index %s to policy_index %s '
'returned %s', path, source_ts, q_policy_index,
container_policy_index, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to upload %r (%f) '
'from policy_index %s to policy_index %s ', path,
source_ts, q_policy_index, container_policy_index,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('copy_success', '%r (%f) moved from policy_index %s '
'to policy_index %s (%s)', path, source_ts,
q_policy_index, container_policy_index, put_timestamp)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def ensure_tombstone_in_right_location(self, q_policy_index, account,
container, obj, q_ts, path,
container_policy_index, source_ts,
**kwargs):
"""
Issue a DELETE request against the destination to match the
misplaced DELETE against the source.
"""
delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
self.stats_log('delete_attempt', '%r (%f) in policy_index %s '
'will be deleted from policy_index %s (%s)', path,
source_ts, q_policy_index, container_policy_index,
delete_timestamp)
headers = {
'X-Backend-Storage-Policy-Index': container_policy_index,
'X-Timestamp': delete_timestamp,
}
try:
self.swift.delete_object(account, container, obj,
headers=headers)
except UnexpectedResponse as err:
self.stats_log('delete_failed', 'delete %r (%f) from '
'policy_index %s (%s) returned %s', path,
source_ts, container_policy_index,
delete_timestamp, err, level=logging.WARNING)
return False
except: # noqa
self.stats_log('unhandled_error', 'unable to delete %r (%f) '
'from policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.ERROR, exc_info=True)
return False
self.stats_log('delete_success', '%r (%f) deleted from '
'policy_index %s (%s)', path, source_ts,
container_policy_index, delete_timestamp,
level=logging.INFO)
return self.throw_tombstones(account, container, obj, q_ts,
q_policy_index, path)
def reconcile_object(self, info):
"""
Process a possibly misplaced object write request. Determine correct
destination storage policy by checking with primary containers. Check
source and destination, copying or deleting into destination and
cleaning up the source as needed.
This method wraps _reconcile_object for exception handling.
:param info: a queue entry dict
:returns: True to indicate the request is fully processed
successfully, otherwise False.
"""
self.logger.debug('checking placement for %r (%f) '
'in policy_index %s', info['path'],
info['q_ts'], info['q_policy_index'])
success = False
try:
success = self._reconcile_object(**info)
except: # noqa
self.logger.exception('Unhandled Exception trying to '
'reconcile %r (%f) in policy_index %s',
info['path'], info['q_ts'],
info['q_policy_index'])
if success:
metric = 'success'
msg = 'was handled successfully'
else:
metric = 'retry'
msg = 'must be retried'
msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
self.stats_log(metric, msg, info, level=logging.INFO)
self.log_stats()
return success
def _iter_containers(self):
"""
Generate a list of containers to process.
"""
# hit most recent container first instead of waiting on the updaters
current_container = get_reconciler_container_name(time.time())
yield current_container
container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
self.logger.debug('looking for containers in %s',
MISPLACED_OBJECTS_ACCOUNT)
while True:
one_page = None
try:
one_page = list(itertools.islice(
container_gen, constraints.CONTAINER_LISTING_LIMIT))
except UnexpectedResponse as err:
self.logger.error('Error listing containers in '
'account %s (%s)',
MISPLACED_OBJECTS_ACCOUNT, err)
if not one_page:
# don't generally expect more than one page
break
# reversed order since we expect older containers to be empty
for c in reversed(one_page):
# encoding here is defensive
container = c['name'].encode('utf8')
if container == current_container:
continue # we've already hit this one this pass
yield container
def _iter_objects(self, container):
"""
Generate a list of objects to process.
:param container: the name of the container to process
If the given container is empty and older than reclaim_age this
processor will attempt to reap it.
"""
self.logger.debug('looking for objects in %s', container)
found_obj = False
try:
for raw_obj in self.swift.iter_objects(
MISPLACED_OBJECTS_ACCOUNT, container):
found_obj = True
yield raw_obj
except UnexpectedResponse as err:
self.logger.error('Error listing objects in container %s (%s)',
container, err)
if float(container) < time.time() - self.reclaim_age and \
not found_obj:
# Try to delete old empty containers so the queue doesn't
# grow without bound. It's ok if there's a conflict.
self.swift.delete_container(
MISPLACED_OBJECTS_ACCOUNT, container,
acceptable_statuses=(2, 404, 409, 412))
def reconcile(self):
"""
Main entry point for processing misplaced objects.
Iterate over all queue entries and delegate to reconcile_object.
"""
self.logger.debug('pulling items from the queue')
for container in self._iter_containers():
for raw_obj in self._iter_objects(container):
try:
obj_info = parse_raw_obj(raw_obj)
except Exception:
self.stats_log('invalid_record',
'invalid queue record: %r', raw_obj,
level=logging.ERROR, exc_info=True)
continue
finished = self.reconcile_object(obj_info)
if finished:
self.pop_queue(container, raw_obj['name'],
obj_info['q_ts'],
obj_info['q_record'])
self.log_stats()
self.logger.debug('finished container %s', container)
def run_once(self, *args, **kwargs):
"""
Process every entry in the queue.
"""
try:
self.reconcile()
except: # noqa
self.logger.exception('Unhandled Exception trying to reconcile')
self.log_stats(force=True)
def run_forever(self, *args, **kwargs):
while True:
self.run_once(*args, **kwargs)
self.stats = defaultdict(int)
self.logger.info('sleeping between intervals (%ss)', self.interval)
time.sleep(self.interval)
| apache-2.0 |
Silmathoron/nest-simulator | extras/ConnPlotter/setup.py | 15 | 1286 | # -*- coding: utf-8 -*-
#
# setup.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
from distutils.core import setup
setup(name='ConnPlotter',
version='0.7a',
description=('ConnPlotter is a tool to create ' +
'connectivity pattern tables'),
author='Hans Ekkehard Plesser (Idea: Eilen Nordlie)',
author_email='hans.ekkehard.plesser@umb.no',
url='https://www.nest-simulator.org',
license='GNU Public License v2 or later',
packages=['ConnPlotter', 'ConnPlotter.examples'],
package_dir={'ConnPlotter': ''}
)
| gpl-2.0 |
oihane/server-tools | __unported__/configuration_helper/config.py | 56 | 4901 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: David BEAL, Copyright 2014 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv import orm, fields
class AbstractConfigSettings(orm.AbstractModel):
_name = 'abstract.config.settings'
_description = 'Abstract configuration settings'
# prefix field name to differentiate fields in company with those in config
_prefix = 'setting_'
# this is the class name to import in your module
# (it should be ResCompany or res_company, depends of your code)
_companyObject = None
def _filter_field(self, field_key):
"""Inherit in your module to define for which company field
you don't want have a matching related field"""
return True
def __init__(self, pool, cr):
super(AbstractConfigSettings, self).__init__(pool, cr)
if self._companyObject:
for field_key in self._companyObject._columns:
# allows to exclude some field
if self._filter_field(field_key):
args = ('company_id', field_key)
kwargs = {
'string': self._companyObject._columns[field_key].string,
'help': self._companyObject._columns[field_key].help,
'type': self._companyObject._columns[field_key]._type,
}
if '_obj' in self._companyObject._columns[field_key].__dict__.keys():
kwargs['relation'] = \
self._companyObject._columns[field_key]._obj
if '_domain' in \
self._companyObject._columns[field_key].__dict__.keys():
kwargs['domain'] = \
self._companyObject._columns[field_key]._domain
field_key = re.sub('^' + self._prefix, '', field_key)
self._columns[field_key] = \
fields.related(*args, **kwargs)
_columns = {
'company_id': fields.many2one(
'res.company',
'Company',
required=True),
}
def _default_company(self, cr, uid, context=None):
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return user.company_id.id
_defaults = {
'company_id': _default_company,
}
def field_to_populate_as_related(self, cr, uid, field, company_cols, context=None):
"""Only fields which comes from company with the right prefix
must be defined as related"""
if self._prefix + field in company_cols:
return True
return False
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
" update related fields "
values = {}
values['currency_id'] = False
if not company_id:
return {'value': values}
company_m = self.pool['res.company']
company = company_m.browse(
cr, uid, company_id, context=context)
company_cols = company_m._columns.keys()
for field in self._columns:
if self.field_to_populate_as_related(
cr, uid, field, company_cols, context=context):
cpny_field = self._columns[field].arg[-1]
if self._columns[field]._type == 'many2one':
values[field] = company[cpny_field]['id'] or False
else:
values[field] = company[cpny_field]
return {'value': values}
def create(self, cr, uid, values, context=None):
id = super(AbstractConfigSettings, self).create(
cr, uid, values, context=context)
# Hack: to avoid some nasty bug, related fields are not written
# upon record creation. Hence we write on those fields here.
vals = {}
for fname, field in self._columns.iteritems():
if isinstance(field, fields.related) and fname in values:
vals[fname] = values[fname]
self.write(cr, uid, [id], vals, context)
return id
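class ExampleConfigSettings(orm.TransientModel):
    """Illustrative sketch, not part of the original module: a concrete
    settings wizard built on the abstract model above. ``_companyObject``
    must be set to your res.company subclass (the one defining the
    ``setting_``-prefixed columns); ``None`` here is a placeholder so this
    sketch stays importable."""
    _name = 'example.config.settings'
    _inherit = 'abstract.config.settings'
    _companyObject = None  # replace with your ResCompany class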
| agpl-3.0 |
ttakamura/chainer | chainer/cudnn/cudnn.py | 2 | 3385 | """Common routines to use CuDNN."""
import atexit, ctypes, os
import numpy
from chainer import cuda
import libcudnn
enabled = int(os.environ.get('CHAINER_CUDNN', '1')) != 0
available = True
def get_ptr(x):
return ctypes.c_void_p(x.ptr)
class Auto(object):
"""Object to be destoryed automatically."""
def __init__(self, value, destroyer):
self.value = value
self.destroyer = destroyer
def __del__(self):
try:
self.destroyer(self.value)
except:
pass
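# Illustrative sketch (not part of the original module): Auto ties a CuDNN
# resource's lifetime to a Python object, RAII-style -- the destroyer runs
# when the wrapper is garbage collected, which is what the descriptor
# helpers below rely on.
def _sketch_auto_usage():
    desc = libcudnn.cudnnCreateTensorDescriptor()
    return Auto(desc, libcudnn.cudnnDestroyTensorDescriptor)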
_handles = {}
_pid = None
def get_default_handle():
"""Get the default handle of CuDNN."""
global _handles, _pid
pid = os.getpid()
if _pid != pid: # not initialized yet
_handles = {}
atexit.register(shutdown)
_pid = pid
device = cuda.Context.get_device()
if device in _handles:
return _handles[device]
handle = libcudnn.cudnnCreate()
_handles[device] = handle
return handle
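# Illustrative sketch (not part of the original module): handles are cached
# per CUDA device and per process, so repeated calls are cheap and yield
# the very same handle object until shutdown() or a fork invalidates the
# cache.
def _sketch_handle_cache():
    assert get_default_handle() is get_default_handle()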
def shutdown():
global _handles, _pid
pid = os.getpid()
if _pid != pid: # not initialized
return
for handle in _handles.itervalues():
libcudnn.cudnnDestroy(handle)
_handles = {}
_pid = None # mark as uninitialized
_dtypes = {numpy.dtype('float32'): libcudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
numpy.dtype('float64'): libcudnn.cudnnDataType['CUDNN_DATA_DOUBLE']}
def get_tensor_desc(x, h, w, form='CUDNN_TENSOR_NCHW'):
"""Create a tensor descriptor for given settings."""
n = x.shape[0]
c = x.size / (n * h * w)
desc = libcudnn.cudnnCreateTensorDescriptor()
libcudnn.cudnnSetTensor4dDescriptor(
desc, libcudnn.cudnnTensorFormat[form], _dtypes[x.dtype], n, c, h, w)
return Auto(desc, libcudnn.cudnnDestroyTensorDescriptor)
def get_conv_bias_desc(x):
"""Create a bias tensor descriptor."""
desc = libcudnn.cudnnCreateTensorDescriptor()
libcudnn.cudnnSetTensor4dDescriptor(
desc, libcudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'], _dtypes[x.dtype],
1, x.size, 1, 1)
return Auto(desc, libcudnn.cudnnDestroyTensorDescriptor)
_default_conv_mode = libcudnn.cudnnConvolutionMode['CUDNN_CROSS_CORRELATION']
def get_filter4d_desc(x, mode=_default_conv_mode):
"""Create a 2d convolution filter descriptor."""
k, c, h, w = x.shape
desc = libcudnn.cudnnCreateFilterDescriptor()
libcudnn.cudnnSetFilter4dDescriptor(desc, _dtypes[x.dtype], k, c, h, w)
return Auto(desc, libcudnn.cudnnDestroyFilterDescriptor)
def get_conv2d_desc(pad, stride, mode=_default_conv_mode):
"""Create a 2d convolution descriptor."""
desc = libcudnn.cudnnCreateConvolutionDescriptor()
libcudnn.cudnnSetConvolution2dDescriptor(
desc, pad[0], pad[1], stride[0], stride[1], 1, 1, mode)
return Auto(desc, libcudnn.cudnnDestroyConvolutionDescriptor)
_pool_mode = {'MAX': libcudnn.cudnnPoolingMode['CUDNN_POOLING_MAX'],
'AVE': libcudnn.cudnnPoolingMode['CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING']}
def get_pool2d_desc(ksize, stride, pad, mode):
"""Create a 2d pooling descriptor."""
desc = libcudnn.cudnnCreatePoolingDescriptor()
libcudnn.cudnnSetPooling2dDescriptor(
desc, libcudnn.cudnnPoolingMode[mode], ksize[0], ksize[1],
pad[0], pad[1], stride[0], stride[1])
return Auto(desc, libcudnn.cudnnDestroyPoolingDescriptor)
| mit |
philanthropy-u/edx-platform | common/djangoapps/track/backends/tests/test_mongodb.py | 172 | 1092 | from __future__ import absolute_import
from mock import patch
from django.test import TestCase
from track.backends.mongodb import MongoBackend
class TestMongoBackend(TestCase):
def setUp(self):
super(TestMongoBackend, self).setUp()
self.mongo_patcher = patch('track.backends.mongodb.MongoClient')
self.mongo_patcher.start()
self.addCleanup(self.mongo_patcher.stop)
self.backend = MongoBackend()
def test_mongo_backend(self):
events = [{'test': 1}, {'test': 2}]
self.backend.send(events[0])
self.backend.send(events[1])
# Check if we inserted events into the database
calls = self.backend.collection.insert.mock_calls
self.assertEqual(len(calls), 2)
# Unpack the arguments and check if the events were used
# as the first argument to collection.insert
def first_argument(call):
_, args, _ = call
return args[0]
self.assertEqual(events[0], first_argument(calls[0]))
self.assertEqual(events[1], first_argument(calls[1]))
| agpl-3.0 |
chokribr/invenio | invenio/modules/jsonalchemy/errors.py | 17 | 1260 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JSONAlchemy errors."""
class JSONAlchemyException(Exception):
"""Base exception."""
pass
class FieldParserException(JSONAlchemyException):
"""Raised when some error happens parsing field definitions."""
pass
class ModelParserException(JSONAlchemyException):
"""Raised when some error happens parsing model definitions."""
pass
class ReaderException(JSONAlchemyException):
"""Raised when some error happens reading a blob."""
pass
| gpl-2.0 |
thom-at-redhat/cfme_tests | sprout/appliances/views.py | 1 | 22371 | # -*- coding: utf-8 -*-
import json
from celery import chain
from celery.result import AsyncResult
from dateutil import parser
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from django.template.base import add_to_builtins
from appliances.api import json_response
from appliances.models import (
Provider, AppliancePool, Appliance, Group, Template, MismatchVersionMailer, User)
from appliances.tasks import (appliance_power_on, appliance_power_off, appliance_suspend,
anyvm_power_on, anyvm_power_off, anyvm_suspend, anyvm_delete, delete_template_from_provider,
appliance_rename, wait_appliance_ready, mark_appliance_ready, appliance_reboot)
from sprout.log import create_logger
from utils.providers import get_mgmt
add_to_builtins('appliances.templatetags.appliances_extras')
def go_home(request):
return redirect(index)
def go_back_or_home(request):
ref = request.META.get('HTTP_REFERER')
if ref:
return redirect(ref)
else:
return go_home(request)
def index(request):
superusers = User.objects.filter(is_superuser=True).order_by("last_name", "first_name")
return render(request, 'index.html', locals())
def providers(request):
providers = Provider.objects.order_by("id")
complete_usage = Provider.complete_user_usage()
return render(request, 'appliances/providers.html', locals())
def templates(request):
groups = Group.objects.order_by("id")
mismatched_versions = MismatchVersionMailer.objects.order_by("id")
return render(request, 'appliances/templates.html', locals())
def shepherd(request):
if not request.user.is_authenticated():
return go_home(request)
groups = Group.objects.all()
return render(request, 'appliances/shepherd.html', locals())
def versions_for_group(request):
group_id = request.POST.get("stream")
latest_version = None
preconfigured = request.POST.get("preconfigured", "false").lower() == "true"
if group_id == "<None>":
versions = []
group = None
else:
try:
group = Group.objects.get(id=group_id)
except ObjectDoesNotExist:
versions = []
else:
versions = Template.get_versions(
template_group=group, ready=True, usable=True, exists=True,
preconfigured=preconfigured, provider__working=True)
if versions:
latest_version = versions[0]
return render(request, 'appliances/_versions.html', locals())
def date_for_group_and_version(request):
group_id = request.POST.get("stream")
latest_date = None
preconfigured = request.POST.get("preconfigured", "false").lower() == "true"
if group_id == "<None>":
dates = []
else:
try:
group = Group.objects.get(id=group_id)
except ObjectDoesNotExist:
dates = []
else:
version = request.POST.get("version")
filters = {
"template_group": group,
"ready": True,
"exists": True,
"usable": True,
"preconfigured": preconfigured,
"provider__working": True,
}
if version == "latest":
try:
versions = Template.get_versions(**filters)
filters["version"] = versions[0]
except IndexError:
pass # No such thing as version for this template group
else:
filters["version"] = version
dates = Template.get_dates(**filters)
if dates:
latest_date = dates[0]
return render(request, 'appliances/_dates.html', locals())
def providers_for_date_group_and_version(request):
total_provisioning_slots = 0
total_appliance_slots = 0
total_shepherd_slots = 0
shepherd_appliances = {}
group_id = request.POST.get("stream")
preconfigured = request.POST.get("preconfigured", "false").lower() == "true"
if group_id == "<None>":
providers = []
else:
try:
group = Group.objects.get(id=group_id)
except ObjectDoesNotExist:
providers = []
else:
version = request.POST.get("version")
filters = {
"template_group": group,
"ready": True,
"exists": True,
"usable": True,
"preconfigured": preconfigured,
"provider__working": True,
}
if version == "latest":
try:
versions = Template.get_versions(**filters)
filters["version"] = versions[0]
except IndexError:
pass # No such thing as version for this template group
else:
filters["version"] = version
date = request.POST.get("date")
if date == "latest":
try:
dates = Template.get_dates(**filters)
filters["date"] = dates[0]
except IndexError:
pass # No such thing as date for this template group
else:
filters["date"] = parser.parse(date)
providers = Template.objects.filter(**filters).values("provider").distinct()
providers = sorted([p.values()[0] for p in providers])
providers = [Provider.objects.get(id=provider) for provider in providers]
for provider in providers:
appl_filter = dict(
appliance_pool=None, ready=True, template__provider=provider,
template__preconfigured=filters["preconfigured"],
template__template_group=filters["template_group"])
if "date" in filters:
appl_filter["template__date"] = filters["date"]
if "version" in filters:
appl_filter["template__version"] = filters["version"]
shepherd_appliances[provider.id] = len(Appliance.objects.filter(**appl_filter))
total_shepherd_slots += shepherd_appliances[provider.id]
total_appliance_slots += provider.remaining_appliance_slots
total_provisioning_slots += provider.remaining_provisioning_slots
render_providers = {}
for provider in providers:
render_providers[provider.id] = {
"shepherd_count": shepherd_appliances[provider.id], "object": provider}
return render(request, 'appliances/_providers.html', locals())
def my_appliances(request, show_user="my"):
if not request.user.is_authenticated():
return go_home(request)
if not request.user.is_superuser:
if not (show_user == "my" or show_user == request.user.username):
messages.info(request, "You can't view others' appliances!")
show_user = "my"
if show_user == request.user.username:
show_user = "my"
else:
other_users = User.objects.exclude(pk=request.user.pk).order_by("last_name", "first_name")
if show_user == "my":
pools = AppliancePool.objects.filter(owner=request.user).order_by("id")
elif show_user == "all":
pools = AppliancePool.objects.order_by("id")
else:
pools = AppliancePool.objects.filter(owner__username=show_user).order_by("id")
groups = Group.objects.order_by("id")
can_order_pool = show_user == "my"
new_pool_possible = True
display_legend = False
for pool in pools:
if not pool.finished:
display_legend = True
per_pool_quota = None
pools_remaining = None
num_user_vms = Appliance.objects.filter(appliance_pool__owner=request.user).count()
if request.user.has_quotas:
if request.user.quotas.total_pool_quota is not None:
if request.user.quotas.total_pool_quota <= len(pools):
new_pool_possible = False
pools_remaining = request.user.quotas.total_pool_quota - len(pools)
if request.user.quotas.total_vm_quota is not None:
if request.user.quotas.total_vm_quota <= num_user_vms:
new_pool_possible = False
if request.user.quotas.per_pool_quota is not None:
per_pool_quota = request.user.quotas.per_pool_quota
remaining_vms = request.user.quotas.total_vm_quota - num_user_vms
if remaining_vms < per_pool_quota:
per_pool_quota = remaining_vms
per_pool_quota_enabled = per_pool_quota is not None
return render(request, 'appliances/my_appliances.html', locals())
def can_operate_appliance_or_pool(appliance_or_pool, user):
if user.is_superuser:
return True
else:
return appliance_or_pool.owner == user
def appliance_action(request, appliance_id, action, x=None):
if not request.user.is_authenticated():
return go_home(request)
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
        messages.error(request, 'Appliance with ID {} does not exist!'.format(appliance_id))
return go_back_or_home(request)
if not can_operate_appliance_or_pool(appliance, request.user):
messages.error(request, 'This appliance belongs either to some other user or nobody.')
return go_back_or_home(request)
if action == "start":
if appliance.power_state != Appliance.Power.ON:
chain(
appliance_power_on.si(appliance.id),
(wait_appliance_ready if appliance.preconfigured else mark_appliance_ready).si(
appliance.id))()
messages.success(request, 'Initiated launch of appliance.')
return go_back_or_home(request)
else:
messages.info(request, 'Appliance was already powered on.')
return go_back_or_home(request)
elif action == "reboot":
if appliance.power_state == Appliance.Power.ON:
chain(
appliance_reboot.si(appliance.id),
(wait_appliance_ready if appliance.preconfigured else mark_appliance_ready).si(
appliance.id))()
messages.success(request, 'Initiated reboot of appliance.')
return go_back_or_home(request)
else:
messages.error(request, 'Only powered on appliances can be rebooted')
return go_back_or_home(request)
elif action == "stop":
if appliance.power_state != Appliance.Power.OFF:
appliance_power_off.delay(appliance.id)
messages.success(request, 'Initiated stop of appliance.')
return go_back_or_home(request)
else:
messages.info(request, 'Appliance was already powered off.')
return go_back_or_home(request)
elif action == "suspend":
if appliance.power_state != Appliance.Power.SUSPENDED:
appliance_suspend.delay(appliance.id)
messages.success(request, 'Initiated suspend of appliance.')
return go_back_or_home(request)
else:
messages.info(request, 'Appliance was already suspended.')
return go_back_or_home(request)
elif action == "kill":
Appliance.kill(appliance)
messages.success(request, 'Kill initiated.')
return go_back_or_home(request)
elif action == "dont_expire":
if not request.user.is_superuser:
messages.error(request, 'Disabling expiration time is allowed only for superusers.')
return go_back_or_home(request)
with transaction.atomic():
appliance.leased_until = None
appliance.save()
messages.success(request, 'Lease disabled successfully. Be careful.')
return go_back_or_home(request)
elif action == "set_lease":
if not can_operate_appliance_or_pool(appliance, request.user):
messages.error(request, 'This appliance belongs either to some other user or nobody.')
return go_back_or_home(request)
appliance.prolong_lease(time=int(x))
messages.success(request, 'Lease prolonged successfully.')
return go_back_or_home(request)
else:
messages.error(request, "Unknown action '{}'".format(action))
def prolong_lease_pool(request, pool_id, minutes):
if not request.user.is_authenticated():
return go_home(request)
try:
appliance_pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
        messages.error(request, 'Appliance pool with ID {} does not exist!'.format(pool_id))
return go_back_or_home(request)
if not can_operate_appliance_or_pool(appliance_pool, request.user):
messages.error(request, 'This appliance belongs either to some other user or nobody.')
return go_back_or_home(request)
appliance_pool.prolong_lease(time=int(minutes))
messages.success(request, 'Lease prolonged successfully.')
return go_back_or_home(request)
def dont_expire_pool(request, pool_id):
if not request.user.is_authenticated():
return go_home(request)
try:
appliance_pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
        messages.error(request, 'Pool with ID {} does not exist!'.format(pool_id))
return go_back_or_home(request)
if not request.user.is_superuser:
messages.error(request, 'Disabling expiration time is allowed only for superusers.')
return go_back_or_home(request)
with transaction.atomic():
for appliance in appliance_pool.appliances:
appliance.leased_until = None
appliance.save()
messages.success(request, 'Lease disabled successfully. Be careful.')
return go_back_or_home(request)
def kill_pool(request, pool_id):
if not request.user.is_authenticated():
return go_home(request)
try:
pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
        messages.error(request, 'Pool with ID {} does not exist!'.format(pool_id))
return go_back_or_home(request)
if not can_operate_appliance_or_pool(pool, request.user):
messages.error(request, 'This pool belongs either to some other user or nobody.')
return go_back_or_home(request)
try:
pool.kill()
except Exception as e:
messages.error(request, "Exception {}: {}".format(type(e).__name__, str(e)))
else:
messages.success(request, 'Kill successfully initiated.')
return go_back_or_home(request)
def set_pool_description(request):
if not request.user.is_authenticated():
raise PermissionDenied()
try:
pool_id = request.POST.get("pool_id", None)
pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
        raise Http404('Pool with ID {} does not exist!'.format(pool_id))
if not can_operate_appliance_or_pool(pool, request.user):
raise PermissionDenied()
description = request.POST.get("description", None)
pool.description = description
pool.save()
return HttpResponse("")
def delete_template_provider(request, template_id):
if not request.user.is_authenticated():
return go_home(request)
try:
template = Template.objects.get(id=template_id)
except ObjectDoesNotExist:
        messages.error(request, 'Template with ID {} does not exist!'.format(template_id))
return go_back_or_home(request)
if not request.user.is_superuser:
messages.error(request, 'Templates can be deleted only by superusers.')
return go_back_or_home(request)
if not template.can_be_deleted:
messages.error(request, 'This template cannot be deleted from the provider.')
return go_back_or_home(request)
delete_template_from_provider.delay(template.id)
messages.success(request, 'Delete initiated.')
return go_back_or_home(request)
def request_pool(request):
if not request.user.is_authenticated():
return go_home(request)
try:
group = request.POST["stream"]
version = request.POST["version"]
if version == "latest":
version = None
date = request.POST["date"]
if date == "latest":
date = None
provider = request.POST["provider"]
if provider == "any":
provider = None
preconfigured = request.POST.get("preconfigured", "false").lower() == "true"
yum_update = request.POST.get("yum_update", "false").lower() == "true"
count = int(request.POST["count"])
lease_time = int(request.POST.get("expiration", 60))
pool_id = AppliancePool.create(
request.user, group, version, date, provider, count, lease_time, preconfigured,
yum_update).id
messages.success(request, "Pool requested - id {}".format(pool_id))
except Exception as e:
messages.error(request, "Exception {} happened: {}".format(type(e).__name__, str(e)))
return go_back_or_home(request)
def transfer_pool(request):
if not request.user.is_authenticated():
return go_home(request)
try:
pool_id = int(request.POST["pool_id"])
user_id = int(request.POST["user_id"])
with transaction.atomic():
pool = AppliancePool.objects.get(id=pool_id)
if not request.user.is_superuser:
if pool.owner != request.user:
raise Exception("User does not have the right to change this pool's owner!")
user = User.objects.get(id=user_id)
if user == request.user:
raise Exception("Why changing owner back to yourself? That does not make sense!")
# original_owner = pool.owner
pool.owner = user
pool.save()
# Rename appliances
# for appliance in pool.appliances:
# if appliance.name.startswith("{}_".format(original_owner.username)):
# # Change name
# appliance_rename.delay(
# appliance.id, user.username + appliance.name[len(original_owner.username):])
except Exception as e:
messages.error(request, "Exception {} happened: {}".format(type(e).__name__, str(e)))
else:
messages.success(request, "Success!")
finally:
return go_back_or_home(request)
def vms(request, current_provider=None):
if not request.user.is_authenticated():
return go_home(request)
provider_keys = sorted(Provider.get_available_provider_keys())
providers = []
for provider_key in provider_keys:
try:
provider = Provider.objects.get(id=provider_key)
except ObjectDoesNotExist:
providers.append((provider_key, True))
else:
providers.append((provider_key, provider.working))
if current_provider is None and providers:
return redirect("vms_at_provider", current_provider=provider_keys[0])
return render(request, 'appliances/vms/index.html', locals())
def vms_table(request, current_provider=None):
if not request.user.is_authenticated():
return go_home(request)
manager = get_mgmt(current_provider)
vms = sorted(manager.list_vm())
return render(request, 'appliances/vms/_list.html', locals())
def power_state(request, current_provider):
vm_name = request.POST["vm_name"]
manager = get_mgmt(current_provider)
state = Appliance.POWER_STATES_MAPPING.get(manager.vm_status(vm_name), "unknown")
return HttpResponse(state, content_type="text/plain")
def power_state_buttons(request, current_provider):
manager = get_mgmt(current_provider)
vm_name = request.POST["vm_name"]
power_state = request.POST["power_state"]
can_power_on = power_state in {Appliance.Power.SUSPENDED, Appliance.Power.OFF}
can_power_off = power_state in {Appliance.Power.ON}
can_suspend = power_state in {Appliance.Power.ON} and manager.can_suspend
can_delete = power_state in {Appliance.Power.OFF}
return render(request, 'appliances/vms/_buttons.html', locals())
def vm_action(request, current_provider):
if not request.user.is_authenticated():
return HttpResponse("Not authenticated", content_type="text/plain")
try:
get_mgmt(current_provider)
except Exception as e:
return HttpResponse(
"Troubles with provider {}: {}".format(current_provider, str(e)),
content_type="text/plain")
vm_name = request.POST["vm_name"]
action = request.POST["action"]
if action == "poweron":
anyvm_power_on.delay(current_provider, vm_name)
elif action == "poweroff":
anyvm_power_off.delay(current_provider, vm_name)
elif action == "suspend":
anyvm_suspend.delay(current_provider, vm_name)
elif action == "delete":
anyvm_delete.delay(current_provider, vm_name)
    else:
        # return immediately; otherwise the unknown action would fall
        # through to the success log and response below
        return HttpResponse("No such action {}!".format(action),
                            content_type="text/plain")
logger().info("User {} initiated {} on {}@{}".format(
request.user.username, action, vm_name, current_provider))
return HttpResponse("Action {} was initiated".format(action), content_type="text/plain")
def logger():
return create_logger("sprout_vm_actions")
def rename_appliance(request):
post = json.loads(request.body)
if not request.user.is_authenticated():
raise PermissionDenied()
try:
appliance_id = post.get("appliance_id")
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
        raise Http404('Appliance with ID {} does not exist!'.format(appliance_id))
if not can_operate_appliance_or_pool(appliance, request.user):
raise PermissionDenied("Permission denied")
new_name = post.get("new_name")
return HttpResponse(str(appliance_rename.delay(appliance.id, new_name).task_id))
def task_result(request):
post = json.loads(request.body)
task_id = post.get("task_id")
result = AsyncResult(task_id)
if not result.ready():
return json_response(None)
return json_response(result.get(timeout=1))
| gpl-2.0 |
paulsmith/geodjango | django/contrib/sessions/models.py | 11 | 2618 | import base64
import md5
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
"""
pickled = pickle.dumps(session_dict)
pickled_md5 = md5.new(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django website).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'))
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
encoded_data = base64.decodestring(self.session_data)
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if md5.new(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
from django.core.exceptions import SuspiciousOperation
raise SuspiciousOperation, "User tampered with session cookie."
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
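# Illustrative sketch (not part of the original module): the stored value is
# base64(pickle + md5(pickle + SECRET_KEY)), so get_decoded() can verify the
# 32-character MD5 suffix before unpickling. Requires configured Django
# settings with a SECRET_KEY; the session key below is a dummy.
def _sketch_round_trip():
    data = {'cart': [1, 2, 3]}
    s = Session(session_key='a' * 40,
                session_data=Session.objects.encode(data))
    assert s.get_decoded() == data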
| bsd-3-clause |
beni55/networkx | networkx/algorithms/flow/capacityscaling.py | 9 | 14582 | # -*- coding: utf-8 -*-
"""
Capacity scaling minimum cost flow algorithm.
"""
__author__ = """ysitu <ysitu@users.noreply.github.com>"""
# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>
# All rights reserved.
# BSD license.
__all__ = ['capacity_scaling']
from itertools import chain
from math import log
import networkx as nx
from ...utils import BinaryHeap
from ...utils import generate_unique_node
from ...utils import not_implemented_for
from ...utils import arbitrary_element
def _detect_unboundedness(R):
"""Detect infinite-capacity negative cycles.
"""
s = generate_unique_node()
G = nx.DiGraph()
G.add_nodes_from(R)
# Value simulating infinity.
inf = R.graph['inf']
# True infinity.
f_inf = float('inf')
for u in R:
for v, e in R[u].items():
# Compute the minimum weight of infinite-capacity (u, v) edges.
w = f_inf
for k, e in e.items():
if e['capacity'] == inf:
w = min(w, e['weight'])
if w != f_inf:
G.add_edge(u, v, weight=w)
if nx.negative_edge_cycle(G):
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
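# Illustrative sketch (not part of the original module): the smallest kind of
# instance this check rejects -- a directed cycle whose edges carry no
# 'capacity' attribute (hence infinite capacity) and whose total weight is
# negative, so circulating flow lowers the cost without bound.
def _sketch_unbounded_instance():
    G = nx.DiGraph()
    G.add_edge('a', 'b', weight=-2)  # no capacity attr => infinite
    G.add_edge('b', 'a', weight=1)   # cycle weight -1
    try:
        nx.capacity_scaling(G)
    except nx.NetworkXUnbounded:
        return True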
@not_implemented_for('undirected')
def _build_residual_network(G, demand, capacity, weight):
"""Build a residual network and initialize a zero flow.
"""
if sum(G.node[u].get(demand, 0) for u in G) != 0:
raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
R = nx.MultiDiGraph()
R.add_nodes_from((u, {'excess': -G.node[u].get(demand, 0),
'potential': 0}) for u in G)
inf = float('inf')
# Detect selfloops with infinite capacities and negative weights.
for u, v, e in G.selfloop_edges(data=True):
if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
# Extract edges with positive capacities. Self loops excluded.
if G.is_multigraph():
edge_list = [(u, v, k, e)
for u, v, k, e in G.edges(data=True, keys=True)
if u != v and e.get(capacity, inf) > 0]
else:
edge_list = [(u, v, 0, e) for u, v, e in G.edges(data=True)
if u != v and e.get(capacity, inf) > 0]
    # Simulate infinity with the larger of the sum of absolute node imbalances
    # and twice the sum of finite edge capacities, or any positive value if
    # both sums are zero. This allows the infinite-capacity edges to be
    # distinguished for
# unboundedness detection and directly participate in residual capacity
# calculation.
inf = max(sum(abs(R.node[u]['excess']) for u in R),
2 * sum(e[capacity] for u, v, k, e in edge_list
if capacity in e and e[capacity] != inf)) or 1
for u, v, k, e in edge_list:
r = min(e.get(capacity, inf), inf)
w = e.get(weight, 0)
# Add both (u, v) and (v, u) into the residual network marked with the
# original key. (key[1] == True) indicates the (u, v) is in the
# original network.
R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)
# Record the value simulating infinity.
R.graph['inf'] = inf
_detect_unboundedness(R)
return R
def _build_flow_dict(G, R, capacity, weight):
"""Build a flow dictionary from a residual network.
"""
inf = float('inf')
flow_dict = {}
if G.is_multigraph():
for u in G:
flow_dict[u] = {}
for v, es in G[u].items():
flow_dict[u][v] = dict(
# Always saturate negative selfloops.
(k, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for k, e in es.items())
for v, es in R[u].items():
if v in flow_dict[u]:
flow_dict[u][v].update((k[0], e['flow'])
for k, e in es.items()
if e['flow'] > 0)
else:
for u in G:
flow_dict[u] = dict(
# Always saturate negative selfloops.
(v, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for v, e in G[u].items())
flow_dict[u].update((v, e['flow']) for v, es in R[u].items()
for e in es.values() if e['flow'] > 0)
return flow_dict
def capacity_scaling(G, demand='demand', capacity='capacity', weight='weight',
heap=BinaryHeap):
"""Find a minimum cost flow satisfying all demands in digraph G.
This is a capacity scaling successive shortest augmenting path algorithm.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
positive demand means that the node want to receive flow. A flow on
the digraph G satisfies all demand if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
demands is to be found.
demand : string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
demands should be 0 otherwise the problem in not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight : string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
heap : class
Type of heap to be used in the algorithm. It should be a subclass of
:class:`MinHeap` or implement a compatible interface.
If a stock heap implementation is to be used, :class:`BinaryHeap` is
recommeded over :class:`PairingHeap` for Python implementations without
optimized attribute accesses (e.g., CPython) despite a slower
asymptotic running time. For Python implementations with optimized
attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
performance. Default value: :class:`BinaryHeap`.
Returns
-------
flowCost: integer
Cost of a minimum cost flow satisfying all demands.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
flowDict[u][v] is the flow edge (u, v) if G is a digraph.
Dictionary of dictionaries of dictionaries keyed by nodes such that
flowDict[u][v][key] is the flow edge (u, v, key) if G is a
multidigraph.
Raises
------
NetworkXError
This exception is raised if the input graph is not directed,
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
* There is no flow satisfying all demand.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
Notes
-----
This algorithm does not work if edge weights are floating-point numbers.
See also
--------
:meth:`network_simplex`
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost, flowDict = nx.capacity_scaling(G)
>>> flowCost
24
>>> flowDict # doctest: +SKIP
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
It is possible to change the name of the attributes used for the
algorithm.
>>> G = nx.DiGraph()
>>> G.add_node('p', spam = -4)
>>> G.add_node('q', spam = 2)
>>> G.add_node('a', spam = -2)
>>> G.add_node('d', spam = -1)
>>> G.add_node('t', spam = 2)
>>> G.add_node('w', spam = 3)
>>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
>>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
>>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
>>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
>>> G.add_edge('a', 't', cost = 2, vacancies = 4)
>>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
>>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
>>> flowCost, flowDict = nx.capacity_scaling(G, demand = 'spam',
... capacity = 'vacancies',
... weight = 'cost')
>>> flowCost
37
>>> flowDict # doctest: +SKIP
{'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
"""
R = _build_residual_network(G, demand, capacity, weight)
inf = float('inf')
# Account cost of negative selfloops.
flow_cost = sum(
0 if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
else e[capacity] * e[weight]
for u, v, e in G.selfloop_edges(data=True))
    # Determine the maximum edge capacity.
wmax = max(chain([-inf],
(e['capacity'] for u, v, e in R.edges(data=True))))
if wmax == -inf:
# Residual network has no edges.
return flow_cost, _build_flow_dict(G, R, capacity, weight)
R_node = R.node
R_succ = R.succ
delta = 2 ** int(log(wmax, 2))
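    # Illustration (added for exposition, values are examples): for wmax = 10,
    # int(log(10, 2)) == 3, so the scaling phases visit delta = 8, 4, 2, 1.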
while delta >= 1:
# Saturate Δ-residual edges with negative reduced costs to achieve
# Δ-optimality.
for u in R:
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
for k, e in es.items():
                    if e['weight'] - p_u + R_node[v]['potential'] < 0:
                        flow = e['capacity'] - e['flow']
if flow >= delta:
e['flow'] += flow
R_succ[v][u][(k[0], not k[1])]['flow'] -= flow
R_node[u]['excess'] -= flow
R_node[v]['excess'] += flow
# Determine the Δ-active nodes.
S = set()
T = set()
S_add = S.add
S_remove = S.remove
T_add = T.add
T_remove = T.remove
for u in R:
excess = R_node[u]['excess']
if excess >= delta:
S_add(u)
elif excess <= -delta:
T_add(u)
# Repeatedly augment flow from S to T along shortest paths until
# Δ-feasibility is achieved.
while S and T:
s = arbitrary_element(S)
t = None
            # Search for a shortest path in terms of reduced costs from s to
# any t in T in the Δ-residual network.
d = {}
pred = {s: None}
h = heap()
h_insert = h.insert
h_get = h.get
h_insert(s, 0)
while h:
u, d_u = h.pop()
d[u] = d_u
if u in T:
# Path found.
t = u
break
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
if v in d:
continue
wmin = inf
# Find the minimum-weighted (u, v) Δ-residual edge.
for k, e in es.items():
if e['capacity'] - e['flow'] >= delta:
w = e['weight']
if w < wmin:
wmin = w
kmin = k
emin = e
if wmin == inf:
continue
# Update the distance label of v.
d_v = d_u + wmin - p_u + R_node[v]['potential']
if h_insert(v, d_v):
pred[v] = (u, kmin, emin)
if t is not None:
# Augment Δ units of flow from s to t.
while u != s:
v = u
u, k, e = pred[v]
e['flow'] += delta
R_succ[v][u][(k[0], not k[1])]['flow'] -= delta
# Account node excess and deficit.
R_node[s]['excess'] -= delta
R_node[t]['excess'] += delta
if R_node[s]['excess'] < delta:
S_remove(s)
if R_node[t]['excess'] > -delta:
T_remove(t)
# Update node potentials.
d_t = d[t]
for u, d_u in d.items():
R_node[u]['potential'] -= d_u - d_t
else:
# Path not found.
S_remove(s)
delta //= 2
if any(R.node[u]['excess'] != 0 for u in R):
raise nx.NetworkXUnfeasible('No flow satisfying all demands.')
# Calculate the flow cost.
for u in R:
for v, es in R_succ[u].items():
for e in es.values():
flow = e['flow']
if flow > 0:
flow_cost += flow * e['weight']
return flow_cost, _build_flow_dict(G, R, capacity, weight)
| bsd-3-clause |
lowks/simoorg | src/simoorg/plugins/healthcheck/HealthCheck.py | 8 | 1122 | #
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
"""
The Health check interface
"""
class HealthCheck(object):
"""
All healthcheck plugins should inherit this class
"""
def __init__(self, script, plugin_config=None):
"""
Init function of the class
Args:
script - The health check script
Return:
None
Raise:
None
"""
self.script = script
# return true or false
def check(self):
"""
Checks the health, usually involves executing the script and
        returning the status
"""
pass
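# The class below is an illustrative sketch only, not part of simoorg: a
# minimal concrete plugin that treats a zero exit status of the configured
# script as healthy. The class name and the use of subprocess are
# assumptions made for exposition.
import subprocess
class ShellHealthCheck(HealthCheck):
    """
    Example plugin: run the script and map its exit status to health
    """
    def check(self):
        """
        Execute the script and return True when it exits with status 0
        Args:
            None
        Return:
            bool - True when the script reports healthy
        Raise:
            None
        """
        return subprocess.call(self.script, shell=True) == 0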
| apache-2.0 |
appleseedhq/blenderseed | operators/texture_ops.py | 2 | 5174 | #
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import bpy
from ..properties.nodes import AppleseedOSLNode
from ..utils import util
class ASTEX_OT_convert_textures(bpy.types.Operator):
"""
Converts base textures into mipmapped .tx textures for rendering
"""
bl_label = "Convert Textures"
bl_description = "Convert textures"
bl_idname = "appleseed.convert_textures"
def execute(self, context):
scene = context.scene
textures = scene.appleseed
for tex in textures.textures:
filename = bpy.path.abspath(tex.name.filepath)
if textures.tex_output_use_cust_dir:
tex_name = os.path.basename(filename).split('.')[0]
out_path = os.path.join(textures.tex_output_dir, f"{tex_name}.tx")
else:
out_path = f"{filename.split('.')[0]}.tx"
import appleseed as asr
asr.oiio_make_texture(filename, out_path, tex.input_space, tex.output_depth)
subbed_filename = f"{os.path.splitext(filename)[0]}.tx"
bpy.ops.image.open(filepath=subbed_filename)
return {'FINISHED'}
class ASTEX_OT_refresh_texture(bpy.types.Operator):
"""
Operator for refreshing texture list to convert.
"""
bl_label = "Refresh Texture"
bl_description = "Refresh textures for conversion"
bl_idname = "appleseed.refresh_textures"
def execute(self, context):
scene = context.scene
collection = scene.appleseed.textures
existing_textures = [x.name for x in collection]
scene_textures = list()
for mat in bpy.data.materials:
if mat.node_tree is not None:
for node in mat.node_tree.nodes:
if isinstance(node, AppleseedOSLNode):
for param in node.filepaths:
texture_block = getattr(node, param)
if texture_block not in scene_textures:
scene_textures.append(texture_block)
if texture_block not in existing_textures:
collection.add()
num = len(collection)
collection[num - 1].name = texture_block
texture_index = len(collection) - 1
while texture_index > -1:
texture = collection[texture_index]
if texture.name not in scene_textures:
collection.remove(texture_index)
texture_index -= 1
return {'FINISHED'}
class ASTES_OT_add_texture(bpy.types.Operator):
"""
Operator for adding a texture to convert.
"""
bl_label = "Add Texture"
bl_description = "Add new texture"
bl_idname = "appleseed.add_texture"
def execute(self, context):
scene = context.scene
collection = scene.appleseed.textures
collection.add()
return {'FINISHED'}
class ASTEX_OT_remove_texture(bpy.types.Operator):
"""
Operator for removing a texture to convert.
"""
bl_label = "Remove Texture"
bl_description = "Remove texture"
bl_idname = "appleseed.remove_texture"
def execute(self, context):
scene = context.scene
collection = scene.appleseed.textures
index = scene.appleseed.textures_index
collection.remove(index)
num = len(collection)
if index >= num:
index = num - 1
if index < 0:
index = 0
scene.appleseed.textures_index = index
return {'FINISHED'}
classes = (
ASTEX_OT_convert_textures,
ASTEX_OT_refresh_texture,
ASTES_OT_add_texture,
ASTEX_OT_remove_texture
)
def register():
for cls in classes:
util.safe_register_class(cls)
def unregister():
for cls in reversed(classes):
util.safe_unregister_class(cls)
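# Usage sketch (assumes the add-on's classes have been registered): the
# operators above are invoked through bpy.ops using the bl_idname values
# declared on each class, e.g.
#
#     bpy.ops.appleseed.refresh_textures()
#     bpy.ops.appleseed.convert_textures()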
| mit |
thomasalrin/Ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexer.py | 265 | 26921 | # -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re, itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
('\xff\xfe\0\0', 'utf-32'),
('\0\0\xfe\xff', 'utf-32be'),
('\xff\xfe', 'utf-16'),
('\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
*New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'latin1'``).
Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
``'chardet'`` to use the chardet library, if it is installed.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
__metaclass__ = LexerMeta
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
        it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, unicode):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = unicode(text[len(bom):], encoding,
errors='replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = unicode(text, enc.get('encoding') or 'utf-8',
errors='replace')
text = decoded
else:
text = text.decode(self.encoding)
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (tokentype, value) pairs.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexer as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit(object):
"""
    Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit()
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
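# Illustrative sketch (not part of this module; the regex and token types
# are assumptions): a rule whose three match groups each receive their own
# token type.
#
#     from pygments.token import Keyword, Name, Text
#     tokens = {
#         'root': [
#             (r'(def)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
#              bygroups(Keyword, Text, Name.Function)),
#         ],
#     }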
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
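# Illustrative sketch (the lexer and state names are assumptions): delegate
# the body of a <script> element to another lexer from inside a state
# definition while this lexer keeps handling the surrounding markup.
#
#     'script-content': [
#         (r'.+?(?=</script>)', using(JavascriptLexer)),
#     ],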
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags):
"""Preprocess the regular expression component of a token definition."""
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# processed already
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags)
except Exception, err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in tokendefs.keys():
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in itertools.chain((cls,), cls.__mro__):
toks = c.__dict__.get('tokens', {})
for state, items in toks.iteritems():
curitems = tokens.get(state)
if curitems is None:
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
__metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
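    # A sketch of the expected shape (the states and regexes here are
    # examples for exposition, not defaults shipped with this class):
    #
    #     tokens = {
    #         'root': [
    #             (r'[^"]+', Text),
    #             (r'"', String, 'string'),   # push 'string'
    #         ],
    #         'string': [
    #             (r'[^"\\]+', String),
    #             (r'"', String, '#pop'),     # return to previous state
    #         ],
    #     }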
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
                            ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = insertions.next()
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
        # first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
| mit |
sudosurootdev/external_chromium_org | native_client_sdk/src/build_tools/tests/easy_template_test.py | 53 | 3559 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import difflib
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import easy_template
class EasyTemplateTestCase(unittest.TestCase):
def _RunTest(self, template, expected, template_dict):
src = cStringIO.StringIO(template)
dst = cStringIO.StringIO()
easy_template.RunTemplate(src, dst, template_dict)
if dst.getvalue() != expected:
expected_lines = expected.splitlines(1)
actual_lines = dst.getvalue().splitlines(1)
diff = ''.join(difflib.unified_diff(
expected_lines, actual_lines,
fromfile='expected', tofile='actual'))
self.fail('Unexpected output:\n' + diff)
def testEmpty(self):
self._RunTest('', '', {})
def testNewlines(self):
self._RunTest('\n\n', '\n\n', {})
def testNoInterpolation(self):
template = """I love paris in the
the springtime [don't you?]
{this is not interpolation}.
"""
self._RunTest(template, template, {})
def testSimpleInterpolation(self):
self._RunTest(
'{{foo}} is my favorite number',
'42 is my favorite number',
{'foo': 42})
def testLineContinuations(self):
template = "Line 1 \\\nLine 2\n"""
self._RunTest(template, template, {})
def testIfStatement(self):
template = r"""
[[if foo:]]
foo
[[else:]]
not foo
[[]]"""
self._RunTest(template, "\n foo\n", {'foo': True})
self._RunTest(template, "\n not foo\n", {'foo': False})
def testForStatement(self):
template = r"""[[for beers in [99, 98, 1]:]]
{{beers}} bottle{{(beers != 1) and 's' or ''}} of beer on the wall...
[[]]"""
expected = r"""99 bottles of beer on the wall...
98 bottles of beer on the wall...
1 bottle of beer on the wall...
"""
self._RunTest(template, expected, {})
def testListVariables(self):
template = r"""
[[for i, item in enumerate(my_list):]]
{{i+1}}: {{item}}
[[]]
"""
self._RunTest(template, "\n1: Banana\n2: Grapes\n3: Kumquat\n",
{'my_list': ['Banana', 'Grapes', 'Kumquat']})
def testListInterpolation(self):
template = "{{', '.join(growing[0:-1]) + ' and ' + growing[-1]}} grow..."
self._RunTest(template, "Oats, peas, beans and barley grow...",
{'growing': ['Oats', 'peas', 'beans', 'barley']})
self._RunTest(template, "Love and laughter grow...",
{'growing': ['Love', 'laughter']})
def testComplex(self):
template = r"""
struct {{name}} {
[[for field in fields:]]
[[ if field['type'] == 'array':]]
{{field['basetype']}} {{field['name']}}[{{field['size']}}];
[[ else:]]
{{field['type']}} {{field['name']}};
[[ ]]
[[]]
};"""
expected = r"""
struct Foo {
std::string name;
int problems[99];
};"""
self._RunTest(template, expected, {
'name': 'Foo',
'fields': [
{'name': 'name', 'type': 'std::string'},
{'name': 'problems', 'type': 'array', 'basetype': 'int', 'size': 99}]})
def testModulo(self):
self._RunTest('No expression %', 'No expression %', {})
self._RunTest('% before {{3 + 4}}', '% before 7', {})
self._RunTest('{{2**8}} % after', '256 % after', {})
self._RunTest('inside {{8 % 3}}', 'inside 2', {})
self._RunTest('Everywhere % {{8 % 3}} %', 'Everywhere % 2 %', {})
if __name__ == '__main__':
unittest.main()
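# Syntax recap drawn from the tests above (the sample data is made up):
# [[...]] lines hold template statements, {{...}} interpolates expressions.
#
#     src = cStringIO.StringIO("[[for n in names:]]\nHi {{n}}!\n[[]]")
#     dst = cStringIO.StringIO()
#     easy_template.RunTemplate(src, dst, {'names': ['a', 'b']})
#     assert dst.getvalue() == "Hi a!\nHi b!\n"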
| bsd-3-clause |
radiasoft/sirepo | sirepo/template/opal.py | 2 | 34879 | # -*- coding: utf-8 -*-
u"""OPAL execution template.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcompat
from pykern import pkio
from pykern import pkjinja
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import code_variable
from sirepo.template import lattice
from sirepo.template import template_common
from sirepo.template.lattice import LatticeUtil
from sirepo.template.madx_converter import MadxConverter
import h5py
import math
import numpy as np
import re
import sirepo.lib
import sirepo.sim_data
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()
OPAL_INPUT_FILE = 'opal.in'
OPAL_OUTPUT_FILE = 'opal.out'
OPAL_POSITION_FILE = 'opal-vtk.py'
_DIM_INDEX = PKDict(
x=0,
y=1,
z=2,
)
_OPAL_H5_FILE = 'opal.h5'
_OPAL_SDDS_FILE = 'opal.stat'
_OPAL_VTK_FILE = 'opal_ElementPositions.vtk'
_ELEMENTS_WITH_TYPE_FIELD = ('CYCLOTRON', 'MONITOR', 'RFCAVITY')
_HEADER_COMMANDS = ('option', 'filter', 'geometry', 'particlematterinteraction', 'wake')
class LibAdapter(sirepo.lib.LibAdapterBase):
def parse_file(self, path):
from sirepo.template import opal_parser
data, input_files = opal_parser.parse_file(
pkio.read_text(path),
filename=path.basename,
)
self._verify_files(path, [f.filename for f in input_files])
return self._convert(data)
def write_files(self, data, source_path, dest_dir):
"""writes files for the simulation
Returns:
PKDict: structure of files written (debugging only)
"""
class _G(_Generate):
def _input_file(self, model_name, field, filename):
return f'"{filename}"'
g = _G(data)
r = PKDict(commands=dest_dir.join(source_path.basename))
pkio.write_text(r.commands, g.sim())
self._write_input_files(data, source_path, dest_dir)
r.output_files = LatticeUtil(data, _SCHEMA).iterate_models(
OpalOutputFileIterator(),
).result.keys_in_order
return r
class OpalElementIterator(lattice.ElementIterator):
def __init__(self, formatter, visited=None):
super().__init__(None, formatter)
self.visited = visited
def end(self, model):
if self.visited:
if '_id' in model and model._id not in self.visited:
return
super().end(model)
def is_ignore_field(self, field):
return field == 'name'
class OpalOutputFileIterator(lattice.ModelIterator):
def __init__(self):
self.result = PKDict(
keys_in_order=[],
)
self.model_index = PKDict()
def field(self, model, field_schema, field):
self.field_index += 1
# for now only interested in element outfn output files
if field == 'outfn' and field_schema[1] == 'OutputFile':
filename = '{}.{}.h5'.format(model.name, field)
k = LatticeUtil.file_id(model._id, self.field_index)
self.result[k] = filename
self.result.keys_in_order.append(k)
def start(self, model):
self.field_index = 0
self.model_name = LatticeUtil.model_name_for_data(model)
if self.model_name in self.model_index:
self.model_index[self.model_name] += 1
else:
self.model_index[self.model_name] = 1
class OpalMadxConverter(MadxConverter):
_FIELD_MAP = [
['DRIFT',
['DRIFT', 'l'],
],
['SBEND',
['SBEND', 'l', 'angle', 'e1', 'e2', 'gap=hgap', 'psi=tilt'],
],
['RBEND',
['RBEND', 'l', 'angle', 'e1', 'e2', 'gap=hgap', 'psi=tilt'],
],
['QUADRUPOLE',
['QUADRUPOLE', 'l', 'k1', 'k1s', 'psi=tilt'],
],
['SEXTUPOLE',
['SEXTUPOLE', 'l', 'k2', 'k2s', 'psi=tilt'],
],
['OCTUPOLE',
['OCTUPOLE', 'l', 'k3', 'k3s', 'psi=tilt'],
],
['SOLENOID',
#TODO(pjm): compute dks from ksi?
['SOLENOID', 'l', 'ks'],
],
['MULTIPOLE',
#TODO(pjm): compute kn, ks from knl, ksl?
['MULTIPOLE', 'psi=tilt'],
],
['HKICKER',
['HKICKER', 'l', 'kick', 'psi=tilt'],
],
['VKICKER',
['VKICKER', 'l', 'kick', 'psi=tilt'],
],
['KICKER',
['KICKER', 'l', 'hkick', 'vkick', 'psi=tilt'],
],
['MARKER',
['MARKER'],
],
['PLACEHOLDER',
['DRIFT', 'l'],
],
['INSTRUMENT',
['DRIFT', 'l'],
],
['ECOLLIMATOR',
['ECOLLIMATOR', 'l', 'xsize', 'ysize'],
],
['RCOLLIMATOR',
['RCOLLIMATOR', 'l', 'xsize', 'ysize'],
],
['COLLIMATOR apertype=ELLIPSE',
['ECOLLIMATOR', 'l', 'xsize', 'ysize'],
],
['COLLIMATOR apertype=RECTANGLE',
['RCOLLIMATOR', 'l', 'xsize', 'ysize'],
],
['RFCAVITY',
['RFCAVITY', 'l', 'volt', 'lag', 'harmon', 'freq'],
],
['TWCAVITY',
['TRAVELINGWAVE', 'l', 'volt', 'lag', 'freq', 'dlag=delta_lag'],
],
['HMONITOR',
['MONITOR', 'l'],
],
['VMONITOR',
['MONITOR', 'l'],
],
['MONITOR',
['MONITOR', 'l'],
],
]
def __init__(self):
super().__init__(SIM_TYPE, self._FIELD_MAP)
def to_madx(self, data):
madx = super().to_madx(data)
mb = LatticeUtil.find_first_command(madx, 'beam')
ob = LatticeUtil.find_first_command(data, 'beam')
for f in ob:
if f in mb and f in _SCHEMA.model.command_beam:
mb[f] = ob[f]
if f in ('gamma', 'energy', 'pc') and mb[f]:
madx.models.bunch.beamDefinition = f
od = LatticeUtil.find_first_command(data, 'distribution')
#TODO(pjm): save dist in vars
return madx
def from_madx(self, madx):
data = super().from_madx(madx)
data.models.simulation.elementPosition = 'relative'
mb = LatticeUtil.find_first_command(madx, 'beam')
LatticeUtil.find_first_command(data, 'option').version = 20000
LatticeUtil.find_first_command(data, 'beam').particle = mb.particle.upper()
LatticeUtil.find_first_command(data, 'beam').pc = self.particle_energy.pc
LatticeUtil.find_first_command(data, 'track').line = data.models.simulation.visualizationBeamlineId
self.__fixup_distribution(madx, data)
return data
def _fixup_element(self, element_in, element_out):
super()._fixup_element(element_in, element_out)
        if self.from_class.sim_type() != SIM_TYPE:
if element_in.type == 'SBEND':
angle = self.__val(element_in.angle)
if angle != 0:
length = self.__val(element_in.l)
                    d1 = 2 * length / angle
element_out.l = d1 * math.sin(length / d1)
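                    # i.e. chord = (2*l/angle) * sin(angle/2), the straight
                    # chord of an arc of length l subtending `angle`
                    # (comment added for exposition).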
if element_in.type in ('SBEND', 'RBEND'):
# kenetic energy in MeV
element_out.designenergy = round(
(math.sqrt(self.particle_energy.energy ** 2 + self.beam.mass ** 2) - self.beam.mass) * 1e3,
6,
)
element_out.gap = 2 * self.__val(element_in.hgap)
element_out.fmapfn = 'hard_edge_profile.txt'
if element_in.type == 'QUADRUPOLE':
k1 = self.__val(element_out.k1)
if self.beam.charge < 0:
k1 *= -1
element_out.k1 = '{} * {}'.format(k1, self._var_name('brho'))
def __fixup_distribution(self, madx, data):
mb = LatticeUtil.find_first_command(madx, 'beam')
dist = LatticeUtil.find_first_command(data, 'distribution')
beta_gamma = self.particle_energy.beta * self.particle_energy.gamma
self._replace_var(data, 'brho', self.particle_energy.brho)
self._replace_var(data, 'gamma', self.particle_energy.gamma)
self._replace_var(data, 'beta', 'sqrt(1 - (1 / ({} * {})))'.format(
self._var_name('gamma'),
self._var_name('gamma'),
))
for dim in ('x', 'y'):
self._replace_var(data, f'emit_{dim}', mb[f'e{dim}'])
beta = self._find_var(madx, f'beta_{dim}')
if beta:
dist[f'sigma{dim}'] = 'sqrt({} * {})'.format(
self._var_name(f'emit_{dim}'), self._var_name(f'beta_{dim}'))
dist[f'sigmap{dim}'] = 'sqrt({} * {}) * {} * {}'.format(
self._var_name(f'emit_{dim}'), self._var_name(f'gamma_{dim}'),
self._var_name('beta'), self._var_name('gamma'))
dist[f'corr{dim}'] = '-{}/sqrt(1 + {} * {})'.format(
self._var_name(f'alpha_{dim}'),
self._var_name(f'alpha_{dim}'),
self._var_name(f'alpha_{dim}'),
)
if self._find_var(madx, 'dp_s_coupling'):
dist.corrz = self._var_name('dp_s_coupling')
ob = LatticeUtil.find_first_command(data, 'beam')
ob.bcurrent = mb.bcurrent
if self._find_var(madx, 'n_particles_per_bunch'):
ob.npart = self._var_name('n_particles_per_bunch')
dist.sigmaz = self.__val(mb.sigt)
dist.sigmapz = '{} * {} * {}'.format(mb.sige, self._var_name('beta'), self._var_name('gamma'))
def __val(self, var_value):
return self.vars.eval_var_with_assert(var_value)
def background_percent_complete(report, run_dir, is_running):
res = PKDict(
percentComplete=0,
frameCount=0,
)
if is_running:
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
#TODO(pjm): determine total frame count and set percentComplete
res.frameCount = _read_frame_count(run_dir) - 1
return res
if run_dir.join('{}.json'.format(template_common.INPUT_BASE_NAME)).exists():
res.frameCount = _read_frame_count(run_dir)
if res.frameCount > 0:
res.percentComplete = 100
res.outputInfo = _output_info(run_dir)
return res
def code_var(variables):
class _P(code_variable.PurePythonEval):
#TODO(pjm): parse from opal files into schema
_OPAL_PI = 3.14159265358979323846
_OPAL_CONSTANTS = PKDict(
pi=_OPAL_PI,
twopi=_OPAL_PI * 2.0,
raddeg=180.0 / _OPAL_PI,
degrad=_OPAL_PI / 180.0,
e=2.7182818284590452354,
emass=0.51099892e-03,
pmass=0.93827204e+00,
hmmass=0.939277e+00,
umass=238 * 0.931494027e+00,
cmass=12 * 0.931494027e+00,
mmass=0.10565837,
dmass=2*0.931494027e+00,
xemass=124*0.931494027e+00,
clight=299792458.0,
p0=1,
seed=123456789,
)
def __init__(self):
super().__init__(self._OPAL_CONSTANTS)
def eval_var(self, expr, depends, variables):
if re.match(r'^\{.+\}$', expr):
# It is an array of values
return expr, None
return super().eval_var(expr, depends, variables)
return code_variable.CodeVar(
variables,
_P(),
case_insensitive=True,
)
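# Usage sketch (the variable list below is illustrative; see the eval_var()
# and eval_var_with_assert() calls later in this module for the in-module
# patterns):
#
#     cv = code_var([PKDict(name='two_pi', value='2 * pi')])
#     value, err = cv.eval_var('two_pi / 2')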
def get_application_data(data, **kwargs):
if data.method == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_frames)
if code_var(data.variables).get_application_data(data, _SCHEMA, ignore_array_values=True):
return data
def get_data_file(run_dir, model, frame, options=None, **kwargs):
if frame < 0:
return template_common.text_data_file(OPAL_OUTPUT_FILE, run_dir)
if model in ('bunchAnimation', 'plotAnimation') or 'bunchReport' in model:
return _OPAL_H5_FILE
if model == 'plot2Animation':
return _OPAL_SDDS_FILE
if model == 'beamline3dAnimation':
return _OPAL_VTK_FILE
if 'elementAnimation' in model:
return _file_name_for_element_animation(run_dir, model)
raise AssertionError('unknown model={}'.format(model))
def import_file(req, unit_test_mode=False, **kwargs):
from sirepo.template import opal_parser
text = pkcompat.from_bytes(req.file_stream.read())
if re.search(r'\.in$', req.filename, re.IGNORECASE):
data, input_files = opal_parser.parse_file(
text,
filename=req.filename)
missing_files = []
for infile in input_files:
if not _SIM_DATA.lib_file_exists(infile.lib_filename):
missing_files.append(infile)
if missing_files:
return PKDict(
error='Missing data files',
missingFiles=missing_files,
)
elif re.search(r'\.madx$', req.filename, re.IGNORECASE):
data = OpalMadxConverter().from_madx_text(text)
data.models.simulation.name = re.sub(r'\.madx$', '', req.filename, flags=re.IGNORECASE)
else:
raise IOError('invalid file extension, expecting .in or .madx')
return data
def new_simulation(data, new_simulation_data):
data.models.simulation.elementPosition = new_simulation_data.elementPosition
def post_execution_processing(
success_exit=True,
is_parallel=True,
run_dir=None,
**kwargs
):
if success_exit:
return None
if is_parallel:
return _parse_opal_log(run_dir)
return _parse_opal_log(run_dir)
def prepare_for_client(data):
code_var(data.models.rpnVariables).compute_cache(data, _SCHEMA)
return data
def prepare_sequential_output_file(run_dir, data):
report = data['report']
if 'bunchReport' in report:
fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
if fn.exists():
fn.remove()
try:
save_sequential_report_data(data, run_dir)
except IOError:
# the output file isn't readable
pass
def python_source_for_model(data, model):
if model == 'madx':
return OpalMadxConverter().to_madx_text(data)
return _generate_parameters_file(data)
def save_sequential_report_data(data, run_dir):
report = data.models[data.report]
res = None
if 'bunchReport' in data.report:
res = _bunch_plot(report, run_dir, 0)
res.title = ''
else:
        raise AssertionError('unknown report: {}'.format(data.report))
template_common.write_sequential_result(
res,
run_dir=run_dir,
)
def sim_frame(frame_args):
# elementAnimations
return _bunch_plot(
frame_args,
frame_args.run_dir,
frame_args.frameIndex,
_file_name_for_element_animation(frame_args),
)
def sim_frame_beamline3dAnimation(frame_args):
res = PKDict(
title=' ',
points=[],
polys=[],
colors=[],
bounds=_compute_3d_bounds(frame_args.run_dir),
)
state = None
with pkio.open_text(_OPAL_VTK_FILE) as f:
for line in f:
if line == '\n':
continue
if line.startswith('POINTS '):
state = 'points'
continue
if line.startswith('CELLS '):
state = 'polys'
continue
if line.startswith('CELL_TYPES'):
state = None
continue
if line.startswith('COLOR_SCALARS'):
state = 'colors'
continue
if state == 'points' or state == 'colors':
for v in line.split(' '):
res[state].append(float(v))
elif state == 'polys':
for v in line.split(' '):
res[state].append(int(v))
return res
def sim_frame_bunchAnimation(frame_args):
a = frame_args.sim_in.models.bunchAnimation
a.update(frame_args)
return _bunch_plot(a, a.run_dir, a.frameIndex)
def sim_frame_plotAnimation(frame_args):
def _walk_file(h5file, key, step, res):
if key:
for field in res.values():
field.points.append(h5file[key].attrs[field.name][field.index])
else:
for field in res.values():
_units_from_hdf5(h5file, field)
res = PKDict()
for dim in 'x', 'y1', 'y2', 'y3':
parts = frame_args[dim].split(' ')
if parts[0] == 'none':
continue
res[dim] = PKDict(
label=frame_args[dim],
dim=dim,
points=[],
name=parts[0],
index=_DIM_INDEX[parts[1]] if len(parts) > 1 else 0,
)
_iterate_hdf5_steps(frame_args.run_dir.join(_OPAL_H5_FILE), _walk_file, res)
plots = []
for field in res.values():
if field.dim != 'x':
plots.append(field)
return template_common.parameter_plot(
res.x.points,
plots,
PKDict(),
PKDict(
title='',
y_label='',
x_label=res.x.label,
),
)
def sim_frame_plot2Animation(frame_args):
from sirepo.template import sdds_util
x = None
plots = []
for f in ('x', 'y1', 'y2', 'y3'):
name = frame_args[f].replace(' ', '_')
if name == 'none':
continue
col = sdds_util.extract_sdds_column(str(frame_args.run_dir.join(_OPAL_SDDS_FILE)), name, 0)
if col.err:
return col.err
field = PKDict(
points=col['values'],
label=frame_args[f],
)
_field_units(col.column_def[1], field)
if f == 'x':
x = field
else:
plots.append(field)
# independent reads of file may produce more columns, trim to match x length
for p in plots:
if len(x.points) < len(p.points):
p.points = p.points[:len(x.points)]
return template_common.parameter_plot(x.points, plots, {}, {
'title': '',
'y_label': '',
'x_label': x.label,
})
def write_parameters(data, run_dir, is_parallel):
pkio.write_text(
run_dir.join(OPAL_INPUT_FILE),
_generate_parameters_file(data),
)
if is_parallel:
pkio.write_text(
run_dir.join(OPAL_POSITION_FILE),
'import os\n' \
+ 'os.system("python data/opal_ElementPositions.py --export-vtk")\n',
)
class _Generate(sirepo.lib.GenerateBase):
def __init__(self, data):
self.data = data
self._schema = _SCHEMA
def sim(self):
d = self.data
self.jinja_env = template_common.flatten_data(d.models, PKDict())
self._code_var = code_var(d.models.rpnVariables)
if 'bunchReport' in d.get('report', ''):
return self._bunch_simulation()
return self._full_simulation()
def _bunch_simulation(self):
v = self.jinja_env
# keep only first distribution and beam in command list
beam = LatticeUtil.find_first_command(self.data, 'beam')
distribution = LatticeUtil.find_first_command(self.data, 'distribution')
v.beamName = beam.name
v.distributionName = distribution.name
# these need to get set to default or distribution won't generate in 1 step
# for emitted distributions
distribution.nbin = 0
distribution.emissionsteps = 1
distribution.offsetz = 0
self.data.models.commands = [
LatticeUtil.find_first_command(self.data, 'option'),
beam,
distribution,
]
self._generate_commands_and_variables()
return template_common.render_jinja(SIM_TYPE, v, 'bunch.in')
def _format_field_value(self, state, model, field, el_type):
value = model[field]
if el_type == 'Boolean':
value = 'true' if value == '1' else 'false'
elif el_type == 'RPNValue':
value = _fix_opal_float(value)
elif el_type == 'InputFile':
value = self._input_file(LatticeUtil.model_name_for_data(model), field, value)
elif el_type == 'OutputFile':
ext = 'dat' if model.get('_type', '') == 'list' else 'h5'
value = '"{}.{}.{}"'.format(model.name, field, ext)
elif re.search(r'List$', el_type):
value = state.id_map[int(value)].name
elif re.search(r'String', el_type):
if str(value):
if not re.search(r'^\s*\{.*\}$', value):
value = '"{}"'.format(value)
elif LatticeUtil.is_command(model):
if el_type != 'RPNValue' and str(value):
value = '"{}"'.format(value)
elif not LatticeUtil.is_command(model):
if model.type in _ELEMENTS_WITH_TYPE_FIELD and '_type' in field:
return ['type', value]
if str(value):
return [field, value]
return None
def _full_simulation(self):
v = self.jinja_env
v.lattice = self._generate_lattice(
self.util,
self._code_var,
LatticeUtil.find_first_command(
self.util.data,
'track',
).line or self.util.select_beamline().id,
)
v.use_beamline = self.util.select_beamline().name
self._generate_commands_and_variables()
return template_common.render_jinja(SIM_TYPE, v, 'parameters.in')
def _generate_commands(self, util, is_header):
# reorder command so OPTION and list commands come first
commands = []
key = None
if is_header:
key = 'header_commands'
# add header commands in order, with option first
for ctype in _HEADER_COMMANDS:
for c in util.data.models.commands:
if c._type == ctype:
commands.append(c)
else:
key = 'other_commands'
for c in util.data.models.commands:
if c._type not in _HEADER_COMMANDS:
commands.append(c)
util.data.models[key] = commands
res = util.render_lattice(
util.iterate_models(
OpalElementIterator(self._format_field_value),
key,
).result,
quote_name=True,
want_semicolon=True)
# separate run from track, add endtrack
#TODO(pjm): better to have a custom element generator for this case
lines = []
for line in res.splitlines():
m = re.match('(.*?: track,.*?)(run_.*?)(;|,[^r].*)', line)
if m:
lines.append('{}{}'.format(re.sub(r',$', '', m.group(1)), m.group(3)))
lines.append(' run, {};'.format(re.sub(r'run_', '', m.group(2))))
lines.append('endtrack;')
else:
lines.append(line)
return '\n'.join(lines)
def _generate_commands_and_variables(self):
self.jinja_env.update(dict(
variables=self._code_var.generate_variables(self._generate_variable),
header_commands=self._generate_commands(self.util, True),
commands=self._generate_commands(self.util, False),
))
def _generate_lattice(self, util, code_var, beamline_id):
if util.data.models.simulation.elementPosition == 'absolute':
beamline, visited = _generate_absolute_beamline(util, beamline_id)
else:
beamline, _, names, visited = _generate_beamline(util, code_var, beamline_id)
beamline += '{}: LINE=({});\n'.format(
util.id_map[beamline_id].name,
','.join(names),
)
res = util.render_lattice(
util.iterate_models(
OpalElementIterator(self._format_field_value, visited),
'elements',
).result,
quote_name=True,
want_semicolon=True,
) + '\n'
res += beamline
return res
def _generate_variable(self, name, variables, visited):
res = ''
if name not in visited:
res += 'REAL {} = {};\n'.format(name, _fix_opal_float(variables[name]))
visited[name] = True
return res
def _input_file(self, model_name, field, filename):
return '"{}"'.format(_SIM_DATA.lib_file_name_with_model_field(
model_name,
field,
filename,
))
def _compute_3d_bounds(run_dir):
res = []
p = run_dir.join('data/opal_ElementPositions.txt')
with pkio.open_text(p) as f:
for line in f:
m = re.search(r'^".*?"\s+(\S*?)\s+(\S*?)\s+(\S*?)\s*$', line)
if m:
res.append([float(v) for v in (m.group(1), m.group(2), m.group(3))])
res = np.array(res)
bounds = []
for n in range(3):
v = res[:, n]
bounds.append([min(v), max(v)])
return bounds
def _generate_parameters_file(data):
return _Generate(data).sim()
def _bunch_plot(report, run_dir, idx, filename=_OPAL_H5_FILE):
res = PKDict()
title = 'Step {}'.format(idx)
with h5py.File(str(run_dir.join(filename)), 'r') as f:
for field in ('x', 'y'):
res[field] = PKDict(
name=report[field],
points=np.array(f['/Step#{}/{}'.format(idx, report[field])]),
label=report[field],
)
_units_from_hdf5(f, res[field])
if 'SPOS' in f['/Step#{}'.format(idx)].attrs:
title += ', SPOS {0:.5f}m'.format(f['/Step#{}'.format(idx)].attrs['SPOS'][0])
return template_common.heatmap([res.x.points, res.y.points], report, PKDict(
x_label=res.x.label,
y_label=res.y.label,
title=title,
))
def _compute_range_across_frames(run_dir, data):
def _walk_file(h5file, key, step, res):
if key:
for field in res:
v = np.array(h5file['/{}/{}'.format(key, field)])
min1, max1 = v.min(), v.max()
if res[field]:
if res[field][0] > min1:
res[field][0] = min1
if res[field][1] < max1:
res[field][1] = max1
else:
res[field] = [min1, max1]
res = PKDict()
for v in _SCHEMA.enum.PhaseSpaceCoordinate:
res[v[0]] = None
return _iterate_hdf5_steps(run_dir.join(_OPAL_H5_FILE), _walk_file, res)
def _column_data(col, col_names, rows):
idx = col_names.index(col)
assert idx >= 0, 'invalid col: {}'.format(col)
res = []
for row in rows:
res.append(float(row[idx]))
return res
def _field_units(units, field):
if units == '1':
units = ''
elif units[0] == 'M' and len(units) > 1:
units = re.sub(r'^.', '', units)
field.points = (np.array(field.points) * 1e6).tolist()
elif units[0] == 'G' and len(units) > 1:
units = re.sub(r'^.', '', units)
field.points = (np.array(field.points) * 1e9).tolist()
elif units == 'ns':
units = 's'
field.points = (np.array(field.points) / 1e9).tolist()
if units:
if re.search(r'^#', units):
field.label += ' ({})'.format(units)
else:
field.label += ' [{}]'.format(units)
field.units = units
def _file_name_for_element_animation(frame_args):
r = frame_args.frameReport
for info in _output_info(frame_args.run_dir):
if info.modelKey == r:
return info.filename
raise AssertionError(f'no output file for frameReport={r}')
def _find_run_method(commands):
for command in commands:
if command._type == 'track' and command.run_method:
return command.run_method
return 'THIN'
def _fix_opal_float(value):
if value and not code_variable.CodeVar.is_var_value(value):
# need to format values as floats, OPAL has overflow issues with large integers
return float(value)
return value
def _generate_absolute_beamline(util, beamline_id, count_by_name=None, visited=None):
if count_by_name is None:
count_by_name = PKDict()
if visited is None:
visited = set()
names = []
res = ''
beamline = util.id_map[abs(beamline_id)]
items = beamline['items']
for idx in range(len(items)):
item_id = items[idx]
item = util.id_map[abs(item_id)]
name = item.name.upper()
if name not in count_by_name:
count_by_name[name] = 0
if 'type' in item:
# element
name = '"{}#{}"'.format(name, count_by_name[name])
count_by_name[item.name.upper()] += 1
pos = beamline.positions[idx]
res += '{}: "{}",elemedge={};\n'.format(name, item.name.upper(), pos.elemedge)
names.append(name)
visited.add(item_id)
        else:
if item_id not in visited:
text, visited = _generate_absolute_beamline(util, item_id, count_by_name, visited)
res += text
names.append('{}'.format(name))
has_orientation = False
for f in ('x', 'y', 'z', 'theta', 'phi', 'psi'):
if f in beamline and beamline[f]:
has_orientation = True
break
orientation = ''
if has_orientation:
orientation = ', ORIGIN={}, ORIENTATION={}'.format(
'{}{}, {}, {}{}'.format('{', beamline.x, beamline.y, beamline.z, '}'),
'{}{}, {}, {}{}'.format('{', beamline.theta, beamline.phi, beamline.psi, '}'),
)
res += '{}: LINE=({}){};\n'.format(
beamline.name,
','.join(names),
orientation,
)
return res, visited
def _generate_beamline(util, code_var, beamline_id, count_by_name=None, edge=0, names=None, visited=None):
if count_by_name is None:
count_by_name = PKDict()
if names is None:
names = []
if visited is None:
visited = set()
res = ''
run_method = _find_run_method(util.data.models.commands)
beamline = util.id_map[abs(beamline_id)]
items = beamline['items']
if beamline_id < 0:
items = list(reversed(items))
for idx in range(len(items)):
item_id = items[idx]
item = util.id_map[abs(item_id)]
if 'type' in item:
# element
name = item.name.upper()
if name not in count_by_name:
count_by_name[name] = 0
name = '"{}#{}"'.format(name, count_by_name[name])
count_by_name[item.name.upper()] += 1
if run_method == 'OPAL-CYCL' or run_method == 'CYCLOTRON-T':
res += '"{}": {};\n'.format(name, item.name.upper())
names.append(name)
visited.add(item_id)
continue
length = code_var.eval_var(item.l)[0]
if item.type == 'DRIFT' and length < 0:
# don't include reverse drifts, for positioning only
pass
else:
res += '{}: "{}",elemedge={};\n'.format(name, item.name.upper(), edge)
names.append(name)
if item.type == 'SBEND' and run_method == 'THICK':
# use arclength for SBEND with THICK tracker (only?)
angle = code_var.eval_var_with_assert(item.angle)
length = angle * length / (2 * math.sin(angle / 2))
visited.add(item_id)
edge += length
else:
# beamline
text, edge, names, visited = _generate_beamline(util, code_var, item_id, count_by_name, edge, names, visited)
res += text
return res, edge, names, visited
def _iterate_hdf5_steps(path, callback, state):
with h5py.File(str(path), 'r') as f:
step = 0
key = 'Step#{}'.format(step)
while key in f:
callback(f, key, step, state)
step += 1
key = 'Step#{}'.format(step)
callback(f, None, -1, state)
return state
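# Note on _iterate_hdf5_steps above: the callback is invoked once per
# existing 'Step#<n>' group in order, then one final time with key=None and
# step=-1 so callers can finalize aggregate state (see _read_frame_count
# below for a usage of this contract).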
def _output_info(run_dir):
#TODO(pjm): cache to file with version, similar to template.elegant
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
files = LatticeUtil(data, _SCHEMA).iterate_models(OpalOutputFileIterator()).result
res = []
for k in files.keys_in_order:
if run_dir.join(files[k]).exists():
res.append(PKDict(
modelKey='elementAnimation{}'.format(k),
filename=files[k],
isHistogram=True,
))
return res
def _parse_opal_log(run_dir):
res = ''
    p = run_dir.join(OPAL_OUTPUT_FILE)
if not p.exists():
return res
with pkio.open_text(p) as f:
prev_line = ''
for line in f:
if re.search(r'^Error.*?>', line):
line = re.sub(r'^Error.*?>\s*\**\s*', '', line.rstrip())
if re.search(r'1DPROFILE1-DEFAULT', line):
continue
if line and line != prev_line:
res += line + '\n'
prev_line = line
if res:
return res
return 'An unknown error occurred'
def _read_data_file(path):
with pkio.open_text(str(path)) as f:
col_names = []
rows = []
mode = ''
for line in f:
if '---' in line:
if mode == 'header':
mode = 'data'
elif mode == 'data':
break
if not mode:
mode = 'header'
continue
line = re.sub('\0', '', line)
if mode == 'header':
col_names = re.split(r'\s+', line.lower())
elif mode == 'data':
#TODO(pjm): separate overlapped columns. Instead should explicitly set field dimensions
line = re.sub(r'(\d)(\-\d)', r'\1 \2', line)
line = re.sub(r'(\.\d{3})(\d+\.)', r'\1 \2', line)
rows.append(re.split(r'\s+', line))
return col_names, rows
def _read_frame_count(run_dir):
def _walk_file(h5file, key, step, res):
if key:
res[0] = step + 1
try:
return _iterate_hdf5_steps(run_dir.join(_OPAL_H5_FILE), _walk_file, [0])[0]
except IOError:
pass
return 0
def _units_from_hdf5(h5file, field):
return _field_units(pkcompat.from_bytes(h5file.attrs['{}Unit'.format(field.name)]), field)
| apache-2.0 |
aeklant/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | 21 | 20990 | # -*- coding: utf-8 -*-
from numpy import (abs, sum, sin, cos, sqrt, log, prod, where, pi, exp, arange,
floor, log10, atleast_2d, zeros)
from .go_benchmark import Benchmark
class Parsopoulos(Benchmark):
r"""
Parsopoulos objective function.
This class defines the Parsopoulos [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Parsopoulos}}(x) = \cos(x_1)^2 + \sin(x_2)^2
with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.
    *Global optimum*: This function has an infinite number of global minima in R2,
at points :math:`\left(k\frac{\pi}{2}, \lambda \pi \right)`,
where :math:`k = \pm1, \pm3, ...` and :math:`\lambda = 0, \pm1, \pm2, ...`
    In the given domain, the function has 12 global minima, all equal to
zero.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.global_optimum = [[pi / 2.0, pi]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0
class Pathological(Benchmark):
r"""
Pathological objective function.
This class defines the Pathological [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Pathological}}(x) = \sum_{i=1}^{n -1} \frac{\sin^{2}\left(
\sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\right) -0.5}{0.001 \left(x_{i}^{2}
- 2x_{i}x_{i+1} + x_{i+1}^{2}\right)^{2} + 0.50}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0.` for :math:`x = [0, 0]` for
:math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.
def fun(self, x, *args):
self.nfev += 1
vec = (0.5 + (sin(sqrt(100 * x[: -1] ** 2 + x[1:] ** 2)) ** 2 - 0.5) /
(1. + 0.001 * (x[: -1] ** 2 - 2 * x[: -1] * x[1:]
+ x[1:] ** 2) ** 2))
return sum(vec)
class Paviani(Benchmark):
r"""
Paviani objective function.
This class defines the Paviani [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Paviani}}(x) = \sum_{i=1}^{10} \left[\log^{2}\left(10
- x_i\right) + \log^{2}\left(x_i -2\right)\right]
- \left(\prod_{i=1}^{10} x_i^{10} \right)^{0.2}
with :math:`x_i \in [2.001, 9.999]` for :math:`i = 1, ... , 10`.
*Global optimum*: :math:`f(x_i) = -45.7784684040686` for
:math:`x_i = 9.350266` for :math:`i = 1, ..., 10`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: think Gavana web/code definition is wrong because final product term
shouldn't raise x to power 10.
"""
def __init__(self, dimensions=10):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([2.001] * self.N, [9.999] * self.N))
self.global_optimum = [[9.350266 for _ in range(self.N)]]
self.fglob = -45.7784684040686
def fun(self, x, *args):
self.nfev += 1
return sum(log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0) - prod(x) ** 0.2
class Penalty01(Benchmark):
r"""
Penalty 1 objective function.
This class defines the Penalty 1 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:
.. math::
f_{\text{Penalty01}}(x) = \frac{\pi}{30} \left\{10 \sin^2(\pi y_1)
+ \sum_{i=1}^{n-1} (y_i - 1)^2 \left[1 + 10 \sin^2(\pi y_{i+1}) \right]
+ (y_n - 1)^2 \right \} + \sum_{i=1}^n u(x_i, 10, 100, 4)
Where, in this exercise:
.. math::
y_i = 1 + \frac{1}{4}(x_i + 1)
And:
.. math::
u(x_i, a, k, m) =
\begin{cases}
k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\
0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\
k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-50, 50]` for :math:`i= 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = -1` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))
self.custom_bounds = ([-5.0, 5.0], [-5.0, 5.0])
self.global_optimum = [[-1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
a, b, c = 10.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b * (xx - a) ** c, 0.0)
y = 1.0 + (x + 1.0) / 4.0
return (sum(u) + (pi / 30.0) * (10.0 * sin(pi * y[0]) ** 2.0
+ sum((y[: -1] - 1.0) ** 2.0
* (1.0 + 10.0 * sin(pi * y[1:]) ** 2.0))
+ (y[-1] - 1) ** 2.0))
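# Sanity check of the stated optimum for Penalty01: at x_i = -1 each
# y_i = 1 + (x_i + 1)/4 = 1, so every sin(pi*y) and (y - 1)**2 term vanishes,
# and |x_i| = 1 < a = 10 keeps the penalty u at zero, giving f(x) = 0.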
class Penalty02(Benchmark):
r"""
Penalty 2 objective function.
This class defines the Penalty 2 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Penalty02}}(x) = 0.1 \left\{\sin^2(3\pi x_1) + \sum_{i=1}^{n-1}
(x_i - 1)^2 \left[1 + \sin^2(3\pi x_{i+1}) \right ]
+ (x_n - 1)^2 \left [1 + \sin^2(2 \pi x_n) \right ]\right \}
+ \sum_{i=1}^n u(x_i, 5, 100, 4)
Where, in this exercise:
.. math::
u(x_i, a, k, m) =
\begin{cases}
k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\
0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\
k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a \\
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-50, 50]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))
self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
a, b, c = 5.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b * (xx - a) ** c, 0.0)
return (sum(u) + 0.1 * (10 * sin(3.0 * pi * x[0]) ** 2.0
+ sum((x[:-1] - 1.0) ** 2.0
* (1.0 + sin(3 * pi * x[1:]) ** 2.0))
+ (x[-1] - 1) ** 2.0 * (1 + sin(2 * pi * x[-1]) ** 2.0)))
class PenHolder(Benchmark):
r"""
PenHolder objective function.
This class defines the PenHolder [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{PenHolder}}(x) = -e^{\left|{e^{-\left|{- \frac{\sqrt{x_{1}^{2}
+ x_{2}^{2}}}{\pi} + 1}\right|} \cos\left(x_{1}\right)
\cos\left(x_{2}\right)}\right|^{-1}}
with :math:`x_i \in [-11, 11]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = -0.9635348327265058` for
:math:`x_i = \pm 9.646167671043401` for :math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-11.0] * self.N, [11.0] * self.N))
self.global_optimum = [[-9.646167708023526, 9.646167671043401]]
self.fglob = -0.9635348327265058
def fun(self, x, *args):
self.nfev += 1
a = abs(1. - (sqrt(x[0] ** 2 + x[1] ** 2) / pi))
b = cos(x[0]) * cos(x[1]) * exp(a)
return -exp(-abs(b) ** -1)
class PermFunction01(Benchmark):
r"""
PermFunction 1 objective function.
This class defines the PermFunction1 [1]_ global optimization problem. This is
a multimodal minimization problem defined as follows:
.. math::
f_{\text{PermFunction01}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j^k
+ \beta) \left[ \left(\frac{x_j}{j}\right)^k - 1 \right] \right\}^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-n, n + 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = i` for
:math:`i = 1, ..., n`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO: line 560
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-self.N] * self.N,
[self.N + 1] * self.N))
self.global_optimum = [list(range(1, self.N + 1))]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
b = 0.5
k = atleast_2d(arange(self.N) + 1).T
j = atleast_2d(arange(self.N) + 1)
s = (j ** k + b) * ((x / j) ** k - 1)
return sum((sum(s, axis=1) ** 2))
class PermFunction02(Benchmark):
r"""
PermFunction 2 objective function.
This class defines the Perm Function 2 [1]_ global optimization problem. This is
a multimodal minimization problem defined as follows:
.. math::
f_{\text{PermFunction02}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j
+ \beta) \left[ \left(x_j^k - {\frac{1}{j}}^{k} \right )
\right] \right\}^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-n, n+1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = \frac{1}{i}`
for :math:`i = 1, ..., n`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO: line 582
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-self.N] * self.N,
[self.N + 1] * self.N))
self.custom_bounds = ([0, 1.5], [0, 1.0])
self.global_optimum = [1. / arange(1, self.N + 1)]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
b = 10
k = atleast_2d(arange(self.N) + 1).T
j = atleast_2d(arange(self.N) + 1)
s = (j + b) * (x ** k - (1. / j) ** k)
return sum((sum(s, axis=1) ** 2))
class Pinter(Benchmark):
r"""
Pinter objective function.
This class defines the Pinter [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Pinter}}(x) = \sum_{i=1}^n ix_i^2 + \sum_{i=1}^n 20i
\sin^2 A + \sum_{i=1}^n i \log_{10} (1 + iB^2)
Where, in this exercise:
.. math::
\begin{cases}
A = x_{i-1} \sin x_i + \sin x_{i+1} \\
B = x_{i-1}^2 - 2x_i + 3x_{i + 1} - \cos x_i + 1\\
\end{cases}
Where :math:`x_0 = x_n` and :math:`x_{n + 1} = x_1`.
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
i = arange(self.N) + 1
xx = zeros(self.N + 2)
xx[1: - 1] = x
xx[0] = x[-1]
xx[-1] = x[0]
A = xx[0: -2] * sin(xx[1: - 1]) + sin(xx[2:])
B = xx[0: -2] ** 2 - 2 * xx[1: - 1] + 3 * xx[2:] - cos(xx[1: - 1]) + 1
return (sum(i * x ** 2)
+ sum(20 * i * sin(A) ** 2)
+ sum(i * log10(1 + i * B ** 2)))
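# Note on Pinter.fun above: xx pads x with wrap-around neighbours
# (x_0 = x_n and x_{n+1} = x_1), so the slices computing A and B implement
# the circular index convention stated in the docstring.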
class Plateau(Benchmark):
r"""
Plateau objective function.
This class defines the Plateau [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Plateau}}(x) = 30 + \sum_{i=1}^n \lfloor \lvert x_i
\rvert\rfloor
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 30` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 30.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return 30.0 + sum(floor(abs(x)))
class Powell(Benchmark):
r"""
Powell objective function.
This class defines the Powell [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Powell}}(x) = (x_3+10x_1)^2 + 5(x_2-x_4)^2 + (x_1-2x_2)^4
+ 10(x_3-x_4)^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-4, 5]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., 4`
    .. [1] Powell, M. An iterative method for finding stationary values of a
function of several variables Computer Journal, 1962, 5, 147-151
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-4.0] * self.N, [5.0] * self.N))
self.global_optimum = [[0, 0, 0, 0]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return ((x[0] + 10 * x[1]) ** 2 + 5 * (x[2] - x[3]) ** 2
+ (x[1] - 2 * x[2]) ** 4 + 10 * (x[0] - x[3]) ** 4)
class PowerSum(Benchmark):
r"""
Power sum objective function.
This class defines the Power Sum global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{PowerSum}}(x) = \sum_{k=1}^n\left[\left(\sum_{i=1}^n x_i^k
\right) - b_k \right]^2
Where, in this exercise, :math:`b = [8, 18, 44, 114]`
Here, :math:`x_i \in [0, 4]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 2, 3]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N,
[4.0] * self.N))
self.global_optimum = [[1.0, 2.0, 2.0, 3.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
b = [8.0, 18.0, 44.0, 114.0]
k = atleast_2d(arange(self.N) + 1).T
return sum((sum(x ** k, axis=1) - b) ** 2)
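# Sanity check of the stated optimum for PowerSum: with x = [1, 2, 2, 3] the
# power sums for k = 1..4 are 8, 18, 44 and 114, matching b exactly, so
# f(x) = 0.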
class Price01(Benchmark):
r"""
Price 1 objective function.
This class defines the Price 1 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Price01}}(x) = (\lvert x_1 \rvert - 5)^2
+ (\lvert x_2 \rvert - 5)^2
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [5, 5]` or
:math:`x = [5, -5]` or :math:`x = [-5, 5]` or :math:`x = [-5, -5]`.
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])
self.global_optimum = [[5.0, 5.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (abs(x[0]) - 5.0) ** 2.0 + (abs(x[1]) - 5.0) ** 2.0
class Price02(Benchmark):
r"""
Price 2 objective function.
This class defines the Price 2 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Price02}}(x) = 1 + \sin^2(x_1) + \sin^2(x_2)
- 0.1e^{(-x_1^2 - x_2^2)}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0.9` for :math:`x_i = [0, 0]`
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.9
def fun(self, x, *args):
self.nfev += 1
return 1.0 + sum(sin(x) ** 2) - 0.1 * exp(-x[0] ** 2.0 - x[1] ** 2.0)
class Price03(Benchmark):
r"""
Price 3 objective function.
This class defines the Price 3 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Price03}}(x) = 100(x_2 - x_1^2)^2 + \left[6.4(x_2 - 0.5)^2
- x_1 - 0.6 \right]^2
with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.
    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`.
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
TODO Jamil #96 has an erroneous factor of 6 in front of the square brackets
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.custom_bounds = ([0, 2], [0, 2])
self.global_optimum = [[1.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (100 * (x[1] - x[0] ** 2) ** 2
+ (6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6) ** 2)
class Price04(Benchmark):
r"""
Price 4 objective function.
This class defines the Price 4 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Price04}}(x) = (2 x_1^3 x_2 - x_2^3)^2
+ (6 x_1 - x_2^2 + x_2)^2
with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`,
:math:`x = [2, 4]` and :math:`x = [1.464, -2.506]`
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))
self.custom_bounds = ([0, 2], [0, 2])
self.global_optimum = [[2.0, 4.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return ((2.0 * x[1] * x[0] ** 3.0 - x[1] ** 3.0) ** 2.0
+ (6.0 * x[0] - x[1] ** 2.0 + x[1]) ** 2.0)
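# Illustrative usage sketch (not part of the original module; the solver
# choice is an assumption): each class exposes the common Benchmark
# interface, so a problem can be minimized directly with a global optimizer
# such as scipy.optimize.differential_evolution:
#
#     from scipy.optimize import differential_evolution
#
#     problem = Pinter()
#     result = differential_evolution(problem.fun, problem._bounds, seed=0)
#     # result.fun should approach problem.fglob (0.0 for Pinter) and
#     # result.x should approach problem.global_optimum[0].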
| bsd-3-clause |
dgjustice/ansible | test/units/playbook/test_helpers.py | 60 | 19182 | # (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from units.mock.loader import DictDataLoader
from ansible import errors
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role.include import RoleInclude
from ansible.playbook import helpers
class MixinForMocks(object):
def _setup(self):
# This is not a very good mixin, lots of side effects
self.fake_loader = DictDataLoader({'include_test.yml': "",
'other_include_test.yml': ""})
self.mock_tqm = MagicMock(name='MockTaskQueueManager')
self.mock_play = MagicMock(name='MockPlay')
self.mock_iterator = MagicMock(name='MockIterator')
self.mock_iterator._play = self.mock_play
self.mock_inventory = MagicMock(name='MockInventory')
self.mock_inventory._hosts_cache = dict()
def _get_host(host_name):
return None
self.mock_inventory.get_host.side_effect = _get_host
# TODO: can we use a real VariableManager?
self.mock_variable_manager = MagicMock(name='MockVariableManager')
self.mock_variable_manager.get_vars.return_value = dict()
self.mock_block = MagicMock(name='MockBlock')
self.fake_role_loader = DictDataLoader({"/etc/ansible/roles/bogus_role/tasks/main.yml": """
- shell: echo 'hello world'
"""})
self._test_data_path = os.path.dirname(__file__)
self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello world'
""",
"/dev/null/includes/static_test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello static world'
""",
"/dev/null/includes/other_test_include.yml": """
- debug:
msg: other_test_include_debug
"""})
class TestLoadListOfTasks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def _assert_is_task_list(self, results):
for result in results:
self.assertIsInstance(result, Task)
def _assert_is_task_list_or_blocks(self, results):
self.assertIsInstance(results, list)
for result in results:
self.assertIsInstance(result, (Task, Block))
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_tasks,
ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_task(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_empty_task_use_handlers(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds,
use_handlers=True,
play=self.mock_play,
variable_manager=self.mock_variable_manager,
loader=self.fake_loader)
def test_one_bogus_block(self):
ds = [{'block': None}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_unknown_action(self):
action_name = 'foo_test_unknown_action'
ds = [{'action': action_name}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertEquals(res[0].action, action_name)
def test_block_unknown_action(self):
action_name = 'foo_test_block_unknown_action'
ds = [{
'block': [{'action': action_name}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def _assert_default_block(self, block):
# the expected defaults
self.assertIsInstance(block.block, list)
self.assertEquals(len(block.block), 1)
self.assertIsInstance(block.rescue, list)
self.assertEquals(len(block.rescue), 0)
self.assertIsInstance(block.always, list)
self.assertEquals(len(block.always), 0)
def test_block_unknown_action_use_handlers(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def test_one_bogus_block_use_handlers(self):
ds = [{'block': True}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_one_bogus_include(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_use_handlers(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_static(self):
ds = [{'include': 'somefile.yml',
'static': 'true'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_parent_include(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIsInstance(res[0]._parent, TaskInclude)
# TODO/FIXME: do this non deprecated way
def test_one_include_tags(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tagB', res[0].tags)
# TODO/FIXME: do this non deprecated way
def test_one_parent_include_tags(self):
ds = [{'include': '/dev/null/includes/test_include.yml',
#'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
}
]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tag2', res[0].tags)
# It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
def test_one_include_tags_deprecated_mixed(self):
ds = [{'include': "/dev/null/includes/other_test_include.yml",
'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
'tags': 'mixed_tag1, mixed_tag2'
}]
self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
def test_one_include_tags_deprecated_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('include_tag1_deprecated', res[0].tags)
self.assertIn('and_another_tagB_deprecated', res[0].tags)
def test_one_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
def test_one_parent_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
# default for Handler
self.assertEquals(res[0].listen, None)
    # TODO/FIXME: this doesn't seem right
# figure out how to get the non-static errors to be raised, this seems to just ignore everything
def test_one_include_not_static(self):
ds = [{
'include': '/dev/null/includes/static_test_include.yml',
'static': False
}]
#a_block = Block()
ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
a_task_include = TaskInclude()
ti = a_task_include.load(ti_ds)
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=ti,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Task)
self.assertEquals(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')
    # TODO/FIXME: These two get stuck trying to make a mock_block into a TaskInclude
# def test_one_include(self):
# ds = [{'include': 'other_test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
# def test_one_parent_include(self):
# ds = [{'include': 'test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
def test_one_bogus_include_role(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=self.mock_block,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_bogus_include_role_use_handlers(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
block=self.mock_block,
variable_manager=self.mock_variable_manager,
loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_roles,
ds, self.mock_play)
def test_empty_role(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleError,
"role definitions must contain a role name",
helpers.load_list_of_roles,
ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
def test_empty_role_just_name(self):
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
def test_block_unknown_action(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
mock_play = MagicMock(name='MockPlay')
self.assertRaises(AssertionError, helpers.load_list_of_blocks,
ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_block(self):
ds = [{}]
mock_play = MagicMock(name='MockPlay')
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_blocks,
ds, mock_play,
parent_block=None,
role=None,
task_include=None,
use_handlers=False,
variable_manager=None,
loader=None)
def test_block_unknown_action(self):
ds = [{'action': 'foo'}]
mock_play = MagicMock(name='MockPlay')
res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
loader=None)
self.assertIsInstance(res, list)
for block in res:
self.assertIsInstance(block, Block)
| gpl-3.0 |
electrical/jenkins-job-builder | jenkins_jobs/modules/hipchat_notif.py | 20 | 7674 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Enable HipChat notifications of build execution.
:Parameters:
* **enabled** *(bool)*: general cut off switch. If not explicitly set to
``true``, no hipchat parameters are written to XML. For Jenkins HipChat
plugin of version prior to 0.1.5, also enables all build results to be
reported in HipChat room. For later plugin versions, explicit notify-*
setting is required (see below).
* **room** *(str)*: name of HipChat room to post messages to (default '')
.. deprecated:: 1.2.0 Please use 'rooms'.
* **rooms** *(list)*: list of HipChat rooms to post messages to
(default empty)
* **start-notify** *(bool)*: post messages about build start event
.. deprecated:: 1.2.0 use notify-start parameter instead
* **notify-start** *(bool)*: post messages about build start event
(default false)
* **notify-success** *(bool)*: post messages about successful build event
(Jenkins HipChat plugin >= 0.1.5) (default false)
* **notify-aborted** *(bool)*: post messages about aborted build event
(Jenkins HipChat plugin >= 0.1.5) (default false)
* **notify-not-built** *(bool)*: post messages about build set to NOT_BUILT
status (Jenkins HipChat plugin >= 0.1.5). This status code is used in a
multi-stage build (like maven2) where a problem in earlier stage prevented
later stages from building. (default false)
* **notify-unstable** *(bool)*: post messages about unstable build event
(Jenkins HipChat plugin >= 0.1.5) (default false)
* **notify-failure** *(bool)*: post messages about build failure event
(Jenkins HipChat plugin >= 0.1.5) (default false)
* **notify-back-to-normal** *(bool)*: post messages about build being back to
normal after being unstable or failed (Jenkins HipChat plugin >= 0.1.5)
(default false)
Example:
.. literalinclude:: /../../tests/hipchat/fixtures/hipchat001.yaml
:language: yaml
"""
# Enabling hipchat notifications on a job requires specifying the hipchat
# config in job properties, and adding the hipchat notifier to the job's
# publishers list.
# The publisher configuration contains extra details not specified per job:
# - the hipchat authorisation token.
# - the jenkins server url.
# - a default room name/id.
# This complicates matters somewhat since the sensible place to store these
# details is in the global config file.
# The global config object is therefore passed down to the registry object,
# and this object is passed to the HipChat() class initialiser.
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.errors
import logging
import pkg_resources
from six.moves import configparser
import sys
logger = logging.getLogger(__name__)
class HipChat(jenkins_jobs.modules.base.Base):
sequence = 80
def __init__(self, registry):
self.authToken = None
self.jenkinsUrl = None
self.registry = registry
def _load_global_data(self):
"""Load data from the global config object.
This is done lazily to avoid looking up the '[hipchat]' section
unless actually required.
"""
if(not self.authToken):
try:
self.authToken = self.registry.global_config.get(
'hipchat', 'authtoken')
# Require that the authtoken is non-null
if self.authToken == '':
raise jenkins_jobs.errors.JenkinsJobsException(
"Hipchat authtoken must not be a blank string")
except (configparser.NoSectionError,
jenkins_jobs.errors.JenkinsJobsException) as e:
logger.fatal("The configuration file needs a hipchat section" +
" containing authtoken:\n{0}".format(e))
sys.exit(1)
self.jenkinsUrl = self.registry.global_config.get('jenkins', 'url')
self.sendAs = self.registry.global_config.get('hipchat', 'send-as')
def gen_xml(self, parser, xml_parent, data):
hipchat = data.get('hipchat')
if not hipchat or not hipchat.get('enabled', True):
return
self._load_global_data()
plugin_info = self.registry.get_plugin_info("Jenkins HipChat Plugin")
version = pkg_resources.parse_version(plugin_info.get('version', '0'))
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
pdefhip = XML.SubElement(properties,
'jenkins.plugins.hipchat.'
'HipChatNotifier_-HipChatJobProperty')
room = XML.SubElement(pdefhip, 'room')
if 'rooms' in hipchat:
room.text = ",".join(hipchat['rooms'])
elif 'room' in hipchat:
logger.warn("'room' is deprecated, please use 'rooms'")
room.text = hipchat['room']
        # Handle backwards compatibility 'start-notify' but also add an element
# of standardization with notify-*
if hipchat.get('start-notify'):
logger.warn("'start-notify' is deprecated, please use "
"'notify-start'")
XML.SubElement(pdefhip, 'startNotification').text = str(
hipchat.get('notify-start', hipchat.get('start-notify',
False))).lower()
if version >= pkg_resources.parse_version("0.1.5"):
XML.SubElement(pdefhip, 'notifySuccess').text = str(
hipchat.get('notify-success', False)).lower()
XML.SubElement(pdefhip, 'notifyAborted').text = str(
hipchat.get('notify-aborted', False)).lower()
XML.SubElement(pdefhip, 'notifyNotBuilt').text = str(
hipchat.get('notify-not-built', False)).lower()
XML.SubElement(pdefhip, 'notifyUnstable').text = str(
hipchat.get('notify-unstable', False)).lower()
XML.SubElement(pdefhip, 'notifyFailure').text = str(
hipchat.get('notify-failure', False)).lower()
XML.SubElement(pdefhip, 'notifyBackToNormal').text = str(
hipchat.get('notify-back-to-normal', False)).lower()
publishers = xml_parent.find('publishers')
if publishers is None:
publishers = XML.SubElement(xml_parent, 'publishers')
hippub = XML.SubElement(publishers,
'jenkins.plugins.hipchat.HipChatNotifier')
if version >= pkg_resources.parse_version("0.1.8"):
XML.SubElement(hippub, 'buildServerUrl').text = self.jenkinsUrl
XML.SubElement(hippub, 'sendAs').text = self.sendAs
else:
XML.SubElement(hippub, 'jenkinsUrl').text = self.jenkinsUrl
XML.SubElement(hippub, 'authToken').text = self.authToken
# The room specified here is the default room. The default is
# redundant in this case since a room must be specified. Leave empty.
XML.SubElement(hippub, 'room').text = ''
| apache-2.0 |
QGuLL/samba | third_party/pep8/testsuite/E50.py | 44 | 2846 | #: E501
a = '12345678901234567890123456789012345678901234567890123456789012345678901234567890'
#: E501
a = '1234567890123456789012345678901234567890123456789012345678901234567890' or \
6
#: E501
a = 7 or \
'1234567890123456789012345678901234567890123456789012345678901234567890' or \
6
#: E501 E501
a = 7 or \
'1234567890123456789012345678901234567890123456789012345678901234567890' or \
'1234567890123456789012345678901234567890123456789012345678901234567890' or \
6
#: E501
a = '1234567890123456789012345678901234567890123456789012345678901234567890' # \
#: E502
a = ('123456789012345678901234567890123456789012345678901234567890123456789' \
'01234567890')
#: E502
a = ('AAA \
BBB' \
'CCC')
#: E502
if (foo is None and bar is "e000" and \
blah == 'yeah'):
blah = 'yeahnah'
#
#: Okay
a = ('AAA'
'BBB')
a = ('AAA \
BBB'
'CCC')
a = 'AAA' \
'BBB' \
'CCC'
a = ('AAA\
BBBBBBBBB\
CCCCCCCCC\
DDDDDDDDD')
#
#: Okay
if aaa:
pass
elif bbb or \
ccc:
pass
ddd = \
ccc
('\
' + ' \
')
('''
''' + ' \
')
#: E501 E225 E226
very_long_identifiers=and_terrible_whitespace_habits(are_no_excuse+for_long_lines)
#
#: E501
'''multiline string
with a long long long long long long long long long long long long long long long long line
'''
#: E501
'''same thing, but this time without a terminal newline in the string
long long long long long long long long long long long long long long long long line'''
#
# issue 224 (unavoidable long lines in docstrings)
#: Okay
"""
I'm some great documentation. Because I'm some great documentation, I'm
going to give you a reference to some valuable information about some API
that I'm calling:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
"""
#: E501
"""
longnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaces"""
#: Okay
"""
This
almost_empty_line
"""
#: E501
"""
This
almost_empty_line
"""
#: E501
# A basic comment
# with a long long long long long long long long long long long long long long long long line
#
#: Okay
# I'm some great comment. Because I'm so great, I'm going to give you a
# reference to some valuable information about some API that I'm calling:
#
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
import this
# longnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaces
#
#: Okay
# This
# almost_empty_line
#
#: E501
# This
# almost_empty_line
| gpl-3.0 |
heli522/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a breakdown point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
           title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
SimVascular/VTK | ThirdParty/Twisted/twisted/web/soap.py | 60 | 5179 | # -*- test-case-name: twisted.web.test.test_soap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SOAP support for twisted.web.
Requires SOAPpy 0.10.1 or later.
Maintainer: Itamar Shtull-Trauring
Future plans:
SOAPContext support of some kind.
Pluggable method lookup policies.
"""
# SOAPpy
import SOAPpy
# twisted imports
from twisted.web import server, resource, client
from twisted.internet import defer
class SOAPPublisher(resource.Resource):
"""Publish SOAP methods.
By default, publish methods beginning with 'soap_'. If the method
    has an attribute 'useKeywords', it will get the arguments passed
as keyword args.
"""
isLeaf = 1
# override to change the encoding used for responses
encoding = "UTF-8"
def lookupFunction(self, functionName):
"""Lookup published SOAP function.
Override in subclasses. Default behaviour - publish methods
starting with soap_.
@return: callable or None if not found.
"""
return getattr(self, "soap_%s" % functionName, None)
def render(self, request):
"""Handle a SOAP command."""
data = request.content.read()
p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns
# deal with changes in SOAPpy 0.11
if callable(args):
args = args()
if callable(kwargs):
kwargs = kwargs()
function = self.lookupFunction(methodName)
if not function:
self._methodNotFound(request, methodName)
return server.NOT_DONE_YET
else:
if hasattr(function, "useKeywords"):
keywords = {}
for k, v in kwargs.items():
keywords[str(k)] = v
d = defer.maybeDeferred(function, **keywords)
else:
d = defer.maybeDeferred(function, *args)
d.addCallback(self._gotResult, request, methodName)
d.addErrback(self._gotError, request, methodName)
return server.NOT_DONE_YET
def _methodNotFound(self, request, methodName):
response = SOAPpy.buildSOAP(SOAPpy.faultType("%s:Client" %
SOAPpy.NS.ENV_T, "Method %s not found" % methodName),
encoding=self.encoding)
self._sendResponse(request, response, status=500)
def _gotResult(self, result, request, methodName):
if not isinstance(result, SOAPpy.voidType):
result = {"Result": result}
response = SOAPpy.buildSOAP(kw={'%sResponse' % methodName: result},
encoding=self.encoding)
self._sendResponse(request, response)
def _gotError(self, failure, request, methodName):
e = failure.value
if isinstance(e, SOAPpy.faultType):
fault = e
else:
fault = SOAPpy.faultType("%s:Server" % SOAPpy.NS.ENV_T,
"Method %s failed." % methodName)
response = SOAPpy.buildSOAP(fault, encoding=self.encoding)
self._sendResponse(request, response, status=500)
def _sendResponse(self, request, response, status=200):
request.setResponseCode(status)
if self.encoding is not None:
mimeType = 'text/xml; charset="%s"' % self.encoding
else:
mimeType = "text/xml"
request.setHeader("Content-type", mimeType)
request.setHeader("Content-length", str(len(response)))
request.write(response)
request.finish()
class Proxy:
"""A Proxy for making remote SOAP calls.
Pass the URL of the remote SOAP server to the constructor.
Use proxy.callRemote('foobar', 1, 2) to call remote method
'foobar' with args 1 and 2, proxy.callRemote('foobar', x=1)
will call foobar with named argument 'x'.
"""
# at some point this should have encoding etc. kwargs
def __init__(self, url, namespace=None, header=None):
self.url = url
self.namespace = namespace
self.header = header
def _cbGotResult(self, result):
result = SOAPpy.parseSOAPRPC(result)
if hasattr(result, 'Result'):
return result.Result
elif len(result) == 1:
## SOAPpy 0.11.6 wraps the return results in a containing structure.
## This check added to make Proxy behaviour emulate SOAPProxy, which
## flattens the structure by default.
## This behaviour is OK because even singleton lists are wrapped in
## another singleton structType, which is almost always useless.
return result[0]
else:
return result
def callRemote(self, method, *args, **kwargs):
payload = SOAPpy.buildSOAP(args=args, kw=kwargs, method=method,
header=self.header, namespace=self.namespace)
return client.getPage(self.url, postdata=payload, method="POST",
headers={'content-type': 'text/xml',
'SOAPAction': method}
).addCallback(self._cbGotResult)
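# Illustrative sketch (not part of the original module; all names below are
# invented for the example): publishing a SOAP method and calling it with
# the Proxy class above. The server side also needs
# `from twisted.internet import reactor`.
#
#     class Calculator(SOAPPublisher):
#         def soap_add(self, a, b):
#             return a + b
#
#     # server side: reactor.listenTCP(8080, server.Site(Calculator()))
#     # client side: Proxy('http://localhost:8080/').callRemote('add', 1, 2)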
| bsd-3-clause |
jlegendary/orange | Orange/orng/orngProjectionPursuit.py | 6 | 7987 | import orange
import numpy
import scipy.special
import scipy.optimize
import scipy.stats
from pylab import *
def sqrtm(mat):
""" Retruns the square root of the matrix mat """
U, S, V = numpy.linalg.svd(mat)
D = numpy.diag(numpy.sqrt(S))
return numpy.dot(numpy.dot(U,D),V)
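# Note on sqrtm above: for a symmetric positive semi-definite `mat` (e.g. a
# correlation matrix, as used below), numpy.dot(sqrtm(mat), sqrtm(mat))
# recovers `mat` up to floating-point error, since then V == U.T and
# U * sqrt(S) * V squares back to U * S * V.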
def standardize(mat):
""" Subtracts means and multiplies by diagonal elements of inverse
square root of covariance matrix.
"""
av = numpy.average(mat, axis=0)
sigma = numpy.corrcoef(mat, rowvar=0)
srSigma = sqrtm(sigma)
isrSigma = numpy.linalg.inv(srSigma)
return (mat-av) * numpy.diag(isrSigma)
def friedman_tmp_func(alpha, Z=numpy.zeros((1,1)), J=5, n=1):
alpha = numpy.array(alpha)
pols = [scipy.special.legendre(j) for j in range(0,J+1)]
vals0 = [numpy.dot(alpha.T, Z[i,:]) for i in range(n)]
def f_tmp(x): return 2*x-1
vals = map(f_tmp, map(scipy.stats.zprob, vals0))
val = [1./n*sum(map(p, vals))**2 for p in pols]
return vals, pols, - 0.5 * sum([(2*j+1)*v for j, v in enumerate(val)])
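# Note on friedman_tmp_func above: this is a variant of Friedman's (1987)
# one-dimensional projection index. Projected points are pushed through the
# standard normal CDF (scipy.stats.zprob) and mapped onto [-1, 1], then
# expanded in Legendre polynomials; the negated weighted sum of squared
# expansion coefficients is returned, so minimizing it favours projections
# that look least normal.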
class ProjectionPursuit:
FRIEDMAN = 0
MOMENT = 1
SILHUETTE = 2
HARTINGAN = 3
def __init__(self, data, index = FRIEDMAN, dim=2, maxiter=10):
self.dim = dim
if type(data) == orange.ExampleTable:
self.dataNP = data.toNumpy()[0] # TODO: check if conversion of discrete values works ok
else:
self.dataNP = data
self.Z = standardize(self.dataNP)
self.totalSize, self.nVars = numpy.shape(self.Z)
self.maxiter = maxiter
self.currentOptimum = None
self.index = index
def optimize(self, maxiter = 5, opt_method=scipy.optimize.fmin):
func = self.getIndex()
if self.currentOptimum != None:
x = self.currentOptimum
else:
x = numpy.random.rand(self.dim * self.nVars)
alpha = opt_method(func, x, maxiter=maxiter).reshape(self.dim * self.nVars,1)
self.currentOptimum = alpha
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def find_optimum(self, opt_method=scipy.optimize.fmin):
func = self.getIndex()
alpha = opt_method(func, \
numpy.random.rand(self.dim * self.nVars),\
maxiter=self.maxiter).reshape(self.dim * self.nVars,1)
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def getIndex(self):
if self.index == self.FRIEDMAN:
return self.getFriedmanIndex()
elif self.index == self.MOMENT:
return self.getMomentIndex()
elif self.index == self.SILHUETTE:
return self.getSilhouetteBasedIndex()
elif self.index == self.HARTINGAN:
return self.getHartinganBasedIndex()
def getFriedmanIndex(self, J=5):
if self.dim == 1:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
vals, pols, val = friedman_tmp_func(alpha, Z=Z, J=J, n=n)
return val
elif self.dim == 2:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
vals1, pols, val1 = friedman_tmp_func(alpha1, Z=Z, J=J, n=n)
vals2, pols, val2 = friedman_tmp_func(alpha2, Z=Z, J=J, n=n)
val12 = - 0.5 * sum([sum([(2*j+1)*(2*k+1)*vals1[j]*vals2[k] for k in range(0, J+1-j)]) \
for j in range(0,J+1)])
## print val1, val2
return 0.5 * (val1 + val2 + val12)
return func
    def getMomentIndex(self):  # a factor of 1./12 could optionally be added
if self.dim == 1:
def func(alpha):
smpl = numpy.dot(self.Z, alpha)
return scipy.stats.kstat(smpl, n=3) ** 2 + 0.25 * scipy.stats.kstat(smpl, n=4)
else:
print "To do."
return func
def getSilhouetteBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km = orngClustering.KMeans(smpl, centroids=nClusters)
score = orngClustering.score_silhouette(km)
return -score
import functools
silhIndex = functools.partial(func, nClusters=nClusters)
return silhIndex
def getHartinganBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km1 = orngClustering.KMeans(smpl, centroids=nClusters)
km2 = orngClustering.KMeans(smpl, centroids=nClusters)
score = (self.totalSize - nClusters - 1) * (km1.score-km2.score) / (km2.score)
return -score
import functools
hartinganIndex = functools.partial(func, nClusters=nClusters)
return hartinganIndex
def draw_scatter_hist(x,y, fileName="lala.png"):
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
clf()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
figure(1, figsize=(8,8))
axScatter = axes(rect_scatter)
axHistx = axes(rect_histx)
axHisty = axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = numpy.max([numpy.max(np.fabs(x)), numpy.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim( (-lim, lim) )
axScatter.set_ylim( (-lim, lim) )
bins = numpy.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
savefig(fileName)
if __name__=="__main__":
## data = orange.ExampleTable("c:\\Work\\Subgroup discovery\\iris.tab")
data = orange.ExampleTable(r"E:\Development\Orange Datasets\UCI\iris.tab")
data = data.select(data.domain.attributes)
impmin = orange.ImputerConstructor_minimal(data)
data = impmin(data)
ppy = ProjectionPursuit(data, dim=2, maxiter=100)
#ppy.friedman_index(J=5)
#ppy.silhouette_based_index(nClusters=2)
## import os
## os.chdir("C:\\Work\\Subgroup discovery")
#draw_scatter_hist(ppy.friedmanProjData[:,0], ppy.friedmanProjData[:,1])
#draw_scatter_hist(ppy.silhouetteProjData[:,0], ppy.silhouetteProjData[:,1])
print ppy.optimize()
| gpl-3.0 |
evansd/django | tests/generic_views/test_dates.py | 5 | 35337 | import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Artist, Author, Book, BookSigning, Page
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
@override_settings(ROOT_URLCONF='generic_views.urls')
class ArchiveIndexViewTests(TestDataMixin, TestCase):
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertNotIn('latest', res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
def test_archive_view_custom_sorting(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_custom_sorting_dec(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbynamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('-name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
@override_settings(ROOT_URLCONF='generic_views.urls')
class YearArchiveViewTests(TestDataMixin, TestCase):
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertIsNone(res.context['next_year'])
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_custom_sort_order(self):
        # Zebras comes after Dreaming by name, but before it on '-pubdate', which is the default ordering
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/dates/books/2006/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)])
self.assertEqual(
list(res.context['book_list']),
list(Book.objects.filter(pubdate__year=2006).order_by('name'))
)
self.assertEqual(
list(res.context['object_list']),
list(Book.objects.filter(pubdate__year=2006).order_by('name'))
)
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_two_custom_sort_orders(self):
Book.objects.create(name="Zebras for Dummies", pages=300, pubdate=datetime.date(2006, 9, 1))
Book.objects.create(name="Hunting Hippos", pages=400, pubdate=datetime.date(2006, 3, 1))
res = self.client.get('/dates/books/2006/sortedbypageandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(
list(res.context['date_list']),
[datetime.date(2006, 3, 1), datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)]
)
self.assertEqual(
list(res.context['book_list']),
list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name'))
)
self.assertEqual(
list(res.context['object_list']),
list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name'))
)
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class MonthArchiveViewTests(TestDataMixin, TestCase):
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertIsNone(res.context['next_month'])
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertIsNone(res.context['next_month'])
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertIsNone(res.context['next_month'])
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(
list(res.context['book_list']),
list(Book.objects.filter(pubdate__year=2008, pubdate__month=10))
)
self.assertEqual(
list(res.context['object_list']),
list(Book.objects.filter(pubdate__year=2008, pubdate__month=10))
)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month, day in ((9, 1), (10, 2), (11, 3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 9, 1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class WeekArchiveViewTests(TestDataMixin, TestCase):
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertIsNone(res.context['next_week'])
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertIsNone(res.context['next_week'])
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertIsNone(res.context['next_week'])
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(
list(res.context['book_list']),
list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end))
)
self.assertEqual(
list(res.context['object_list']),
list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end))
)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DayArchiveViewTests(TestDataMixin, TestCase):
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertIsNone(res.context['next_day'])
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertIsNone(res.context['next_day'])
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertIsNone(res.context['next_day'])
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(
list(res.context['book_list']),
list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1))
)
self.assertEqual(
list(res.context['object_list']),
list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1))
)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006\n")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DateDetailViewTests(TestDataMixin, TestCase):
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/%s/' % self.book1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.book1)
self.assertEqual(res.context['book'], self.book1)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/%s/' % self.book1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], self.book1)
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_year_out_of_range(self):
urls = [
'/dates/books/9999/',
'/dates/books/9999/12/',
'/dates/books/9999/week/52/',
]
for url in urls:
with self.subTest(url=url):
res = self.client.get(url)
self.assertEqual(res.status_code, 404)
self.assertEqual(res.context['exception'], 'Date out of range')
def test_invalid_url(self):
with self.assertRaises(AttributeError):
self.client.get("/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Custom querysets are used when provided to
BaseDateDetailView.get_object().
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/%s/' % self.book2.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.book2)
self.assertEqual(res.context['book'], self.book2)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/9999999/')
self.assertEqual(res.status_code, 404)
def test_get_object_custom_queryset_numqueries(self):
with self.assertNumQueries(1):
self.client.get('/dates/books/get_object_custom_queryset/2006/may/01/2/')
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
| bsd-3-clause |
shlee322/opencampus | cron/lecturerecommend.py | 2 | 2815 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 opencampus.kr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from opencampus.common.db import db
from opencampus.common.models import Campus
from opencampus.module.lecture.models import Lecture, LectureAnalogue
def load_student_ids(campus_id):
page = 0
student_ids = []
while True:
lectures = Lecture.objects(campus_id=campus_id).skip(20*page).limit(20)
if len(lectures) < 1:
break
print('load page %s' % page)
for lecture in lectures:
for student in lecture.students:
student_ids.append(student)
page += 1
student_ids = list(set(student_ids))
student_ids.sort(reverse=True)
return student_ids
def check_data(campus_id, student_id1, lectures1, student_id2, lectures2):
if student_id1 == student_id2:
return
lectures1 = set([l.subject_code for l in lectures1])
lectures2 = set([l.subject_code for l in lectures2])
    analogue = float(len(lectures1 & lectures2)) / len(lectures1 | lectures2)  # true division, also on Python 2
if analogue >= 1.0:
return
try:
analogue_obj = LectureAnalogue.objects(
campus_id=campus_id,
student_id=student_id1
).get()
if analogue_obj.point >= analogue:
return
    except LectureAnalogue.DoesNotExist:  # no stored record yet for this student
analogue_obj = LectureAnalogue(
campus_id=campus_id,
student_id=student_id1
)
analogue_obj.point = analogue
analogue_obj.target = student_id2
analogue_obj.save()
print('%s-%s %s' % (student_id1, student_id2, analogue))
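# The similarity computed above is the Jaccard index of the two subject-code
# sets, |A & B| / |A | B|. A minimal standalone version for reference
# (jaccard is an illustrative name, not used elsewhere in this script):
def jaccard(codes1, codes2):
    a, b = set(codes1), set(codes2)
    return float(len(a & b)) / len(a | b)   # true division, also on Python 2
# e.g. jaccard(['CS101', 'CS102'], ['CS102', 'CS103']) returns 1/3.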
campus_page = 0
while True:
campuses = Campus.objects().skip(20*campus_page).limit(20)
if len(campuses) < 1:
break
campus_page += 1
for campus in campuses:
campus_student_ids = load_student_ids(campus.id)
for student_id1 in campus_student_ids:
lecture1 = Lecture.objects(students__in=[student_id1], campus_id=campus.id)
for student_id2 in campus_student_ids:
lecture2 = Lecture.objects(students__in=[student_id2], campus_id=campus.id)
check_data(campus.id, student_id1, lecture1, student_id2, lecture2)
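# The pair loop above re-queries each student's lectures inside the inner
# loop, i.e. O(n^2) queries per campus. A sketch of the same pass with each
# student's lectures fetched once (illustrative helper, not called by this
# script):
def compute_campus_analogues(campus):
    student_ids = load_student_ids(campus.id)
    lectures_by_student = dict(
        (sid, list(Lecture.objects(students__in=[sid], campus_id=campus.id)))
        for sid in student_ids)
    for sid1 in student_ids:
        for sid2 in student_ids:
            check_data(campus.id, sid1, lectures_by_student[sid1],
                       sid2, lectures_by_student[sid2])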
| agpl-3.0 |
gylian/sickbeard | lib/requests/packages/chardet2/euctwfreq.py | 323 | 34864 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
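# A schematic of how a table like the one below is typically consumed
# (hedged sketch, not the detector's actual code): characters whose frequency
# order falls inside the 512 most frequent count as "typical", and the
# typical/atypical ratio is scaled by EUCTW_TYPICAL_DISTRIBUTION_RATIO into a
# confidence value capped below 1.0.
def _sketch_distribution_confidence(freq_chars, total_chars,
                                    typical_ratio=EUCTW_TYPICAL_DISTRIBUTION_RATIO):
    if total_chars <= 0:
        return 0.0
    if total_chars == freq_chars:
        return 0.99
    r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
    return min(r, 0.99)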
# Char to FreqOrder table
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = ( \
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
| gpl-3.0 |
tomtor/QGIS | python/plugins/processing/algs/gdal/GdalAlgorithmProvider.py | 14 | 7128 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from osgeo import gdal
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsApplication,
QgsProcessingProvider,
QgsRuntimeProfiler)
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from .GdalUtils import GdalUtils
from .AssignProjection import AssignProjection
from .aspect import aspect
from .buildvrt import buildvrt
from .ClipRasterByExtent import ClipRasterByExtent
from .ClipRasterByMask import ClipRasterByMask
from .ColorRelief import ColorRelief
from .contour import contour, contour_polygon
from .Datasources2Vrt import Datasources2Vrt
from .fillnodata import fillnodata
from .gdalinfo import gdalinfo
from .gdal2tiles import gdal2tiles
from .gdal2xyz import gdal2xyz
from .gdaladdo import gdaladdo
from .gdalcalc import gdalcalc
from .gdaltindex import gdaltindex
from .GridAverage import GridAverage
from .GridDataMetrics import GridDataMetrics
from .GridInverseDistance import GridInverseDistance
from .GridInverseDistanceNearestNeighbor import GridInverseDistanceNearestNeighbor
from .GridLinear import GridLinear
from .GridNearestNeighbor import GridNearestNeighbor
from .hillshade import hillshade
from .merge import merge
from .nearblack import nearblack
from .pct2rgb import pct2rgb
from .polygonize import polygonize
from .proximity import proximity
from .rasterize import rasterize
from .rearrange_bands import rearrange_bands
from .retile import retile
from .rgb2pct import rgb2pct
from .roughness import roughness
from .sieve import sieve
from .slope import slope
from .translate import translate
from .tpi import tpi
from .tri import tri
from .warp import warp
from .pansharp import pansharp
from .rasterize_over_fixed_value import rasterize_over_fixed_value
from .viewshed import viewshed
from .extractprojection import ExtractProjection
from .rasterize_over import rasterize_over
from .Buffer import Buffer
from .ClipVectorByExtent import ClipVectorByExtent
from .ClipVectorByMask import ClipVectorByMask
from .Dissolve import Dissolve
from .ExecuteSql import ExecuteSql
from .OffsetCurve import OffsetCurve
from .ogr2ogr import ogr2ogr
from .ogrinfo import ogrinfo
from .OgrToPostGis import OgrToPostGis
from .ogr2ogrtopostgislist import Ogr2OgrToPostGisList
from .OneSideBuffer import OneSideBuffer
from .PointsAlongLines import PointsAlongLines
# from .ogr2ogrtabletopostgislist import Ogr2OgrTableToPostGisList
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class GdalAlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
QgsApplication.processingRegistry().addAlgorithmAlias('qgis:buildvirtualvector', 'gdal:buildvirtualvector')
def load(self):
with QgsRuntimeProfiler.profile('GDAL Provider'):
ProcessingConfig.settingIcons[self.name()] = self.icon()
ProcessingConfig.addSetting(Setting(self.name(), 'ACTIVATE_GDAL',
self.tr('Activate'), True))
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
ProcessingConfig.removeSetting('ACTIVATE_GDAL')
def isActive(self):
return ProcessingConfig.getSetting('ACTIVATE_GDAL')
def setActive(self, active):
ProcessingConfig.setSettingValue('ACTIVATE_GDAL', active)
def name(self):
return 'GDAL'
def longName(self):
version = GdalUtils.readableVersion()
return 'GDAL ({})'.format(version)
def id(self):
return 'gdal'
def helpId(self):
return 'gdal'
def icon(self):
return QgsApplication.getThemeIcon("/providerGdal.svg")
def svgIconPath(self):
return QgsApplication.iconPath("providerGdal.svg")
def loadAlgorithms(self):
self.algs = [
AssignProjection(),
aspect(),
buildvrt(),
ClipRasterByExtent(),
ClipRasterByMask(),
ColorRelief(),
contour(),
contour_polygon(),
Datasources2Vrt(),
fillnodata(),
gdalinfo(),
gdal2tiles(),
gdal2xyz(),
gdaladdo(),
gdalcalc(),
gdaltindex(),
GridAverage(),
GridDataMetrics(),
GridInverseDistance(),
GridInverseDistanceNearestNeighbor(),
GridLinear(),
GridNearestNeighbor(),
hillshade(),
merge(),
nearblack(),
pct2rgb(),
polygonize(),
proximity(),
rasterize(),
rearrange_bands(),
retile(),
rgb2pct(),
roughness(),
sieve(),
slope(),
translate(),
tpi(),
tri(),
warp(),
pansharp(),
# rasterize(),
ExtractProjection(),
rasterize_over(),
rasterize_over_fixed_value(),
# ----- OGR tools -----
Buffer(),
ClipVectorByExtent(),
ClipVectorByMask(),
Dissolve(),
ExecuteSql(),
OffsetCurve(),
ogr2ogr(),
ogrinfo(),
OgrToPostGis(),
Ogr2OgrToPostGisList(),
OneSideBuffer(),
PointsAlongLines(),
# Ogr2OgrTableToPostGisList(),
]
if int(gdal.VersionInfo()) > 3010000:
self.algs.append(viewshed())
for a in self.algs:
self.addAlgorithm(a)
def supportedOutputRasterLayerExtensions(self):
return GdalUtils.getSupportedRasterExtensions()
def supportsNonFileBasedOutput(self):
"""
GDAL Provider doesn't support non file based outputs
"""
return False
def tr(self, string, context=''):
if context == '':
context = 'GdalAlgorithmProvider'
return QCoreApplication.translate(context, string)
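# Editor's sketch, not part of the original module: a plugin would typically
# hand this provider to the processing framework through the
# QgsProcessingRegistry API shown below. The helper name and the idea of
# calling it from a plugin's initProcessing() hook are illustrative
# assumptions; addProvider() itself is the standard registry call.
def _example_register_provider():
    provider = GdalAlgorithmProvider()
    QgsApplication.processingRegistry().addProvider(provider)
    return provider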
| gpl-2.0 |
xsynergy510x/android_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/inspector_timeline_unittest.py | 47 | 1636 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.core.backends.chrome import inspector_timeline
from telemetry.unittest import tab_test_case
class InspectorTimelineTabTest(tab_test_case.TabTestCase):
"""Test case that opens a browser and creates and then checks an event."""
def _WaitForAnimationFrame(self):
"""Wait until the variable window.done is set on the tab."""
def _IsDone():
return bool(self._tab.EvaluateJavaScript('window.done'))
util.WaitFor(_IsDone, 5)
def testGotTimeline(self):
# While the timeline is recording, call window.webkitRequestAnimationFrame.
    # This will create a FireAnimationFrame record, which can be checked below. See:
# https://developer.mozilla.org/en/docs/Web/API/window.requestAnimationFrame
with inspector_timeline.InspectorTimeline.Recorder(self._tab):
self._tab.ExecuteJavaScript(
"""
var done = false;
function sleep(ms) {
var endTime = (new Date().getTime()) + ms;
while ((new Date().getTime()) < endTime);
}
window.webkitRequestAnimationFrame(function() {
sleep(10);
window.done = true;
});
""")
self._WaitForAnimationFrame()
# There should be at least a FireAnimationFrame record with some duration.
events = self._tab.timeline_model.GetAllEventsOfName('FireAnimationFrame')
self.assertTrue(len(events) > 0)
self.assertTrue(events[0].duration > 0)
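# Editor's sketch, not part of the original test: the generic shape of the
# poll-and-wait idiom used by _WaitForAnimationFrame above. The helper name,
# flag expression and timeout are illustrative assumptions; util.WaitFor
# simply re-evaluates the predicate until it is truthy or the timeout expires.
def _ExampleWaitForFlag(tab, flag='window.done', timeout=5):
  def _IsSet():
    return bool(tab.EvaluateJavaScript(flag))
  util.WaitFor(_IsSet, timeout)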
| bsd-3-clause |
lidiamcfreitas/FenixScheduleMaker | ScheduleMaker/brython/www/src/Lib/test/test_ipaddress.py | 29 | 72749 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import operator
import ipaddress
class BaseTestCase(unittest.TestCase):
# One big change in ipaddress over the original ipaddr module is
# error reporting that tries to assume users *don't know the rules*
# for what constitutes an RFC compliant IP address
# Ensuring these errors are emitted correctly in all relevant cases
# meant moving to a more systematic test structure that allows the
# test structure to map more directly to the module structure
# Note that if the constructors are refactored so that addresses with
# multiple problems get classified differently, that's OK - just
# move the affected examples to the newly appropriate test case.
# There is some duplication between the original relatively ad hoc
# test suite and the new systematic tests. While some redundancy in
# testing is considered preferable to accidentally deleting a valid
# test, the original test suite will likely be reduced over time as
# redundant tests are identified.
@property
def factory(self):
raise NotImplementedError
@contextlib.contextmanager
def assertCleanError(self, exc_type, details, *args):
"""
Ensure exception does not display a context by default
Wraps unittest.TestCase.assertRaisesRegex
"""
if args:
details = details % args
cm = self.assertRaisesRegex(exc_type, details)
with cm as exc:
yield exc
# Ensure we produce clean tracebacks on failure
if exc.exception.__context__ is not None:
self.assertTrue(exc.exception.__suppress_context__)
def assertAddressError(self, details, *args):
"""Ensure a clean AddressValueError"""
return self.assertCleanError(ipaddress.AddressValueError,
details, *args)
def assertNetmaskError(self, details, *args):
"""Ensure a clean NetmaskValueError"""
return self.assertCleanError(ipaddress.NetmaskValueError,
details, *args)
def assertInstancesEqual(self, lhs, rhs):
"""Check constructor arguments produce equivalent instances"""
self.assertEqual(self.factory(lhs), self.factory(rhs))
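def _example_clean_error_sketch():
    # Editor's sketch, not part of the original suite: the "clean error"
    # behaviour the helpers above assert can be observed directly -- the
    # exception message names both the offending component and the full
    # original input string.
    try:
        ipaddress.IPv4Address('256.0.0.1')
    except ipaddress.AddressValueError as exc:
        assert '256' in str(exc) and '256.0.0.1' in str(exc)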
class CommonTestMixin:
def test_empty_address(self):
with self.assertAddressError("Address cannot be empty"):
self.factory("")
def test_floats_rejected(self):
with self.assertAddressError(re.escape(repr("1.0"))):
self.factory(1.0)
def test_not_an_index_issue15559(self):
# Implementing __index__ makes for a very nasty interaction with the
# bytes constructor. Thus, we disallow implicit use as an integer
self.assertRaises(TypeError, operator.index, self.factory(1))
self.assertRaises(TypeError, hex, self.factory(1))
self.assertRaises(TypeError, bytes, self.factory(1))
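    def _example_explicit_conversion_sketch(self):
        # Editor's sketch, not part of the original suite (the leading
        # underscore keeps unittest from collecting it): conversion to an
        # integer must be explicit. int() works, while the implicit integer
        # protocols rejected above would otherwise let bytes(addr) silently
        # build a zero-filled buffer instead of a packed address.
        self.assertEqual(int(self.factory(1)), 1)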
class CommonTestMixin_v4(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("000.000.000.000", "0.0.0.0")
self.assertInstancesEqual("192.168.000.001", "192.168.0.1")
def test_int(self):
self.assertInstancesEqual(0, "0.0.0.0")
self.assertInstancesEqual(3232235521, "192.168.0.1")
def test_packed(self):
self.assertInstancesEqual(bytes.fromhex("00000000"), "0.0.0.0")
self.assertInstancesEqual(bytes.fromhex("c0a80001"), "192.168.0.1")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**32) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % 2**32)):
self.factory(2**32)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 4) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(3)
assertBadLength(5)
class CommonTestMixin_v6(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("0000::0000", "::")
self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")
def test_int(self):
self.assertInstancesEqual(0, "::")
self.assertInstancesEqual(3232235521, "::c0a8:1")
def test_packed(self):
addr = bytes(12) + bytes.fromhex("00000000")
self.assertInstancesEqual(addr, "::")
addr = bytes(12) + bytes.fromhex("c0a80001")
self.assertInstancesEqual(addr, "::c0a8:1")
addr = bytes.fromhex("c0a80001") + bytes(12)
self.assertInstancesEqual(addr, "c0a8:1::")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**128) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % 2**128)):
self.factory(2**128)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 16) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
                self.factory(addr)
assertBadLength(15)
assertBadLength(17)
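def _example_packed_roundtrip_sketch():
    # Editor's sketch, not part of the original suite: the .packed property
    # is the inverse of the bytes-constructor forms exercised above.
    assert ipaddress.IPv4Address('192.168.0.1').packed == \
        bytes.fromhex('c0a80001')
    assert ipaddress.IPv6Address(bytes(12) + bytes.fromhex('c0a80001')) == \
        ipaddress.IPv6Address('::c0a8:1')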
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
factory = ipaddress.IPv4Address
def test_network_passed_as_address(self):
addr = "127.0.0.1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv4Address(addr)
def test_bad_address_split(self):
def assertBadSplit(addr):
with self.assertAddressError("Expected 4 octets in %r", addr):
ipaddress.IPv4Address(addr)
assertBadSplit("127.0.1")
assertBadSplit("42.42.42.42.42")
assertBadSplit("42.42.42")
assertBadSplit("42.42")
assertBadSplit("42")
assertBadSplit("42..42.42.42")
assertBadSplit("42.42.42.42.")
assertBadSplit("42.42.42.42...")
assertBadSplit(".42.42.42.42")
assertBadSplit("...42.42.42.42")
assertBadSplit("016.016.016")
assertBadSplit("016.016")
assertBadSplit("016")
assertBadSplit("000")
assertBadSplit("0x0a.0x0a.0x0a")
assertBadSplit("0x0a.0x0a")
assertBadSplit("0x0a")
assertBadSplit(".")
assertBadSplit("bogus")
assertBadSplit("bogus.com")
assertBadSplit("1000")
assertBadSplit("1000000000000000")
assertBadSplit("192.168.0.1.com")
def test_empty_octet(self):
def assertBadOctet(addr):
with self.assertAddressError("Empty octet not permitted in %r",
addr):
ipaddress.IPv4Address(addr)
assertBadOctet("42..42.42")
assertBadOctet("...")
def test_invalid_characters(self):
def assertBadOctet(addr, octet):
msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
assertBadOctet("42.42.42.-0", "-0")
assertBadOctet("42.42.42.+0", "+0")
assertBadOctet("42.42.42.-42", "-42")
assertBadOctet("+1.+2.+3.4", "+1")
assertBadOctet("1.2.3.4e0", "4e0")
assertBadOctet("1.2.3.4::", "4::")
assertBadOctet("1.a.2.3", "a")
def test_octal_decimal_ambiguity(self):
def assertBadOctet(addr, octet):
msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("016.016.016.016", "016")
assertBadOctet("001.000.008.016", "008")
def test_octet_length(self):
def assertBadOctet(addr, octet):
msg = "At most 3 characters permitted in %r in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("0000.000.000.000", "0000")
assertBadOctet("12345.67899.-54321.-98765", "12345")
def test_octet_limit(self):
def assertBadOctet(addr, octet):
msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("257.0.0.0", 257)
assertBadOctet("192.168.0.999", 999)
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
factory = ipaddress.IPv6Address
def test_network_passed_as_address(self):
addr = "::1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv6Address(addr)
def test_bad_address_split_v6_not_enough_parts(self):
def assertBadSplit(addr):
msg = "At least 3 parts expected in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":")
assertBadSplit(":1")
assertBadSplit("FEDC:9878")
def test_bad_address_split_v6_too_many_colons(self):
def assertBadSplit(addr):
msg = "At most 8 colons permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("9:8:7:6:5:4:3::2:1")
assertBadSplit("10:9:8:7:6:5:4:3:2:1")
assertBadSplit("::8:7:6:5:4:3:2:1")
assertBadSplit("8:7:6:5:4:3:2:1::")
# A trailing IPv4 address is two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
assertBadSplit("9:8:7:6:5:4:3:2:1")
assertBadSplit("7:6:5:4:3:2:1")
# A trailing IPv4 address is two parts
assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
assertBadSplit("7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
def assertBadSplit(addr):
msg = "Expected at most 7 other parts with '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("1:2:3:4::5:6:7:8")
def test_bad_address_split_v6_repeated_double_colon(self):
def assertBadSplit(addr):
msg = "At most one '::' permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe::1::1")
assertBadSplit("1::2::3::4:5")
assertBadSplit("2001::db:::1")
assertBadSplit("3ffe::1::")
assertBadSplit("::3ffe::1")
assertBadSplit(":3ffe::1::1")
assertBadSplit("3ffe::1::1:")
assertBadSplit(":3ffe::1::1:")
assertBadSplit(":::")
assertBadSplit('2001:db8:::1')
def test_bad_address_split_v6_leading_colon(self):
def assertBadSplit(addr):
msg = "Leading ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":2001:db8::1")
assertBadSplit(":1:2:3:4:5:6:7")
assertBadSplit(":1:2:3:4:5:6:")
assertBadSplit(":6:5:4:3:2:1::")
def test_bad_address_split_v6_trailing_colon(self):
def assertBadSplit(addr):
msg = "Trailing ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("2001:db8::1:")
assertBadSplit("1:2:3:4:5:6:7:")
assertBadSplit("::1.2.3.4:")
assertBadSplit("::7:6:5:4:3:2:")
def test_bad_v4_part_in(self):
def assertBadAddressPart(addr, v4_error):
with self.assertAddressError("%s in %r", v4_error, addr):
ipaddress.IPv6Address(addr)
assertBadAddressPart("3ffe::1.net", "Expected 4 octets in '1.net'")
assertBadAddressPart("3ffe::127.0.1",
"Expected 4 octets in '127.0.1'")
assertBadAddressPart("::1.2.3",
"Expected 4 octets in '1.2.3'")
assertBadAddressPart("::1.2.3.4.5",
"Expected 4 octets in '1.2.3.4.5'")
assertBadAddressPart("3ffe::1.1.1.net",
"Only decimal digits permitted in 'net' "
"in '1.1.1.net'")
def test_invalid_characters(self):
def assertBadPart(addr, part):
msg = "Only hex digits permitted in %r in %r" % (part, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv6Address(addr)
assertBadPart("3ffe::goog", "goog")
assertBadPart("3ffe::-0", "-0")
assertBadPart("3ffe::+0", "+0")
assertBadPart("3ffe::-1", "-1")
assertBadPart("1.2.3.4::", "1.2.3.4")
assertBadPart('1234:axy::b', "axy")
def test_part_length(self):
def assertBadPart(addr, part):
msg = "At most 4 characters permitted in %r in %r"
with self.assertAddressError(msg, part, addr):
ipaddress.IPv6Address(addr)
assertBadPart("::00000", "00000")
assertBadPart("3ffe::10000", "10000")
assertBadPart("02001:db8::", "02001")
assertBadPart('2001:888888::1', "888888")
class NetmaskTestMixin_v4(CommonTestMixin_v4):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "1.2.3.4/32/24"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("bogus", "Expected 4 octets")
assertBadAddress("google.com", "Expected 4 octets")
assertBadAddress("10/8", "Expected 4 octets")
assertBadAddress("::1.2.3.4", "Only decimal digits")
assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask"
with self.assertNetmaskError(msg % netmask):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("1.2.3.4", "")
assertBadNetmask("1.2.3.4", "33")
assertBadNetmask("1.2.3.4", "254.254.255.256")
assertBadNetmask("1.1.1.1", "254.xyz.2.3")
assertBadNetmask("1.1.1.1", "240.255.0.0")
assertBadNetmask("1.1.1.1", "pudding")
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Network
class NetmaskTestMixin_v6(CommonTestMixin_v6):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "cafe:cafe::/128/190"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("google.com", "At least 3 parts")
assertBadAddress("1.2.3.4", "At least 3 parts")
assertBadAddress("10/8", "At least 3 parts")
assertBadAddress("1234:axy::b", "Only hex digits")
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask"
with self.assertNetmaskError(msg % netmask):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("::1", "")
assertBadNetmask("::1", "::1")
assertBadNetmask("::1", "1::")
assertBadNetmask("::1", "129")
assertBadNetmask("::1", "pudding")
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Network
class FactoryFunctionErrors(BaseTestCase):
def assertFactoryError(self, factory, kind):
"""Ensure a clean ValueError with the expected message"""
addr = "camelot"
msg = '%r does not appear to be an IPv4 or IPv6 %s'
with self.assertCleanError(ValueError, msg, addr, kind):
factory(addr)
def test_ip_address(self):
self.assertFactoryError(ipaddress.ip_address, "address")
def test_ip_interface(self):
self.assertFactoryError(ipaddress.ip_interface, "interface")
def test_ip_network(self):
self.assertFactoryError(ipaddress.ip_network, "network")
class ComparisonTests(unittest.TestCase):
v4addr = ipaddress.IPv4Address(1)
v4net = ipaddress.IPv4Network(1)
v4intf = ipaddress.IPv4Interface(1)
v6addr = ipaddress.IPv6Address(1)
v6net = ipaddress.IPv6Network(1)
v6intf = ipaddress.IPv6Interface(1)
v4_addresses = [v4addr, v4intf]
v4_objects = v4_addresses + [v4net]
v6_addresses = [v6addr, v6intf]
v6_objects = v6_addresses + [v6net]
objects = v4_objects + v6_objects
def test_foreign_type_equality(self):
# __eq__ should never raise TypeError directly
other = object()
for obj in self.objects:
self.assertNotEqual(obj, other)
self.assertFalse(obj == other)
self.assertEqual(obj.__eq__(other), NotImplemented)
self.assertEqual(obj.__ne__(other), NotImplemented)
def test_mixed_type_equality(self):
# Ensure none of the internal objects accidentally
# expose the right set of attributes to become "equal"
for lhs in self.objects:
for rhs in self.objects:
if lhs is rhs:
continue
self.assertNotEqual(lhs, rhs)
def test_containment(self):
for obj in self.v4_addresses:
self.assertIn(obj, self.v4net)
for obj in self.v6_addresses:
self.assertIn(obj, self.v6net)
for obj in self.v4_objects + [self.v6net]:
self.assertNotIn(obj, self.v6net)
for obj in self.v6_objects + [self.v4net]:
self.assertNotIn(obj, self.v4net)
def test_mixed_type_ordering(self):
for lhs in self.objects:
for rhs in self.objects:
if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
continue
self.assertRaises(TypeError, lambda: lhs < rhs)
self.assertRaises(TypeError, lambda: lhs > rhs)
self.assertRaises(TypeError, lambda: lhs <= rhs)
self.assertRaises(TypeError, lambda: lhs >= rhs)
def test_mixed_type_key(self):
# with get_mixed_type_key, you can sort addresses and network.
v4_ordered = [self.v4addr, self.v4net, self.v4intf]
v6_ordered = [self.v6addr, self.v6net, self.v6intf]
self.assertEqual(v4_ordered,
sorted(self.v4_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v6_ordered,
sorted(self.v6_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v4_ordered + v6_ordered,
sorted(self.objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))
def test_incompatible_versions(self):
# These should always raise TypeError
v4addr = ipaddress.ip_address('1.1.1.1')
v4net = ipaddress.ip_network('1.1.1.1')
v6addr = ipaddress.ip_address('::1')
        v6net = ipaddress.ip_network('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
first, last = ipaddress._find_address_range([
ipaddress.IPv4Address('10.10.10.10'),
ipaddress.IPv4Address('10.10.10.12')])
self.assertEqual(first, last)
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testMissingAddressVersion(self):
class Broken(ipaddress._BaseAddress):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingNetworkVersion(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingAddressClass(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
broken._address_class
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertTrue(ipv4_zero_netmask.network._is_valid_netmask(
str(0)))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0'))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0.0.0.0'))
self.assertFalse(ipv4_zero_netmask._is_valid_netmask('invalid'))
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertTrue(ipv6_zero_netmask.network._is_valid_netmask(
str(0)))
def testIPv4NetAndHostmasks(self):
net = self.ipv4_network
self.assertFalse(net._is_valid_netmask('invalid'))
self.assertTrue(net._is_valid_netmask('128.128.128.128'))
self.assertFalse(net._is_valid_netmask('128.128.128.127'))
self.assertFalse(net._is_valid_netmask('128.128.128.255'))
self.assertTrue(net._is_valid_netmask('255.128.128.128'))
self.assertFalse(net._is_hostmask('invalid'))
self.assertTrue(net._is_hostmask('128.255.255.255'))
self.assertFalse(net._is_hostmask('255.255.255.255'))
self.assertFalse(net._is_hostmask('1.2.3.4'))
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(24, net.prefixlen)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
# special case where only 1 bit is left for address
self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')],
list(ipaddress.ip_network('2.0.0.0/31').hosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
sorted(self.ipv4_network.subnets(new_prefix=27)))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(new_prefix=23))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(prefixlen_diff=3,
new_prefix=27))
self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
sorted(self.ipv6_network.subnets(new_prefix=68)))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(new_prefix=63))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(prefixlen_diff=4,
new_prefix=68))
def testGetSubnets(self):
self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
self.assertEqual(str(list(
self.ipv4_network.subnets())[0].network_address),
'1.2.3.0')
self.assertEqual(str(list(
self.ipv4_network.subnets())[1].network_address),
'1.2.3.128')
self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4_network.subnets(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(65))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(65))
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError,
self.ipv4_interface.network.supernet, 25)
self.assertRaises(ValueError,
self.ipv6_interface.network.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(-1))
def testGetNum_Addresses(self):
self.assertEqual(self.ipv4_network.num_addresses, 256)
self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
128)
self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
9223372036854775808)
self.assertEqual(self.ipv6_network.supernet().num_addresses,
36893488147419103232)
def testContains(self):
self.assertTrue(ipaddress.IPv4Interface('1.2.3.128/25') in
self.ipv4_network)
self.assertFalse(ipaddress.IPv4Interface('1.2.4.1/24') in
self.ipv4_network)
# We can test addresses and string as well.
addr1 = ipaddress.IPv4Address('1.2.3.37')
self.assertTrue(addr1 in self.ipv4_network)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
ipaddress.IPv4Network('1.0.0.0/15')))
def testNth(self):
self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
self.assertEqual(str(self.ipv6_network[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
self.assertTrue(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertFalse(self.ipv4_interface == '')
self.assertFalse(self.ipv4_interface == [])
self.assertFalse(self.ipv4_interface == 2)
self.assertTrue(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv6_interface == '')
self.assertFalse(self.ipv6_interface == [])
self.assertFalse(self.ipv6_interface == 2)
def testNotEqual(self):
self.assertFalse(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertTrue(self.ipv4_interface != '')
self.assertTrue(self.ipv4_interface != [])
self.assertTrue(self.ipv4_interface != 2)
self.assertTrue(self.ipv4_address !=
ipaddress.IPv4Address('1.2.3.5'))
self.assertTrue(self.ipv4_address != '')
self.assertTrue(self.ipv4_address != [])
self.assertTrue(self.ipv4_address != 2)
self.assertFalse(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv6_interface != '')
self.assertTrue(self.ipv6_interface != [])
self.assertTrue(self.ipv6_interface != 2)
self.assertTrue(self.ipv6_address !=
ipaddress.IPv4Address('1.2.3.4'))
self.assertTrue(self.ipv6_address != '')
self.assertTrue(self.ipv6_address != [])
self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface(
'1.2.3.4/255.255.255.255')), '1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
ip5 = ipaddress.IPv4Address('1.1.1.4')
ip6 = ipaddress.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses(
[ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30'),
ipaddress.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
#ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
#ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30')])
# test only IP networks
ip1 = ipaddress.IPv4Network('1.1.0.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/24')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
ip4 = ipaddress.IPv4Network('1.1.3.0/24')
ip5 = ipaddress.IPv4Network('1.1.4.0/24')
# stored in no particular order b/c we want CollapseAddr to call
# [].sort
ip6 = ipaddress.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/22'),
ipaddress.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddress.collapse_addresses([ip1, ip2])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ipaddress.ip_network('1.1.1.1/32')])
ip1 = ipaddress.IPv6Network('2001::/100')
ip2 = ipaddress.IPv6Network('2001::/120')
ip3 = ipaddress.IPv6Network('2001::/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
self.assertEqual(list(collapsed), [ip3])
# the toejam test
addr_tuples = [
(ipaddress.ip_address('1.1.1.1'),
ipaddress.ip_address('::1')),
(ipaddress.IPv4Network('1.1.0.0/24'),
ipaddress.IPv6Network('2001::/120')),
(ipaddress.IPv4Network('1.1.0.0/32'),
ipaddress.IPv6Network('2001::/128')),
]
for ip1, ip2 in addr_tuples:
self.assertRaises(TypeError, ipaddress.collapse_addresses,
[ip1, ip2])
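    def _exampleCollapseSketch(self):
        # Editor's sketch, not part of the original suite (the leading
        # underscore keeps unittest from collecting it): collapse_addresses()
        # accepts any iterable of same-version networks and yields the
        # minimal covering set, so four adjacent /32s merge into one /30.
        nets = [ipaddress.ip_network('192.0.2.%d/32' % i) for i in range(4)]
        self.assertEqual(list(ipaddress.collapse_addresses(nets)),
                         [ipaddress.ip_network('192.0.2.0/30')])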
def testSummarizing(self):
#ip = ipaddress.ip_address
#ipnet = ipaddress.ip_network
summarize = ipaddress.summarize_address_range
ip1 = ipaddress.ip_address('1.1.1.0')
ip2 = ipaddress.ip_address('1.1.1.255')
# summarize works only for IPv4 & IPv6
class IPv7Address(ipaddress.IPv6Address):
@property
def version(self):
return 7
ip_invalid1 = IPv7Address('::1')
ip_invalid2 = IPv7Address('::1')
self.assertRaises(ValueError, list,
summarize(ip_invalid1, ip_invalid2))
# test that a summary over ip4 & ip6 fails
self.assertRaises(TypeError, list,
summarize(ip1, ipaddress.IPv6Address('::1')))
# test a /24 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('1.1.1.8')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1.1.1.0/29'),
ipaddress.ip_network('1.1.1.8')])
# all!
ip1 = ipaddress.IPv4Address(0)
ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
list(summarize(ip1, ip2)))
ip1 = ipaddress.ip_address('1::')
ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test an IPv6 range is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('2::')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1::/16'),
ipaddress.ip_network('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, list,
summarize(ipaddress.ip_address('1.1.1.0'),
ipaddress.ip_address('1.1.0.0')))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_address('::'),
ipaddress.ip_network('1.1.0.0')))
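    def _exampleSummarizeSketch(self):
        # Editor's sketch, not part of the original suite: an arbitrary
        # inclusive range decomposes into the fewest CIDR blocks --
        # .0-.127 is a /25, .128-.129 a /31, and .130 a lone /32.
        nets = ipaddress.summarize_address_range(
            ipaddress.ip_address('192.0.2.0'),
            ipaddress.ip_address('192.0.2.130'))
        self.assertEqual([str(n) for n in nets],
                         ['192.0.2.0/25', '192.0.2.128/31', '192.0.2.130/32'])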
def testAddressComparison(self):
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.1'))
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.2'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::1'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::2'))
def testInterfaceComparison(self):
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.1'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.2'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::1'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertRaises(TypeError, ip1.__lt__,
ipaddress.ip_address('10.10.10.0'))
self.assertRaises(TypeError, ip2.__lt__,
ipaddress.ip_address('10.10.10.0'))
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')
def testOverlaps(self):
other = ipaddress.IPv4Network('1.2.3.0/30')
other2 = ipaddress.IPv4Network('1.2.2.0/24')
other3 = ipaddress.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4_network.overlaps(other))
self.assertFalse(self.ipv4_network.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddress.IPv4Interface(ipv4_string)
v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
'2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
# RFC4291 2.5.5.2
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
ipaddress.ip_address('::FFFF:c000:201'))
# RFC4291 2.2 (part 3) x::d.d.d.d
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4_address.version, 4)
self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
self.assertEqual(self.ipv6_interface.max_prefixlen, 128)
def testPacked(self):
self.assertEqual(self.ipv4_address.packed,
b'\x01\x02\x03\x04')
self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
b'\xff\xfe\xfd\xfc')
self.assertEqual(self.ipv6_address.packed,
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01')
self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ b'\x00' * 6)
self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpStrFromPrefixlen(self):
ipv4 = ipaddress.IPv4Interface('1.2.3.4/24')
self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0')
self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240')
def testIpType(self):
ipv4net = ipaddress.ip_network('1.2.3.4')
ipv4addr = ipaddress.ip_address('1.2.3.4')
ipv6net = ipaddress.ip_network('::1.2.3.4')
ipv6addr = ipaddress.ip_address('::1.2.3.4')
self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEqual(True, ipaddress.ip_interface(
'224.1.1.1/31').is_multicast)
self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_interface(
'192.168.1.1/17').is_private)
self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_network(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_network(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_network('169.254.1.0/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_interface(
'169.254.100.200/24').is_link_local)
self.assertEqual(False,
ipaddress.ip_interface(
'169.255.100.200/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_network(
'127.100.200.254/32').is_loopback)
self.assertEqual(True, ipaddress.ip_network(
'127.42.0.0/16').is_loopback)
self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
# test addresses
self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
self.assertEqual(False,
ipaddress.ip_address('239.255.255.255').is_reserved)
self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_address('169.254.100.200').is_link_local)
self.assertEqual(False,
ipaddress.ip_address('169.255.100.200').is_link_local)
self.assertEqual(True,
ipaddress.ip_address('127.100.200.254').is_loopback)
self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_network(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_network(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_network(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
# test addresses
self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_address(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_address(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(
ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network('10.1.1.0/26')
addr3 = ipaddress.ip_network('10.2.1.0/24')
addr4 = ipaddress.ip_address('10.1.1.0')
addr5 = ipaddress.ip_network('2001:db8::0/32')
self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
[ipaddress.ip_network('10.1.1.64/26'),
ipaddress.ip_network('10.1.1.128/25')])
self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
self.assertEqual(list(addr1.address_exclude(addr1)), [])
def testHash(self):
self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
hash(ipaddress.ip_interface('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
hash(ipaddress.ip_network('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
hash(ipaddress.ip_address('10.1.1.0')))
# i70
self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
hash(ipaddress.ip_address(
int(ipaddress.ip_address('1.2.3.4')._ip))))
ip1 = ipaddress.ip_address('10.1.1.0')
ip2 = ipaddress.ip_address('1::')
dummy = {}
dummy[self.ipv4_address] = None
dummy[self.ipv6_address] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertTrue(self.ipv4_address in dummy)
self.assertTrue(ip2 in dummy)
def testIPBases(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.compressed)
self.assertEqual(
net._ip_int_from_prefix(24),
net._ip_int_from_prefix(None))
net = self.ipv6_network
self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)
self.assertEqual(
self.ipv6_address._string_from_ip_int(self.ipv6_address._ip),
self.ipv6_address._string_from_ip_int(None))
def testIPv6NetworkHelpers(self):
net = self.ipv6_network
self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
net.with_netmask)
self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
net.with_hostmask)
self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.with_prefixlen)
self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
self.assertEqual('1.2.3.0/24', str(net))
def testCopyConstructor(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network(addr1)
addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
addr4 = ipaddress.ip_interface(addr3)
addr5 = ipaddress.IPv4Address('1.1.1.1')
addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddress.IPv6Interface('2001::1')
addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
addr3 = ipaddress.IPv6Network('2001::/96')
addr4 = ipaddress.IPv4Address('192.168.178.1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
addr1.exploded)
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
ipaddress.IPv6Interface('::1/128').exploded)
# issue 77
self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
addr2.exploded)
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
addr3.exploded)
self.assertEqual('192.168.178.1', addr4.exploded)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4_address))
self.assertEqual(42540616829182469433547762482097946625,
int(self.ipv6_address))
def testForceVersion(self):
self.assertEqual(ipaddress.ip_network(1).version, 4)
self.assertEqual(ipaddress.IPv6Network(1).version, 6)
def testWithStar(self):
self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
self.assertEqual(self.ipv4_interface.with_netmask,
"1.2.3.4/255.255.255.0")
self.assertEqual(self.ipv4_interface.with_hostmask,
"1.2.3.4/0.0.0.255")
self.assertEqual(self.ipv6_interface.with_prefixlen,
'2001:658:22a:cafe:200::1/64')
self.assertEqual(self.ipv6_interface.with_netmask,
'2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
# this probably doesn't make much sense, but it's included for
# compatibility with ipv4
self.assertEqual(self.ipv6_interface.with_hostmask,
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertFalse('network_address' in self.ipv4_network._cache)
self.assertFalse('broadcast_address' in self.ipv4_network._cache)
self.assertFalse('hostmask' in self.ipv4_network._cache)
# V4 - populate and test
self.assertEqual(self.ipv4_network.network_address,
ipaddress.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4_network.broadcast_address,
ipaddress.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4_network.hostmask,
ipaddress.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertTrue('broadcast_address' in self.ipv4_network._cache)
self.assertTrue('hostmask' in self.ipv4_network._cache)
# V6 - make sure we're empty
self.assertFalse('broadcast_address' in self.ipv6_network._cache)
self.assertFalse('hostmask' in self.ipv6_network._cache)
# V6 - populate and test
self.assertEqual(self.ipv6_network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6_interface.network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(
self.ipv6_network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
self.assertEqual(
self.ipv6_interface.network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_interface.network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertTrue('broadcast_address' in self.ipv6_network._cache)
self.assertTrue('hostmask' in self.ipv6_network._cache)
self.assertTrue(
'broadcast_address' in self.ipv6_interface.network._cache)
self.assertTrue('hostmask' in self.ipv6_interface.network._cache)
def testTeredo(self):
# stolen from wikipedia
server = ipaddress.IPv4Address('65.54.227.120')
client = ipaddress.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
self.assertEqual((server, client),
ipaddress.ip_address(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
# i77
teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
ipaddress.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
def testsixtofour(self):
sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1')
bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
sixtofouraddr.sixtofour)
self.assertFalse(bad_addr.sixtofour)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
Pulgama/supriya | supriya/realtime/BusGroup.py | 1 | 13504 | import supriya.exceptions
from supriya import CalculationRate
from supriya.realtime.ServerObject import ServerObject
class BusGroup(ServerObject):
"""
A bus group.
::
>>> server = supriya.Server.default().boot()
>>> bus_group = supriya.BusGroup(bus_count=4)
>>> bus_group
<- BusGroup{4}: ??? (control)>
::
>>> bus_group.allocate()
<+ BusGroup{4}: 0 (control)>
::
>>> bus_group[2]
<+ Bus: 2 (control)>
::
>>> for i in range(len(bus_group)):
... bus = bus_group[i]
... value = (i * 0.2) + 0.1
... bus.set(value)
...
>>> bus_values = bus_group.get()
Values in ``scsynth`` don't necessarily have the same precision as in
Python, so we'll round them here for display purposes:
::
>>> print([round(value, 1) for value in bus_values])
[0.1, 0.3, 0.5, 0.7]
::
>>> print(bus_group)
c0
::
>>> bus_group.free()
<- BusGroup{4}: ??? (control)>
"""
### CLASS VARIABLES ###
__documentation_section__ = "Main Classes"
__slots__ = ("_bus_id", "_buses", "_calculation_rate")
### INITIALIZER ###
def __init__(
self, bus_count=1, calculation_rate=CalculationRate.CONTROL, *, bus_id=None
):
import supriya.realtime
ServerObject.__init__(self)
calculation_rate = CalculationRate.from_expr(calculation_rate)
assert calculation_rate in (CalculationRate.AUDIO, CalculationRate.CONTROL)
self._calculation_rate = calculation_rate
bus_count = int(bus_count)
assert 0 < bus_count
self._buses = tuple(
supriya.realtime.Bus(
bus_group_or_index=self, calculation_rate=self.calculation_rate
)
for _ in range(bus_count)
)
assert isinstance(bus_id, (type(None), int))
self._bus_id = bus_id
### SPECIAL METHODS ###
def __contains__(self, item):
"""
Test if a bus belongs to the bus group.
::
>>> bus_group = supriya.BusGroup.control(4)
>>> bus_group[0] in bus_group
True
::
>>> bus = supriya.Bus.audio()
>>> bus in bus_group
False
"""
# TODO: Should this handle allocated buses that match by ID?
return self.buses.__contains__(item)
def __float__(self):
return float(self.bus_id)
def __getitem__(self, item):
"""
Get ``item`` in bus group.
::
>>> server = supriya.Server.default().boot()
>>> bus_group = supriya.BusGroup.control(4).allocate()
>>> bus_group[0]
<+ Bus: 0 (control)>
::
>>> bus_group[1:]
<+ BusGroup{3}: 1 (control)>
"""
if isinstance(item, int):
return self._buses[item]
elif isinstance(item, slice):
indices = item.indices(len(self))
bus_count = indices[1] - indices[0]
bus_group = type(self)(
bus_count=bus_count,
bus_id=indices[0],
calculation_rate=self.calculation_rate,
)
if self.is_allocated:
bus_group._server = self.server
return bus_group
def __int__(self):
return int(self.bus_id)
def __iter__(self):
return iter(self.buses)
def __len__(self):
return len(self._buses)
def __repr__(self):
bus_id = self.bus_id
if bus_id is None:
bus_id = "???"
return "<{} {}{{{}}}: {} ({})>".format(
"+" if self.is_allocated else "-",
type(self).__name__,
len(self),
bus_id,
self.calculation_rate.name.lower(),
)
def __str__(self):
"""
Gets map symbol representation of bus group.
::
>>> server = supriya.Server.default().boot()
>>> control_bus_group = supriya.BusGroup.control(4).allocate()
>>> audio_bus_group = supriya.BusGroup.audio(4).allocate()
::
>>> print(str(control_bus_group))
c0
::
>>> print(str(audio_bus_group))
a16
"""
return self.map_symbol
### PUBLIC METHODS ###
def allocate(self, server=None):
import supriya.realtime
if self.is_allocated:
raise supriya.exceptions.BusAlreadyAllocated
ServerObject.allocate(self, server=server)
allocator = supriya.realtime.Bus._get_allocator(
calculation_rate=self.calculation_rate, server=self.server
)
bus_id = allocator.allocate(len(self))
if bus_id is None:
ServerObject.free(self)
raise ValueError
self._bus_id = bus_id
return self
def ar(self):
"""
Creates an audio-rate input ugen subgraph.
.. container:: example
::
>>> import supriya.realtime
>>> audio_bus_group = supriya.realtime.BusGroup(
... bus_id=8,
... bus_count=4,
... calculation_rate='audio',
... )
>>> ugen = audio_bus_group.ar()
>>> supriya.graph(ugen) # doctest: +SKIP
::
>>> print(ugen)
synthdef:
name: 0af6b551a643cad01e9994845ff4ae40
ugens:
- In.ar:
bus: 8.0
.. container:: example
::
>>> control_bus_group = supriya.realtime.BusGroup(
... bus_id=8,
... bus_count=4,
... calculation_rate='control',
... )
>>> ugen = control_bus_group.ar()
>>> supriya.graph(ugen) # doctest: +SKIP
::
>>> print(ugen)
synthdef:
name: ecaa7fe9417cb0742cdcda87657fe9de
ugens:
- In.kr:
bus: 8.0
- K2A.ar/0:
source: In.kr[0]
- K2A.ar/1:
source: In.kr[1]
- K2A.ar/2:
source: In.kr[2]
- K2A.ar/3:
source: In.kr[3]
Returns ugen.
"""
import supriya.ugens
channel_count = len(self)
if self.calculation_rate == CalculationRate.AUDIO:
ugen = supriya.ugens.In.ar(bus=self.bus_id, channel_count=channel_count)
else:
ugen = supriya.ugens.In.kr(bus=self.bus_id, channel_count=channel_count)
ugen = supriya.ugens.K2A.ar(source=ugen)
return ugen
@classmethod
def audio(cls, bus_count=1):
return cls(bus_count=bus_count, calculation_rate=CalculationRate.AUDIO)
@classmethod
def control(cls, bus_count=1):
return cls(bus_count=bus_count, calculation_rate=CalculationRate.CONTROL)
def fill(self, value):
"""
Fill buses in bus group with ``value``.
::
>>> server = supriya.Server.default().boot()
>>> bus_group = supriya.BusGroup.control(4).allocate()
>>> bus_group.get()
(0.0, 0.0, 0.0, 0.0)
::
>>> bus_group.fill(0.5)
::
>>> bus_group.get()
(0.5, 0.5, 0.5, 0.5)
::
>>> bus_group = supriya.BusGroup.audio(4)
>>> bus_group.fill(0.5)
Traceback (most recent call last):
...
supriya.exceptions.BusNotAllocated
::
>>> bus_group.allocate().fill(0.5)
Traceback (most recent call last):
...
supriya.exceptions.IncompatibleRate
"""
import supriya.commands
if not self.is_allocated:
raise supriya.exceptions.BusNotAllocated
if self.calculation_rate != CalculationRate.CONTROL:
raise supriya.exceptions.IncompatibleRate
index_count_value_triples = [(self.bus_id, len(self), value)]
request = supriya.commands.ControlBusFillRequest(
index_count_value_triples=index_count_value_triples
)
request.communicate(server=self.server, sync=False)
def free(self):
import supriya.realtime
if not self.is_allocated:
raise supriya.exceptions.BusNotAllocated
allocator = supriya.realtime.Bus._get_allocator(
calculation_rate=self.calculation_rate, server=self.server
)
allocator.free(self.bus_id)
self._bus_id = None
ServerObject.free(self)
return self
def get(self):
"""
Get bus group values.
::
>>> server = supriya.Server.default().boot()
>>> bus_group = supriya.BusGroup.control(4).allocate()
>>> bus_group.get()
(0.0, 0.0, 0.0, 0.0)
"""
import supriya.commands
if not self.is_allocated:
raise supriya.exceptions.BusNotAllocated
if self.calculation_rate != CalculationRate.CONTROL:
raise supriya.exceptions.IncompatibleRate
index_count_pairs = [(self.bus_id, len(self))]
request = supriya.commands.ControlBusGetContiguousRequest(
index_count_pairs=index_count_pairs
)
response = request.communicate(server=self.server)
assert len(response) == 1
value = response[0].bus_values
return value
def index(self, item):
return self.buses.index(item)
def kr(self):
"""
Creates a control-rate input ugen subgraph.
.. container:: example
::
>>> import supriya.realtime
>>> audio_bus_group = supriya.realtime.BusGroup(
... bus_id=8,
... bus_count=4,
... calculation_rate='audio',
... )
>>> ugen = audio_bus_group.kr()
>>> supriya.graph(ugen) # doctest: +SKIP
::
>>> print(ugen)
synthdef:
name: ffeda833c370bc644251437469e243ef
ugens:
- In.ar:
bus: 8.0
- A2K.kr/0:
source: In.ar[0]
- A2K.kr/1:
source: In.ar[1]
- A2K.kr/2:
source: In.ar[2]
- A2K.kr/3:
source: In.ar[3]
.. container:: example
::
>>> control_bus_group = supriya.realtime.BusGroup(
... bus_id=8,
... bus_count=4,
... calculation_rate='control',
... )
>>> ugen = control_bus_group.kr()
>>> supriya.graph(ugen) # doctest: +SKIP
::
>>> print(ugen)
synthdef:
name: b64857a04b384841694ba85f74f0fd0b
ugens:
- In.kr:
bus: 8.0
Returns ugen.
"""
import supriya.ugens
channel_count = len(self)
if self.calculation_rate == CalculationRate.AUDIO:
ugen = supriya.ugens.In.ar(bus=self.bus_id, channel_count=channel_count)
ugen = supriya.ugens.A2K.kr(source=ugen)
else:
ugen = supriya.ugens.In.kr(bus=self.bus_id, channel_count=channel_count)
return ugen
def set(self, values):
"""
Set bus group values.
::
>>> server = supriya.Server.default().boot()
>>> bus_group = supriya.BusGroup.control(4).allocate()
>>> bus_group.get()
(0.0, 0.0, 0.0, 0.0)
::
>>> bus_group.set((-0.5, 0.5, -0.5, 0.5))
>>> bus_group.get()
(-0.5, 0.5, -0.5, 0.5)
"""
import supriya.commands
if not self.is_allocated:
raise supriya.exceptions.BusNotAllocated(self)
if self.calculation_rate != CalculationRate.CONTROL:
raise supriya.exceptions.IncompatibleRate(self)
if len(values) != len(self):
raise ValueError(values)
request = supriya.commands.ControlBusSetContiguousRequest(
index_values_pairs=[(self, values)]
)
request.communicate(sync=False)
### PUBLIC PROPERTIES ###
@property
def bus_id(self):
return self._bus_id
@property
def buses(self):
return self._buses
@property
def calculation_rate(self):
return self._calculation_rate
@property
def is_allocated(self):
return self.server is not None
@property
def map_symbol(self):
if self.bus_id is None:
raise supriya.exceptions.BusNotAllocated
if self.calculation_rate == CalculationRate.AUDIO:
map_symbol = "a"
else:
map_symbol = "c"
map_symbol += str(self.bus_id)
return map_symbol
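if __name__ == "__main__":
    # Usage sketch added for illustration; it is not part of the original
    # module. It assumes, as the doctests above do, that scsynth is
    # installed and that supriya.Server.default() can boot a local server.
    # Only methods documented in this file are used.
    server = supriya.Server.default().boot()
    bus_group = BusGroup.control(4).allocate()
    bus_group.set((0.1, 0.2, 0.3, 0.4))  # one value per control bus
    print(bus_group.get())  # -> (0.1, 0.2, 0.3, 0.4)
    print(str(bus_group))  # map symbol, e.g. "c0"
    bus_group.free()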
| mit |
nilmini20s/gem5-2016-08-13 | src/arch/x86/isa/insts/x87/compare_and_test/classify.py | 91 | 2149 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FXAM
'''
| bsd-3-clause |
bobrock/eden | modules/geopy/distance.py | 51 | 13537 | from math import atan, tan, sin, cos, pi, sqrt, atan2, acos, asin
from geopy.units import radians
from geopy import units, util
from geopy.point import Point
# Average great-circle radius in kilometers, from Wikipedia.
# Using a sphere with this radius results in an error of up to about 0.5%.
EARTH_RADIUS = 6372.795
# From http://www.movable-type.co.uk/scripts/LatLongVincenty.html:
# The most accurate and widely used globally-applicable model for the earth
# ellipsoid is WGS-84, used in this script. Other ellipsoids offering a
# better fit to the local geoid include Airy (1830) in the UK, International
# 1924 in much of Europe, Clarke (1880) in Africa, and GRS-67 in South
# America. America (NAD83) and Australia (GDA) use GRS-80, functionally
# equivalent to the WGS-84 ellipsoid.
ELLIPSOIDS = {
# model major (km) minor (km) flattening
'WGS-84': (6378.137, 6356.7523142, 1 / 298.257223563),
'GRS-80': (6378.137, 6356.7523141, 1 / 298.257222101),
'Airy (1830)': (6377.563396, 6356.256909, 1 / 299.3249646),
'Intl 1924': (6378.388, 6356.911946, 1 / 297.0),
'Clarke (1880)': (6378.249145, 6356.51486955, 1 / 293.465),
'GRS-67': (6378.1600, 6356.774719, 1 / 298.25)
}
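# For example, the flattening column follows from the two semiaxes:
#     f = (major - minor) / major
# e.g. WGS-84: (6378.137 - 6356.7523142) / 6378.137 ~= 1 / 298.257223563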
class Distance(object):
def __init__(self, *args, **kwargs):
kilometers = kwargs.pop('kilometers', 0)
if len(args) == 1:
# if we only get one argument we assume
# it's a known distance instead of
# calculating it first
kilometers += args[0]
elif len(args) > 1:
for a, b in util.pairwise(args):
kilometers += self.measure(a, b)
kilometers += units.kilometers(**kwargs)
self.__kilometers = kilometers
def __add__(self, other):
if isinstance(other, Distance):
return self.__class__(self.kilometers + other.kilometers)
else:
raise TypeError(
"Distance instance must be added with Distance instance."
)
def __neg__(self):
return self.__class__(-self.kilometers)
def __sub__(self, other):
return self + -other
def __mul__(self, other):
return self.__class__(self.kilometers * other)
def __div__(self, other):
if isinstance(other, Distance):
return self.kilometers / other.kilometers
else:
return self.__class__(self.kilometers / other)
def __abs__(self):
return self.__class__(abs(self.kilometers))
def __nonzero__(self):
return bool(self.kilometers)
def measure(self, a, b):
raise NotImplementedError
def __repr__(self):
return 'Distance(%s)' % self.kilometers
def __str__(self):
return '%s km' % self.__kilometers
def __cmp__(self, other):
if isinstance(other, Distance):
return cmp(self.kilometers, other.kilometers)
else:
return cmp(self.kilometers, other)
@property
def kilometers(self):
return self.__kilometers
@property
def km(self):
return self.kilometers
@property
def meters(self):
return units.meters(kilometers=self.kilometers)
@property
def m(self):
return self.meters
@property
def miles(self):
return units.miles(kilometers=self.kilometers)
@property
def mi(self):
return self.miles
@property
def feet(self):
return units.feet(kilometers=self.kilometers)
@property
def ft(self):
return self.feet
@property
def nautical(self):
return units.nautical(kilometers=self.kilometers)
@property
def nm(self):
return self.nautical
class GreatCircleDistance(Distance):
"""
Use spherical geometry to calculate the surface distance between two
geodesic points. This formula can be written many different ways,
including just the use of the spherical law of cosines or the haversine
formula.
The class attribute `RADIUS` indicates which radius of the earth to use,
in kilometers. The default is to use the module constant `EARTH_RADIUS`,
which uses the average great-circle radius.
"""
RADIUS = EARTH_RADIUS
def measure(self, a, b):
a, b = Point(a), Point(b)
lat1, lng1 = radians(degrees=a.latitude), radians(degrees=a.longitude)
lat2, lng2 = radians(degrees=b.latitude), radians(degrees=b.longitude)
sin_lat1, cos_lat1 = sin(lat1), cos(lat1)
sin_lat2, cos_lat2 = sin(lat2), cos(lat2)
delta_lng = lng2 - lng1
cos_delta_lng, sin_delta_lng = cos(delta_lng), sin(delta_lng)
central_angle = acos(
# We're correcting from floating point rounding errors on very-near and exact points here
min(1.0, sin_lat1 * sin_lat2 +
cos_lat1 * cos_lat2 * cos_delta_lng))
# From http://en.wikipedia.org/wiki/Great_circle_distance:
# Historically, the use of this formula was simplified by the
# availability of tables for the haversine function. Although this
# formula is accurate for most distances, it too suffers from
# rounding errors for the special (and somewhat unusual) case of
# antipodal points (on opposite ends of the sphere). A more
# complicated formula that is accurate for all distances is: (below)
d = atan2(sqrt((cos_lat2 * sin_delta_lng) ** 2 +
(cos_lat1 * sin_lat2 -
sin_lat1 * cos_lat2 * cos_delta_lng) ** 2),
sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_delta_lng)
return self.RADIUS * d
def destination(self, point, bearing, distance=None):
point = Point(point)
lat1 = units.radians(degrees=point.latitude)
lng1 = units.radians(degrees=point.longitude)
bearing = units.radians(degrees=bearing)
if distance is None:
distance = self
if isinstance(distance, Distance):
distance = distance.kilometers
d_div_r = float(distance) / self.RADIUS
lat2 = asin(
sin(lat1) * cos(d_div_r) +
cos(lat1) * sin(d_div_r) * cos(bearing)
)
lng2 = lng1 + atan2(
sin(bearing) * sin(d_div_r) * cos(lat1),
cos(d_div_r) - sin(lat1) * sin(lat2)
)
return Point(units.degrees(radians=lat2), units.degrees(radians=lng2))
class VincentyDistance(Distance):
"""
Calculate the geodesic distance between two points using the formula
devised by Thaddeus Vincenty, with an accurate ellipsoidal model of the
earth.
The class attribute `ELLIPSOID` indicates which ellipsoidal model of the
earth to use. If it is a string, it is looked up in the `ELLIPSOIDS`
dictionary to obtain the major and minor semiaxes and the flattening.
Otherwise, it should be a tuple with those values. The most globally
accurate model is WGS-84. See the comments above the `ELLIPSOIDS`
dictionary for more information.
"""
ELLIPSOID = 'WGS-84'
def measure(self, a, b):
a, b = Point(a), Point(b)
lat1, lng1 = radians(degrees=a.latitude), radians(degrees=a.longitude)
lat2, lng2 = radians(degrees=b.latitude), radians(degrees=b.longitude)
if isinstance(self.ELLIPSOID, basestring):
major, minor, f = ELLIPSOIDS[self.ELLIPSOID]
else:
major, minor, f = self.ELLIPSOID
delta_lng = lng2 - lng1
reduced_lat1 = atan((1 - f) * tan(lat1))
reduced_lat2 = atan((1 - f) * tan(lat2))
sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)
sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)
lambda_lng = delta_lng
lambda_prime = 2 * pi
iter_limit = 20
while abs(lambda_lng - lambda_prime) > 10e-12 and iter_limit > 0:
sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)
sin_sigma = sqrt(
(cos_reduced2 * sin_lambda_lng) ** 2 +
(cos_reduced1 * sin_reduced2 -
sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2
)
if sin_sigma == 0:
return 0 # Coincident points
cos_sigma = (
sin_reduced1 * sin_reduced2 +
cos_reduced1 * cos_reduced2 * cos_lambda_lng
)
sigma = atan2(sin_sigma, cos_sigma)
sin_alpha = (
cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma
)
cos_sq_alpha = 1 - sin_alpha ** 2
if cos_sq_alpha != 0:
cos2_sigma_m = cos_sigma - 2 * (
sin_reduced1 * sin_reduced2 / cos_sq_alpha
)
else:
cos2_sigma_m = 0.0 # Equatorial line
C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))
lambda_prime = lambda_lng
lambda_lng = (
delta_lng + (1 - C) * f * sin_alpha * (
sigma + C * sin_sigma * (
cos2_sigma_m + C * cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
)
)
)
)
iter_limit -= 1
if iter_limit == 0:
raise ValueError("Vincenty formula failed to converge!")
u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2
A = 1 + u_sq / 16384. * (
4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq))
)
B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))
delta_sigma = (
B * sin_sigma * (
cos2_sigma_m + B / 4. * (
cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
) - B / 6. * cos2_sigma_m * (
-3 + 4 * sin_sigma ** 2
) * (
-3 + 4 * cos2_sigma_m ** 2
)
)
)
)
s = minor * A * (sigma - delta_sigma)
return s
def destination(self, point, bearing, distance=None):
point = Point(point)
lat1 = units.radians(degrees=point.latitude)
lng1 = units.radians(degrees=point.longitude)
bearing = units.radians(degrees=bearing)
if distance is None:
distance = self
if isinstance(distance, Distance):
distance = distance.kilometers
ellipsoid = self.ELLIPSOID
if isinstance(ellipsoid, basestring):
ellipsoid = ELLIPSOIDS[ellipsoid]
major, minor, f = ellipsoid
tan_reduced1 = (1 - f) * tan(lat1)
cos_reduced1 = 1 / sqrt(1 + tan_reduced1 ** 2)
sin_reduced1 = tan_reduced1 * cos_reduced1
sin_bearing, cos_bearing = sin(bearing), cos(bearing)
sigma1 = atan2(tan_reduced1, cos_bearing)
sin_alpha = cos_reduced1 * sin_bearing
cos_sq_alpha = 1 - sin_alpha ** 2
u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2
A = 1 + u_sq / 16384. * (
4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq))
)
B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))
sigma = distance / (minor * A)
sigma_prime = 2 * pi
while abs(sigma - sigma_prime) > 10e-12:
cos2_sigma_m = cos(2 * sigma1 + sigma)
sin_sigma, cos_sigma = sin(sigma), cos(sigma)
delta_sigma = B * sin_sigma * (
cos2_sigma_m + B / 4. * (
cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
) - B / 6. * cos2_sigma_m * (
-3 + 4 * sin_sigma ** 2) * (
-3 + 4 * cos2_sigma_m ** 2
)
)
)
sigma_prime = sigma
sigma = distance / (minor * A) + delta_sigma
sin_sigma, cos_sigma = sin(sigma), cos(sigma)
lat2 = atan2(
sin_reduced1 * cos_sigma + cos_reduced1 * sin_sigma * cos_bearing,
(1 - f) * sqrt(
sin_alpha ** 2 + (
sin_reduced1 * sin_sigma -
cos_reduced1 * cos_sigma * cos_bearing
) ** 2
)
)
lambda_lng = atan2(
sin_sigma * sin_bearing,
cos_reduced1 * cos_sigma - sin_reduced1 * sin_sigma * cos_bearing
)
C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))
delta_lng = (
lambda_lng - (1 - C) * f * sin_alpha * (
sigma + C * sin_sigma * (
cos2_sigma_m + C * cos_sigma * (
-1 + 2 * cos2_sigma_m ** 2
)
)
)
)
final_bearing = atan2(
sin_alpha,
cos_reduced1 * cos_sigma * cos_bearing - sin_reduced1 * sin_sigma
)
lng2 = lng1 + delta_lng
return Point(units.degrees(radians=lat2), units.degrees(radians=lng2))
# Set the default distance formula to the most generally accurate.
distance = VincentyDistance
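if __name__ == "__main__":
    # Usage sketch added for illustration; it is not part of the original
    # module. The coordinates are arbitrary, and we assume geopy's Point
    # (imported from geopy.point, not shown here) accepts
    # (latitude, longitude) sequences, since measure() wraps its
    # arguments in Point(...).
    a = (36.1256, -86.6764)
    b = (33.9425, -118.4081)
    print(GreatCircleDistance(a, b).miles)  # spherical-earth estimate
    print(VincentyDistance(a, b).miles)     # ellipsoidal (WGS-84) estimate
    # destination() projects a point a given distance along a bearing,
    # in degrees clockwise from north.
    print(VincentyDistance(kilometers=1).destination(a, 90))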
| mit |