| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
fbogner/lsdns | dependencies/dnspython-1.12.0/dns/rdtypes/ANY/RRSIG.py | 15 | 5774 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import calendar
import struct
import time
import dns.dnssec
import dns.exception
import dns.name
import dns.rdata
import dns.rdatatype
class BadSigTime(dns.exception.DNSException):
"""Raised when a SIG or RRSIG RR's time cannot be parsed."""
pass
def sigtime_to_posixtime(what):
if len(what) != 14:
raise BadSigTime
year = int(what[0:4])
month = int(what[4:6])
day = int(what[6:8])
hour = int(what[8:10])
minute = int(what[10:12])
second = int(what[12:14])
return calendar.timegm((year, month, day, hour, minute, second,
0, 0, 0))
def posixtime_to_sigtime(what):
return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
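# A quick round-trip illustration of the two helpers above (the epoch value
# is exact for 2004-01-01 00:00:00 UTC):
#
#     >>> sigtime_to_posixtime('20040101000000')
#     1072915200
#     >>> posixtime_to_sigtime(1072915200)
#     '20040101000000'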
class RRSIG(dns.rdata.Rdata):
"""RRSIG record
@ivar type_covered: the rdata type this signature covers
@type type_covered: int
@ivar algorithm: the algorithm used for the sig
@type algorithm: int
@ivar labels: number of labels
@type labels: int
@ivar original_ttl: the original TTL
@type original_ttl: long
@ivar expiration: signature expiration time
@type expiration: long
@ivar inception: signature inception time
@type inception: long
@ivar key_tag: the key tag
@type key_tag: int
@ivar signer: the signer
@type signer: dns.name.Name object
@ivar signature: the signature
@type signature: string"""
__slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
'expiration', 'inception', 'key_tag', 'signer',
'signature']
def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature):
super(RRSIG, self).__init__(rdclass, rdtype)
self.type_covered = type_covered
self.algorithm = algorithm
self.labels = labels
self.original_ttl = original_ttl
self.expiration = expiration
self.inception = inception
self.key_tag = key_tag
self.signer = signer
self.signature = signature
def covers(self):
return self.type_covered
def to_text(self, origin=None, relativize=True, **kw):
return '%s %d %d %d %s %s %d %s %s' % (
dns.rdatatype.to_text(self.type_covered),
self.algorithm,
self.labels,
self.original_ttl,
posixtime_to_sigtime(self.expiration),
posixtime_to_sigtime(self.inception),
self.key_tag,
self.signer.choose_relativity(origin, relativize),
dns.rdata._base64ify(self.signature)
)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
type_covered = dns.rdatatype.from_text(tok.get_string())
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
labels = tok.get_int()
original_ttl = tok.get_ttl()
expiration = sigtime_to_posixtime(tok.get_string())
inception = sigtime_to_posixtime(tok.get_string())
key_tag = tok.get_int()
signer = tok.get_name()
signer = signer.choose_relativity(origin, relativize)
chunks = []
        while True:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
signature = b64.decode('base64_codec')
return cls(rdclass, rdtype, type_covered, algorithm, labels,
original_ttl, expiration, inception, key_tag, signer,
signature)
from_text = classmethod(from_text)
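    # For orientation, the presentation format parsed by from_text() and
    # emitted by to_text() looks like this (field values here are made up):
    #
    #     A 5 3 86400 20040101000000 20031201000000 12345 example.com. <base64>
    #
    # i.e. type covered, algorithm, labels, original TTL, expiration,
    # inception, key tag, signer and the base64-encoded signature.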
def to_wire(self, file, compress = None, origin = None):
header = struct.pack('!HBBIIIH', self.type_covered,
self.algorithm, self.labels,
self.original_ttl, self.expiration,
self.inception, self.key_tag)
file.write(header)
self.signer.to_wire(file, None, origin)
file.write(self.signature)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
header = struct.unpack('!HBBIIIH', wire[current : current + 18])
current += 18
rdlen -= 18
(signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
        if origin is not None:
signer = signer.relativize(origin)
signature = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2],
header[3], header[4], header[5], header[6], signer,
signature)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.signer = self.signer.choose_relativity(origin, relativize)
def _cmp(self, other):
return self._wire_cmp(other)
| mit |
chainer/chainercv | chainercv/utils/testing/assertions/assert_is_semantic_segmentation_link.py | 3 | 1225 | import numpy as np
import six
def assert_is_semantic_segmentation_link(link, n_class):
"""Checks if a link satisfies semantic segmentation link APIs.
This function checks if a given link satisfies semantic segmentation link
APIs or not.
    If the link does not satisfy the APIs, this function raises an
:class:`AssertionError`.
Args:
link: A link to be checked.
n_class (int): The number of classes including background.
"""
imgs = [
np.random.randint(0, 256, size=(3, 480, 640)).astype(np.float32),
np.random.randint(0, 256, size=(3, 480, 320)).astype(np.float32)]
labels = link.predict(imgs)
    assert len(labels) == len(imgs), \
        'The length of labels must be the same as that of imgs.'
for img, label in six.moves.zip(imgs, labels):
assert isinstance(label, np.ndarray), \
'label must be a numpy.ndarray.'
assert label.dtype == np.int32, \
'The type of label must be numpy.int32.'
assert label.shape == img.shape[1:], \
'The shape of label must be (H, W).'
assert label.min() >= 0 and label.max() < n_class, \
'The value of label must be in [0, n_class - 1].'
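# A minimal usage sketch, assuming a chainercv segmentation link such as
# SegNetBasic (the class and n_class value here are illustrative, not part
# of this module):
#
#     from chainercv.links import SegNetBasic
#     link = SegNetBasic(n_class=11)
#     assert_is_semantic_segmentation_link(link, n_class=11)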
| mit |
spvkgn/youtube-dl | youtube_dl/extractor/cnn.py | 16 | 6428 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .turner import TurnerBaseIE
from ..utils import url_basename
class CNNIE(TurnerBaseIE):
_VALID_URL = r'''(?x)https?://(?:(?P<sub_domain>edition|www|money)\.)?cnn\.com/(?:video/(?:data/.+?|\?)/)?videos?/
(?P<path>.+?/(?P<title>[^/]+?)(?:\.(?:[a-z\-]+)|(?=&)))'''
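    # For example, the first test URL below yields sub_domain == 'edition'
    # and path == 'sports/2013/06/09/nadal-1-on-1.cnn', which also serves
    # as the video id.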
_TESTS = [{
'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
'md5': '3e6121ea48df7e2259fe73a0628605c4',
'info_dict': {
'id': 'sports/2013/06/09/nadal-1-on-1.cnn',
'ext': 'mp4',
'title': 'Nadal wins 8th French Open title',
'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
'duration': 135,
'upload_date': '20130609',
},
'expected_warnings': ['Failed to download m3u8 information'],
}, {
'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
'info_dict': {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
'title': "Student's epic speech stuns new freshmen",
'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
'upload_date': '20130821',
},
'expected_warnings': ['Failed to download m3u8 information'],
}, {
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
'md5': 'f14d02ebd264df951feb2400e2c25a1b',
'info_dict': {
'id': 'living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln',
'ext': 'mp4',
'title': 'Nashville Ep. 1: Hand crafted skateboards',
'description': 'md5:e7223a503315c9f150acac52e76de086',
'upload_date': '20141222',
},
'expected_warnings': ['Failed to download m3u8 information'],
}, {
'url': 'http://money.cnn.com/video/news/2016/08/19/netflix-stunning-stats.cnnmoney/index.html',
'md5': '52a515dc1b0f001cd82e4ceda32be9d1',
'info_dict': {
'id': '/video/news/2016/08/19/netflix-stunning-stats.cnnmoney',
'ext': 'mp4',
'title': '5 stunning stats about Netflix',
'description': 'Did you know that Netflix has more than 80 million members? Here are five facts about the online video distributor that you probably didn\'t know.',
'upload_date': '20160819',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://cnn.com/video/?/video/politics/2015/03/27/pkg-arizona-senator-church-attendance-mandatory.ktvk',
'only_matching': True,
}, {
'url': 'http://cnn.com/video/?/video/us/2015/04/06/dnt-baker-refuses-anti-gay-order.wkmg',
'only_matching': True,
}, {
'url': 'http://edition.cnn.com/videos/arts/2016/04/21/olympic-games-cultural-a-z-brazil.cnn',
'only_matching': True,
}]
_CONFIG = {
# http://edition.cnn.com/.element/apps/cvp/3.0/cfg/spider/cnn/expansion/config.xml
'edition': {
'data_src': 'http://edition.cnn.com/video/data/3.0/video/%s/index.xml',
'media_src': 'http://pmd.cdn.turner.com/cnn/big',
},
# http://money.cnn.com/.element/apps/cvp2/cfg/config.xml
'money': {
'data_src': 'http://money.cnn.com/video/data/4.0/video/%s.xml',
'media_src': 'http://ht3.cdn.turner.com/money/big',
},
}
def _extract_timestamp(self, video_data):
# TODO: fix timestamp extraction
return None
def _real_extract(self, url):
sub_domain, path, page_title = re.match(self._VALID_URL, url).groups()
if sub_domain not in ('money', 'edition'):
sub_domain = 'edition'
config = self._CONFIG[sub_domain]
return self._extract_cvp_info(
config['data_src'] % path, page_title, {
'default': {
'media_src': config['media_src'],
}
})
class CNNBlogsIE(InfoExtractor):
_VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+'
_TEST = {
'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/',
'md5': '3e56f97b0b6ffb4b79f4ea0749551084',
'info_dict': {
'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn',
'ext': 'mp4',
'title': 'Criminalizing journalism?',
'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.',
'upload_date': '20140209',
},
'expected_warnings': ['Failed to download m3u8 information'],
'add_ie': ['CNN'],
}
def _real_extract(self, url):
webpage = self._download_webpage(url, url_basename(url))
cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url')
return self.url_result(cnn_url, CNNIE.ie_key())
class CNNArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:edition|www)\.)?cnn\.com/(?!videos?/)'
_TEST = {
'url': 'http://www.cnn.com/2014/12/21/politics/obama-north-koreas-hack-not-war-but-cyber-vandalism/',
'md5': '689034c2a3d9c6dc4aa72d65a81efd01',
'info_dict': {
'id': 'bestoftv/2014/12/21/ip-north-korea-obama.cnn',
'ext': 'mp4',
'title': 'Obama: Cyberattack not an act of war',
'description': 'md5:0a802a40d2376f60e6b04c8d5bcebc4b',
'upload_date': '20141221',
},
'expected_warnings': ['Failed to download m3u8 information'],
'add_ie': ['CNN'],
}
def _real_extract(self, url):
webpage = self._download_webpage(url, url_basename(url))
cnn_url = self._html_search_regex(r"video:\s*'([^']+)'", webpage, 'cnn url')
return self.url_result('http://cnn.com/video/?/video/' + cnn_url, CNNIE.ie_key())
| unlicense |
gsamokovarov/frames.py | test.py | 1 | 2402 | import inspect
import os
from contextlib import contextmanager
import pytest
import frames
@contextmanager
def non_native_frames():
previous_native = frames.NATIVE
try:
frames.NATIVE = False
yield
finally:
frames.NATIVE = previous_native
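# The context manager above flips frames.NATIVE off so the pure-Python
# fallback implementation gets exercised, then restores the previous value
# even if the wrapped assertions raise.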
def test_is_new_style_class():
assert issubclass(frames.Frame, object)
def test_is_a_frame():
assert inspect.isframe(frames.current_frame())
def test_non_native_is_a_frame():
with non_native_frames():
test_is_a_frame()
def test_have_read_only_shortcuts():
frame = frames.current_frame()
assert frame.back == frame.f_back
assert frame.code == frame.f_code
assert frame.globals == frame.f_globals
assert frame.locals == frame.f_locals
assert frame.restricted == frame.f_restricted
def test_have_special_shortcuts():
frame = frames.current_frame()
assert frame.lineno == frame.f_lineno - 1
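    # f_lasti advances between the two attribute reads, so the two values of
    # last_instruction are expected to differ.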
assert frame.last_instruction != frame.last_instruction
assert frame.trace is None
try:
@apply
def test_errors():
try:
raise
finally:
assert frame.exc_type is not None
assert frame.exc_value is not None
assert frame.exc_traceback is not None
except:
assert frame.exc_type is None
assert frame.exc_value is None
assert frame.exc_traceback is None
def test_current_frame_is_really_the_current_frame():
apples = 'yep'
assert 'apples' in frames.current_frame().f_locals
def test_raises_lookup_error_when_frames_are_not_found():
with pytest.raises(LookupError):
frames.locate_frame(lambda f: os.urandom(24) in f.locals)
# Some non-native alternatives for the current tests.
def test_non_native_have_read_only_shortcuts():
with non_native_frames():
test_have_read_only_shortcuts()
def test_non_native_current_frame_is_really_the_current_frame():
with non_native_frames():
test_current_frame_is_really_the_current_frame()
def test_non_native_behave_like_native_frames():
    # Not the best scenario to test, since not all implementations actually
    # support the `level` argument, but the ones that do raise `ValueError`.
with pytest.raises(ValueError):
frames._getframe(999)
with pytest.raises(ValueError):
import sys
sys._getframe(999)
| mit |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/cloudfront/distribution.py | 92 | 31275 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import base64
import time
from boto.compat import six, json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
:type trusted_signers: :class`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
self.logging = logging
self.default_root_object = default_root_object
def __repr__(self):
return "DistributionConfig:%s" % self.origin
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
super(StreamingDistributionConfig, self).__init__(connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def __repr__(self):
return "DistributionSummary:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def __repr__(self):
return "Distribution:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
:param keypair_id: The keypair ID of the Amazon KeyPair used to sign
            the URL. This ID MUST correspond to the private key
specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
will expire after the time has passed. If not provided the URL will
never expire. Format is a unix epoch.
Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
the media subdirectory. If not specified, only
allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
        # Combine these into a full URL.
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
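    # A hedged usage sketch (the distribution id, key pair id and file name
    # below are hypothetical):
    #
    #     dist = conn.get_distribution_info('EDFDVBD6EXAMPLE')
    #     url = dist.create_signed_url(
    #         'http://d604721fxaaqy9.cloudfront.net/horizon.jpg',
    #         keypair_id='PK12345EXAMPLE',
    #         expire_time=int(time.time()) + 3600,
    #         private_key_file='pk-PK12345EXAMPLE.pem')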
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
        # Sign the policy.
signature = self._sign_string(policy, private_key_file, private_key_string)
        # Now base64-encode the signature (URL-safe as well).
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
# SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
# The 'DateLessThan' property is required.
if not expires:
# Defaults to ONE day
expires = int(time.time()) + 86400
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
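    # For illustration, a custom policy for resource 'http://d.example/*'
    # expiring at epoch 1258237200 and restricted to 192.0.2.0/24 serializes
    # (modulo dict key order) to:
    #
    #     {"Statement":[{"Resource":"http://d.example/*","Condition":
    #      {"DateLessThan":{"AWS:EpochTime":1258237200},
    #       "IpAddress":{"AWS:SourceIp":"192.0.2.0/24"}}}]}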
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront.
Requires the rsa library be installed.
"""
try:
import rsa
except ImportError:
raise NotImplementedError("Boto depends on the python rsa "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# If private_key_file is a file name, open it and read it
if private_key_string is None:
if isinstance(private_key_file, six.string_types):
with open(private_key_file, 'r') as file_handle:
private_key_string = file_handle.read()
# Otherwise, treat it like a file
else:
private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
signature = rsa.sign(str(message), private_key, 'SHA-1')
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
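    # Worked example: base64.b64encode('\xfb\xff') is '+/8=', which the
    # substitutions above turn into '-~8_'.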
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
super(StreamingDistribution, self).__init__(connection, config,
domain_name, id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return super(StreamingDistribution, self).startElement(name, attrs,
connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag)
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/ensemble/tests/test_forest.py | 57 | 35265 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
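# The check_*/test_* pairs below follow the nose yield-test convention: each
# test_* generator yields a check function plus its arguments, and the test
# runner invokes every yielded tuple as an individual test case.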
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Default (no explicit memory layout requested)
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check that class_weight resembles sample_weight behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test that a warm start with an equal n_estimators does nothing, returns
# the same forest, and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
40223110/2015CDAFinal_test2 | static/Brython3.1.1-20150328-091302/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
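# Illustrative usage (not part of the original module): with short option
# 'a:' (argument required) and long option 'beta', parsing stops at the
# first non-option argument:
#
#     >>> getopt(['-a', 'foo', '--beta', 'bar'], 'a:b', ['beta'])
#     ([('-a', 'foo'), ('--beta', '')], ['bar'])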
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
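# Illustrative comparison (same option spec as the getopt() example above;
# assumes POSIXLY_CORRECT is unset): GNU-style scanning lets non-option
# arguments precede options, so
#
#     >>> gnu_getopt(['bar', '-a', 'foo'], 'a:b', ['beta'])
#     ([('-a', 'foo')], ['bar'])
#
# whereas getopt() would stop at 'bar' and return ([], ['bar', '-a', 'foo']).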
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError(_('option --%s requires argument') % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError(_('option --%s must not have an argument') % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError(_('option --%s not recognized') % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
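# Illustrative prefix matching: with longopts ['alpha=', 'beta'],
# long_has_args('al', longopts) resolves the unique prefix and returns
# (True, 'alpha'), while long_has_args('beta', longopts) matches exactly
# and returns (False, 'beta').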
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError(_('option -%s requires argument') % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError(_('option -%s not recognized') % opt, opt)
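# Illustrative: in the option string 'a:b', 'a' takes an argument and 'b'
# does not, so short_has_arg('a', 'a:b') is True and
# short_has_arg('b', 'a:b') is False.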
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| gpl-3.0 |
spcui/tp-qemu | qemu/tests/enospc.py | 3 | 7710 | import logging
import time
import re
import os
from autotest.client.shared import error
from autotest.client import utils
from virttest import virt_vm, utils_misc, qemu_storage, data_dir
class EnospcConfig(object):
"""
Performs setup for the test enospc. This is a borg class, similar to a
singleton. The idea is to keep state in memory for when we call cleanup()
on postprocessing.
"""
__shared_state = {}
def __init__(self, test, params):
self.__dict__ = self.__shared_state
root_dir = test.bindir
self.tmpdir = test.tmpdir
self.qemu_img_binary = params['qemu_img_binary']
if not os.path.isfile(self.qemu_img_binary):
self.qemu_img_binary = utils_misc.get_qemu_img_binary(params)
self.raw_file_path = os.path.join(self.tmpdir, 'enospc.raw')
# Here we're trying to choose fairly explanatory names so it's less
# likely that we run into conflict with other devices in the system
self.vgtest_name = params["vgtest_name"]
self.lvtest_name = params["lvtest_name"]
self.lvtest_device = "/dev/%s/%s" % (
self.vgtest_name, self.lvtest_name)
image_dir = os.path.join(data_dir.get_data_dir(),
os.path.dirname(params["image_name"]))
self.qcow_file_path = os.path.join(image_dir, 'enospc.qcow2')
try:
getattr(self, 'loopback')
except AttributeError:
self.loopback = ''
@error.context_aware
def setup(self):
logging.debug("Starting enospc setup")
error.context("performing enospc setup")
utils_misc.display_attributes(self)
# Double-check that there aren't any leftovers
self.cleanup()
try:
utils.run("%s create -f raw %s 10G" %
(self.qemu_img_binary, self.raw_file_path))
# Associate a loopback device with the raw file.
# This is subject to race conditions, which is why we try to associate
# it with the raw file as quickly as possible
l_result = utils.run("losetup -f")
utils.run("losetup -f %s" % self.raw_file_path)
self.loopback = l_result.stdout.strip()
# Add the loopback device configured to the list of pvs
# recognized by LVM
utils.run("pvcreate %s" % self.loopback)
utils.run("vgcreate %s %s" % (self.vgtest_name, self.loopback))
# Create an lv inside the vg with starting size of 200M
utils.run("lvcreate -L 200M -n %s %s" %
(self.lvtest_name, self.vgtest_name))
# Create a 10GB qcow2 image in the logical volume
utils.run("%s create -f qcow2 %s 10G" %
(self.qemu_img_binary, self.lvtest_device))
# Let's symlink the logical volume to the image name that autotest
# expects this device to have
os.symlink(self.lvtest_device, self.qcow_file_path)
except Exception:
try:
self.cleanup()
except Exception, e:
logging.warn(e)
raise
@error.context_aware
def cleanup(self):
error.context("performing enospc cleanup")
if os.path.islink(self.lvtest_device):
utils.run("fuser -k %s" % self.lvtest_device, ignore_status=True)
time.sleep(2)
l_result = utils.run("lvdisplay")
# Let's remove all volumes inside the volume group created
if self.lvtest_name in l_result.stdout:
utils.run("lvremove -f %s" % self.lvtest_device)
# Now, removing the volume group itself
v_result = utils.run("vgdisplay")
if self.vgtest_name in v_result.stdout:
utils.run("vgremove -f %s" % self.vgtest_name)
# Now, if we can, let's remove the physical volume from lvm list
if self.loopback:
p_result = utils.run("pvdisplay")
if self.loopback in p_result.stdout:
utils.run("pvremove -f %s" % self.loopback)
l_result = utils.run('losetup -a')
if self.loopback and (self.loopback in l_result.stdout):
try:
utils.run("losetup -d %s" % self.loopback)
except error.CmdError:
logging.error("Failed to liberate loopback %s", self.loopback)
if os.path.islink(self.qcow_file_path):
os.remove(self.qcow_file_path)
if os.path.isfile(self.raw_file_path):
os.remove(self.raw_file_path)
@error.context_aware
def run(test, params, env):
"""
ENOSPC test
1) Create a virtual disk on lvm
2) Boot up guest with two disks
3) Continually write data to second disk
4) Check images and extend second disk when no space
5) Continue paused guest
6) Repeat step 3~5 several times
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
error.context("Create a virtual disk on lvm")
enospc_config = EnospcConfig(test, params)
enospc_config.setup()
error.context("Boot up guest with two disks")
vm = env.get_vm(params["main_vm"])
vm.create()
login_timeout = int(params.get("login_timeout", 360))
session_serial = vm.wait_for_serial_login(timeout=login_timeout)
vgtest_name = params["vgtest_name"]
lvtest_name = params["lvtest_name"]
logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)
drive_format = params["drive_format"]
output = session_serial.cmd_output("dir /dev")
devname = "/dev/" + re.findall("([shv]db)\s", output)[0]
cmd = params["background_cmd"]
cmd %= devname
error.context("Continually write data to second disk")
logging.info("Sending background cmd '%s'", cmd)
session_serial.sendline(cmd)
iterations = int(params.get("repeat_time", 40))
i = 0
pause_n = 0
while i < iterations:
if vm.monitor.verify_status("paused"):
pause_n += 1
error.context("Checking all images in use by %s" % vm.name,
logging.info)
for image_name in vm.params.objects("images"):
image_params = vm.params.object_params(image_name)
try:
image = qemu_storage.QemuImg(image_params,
data_dir.get_data_dir(), image_name)
image.check_image(image_params, data_dir.get_data_dir())
except (virt_vm.VMError, error.TestWarn), e:
logging.error(e)
error.context("Guest paused, extending Logical Volume size",
logging.info)
try:
utils.run("lvextend -L +200M %s" % logical_volume)
except error.CmdError, e:
logging.debug(e.result_obj.stdout)
error.context("Continue paused guest", logging.info)
vm.resume()
elif not vm.monitor.verify_status("running"):
status = str(vm.monitor.info("status"))
raise error.TestError("Unexpected guest status: %s" % status)
time.sleep(10)
i += 1
logging.info("Final %s", str(vm.monitor.info("status")))
# Shutdown guest before remove the image on LVM.
vm.destroy(gracefully=vm.monitor.verify_status("running"))
try:
enospc_config.cleanup()
except Exception, e:
logging.warn(e)
if pause_n == 0:
raise error.TestFail("Guest didn't pause during loop")
else:
logging.info("Guest paused %s times from %s iterations",
pause_n, iterations)
| gpl-2.0 |
gsehub/edx-platform | lms/djangoapps/grades/management/commands/recalculate_subsection_grades.py | 18 | 4087 | """
Command to recalculate grades for all subsections with problem submissions
in the specified time range.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from pytz import utc
from courseware.models import StudentModule
from lms.djangoapps.grades.constants import ScoreDatabaseTableEnum
from lms.djangoapps.grades.events import PROBLEM_SUBMITTED_EVENT_TYPE
from lms.djangoapps.grades.tasks import recalculate_subsection_grade_v3
from student.models import user_by_anonymous_id
from submissions.models import Submission
from track.event_transaction_utils import create_new_event_transaction_id, set_event_transaction_type
from util.date_utils import to_timestamp
log = logging.getLogger(__name__)
DATE_FORMAT = "%Y-%m-%d %H:%M"
class Command(BaseCommand):
"""
Example usage:
$ ./manage.py lms recalculate_subsection_grades
--modified_start '2016-08-23 16:43' --modified_end '2016-08-25 16:43' --settings=devstack
"""
args = 'fill this in'
help = 'Recalculates subsection grades for all subsections modified within the given time range.'
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
parser.add_argument(
'--modified_start',
dest='modified_start',
help='Starting range for modified date (inclusive): e.g. "2016-08-23 16:43"; expected in UTC.',
)
parser.add_argument(
'--modified_end',
dest='modified_end',
help='Ending range for modified date (inclusive): e.g. "2016-12-23 16:43"; expected in UTC.',
)
def handle(self, *args, **options):
if 'modified_start' not in options:
raise CommandError('modified_start must be provided.')
if 'modified_end' not in options:
raise CommandError('modified_end must be provided.')
modified_start = utc.localize(datetime.strptime(options['modified_start'], DATE_FORMAT))
modified_end = utc.localize(datetime.strptime(options['modified_end'], DATE_FORMAT))
event_transaction_id = create_new_event_transaction_id()
set_event_transaction_type(PROBLEM_SUBMITTED_EVENT_TYPE)
kwargs = {'modified__range': (modified_start, modified_end), 'module_type': 'problem'}
for record in StudentModule.objects.filter(**kwargs):
task_args = {
"user_id": record.student_id,
"course_id": unicode(record.course_id),
"usage_id": unicode(record.module_state_key),
"only_if_higher": False,
"expected_modified_time": to_timestamp(record.modified),
"score_deleted": False,
"event_transaction_id": unicode(event_transaction_id),
"event_transaction_type": PROBLEM_SUBMITTED_EVENT_TYPE,
"score_db_table": ScoreDatabaseTableEnum.courseware_student_module,
}
recalculate_subsection_grade_v3.apply_async(kwargs=task_args)
kwargs = {'created_at__range': (modified_start, modified_end)}
for record in Submission.objects.filter(**kwargs):
task_args = {
"user_id": user_by_anonymous_id(record.student_item.student_id).id,
"anonymous_user_id": record.student_item.student_id,
"course_id": unicode(record.student_item.course_id),
"usage_id": unicode(record.student_item.item_id),
"only_if_higher": False,
"expected_modified_time": to_timestamp(record.created_at),
"score_deleted": False,
"event_transaction_id": unicode(event_transaction_id),
"event_transaction_type": PROBLEM_SUBMITTED_EVENT_TYPE,
"score_db_table": ScoreDatabaseTableEnum.submissions,
}
recalculate_subsection_grade_v3.apply_async(kwargs=task_args)
| agpl-3.0 |
medspx/QGIS | tests/src/python/test_qgsencodingselectiondialog.py | 21 | 1175 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsEncodingSelectionDialog
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '21/11/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.gui import QgsEncodingSelectionDialog
from qgis.testing import start_app, unittest
start_app()
class TestQgsEncodingSelectionDialog(unittest.TestCase):
def testGettersSetters(self):
""" test dialog getters/setters """
dlg = qgis.gui.QgsEncodingSelectionDialog(encoding='UTF-16')
self.assertEqual(dlg.encoding(), 'UTF-16')
dlg.setEncoding('UTF-8')
self.assertEqual(dlg.encoding(), 'UTF-8')
# custom encoding option
dlg.setEncoding('trisolarian')
self.assertEqual(dlg.encoding(), 'trisolarian')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
SivilTaram/edx-platform | common/test/acceptance/pages/studio/html_component_editor.py | 115 | 1139 | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from component_editor import ComponentEditorView
class HtmlComponentEditorView(ComponentEditorView):
"""
Represents the rendered view of an HTML component editor.
"""
def set_content_and_save(self, content):
"""
Types content into the html component and presses Save.
"""
self.set_content(content)
self.save()
def set_content_and_cancel(self, content):
"""
Types content into the html component and presses Cancel to abort the change.
"""
self.set_content(content)
self.cancel()
def set_content(self, content):
"""
Types content into the html component, leaving the component open.
"""
self.q(css='.edit-xblock-modal .editor-modes .editor-button').click()
editor = self.q(css=self._bounded_selector('.html-editor .mce-edit-area'))[0]
ActionChains(self.browser).click(editor).\
send_keys([Keys.CONTROL, 'a']).key_up(Keys.CONTROL).send_keys(content).perform()
| agpl-3.0 |
opendatateam/udata | udata/core/organization/commands.py | 2 | 1623 | import logging
import click
from udata.commands import cli, exit_with_error
from udata.models import GeoZone, Organization
log = logging.getLogger(__name__)
@cli.group('organizations')
def grp():
'''Organizations related operations'''
pass
@grp.command()
@click.argument('geoid', metavar='<geoid>')
@click.argument('organization_id_or_slug', metavar='<organization>')
def attach_zone(geoid, organization_id_or_slug):
'''Attach a zone <geoid> to a given <organization>.'''
organization = Organization.objects.get_by_id_or_slug(
organization_id_or_slug)
if not organization:
log.error('No organization found for %s', organization_id_or_slug)
return
geozone = GeoZone.objects.get(id=geoid)
if not geozone:
log.error('No geozone found for %s', geoid)
return
log.info('Attaching {organization} with {geozone.name}'.format(
organization=organization, geozone=geozone))
organization.zone = geozone.id
organization.save()
log.info('Done')
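# Illustrative CLI usage (hypothetical geoid and slug; the exact command
# spelling may vary with the installed click version):
#
#     $ udata organizations attach-zone fr:commune:75056 some-org-slug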
@grp.command()
@click.argument('organization_id_or_slug', metavar='<organization>')
def detach_zone(organization_id_or_slug):
'''Detach the zone of a given <organization>.'''
organization = Organization.objects.get_by_id_or_slug(
organization_id_or_slug)
if not organization:
exit_with_error(
'No organization found for {0}'.format(organization_id_or_slug)
)
log.info('Detaching {organization} from {organization.zone}'.format(
organization=organization))
organization.zone = None
organization.save()
log.info('Done')
| agpl-3.0 |
Solinea/horizon | openstack_dashboard/dashboards/project/volumes/volumes/urls.py | 60 | 2451 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.volumes \
.volumes import views
from openstack_dashboard.dashboards.project.volumes.backups \
import views as backup_views
VIEWS_MOD = ('openstack_dashboard.dashboards.project.volumes.volumes.views')
urlpatterns = patterns(
VIEWS_MOD,
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<volume_id>[^/]+)/extend/$',
views.ExtendView.as_view(),
name='extend'),
url(r'^(?P<volume_id>[^/]+)/attach/$',
views.EditAttachmentsView.as_view(),
name='attach'),
url(r'^(?P<volume_id>[^/]+)/create_snapshot/$',
views.CreateSnapshotView.as_view(),
name='create_snapshot'),
url(r'^(?P<volume_id>[^/]+)/create_transfer/$',
views.CreateTransferView.as_view(),
name='create_transfer'),
url(r'^accept_transfer/$',
views.AcceptTransferView.as_view(),
name='accept_transfer'),
url(r'^(?P<transfer_id>[^/]+)/auth/(?P<auth_key>[^/]+)/$',
views.ShowTransferView.as_view(),
name='show_transfer'),
url(r'^(?P<volume_id>[^/]+)/create_backup/$',
backup_views.CreateBackupView.as_view(),
name='create_backup'),
url(r'^(?P<volume_id>[^/]+)/$',
views.DetailView.as_view(),
name='detail'),
url(r'^(?P<volume_id>[^/]+)/upload_to_image/$',
views.UploadToImageView.as_view(),
name='upload_to_image'),
url(r'^(?P<volume_id>[^/]+)/update/$',
views.UpdateView.as_view(),
name='update'),
url(r'^(?P<volume_id>[^/]+)/retype/$',
views.RetypeView.as_view(),
name='retype'),
url(r'^(?P<volume_id>[^/]+)/encryption_detail/$',
views.EncryptionDetailView.as_view(),
name='encryption_detail'),
)
| apache-2.0 |
jomolinare/kobocat | onadata/libs/utils/chart_tools.py | 3 | 5073 | import re
from onadata.libs.data.query import get_form_submissions_grouped_by_field
from onadata.libs.utils import common_tags
# list of fields we can chart
CHART_FIELDS = ['select one', 'integer', 'decimal', 'date', 'datetime',
'start', 'end', 'today']
# numeric, categorized
DATA_TYPE_MAP = {
'integer': 'numeric',
'decimal': 'numeric',
'datetime': 'time_based',
'date': 'time_based',
'start': 'time_based',
'end': 'time_based',
'today': 'time_based',
}
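# Field types not listed above (e.g. 'select one') fall back to
# 'categorized' via DATA_TYPE_MAP.get(field_type, 'categorized') below.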
CHARTS_PER_PAGE = 20
POSTGRES_ALIAS_LENGTH = 63
timezone_re = re.compile(r'(.+)\+(\d+)')
def utc_time_string_for_javascript(date_string):
"""
Convert 2014-01-16T12:07:23.322+03 to 2014-01-16T12:07:23.322+0300
Can't use datetime.str[fp]time here since python 2.7's %z is platform
dependent - http://stackoverflow.com/questions/2609259/converting-string-to-datetime-object-in-python
"""
match = timezone_re.match(date_string)
if not match:
raise ValueError(
"{} fos not match the format 2014-01-16T12:07:23.322+03".format(
date_string))
date_time = match.groups()[0]
tz = match.groups()[1]
if len(tz) == 2:
tz += '00'
elif len(tz) != 4:
raise ValueError("len of {} must either be 2 or 4")
return "{}+{}".format(date_time, tz)
def get_choice_label(choices, string):
labels = []
if string:
for name in string.split(' '):
for choice in choices:
if choice['name'] == name:
labels.append(choice['label'])
break
return labels
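# Illustrative (hypothetical choices): given
#     choices = [{'name': 'yes', 'label': 'Yes'},
#                {'name': 'no', 'label': 'No'}]
# get_choice_label(choices, 'yes no') returns ['Yes', 'No'], one label
# per space-separated selected name.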
def build_chart_data_for_field(xform, field, language_index=0):
# check if its the special _submission_time META
if isinstance(field, basestring) and field == common_tags.SUBMISSION_TIME:
field_label = 'Submission Time'
field_xpath = '_submission_time'
field_type = 'datetime'
else:
# TODO: merge choices with results and set 0's on any missing fields,
# i.e. they didn't have responses
# check if label is dict i.e. multilang
if isinstance(field.label, dict) and len(field.label.keys()) > 0:
languages = field.label.keys()
language_index = min(language_index, len(languages) - 1)
field_label = field.label[languages[language_index]]
else:
field_label = field.label or field.name
field_xpath = field.get_abbreviated_xpath()
field_type = field.type
data_type = DATA_TYPE_MAP.get(field_type, 'categorized')
field_name = field.name if not isinstance(field, basestring) else field
result = get_form_submissions_grouped_by_field(
xform, field_xpath, field_name)
# truncate field name to 63 characters to fix #354
truncated_name = field_name[0:POSTGRES_ALIAS_LENGTH]
truncated_name = truncated_name.encode('utf-8')
if data_type == 'categorized':
if result:
for item in result:
item[truncated_name] = get_choice_label(
field.children, item[truncated_name])
# replace truncated field names in the result set with the field name key
field_name = field_name.encode('utf-8')
for item in result:
if field_name != truncated_name:
item[field_name] = item[truncated_name]
del(item[truncated_name])
result = sorted(result, key=lambda d: d['count'])
# for date fields, strip out None values
if data_type == 'time_based':
result = [r for r in result if r.get(field_name) is not None]
# for each check if it matches the timezone regexp and convert for js
for r in result:
if timezone_re.match(r[field_name]):
try:
r[field_name] = utc_time_string_for_javascript(
r[field_name])
except ValueError:
pass
return {
'data': result,
'data_type': data_type,
'field_label': field_label,
'field_xpath': field_name,
'field_name': field_xpath.replace('/', '-'),
'field_type': field_type
}
def calculate_ranges(page, items_per_page, total_items):
"""Return the offset and end indices for a slice."""
# offset cannot be more than total_items
offset = min(page * items_per_page, total_items)
end = min(offset + items_per_page, total_items)
# returns the offset and the end for a slice
return offset, end
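# Illustrative: with items_per_page=20 and total_items=45,
# calculate_ranges(1, 20, 45) == (20, 40) and
# calculate_ranges(2, 20, 45) == (40, 45); the last page is clamped to
# the number of available items.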
def build_chart_data(xform, language_index=0, page=0):
dd = xform.data_dictionary()
# only use chart-able fields
fields = filter(
lambda f: f.type in CHART_FIELDS, [e for e in dd.survey_elements])
# prepend submission time
fields[:0] = [common_tags.SUBMISSION_TIME]
# get chart data for fields within this `page`
start, end = calculate_ranges(page, CHARTS_PER_PAGE, len(fields))
fields = fields[start:end]
return [build_chart_data_for_field(xform, field, language_index)
for field in fields]
| bsd-2-clause |
glove747/liberty-neutron | neutron/agent/metadata/driver.py | 5 | 7456 | # Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from neutron.agent.common import config
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import utils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
# Access with redirection to metadata proxy iptables mark mask
METADATA_SERVICE_NAME = 'metadata-proxy'
class MetadataDriver(object):
def __init__(self, l3_agent):
self.metadata_port = l3_agent.conf.metadata_port
self.metadata_access_mark = l3_agent.conf.metadata_access_mark
registry.subscribe(
after_router_added, resources.ROUTER, events.AFTER_CREATE)
registry.subscribe(
before_router_removed, resources.ROUTER, events.BEFORE_DELETE)
@classmethod
def metadata_filter_rules(cls, port, mark):
return [('INPUT', '-m mark --mark %s/%s -j ACCEPT' %
(mark, constants.ROUTER_MARK_MASK)),
('INPUT', '-p tcp -m tcp --dport %s '
'-j DROP' % port)]
@classmethod
def metadata_mangle_rules(cls, mark):
return [('PREROUTING', '-d 169.254.169.254/32 '
'-i %(interface_name)s '
'-p tcp -m tcp --dport 80 '
'-j MARK --set-xmark %(value)s/%(mask)s' %
{'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
'value': mark,
'mask': constants.ROUTER_MARK_MASK})]
@classmethod
def metadata_nat_rules(cls, port):
return [('PREROUTING', '-d 169.254.169.254/32 '
'-i %(interface_name)s '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %(port)s' %
{'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
'port': port})]
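# Illustrative (a sketch, assuming the default metadata_port of 9697 and
# the l3 agent's internal device prefix 'qr-'): metadata_nat_rules(9697)
# yields a single PREROUTING rule,
#     ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ -p tcp -m tcp '
#      '--dport 80 -j REDIRECT --to-port 9697')
# which redirects metadata traffic hitting 169.254.169.254:80 on any
# internal router interface to the local proxy port.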
@classmethod
def _get_metadata_proxy_user_group_watchlog(cls, conf):
user = conf.metadata_proxy_user or str(os.geteuid())
group = conf.metadata_proxy_group or str(os.getegid())
watch_log = conf.metadata_proxy_watch_log
if watch_log is None:
# NOTE(cbrandily): Commonly, log watching can be enabled only
# when metadata proxy user is agent effective user (id/name).
watch_log = utils.is_effective_user(user)
return user, group, watch_log
@classmethod
def _get_metadata_proxy_callback(cls, port, conf, network_id=None,
router_id=None):
uuid = network_id or router_id
if uuid is None:
raise exceptions.NetworkIdOrRouterIdRequiredError()
if network_id:
lookup_param = '--network_id=%s' % network_id
else:
lookup_param = '--router_id=%s' % router_id
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group, watch_log = (
cls._get_metadata_proxy_user_group_watchlog(conf))
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
lookup_param,
'--state_path=%s' % conf.state_path,
'--metadata_port=%s' % port,
'--metadata_proxy_user=%s' % user,
'--metadata_proxy_group=%s' % group]
proxy_cmd.extend(config.get_log_args(
conf, 'neutron-ns-metadata-proxy-%s.log' % uuid,
metadata_proxy_watch_log=watch_log))
return proxy_cmd
return callback
@classmethod
def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
network_id=None, router_id=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
port, conf, network_id=network_id, router_id=router_id)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
@classmethod
def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf):
monitor.unregister(uuid, METADATA_SERVICE_NAME)
# No need to pass ns name as it's not needed for disable()
pm = cls._get_metadata_proxy_process_manager(uuid, conf)
pm.disable()
@classmethod
def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None,
callback=None):
return external_process.ProcessManager(
conf=conf,
uuid=router_id,
namespace=ns_name,
default_cmd_callback=callback)
def after_router_added(resource, event, l3_agent, **kwargs):
router = kwargs['router']
proxy = l3_agent.metadata_driver
for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
proxy.metadata_access_mark):
router.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark):
router.iptables_manager.ipv4['mangle'].add_rule(c, r)
for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
router.iptables_manager.ipv4['nat'].add_rule(c, r)
router.iptables_manager.apply()
if not isinstance(router, ha_router.HaRouter):
proxy.spawn_monitored_metadata_proxy(
l3_agent.process_monitor,
router.ns_name,
proxy.metadata_port,
l3_agent.conf,
router_id=router.router_id)
def before_router_removed(resource, event, l3_agent, **kwargs):
router = kwargs['router']
proxy = l3_agent.metadata_driver
for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
proxy.metadata_access_mark):
router.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark):
router.iptables_manager.ipv4['mangle'].remove_rule(c, r)
for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
router.iptables_manager.ipv4['nat'].remove_rule(c, r)
router.iptables_manager.apply()
proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor,
router.router['id'],
l3_agent.conf)
| apache-2.0 |
cesargtz/YecoraOdoo | addons/stock_dropshipping/wizard/stock_invoice_onshipping.py | 270 | 2139 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_invoice_onshipping(osv.osv_memory):
_inherit = "stock.invoice.onshipping"
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
pick = pickings and pickings[0]
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
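# A move that goes straight from a supplier location to a customer
# location is a dropshipping picking: invoice it on the purchase side
# when the related purchase order is invoiced from pickings, otherwise
# on the sale side.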
if src_usage == 'supplier' and dest_usage == 'customer':
pick_purchase = pick.move_lines and pick.move_lines[0].purchase_line_id and pick.move_lines[0].purchase_line_id.order_id.invoice_method == 'picking'
if pick_purchase:
return 'purchase'
else:
return 'sale'
else:
return super(stock_invoice_onshipping, self)._get_journal_type(cr, uid, context=context)
_defaults = {
'journal_type': _get_journal_type,
} | agpl-3.0 |
nelango/ViralityAnalysis | model/lib/textblob/utils.py | 17 | 1526 | # -*- coding: utf-8 -*-
import re
import string
PUNCTUATION_REGEX = re.compile('[{0}]'.format(re.escape(string.punctuation)))
def strip_punc(s, all=False):
"""Removes punctuation from a string.
:param s: The string.
:param all: Remove all punctuation. If False, only removes punctuation from
the ends of the string.
"""
if all:
return PUNCTUATION_REGEX.sub('', s.strip())
else:
return s.strip().strip(string.punctuation)
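# Illustrative behavior:
#     >>> strip_punc('Hello, world...')
#     'Hello, world'
#     >>> strip_punc('Hello, world...', all=True)
#     'Hello world'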
def lowerstrip(s, all=False):
"""Makes text all lowercase and strips punctuation and whitespace.
:param s: The string.
:param all: Remove all punctuation. If False, only removes punctuation from
the ends of the string.
"""
return strip_punc(s.lower().strip(), all=all)
def tree2str(tree, concat=' '):
"""Convert a nltk.tree.Tree to a string.
For example:
(NP a/DT beautiful/JJ new/JJ dashboard/NN) -> "a beautiful new dashboard"
"""
return concat.join([word for (word, tag) in tree])
def filter_insignificant(chunk, tag_suffixes=('DT', 'CC', 'PRP$', 'PRP')):
"""Filter out insignificant (word, tag) tuples from a chunk of text."""
good = []
for word, tag in chunk:
ok = True
for suffix in tag_suffixes:
if tag.endswith(suffix):
ok = False
break
if ok:
good.append((word, tag))
return good
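# Illustrative: with the default tag suffixes, determiners (DT) and
# pronouns (PRP/PRP$) are dropped:
#     >>> filter_insignificant([('the', 'DT'), ('cat', 'NN'), ('sat', 'VBD')])
#     [('cat', 'NN'), ('sat', 'VBD')]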
def is_filelike(obj):
"""Return whether ``obj`` is a file-like object."""
return hasattr(obj, 'read')
| mit |
alu042/edx-platform | common/djangoapps/contentserver/test/test_contentserver.py | 6 | 12992 | """
Tests for StaticContentServer
"""
import copy
import datetime
import ddt
import logging
import unittest
from uuid import uuid4
from django.conf import settings
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.xml_importer import import_course_from_xml
from contentserver.middleware import parse_range_header, HTTP_DATE_FORMAT, StaticContentServer
from student.models import CourseEnrollment
log = logging.getLogger(__name__)
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
@ddt.ddt
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ContentStoreToyCourseTest(ModuleStoreTestCase):
"""
Tests that use the toy course.
"""
def setUp(self):
"""
Create user and login.
"""
self.staff_pwd = super(ContentStoreToyCourseTest, self).setUp()
self.staff_usr = self.user
self.non_staff_usr, self.non_staff_pwd = self.create_non_staff_user()
self.client = Client()
self.contentstore = contentstore()
store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo) # pylint: disable=protected-access
self.course_key = store.make_course_key('edX', 'toy', '2012_Fall')
import_course_from_xml(
store, self.user.id, TEST_DATA_DIR, ['toy'],
static_content_store=self.contentstore, verbose=True
)
# A locked asset
self.locked_asset = self.course_key.make_asset_key('asset', 'sample_static.txt')
self.url_locked = unicode(self.locked_asset)
self.contentstore.set_attr(self.locked_asset, 'locked', True)
# An unlocked asset
self.unlocked_asset = self.course_key.make_asset_key('asset', 'another_static.txt')
self.url_unlocked = unicode(self.unlocked_asset)
self.length_unlocked = self.contentstore.get_attr(self.unlocked_asset, 'length')
def test_unlocked_asset(self):
"""
Test that unlocked assets are being served.
"""
self.client.logout()
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
def test_locked_asset_not_logged_in(self):
"""
Test that locked assets behave appropriately in case the user is not
logged in.
"""
self.client.logout()
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 403)
def test_locked_asset_not_registered(self):
"""
Test that locked assets behave appropriately in case user is logged in
in but not registered for the course.
"""
self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 403)
def test_locked_asset_registered(self):
"""
Test that locked assets behave appropriately in case user is logged in
and registered for the course.
"""
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
def test_locked_asset_staff(self):
"""
Test that locked assets behave appropriately in case user is staff.
"""
self.client.login(username=self.staff_usr, password=self.staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
def test_range_request_full_file(self):
"""
Test that a range request from byte 0 to last,
outputs partial content status code and valid Content-Range and Content-Length.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes=0-')
self.assertEqual(resp.status_code, 206) # HTTP_206_PARTIAL_CONTENT
self.assertEqual(
resp['Content-Range'],
'bytes {first}-{last}/{length}'.format(
first=0, last=self.length_unlocked - 1,
length=self.length_unlocked
)
)
self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
def test_range_request_partial_file(self):
"""
Test that a range request for a partial file,
outputs partial content status code and valid Content-Range and Content-Length.
first_byte and last_byte are chosen to be simple but non trivial values.
"""
first_byte = self.length_unlocked / 4
last_byte = self.length_unlocked / 2
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=first_byte, last=last_byte))
self.assertEqual(resp.status_code, 206) # HTTP_206_PARTIAL_CONTENT
self.assertEqual(resp['Content-Range'], 'bytes {first}-{last}/{length}'.format(
first=first_byte, last=last_byte, length=self.length_unlocked))
self.assertEqual(resp['Content-Length'], str(last_byte - first_byte + 1))
def test_range_request_multiple_ranges(self):
"""
Test that multiple ranges in request outputs the full content.
"""
first_byte = self.length_unlocked / 4
last_byte = self.length_unlocked / 2
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}, -100'.format(
first=first_byte, last=last_byte))
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Content-Range', resp)
self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
@ddt.data(
'bytes 0-',
'bits=0-',
'bytes=0',
'bytes=one-',
)
def test_syntax_errors_in_range(self, header_value):
"""
Test that syntactically invalid Range values result in a 200 OK full content response.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE=header_value)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Content-Range', resp)
def test_range_request_malformed_invalid_range(self):
"""
Test that a range request with malformed Range (first_byte > last_byte) outputs
416 Requested Range Not Satisfiable.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=(self.length_unlocked / 2), last=(self.length_unlocked / 4)))
self.assertEqual(resp.status_code, 416)
def test_range_request_malformed_out_of_bounds(self):
"""
Test that a range request with malformed Range (first_byte, last_byte == totalLength, offset by 1 error)
outputs 416 Requested Range Not Satisfiable.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=(self.length_unlocked), last=(self.length_unlocked)))
self.assertEqual(resp.status_code, 416)
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_with_ttl_unlocked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is set, an unlocked asset will be sent back with
the correct cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 10
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertIn('Expires', resp)
self.assertEquals('public, max-age=10, s-maxage=10', resp['Cache-Control'])
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_with_ttl_locked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is set, a locked asset will be sent back without
any cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 10
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertEquals('private, no-cache, no-store', resp['Cache-Control'])
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_without_ttl_unlocked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is not set, an unlocked asset will be sent back without
any cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 0
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertNotIn('Cache-Control', resp)
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_without_ttl_locked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is not set, a locked asset will be sent back with a
cache-control header that indicates this asset should not be cached.
"""
mock_get_cache_ttl.return_value = 0
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertEquals('private, no-cache, no-store', resp['Cache-Control'])
def test_get_expiration_value(self):
start_dt = datetime.datetime.strptime("Thu, 01 Dec 1983 20:00:00 GMT", HTTP_DATE_FORMAT)
near_expire_dt = StaticContentServer.get_expiration_value(start_dt, 55)
self.assertEqual("Thu, 01 Dec 1983 20:00:55 GMT", near_expire_dt)
def test_response_no_vary_header_unlocked(self):
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Vary', resp)
def test_response_no_vary_header_locked(self):
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Vary', resp)
@ddt.ddt
class ParseRangeHeaderTestCase(unittest.TestCase):
"""
Tests for the parse_range_header function.
"""
def setUp(self):
super(ParseRangeHeaderTestCase, self).setUp()
self.content_length = 10000
def test_bytes_unit(self):
unit, __ = parse_range_header('bytes=100-', self.content_length)
self.assertEqual(unit, 'bytes')
@ddt.data(
('bytes=100-', 1, [(100, 9999)]),
('bytes=1000-', 1, [(1000, 9999)]),
('bytes=100-199, 200-', 2, [(100, 199), (200, 9999)]),
('bytes=100-199, 200-499', 2, [(100, 199), (200, 499)]),
('bytes=-100', 1, [(9900, 9999)]),
('bytes=-100, -200', 2, [(9900, 9999), (9800, 9999)])
)
@ddt.unpack
def test_valid_syntax(self, header_value, expected_ranges_length, expected_ranges):
__, ranges = parse_range_header(header_value, self.content_length)
self.assertEqual(len(ranges), expected_ranges_length)
self.assertEqual(ranges, expected_ranges)
@ddt.data(
('bytes=one-20', ValueError, 'invalid literal for int()'),
('bytes=-one', ValueError, 'invalid literal for int()'),
('bytes=-', ValueError, 'invalid literal for int()'),
('bytes=--', ValueError, 'invalid literal for int()'),
('bytes', ValueError, 'Invalid syntax'),
('bytes=', ValueError, 'Invalid syntax'),
('bytes=0', ValueError, 'Invalid syntax'),
('bytes=0-10,0', ValueError, 'Invalid syntax'),
('bytes=0=', ValueError, 'too many values to unpack'),
)
@ddt.unpack
def test_invalid_syntax(self, header_value, exception_class, exception_message_regex):
self.assertRaisesRegexp(
exception_class, exception_message_regex, parse_range_header, header_value, self.content_length
)
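# Illustrative sketch (not part of the original suite), inferred from the
# assertions above: parse_range_header returns a (unit, ranges) pair, so
# >>> parse_range_header('bytes=-500', 10000)
# would be expected to yield ('bytes', [(9500, 9999)]).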
| agpl-3.0 |
milmd90/TwitterBot | twitter/api.py | 3 | 173090 | #!/usr/bin/env python
#
# vim: sw=2 ts=2 sts=2
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API"""
from __future__ import division
from __future__ import print_function
import sys
import gzip
import time
import base64
import re
import datetime
from calendar import timegm
import requests
from requests_oauthlib import OAuth1
import io
import warnings
from uuid import uuid4
from past.utils import old_div
try:
# python 3
from urllib.parse import urlparse, urlunparse, urlencode
from urllib.request import urlopen
from urllib.request import __version__ as urllib_version
except ImportError:
from urlparse import urlparse, urlunparse
from urllib2 import urlopen
from urllib import urlencode
from urllib import __version__ as urllib_version
from twitter import (__version__, _FileCache, json, DirectMessage, List,
Status, Trend, TwitterError, User, UserStatus)
from twitter.category import Category
from twitter.ratelimit import RateLimit
from twitter.twitter_utils import (
calc_expected_status_length,
is_url,
parse_media_file,
enf_type)
warnings.simplefilter('always', DeprecationWarning)
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
class Api(object):
"""A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print([s.text for s in statuses])
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print([u.name for u in users])
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print(status.text)
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetHomeTimeline()
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.LookupFriendship(user)
>>> api.VerifyCredentials()
"""
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
consumer_key=None,
consumer_secret=None,
access_token_key=None,
access_token_secret=None,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE,
shortner=None,
base_url=None,
stream_url=None,
upload_url=None,
chunk_size=1024*1024,
use_gzip_compression=False,
debugHTTP=False,
timeout=None,
sleep_on_rate_limit=False):
"""Instantiate a new twitter.Api object.
Args:
consumer_key:
Your Twitter user's consumer_key.
consumer_secret:
Your Twitter user's consumer_secret.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
input_encoding:
The encoding used to encode input strings. [Optional]
request_header:
A dictionary of additional HTTP request headers. [Optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching. [Optional]
shortner:
The shortner instance to use. Defaults to None.
See shorten_url.py for an example shortner. [Optional]
base_url:
The base URL to use to contact the Twitter API.
Defaults to https://api.twitter.com. [Optional]
use_gzip_compression:
Set to True to tell enable gzip compression for any call
made to Twitter. Defaults to False. [Optional]
debugHTTP:
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False. [Optional]
timeout:
Set timeout (in seconds) of the http/https requests. If None the
requests lib default will be used. Defaults to None. [Optional]
"""
self.SetCache(cache)
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._input_encoding = input_encoding
self._use_gzip = use_gzip_compression
self._debugHTTP = debugHTTP
self._shortlink_size = 19
self._timeout = timeout
self.__auth = None
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
self.rate_limit = None
self.sleep_on_rate_limit = sleep_on_rate_limit
if base_url is None:
self.base_url = 'https://api.twitter.com/1.1'
else:
self.base_url = base_url
if stream_url is None:
self.stream_url = 'https://stream.twitter.com/1.1'
else:
self.stream_url = stream_url
if upload_url is None:
self.upload_url = 'https://upload.twitter.com/1.1'
else:
self.upload_url = upload_url
self.chunk_size = chunk_size
if self.chunk_size < 1024 * 16:
warnings.warn((
"A chunk size lower than 16384 may result in too many "
"requests to the Twitter API when uploading videos. You are "
"strongly advised to increase it above 16384"
))
if consumer_key is not None and (access_token_key is None or
access_token_secret is None):
print('Twitter now requires an oAuth Access Token for API calls. '
'If you\'re using this library from a command line utility, '
'please run the included get_access_token.py tool to '
'generate one.', file=sys.stderr)
raise TwitterError({'message': "Twitter requires oAuth Access Token for all API access"})
self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
if debugHTTP:
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key=None,
access_token_secret=None):
"""Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
"""
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
auth_list = [consumer_key, consumer_secret,
access_token_key, access_token_secret]
if all(auth_list):
self.__auth = OAuth1(consumer_key, consumer_secret,
access_token_key, access_token_secret)
self._config = None
def GetHelpConfiguration(self):
if self._config is None:
url = '%s/help/configuration.json' % self.base_url
resp = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
self._config = data
return self._config
def GetShortUrlLength(self, https=False):
config = self.GetHelpConfiguration()
if https:
return config['short_url_length_https']
else:
return config['short_url_length']
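# Usage sketch: the cached help/configuration payload backs the lookup
# above; typical keys are 'short_url_length' and 'short_url_length_https'.
# >>> api.GetShortUrlLength(https=True)  # e.g. 23, as reported by Twitter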
def ClearCredentials(self):
"""Clear any credentials for this instance
"""
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self.__auth = None # for request upgrade
def GetSearch(self,
term=None,
raw_query=None,
geocode=None,
since_id=None,
max_id=None,
until=None,
since=None,
count=15,
lang=None,
locale=None,
result_type="mixed",
include_entities=None):
"""Return twitter search results for a given term. You must specify one
of term, geocode, or raw_query.
Args:
term (str, optional):
Term to search by. Optional if you include geocode.
raw_query (str, optional):
A raw query as a string. This should be everything after the "?" in
the URL (i.e., the query parameters). You are responsible for all
type checking and ensuring that the query string is properly
formatted, as it will only be URL-encoded before being passed directly
to Twitter with no other checks performed. For advanced usage only.
since_id (int, optional):
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID.
until (str, optional):
Returns tweets generated before the given date. Date should be
formatted as YYYY-MM-DD.
since (str, optional):
Returns tweets generated since the given date. Date should be
formatted as YYYY-MM-DD.
geocode (str or list or tuple, optional):
Geolocation within which to search for tweets. Can be either a
string in the form of "latitude,longitude,radius" where latitude
and longitude are floats and radius is a string such as "1mi" or
"1km" ("mi" or "km" are the only units allowed). For example:
>>> api.GetSearch(geocode="37.781157,-122.398720,1mi").
Otherwise, you can pass a list of either floats or strings for
lat/long and a string for radius:
>>> api.GetSearch(geocode=[37.781157, -122.398720, "1mi"])
>>> # or:
>>> api.GetSearch(geocode=(37.781157, -122.398720, "1mi"))
>>> # or:
>>> api.GetSearch(geocode=("37.781157", "-122.398720", "1mi"))
count (int, optional):
Number of results to return. Default is 15; the maximum that
Twitter returns is 100, irrespective of the value supplied.
lang (str, optional):
Language for results as ISO 639-1 code. Default is None
(all languages).
locale (str, optional):
Language of the search query. Currently only 'ja' is effective.
This is intended for language-specific consumers and the default
should work in the majority of cases.
result_type (str, optional):
Type of result which should be returned. Default is "mixed".
Valid options are "mixed", "recent", and "popular".
include_entities (bool, optional):
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags.
Returns:
list: A sequence of twitter.Status instances, one for each message
containing the term, within the bounds of the geocoded area, or
given by the raw_query.
"""
url = '%s/search/tweets.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if until:
parameters['until'] = enf_type('until', str, until)
if since:
parameters['since'] = enf_type('since', str, since)
if lang:
parameters['lang'] = enf_type('lang', str, lang)
if locale:
parameters['locale'] = enf_type('locale', str, locale)
if term is None and geocode is None and raw_query is None:
return []
if term is not None:
parameters['q'] = term
if geocode is not None:
if isinstance(geocode, list) or isinstance(geocode, tuple):
parameters['geocode'] = ','.join([str(geo) for geo in geocode])
else:
parameters['geocode'] = enf_type('geocode', str, geocode)
if include_entities:
parameters['include_entities'] = enf_type('include_entities',
bool,
include_entities)
parameters['count'] = enf_type('count', int, count)
if result_type in ["mixed", "popular", "recent"]:
parameters['result_type'] = result_type
if raw_query is not None:
url = "{url}?{raw_query}".format(
url=url,
raw_query=raw_query)
resp = self._RequestUrl(url, 'GET')
else:
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data.get('statuses', '')]
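# Usage sketch, assuming an authenticated Api instance; the query values
# below are made-up examples:
# >>> results = api.GetSearch(term='python', result_type='recent', count=100)
# >>> results = api.GetSearch(raw_query='q=python%20-RT&count=100')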
def GetUsersSearch(self,
term=None,
page=1,
count=20,
include_entities=None):
"""Return twitter user search results for a given term.
Args:
term:
Term to search by.
page:
Page of results to return. Default is 1
[Optional]
count:
Number of results to return. Default is 20
[Optional]
include_entities:
If True, each tweet will include a node called "entities,".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
Returns:
A sequence of twitter.User instances, one for each message containing
the term
"""
# Build request parameters
parameters = {}
if term is not None:
parameters['q'] = term
if page != 1:
parameters['page'] = page
if include_entities:
parameters['include_entities'] = 1
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
# Make and send requests
url = '%s/users/search.json' % self.base_url
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [User.NewFromJsonDict(x) for x in data]
def GetTrendsCurrent(self, exclude=None):
"""Get the current top trending topics (global)
Args:
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
"""
return self.GetTrendsWoeid(id=1, exclude=exclude)
def GetTrendsWoeid(self, id, exclude=None):
"""Return the top 10 trending topics for a specific WOEID, if trending
information is available for it.
Args:
id:
the Yahoo! Where On Earth ID for a location.
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
"""
url = '%s/trends/place.json' % (self.base_url)
parameters = {'id': id}
if exclude:
parameters['exclude'] = exclude
resp = self._RequestUrl(url, verb='GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
trends = []
timestamp = data[0]['as_of']
for trend in data[0]['trends']:
trends.append(Trend.NewFromJsonDict(trend, timestamp=timestamp))
return trends
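# Usage sketch: WOEID 1 is the global feed used by GetTrendsCurrent above;
# 23424977 (United States) is shown purely as an illustration.
# >>> trends = api.GetTrendsWoeid(id=23424977, exclude='hashtags')
# >>> print([t.name for t in trends])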
def GetUserSuggestionCategories(self):
""" Return the list of suggested user categories, this can be used in
GetUserSuggestion function
Returns:
A list of categories
"""
url = '%s/users/suggestions.json' % (self.base_url)
resp = self._RequestUrl(url, verb='GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
categories = []
for category in data:
categories.append(Category.NewFromJsonDict(category))
return categories
def GetUserSuggestion(self, category):
""" Returns a list of users in a category
Args:
category:
The Category object to limit the search by
Returns:
A list of users in that category
"""
url = '%s/users/suggestions/%s.json' % (self.base_url, category.Slug)
resp = self._RequestUrl(url, verb='GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
users = []
for user in data['users']:
users.append(User.NewFromJsonDict(user))
return users
def GetHomeTimeline(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
exclude_replies=False,
contributor_details=False,
include_entities=True):
"""Fetch a collection of the most recent Tweets and retweets posted
by the authenticating user and the users they follow.
The home timeline is central to how most users interact with Twitter.
Args:
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
When True, each tweet returned in a timeline will include a user
object including only the status authors numerical ID. Omit this
parameter to receive the complete user object. [Optional]
exclude_replies:
This parameter will prevent replies from appearing in the
returned timeline. Using exclude_replies with the count
parameter will mean you will receive up to count tweets -
this is because the count parameter retrieves that many
tweets before filtering out retweets and replies. [Optional]
contributor_details:
This parameter enhances the contributors element of the
status response to include the screen_name of the contributor.
By default only the user_id of the contributor is included. [Optional]
include_entities:
The entities node will be omitted when set to false.
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
"""
url = '%s/statuses/home_timeline.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 200:
raise TwitterError({'message': "'count' may not be greater than 200"})
except ValueError:
raise TwitterError({'message': "'count' must be an integer"})
parameters['count'] = count
if since_id:
try:
parameters['since_id'] = int(since_id)
except ValueError:
raise TwitterError({'message': "'since_id' must be an integer"})
if max_id:
try:
parameters['max_id'] = int(max_id)
except ValueError:
raise TwitterError({'message': "'max_id' must be an integer"})
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
if contributor_details:
parameters['contributor_details'] = 1
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
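# Usage sketch, assuming an authenticated Api instance:
# >>> statuses = api.GetHomeTimeline(count=50, exclude_replies=True)
# >>> print([s.text for s in statuses])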
def GetUserTimeline(self,
user_id=None,
screen_name=None,
since_id=None,
max_id=None,
count=None,
include_rts=True,
trim_user=None,
exclude_replies=None):
"""Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
user_id:
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name. [Optional]
screen_name:
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
include_rts:
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets. [Optional]
trim_user:
If True, statuses will only contain the numerical user ID only.
Otherwise a full user object will be returned for each status.
[Optional]
exclude_replies:
If True, this will prevent replies from appearing in the returned
timeline. Using exclude_replies with the count parameter will mean you
will receive up to count tweets - this is because the count parameter
retrieves that many tweets before filtering out retweets and replies.
This parameter is only supported for JSON and XML responses. [Optional]
Returns:
A sequence of Status instances, one for each message up to count
"""
parameters = {}
url = '%s/statuses/user_timeline.json' % (self.base_url)
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
try:
parameters['since_id'] = int(since_id)
except ValueError:
raise TwitterError({'message': "since_id must be an integer"})
if max_id:
try:
parameters['max_id'] = int(max_id)
except ValueError:
raise TwitterError({'message': "max_id must be an integer"})
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if not include_rts:
parameters['include_rts'] = 0
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
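# A common paging sketch: walk backwards through a timeline by re-issuing
# the call with max_id set just below the oldest ID seen so far.
# >>> batch = api.GetUserTimeline(screen_name='twitter', count=200)
# >>> while batch:
# ...     oldest = min(s.id for s in batch)
# ...     batch = api.GetUserTimeline(screen_name='twitter', count=200,
# ...                                 max_id=oldest - 1)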
def GetStatus(self,
id,
trim_user=False,
include_my_retweet=True,
include_entities=True):
"""Returns a single status message, specified by the id parameter.
Args:
id:
The numeric ID of the status you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object. [Optional]
include_my_retweet:
When set to True, any Tweets returned that have been retweeted by
the authenticating user will include an additional
current_user_retweet node, containing the ID of the source status
for the retweet. [Optional]
include_entities:
If False, the entities node will be omitted.
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A twitter.Status instance representing that status message
"""
url = '%s/statuses/show.json' % (self.base_url)
parameters = {}
try:
parameters['id'] = int(id)
except ValueError:
raise TwitterError({'message': "'id' must be an integer."})
if trim_user:
parameters['trim_user'] = 1
if include_my_retweet:
parameters['include_my_retweet'] = 1
if not include_entities:
parameters['include_entities'] = 'none'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetStatusOembed(self,
id=None,
url=None,
maxwidth=None,
hide_media=False,
hide_thread=False,
omit_script=False,
align=None,
related=None,
lang=None):
"""Returns information allowing the creation of an embedded representation of a
Tweet on third party sites.
Specify tweet by the id or url parameter.
Args:
id:
The numeric ID of the status you are trying to embed.
url:
The url of the status you are trying to embed.
maxwidth:
The maximum width in pixels that the embed should be rendered at.
This value is constrained to be between 250 and 550 pixels. [Optional]
hide_media:
Specifies whether the embedded Tweet should automatically expand images. [Optional]
hide_thread:
Specifies whether the embedded Tweet should automatically show the original
message in the case that the embedded Tweet is a reply. [Optional]
omit_script:
Specifies whether the embedded Tweet HTML should include a <script>
element pointing to widgets.js. [Optional]
align:
Specifies whether the embedded Tweet should be left aligned, right aligned,
or centered in the page. [Optional]
related:
A comma separated string of related screen names. [Optional]
lang:
Language code for the rendered embed. [Optional]
Returns:
A dictionary with the response.
"""
request_url = '%s/statuses/oembed.json' % (self.base_url)
parameters = {}
if id is not None:
try:
parameters['id'] = int(id)
except ValueError:
raise TwitterError({'message': "'id' must be an integer."})
elif url is not None:
parameters['url'] = url
else:
raise TwitterError({'message': "Must specify either 'id' or 'url'"})
if maxwidth is not None:
parameters['maxwidth'] = maxwidth
if hide_media is True:
parameters['hide_media'] = 'true'
if hide_thread is True:
parameters['hide_thread'] = 'true'
if omit_script is True:
parameters['omit_script'] = 'true'
if align is not None:
if align not in ('left', 'center', 'right', 'none'):
raise TwitterError({'message': "'align' must be 'left', 'center', 'right', or 'none'"})
parameters['align'] = align
if related:
if not isinstance(related, str):
raise TwitterError({'message': "'related' should be a string of comma separated screen names"})
parameters['related'] = related
if lang is not None:
if not isinstance(lang, str):
raise TwitterError({'message': "'lang' should be string instance"})
parameters['lang'] = lang
resp = self._RequestUrl(request_url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def DestroyStatus(self, id, trim_user=False):
"""Destroys the status specified by the required ID parameter.
The authenticating user must be the author of the specified
status.
Args:
id:
The numerical ID of the status you're trying to destroy.
Returns:
A twitter.Status instance representing the destroyed status message
"""
try:
post_data = {'id': int(id)}
except ValueError:
raise TwitterError({'message': "id must be an integer"})
url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
if trim_user:
post_data['trim_user'] = 1
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def PostUpdate(self,
status,
media=None,
media_additional_owners=None,
media_category=None,
in_reply_to_status_id=None,
latitude=None,
longitude=None,
place_id=None,
display_coordinates=False,
trim_user=False,
verify_status_length=True):
"""Post a twitter status message from the authenticated user.
https://dev.twitter.com/docs/api/1.1/post/statuses/update
Args:
status:
The message text to be posted. Must be less than or equal to 140
characters.
media:
A URL, a local file, or a file-like object (something with a read()
method), or a list of any combination of the above.
media_additional_owners:
A list of user ids representing Twitter users that should be able
to use the uploaded media in their tweets. If you pass a list of
media, then additional_owners will apply to each object. If you
need more granular control, please use the UploadMedia* methods.
media_category:
Only for use with the AdsAPI. See
https://dev.twitter.com/ads/creative/promoted-video-overview if
this applies to your application.
in_reply_to_status_id:
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored. [Optional]
latitude:
Latitude coordinate of the tweet in degrees. Will only work
in conjunction with longitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
longitude:
Longitude coordinate of the tweet in degrees. Will only work
in conjunction with latitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
place_id:
A place in the world. These IDs can be retrieved from
GET geo/reverse_geocode. [Optional]
display_coordinates:
Whether or not to put a pin on the exact coordinates a tweet
has been sent from. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
verify_status_length:
If True, api throws a hard error that the status is over
140 characters. If False, Api will attempt to post the
status. [Optional]
Returns:
A twitter.Status instance representing the message posted.
"""
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
if verify_status_length and calc_expected_status_length(u_status) > CHARACTER_LIMIT:
raise TwitterError("Text must be less than or equal to {0} characters.".format(CHARACTER_LIMIT))
parameters = {'status': u_status}
if media:
media_ids = []
if isinstance(media, int):
media_ids.append(media)
elif isinstance(media, list):
for media_file in media:
# If you want to pass just a media ID, it should be an int
if isinstance(media_file, int):
media_ids.append(media_file)
continue
_, _, file_size, media_type = parse_media_file(media_file)
if media_type == 'image/gif' or media_type == 'video/mp4':
raise TwitterError(
'You cannot post more than 1 GIF or 1 video in a '
'single status.')
if file_size > self.chunk_size:
media_id = self.UploadMediaChunked(
media=media_file,
additional_owners=media_additional_owners,
media_category=media_category)
else:
media_id = self.UploadMediaSimple(
media=media_file,
additional_owners=media_additional_owners,
media_category=media_category)
media_ids.append(media_id)
else:
_, _, file_size, _ = parse_media_file(media)
if file_size > self.chunk_size:
media_ids = self.UploadMediaChunked(
media,
media_additional_owners)
else:
media_ids.append(
self.UploadMediaSimple(media,
media_additional_owners))
parameters['media_ids'] = ','.join([str(mid) for mid in media_ids])
if in_reply_to_status_id:
parameters['in_reply_to_status_id'] = in_reply_to_status_id
if latitude is not None and longitude is not None:
parameters['lat'] = str(latitude)
parameters['long'] = str(longitude)
if place_id is not None:
parameters['place_id'] = str(place_id)
if display_coordinates:
parameters['display_coordinates'] = 'true'
if trim_user:
parameters['trim_user'] = 'true'
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
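# Usage sketch: media accepts a URL, a local path, a file-like object, a
# pre-uploaded media ID, or a list mixing those. The path and ID below are
# hypothetical.
# >>> api.PostUpdate('Two photos', media=['/tmp/a.png', '/tmp/b.png'])
# >>> api.PostUpdate('Reusing an upload', media=710511363345354753)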
def UploadMediaSimple(self,
media,
additional_owners=None,
media_category=None):
""" Upload a media file to Twitter in one request. Used for small file
uploads that do not require chunked uploads.
Args:
media:
File-like object to upload.
additional_owners: additional Twitter users that are allowed to use
The uploaded media. Should be a list of integers. Maximum
number of additional owners is capped at 100 by Twitter.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API or 0.
"""
url = '%s/media/upload.json' % self.upload_url
parameters = {}
media_fp, filename, file_size, media_type = parse_media_file(media)
parameters['media'] = media_fp.read()
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
return data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded.'})
def UploadMediaChunked(self,
media,
additional_owners=None,
media_category=None):
""" Upload a media file to Twitter in multiple requests.
Args:
media:
File-like object to upload.
additional_owners: additional Twitter users that are allowed to use
The uploaded media. Should be a list of integers. Maximum
number of additional owners is capped at 100 by Twitter.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API or 0.
"""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded'})
boundary = bytes("--{0}".format(uuid4()), 'utf-8')
media_id_bytes = bytes(str(media_id).encode('utf-8'))
headers = {'Content-Type': 'multipart/form-data; boundary={0}'.format(
str(boundary[2:], 'utf-8'))}
segment_id = 0
while True:
try:
data = media_fp.read(self.chunk_size)
except ValueError:
break
if not data:
break
body = [
boundary,
b'Content-Disposition: form-data; name="command"',
b'',
b'APPEND',
boundary,
b'Content-Disposition: form-data; name="media_id"',
b'',
media_id_bytes,
boundary,
b'Content-Disposition: form-data; name="segment_index"',
b'',
bytes(str(segment_id).encode('utf-8')),
boundary,
bytes('Content-Disposition: form-data; name="media"; filename="{0}"'.format(filename), 'utf-8'),
b'Content-Type: application/octet-stream',
b'',
data,
boundary + b'--'
]
body_data = b'\r\n'.join(body)
headers['Content-Length'] = str(len(body_data))
resp = self._RequestChunkedUpload(url=url,
headers=headers,
data=body_data)
# The body of the response should be blank, but the normal decoding
# raises a JSONDecodeError, so we should only do error checking
# if the response is not blank.
if resp.content.decode('utf-8'):
return self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
segment_id += 1
try:
media_fp.close()
except Exception:
pass
# Finalizing the upload:
parameters = {
'command': 'FINALIZE',
'media_id': media_id
}
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
return data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded.'})
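# The chunked flow above has three phases: INIT declares media_type and
# total_bytes and yields a media_id; one APPEND per chunk_size slice posts
# the bytes with a segment_index; FINALIZE closes the upload. Sketch with a
# hypothetical path:
# >>> media_id = api.UploadMediaChunked('/tmp/clip.mp4')
# >>> api.PostUpdate('video!', media=media_id)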
def PostMedia(self,
status,
media,
possibly_sensitive=None,
in_reply_to_status_id=None,
latitude=None,
longitude=None,
place_id=None,
display_coordinates=False):
"""Post a twitter status message from the user with a picture attached.
Args:
status:
the text of your update
media:
This can be the location of media(PNG, JPG, GIF) on the local file
system or at an HTTP URL, it can also be a file-like object
possibly_sensitive:
set true if content is "advanced." [Optional]
in_reply_to_status_id:
ID of a status that this is in reply to. [Optional]
lat:
latitude of location. [Optional]
long:
longitude of location. [Optional]
place_id:
A place in the world identified by a Twitter place ID. [Optional]
display_coordinates:
Set true if you want to display coordinates. [Optional]
Returns:
A twitter.Status instance representing the message posted.
"""
warnings.warn((
"This endpoint has been deprecated by Twitter. Please use "
"PostUpdate() instead. Details of Twitter's deprecation can be "
"found at: "
"dev.twitter.com/rest/reference/post/statuses/update_with_media"),
DeprecationWarning)
url = '%s/statuses/update_with_media.json' % self.base_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
data = {'status': u_status}
if not hasattr(media, 'read'):
if media.startswith('http'):
data['media'] = urlopen(media).read()
else:
with open(str(media), 'rb') as f:
data['media'] = f.read()
else:
data['media'] = media.read()
if possibly_sensitive:
data['possibly_sensitive'] = 'true'
if in_reply_to_status_id:
data['in_reply_to_status_id'] = str(in_reply_to_status_id)
if latitude is not None and longitude is not None:
data['lat'] = str(latitude)
data['long'] = str(longitude)
if place_id is not None:
data['place_id'] = str(place_id)
if display_coordinates:
data['display_coordinates'] = 'true'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def PostMultipleMedia(self, status, media, possibly_sensitive=None,
in_reply_to_status_id=None, latitude=None,
longitude=None, place_id=None,
display_coordinates=False):
"""
Post a twitter status message from the authenticated user with
multiple pictures attached.
Args:
status:
the text of your update
media:
location of multiple media elements(PNG, JPG, GIF)
possibly_sensitive:
set true is content is "advanced"
in_reply_to_status_id:
ID of a status that this is in reply to
lat:
location in latitude
long:
location in longitude
place_id:
A place in the world identified by a Twitter place ID
display_coordinates:
Returns:
A twitter.Status instance representing the message posted.
"""
warnings.warn((
"This method is deprecated. Please use PostUpdate instead, "
"passing a list of media that you would like to associate "
"with the updated."), DeprecationWarning, stacklevel=2)
if not isinstance(media, list):
raise TwitterError("media must be a list of multiple media elements")
if len(media) > 4:
raise TwitterError("Maximum of 4 media elements can be allocated to a tweet")
url = '%s/media/upload.json' % self.upload_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
media_ids = ''
for m in range(0, len(media)):
data = {}
if not hasattr(media[m], 'read'):
if media[m].startswith('http'):
data['media'] = urlopen(media[m]).read()
else:
with open(str(media[m]), 'rb') as f:
data['media'] = f.read()
else:
data['media'] = media[m].read()
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
media_ids += str(data['media_id_string'])
if m != len(media) - 1:
media_ids += ","
data = {'status': u_status, 'media_ids': media_ids}
url = '%s/statuses/update.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def _TweetTextWrap(self,
status,
char_lim=140):
if not self._config:
self.GetHelpConfiguration()
tweets = []
line = []
line_length = 0
words = re.split(r'\s', status)
if len(words) == 1 and not is_url(words[0]):
if len(words[0]) > char_lim:
raise TwitterError({"message": "Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(words[0]), char_lim)})
else:
tweets.append(words[0])
return tweets
for word in words:
if len(word) > char_lim:
raise TwitterError({"message": "Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(word), char_lim)})
new_len = line_length
if is_url(word):
new_len = line_length + self._config['short_url_length_https'] + 1
else:
new_len += len(word) + 1
if new_len > char_lim:
tweets.append(' '.join(line))
line = [word]
line_length = new_len - line_length
else:
line.append(word)
line_length = new_len
tweets.append(' '.join(line))
return tweets
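# Behaviour sketch: URLs are costed at the configured
# short_url_length_https rather than their literal length, so a long link
# does not force a split on its own.
# >>> api._TweetTextWrap(' '.join(['word'] * 60))
# would yield a list of strings, each within the character limit.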
def PostUpdates(self,
status,
continuation=None,
**kwargs):
"""Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than 140 characters.
Args:
status:
The message text to be posted.
May be longer than 140 characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A list of twitter.Status instances representing the messages posted.
"""
results = list()
if continuation is None:
continuation = ''
char_limit = CHARACTER_LIMIT - len(continuation)
tweets = self._TweetTextWrap(status=status, char_lim=char_limit)
if len(tweets) == 1:
results.append(self.PostUpdate(status=tweets[0]))
return results
for tweet in tweets[0:-1]:
results.append(self.PostUpdate(status=tweet + continuation, **kwargs))
results.append(self.PostUpdate(status=tweets[-1], **kwargs))
return results
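# Usage sketch: a horizontal-ellipsis continuation marks every part except
# the last one.
# >>> parts = api.PostUpdates(very_long_text, continuation=u'\u2026')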
def PostRetweet(self, original_id, trim_user=False):
"""Retweet a tweet with the Retweet API.
Args:
original_id:
The numerical id of the tweet that will be retweeted
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the original tweet with retweet details embedded.
"""
try:
if int(original_id) <= 0:
raise TwitterError({'message': "'original_id' must be a positive number"})
except ValueError:
raise TwitterError({'message': "'original_id' must be an integer"})
url = '%s/statuses/retweet/%s.json' % (self.base_url, original_id)
data = {'id': original_id}
if trim_user:
data['trim_user'] = 'true'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetUserRetweets(self,
count=None,
since_id=None,
max_id=None,
trim_user=False):
"""Fetch the sequence of retweets made by the authenticated user.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
"""
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user,
exclude_replies=True, include_rts=True)
def GetReplies(self,
since_id=None,
count=None,
max_id=None,
trim_user=False):
"""Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
"""
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user,
exclude_replies=False, include_rts=False)
def GetRetweets(self,
statusid,
count=None,
trim_user=False):
"""Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid:
The ID of the tweet for which retweets should be searched for
count:
The number of status messages to retrieve. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A list of twitter.Status instances, which are retweets of statusid
"""
url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid)
parameters = {}
if trim_user:
parameters['trim_user'] = 'true'
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
def GetRetweeters(self,
status_id,
cursor=None,
stringify_ids=None):
"""Returns a collection of up to 100 user IDs belonging to users who have
retweeted the tweet specified by the status_id parameter.
Args:
status_id:
the tweet's numerical ID
cursor:
the cursor at which to start paging; later pages are fetched
automatically. [Optional]
stringify_ids:
returns the IDs as unicode strings. [Optional]
Returns:
A list of user IDs
"""
url = '%s/statuses/retweeters/ids.json' % (self.base_url)
parameters = {}
parameters['id'] = status_id
if stringify_ids:
parameters['stringify_ids'] = 'true'
result = []
while True:
if cursor:
try:
parameters['cursor'] = int(cursor)
except ValueError:
raise TwitterError({'message': "cursor must be an integer"})
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
result += [x for x in data['ids']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
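# Usage sketch with a hypothetical status ID; the loop above follows
# next_cursor until Twitter reports 0, so one call gathers every page:
# >>> ids = api.GetRetweeters(status_id=123456789)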
def GetRetweetsOfMe(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
include_entities=True,
include_user_entities=True):
"""Returns up to 100 of the most recent tweets of the user that have been
retweeted by others.
Args:
count:
The number of retweets to retrieve, up to 100.
Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than
(newer than) this ID. [Optional]
max_id:
Returns results with an ID less than or equal
to this ID. [Optional]
trim_user:
When True, the user object for each tweet will
only be an ID. [Optional]
include_entities:
When True, the tweet entities will be included. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
"""
url = '%s/statuses/retweets_of_me.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError({'message': "'count' may not be greater than 100"})
except ValueError:
raise TwitterError({'message': "'count' must be an integer"})
if count:
parameters['count'] = count
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if trim_user:
parameters['trim_user'] = trim_user
if not include_entities:
parameters['include_entities'] = include_entities
if not include_user_entities:
parameters['include_user_entities'] = include_user_entities
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
def GetBlocksPaged(self,
cursor=-1,
skip_status=False,
include_user_entities=False):
""" Fetch a page of the users (as twitter.User instances)
blocked by the currently authenticated user.
Args:
cursor:
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each blocked user.
"""
url = '%s/blocks/list.json' % self.base_url
result = []
parameters = {}
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
result += [User.NewFromJsonDict(x) for x in data['users']]
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
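# Sketch of driving the paged variant manually (GetBlocks below wraps this
# same loop):
# >>> cursor, blocked = -1, []
# >>> while True:
# ...     cursor, prev, users = api.GetBlocksPaged(cursor=cursor)
# ...     blocked += users
# ...     if cursor == 0 or cursor == prev:
# ...         break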
def GetBlocks(self,
skip_status=False,
include_user_entities=False):
""" Fetch the sequence of all users (as twitter.User instances),
blocked by the currently authenticated user.
Args:
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A list of twitter.User instances, one for each blocked user.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, users = self.GetBlocksPaged(
cursor=cursor,
skip_status=skip_status,
include_user_entities=include_user_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetBlocksIDsPaged(self,
cursor=-1,
skip_status=None,
include_user_entities=None):
""" Fetch a page of the user IDs (integers) blocked by the currently
authenticated user.
Args:
cursor:
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, list of user IDs of blocked users.
"""
url = '%s/blocks/ids.json' % self.base_url
parameters = {}
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
user_ids = data.get('ids', [])
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, user_ids
def GetBlocksIDs(self,
skip_status=None,
include_user_entities=None):
""" Fetch the sequence of all users (as integer user ids),
blocked by the currently authenticated user.
Args:
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A list of user IDs for all blocked users.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, user_ids = self.GetBlocksIDsPaged(
cursor=cursor,
skip_status=skip_status,
include_user_entities=include_user_entities)
result += user_ids
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def DestroyBlock(self, id, trim_user=False):
"""Destroys the block for the user specified by the required ID
parameter.
The authenticating user must have blocked the user specified by the
required ID parameter.
Args:
id:
The numerical ID of the user to be un-blocked.
Returns:
A twitter.User instance representing the un-blocked user.
"""
try:
post_data = {'user_id': int(id)}
except ValueError:
raise TwitterError({'message': "id must be an integer"})
url = '%s/blocks/destroy.json' % (self.base_url)
if trim_user:
post_data['trim_user'] = 1
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def _GetIDsPaged(self,
url,
user_id,
screen_name,
cursor,
stringify_ids,
count):
"""
This is the lowest level paging logic for fetching IDs. It is used
solely by GetFollowerIDsPaged and GetFriendIDsPaged. It is not intended
for other use.
See GetFollowerIDsPaged or GetFriendIDsPaged for an explanation of the
input arguments.
"""
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if count is not None:
parameters['count'] = count
parameters['stringify_ids'] = stringify_ids
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if 'ids' in data:
result.extend([x for x in data['ids']])
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
def GetFollowerIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return a list of one page followers.
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user id's to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of user ids,
one for each follower
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetIDsPaged(url,
user_id,
screen_name,
cursor,
stringify_ids,
count)
def GetFriendIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return the list of all friends
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user id's to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of user ids,
one for each friend
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetIDsPaged(url,
user_id,
screen_name,
cursor,
stringify_ids,
count)
def _GetFriendFollowerIDs(self,
url=None,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Common method for GetFriendIDs and GetFollowerIDs """
if cursor is not None or count is not None:
warnings.warn(
"Use of 'cursor' and 'count' parameters are deprecated as of "
"python-twitter 3.0. Please use GetFriendIDsPaged or "
"GetFollowerIDsPaged instead.",
DeprecationWarning, stacklevel=2)
count = 5000
cursor = -1
result = []
if total_count:
try:
total_count = int(total_count)
except ValueError:
raise TwitterError({'message': "total_count must be an integer"})
if total_count and total_count < count:
count = total_count
while True:
if total_count is not None and len(result) + count > total_count:
break
next_cursor, previous_cursor, data = self._GetIDsPaged(
url,
user_id,
screen_name,
cursor,
stringify_ids,
count)
result.extend([x for x in data])
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetFollowerIDs(self,
user_id=None,
screen_name=None,
cursor=None,
stringify_ids=False,
count=None,
total_count=None):
"""Returns a list of twitter user id's for every person
that is following the specified user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user IDs to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many
followers and you don't want to get rate limited. The data returned
might contain more UIDs if total_count is not a multiple of count
(5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url,
user_id,
screen_name,
cursor,
count,
stringify_ids,
total_count)
def GetFriendIDs(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Fetch a sequence of user ids, one for each friend.
Returns a list of all the given user's friends' IDs. If no user_id or
screen_name is given, the friends will be those of the authenticated
user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of integers.
[Optional]
count:
The number of user IDs to retrieve per API request. Please be aware that
this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many followers
and you don't want to get rate limited. The data returned might contain more
UIDs if total_count is not a multiple of count (5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url,
user_id,
screen_name,
cursor,
count,
stringify_ids,
total_count)
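# Illustrative usage sketch: capping the number of friend IDs fetched so a
# very large account does not burn through the rate limit. Assumes `api` is
# an authenticated twitter.Api instance; because whole pages are fetched,
# the result may be somewhat smaller than total_count.
#
#     friend_ids = api.GetFriendIDs(screen_name='twitterapi',
#                                   total_count=10000)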
def _GetFriendsFollowersPaged(self,
url=None,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of 1 page of friends
or followers.
Args:
url:
Endpoint from which to get data. Either
base_url+'/followers/list.json' or base_url+'/friends/list.json'.
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
if user_id and screen_name:
warnings.warn(
"If both user_id and screen_name are specified, Twitter will "
"return the followers of the user specified by screen_name, "
"however this behavior is undocumented by Twitter and might "
"change without warning.", stacklevel=2)
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
parameters['skip_status'] = skip_status
parameters['include_user_entities'] = include_user_entities
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
users = [User.NewFromJsonDict(user) for user in data.get('users', [])]
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, users
def GetFollowersPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all followers
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
def GetFriendsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all friends.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a current maximum of
200. Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each friend
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
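# Illustrative usage sketch: paging through full twitter.User objects one
# page at a time, e.g. to persist each page before requesting the next.
# Assumes `api` is an authenticated twitter.Api instance.
#
#     cursor = -1
#     while True:
#         next_cursor, prev_cursor, users = api.GetFriendsPaged(
#             screen_name='twitterapi', cursor=cursor, skip_status=True)
#         for user in users:
#             print(user.screen_name)
#         if next_cursor == 0 or next_cursor == prev_cursor:
#             break
#         cursor = next_cursor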
def _GetFriendsFollowers(self,
url=None,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
""" Fetch the sequence of twitter.User instances, one for each friend
or follower.
Args:
url:
URL to get. Either base_url + ('/followers/list.json' or
'/friends/list.json').
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend or follower
"""
if cursor is not None or count is not None:
warnings.warn(
"Use of 'cursor' and 'count' parameters are deprecated as of "
"python-twitter 3.0. Please use GetFriendsPaged instead.",
DeprecationWarning, stacklevel=2)
count = 200
cursor = -1
result = []
if total_count:
try:
total_count = int(total_count)
except ValueError:
raise TwitterError({'message': "total_count must be an integer"})
if total_count <= 200:
count = total_count
while True:
if total_count is not None and len(result) + count > total_count:
break
next_cursor, previous_cursor, data = self._GetFriendsFollowersPaged(
url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
if next_cursor:
cursor = next_cursor
result.extend(data)
if next_cursor == 0 or next_cursor == previous_cursor:
break
return result
def GetFollowers(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each follower.
If both user_id and screen_name are specified, this call will return
the followers of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def GetFriends(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each friend.
If both user_id and screen_name are specified, this call will return
the friends of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def UsersLookup(self,
user_id=None,
screen_name=None,
users=None,
include_entities=True):
"""Fetch extended information for the specified users.
Users may be specified as lists of user_ids, screen_names, or
twitter.User objects. The list of users that are queried is the
union of all specified parameters.
Args:
user_id:
A list of user_ids to retrieve extended information. [Optional]
screen_name:
A list of screen_names to retrieve extended information. [Optional]
users:
A list of twitter.User objects to retrieve extended information.
[Optional]
include_entities:
The entities node that may appear within embedded statuses will be
omitted when set to False. [Optional]
Returns:
A list of twitter.User objects for the requested users
"""
if not user_id and not screen_name and not users:
raise TwitterError({'message': "Specify at least one of user_id, screen_name, or users."})
url = '%s/users/lookup.json' % self.base_url
parameters = {}
uids = []
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if uids:
parameters['user_id'] = ','.join(["%s" % u for u in uids])
if screen_name:
parameters['screen_name'] = ','.join(screen_name)
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
try:
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
except TwitterError as e:
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
data = []
else:
raise
return [User.NewFromJsonDict(u) for u in data]
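# Illustrative usage sketch: users/lookup accepts batches, so a long ID
# list is best submitted in chunks; 100 matches Twitter's documented
# per-request maximum. Assumes `api` is an authenticated twitter.Api
# instance and `all_ids` is a list of numeric user IDs.
#
#     users = []
#     for i in range(0, len(all_ids), 100):
#         users.extend(api.UsersLookup(user_id=all_ids[i:i + 100]))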
def GetUser(self,
user_id=None,
screen_name=None,
include_entities=True):
"""Returns a single user.
Args:
user_id:
The id of the user to retrieve. [Optional]
screen_name:
The screen name of the user for whom to return results.
Either a user_id or screen_name is required for this method.
[Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
Returns:
A twitter.User instance representing that user
"""
url = '%s/users/show.json' % (self.base_url)
parameters = {}
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
else:
raise TwitterError({'message': "Specify at least one of user_id or screen_name."})
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def GetDirectMessages(self,
since_id=None,
max_id=None,
count=None,
include_entities=True,
skip_status=False,
full_text=False,
page=None):
"""Returns a list of the direct messages sent to the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
skip_status:
When set to True statuses will not be included in the returned user
objects. [Optional]
full_text:
When set to True the full message will be included in the returned
message object if the message is longer than 140 characters. [Optional]
page:
If you want more than 200 messages, you can use this and get 20
messages each time. You must call it again, incrementing the page
value, until it returns nothing. You can't use the count option
with it. The first page is 1, not 0.
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if not include_entities:
parameters['include_entities'] = 'false'
if skip_status:
parameters['skip_status'] = 1
if full_text:
parameters['full_text'] = 'true'
if page:
parameters['page'] = page
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [DirectMessage.NewFromJsonDict(x) for x in data]
def GetSentDirectMessages(self,
since_id=None,
max_id=None,
count=None,
page=None,
include_entities=True):
"""Returns a list of the direct messages sent by the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages/sent.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self,
text,
user_id=None,
screen_name=None):
"""Post a twitter direct message from the authenticated user.
Args:
text: The message text to be posted. Must be less than 140 characters.
user_id:
The ID of the user who should receive the direct message. [Optional]
screen_name:
The screen name of the user who should receive the direct message. [Optional]
Returns:
A twitter.DirectMessage instance representing the message posted
"""
url = '%s/direct_messages/new.json' % self.base_url
data = {'text': text}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError({'message': "Specify at least one of user_id or screen_name."})
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return DirectMessage.NewFromJsonDict(data)
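# Illustrative usage sketch: sending a direct message by screen name.
# Assumes `api` is an authenticated twitter.Api instance and that Twitter
# permits the authenticated user to message the recipient.
#
#     dm = api.PostDirectMessage("See you at 6.", screen_name='example')
#     print(dm.id, dm.text)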
def DestroyDirectMessage(self, id, include_entities=True):
"""Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
id: The id of the direct message to be destroyed
include_entities:
The entities node will be omitted when set to False. [Optional]
Returns:
A twitter.DirectMessage instance representing the message destroyed
"""
url = '%s/direct_messages/destroy.json' % self.base_url
data = {'id': id}
if not include_entities:
data['include_entities'] = 'false'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user_id=None, screen_name=None, follow=True):
"""Befriends the user specified by the user_id or screen_name.
Args:
user_id:
A user_id to follow [Optional]
screen_name:
A screen_name to follow [Optional]
follow:
Set to False to disable notifications for the target user
Returns:
A twitter.User instance representing the befriended user.
"""
return self._AddOrEditFriendship(user_id=user_id, screen_name=screen_name, follow=follow)
def _AddOrEditFriendship(self, user_id=None, screen_name=None, uri_end='create', follow_key='follow', follow=True):
"""
Shared method for Create/Update Friendship.
"""
url = '%s/friendships/%s.json' % (self.base_url, uri_end)
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError({'message': "Specify at least one of user_id or screen_name."})
data[follow_key] = json.dumps(follow)
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def UpdateFriendship(self, user_id=None, screen_name=None, follow=True, **kwargs): # api compat with Create
"""Updates a friendship with the user specified by the user_id or screen_name.
Args:
user_id:
A user_id to update [Optional]
screen_name:
A screen_name to update [Optional]
follow:
Set to False to disable notifications for the target user.
device:
Alias for follow; if given as a keyword argument, it overrides
the follow value.
Returns:
A twitter.User instance representing the befriended user.
"""
follow = kwargs.get('device', follow)
return self._AddOrEditFriendship(user_id=user_id, screen_name=screen_name, follow=follow, follow_key='device',
uri_end='update')
def DestroyFriendship(self, user_id=None, screen_name=None):
"""Discontinues friendship with a user_id or screen_name.
Args:
user_id:
A user_id to unfollow [Optional]
screen_name:
A screen_name to unfollow [Optional]
Returns:
A twitter.User instance representing the discontinued friend.
"""
url = '%s/friendships/destroy.json' % self.base_url
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError({'message': "Specify at least one of user_id or screen_name."})
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def LookupFriendship(self,
user_id=None,
screen_name=None):
"""Lookup friendship status for user to authed user.
Users may be specified either as lists of either user_ids,
screen_names, or twitter.User objects. The list of users that
are queried is the union of all specified parameters.
Up to 100 users may be specified.
Args:
user_id (int, User, or list of ints or Users, optional):
A list of user_ids to retrieve extended information.
screen_name (string, User, or list of strings or Users, optional):
A list of screen_names to retrieve extended information.
Returns:
list: A list of twitter.UserStatus instances representing the
friendship status between the specified users and the authenticated
user.
"""
url = '%s/friendships/lookup.json' % (self.base_url)
parameters = {}
if user_id:
if isinstance(user_id, (list, tuple)):
uids = []
for user in user_id:
if isinstance(user, User):
uids.append(user.id)
else:
uids.append(enf_type('user_id', int, user))
parameters['user_id'] = ",".join([str(uid) for uid in uids])
else:
if isinstance(user_id, User):
parameters['user_id'] = user_id.id
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
if screen_name:
if isinstance(screen_name, (list, tuple)):
sn_list = []
for user in screen_name:
if isinstance(user, User):
sn_list.append(user.screen_name)
else:
sn_list.append(enf_type('screen_name', str, user))
parameters['screen_name'] = ','.join(sn_list)
else:
if isinstance(screen_name, User):
parameters['screen_name'] = screen_name.screen_name
else:
parameters['screen_name'] = enf_type('screen_name', str, screen_name)
if not user_id and not screen_name:
raise TwitterError(
{'message': "Specify at least one of user_id or screen_name."})
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [UserStatus.NewFromJsonDict(x) for x in data]
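# Illustrative usage sketch: reading the lookup result. The attribute names
# on twitter.UserStatus used here (`screen_name`, `connections`) are
# assumptions about this client's model, not guaranteed by the endpoint.
#
#     for status in api.LookupFriendship(screen_name=['example1', 'example2']):
#         print(status.screen_name, status.connections)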
def CreateFavorite(self,
status=None,
id=None,
include_entities=True):
"""Favorites the specified status object or id as the authenticating user.
Returns the favorite status when successful.
Args:
id:
The id of the twitter status to mark as a favorite. [Optional]
status:
The twitter.Status object to mark as a favorite. [Optional]
include_entities:
The entities node will be omitted when set to False. [Optional]
Returns:
A twitter.Status instance representing the newly-marked favorite.
"""
url = '%s/favorites/create.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError({'message': "Specify id or status"})
if not include_entities:
data['include_entities'] = 'false'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def DestroyFavorite(self,
status=None,
id=None,
include_entities=True):
"""Un-Favorites the specified status object or id as the authenticating user.
Returns the un-favorited status when successful.
Args:
id:
The id of the twitter status to unmark as a favorite. [Optional]
status:
The twitter.Status object to unmark as a favorite. [Optional]
include_entities:
The entities node will be omitted when set to False. [Optional]
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
"""
url = '%s/favorites/destroy.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError({'message': "Specify id or status"})
if not include_entities:
data['include_entities'] = 'false'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetFavorites(self,
user_id=None,
screen_name=None,
count=None,
since_id=None,
max_id=None,
include_entities=True):
"""Return a list of Status objects representing favorited tweets.
Returns up to 200 most recent tweets for the authenticated user.
Args:
user_id:
Specifies the ID of the user for whom to return the
favorites. Helpful for disambiguating when a valid user ID
is also a valid screen name. [Optional]
screen_name:
Specifies the screen name of the user for whom to return the
favorites. Helpful for disambiguating when a valid screen
name is also a user ID. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
include_entities:
The entities node will be omitted when set to False. [Optional]
Returns:
A sequence of Status instances, one for each favorited tweet up to count
"""
parameters = {}
url = '%s/favorites/list.json' % self.base_url
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
try:
parameters['since_id'] = int(since_id)
except ValueError:
raise TwitterError({'message': "since_id must be an integer"})
if max_id:
try:
parameters['max_id'] = int(max_id)
except ValueError:
raise TwitterError({'message': "max_id must be an integer"})
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if include_entities:
parameters['include_entities'] = True
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
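# Illustrative usage sketch: favorites/list exposes no cursor, so older
# pages are fetched by walking max_id backwards until a page comes back
# empty. Assumes `api` is an authenticated twitter.Api instance.
#
#     favorites, max_id = [], None
#     while True:
#         page = api.GetFavorites(screen_name='twitterapi', count=200,
#                                 max_id=max_id)
#         if not page:
#             break
#         favorites.extend(page)
#         max_id = min(s.id for s in page) - 1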
def GetMentions(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
contributor_details=False,
include_entities=True):
"""Returns the 20 most recent mentions (status containing @screen_name)
for the authenticating user.
Args:
count:
Specifies the number of tweets to try and retrieve, up to a maximum of
200. The value of count is best thought of as a limit to the number of
tweets to return because suspended or deleted content is removed after
the count has been applied. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than
(that is, older than) the specified ID. [Optional]
trim_user:
When set to True, each tweet returned in a timeline will include a user
object including only the status author's numerical ID. Omit this
parameter to receive the complete user object. [Optional]
contributor_details:
If set to True, this parameter enhances the contributors element of the
status response to include the screen_name of the contributor. By
default only the user_id of the contributor is included. [Optional]
include_entities:
The entities node will be omitted when set to False. [Optional]
Returns:
A sequence of twitter.Status instances, one for each mention of the user.
"""
url = '%s/statuses/mentions_timeline.json' % self.base_url
parameters = {}
if count:
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
if since_id:
try:
parameters['since_id'] = int(since_id)
except ValueError:
raise TwitterError({'message': "since_id must be an integer"})
if max_id:
try:
parameters['max_id'] = int(max_id)
except ValueError:
raise TwitterError({'message': "max_id must be an integer"})
if trim_user:
parameters['trim_user'] = 1
if contributor_details:
parameters['contributor_details'] = 'true'
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
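# Illustrative usage sketch: polling for new mentions with since_id so each
# call only returns statuses newer than the last one seen. Assumes `api` is
# an authenticated twitter.Api instance and the `time` module is imported.
#
#     last_seen = None
#     while True:
#         mentions = api.GetMentions(since_id=last_seen, count=200)
#         if mentions:
#             last_seen = max(s.id for s in mentions)
#             for status in mentions:
#                 print(status.user.screen_name, status.text)
#         time.sleep(60)  # stay well inside the rate limit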
@staticmethod
def _IDList(list_id, slug, owner_id, owner_screen_name):
parameters = {}
if list_id is not None:
parameters['list_id'] = enf_type('list_id', int, list_id)
elif slug is not None:
parameters['slug'] = slug
if owner_id is not None:
parameters['owner_id'] = enf_type('owner_id', int, owner_id)
elif owner_screen_name is not None:
parameters['owner_screen_name'] = owner_screen_name
else:
raise TwitterError({'message': (
'If specifying a list by slug, an owner_id or '
'owner_screen_name must also be given.')})
else:
raise TwitterError({'message': (
'Either list_id or slug and one of owner_id and '
'owner_screen_name must be passed.')})
return parameters
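# Illustrative note: _IDList normalizes the two ways a list can be
# addressed. The example values below are made up.
#
#     Api._IDList(list_id=58300198, slug=None,
#                 owner_id=None, owner_screen_name=None)
#     # -> {'list_id': 58300198}
#
#     Api._IDList(list_id=None, slug='meetup-20100301',
#                 owner_id=None, owner_screen_name='twitterapi')
#     # -> {'slug': 'meetup-20100301', 'owner_screen_name': 'twitterapi'}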
def CreateList(self, name, mode=None, description=None):
"""Creates a new list with the give name for the authenticated user.
Args:
name (str):
New name for the list
mode (str, optional):
'public' or 'private'. Defaults to 'public'.
description (str, optional):
Description of the list.
Returns:
twitter.list.List: A twitter.List instance representing the new list
"""
url = '%s/lists/create.json' % self.base_url
parameters = {'name': name}
if mode is not None:
parameters['mode'] = mode
if description is not None:
parameters['description'] = description
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def DestroyList(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
"""Destroys the list identified by list_id or slug and one of
owner_screen_name or owner_id.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.list.List: A twitter.List instance representing the
removed list.
"""
url = '%s/lists/destroy.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def CreateSubscription(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
"""Creates a subscription to a list by the authenticated user.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.user.User: A twitter.User instance representing the user subscribed
"""
url = '%s/lists/subscribers/create.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def DestroySubscription(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
"""Destroys the subscription to a list for the authenticated user.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify the
list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.list.List: A twitter.List instance representing
the list unsubscribed from.
"""
url = '%s/lists/subscribers/destroy.json' % (self.base_url)
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def ShowSubscription(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None,
user_id=None,
screen_name=None,
include_entities=False,
skip_status=False):
"""Check if the specified user is a subscriber of the specified list.
Returns the user if they are a subscriber.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical ID of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical ID.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
user_id (int, optional):
The ID of the user whose subscription is being checked.
If not given, then screen_name is required.
screen_name (str, optional):
The screen name of the user whose subscription is being checked.
If not given, then user_id is required.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to False.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
Returns:
twitter.user.User: A twitter.User instance representing the user
requested.
"""
url = '%s/lists/subscribers/show.json' % (self.base_url)
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_entities:
parameters['include_entities'] = True
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def GetSubscriptions(self,
user_id=None,
screen_name=None,
count=20,
cursor=-1):
"""Obtain a collection of the lists the specified user is
subscribed to. If neither user_id nor screen_name is specified, the
data returned will be for the authenticated user.
The list will contain a maximum of 20 lists per page by default.
Does not include the user's own lists.
Args:
user_id (int, optional):
The ID of the user for whom to return results.
screen_name (str, optional):
The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page.
No more than 1000 results will ever be returned in a single
page. Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the
list sequence from. Use the value of -1 to start at the
beginning. Twitter will return in the result the values for
next_cursor and previous_cursor.
Returns:
twitter.list.List: A sequence of twitter.List instances,
one for each list
"""
url = '%s/lists/subscriptions.json' % (self.base_url)
parameters = {}
parameters['cursor'] = enf_type('cursor', int, cursor)
parameters['count'] = enf_type('count', int, count)
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetMemberships(self,
user_id=None,
screen_name=None,
count=20,
cursor=-1,
filter_to_owned_lists=False):
"""Obtain the lists the specified user is a member of. If no user_id or
screen_name is specified, the data returned will be for the
authenticated user.
Returns a maximum of 20 lists per page by default.
Args:
user_id (int, optional):
The ID of the user for whom to return results.
screen_name (str, optional):
The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page.
No more than 1000 results will ever be returned in a single page.
Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the list
sequence from. Use the value of -1 to start at the beginning.
Twitter will return in the result the values for next_cursor and
previous_cursor.
filter_to_owned_lists (bool, optional):
Set to True to return only the lists the authenticating user
owns, and the user specified by user_id or screen_name is a
member of. Default value is False.
Returns:
list: A list of twitter.List instances, one for each list in which
the user specified by user_id or screen_name is a member
"""
url = '%s/lists/memberships.json' % (self.base_url)
parameters = {}
if cursor is not None:
parameters['cursor'] = enf_type('cursor', int, cursor)
if count is not None:
parameters['count'] = enf_type('count', int, count)
if filter_to_owned_lists:
parameters['filter_to_owned_lists'] = enf_type(
'filter_to_owned_lists', bool, filter_to_owned_lists)
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetListsList(self,
screen_name=None,
user_id=None,
reverse=False):
"""Returns all lists the user subscribes to, including their own.
If no user_id or screen_name is specified, the data returned will be
for the authenticated user.
Args:
screen_name (str, optional):
Specifies the screen name of the user for whom to return the
lists. Helpful for disambiguating when a valid screen
name is also a user ID.
user_id (int, optional):
Specifies the ID of the user for whom to return the
lists. Helpful for disambiguating when a valid user ID
is also a valid screen name.
reverse (bool, optional):
If False, the owned lists will be returned first, otherwise
subscribed lists will be at the top. Returns a maximum of 100
entries regardless. Defaults to False.
Returns:
list: A sequence of twitter.List instances.
"""
url = '%s/lists/list.json' % (self.base_url)
parameters = {}
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if reverse:
parameters['reverse'] = enf_type('reverse', bool, reverse)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [List.NewFromJsonDict(x) for x in data]
def GetListTimeline(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
since_id=None,
max_id=None,
count=None,
include_rts=True,
include_entities=True):
"""Fetch the sequence of Status messages for a given List ID.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
slug (str, optional):
The slug name for the list to retrieve. If you specify None for the
list_id, then you have to provide either a owner_screen_name or
owner_id.
owner_id (int, optional):
Specifies the ID of the user for whom to return the
list timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
owner_screen_name (str, optional):
Specifies the screen name of the user for whom to return the
list timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
since_id (int, optional):
Returns results with an ID greater than (that is, more recent than)
the specified ID. There are limits to the number of Tweets which
can be accessed through the API.
If the limit of Tweets has occurred since the since_id, the
since_id will be forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older than) or
equal to the specified ID.
count (int, optional):
Specifies the number of statuses to retrieve.
May not be greater than 200.
include_rts (bool, optional):
If True, the timeline will contain native retweets (if they exist)
in addition to the standard stream of tweets.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to True.
Returns:
list: A list of twitter.status.Status instances, one for each
message up to count.
"""
url = '%s/lists/statuses.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if count:
parameters['count'] = enf_type('count', int, count)
if not include_rts:
parameters['include_rts'] = enf_type('include_rts', bool, include_rts)
if not include_entities:
parameters['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
def GetListMembersPaged(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
cursor=-1,
count=100,
skip_status=False,
include_entities=True):
"""Fetch the sequence of twitter.User instances, one for each member
of the given list_id or slug.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
slug (str, optional):
The slug name for the list to retrieve. If you specify None for the
list_id, then you have to provide either a owner_screen_name or
owner_id.
owner_id (int, optional):
The user ID of the user who owns the list being requested by
a slug.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
cursor (int, optional):
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count (int, optional):
The number of users to return per page. Defaults to 100.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to True.
Returns:
next_cursor (int), previous_cursor (int), list of twitter.User
instances, one for each member of the list.
"""
url = '%s/lists/members.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if count:
parameters['count'] = enf_type('count', int, count)
if cursor:
parameters['cursor'] = enf_type('cursor', int, cursor)
parameters['skip_status'] = enf_type('skip_status', bool, skip_status)
parameters['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
users = [User.NewFromJsonDict(user) for user in data.get('users', [])]
return next_cursor, previous_cursor, users
def GetListMembers(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
skip_status=False,
include_entities=False):
"""Fetch the sequence of twitter.User instances, one for each member
of the given list_id or slug.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
slug (str, optional):
The slug name for the list to retrieve. If you specify None for the
list_id, then you have to provide either a owner_screen_name or
owner_id.
owner_id (int, optional):
The user ID of the user who owns the list being requested by
a slug.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to False.
Returns:
list: A sequence of twitter.user.User instances, one for each
member of the twitter.list.List.
"""
cursor = -1
result = []
while True:
next_cursor, previous_cursor, users = self.GetListMembersPaged(
list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name,
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def CreateListsMember(self,
list_id=None,
slug=None,
user_id=None,
screen_name=None,
owner_screen_name=None,
owner_id=None):
"""Add a new member (or list of members) to the specified list.
Args:
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify the
list owner using the owner_id or owner_screen_name parameters.
user_id (int, optional):
The user_id or a list of user_ids to add to the list.
If not given, then screen_name is required.
screen_name (str, optional):
The screen_name or a list of screen_names to add to the list.
If not given, then user_id is required.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested by
a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested by
a slug.
Returns:
twitter.list.List: A twitter.List instance representing the list
the members were added to.
"""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
if isinstance(user_id, (list, tuple)):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
if isinstance(screen_name, (list, tuple)):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/create_all.json' % self.base_url
else:
url = '%s/lists/members/create.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
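# Illustrative usage sketch: passing a list of IDs routes the request
# through lists/members/create_all, which adds multiple members in a single
# call. Assumes `api` is an authenticated twitter.Api instance that owns
# the list; the slug and IDs are made up.
#
#     api.CreateListsMember(slug='colleagues',
#                           owner_screen_name='my_account',
#                           user_id=[12, 34, 56])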
def DestroyListsMember(self,
list_id=None,
slug=None,
owner_screen_name=False,
owner_id=False,
user_id=None,
screen_name=None):
"""Destroys the subscription to a list for the authenticated user.
Args:
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested by a
slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested by a slug.
user_id (int, optional):
The user_id or a list of user_ids to remove from the list.
If not given, then screen_name is required.
screen_name (str, optional):
The screen_name or a list of screen_names to remove from the list.
If not given, then user_id is required.
Returns:
twitter.list.List: A twitter.List instance representing the
removed list.
"""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
if isinstance(user_id, (list, tuple)):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
if isinstance(screen_name, (list, tuple)):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/destroy_all.json' % self.base_url
else:
url = '%s/lists/members/destroy.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def GetListsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=20):
""" Fetch the sequence of lists for a user. If no user_id or
screen_name is passed, the data returned will be for the
authenticated user.
Args:
user_id (int, optional):
The ID of the user for whom to return results.
screen_name (str, optional):
The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page. No more than 1000 results
will ever be returned in a single page. Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the list
sequence from. Use the value of -1 to start at the beginning.
Twitter will return in the result the values for next_cursor and
previous_cursor.
Returns:
next_cursor (int), previous_cursor (int), list of twitter.List
instances, one for each list
"""
url = '%s/lists/ownerships.json' % self.base_url
parameters = {}
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
if count is not None:
parameters['count'] = enf_type('count', int, count)
parameters['cursor'] = enf_type('cursor', int, cursor)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
lists = [List.NewFromJsonDict(x) for x in data.get('lists', [])]
return next_cursor, previous_cursor, lists
def GetLists(self,
user_id=None,
screen_name=None):
"""Fetch the sequence of lists for a user. If no user_id or screen_name
is passed, the data returned will be for the authenticated user.
Args:
user_id:
The ID of the user for whom to return results. [Optional]
screen_name:
The screen name of the user for whom to return results.
[Optional]
Returns:
A sequence of twitter.List instances, one for each list
"""
result = []
cursor = -1
while True:
next_cursor, prev_cursor, lists = self.GetListsPaged(
user_id=user_id,
screen_name=screen_name,
cursor=cursor)
result += lists
if next_cursor == 0 or next_cursor == prev_cursor:
break
else:
cursor = next_cursor
return result
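# Illustrative usage sketch: collecting every list the authenticated user
# owns. The `slug` and `member_count` attributes are assumptions about this
# client's List model.
#
#     for lst in api.GetLists():
#         print(lst.slug, lst.member_count)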
def UpdateProfile(self,
name=None,
profileURL=None,
location=None,
description=None,
profile_link_color=None,
include_entities=False,
skip_status=False):
"""Update's the authenticated user's profile data.
Args:
name:
Full name associated with the profile.
Maximum of 20 characters. [Optional]
profileURL:
URL associated with the profile.
Will be prepended with "http://" if not present.
Maximum of 100 characters. [Optional]
location:
The city or country describing where the user of the account is located.
The contents are not normalized or geocoded in any way.
Maximum of 30 characters. [Optional]
description:
A description of the user owning the account.
Maximum of 160 characters. [Optional]
profile_link_color:
Hex value of the profile color theme, formatted without '#' or
'0x', e.g. FF00FF. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
skip_status:
When set to either True, t or 1, statuses will not be included
in the returned user objects. [Optional]
Returns:
A twitter.User instance representing the modified user.
"""
url = '%s/account/update_profile.json' % (self.base_url)
data = {}
if name:
data['name'] = name
if profileURL:
data['url'] = profileURL
if location:
data['location'] = location
if description:
data['description'] = description
if profile_link_color:
data['profile_link_color'] = profile_link_color
if include_entities:
data['include_entities'] = include_entities
if skip_status:
data['skip_status'] = skip_status
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def UpdateBackgroundImage(self,
image,
tile=False,
include_entities=False,
skip_status=False):
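"""Updates the authenticated user's profile background image.
Args:
image:
Location of the image file in the local file system.
tile:
If True, the background image will be tiled. [Optional]
include_entities:
The entities node will be included when set to True. [Optional]
skip_status:
When set to True statuses will not be included in the returned
user objects. [Optional]
Returns:
True if the upload was successful.
"""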
url = '%s/account/update_profile_background_image.json' % (self.base_url)
with open(image, 'rb') as image_file:
encoded_image = base64.b64encode(image_file.read())
data = {
'image': encoded_image
}
if tile:
data['tile'] = 1
if include_entities:
data['include_entities'] = 1
if skip_status:
data['skip_status'] = 1
resp = self._RequestUrl(url, 'POST', data=data)
if resp.status_code in [200, 201, 202]:
return True
if resp.status_code == 400:
raise TwitterError({'message': "Image data could not be processed"})
if resp.status_code == 422:
raise TwitterError({'message': "The image could not be resized or is too large."})
def UpdateImage(self,
image,
include_entities=False,
skip_status=False):
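"""Updates the authenticated user's profile image.
Args:
image:
Location of the image file in the local file system.
include_entities:
The entities node will be included when set to True. [Optional]
skip_status:
When set to True statuses will not be included in the returned
user objects. [Optional]
Returns:
True if the upload was successful.
"""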
url = '%s/account/update_profile_image.json' % (self.base_url)
with open(image, 'rb') as image_file:
encoded_image = base64.b64encode(image_file.read())
data = {
'image': encoded_image
}
if include_entities:
data['include_entities'] = 1
if skip_status:
data['skip_status'] = 1
resp = self._RequestUrl(url, 'POST', data=data)
if resp.status_code in [200, 201, 202]:
return True
if resp.status_code == 400:
raise TwitterError({'message': "Image data could not be processed"})
if resp.status_code == 422:
raise TwitterError({'message': "The image could not be resized or is too large."})
def UpdateBanner(self,
image,
include_entities=False,
skip_status=False):
"""Updates the authenticated users profile banner.
Args:
image:
Location of image in file system
include_entities:
If True, each tweet will include a node called "entities."
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
skip_status:
When set to True statuses will not be included in the returned
user objects. [Optional]
Returns:
True if the banner was successfully uploaded.
"""
url = '%s/account/update_profile_banner.json' % (self.base_url)
with open(image, 'rb') as image_file:
encoded_image = base64.b64encode(image_file.read())
data = {
# When updated for API v1.1 use image, not banner
# https://dev.twitter.com/docs/api/1.1/post/account/update_profile_banner
# 'image': encoded_image
'banner': encoded_image
}
if include_entities:
data['include_entities'] = 1
if skip_status:
data['skip_status'] = 1
resp = self._RequestUrl(url, 'POST', data=data)
if resp.status_code in [200, 201, 202]:
return True
if resp.status_code == 400:
raise TwitterError({'message': "Image data could not be processed"})
if resp.status_code == 422:
raise TwitterError({'message': "The image could not be resized or is too large."})
raise TwitterError({'message': "Unkown banner image upload issue"})
def GetStreamSample(self, delimited=None, stall_warnings=None):
"""Returns a small sample of public statuses.
Args:
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
Returns:
A Twitter stream
"""
url = '%s/statuses/sample.json' % self.stream_url
resp = self._RequestStream(url, 'GET')
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
def GetStreamFilter(self,
follow=None,
track=None,
locations=None,
delimited=None,
stall_warnings=None):
"""Returns a filtered view of public statuses.
Args:
follow:
A list of user IDs to track. [Optional]
track:
A list of expressions to track. [Optional]
locations:
A list of Latitude,Longitude pairs (as strings) specifying
bounding boxes for the tweets' origin. [Optional]
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
Returns:
A twitter stream
"""
if all((follow is None, track is None, locations is None)):
raise ValueError({'message': "No filter parameters specified."})
url = '%s/statuses/filter.json' % self.stream_url
data = {}
if follow is not None:
data['follow'] = ','.join(follow)
if track is not None:
data['track'] = ','.join(track)
if locations is not None:
data['locations'] = ','.join(locations)
if delimited is not None:
data['delimited'] = str(delimited)
if stall_warnings is not None:
data['stall_warnings'] = str(stall_warnings)
resp = self._RequestStream(url, 'POST', data=data)
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
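# Illustrative consumption of the streaming generator above (hypothetical
# track terms; not part of the original source). Each yielded item is a
# parsed JSON dict for one streamed message:
#
#   for tweet in api.GetStreamFilter(track=['python', 'twitter']):
#       print(tweet.get('text'))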
def GetUserStream(self,
replies='all',
withuser='user',
track=None,
locations=None,
delimited=None,
stall_warnings=None,
stringify_friend_ids=False):
"""Returns the data from the user stream.
Args:
replies:
Specifies whether to return additional @replies in the stream.
Defaults to 'all'.
withuser:
Specifies whether to return information for just the authenticating
user, or include messages from accounts the user follows. [Optional]
track:
A list of expressions to track. [Optional]
locations:
A list of Latitude,Longitude pairs (as strings) specifying
bounding boxes for the tweets' origin. [Optional]
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
stringify_friend_ids:
Specifies whether to send the friends list preamble as an array of
integers or an array of strings. [Optional]
Returns:
A twitter stream
"""
url = 'https://userstream.twitter.com/1.1/user.json'
data = {}
if stringify_friend_ids:
data['stringify_friend_ids'] = 'true'
if replies is not None:
data['replies'] = replies
if withuser is not None:
data['with'] = withuser
if track is not None:
data['track'] = ','.join(track)
if locations is not None:
data['locations'] = ','.join(locations)
if delimited is not None:
data['delimited'] = str(delimited)
if stall_warnings is not None:
data['stall_warnings'] = str(stall_warnings)
resp = self._RequestStream(url, 'POST', data=data)
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
def VerifyCredentials(self):
"""Returns a twitter.User instance if the authenticating user is valid.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
"""
url = '%s/account/verify_credentials.json' % self.base_url
resp = self._RequestUrl(url, 'GET') # No_cache
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def SetCache(self, cache):
"""Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the twitter._FileCache
"""
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
"""Override the default urllib implementation.
Args:
urllib:
An instance that supports the same API as the urllib2 module
"""
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
"""Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
"""
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
"""Override the default user agent.
Args:
user_agent:
A string that should be sent to the server as the user-agent.
"""
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
"""Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
"""
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
"""Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server.
New source values are authorized on a case by case basis by the
Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
"""
self._default_params['source'] = source
def InitializeRateLimit(self):
""" Make a call to the Twitter API to get the rate limit
status for the currently authenticated user or application.
Returns:
None.
"""
_sleep = self.sleep_on_rate_limit
if self.sleep_on_rate_limit:
self.sleep_on_rate_limit = False
url = '%s/application/rate_limit_status.json' % self.base_url
resp = self._RequestUrl(url, 'GET') # No-Cache
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
self.sleep_on_rate_limit = _sleep
self.rate_limit = RateLimit(**data)
def CheckRateLimit(self, url):
""" Checks a URL to see the rate limit status for that endpoint.
Args:
url (str):
URL to check against the current rate limits.
Returns:
namedtuple: EndpointRateLimit namedtuple.
"""
if not self.rate_limit:
self.InitializeRateLimit()
if url:
limit = self.rate_limit.get_limit(url)
return limit
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlunparse((scheme, netloc, path, params, query, fragment))
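# Worked example for _BuildUrl (illustrative only; parameter order in the
# query string depends on dict ordering):
#
#   self._BuildUrl('https://api.twitter.com/1.1/search/tweets.json',
#                  extra_params={'q': 'pizza'})
#   # -> 'https://api.twitter.com/1.1/search/tweets.json?q=pizza'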
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(urllib_version, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _DecompressGzippedResponse(self, response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
# response.read() returns bytes, so gzip needs a BytesIO wrapper
url_data = gzip.GzipFile(fileobj=io.BytesIO(raw_data)).read()
else:
url_data = raw_data
return url_data
def _Encode(self, s):
if self._input_encoding:
return str(s, self._input_encoding).encode('utf-8')
else:
return str(s).encode('utf-8')
def _EncodeParameters(self, parameters):
"""Return a string in key=value&key=value form.
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
"""
if parameters is None:
return None
else:
return urlencode(dict([(k, self._Encode(v)) for k, v in list(parameters.items()) if v is not None]))
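# Worked example (illustrative): values of None are dropped before the
# remaining pairs are URL-encoded.
#
#   self._EncodeParameters({'screen_name': 'example', 'count': None})
#   # -> 'screen_name=example'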
def _EncodePostData(self, post_data):
"""Return a string in key=value&key=value form.
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
"""
if post_data is None:
return None
else:
return urlencode(dict([(k, self._Encode(v)) for k, v in list(post_data.items())]))
def _ParseAndCheckTwitter(self, json_data):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error.
This is a purely defensive check because during some Twitter
network outages it will return an HTML failwhale page.
"""
try:
data = json.loads(json_data)
self._CheckForTwitterError(data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json_data:
raise TwitterError({'message': "Capacity Error"})
if "<title>Twitter / Error</title>" in json_data:
raise TwitterError({'message': "Technical Error"})
if "Exceeded connection limit for user" in json_data:
raise TwitterError({'message': "Exceeded connection limit for user"})
raise TwitterError({'message': "json decoding"})
return data
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data:
A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
def _RequestChunkedUpload(self, url, headers, data):
try:
return requests.post(
url,
headers=headers,
data=data,
auth=self.__auth,
timeout=self._timeout
)
except requests.RequestException as e:
raise TwitterError(str(e))
def _RequestUrl(self, url, verb, data=None):
"""Request a url.
Args:
url:
The web location we want to retrieve.
verb:
Either POST or GET.
data:
A dict of (str, unicode) key/value pairs.
Returns:
A JSON object.
"""
if not self.__auth:
raise TwitterError(
"The twitter.Api instance must be authenticated.")
if url and self.sleep_on_rate_limit:
limit = self.CheckRateLimit(url)
if limit.remaining == 0:
try:
time.sleep(int(limit.reset - time.time()))
except ValueError:
pass
if verb == 'POST':
if 'media_ids' in data:
url = self._BuildUrl(url, extra_params={'media_ids': data['media_ids']})
if 'media' in data:
try:
resp = requests.post(url,
files=data,
auth=self.__auth,
timeout=self._timeout)
except requests.RequestException as e:
raise TwitterError(str(e))
else:
try:
resp = requests.post(url,
data=data,
auth=self.__auth,
timeout=self._timeout)
except requests.RequestException as e:
raise TwitterError(str(e))
elif verb == 'GET':
url = self._BuildUrl(url, extra_params=data)
try:
resp = requests.get(url,
auth=self.__auth,
timeout=self._timeout)
except requests.RequestException as e:
raise TwitterError(str(e))
else:
resp = 0 # if not a POST or GET request
if url and self.sleep_on_rate_limit and self.rate_limit:
limit = resp.headers.get('x-rate-limit-limit', 0)
remaining = resp.headers.get('x-rate-limit-remaining', 0)
reset = resp.headers.get('x-rate-limit-reset', 0)
self.rate_limit.set_limit(url, limit, remaining, reset)
return resp
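# Note on the rate-limit bookkeeping above (a sketch, not original
# documentation): with sleep_on_rate_limit enabled, the method sleeps
# *before* a call whose remaining budget is zero, then refreshes the stored
# window from the x-rate-limit-* response headers afterwards. Hypothetical
# usage:
#
#   api = twitter.Api(..., sleep_on_rate_limit=True)
#   api.GetFriends()   # may block until the current rate-limit window resets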
def _RequestStream(self, url, verb, data=None):
"""Request a stream of data.
Args:
url:
The web location we want to retrieve.
verb:
Either POST or GET.
data:
A dict of (str, unicode) key/value pairs.
Returns:
A twitter stream.
"""
if verb == 'POST':
try:
return requests.post(url, data=data, stream=True,
auth=self.__auth,
timeout=self._timeout)
except requests.RequestException as e:
raise TwitterError(str(e))
if verb == 'GET':
url = self._BuildUrl(url, extra_params=data)
try:
return requests.get(url, stream=True, auth=self.__auth,
timeout=self._timeout)
except requests.RequestException as e:
raise TwitterError(str(e))
return 0 # if not a POST or GET request
| apache-2.0 |
minghuascode/pyj | examples/gmaps/ControlSimple.py | 11 | 1546 | # Copyright (C) 2009 Daniel Carvalho <idnael@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas.ui.RootPanel import RootPanel, RootPanelCls
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas import DOM
from pyjamas.Timer import Timer
from pyjamas.gmaps.Map import Map, MapTypeId, MapOptions
from pyjamas.gmaps.Base import LatLng
class ControlSimple(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
self.setSize('100%', '100%')
#options = MapOptions()
#options.zoom = 4
#options.center = LatLng(-33, 151)
#options.mapTypeId = MapTypeId.ROADMAP
#options.navigationControl = False
#options.scaleControl = True
options = MapOptions(zoom=4, center=LatLng(-33, 151),
mapTypeId=MapTypeId.ROADMAP,
navigationControl=False, scaleControl=True)
self.map = Map(self.getElement(), options)
if __name__ == '__main__':
root = RootPanel()
root.add(ControlSimple())
| apache-2.0 |
marc-sensenich/ansible | lib/ansible/modules/windows/win_wakeonlan.py | 52 | 1767 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wakeonlan
version_added: '2.4'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(win_wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
- For non-Windows targets, use the M(wakeonlan) module instead.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
type: str
required: yes
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
type: str
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
type: int
default: 7
todo:
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked. It always reports a change.
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
seealso:
- module: wakeonlan
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
win_wakeonlan:
mac: 00:00:5E:00:53:66
broadcast: 192.0.2.23
- name: Send a magic Wake-On-LAN packet on port 9 to 00-00-5E-00-53-66
win_wakeonlan:
mac: 00-00-5E-00-53-66
port: 9
delegate_to: remote_system
'''
RETURN = r'''
# Default return values
'''
| gpl-3.0 |
kdkeyser/readthedocs.org | readthedocs/rtd_tests/tests/test_sync_versions.py | 34 | 11004 | import json
from django.test import TestCase
from readthedocs.builds.models import Version
from readthedocs.builds.constants import STABLE
from readthedocs.projects.models import Project
class TestSyncVersions(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
Version.objects.create(project=self.pip, identifier='origin/master',
verbose_name='master', active=True,
machine=True)
Version.objects.create(project=self.pip, identifier='to_delete',
verbose_name='to_delete', active=False)
def test_proper_url_no_slash(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
]}
r = self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
json_data = json.loads(r.content)
self.assertEqual(json_data['deleted_versions'], ['to_delete'])
self.assertEqual(json_data['added_versions'], ['to_add'])
def test_new_tag_update_active(self):
Version.objects.create(project=self.pip, identifier='0.8.3',
verbose_name='0.8.3', active=True)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active)
def test_new_tag_update_inactive(self):
Version.objects.create(project=self.pip, identifier='0.8.3',
verbose_name='0.8.3', active=False)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active is False)
class TestStableVersion(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_stable_versions(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
]
}
self.assertRaises(
Version.DoesNotExist,
Version.objects.get,
slug=STABLE
)
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_pre_release_are_not_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0a1', 'verbose_name': '1.0a1'},
{'identifier': '0.9', 'verbose_name': '0.9'},
{'identifier': '0.9b1', 'verbose_name': '0.9b1'},
{'identifier': '0.8', 'verbose_name': '0.8'},
{'identifier': '0.8rc2', 'verbose_name': '0.8rc2'},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_post_releases_are_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': '1.0.post1', 'verbose_name': '1.0.post1'},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.post1')
def test_invalid_version_numbers_are_not_stable(self):
self.pip.versions.all().delete()
version_post_data = {
'branches': [],
'tags': [
{'identifier': 'this.is.invalid', 'verbose_name': 'this.is.invalid'},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertFalse(Version.objects.filter(slug=STABLE).exists())
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': 'this.is.invalid', 'verbose_name': 'this.is.invalid'},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
def test_update_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data = {
'tags': [
{
'identifier': '1.0.0',
'verbose_name': '1.0.0',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
version_post_data = {
'tags': [
{
'identifier': '0.7',
'verbose_name': '0.7',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
def test_update_inactive_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertEqual(version_stable.identifier, '0.9')
version_stable.active = False
version_stable.save()
version_post_data['tags'].append({
'identifier': '1.0.0',
'verbose_name': '1.0.0',
})
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertFalse(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
| mit |
snyaggarwal/oclapi | ocl/oclapi/management/commands/create_tokens.py | 2 | 3407 | from optparse import make_option
import os.path
import requests
import json
from django.contrib.auth.models import User
from django.core.management import BaseCommand, CommandError
from rest_framework.authtoken.models import Token
from users.models import UserProfile
from orgs.models import Organization
class Command(BaseCommand):
help = 'Create initial user data for a new installation, and output the tokens required for the web.'
option_list = BaseCommand.option_list + (
make_option('--password',
action='store',
dest='pwd',
default=None,
help='Password for root user.'),
make_option('--test',
action='store_true',
dest='test_mode',
default=False,
help='Test mode. Do not update database.'),
make_option('--create',
action='store_true',
dest='create_mode',
default=False,
help='Create root user.'),
make_option('--token',
action='store',
dest='token',
default=None,
help='Set root user token.')
)
def print_users(self):
print 'Django users...'
for n,u in enumerate(User.objects.all(), start=1):
print 'Django User %d -----' % n
print 'user id:', u.id
print 'user name:', u.username
print 'is staff:', u.is_staff
print 'is superuser:', u.is_superuser
print 'API users...'
for n, u in enumerate(UserProfile.objects.all(), start=1):
print 'API User %d -----' % n
print 'user id:', u.id
print 'mnemonic:', u.mnemonic
print 'name:', u.name
def print_tokens(self):
""" Just print out the tokens, in a form that easily put
in a shell script.
"""
for t in Token.objects.all():
res = User.objects.filter(id=t.user_id)
if len(res) == 1:
un = res[0].username
else:
un = 'n/a'
if un == 'admin':
print "export OCL_API_TOKEN='%s'" % t.key
if un == 'anonymous':
print "export OCL_ANON_API_TOKEN='%s'" % t.key
def handle(self, *args, **options):
create_mode = options['create_mode']
pwd = options['pwd']
token = options['token']
if create_mode:
if pwd is None:
raise CommandError('--password is required.')
superusers = User.objects.filter(username='root')
if not superusers:
UserProfile.objects.create(
user=User.objects.create_superuser('root', 'root@openconceptlab.org', password=pwd),
organizations=map(lambda o: o.id, Organization.objects.filter(created_by='root')),
mnemonic='root')
superusers = User.objects.filter(username='root')
superuser = superusers[0]
superuser.set_password(pwd)
superuser.save()
if token:
Token.objects.filter(user=superuser).delete()
Token.objects.create(user=superuser, key=token)
else:
self.print_tokens()
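# Illustrative invocations (assumed to run through manage.py; the password
# and token values below are hypothetical):
#
#   python manage.py create_tokens --create --password=changeme --token=abc123
#   python manage.py create_tokens   # only print the exportable tokens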
| mpl-2.0 |
pmolchanov/sd-fbn-search | sd-fbn-search.py | 1 | 13552 | #!/usr/bin/env python
import sys, lxml.html, httplib, urllib, csv
if len(sys.argv) < 2:
print "sd-fbn-search - Lookup San Diego County ficticious business name info"
print "usage: sd-fbn-search <search-by> <fbn>\n"
print "search-by options:"
print " 0 - Business Name"
print " 1 - Owner Name"
print " 2 - File Number\n"
print 'example: sd-fbn-search 0 "Promiscuous Fork"'
sys.exit(1)
# 0 - Business Name
# 1 - Owner Name
# 2 - File Number
query_type = urllib.quote(sys.argv[1])
# e.g. "Promiscuous Fork"
query = urllib.quote(sys.argv[2])
# Pulled from doing test queries against https://arcc.sdcounty.ca.gov/Pages/Fictitious.aspx
# with a query value of FOOBAR
body_templates = [ \
'ctl00%24ScriptManager=ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24FBNUpdatePanel%7Cctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch&MSOWebPartPage_PostbackSource=&MSOTlPn_SelectedWpId=&MSOTlPn_View=0&MSOTlPn_ShowSettings=False&MSOGallery_SelectedLibrary=&MSOGallery_FilterString=&MSOTlPn_Button=none&__REQUESTDIGEST=0x50617A744C327DF47B3E69747912DE372526AAE6563CE27A8463791024111A941AC7C6D8F94D7EA030E64B10D9222C7AEB5FDDE6B2CC03AB430F4C6B940CE375%2C14%20Oct%202015%2021%3A19%3A01%20-0000&MSOSPWebPartManager_DisplayModeName=Browse&MSOSPWebPartManager_ExitingDesignMode=false&MSOWebPartPage_Shared=&MSOLayout_LayoutChanges=&MSOLayout_InDesignMode=&MSOSPWebPartManager_OldDisplayModeName=Browse&MSOSPWebPartManager_StartWebPartEditingName=false&MSOSPWebPartManager_EndWebPartEditing=false&_maintainWorkspaceScrollPosition=0&InputKeywords=Search%20this%20site...&ctl00%24PlaceHolderSearchArea%24ctl01%24ctl03=0&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24ddlSearchBy=0&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24txtBusName=FOOBAR&__spText1=&__spText2=&_wpcmWpid=&wpcmVal=&__EVENTTARGET=&__EVENTARGUMENT=&__LASTFOCUS=&__VIEWSTATE=rnzu14haEj%2Ff9pIO9N2UOqBspqvKR%2BioWhSBzihv8v6txnX9B0dgPFL%2FJHYC8ooFEWjDiXw43vXEFnPOFtmd1yCDcGdTxEs%2BOslMYZCQDYXkvusTGNewjnPCXg7lS%2BMp9jSTNTYcDBVvfnFh16DfeWXotU0rZESrTrGrtpWWlrM0NbqPsWO1AmlL43SfI2wUybJB5cQ9rKkCpz37AQMdCuAa4kkpQQ7RPl29%2Fs3Fqkk3Fbr5jfVmkGM%2F6XxXVgHXwB1wdPNDP7q9HU9cJkraV3am%2BymkUDgIIFKWNBoVBkVIxvA6pxkrPWfhfC3anBDlXTA8Tcp7H9LJZsnu4xt3Se007iUrFgXsCAIvoFMe%2FHlrKp6bK80mKv60bD3ZluvTxf%2Fk%2B8H%2BCYHG95G8w1fx9O2pmkEW6lIFca4NFgfVx9Bj2if4FewPzcdRH%2FXT1B4S9r2Rs5C3AMXUFhmySdQKLeoHhQ8UrzLWLMRmgKGNLcvrrq6I8ji0Ezrc7TSP1MQZnO1esUfKVx94SLQvz%2BxnznLrDAS4idegtTBc19c2oOFHHvVGzV9CCAZ2GI%2BZMxWGehJW80XMsZvVFIUb5thAZ60RH9ZnCyaDBJqLRxAzltiybTfIEK2Cc9h0tvlDU1uyxRvzkhiI0MMImA6NGwtJtLSFly%2Bbf%2BQKqEar0AH%2FsGloq0EWGFb0vBfYncWlGKuP1YVE8SgqD4q0iWmRIkk8gq8Dre3%2F7SMDsb5sGsTyOIClspZmI%2BCx6MCHlg8%2B4IuzQbwBPhtGRwbxCrLSL2Gdbu%2B0Ti9C0XTND5pR89dwM3XDAwsvOF%2F04JbHYnglVVFALGxUfzCjTEGAcyh1Cjk92qHWyoRhWU3q1LujtWZW43FCHo0scg8qU98p9IIOF4yNW%2FWaFHXxTuX7JYFALUBbxq4DSiljBOpu7YLMkpSQdt86qyzrfM%2FypGtMa2IVLWd9x3nTrwN4n8fKZHM6A6NzR3juFnKDV%2FoL44uyYgqfp9QRk3xUD1WrBy%2FnbSWErwKTFzpjSpInDIU7nFkEmuct0UUf%2BFdjZyrqSk1EwVE8MiIy%2BKnrNAdRV66IfDJ0jnt3oYVdecDmaoE%2FYZ5DQqosE%2BhK60fBNK77ODJ67kiNBkrCBI4U8f9HFr%2FuI1QO7s%2BsiPlH758SJN%2FZ1qN3kMG0PnNXIrNEB770FnLB7m9Nc5s5qCtoUlgrieDZBIZrhTly%2B%2BKB8pyTe96f%2FSKikP9YBMPAB1UC2WQ7FNqZ5hurHQ2rPZiTqZEyIxgPGS1BU0yOR1J8VIFnXwQVl%2BrBOwjx9ziIJnFSaVD69QPmUyTz2TT8NA4CZzOpatN7GZXPPqR7hd2Ge1dXz0hYuPVJNEAF%2BjBrTz8%2F5QiS4%2Frv1BDJKhk5vyywbcrKB3VC6D4BrlIVVodOz%2Bu3%2FI%2BFpbg%2FWyXbYlVjRUFy8pqDIws7VfwBsKcB2yL%2F4XcMIN1CXxqpV%2BtmgX8T%2FxhHXIff%2FZc0aWzwu83hVWYcfpONjkF2CgremZmC4ozuesb3%2BQ8vlGJJThEkcL0Bu9dECpSNu0Mc8%2FkpJx%2FpGoblFfpGVFU5I1Fc7peAebKoa6O%2BYzDGjZ5ylh2Qsg9x788QAFo2IQ0RjN98F6bCnUHbYNvA2CQR8%2FxQ01A8hMizXQ9XWidJZBN%2Fvibxn5%2FHQx9EKzsTWaLd6OR8wOPbMyIyPQp0mcYj6RsIGXtzE6z9zdu4%2B7s91%2FtmQF2sxp2c55xBIAbbystrUMicpHwf7gHGWiU0FGX6GYTy34Fo5WZDKkLrNMfYzwNKyjSzm0I1wOtXsfulFS1IIUzmud4cjWR5tq10%2BBs67axmVbLjfDY%2BX%2F6ViXuauIOdpseS9ijb6N%2FOFOhBxpBV9b4GutVHqhyX62QH8eEQV%2FAQiGzFwmUycO5GDXpcRsKX7PNlPz%2BYY%2B9W1L4PmTavTAN4zzQS4VZzWilx89qZnYmeTpjWXkjOmNd8FNcnTN%2BObqWQ0a3ORbPr%2FqSFs%2BU9HeUNkm6bfl9oJgm05ZRPgEjeN1caL2xC31MqGAB9qlijYPdHpojvOZ1wEZR49ezpFVkq6htcToScQQs6MnnS4t%2FvWrfFvbBSGlEAKVJtyPV3Rb4kH1rAfg%3D%3D&__VIEWSTATEENCRYPTED=&__EVENTVALIDATION=cmLtJ5xZSfUn3EQwRRM0a%2B2n4QDbrcnfvhnfUg8zeW7qXkKfVoh2smxAKoLqYsOxJeDXEZEnHS01RllGZtDYNg4uCUhocvADzNHlvPzr2ZVQdmCvzOHJLGeOeB5Twcexy4%2F1VMHHhTaoKoomvSwZ%2BTw0BLU%3D&_wpSelected=&_wzSelected=&__ASYNCPOST=true&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch=Search', \
'ctl00%24ScriptManager=ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24FBNUpdatePanel%7Cctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch&MSOWebPartPage_PostbackSource=&MSOTlPn_SelectedWpId=&MSOTlPn_View=0&MSOTlPn_ShowSettings=False&MSOGallery_SelectedLibrary=&MSOGallery_FilterString=&MSOTlPn_Button=none&__REQUESTDIGEST=0xA164F11A39004D48F2FCE507B2C22E4492B77449BC1E30B5484F5AD9EB6018B4FEBE7DB6744667A1F3059B1CB506BD8D25DFC0FAA623D8176B4AB7D77B3CA410%2C14%20Oct%202015%2021%3A17%3A48%20-0000&MSOSPWebPartManager_DisplayModeName=Browse&MSOSPWebPartManager_ExitingDesignMode=false&MSOWebPartPage_Shared=&MSOLayout_LayoutChanges=&MSOLayout_InDesignMode=&MSOSPWebPartManager_OldDisplayModeName=Browse&MSOSPWebPartManager_StartWebPartEditingName=false&MSOSPWebPartManager_EndWebPartEditing=false&_maintainWorkspaceScrollPosition=0&InputKeywords=Search%20this%20site...&ctl00%24PlaceHolderSearchArea%24ctl01%24ctl03=0&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24ddlSearchBy=1&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24txtOwnerName=FOOBAR&__spText1=&__spText2=&_wpcmWpid=&wpcmVal=&__EVENTTARGET=&__EVENTARGUMENT=&__LASTFOCUS=&__VIEWSTATE=SwamHz2M4vlDU5xESDE%2BYzlhgnpp3zmU7%2Bp0rUy08dtMjFyaYzN4sGNn89LXjeesJp2Lg1OQPWQ6COOC7h9fbIaZ4pZgm%2F9cLwNpoEc%2BQ54cequlIaMCeMkUkluaWt9NyRsng%2FD6BQTtlxOqxf7GB3Klr3%2BNf0xHfDe23YtZLeSdO0Cpu1HPxTzgfbc%2FrPhoXVhD9ZLya8kWFSbq2hKv8DA5z1SjXbdICT5H7efUfTrYS6LnB3bKK1DxRY1xxmTuixmL1o7DXxCSep1DFGm8uGID4JS%2FezJ0EEbFuXyq2zB0OggHl8XM0DgQ0saHJ9xCpG8hLmmiwgMtoPpzu7DMdHwhRQXILMY%2FF7xT7nZHgWw4UQ1UcjM8Cf4LNq85xORdHpCXKCdkNY6Vwkjmpir18OsMq%2F910UVuC%2FFrDBRHo0fXeEurUV1NG3jvOw4%2FQto1pKt9fAg6IbiCAYmUcCF%2BWkZUjTcD97jU5HGgnXir6aVUfAJKF6opevl%2FJvEnTHeQpuGra3VeIMEfVl2FkFYIZNcMJ26fles8SdOyQi64eWa9MunUFfVayojcg0EW2JZ6KzBNaKmFwjWr2DGVsju%2BJReTna468daT1jZnHVCiEYOSKCxLpiKNwOikRR42QhajRaDz%2BvXm7ZqSrgYsHSaOXY6KkeECzRZxSEdq09xMLsiDaehnFhM%2FR61MPQpPE1CN7%2BTutVA5sXftoKqEtjbzHFWugl%2FihArvU6fwSvb%2BG%2B%2Bdx%2Br1Ag%2F%2B7dezfMYgDVF7BNP0mV9%2BnHi3IYSdHwDxXfrc5vMHOZik102MjzafT6xCUsABNRn4qQNIwh4cAtlhO00I%2FAfMkTiH59DdzgOtCSPifxzqxUyfNc4kgHSLT4PPqn%2F3y8JBKIuo765%2FZ9DVm0SbFCOhVLUhsQZYFVzh%2FMghSTjqNoO99iv5JWcoxSH1w46ra%2FEredYqb%2FJl3zacE7%2FDC8n%2FIupk7frpn4H1fxEUZGcbu1GUeM3XKwexteRP47Q6zKohrYDNtTPg00Q8A1eTYapFG8ZxMmzIJpcWyZ7strGJSxotw%2FVQCQe1zvKFzMTu8fbhLFr3YW5MogIuAVoGZtrAWpT00hn5V0AjEfSr608ozILT3RL4LN2lXPTEe2Fh5t6SU1ByFKVmh%2BGAbuPrjWgJN6mEOBvU2UAa0FJR4AnTodE33PfLbgry1R%2FA%2FHB%2B5yxuaKrxKwoTFOA9KlUBdfHS9qNAvXcruZxX0mQtzV060oUu2ylmPGQYuYoTaRzKx%2BnilgyLdrQbgj77VdJuoSbB1RQAH%2BzLJulMRofKXA8vQb%2Bx8Y%2BvqFbHie%2FnWhpK%2BSrZRnDmtLhiMnnT8%2BJm2yI%2BiTcBGCOzA4ri4br4qwlFUe1aU1gjygD6uM7ZD0723XJ%2Fvno8T7jnULUiYSivZCM30gh9DGiN5vw6bZSXxY2J6LIKkF2EMrjUG6R7tiiWgx%2FWJbfoazfLohqJDmOOGBVtpfkX9JCDNNIAUH2LKuqyUIdKCVtm5FZxp3EXEPf0kr5YGhlCqF62CM7cVAUmUSFiuGE%2BOtSBk%2FS0ys%2F0z4XKl%2BmDPkSRq0J2egI7RN5nZmPXj3oGUQZzTxwqdRbsW7Td6dwTrqL7k1UbbNc%2Fk3OpABw8fik97sXxrbWqW2DII6AyQl7qVm5Io0AHpv%2B3tujrJyNNoRWoACpLK75fgWWDwqXlNwNLksvH6UFLlGnRWweuVZdfnhp8X7QyxN8hOnOBID015a5n%2Ble2t7wvy78Awg0er8XK%2ByBqLiKppq7%2F%2BdGTGThl4n%2Fxzs3zl6OISYvOAxAt3nYoxHg9qsLuNJ12tD4DNKV9GhQA7XeeC3H54A9%2FoaKeEw7PpbjtZDBeAV%2FXFpXV6dF4wS6XG%2BTW3u%2F3QOaGzetwUM8DaXkYnoaJhgtirQMVLDdo%2FozyYEFK68wndPtNi9h%2FglSzE8Yk1%2BYbgLXPNAvly%2Fe6auK3VwzZtjW6GmjXGsW8Sj3Y%2FVhieLthHEp%2Bbs%2Bfvcihb0O7RhzSCMBXVIKc1wfJeeB%2BnthSs0ffx%2FoFWYGGNUzeM3Fy4BlF%2BAGj8voFp%2FJ7CACyXRICJwwxt3ISYr3hL%2BPzhchcCo23vJZuK30jq1TlbrvU6A%3D%3D&__VIEWSTATEENCRYPTED=&__EVENTVALIDATION=dG3TJnf58GbWdMG1P%2FC8Wfam9R%2FBRz0d%2BihM8N8Nee9EPJUcGcFf%2BMP4qC%2FUrKW%2BBrZCC1k%2BN09NVlsveug3cNtcMGEtyjxJxRSO1JuoeFAjeKXbxOZhrIvNSARLek72eMg9KVygJXQ7R9lnTumhGf%2F51fo%3D&_wpSelected=&_wzSelected=&__ASYNCPOST=true&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch=Search', \
'ctl00%24ScriptManager=ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24FBNUpdatePanel%7Cctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch&MSOWebPartPage_PostbackSource=&MSOTlPn_SelectedWpId=&MSOTlPn_View=0&MSOTlPn_ShowSettings=False&MSOGallery_SelectedLibrary=&MSOGallery_FilterString=&MSOTlPn_Button=none&__REQUESTDIGEST=0x3F6B60E9F6A82F752A43C2808320CCDF3A06323E41F680B08317CC1AEFBFEEF328C3616B552555269B997BC6E997C65878149FF2F7D76047C81D9C4CD352330D%2C14%20Oct%202015%2021%3A19%3A39%20-0000&MSOSPWebPartManager_DisplayModeName=Browse&MSOSPWebPartManager_ExitingDesignMode=false&MSOWebPartPage_Shared=&MSOLayout_LayoutChanges=&MSOLayout_InDesignMode=&MSOSPWebPartManager_OldDisplayModeName=Browse&MSOSPWebPartManager_StartWebPartEditingName=false&MSOSPWebPartManager_EndWebPartEditing=false&_maintainWorkspaceScrollPosition=0&InputKeywords=Search%20this%20site...&ctl00%24PlaceHolderSearchArea%24ctl01%24ctl03=0&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24ddlSearchBy=2&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24txtFileNum=FOOBAR&__spText1=&__spText2=&_wpcmWpid=&wpcmVal=&__EVENTTARGET=&__EVENTARGUMENT=&__LASTFOCUS=&__VIEWSTATE=gBtBUp7llSgxk2AuIe%2By76lp877de%2FraRHoi9IUXpgDdDDDoG%2Bm1DFuplJCnw3kF7OpdYEuywy4Y8jgBCpG6aR%2FSQS0ORORvwdwLQcIvOkmr%2Bp2006uIiWBbkFBke%2BBCLyxYKcsDyWm%2Fd9pNoX%2FGwzuF1oLIfmvWeENJRKcpzmccEHtkKy5b46pqCHpQGizsjeNcJ%2F0kD0hfZ8fRy8IgmmVgV6U8oTXLlNIXfQygVy3j4U8zvCmavMvZSTh229YIM0CE0E5PGV0tKZ4NLLCGMiaisOtC9TwUrx1cEqMM7CMnNfBtIgeJlezOfippqADG5zljaJ3Y6msjq%2F8XtGqd74lq%2BvRxaUksv3DARz5yCDImS3vrU6LZxF1OlM8CRzuSt02mQsn3IeHGGWM1npdV8RU7knw3AlwbnnhyfRnnvcBXsT3J1uZZBwwbMXrobcOhUbVqfEHv6ywD4CSdA9ApcLBiyyUji66bYrU31bokDngokkci1XEJUhRgoyvBM4%2F2ly5m%2Bt7%2B8bcZx%2B7eMO%2Ft6laZ4tIWOcysaERTGl8MMdnxi9nI41R2Wp3zdg4HXDyLV%2B0JDhPtxkDPmxyfIt2n%2Bfq8JEVd6NVDw4%2BoZZKIJ%2F7%2BRbbrzNjox7I2Zh63JL8FIeRxHIG72qINbcqSDm%2BDz6flaPUa%2FRhtDbGOrAxQcyM21zLZt4mqtuy4%2BgXDC7D7tY7aU4%2BLtPQWCI0H17xC8pEnQAQ8slPxqBRyRmZxncYquHcQUGENWp8981JMBeq4VESxpVBBSvgTXDvNQu%2FLN3%2BQhof1Ypt5o5GbLRbcFbHFvLrXBhzPNAG2hD9PWy77ZkZMsBXtfFxxtmgcwsbR26pLkd%2B%2BdJBMEVQ5aDU%2B3KSpAk6DmWetfQDrJGPQNqBE0FMn7iJGByjpU1%2F5%2BcALDm%2Bs03doM%2FbJrSOUPDj8OTWc0FIA4WmkhZdEPLMp4%2BlGwuUWATFcFQbtuy0znaVArqnArHgoFcmn52sZuQCRzaydTtHYHNbOJJrq%2B%2BV3p42Qqf2x%2B89RrQgsU%2BmEVrvaBXmow%2F9ckKBQ%2BCDxhEgHqpPNQeeNRwd30O0fw2yoA6UnAEruo96CgAzhX1qVGdtTWZN07dIs3Qb1Ncv26xNmQkkhxmAheagIDlDhdyjHtsgnk%2FXCE6WPCnyKufRL0VR5Oy5n%2FXKuhvwHa2pyX7xshhJFIEwVj53hOwpwOlwBloJSIVFUUv66wJfPz7bsWG%2Bj%2FlgbVwpJpk0vcc0C4M3hp4Fj1W0EdRmVlCXoEwfBdWYO4LlZc2JRxpNZuNQYPKpwS0JcSJPs82x4WJEO%2BG5COqVFI0HrSkev%2BPioCoPj0RPikcyvcvjg29IrzAKqaVFwO2dCw6IRSYpgv8NZuO9o5qeY%2FOFG4EdI3UFQGmdygWfcV0wZfDsvxZaAHVMazV0f3FQ7kxyWa8UCgJ8MKJo0gq6xgLsha%2BleCSRqrasuEpWpdkD5bJtT7%2F0Ls9KThmziUC%2BOTyhH%2FfxEx5cdqPy0WGEC%2FwoJtLUbbqMADjHk%2Fkk3oCsBA5Dtp9FRomJYcdeBX%2BJliRFoNc4nxLSaM4FHysvOeUM3xPMUrwCsliMlYu5BH9zD%2BOOMdZoVov5vnWd3bJq4tctURHZ2BhqfgBxzDgvsYZajr1ZHsXswBxWNM8JuJWXev1lH6m0teTfFSxCLlxP9TC4GeAsPSbcwFOwbayStgzp6seVG7itoAiwhLvTMqMxDKfnndcUSSYd1P0mY0RdUtqHWIsDTLkdH6dD%2B9By9bGdAGHw9uHHaQ%2FtEjHoroYj5jDrWHPc4bIO5v5E8Y%2FG7KiFoLYVUuv6%2F7COTpGrOd415T%2BVrUkjTLPRmnqQTm%2BbE43ixkeXGgf%2Byut5SDt5R6m%2B14x9ZwzIoN9g9YzbCTRHFdE0bAsAFufHgQWDwQj88%2FmYS%2FW9nqcuH3QZjag%2Bivd8eKrI6Ygzyw%2F5Ef%2FOhIkjC5DAqIACA6rM3uwoFpgvCym9T7k0xgf2uIQsWDQ4JbEm8tvKNLgqBeKlWe%2FHAkMBYpGXKK3bbLPWeYOc%2BAkpCZdPbKrY6IL2PPZYH4kuPNHsPPVQSBCn%2FaxuFWkMS12lNOtVt6J8JDCn1WrM9pIkCQQ%3D%3D&__VIEWSTATEENCRYPTED=&__EVENTVALIDATION=Uj4FmKlcP5zhMZ%2BwjLLVr9T%2Fz5N5aspkrTr3Ingwy5hcCaopTPdWuCMedJPO2S%2FvE5jn0IyE08zRBDc5uddey8%2B72SReV4JlSus5%2F2Yq0Gaqt7B46gWLidFi6V5PefOLhlOKeLzXdtTPpopxlI81AxEhFHk%3D&_wpSelected=&_wzSelected=&__ASYNCPOST=true&ctl00%24m%24g_edc659ca_c734_4370_bfb7_526a87def9c5%24ctl00%24btnSearch=Search'
]
body = body_templates[int(query_type)]
body = body.replace('FOOBAR', query)
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)", "Content-Type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPSConnection("arcc.sdcounty.ca.gov")
# conn.set_debuglevel(1)
conn.request("POST", "/Pages/fictitious.aspx", body, headers)
response = conn.getresponse()
response_data = response.read()
conn.close()
update_panel_html = lxml.html.fromstring(response_data);
search_result_rows = update_panel_html.xpath('//*[contains(@id,"gvSearchResults")]/tr[position()>1]')
if len(search_result_rows) < 1:
sys.stderr.write('No results\n')
sys.exit(1)
# TODO: handle more than 10 results (requires handling paged responses)
if len(search_result_rows) > 10:
sys.stderr.write('More than 10 results, showing only first 10\n')
del search_result_rows[10:]
writer = csv.writer(sys.stdout)
csv_header = ['Filing Number','Business Name','Owner Name','Filing Date']
writer.writerow(csv_header)
for search_result_row in search_result_rows:
csv_row = [None] * 4
csv_row[0] = search_result_row.xpath('./td[2]/a/text()')[0].strip()
csv_row[1] = search_result_row.xpath('./td[3]/text()')[0].strip()
csv_row[2] = search_result_row.xpath('./td[4]/text()')[0].strip()
csv_row[3] = search_result_row.xpath('./td[6]/text()')[0].strip()
writer.writerow(csv_row)
sys.exit(0)
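# Example output (illustrative; the row values below are hypothetical):
#
#   Filing Number,Business Name,Owner Name,Filing Date
#   2015-123456,PROMISCUOUS FORK,DOE JOHN,01/02/2015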
| mit |
mrbox/django | django/contrib/gis/gdal/srs.py | 71 | 12184 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
def __init__(self, srs_input='', srs_type='user'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
try:
capi.release_srs(self._ptr)
except (AttributeError, TypeError):
pass # Some part might already have been garbage collected
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
automatically determining whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
try:
capi.destroy_ct(self._ptr)
except (AttributeError, TypeError):
pass
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
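# Illustrative sketch of putting the two classes together (assumes GDAL is
# available and both EPSG codes are known to it; not part of the original
# module):
#
#   wgs84 = SpatialReference(4326)
#   tx_south_central = SpatialReference(32140)
#   ct = CoordTransform(wgs84, tx_south_central)
#   # `ct` can then be passed to a geometry's transform() method.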
| bsd-3-clause |
jimbobhickville/taskflow | taskflow/engines/__init__.py | 4 | 1397 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import eventletutils as _eventletutils
# Give a nice warning that if eventlet is being used these modules
# are highly recommended to be patched (or otherwise bad things could
# happen).
_eventletutils.warn_eventlet_not_patched(
expected_patched_modules=['time', 'thread'])
# Promote helpers to this module namespace (for easy access).
from taskflow.engines.helpers import flow_from_detail # noqa
from taskflow.engines.helpers import load # noqa
from taskflow.engines.helpers import load_from_detail # noqa
from taskflow.engines.helpers import load_from_factory # noqa
from taskflow.engines.helpers import run # noqa
from taskflow.engines.helpers import save_factory_details # noqa
| apache-2.0 |
synicalsyntax/zulip | zerver/lib/remote_server.py | 3 | 7286 | import logging
import urllib
from typing import Any, Dict, List, Mapping, Tuple, Union
import requests
import ujson
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.translation import ugettext as _
from analytics.models import InstallationCount, RealmCount
from version import ZULIP_VERSION
from zerver.lib.exceptions import JsonableError
from zerver.lib.export import floatify_datetime_fields
from zerver.models import RealmAuditLog
class PushNotificationBouncerException(Exception):
pass
class PushNotificationBouncerRetryLaterError(JsonableError):
http_status_code = 502
def send_to_push_bouncer(method: str,
endpoint: str,
post_data: Union[str, Dict[str, Any]],
extra_headers: Mapping[str, Any] = {}) -> Dict[str, Any]:
"""While it does actually send the notice, this function has a lot of
code and comments around error handling for the push notifications
bouncer. There are several classes of failures, each with its own
potential solution:
* Network errors with requests.request. We raise an exception to signal
it to the callers.
* 500 errors from the push bouncer or other unexpected responses;
we don't try to parse the response, but do make clear the cause.
* 400 errors from the push bouncer. Here there are 2 categories:
Our server failed to connect to the push bouncer (should throw)
vs. client-side errors like an invalid token.
"""
url = urllib.parse.urljoin(settings.PUSH_NOTIFICATION_BOUNCER_URL,
'/api/v1/remotes/' + endpoint)
api_auth = requests.auth.HTTPBasicAuth(settings.ZULIP_ORG_ID,
settings.ZULIP_ORG_KEY)
headers = {"User-agent": f"ZulipServer/{ZULIP_VERSION}"}
headers.update(extra_headers)
try:
res = requests.request(method,
url,
data=post_data,
auth=api_auth,
timeout=30,
verify=True,
headers=headers)
except (requests.exceptions.Timeout, requests.exceptions.SSLError,
requests.exceptions.ConnectionError) as e:
raise PushNotificationBouncerRetryLaterError(
f"{e.__class__.__name__} while trying to connect to push notification bouncer")
if res.status_code >= 500:
# 500s should be resolved by the people who run the push
# notification bouncer service, and they'll get an appropriate
# error notification from the server. We raise an exception to signal
# to the callers that the attempt failed and they can retry.
error_msg = "Received 500 from push notification bouncer"
logging.warning(error_msg)
raise PushNotificationBouncerRetryLaterError(error_msg)
elif res.status_code >= 400:
# If JSON parsing errors, just let that exception happen
result_dict = ujson.loads(res.content)
msg = result_dict['msg']
if 'code' in result_dict and result_dict['code'] == 'INVALID_ZULIP_SERVER':
# Invalid Zulip server credentials should email this server's admins
raise PushNotificationBouncerException(
_("Push notifications bouncer error: {}").format(msg))
else:
# But most other errors coming from the push bouncer
# server are client errors (e.g. never-registered token)
# and should be handled as such.
raise JsonableError(msg)
elif res.status_code != 200:
# Anything else is unexpected and likely suggests a bug in
# this version of Zulip, so we throw an exception that will
# email the server admins.
raise PushNotificationBouncerException(
f"Push notification bouncer returned unexpected status code {res.status_code}")
# If we don't throw an exception, it's a successful bounce!
return ujson.loads(res.content)
def send_json_to_push_bouncer(method: str, endpoint: str, post_data: Dict[str, Any]) -> None:
send_to_push_bouncer(
method,
endpoint,
ujson.dumps(post_data),
extra_headers={"Content-type": "application/json"},
)
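# Illustrative sketch (hypothetical endpoint and payload; not part of the
# original source):
#
#   send_json_to_push_bouncer('POST', 'push/unregister', {'token': 'abc123'})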
REALMAUDITLOG_PUSHED_FIELDS = ['id', 'realm', 'event_time', 'backfilled', 'extra_data', 'event_type']
def build_analytics_data(realm_count_query: Any,
installation_count_query: Any,
realmauditlog_query: Any) -> Tuple[List[Dict[str, Any]],
List[Dict[str, Any]],
List[Dict[str, Any]]]:
# We limit the batch size on the client side to avoid OOM kills, timeouts, etc.
MAX_CLIENT_BATCH_SIZE = 10000
data = {}
data['analytics_realmcount'] = [
model_to_dict(row) for row in
realm_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data['analytics_installationcount'] = [
model_to_dict(row) for row in
installation_count_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
data['zerver_realmauditlog'] = [
model_to_dict(row, fields=REALMAUDITLOG_PUSHED_FIELDS) for row in
realmauditlog_query.order_by("id")[0:MAX_CLIENT_BATCH_SIZE]
]
floatify_datetime_fields(data, 'analytics_realmcount')
floatify_datetime_fields(data, 'analytics_installationcount')
floatify_datetime_fields(data, 'zerver_realmauditlog')
return (data['analytics_realmcount'], data['analytics_installationcount'],
data['zerver_realmauditlog'])
def send_analytics_to_remote_server() -> None:
# first, check what's latest
try:
result = send_to_push_bouncer("GET", "server/analytics/status", {})
except PushNotificationBouncerRetryLaterError as e:
logging.warning(e.msg)
return
last_acked_realm_count_id = result['last_realm_count_id']
last_acked_installation_count_id = result['last_installation_count_id']
last_acked_realmauditlog_id = result['last_realmauditlog_id']
(realm_count_data, installation_count_data, realmauditlog_data) = build_analytics_data(
realm_count_query=RealmCount.objects.filter(
id__gt=last_acked_realm_count_id),
installation_count_query=InstallationCount.objects.filter(
id__gt=last_acked_installation_count_id),
realmauditlog_query=RealmAuditLog.objects.filter(
event_type__in=RealmAuditLog.SYNCED_BILLING_EVENTS,
id__gt=last_acked_realmauditlog_id))
if len(realm_count_data) + len(installation_count_data) + len(realmauditlog_data) == 0:
return
request = {
'realm_counts': ujson.dumps(realm_count_data),
'installation_counts': ujson.dumps(installation_count_data),
'realmauditlog_rows': ujson.dumps(realmauditlog_data),
'version': ujson.dumps(ZULIP_VERSION),
}
# Send the new rows to the bouncer
try:
send_to_push_bouncer("POST", "server/analytics", request)
except JsonableError as e:
logging.warning(e.msg)
| apache-2.0 |
ianctse/pvlib-python | pvlib/test/test_modelchain.py | 1 | 8186 | import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
from nose.tools import with_setup, raises
# should store this test data locally, but for now...
sam_data = {}
def retrieve_sam_network():
sam_data['cecmod'] = pvsystem.retrieve_sam('cecmod')
sam_data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
sam_data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
def mc_setup():
# limit network usage
try:
modules = sam_data['sandiamod']
except KeyError:
retrieve_sam_network()
modules = sam_data['sandiamod']
module = modules.Canadian_Solar_CS5P_220M___2009_.copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module,
inverter_parameters=inverter)
location = Location(32.2, -111, altitude=700)
return system, location
def test_ModelChain_creation():
system, location = mc_setup()
mc = ModelChain(system, location)
def test_orientation_strategy():
strategies = {None: (0, 180), 'None': (0, 180),
'south_at_latitude_tilt': (32.2, 180),
'flat': (0, 180)}
for strategy, expected in strategies.items():
yield run_orientation_strategy, strategy, expected
def run_orientation_strategy(strategy, expected):
system = PVSystem()
location = Location(32.2, -111, altitude=700)
mc = ModelChain(system, location, orientation_strategy=strategy)
# the `or` accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
        mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
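# Illustrative sketch (not itself a test): the same API the asserts above
# exercise, shown end to end. Values mirror run_orientation_strategy.
def example_orientation_strategy():
    system = PVSystem()
    location = Location(32.2, -111, altitude=700)
    ModelChain(system, location,
               orientation_strategy='south_at_latitude_tilt')
    # 'south_at_latitude_tilt' tilts the array at the site latitude,
    # facing south (azimuth 180), by mutating the passed-in system.
    assert (system.surface_tilt, system.surface_azimuth) == (32.2, 180)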
def test_run_model():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 1.82033564e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_irradiance():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 1.99952400e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_tracker():
system, location = mc_setup()
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 121.421719, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
expected = pd.DataFrame(
    np.array([[54.82513187, 90., 11.0039221, 11.0039221],
              [nan, 0., 0., nan]]),
    columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
    index=times)
assert_frame_equal(mc.tracking, expected)
@raises(ValueError)
def test_bad_get_orientation():
modelchain.get_orientation('bad value')
@raises(ValueError)
def test_basic_chain_required():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
altitude=altitude)
def test_basic_chain_alt_az():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth)
expected = pd.Series(np.array([ 1.14490928477e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_basic_chain_strategy():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
orientation_strategy='south_at_latitude_tilt',
altitude=altitude)
expected = pd.Series(np.array([ 1.82033563543e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_basic_chain_altitude_pressure():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
pressure=93194)
expected = pd.Series(np.array([ 1.15771428788e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
altitude=altitude)
expected = pd.Series(np.array([ 1.15771428788e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
| bsd-3-clause |
prospwro/odoo | addons/irsid_edu_training/models/admission_line.py | 2 | 3784 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution Addon
# Copyright (C) 2009-2013 IRSID (<http://irsid.ru>),
# Paul Korotkov (korotkov.paul@gmail.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from core import *
class edu_admission_line(osv.Model):
_name='edu.admission.line'
_description='Admission Line'
_inherit = 'edu.doc'
_track = {
'state': {
'irsid_edu.mt_admission_line_updated': lambda self, cr, uid, obj, ctx=None: True,
},
}
# Naming Functions
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
lines = self.browse(cr, uid, ids, context=context)
res = []
for line in lines:
res.append((line.id, line.name + '/' + line.target_id.code))
return res
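    # Illustrative result shape (values hypothetical): for a line whose
    # name is 'BSC' and whose target code is 'FT', name_get returns
    # [(line.id, 'BSC/FT')] -- the display label shown by the client.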
# Onchange Functions
def onchange_program_id(self, cr, uid, ids, program_id, context=None):
if program_id:
program = self.pool.get('edu.program').browse(cr, uid, program_id, context=context)
return {'value': {
'name': program.short_name,
}}
return {'value': {}}
# Fields
_columns = {
'name': fields.char(
'Short Title',
size = 32,
required = True,
select = True,
readonly = True,
states = {'draft': [('readonly',False)]},
),
'admission_id': fields.many2one(
'edu.admission',
'Admission Plan',
required = True,
ondelete = 'cascade',
readonly = True,
states = {'draft': [('readonly',False)]},
),
'program_id': fields.many2one(
'edu.program',
'Program',
required = True,
readonly = True,
states = {'draft': [('readonly',False)]},
),
'target_id': fields.many2one(
'edu.admission.target',
'Target',
readonly = True,
states = {'draft': [('readonly',False)]},
),
'seats': fields.float(
'Seats',
readonly = True,
states = {'draft': [('readonly',False)]},
),
'test_ids': fields.many2many(
'edu.module',
'edu_admission_line_module_rel',
'line_id',
'module_id',
'Admission Tests',
readonly = True,
states = {'draft': [('readonly',False)]},
domain="[('program_id','=',program_id)]",
),
'note': fields.text(
'Note',
readonly = True,
states = {'draft': [('readonly',False)]},
),
}
# SQL Constraints
_sql_constraints = [
('line_uniq', 'unique(admission_id, program_id,target_id)', 'Admission Line must be unique per Plan and Target!')
]
# Sorting Order
_order = 'admission_id,program_id,target_id'
| agpl-3.0 |
txemi/ansible | lib/ansible/modules/notification/twilio.py | 36 | 5983 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <matthew.makai@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
- Sends a text message to a phone number through the Twilio messaging API.
notes:
- This module is non-idempotent because it sends a text message through the
external API. It is idempotent only in the case that the module fails.
- Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need a Twilio account with
a purchased or verified phone number to send the text message.
options:
account_sid:
description:
user's Twilio account SID, found on the account page
required: true
auth_token:
description: user's Twilio authentication token
required: true
msg:
description:
the body of the text message
required: true
to_number:
description:
one or more phone numbers to send the text message to,
format +15551112222
required: true
from_number:
description:
the Twilio number to send the text message from, format +15551112222
required: true
media_url:
description:
a URL with a picture, video or sound clip to send with an MMS
(multimedia message) instead of a plain SMS
required: false
author: "Matt Makai (@makaimc)"
'''
EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: All servers with webserver role are now configured.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
delegate_to: localhost
# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: This server configuration is now complete.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15553258899
to_number:
- +15551113232
- +12025551235
- +19735559010
delegate_to: localhost
# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: Deployment complete!
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
media_url: https://demo.twilio.com/logo.png
delegate_to: localhost
'''
# =======================================
# twilio module support methods
#
import urllib
def post_twilio_api(module, account_sid, auth_token, msg, from_number,
to_number, media_url=None):
URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
% (account_sid,)
AGENT = "Ansible"
data = {'From':from_number, 'To':to_number, 'Body':msg}
if media_url:
data['MediaUrl'] = media_url
encoded_data = urllib.urlencode(data)
headers = {'User-Agent': AGENT,
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json',
}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = account_sid.replace('\n', '')
module.params['url_password'] = auth_token.replace('\n', '')
return fetch_url(module, URI, data=encoded_data, headers=headers)
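# Sketch of the form body built above (numbers are placeholders; the key
# order of urlencode output is not guaranteed):
#
#   data = {'From': '+15552014545', 'To': '+15553035681', 'Body': 'hi'}
#   urllib.urlencode(data)
#   # -> e.g. 'To=%2B15553035681&From=%2B15552014545&Body=hi'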
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
auth_token=dict(required=True, no_log=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
media_url=dict(default=None, required=False),
),
supports_check_mode=True
)
account_sid = module.params['account_sid']
auth_token = module.params['auth_token']
msg = module.params['msg']
from_number = module.params['from_number']
to_number = module.params['to_number']
media_url = module.params['media_url']
if not isinstance(to_number, list):
to_number = [to_number]
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
from_number, number, media_url)
if info['status'] not in [200, 201]:
body_message = "unknown error"
if 'body' in info:
body = json.loads(info['body'])
body_message = body['message']
module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
module.exit_json(msg=msg, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
saeki-masaki/glance | glance/api/policy.py | 7 | 25297 | # Copyright (c) 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Glance"""
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from glance.common import exception
import glance.domain.proxy
from glance import i18n
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_RULES = policy.Rules.from_dict({
'context_is_admin': 'role:admin',
'default': '@',
'manage_image_cache': 'role:admin',
})
_ = i18n._
class Enforcer(policy.Enforcer):
"""Responsible for loading and enforcing rules"""
def __init__(self):
if CONF.find_file(CONF.oslo_policy.policy_file):
kwargs = dict(rules=None, use_conf=True)
else:
kwargs = dict(rules=DEFAULT_RULES, use_conf=False)
super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs)
def add_rules(self, rules):
"""Add new rules to the Rules object"""
self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
def enforce(self, context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises: `glance.common.exception.Forbidden`
:returns: A non-False value if access is allowed.
"""
credentials = {
'roles': context.roles,
'user': context.user,
'tenant': context.tenant,
}
return super(Enforcer, self).enforce(action, target, credentials,
do_raise=True,
exc=exception.Forbidden,
action=action)
def check(self, context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:returns: A non-False value if access is allowed.
"""
credentials = {
'roles': context.roles,
'user': context.user,
'tenant': context.tenant,
}
return super(Enforcer, self).enforce(action, target, credentials)
def check_is_admin(self, context):
"""Check if the given context is associated with an admin role,
as defined via the 'context_is_admin' RBAC rule.
:param context: Glance request context
:returns: A non-False value if context role is admin.
"""
return self.check(context, 'context_is_admin', context.to_dict())
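# Minimal usage sketch for the Enforcer above. FakeContext is a stand-in
# for the Glance request context (only the attributes that enforce() and
# check() read are modeled); the rule name comes from DEFAULT_RULES.
#
#   class FakeContext(object):
#       roles = ['admin']
#       user = 'u1'
#       tenant = 't1'
#       def to_dict(self):
#           return {'roles': self.roles}
#
#   enforcer = Enforcer()
#   enforcer.check_is_admin(FakeContext())  # True under 'role:admin'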
class ImageRepoProxy(glance.domain.proxy.Repo):
def __init__(self, image_repo, context, policy):
self.context = context
self.policy = policy
self.image_repo = image_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
try:
image = super(ImageRepoProxy, self).get(image_id)
except exception.NotFound:
self.policy.enforce(self.context, 'get_image', {})
raise
else:
self.policy.enforce(self.context, 'get_image', ImageTarget(image))
return image
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_images', {})
return super(ImageRepoProxy, self).list(*args, **kwargs)
def save(self, image, from_state=None):
self.policy.enforce(self.context, 'modify_image', image.target)
return super(ImageRepoProxy, self).save(image, from_state=from_state)
def add(self, image):
self.policy.enforce(self.context, 'add_image', image.target)
return super(ImageRepoProxy, self).add(image)
class ImageProxy(glance.domain.proxy.Image):
def __init__(self, image, context, policy):
self.image = image
self.target = ImageTarget(image)
self.context = context
self.policy = policy
super(ImageProxy, self).__init__(image)
@property
def visibility(self):
return self.image.visibility
@visibility.setter
def visibility(self, value):
if value == 'public':
self.policy.enforce(self.context, 'publicize_image', self.target)
self.image.visibility = value
@property
def locations(self):
return ImageLocationsProxy(self.image.locations,
self.context, self.policy)
@locations.setter
def locations(self, value):
if not isinstance(value, (list, ImageLocationsProxy)):
raise exception.Invalid(_('Invalid locations: %s') % value)
self.policy.enforce(self.context, 'set_image_location', self.target)
new_locations = list(value)
if (set([loc['url'] for loc in self.image.locations]) -
set([loc['url'] for loc in new_locations])):
self.policy.enforce(self.context, 'delete_image_location',
self.target)
self.image.locations = new_locations
def delete(self):
self.policy.enforce(self.context, 'delete_image', self.target)
return self.image.delete()
def deactivate(self):
LOG.debug('Attempting deactivate')
target = ImageTarget(self.image)
self.policy.enforce(self.context, 'deactivate', target=target)
LOG.debug('Deactivate allowed, continue')
self.image.deactivate()
def reactivate(self):
LOG.debug('Attempting reactivate')
target = ImageTarget(self.image)
self.policy.enforce(self.context, 'reactivate', target=target)
LOG.debug('Reactivate allowed, continue')
self.image.reactivate()
def get_data(self, *args, **kwargs):
self.policy.enforce(self.context, 'download_image', self.target)
return self.image.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
self.policy.enforce(self.context, 'upload_image', self.target)
return self.image.set_data(*args, **kwargs)
def get_member_repo(self, **kwargs):
member_repo = self.image.get_member_repo(**kwargs)
return ImageMemberRepoProxy(member_repo, self.context, self.policy)
class ImageFactoryProxy(glance.domain.proxy.ImageFactory):
def __init__(self, image_factory, context, policy):
self.image_factory = image_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
if kwargs.get('visibility') == 'public':
self.policy.enforce(self.context, 'publicize_image', {})
return super(ImageFactoryProxy, self).new_image(**kwargs)
class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory):
def __init__(self, member_factory, context, policy):
super(ImageMemberFactoryProxy, self).__init__(
member_factory,
image_proxy_class=ImageProxy,
image_proxy_kwargs={'context': context, 'policy': policy})
class ImageMemberRepoProxy(glance.domain.proxy.Repo):
def __init__(self, member_repo, context, policy):
self.member_repo = member_repo
self.target = ImageTarget(self.member_repo.image)
self.context = context
self.policy = policy
def add(self, member):
self.policy.enforce(self.context, 'add_member', self.target)
self.member_repo.add(member)
def get(self, member_id):
self.policy.enforce(self.context, 'get_member', self.target)
return self.member_repo.get(member_id)
def save(self, member, from_state=None):
self.policy.enforce(self.context, 'modify_member', self.target)
self.member_repo.save(member, from_state=from_state)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_members', self.target)
return self.member_repo.list(*args, **kwargs)
def remove(self, member):
self.policy.enforce(self.context, 'delete_member', self.target)
self.member_repo.remove(member)
class ImageLocationsProxy(object):
__hash__ = None
def __init__(self, locations, context, policy):
self.locations = locations
self.context = context
self.policy = policy
def __copy__(self):
return type(self)(self.locations, self.context, self.policy)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
return type(self)(copy.deepcopy(self.locations, memo),
self.context, self.policy)
def _get_checker(action, func_name):
def _checker(self, *args, **kwargs):
self.policy.enforce(self.context, action, {})
method = getattr(self.locations, func_name)
return method(*args, **kwargs)
return _checker
count = _get_checker('get_image_location', 'count')
index = _get_checker('get_image_location', 'index')
__getitem__ = _get_checker('get_image_location', '__getitem__')
__contains__ = _get_checker('get_image_location', '__contains__')
__len__ = _get_checker('get_image_location', '__len__')
__cast = _get_checker('get_image_location', '__cast')
__cmp__ = _get_checker('get_image_location', '__cmp__')
__iter__ = _get_checker('get_image_location', '__iter__')
append = _get_checker('set_image_location', 'append')
extend = _get_checker('set_image_location', 'extend')
insert = _get_checker('set_image_location', 'insert')
reverse = _get_checker('set_image_location', 'reverse')
__iadd__ = _get_checker('set_image_location', '__iadd__')
__setitem__ = _get_checker('set_image_location', '__setitem__')
pop = _get_checker('delete_image_location', 'pop')
remove = _get_checker('delete_image_location', 'remove')
__delitem__ = _get_checker('delete_image_location', '__delitem__')
__delslice__ = _get_checker('delete_image_location', '__delslice__')
del _get_checker
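# The closure pattern used above, in miniature and independent of glance:
# a factory returns methods that run a check before delegating to the
# wrapped object. All names below are illustrative only.
#
#   def _guarded(func_name):
#       def _checker(self, *args, **kwargs):
#           self.calls.append(func_name)  # stand-in for policy.enforce
#           return getattr(self.items, func_name)(*args, **kwargs)
#       return _checker
#
#   class GuardedList(object):
#       def __init__(self, items):
#           self.items, self.calls = list(items), []
#       append = _guarded('append')
#       __len__ = _guarded('__len__')
#
#   gl = GuardedList([1, 2]); gl.append(3); len(gl)
#   # gl.calls == ['append', '__len__']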
class TaskProxy(glance.domain.proxy.Task):
def __init__(self, task, context, policy):
self.task = task
self.context = context
self.policy = policy
super(TaskProxy, self).__init__(task)
class TaskStubProxy(glance.domain.proxy.TaskStub):
def __init__(self, task_stub, context, policy):
self.task_stub = task_stub
self.context = context
self.policy = policy
super(TaskStubProxy, self).__init__(task_stub)
class TaskRepoProxy(glance.domain.proxy.TaskRepo):
def __init__(self, task_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_repo = task_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskRepoProxy,
self).__init__(task_repo,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
def get(self, task_id):
self.policy.enforce(self.context, 'get_task', {})
return super(TaskRepoProxy, self).get(task_id)
def add(self, task):
self.policy.enforce(self.context, 'add_task', {})
super(TaskRepoProxy, self).add(task)
def save(self, task):
self.policy.enforce(self.context, 'modify_task', {})
super(TaskRepoProxy, self).save(task)
class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_stub_repo = task_stub_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskStubRepoProxy,
self).__init__(task_stub_repo,
task_stub_proxy_class=TaskStubProxy,
task_stub_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_tasks', {})
return super(TaskStubRepoProxy, self).list(*args, **kwargs)
class TaskFactoryProxy(glance.domain.proxy.TaskFactory):
def __init__(self, task_factory, context, policy):
self.task_factory = task_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
class ImageTarget(object):
SENTINEL = object()
def __init__(self, target):
"""Initialize the object
:param target: Object being targeted
"""
self.target = target
def __getitem__(self, key):
"""Return the value of 'key' from the target.
If the target has the attribute 'key', return it.
:param key: value to retrieve
"""
key = self.key_transforms(key)
value = getattr(self.target, key, self.SENTINEL)
if value is self.SENTINEL:
extra_properties = getattr(self.target, 'extra_properties', None)
if extra_properties is not None:
value = extra_properties[key]
else:
value = None
return value
def key_transforms(self, key):
if key == 'id':
key = 'image_id'
return key
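# Lookup-order sketch for ImageTarget.__getitem__ above (FakeImage is a
# stand-in object, not a glance type):
#
#   class FakeImage(object):
#       image_id = 'abc-123'
#       extra_properties = {'os_distro': 'ubuntu'}
#
#   target = ImageTarget(FakeImage())
#   target['id']         # -> 'abc-123' ('id' transforms to 'image_id')
#   target['os_distro']  # -> 'ubuntu' (falls back to extra_properties)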
# Metadef Namespace classes
class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace):
def __init__(self, namespace, context, policy):
self.namespace_input = namespace
self.context = context
self.policy = policy
super(MetadefNamespaceProxy, self).__init__(namespace)
class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context, namespace_policy):
self.context = context
self.policy = namespace_policy
self.namespace_repo = namespace_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceRepoProxy,
self).__init__(namespace_repo,
namespace_proxy_class=MetadefNamespaceProxy,
namespace_proxy_kwargs=proxy_kwargs)
def get(self, namespace):
self.policy.enforce(self.context, 'get_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).get(namespace)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_namespaces', {})
return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs)
def save(self, namespace):
self.policy.enforce(self.context, 'modify_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).save(namespace)
def add(self, namespace):
self.policy.enforce(self.context, 'add_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).add(namespace)
class MetadefNamespaceFactoryProxy(
glance.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context, policy):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy,
meta_namespace_proxy_kwargs=proxy_kwargs)
# Metadef Object classes
class MetadefObjectProxy(glance.domain.proxy.MetadefObject):
def __init__(self, meta_object, context, policy):
self.meta_object = meta_object
self.context = context
self.policy = policy
super(MetadefObjectProxy, self).__init__(meta_object)
class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.object_repo = object_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectRepoProxy,
self).__init__(object_repo,
object_proxy_class=MetadefObjectProxy,
object_proxy_kwargs=proxy_kwargs)
def get(self, namespace, object_name):
self.policy.enforce(self.context, 'get_metadef_object', {})
return super(MetadefObjectRepoProxy, self).get(namespace, object_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_objects', {})
return super(MetadefObjectRepoProxy, self).list(*args, **kwargs)
def save(self, meta_object):
self.policy.enforce(self.context, 'modify_metadef_object', {})
return super(MetadefObjectRepoProxy, self).save(meta_object)
def add(self, meta_object):
self.policy.enforce(self.context, 'add_metadef_object', {})
return super(MetadefObjectRepoProxy, self).add(meta_object)
class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context, policy):
self.meta_object_factory = meta_object_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy,
meta_object_proxy_kwargs=proxy_kwargs)
# Metadef ResourceType classes
class MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type, context, policy):
self.meta_resource_type = meta_resource_type
self.context = context
self.policy = policy
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
class MetadefResourceTypeRepoProxy(
glance.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, resource_type_repo, context, resource_type_policy):
self.context = context
self.policy = resource_type_policy
self.resource_type_repo = resource_type_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeRepoProxy, self).__init__(
resource_type_repo,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'list_metadef_resource_types', {})
return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs)
def get(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_resource_type', {})
return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs)
def add(self, resource_type):
self.policy.enforce(self.context,
'add_metadef_resource_type_association', {})
return super(MetadefResourceTypeRepoProxy, self).add(resource_type)
class MetadefResourceTypeFactoryProxy(
glance.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context, policy):
self.resource_type_factory = resource_type_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
# Metadef namespace properties classes
class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty):
def __init__(self, namespace_property, context, policy):
self.namespace_property = namespace_property
self.context = context
self.policy = policy
super(MetadefPropertyProxy, self).__init__(namespace_property)
class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo):
def __init__(self, property_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.property_repo = property_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyRepoProxy, self).__init__(
property_repo,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
def get(self, namespace, property_name):
self.policy.enforce(self.context, 'get_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).get(namespace,
property_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_properties', {})
return super(MetadefPropertyRepoProxy, self).list(
*args, **kwargs)
def save(self, namespace_property):
self.policy.enforce(self.context, 'modify_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).save(
namespace_property)
def add(self, namespace_property):
self.policy.enforce(self.context, 'add_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).add(
namespace_property)
class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context, policy):
self.namespace_property_factory = namespace_property_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
# Metadef Tag classes
class MetadefTagProxy(glance.domain.proxy.MetadefTag):
def __init__(self, meta_tag, context, policy):
self.context = context
self.policy = policy
super(MetadefTagProxy, self).__init__(meta_tag)
class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context, tag_policy):
self.context = context
self.policy = tag_policy
self.tag_repo = tag_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagRepoProxy,
self).__init__(tag_repo,
tag_proxy_class=MetadefTagProxy,
tag_proxy_kwargs=proxy_kwargs)
def get(self, namespace, tag_name):
self.policy.enforce(self.context, 'get_metadef_tag', {})
return super(MetadefTagRepoProxy, self).get(namespace, tag_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_tags', {})
return super(MetadefTagRepoProxy, self).list(*args, **kwargs)
def save(self, meta_tag):
self.policy.enforce(self.context, 'modify_metadef_tag', {})
return super(MetadefTagRepoProxy, self).save(meta_tag)
def add(self, meta_tag):
self.policy.enforce(self.context, 'add_metadef_tag', {})
return super(MetadefTagRepoProxy, self).add(meta_tag)
def add_tags(self, meta_tags):
self.policy.enforce(self.context, 'add_metadef_tags', {})
return super(MetadefTagRepoProxy, self).add_tags(meta_tags)
class MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context, policy):
self.meta_tag_factory = meta_tag_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy,
meta_tag_proxy_kwargs=proxy_kwargs)
| apache-2.0 |
Xevib/manuel | manuel/cli.py | 1 | 1137 | from __future__ import absolute_import
import click
from manuel.manuel import Manuel
@click.group()
def manuel():
pass
@manuel.command()
@click.argument('config_file')
@click.option('--index/--no-index', default=False)
@click.option('--recreate/--no-recreate', default=True)
@click.option('--debug/--no-debug', default=False)
@click.option('--name')
def cli_generate_report(config_file, index, recreate, debug, name):
"""
CLI entry point
:param config_file: path to the report configuration file
:param index: whether to (re)create the database index first
:param recreate: Recreates the materialized view
:type recreate: bool
:param debug: Enables debug mode
:type debug: bool
:param name: report name; falls back to the config file name if omitted
:return: None
"""
m = Manuel(config_file)
if index:
m.create_index(config_file, debug)
if recreate:
m.generate_materialized_vies(config_file, debug)
if not name:
from os.path import basename, splitext
name = m.config["report"]["general"].get("report_name", None) or \
splitext(basename(config_file))[0] or 'report'
result = m.generate_report(config_file, debug, name)
m.save_results(result, config_file)
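# Hypothetical shell invocation of the command above (the config path is
# illustrative; whether click exposes the command with underscores or
# dashes depends on the click version in use):
#
#   manuel cli_generate_report report.cfg --index --no-recreate --name demo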
def invoke():
manuel()
| gpl-3.0 |
brianjgeiger/osf.io | scripts/register_oauth_scopes.py | 13 | 2346 | """
Register the list of OAuth2 scopes that can be requested by third parties. This populates the Postgres table
referenced by CAS when responding to authorization grant requests. The database model is minimal; the exact
specification for what a scope contains lives in the Python module from which this table is populated.
"""
import sys
import logging
import django
from django.db import transaction
django.setup()
from scripts import utils as script_utils
from framework.auth import oauth_scopes
from osf.models import ApiOAuth2Scope
from website.app import init_app
logger = logging.getLogger(__name__)
def get_or_create(name, description, save=True):
"""
Populate or update the database entry, as needed
:param name: the name of the scope
:param description: the description of the scope
:return: the scope object
"""
if name != name.lower():
raise ValueError('Scope names are case-sensitive, and should always be lower-case.')
try:
scope_obj = ApiOAuth2Scope.objects.get(name=name)
setattr(scope_obj, 'description', description)
print('Updating existing database entry for: {}'.format(name))
except ApiOAuth2Scope.DoesNotExist:
scope_obj = ApiOAuth2Scope(name=name, description=description)
print('Created new database entry for: {}'.format(name))
if save:
scope_obj.save()
return scope_obj
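# Illustrative call (the scope name and description are hypothetical and
# not scopes actually defined by OSF):
#
#   scope = get_or_create('osf.example.read',
#                         'Read-only access to example data', save=False)
#   # -> an unsaved ApiOAuth2Scope with the given name and description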
def do_populate(clear=False):
"""
Given a dictionary of scope definitions, {name: scope_namedtuple}, load the
resulting data into the database table
"""
scope_dict = oauth_scopes.public_scopes
if clear:
ApiOAuth2Scope.objects.all().delete()
for name, scope in scope_dict.items():
# Update a scope if it exists, else populate
if scope.is_public is True:
get_or_create(name, scope.description, save=True)
else:
logger.info('{} is not a publicly advertised scope; did not load into database'.format(name))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_populate(clear=True)
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
| apache-2.0 |
joshuajan/odoo | openerp/addons/base/module/wizard/base_module_configuration.py | 447 | 2274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class base_module_configuration(osv.osv_memory):
_name = "base.module.configuration"
def start(self, cr, uid, ids, context=None):
todo_ids = self.pool.get('ir.actions.todo').search(cr, uid,
['|', ('type','=','recurring'), ('state', '=', 'open')])
if not todo_ids:
# When there are no wizard todos left, display a completion message
data_obj = self.pool.get('ir.model.data')
result = data_obj._get_id(cr, uid, 'base', 'view_base_module_configuration_form')
view_id = data_obj.browse(cr, uid, result).res_id
value = {
'name': _('System Configuration done'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.configuration',
'view_id': [view_id],
'type': 'ir.actions.act_window',
'target': 'new'
}
return value
# Run the config wizards
config_pool = self.pool.get('res.config')
return config_pool.start(cr, uid, ids, context=context)
base_module_configuration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
googlefonts/nototools | nototools/lang_data.py | 3 | 11484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to return information on scripts and langs, primarily for the
Noto website.
The data is generated based on information in cldr_data and unicode_data,
and cached in this module. The primary functions return the primary
language for each script (sometimes 'und', e.g. for Dsrt), and the names
for each lang_script that has an English name (in English and in the script
when known). Other functions return the set of scripts and the set of
lang_scripts (that have english names).
"""
import collections
from nototools import cldr_data
from nototools import unicode_data
# controls printing of debug/trace information
# normally disabled
def _log(msg):
# sys.stderr.write('#lang_data: ' + msg + '\n')
pass
def is_excluded_script(script_code):
return script_code in ["Zinh", "Zyyy", "Zzzz"]
def script_includes(script_code):
"""Returns a set of script codes 'included' by the provided one. Intended to
deal with script codes like 'Jpan' used to describe writing systems that
use/require multiple scripts. The script code itself (and other subsets)
are also included in the result."""
if script_code not in scripts():
raise ValueError("!not a script code: %s" % script_code)
if script_code == "Hrkt":
return frozenset(["Hrkt", "Hira", "Kana"])
if script_code == "Jpan":
return frozenset(["Jpan", "Hrkt", "Hani", "Hira", "Kana"])
if script_code == "Kore":
return frozenset(["Kore", "Hang"])
return frozenset([script_code])
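# Examples of the containment rules above (assuming the codes are present
# in scripts()); the first two follow directly from the branches, and the
# last holds for any single-script code known to the data:
#
#   script_includes('Kore')  # -> frozenset(['Kore', 'Hang'])
#   script_includes('Hrkt')  # -> frozenset(['Hrkt', 'Hira', 'Kana'])
#   script_includes('Latn')  # -> frozenset(['Latn'])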
def _create_lang_data():
"""Generates language data from CLDR plus extensions.
Returns a mapping from lang to a tuple of:
- a set of scripts used in some region
- a set of scripts not used in any region."""
all_lang_scripts = collections.defaultdict(set)
used_lang_scripts = collections.defaultdict(set)
known_scripts = set()
all_langs = set()
for region in cldr_data.known_regions():
lang_scripts = cldr_data.region_to_lang_scripts(region)
for lang_script in lang_scripts:
lang, script = lang_script.split("-")
known_scripts.add(script)
if lang == "und":
_log("used lang is und for script %s in region %s" % (script, region))
continue
used_lang_scripts[lang].add(script)
all_lang_scripts[lang].add(script)
all_langs.add(lang)
for lang in cldr_data.known_langs():
lang_scripts = cldr_data.lang_to_scripts(lang)
all_lang_scripts[lang] |= lang_scripts
known_scripts |= lang_scripts
all_langs.add(lang)
for lang in all_langs:
script = cldr_data.get_likely_script(lang)
if not is_excluded_script(script):
all_lang_scripts[lang].add(script)
for script in unicode_data.all_scripts():
if is_excluded_script(script):
continue
lang = cldr_data.get_likely_subtags("und-" + script)[0]
if lang != "und":
if script not in all_lang_scripts[lang]:
_log("adding likely lang %s for script %s" % (lang, script))
all_lang_scripts[lang].add(script)
elif script not in known_scripts:
_log("adding script with unknown language %s" % script)
all_lang_scripts[lang].add(script)
else:
_log("script %s with unknown language already seen" % script)
# Patch: ensure ryu-Jpan exists
# - Okinawan can be written in either Kana or a combination of Hira
# and Kanji. Rather than take a strong position on this, add a
# mapping to Jpan.
all_lang_scripts["ryu"].add("Jpan")
# Patch: see noto-fonts#133 comment on June 8th.
all_lang_scripts["tlh"] |= {"Latn", "Piqd"}
all_langs = set(used_lang_scripts) | set(all_lang_scripts)
lang_data = {}
for lang in all_langs:
if lang in used_lang_scripts:
if lang in all_lang_scripts:
unused_set = all_lang_scripts[lang] - used_lang_scripts[lang]
lang_data[lang] = (
used_lang_scripts[lang].copy(),
unused_set if unused_set else set(),
)
else:
lang_data[lang] = (used_lang_scripts[lang].copy(), set())
else:
lang_data[lang] = (set(), all_lang_scripts[lang].copy())
return lang_data
def _langs_with_no_scripts(lang_script_data):
"""Return a set of langs with no scripts in lang_script_data."""
return set(
[
k
for k in lang_script_data
if not (lang_script_data[k][0] or lang_script_data[k][1])
]
)
def _remove_keys_from_dict(keys, some_dict):
for k in keys:
some_dict.pop(k, None)
def _create_script_to_default_lang(lang_script_data):
"""Iterates over all the scripts in lang_script_data, and returns a map
from each script to the default language code, generally based on cldr
likely subtag data. This assigns 'en' to Latn by fiat (cldr defaults to
'und'). Some other scripts (e.g. Dsrt) just get 'und'.
This checks that the default lang for a script actually uses that script
in lang_script_data, when the default lang is not 'und'.
"""
script_to_default_lang = {}
all_scripts = set()
script_to_used = collections.defaultdict(set)
script_to_unused = collections.defaultdict(set)
for lang in lang_script_data:
used, unused = lang_script_data[lang]
all_scripts |= used
all_scripts |= unused
for script in used:
script_to_used[script].add(lang)
for script in unused:
script_to_unused[script].add(lang)
# Add scripts without langs.
all_scripts.add("Zsym")
all_scripts.add("Zsye")
# Patch Klingon as default lang for (unused) script pIqaD
script_to_used["Piqd"].add("tlh")
for script in sorted(all_scripts):
default_lang = cldr_data.get_likely_subtags("und-" + script)[0]
if default_lang == "und":
if script == "Latn":
default_lang = "en" # cultural bias...
else:
_log("no default lang for script %s" % script)
langs = script_to_used[script]
if langs:
default_lang = next(iter(langs))
_log("using used lang %s from %s" % (default_lang, langs))
else:
langs = script_to_unused[script]
if langs:
default_lang = next(iter(langs))
_log("using unused lang %s from %s" % (default_lang, langs))
else:
_log("defaulting to 'und'")
else:
used, unused = lang_script_data[default_lang]
assert script in used or script in unused
script_to_default_lang[script] = default_lang
return script_to_default_lang
def _create_lang_script_to_names(lang_script_data):
"""Generate a map from lang-script to English (and possibly native) names.
Whether the script is included in the name depends on the number of used
and unused scripts. If there's one used script, that script is omitted.
Else if there's no used script and one unused script, that script is
omitted. Else the script is included. If there's no English name for
the lang_script, it is excluded.
"""
lang_to_names = {}
for lang in lang_script_data:
used, unused = lang_script_data[lang]
if len(used) == 1:
exclude_script = next(iter(used))
elif not used and len(unused) == 1:
exclude_script = next(iter(unused))
else:
exclude_script = ""
for script in used | unused:
lang_script = lang + "-" + script
target = lang if script == exclude_script else lang_script
# special case, not generally useful
if target.startswith("und-"):
en_name = cldr_data.get_english_script_name(target[4:]) + " script"
else:
en_name = cldr_data.get_english_language_name(target)
if not en_name:
# Easier than patching the cldr_data, not sure I want to go there.
if lang_script == "tlh-Piqd":
en_name = u"Klingon"
else:
_log("No english name for %s" % lang_script)
continue
native_name = cldr_data.get_native_language_name(
lang_script, exclude_script
)
if native_name == en_name:
native_name = None
lang_to_names[lang_script] = (
[en_name, native_name] if native_name else [en_name]
)
return lang_to_names
_LANG_DATA = None
def _get_lang_data():
global _LANG_DATA
if not _LANG_DATA:
_LANG_DATA = _create_lang_data()
return _LANG_DATA
_SCRIPT_TO_DEFAULT_LANG = None
def _get_script_to_default_lang():
global _SCRIPT_TO_DEFAULT_LANG
if not _SCRIPT_TO_DEFAULT_LANG:
_SCRIPT_TO_DEFAULT_LANG = _create_script_to_default_lang(_get_lang_data())
return _SCRIPT_TO_DEFAULT_LANG
_LANG_SCRIPT_TO_NAMES = None
def _get_lang_script_to_names():
global _LANG_SCRIPT_TO_NAMES
if not _LANG_SCRIPT_TO_NAMES:
_LANG_SCRIPT_TO_NAMES = _create_lang_script_to_names(_get_lang_data())
return _LANG_SCRIPT_TO_NAMES
def scripts():
return _get_script_to_default_lang().keys()
def script_to_default_lang(script):
return _get_script_to_default_lang()[script]
def lang_scripts():
return _get_lang_script_to_names().keys()
def lang_script_to_names(lang_script):
return _get_lang_script_to_names()[lang_script]
def main():
lang_data = _get_lang_data()
print()
print("--------")
langs_without_scripts = _langs_with_no_scripts(lang_data)
if langs_without_scripts:
print("langs without scripts: " + ", ".join(sorted(langs_without_scripts)))
_remove_keys_from_dict(langs_without_scripts, lang_data)
print()
print("lang data")
for k in sorted(lang_data):
used, unused = lang_data[k]
used_msg = "used: " + ", ".join(sorted(used)) if used else None
unused_msg = "unused: " + ", ".join(sorted(unused)) if unused else None
msg = "; ".join([m for m in (used_msg, unused_msg) if m])
print(k, msg)
print()
print("lang_script to names")
lang_script_to_names = _get_lang_script_to_names()
for t in sorted(lang_script_to_names.items()):
print("%s: %s" % t)
print()
print("script to default lang")
script_to_default_lang = _get_script_to_default_lang()
for t in sorted(script_to_default_lang.items()):
print("%s: %s" % t)
if __name__ == "__main__":
main()
| apache-2.0 |
fpy171/django | django/conf/locale/pt/formats.py | 504 | 1717 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
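# Example rendering under DATE_FORMAT above: date(2006, 10, 25) formats
# as "25 de Outubro de 2006" -- the backslashes escape the literal
# letters "d" and "e" so they are not interpreted as format characters.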
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/common/security-features/subresource/font.py | 16 | 4580 | import os, sys, base64
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import subresource
def generate_payload(request, server_data):
data = ('{"headers": %(headers)s}') % server_data
if "id" in request.GET:
request.server.stash.put(request.GET["id"], data)
# Simple base64 encoded .ttf font
return base64.decodestring("AAEAAAANAIAAAwBQRkZUTU6u6MkAAAXcAAAAHE9TLzJWYW"
"QKAAABWAAAAFZjbWFwAA8D7wAAAcAAAAFCY3Z0IAAhAnkA"
"AAMEAAAABGdhc3D//wADAAAF1AAAAAhnbHlmCC6aTwAAAx"
"QAAACMaGVhZO8ooBcAAADcAAAANmhoZWEIkAV9AAABFAAA"
"ACRobXR4EZQAhQAAAbAAAAAQbG9jYQBwAFQAAAMIAAAACm"
"1heHAASQA9AAABOAAAACBuYW1lehAVOgAAA6AAAAIHcG9z"
"dP+uADUAAAWoAAAAKgABAAAAAQAAMhPyuV8PPPUACwPoAA"
"AAAMU4Lm0AAAAAxTgubQAh/5wFeAK8AAAACAACAAAAAAAA"
"AAEAAAK8/5wAWgXcAAAAAAV4AAEAAAAAAAAAAAAAAAAAAA"
"AEAAEAAAAEAAwAAwAAAAAAAgAAAAEAAQAAAEAALgAAAAAA"
"AQXcAfQABQAAAooCvAAAAIwCigK8AAAB4AAxAQIAAAIABg"
"kAAAAAAAAAAAABAAAAAAAAAAAAAAAAUGZFZABAAEEAQQMg"
"/zgAWgK8AGQAAAABAAAAAAAABdwAIQAAAAAF3AAABdwAZA"
"AAAAMAAAADAAAAHAABAAAAAAA8AAMAAQAAABwABAAgAAAA"
"BAAEAAEAAABB//8AAABB////wgABAAAAAAAAAQYAAAEAAA"
"AAAAAAAQIAAAACAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAhAnkAAAAqACoAKgBGAAAAAgAhAA"
"ABKgKaAAMABwAusQEALzyyBwQA7TKxBgXcPLIDAgDtMgCx"
"AwAvPLIFBADtMrIHBgH8PLIBAgDtMjMRIREnMxEjIQEJ6M"
"fHApr9ZiECWAAAAwBk/5wFeAK8AAMABwALAAABNSEVATUh"
"FQE1IRUB9AH0/UQDhPu0BRQB9MjI/tTIyP7UyMgAAAAAAA"
"4ArgABAAAAAAAAACYATgABAAAAAAABAAUAgQABAAAAAAAC"
"AAYAlQABAAAAAAADACEA4AABAAAAAAAEAAUBDgABAAAAAA"
"AFABABNgABAAAAAAAGAAUBUwADAAEECQAAAEwAAAADAAEE"
"CQABAAoAdQADAAEECQACAAwAhwADAAEECQADAEIAnAADAA"
"EECQAEAAoBAgADAAEECQAFACABFAADAAEECQAGAAoBRwBD"
"AG8AcAB5AHIAaQBnAGgAdAAgACgAYwApACAAMgAwADAAOA"
"AgAE0AbwB6AGkAbABsAGEAIABDAG8AcgBwAG8AcgBhAHQA"
"aQBvAG4AAENvcHlyaWdodCAoYykgMjAwOCBNb3ppbGxhIE"
"NvcnBvcmF0aW9uAABNAGEAcgBrAEEAAE1hcmtBAABNAGUA"
"ZABpAHUAbQAATWVkaXVtAABGAG8AbgB0AEYAbwByAGcAZQ"
"AgADIALgAwACAAOgAgAE0AYQByAGsAQQAgADoAIAA1AC0A"
"MQAxAC0AMgAwADAAOAAARm9udEZvcmdlIDIuMCA6IE1hcm"
"tBIDogNS0xMS0yMDA4AABNAGEAcgBrAEEAAE1hcmtBAABW"
"AGUAcgBzAGkAbwBuACAAMAAwADEALgAwADAAMAAgAABWZX"
"JzaW9uIDAwMS4wMDAgAABNAGEAcgBrAEEAAE1hcmtBAAAA"
"AgAAAAAAAP+DADIAAAABAAAAAAAAAAAAAAAAAAAAAAAEAA"
"AAAQACACQAAAAAAAH//wACAAAAAQAAAADEPovuAAAAAMU4"
"Lm0AAAAAxTgubQ==");
def generate_report_headers_payload(request, server_data):
stashed_data = request.server.stash.take(request.GET["id"])
return stashed_data
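# How the two handlers pair up via the stash (flow sketch; URLs are
# illustrative):
#
#   1. GET ...?id=abc            stores the observed request headers under
#                                key "abc" and serves the embedded font
#   2. GET ...?report-headers&id=abc
#                                takes (and removes) the stashed JSON so
#                                the test can assert on those headers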
def main(request, response):
handler = lambda data: generate_payload(request, data)
content_type = 'application/x-font-truetype'
if "report-headers" in request.GET:
handler = lambda data: generate_report_headers_payload(request, data)
content_type = 'application/json'
subresource.respond(request,
response,
payload_generator = handler,
content_type = content_type,
access_control_allow_origin = "*")
| mpl-2.0 |
alan-unravel/bokeh | examples/charts/file/palettes.py | 35 | 2197 | from collections import OrderedDict
import numpy as np
from bokeh.charts import Area, output_file, gridplot, show
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9)
standard_palettes = OrderedDict([("Blues9", Blues9), ("BrBG9", BrBG9),
("BuGn9", BuGn9), ("BuPu9", BuPu9),
("GnBu9", GnBu9), ("Greens9", Greens9),
("Greys9", Greys9), ("OrRd9", OrRd9),
("Oranges9", Oranges9), ("PRGn9", PRGn9),
("PiYG9", PiYG9), ("PuBu9", PuBu9),
("PuBuGn9", PuBuGn9), ("PuOr9", PuOr9),
("PuRd9", PuRd9), ("Purples9", Purples9),
("RdBu9", RdBu9), ("RdGy9", RdGy9),
("RdPu9", RdPu9), ("RdYlBu9", RdYlBu9),
("RdYlGn9", RdYlGn9), ("Reds9", Reds9),
("Spectral9", Spectral9), ("YlGn9", YlGn9),
("YlGnBu9", YlGnBu9), ("YlOrBr9", YlOrBr9),
("YlOrRd9", YlOrRd9)])
def create_area_chart(data, palette):
_chart_styling = dict(height=300,
width=300,
xgrid=False,
ygrid=False,
tools=None)
return Area(data,
title=palette,
stacked=True,
palette=standard_palettes.get(palette),
**_chart_styling)
data = np.random.random_integers(low=5, high=13, size=[9,20])
area_charts = [create_area_chart(data, palette)
for palette
in standard_palettes.keys()]
area_charts = np.reshape(area_charts, newshape=[9,3]).tolist()
output_file('palettes.html', title='palettes.py example')
show(gridplot(area_charts))
| bsd-3-clause |
realsobek/freeipa | ipatests/test_webui/test_service.py | 4 | 9526 | # Authors:
# Petr Vobornik <pvoborni@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Service tests
"""
from ipatests.test_webui.ui_driver import UI_driver
from ipatests.test_webui.ui_driver import screenshot
import pytest
ENTITY = 'service'
@pytest.mark.tier1
class service_tasks(UI_driver):
def prep_data(self):
host = self.config.get('ipa_server')
realm = self.config.get('ipa_realm')
pkey = 'itest'
return {
'pkey': '%s/%s@%s' % (pkey, host, realm),
'add': [
('textbox', 'service', pkey),
('combobox', 'host', host)
],
'mod': [
('checkbox', 'ipakrbokasdelegate', None),
],
}
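# With, say, ipa_server=ipa.example.test and ipa_realm=EXAMPLE.TEST
# (hypothetical config values), prep_data() yields the principal
# 'itest/ipa.example.test@EXAMPLE.TEST' as pkey; basic_crud() below then
# adds, modifies and deletes the service entry using that data.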
def load_file(self, path):
# ENHANCEMENT: generate csr dynamically
with open(path, 'r') as file_d:
content = file_d.read()
return content
def get_http_pkey(self):
host = self.config.get('ipa_server')
realm = self.config.get('ipa_realm')
pkey = 'HTTP/%s@%s' % (host, realm)
return pkey
@pytest.mark.tier1
class test_service(service_tasks):
@screenshot
def test_crud(self):
"""
Basic CRUD: service
"""
self.init_app()
data = self.prep_data()
self.basic_crud(ENTITY, data)
@screenshot
def test_certificates(self):
"""
Test service certificate actions
Requires a CA to be installed and the 'service_csr_path' configuration
option to be set.
"""
if not self.has_ca():
self.skip('CA is not configured')
csr_path = self.config.get('service_csr_path')
if not csr_path:
self.skip('CSR file is not configured')
self.init_app()
data = self.prep_data()
pkey = data.get('pkey')
csr = self.load_file(csr_path)
cert_widget_sel = "div.certificate-widget"
self.add_record(ENTITY, data)
self.navigate_to_record(pkey)
# cert request
self.action_list_action('request_cert', confirm=False)
self.assert_dialog()
self.fill_text("textarea[name='csr'", csr)
self.dialog_button_click('issue')
self.wait_for_request(n=2, d=3)
self.assert_visible(cert_widget_sel)
# cert view
self.action_list_action('view', confirm=False,
parents_css_sel=cert_widget_sel)
self.assert_dialog()
self.dialog_button_click('close')
# cert get
self.action_list_action('get', confirm=False,
parents_css_sel=cert_widget_sel)
self.assert_dialog()
# check that text area is not empty
self.assert_empty_value('textarea.certificate', negative=True)
self.dialog_button_click('close')
# cert download - we can only try to click the download action
self.action_list_action('download', confirm=False,
parents_css_sel=cert_widget_sel)
# check that revoke action is enabled
self.assert_action_list_action('revoke',
parents_css_sel=cert_widget_sel,
facet_actions=False)
# check that remove_hold action is not enabled
self.assert_action_list_action('remove_hold', enabled=False,
parents_css_sel=cert_widget_sel,
facet_actions=False)
# cert revoke
self.action_list_action('revoke', confirm=False,
parents_css_sel=cert_widget_sel)
self.wait()
self.select('select', '6')
self.dialog_button_click('ok')
self.wait_for_request(n=2, d=3)
self.assert_visible(cert_widget_sel + " div.watermark")
# check that revoke action is not enabled
self.assert_action_list_action('revoke', enabled=False,
parents_css_sel=cert_widget_sel,
facet_actions=False)
# check that remove_hold action is enabled
self.assert_action_list_action('remove_hold',
parents_css_sel=cert_widget_sel,
facet_actions=False)
# cert remove hold
self.action_list_action('remove_hold', confirm=False,
parents_css_sel=cert_widget_sel)
self.wait()
self.dialog_button_click('ok')
self.wait_for_request(n=2)
# check that revoke action is enabled
self.assert_action_list_action('revoke',
parents_css_sel=cert_widget_sel,
facet_actions=False)
# check that remove_hold action is not enabled
self.assert_action_list_action('remove_hold', enabled=False,
parents_css_sel=cert_widget_sel,
facet_actions=False)
# cleanup
self.navigate_to_entity(ENTITY, 'search')
self.delete_record(pkey, data.get('del'))
@screenshot
def test_arbitrary_certificates(self):
"""
Test managing service arbitrary certificate.
Requires the 'arbitrary_cert_path' configuration option to be set.
"""
cert_path = self.config.get('arbitrary_cert_path')
if not cert_path:
self.skip('Arbitrary certificate file is not configured')
self.init_app()
data = self.prep_data()
pkey = data.get('pkey')
cert = self.load_file(cert_path)
cert_widget_sel = "div.certificate-widget"
self.add_record(ENTITY, data)
self.navigate_to_record(pkey)
# check whether certificate section is present
self.assert_visible("div[name='certificate']")
# add certificate
self.button_click('add', parents_css_sel="div[name='certificate']")
self.assert_dialog()
self.fill_textarea('new_cert', cert)
self.dialog_button_click('add')
self.assert_visible(cert_widget_sel)
# cert view
self.action_list_action('view', confirm=False,
parents_css_sel=cert_widget_sel)
self.assert_dialog()
self.dialog_button_click('close')
# cert get
self.action_list_action('get', confirm=False,
parents_css_sel=cert_widget_sel)
self.assert_dialog()
# check that the textarea is not empty
self.assert_empty_value('textarea.certificate', negative=True)
self.dialog_button_click('close')
# cert download - we can only try to click the download action
self.action_list_action('download', confirm=False,
parents_css_sel=cert_widget_sel)
# check that revoke action is not enabled
self.assert_action_list_action('revoke', enabled=False,
parents_css_sel=cert_widget_sel,
facet_actions=False)
# check that remove_hold action is not enabled
self.assert_action_list_action('remove_hold', enabled=False,
parents_css_sel=cert_widget_sel,
facet_actions=False)
# cleanup
self.navigate_to_entity(ENTITY, 'search')
self.delete_record(pkey, data.get('del'))
@screenshot
def test_ca_less(self):
"""
Test service certificate actions in CA-less install
http://www.freeipa.org/page/V3/CA-less_install
"""
if self.has_ca():
self.skip('CA is installed')
self.init_app()
data = self.prep_data()
pkey = data.get('pkey')
self.add_record(ENTITY, data)
self.navigate_to_record(pkey)
self.assert_action_list_action('request_cert', visible=False)
self.navigate_by_breadcrumb('Services')
self.delete_record(pkey, data.get('del'))
@screenshot
def test_kerberos_flags(self):
"""
Test Kerberos flags
http://www.freeipa.org/page/V3/Kerberos_Flags
"""
pkey = self.get_http_pkey()
name = 'ipakrbokasdelegate'
mod = {'mod': [('checkbox', name, None)]}
checked = ['checked']
self.init_app()
self.navigate_to_record(pkey, entity=ENTITY)
if self.get_field_checked(name) == checked:
self.mod_record(ENTITY, mod) # uncheck
self.mod_record(ENTITY, mod)
self.validate_fields([('checkbox', name, checked)])
self.mod_record(ENTITY, mod)
self.validate_fields([('checkbox', name, [])])
| gpl-3.0 |
brendanebers/geolocation-python | geolocation/distance_matrix/main.py | 4 | 1666 | # encoding: utf-8
from geolocation.distance_matrix.api import DistanceMatrixApi
from geolocation.distance_matrix.models import DistanceMatrixModel
from geolocation.distance_matrix.parser import DistanceMatrixParser
from geolocation.managers import Manager
class DistanceMatrix():
parser = DistanceMatrixParser()
manager = Manager()
def __init__(self, api_key):
self.api = DistanceMatrixApi(api_key)
def to_python(self, json_data):
"""Method should converts json_data to python object."""
self.manager.clear() # always clear manager data.
self.parser.json_data = json_data
origins = self.parser.get_origin()
destinations = self.parser.get_destination()
rows = self.parser.get_rows()
origin_counter = 0
for origin in origins:
destination_counter = 0
for element in rows[origin_counter].get('elements'):
self.parser.json_data = element
model = DistanceMatrixModel()
model.origin = origin
model.destination = destinations[destination_counter]
model.distance = self.parser.get_distance()
model.duration = self.parser.get_duration()
self.manager.data.add(model)
destination_counter += 1
origin_counter += 1
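# Usage sketch (hypothetical API key and place names; a live call to the
# Google Distance Matrix API is assumed):
#   matrix = DistanceMatrix('YOUR_API_KEY')
#   manager = matrix.distance(['Warsaw'], ['Berlin'], 'driving')
#   # manager.data now holds one DistanceMatrixModel per origin/destination
#   # pair, each with origin, destination, distance and duration filled in.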
def distance(self, origins, destinations, mode, avoid=None):
"""Method returns distance between origins and destination."""
json_data = self.api.query(origins, destinations, mode, avoid)
if json_data:
self.to_python(json_data)
return self.manager
| bsd-3-clause |
giggsey/SickRage | lib/subliminal/converters/podnapisi.py | 32 | 1960 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from babelfish import LanguageReverseConverter, LanguageConvertError, LanguageReverseError
class PodnapisiConverter(LanguageReverseConverter):
def __init__(self):
self.from_podnapisi = {2: ('eng',), 28: ('spa',), 26: ('pol',), 36: ('srp',), 1: ('slv',), 38: ('hrv',),
9: ('ita',), 8: ('fra',), 48: ('por', 'BR'), 23: ('nld',), 12: ('ara',), 13: ('ron',),
33: ('bul',), 32: ('por',), 16: ('ell',), 15: ('hun',), 31: ('fin',), 30: ('tur',),
7: ('ces',), 25: ('swe',), 27: ('rus',), 24: ('dan',), 22: ('heb',), 51: ('vie',),
52: ('fas',), 5: ('deu',), 14: ('spa', 'AR'), 54: ('ind',), 47: ('srp', None, 'Cyrl'),
3: ('nor',), 20: ('est',), 10: ('bos',), 17: ('zho',), 37: ('slk',), 35: ('mkd',),
11: ('jpn',), 4: ('kor',), 29: ('sqi',), 6: ('isl',), 19: ('lit',), 46: ('ukr',),
44: ('tha',), 53: ('cat',), 56: ('sin',), 21: ('lav',), 40: ('cmn',), 55: ('msa',),
42: ('hin',), 50: ('bel',)}
self.to_podnapisi = {v: k for k, v in self.from_podnapisi.items()}
self.codes = set(self.from_podnapisi.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3,) in self.to_podnapisi:
return self.to_podnapisi[(alpha3,)]
if (alpha3, country) in self.to_podnapisi:
return self.to_podnapisi[(alpha3, country)]
if (alpha3, country, script) in self.to_podnapisi:
return self.to_podnapisi[(alpha3, country, script)]
raise LanguageConvertError(alpha3, country, script)
def reverse(self, podnapisi):
if podnapisi not in self.from_podnapisi:
raise LanguageReverseError(podnapisi)
return self.from_podnapisi[podnapisi]
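# Minimal sanity check of the mapping above (a sketch; the expected values
# are taken straight from from_podnapisi, so no network access is needed):
if __name__ == '__main__':
    converter = PodnapisiConverter()
    assert converter.convert('eng') == 2
    assert converter.reverse(48) == ('por', 'BR')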
| gpl-3.0 |
p990-slimrom/platform_external_chromium | chrome/common/extensions/docs/build/directory.py | 65 | 24652 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for parsing metadata about extension samples."""
import locale
import os
import os.path
import re
import hashlib
import zipfile
import simplejson as json
# Make sure we get consistent string sorting behavior by explicitly using the
# default C locale.
locale.setlocale(locale.LC_ALL, 'C')
def sorted_walk(path):
""" A version of os.walk that yields results in order sorted by name.
This is to prevent spurious docs changes due to os.walk returning items in a
filesystem-dependent order (by inode creation time, etc.).
"""
for base, dirs, files in os.walk(path):
dirs.sort()
files.sort()
yield base, dirs, files
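# For a tree containing, say, path/b/ and path/a/ (hypothetical names),
# plain os.walk may visit 'b' before 'a' depending on the filesystem;
# sorted_walk always visits 'a' first, keeping the generated docs stable.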
def parse_json_file(path, encoding="utf-8"):
""" Load the specified file and parse it as JSON.
Args:
path: Path to a file containing JSON-encoded data.
encoding: Encoding used in the file. Defaults to utf-8.
Returns:
A Python object representing the data encoded in the file.
Raises:
Exception: If the file could not be read or its contents could not be
parsed as JSON data.
"""
try:
json_file = open(path, 'r')
except IOError, msg:
raise Exception("Failed to read the file at %s: %s" % (path, msg))
try:
json_obj = json.load(json_file, encoding)
except ValueError, msg:
raise Exception("Failed to parse JSON out of file %s: %s" % (path, msg))
finally:
json_file.close()
return json_obj
class ApiManifest(object):
""" Represents the list of API methods contained in extension_api.json """
_MODULE_DOC_KEYS = ['functions', 'events']
""" Keys which may be passed to the _parseModuleDocLinksByKey method."""
def __init__(self, manifest_path):
""" Read the supplied manifest file and parse its contents.
Args:
manifest_path: Path to extension_api.json
"""
self._manifest = parse_json_file(manifest_path)
def _getDocLink(self, method, hashprefix):
"""
Given an API method, return a partial URL corresponding to the doc
file for that method.
Args:
method: A string like 'chrome.foo.bar' or 'chrome.experimental.foo.onBar'
hashprefix: The prefix to put in front of hash links - 'method' for
methods and 'event' for events.
Returns:
A string like 'foo.html#method-bar' or 'experimental.foo.html#event-onBar'
"""
urlpattern = '%%s.html#%s-%%s' % hashprefix
urlparts = tuple(method.replace('chrome.', '').rsplit('.', 1))
return urlpattern % urlparts
def _parseModuleDocLinksByKey(self, module, key):
"""
Given a specific API module, returns a dict of methods or events mapped to
documentation URLs.
Args:
module: The data in extension_api.json corresponding to a single module.
key: A key belonging to _MODULE_DOC_KEYS to determine which set of
methods to parse, and what kind of documentation URL to generate.
Returns:
A dict of extension methods mapped to file and hash URL parts for the
corresponding documentation links, like:
{
"chrome.tabs.remove": "tabs.html#method-remove",
"chrome.tabs.onDetached" : "tabs.html#event-onDetatched"
}
If the API namespace is defined "nodoc" then an empty dict is returned.
Raises:
Exception: If the key supplied is not a member of _MODULE_DOC_KEYS.
"""
methods = []
api_dict = {}
namespace = module['namespace']
if module.has_key('nodoc'):
return api_dict
if key not in self._MODULE_DOC_KEYS:
raise Exception("key %s must be one of %s" % (key, self._MODULE_DOC_KEYS))
if module.has_key(key):
methods.extend(module[key])
for method in methods:
method_name = 'chrome.%s.%s' % (namespace, method['name'])
hashprefix = 'method'
if key == 'events':
hashprefix = 'event'
api_dict[method_name] = self._getDocLink(method_name, hashprefix)
return api_dict
def getModuleNames(self):
""" Returns the names of individual modules in the API.
Returns:
The namespace """
# Exclude modules with a "nodoc" property.
return set(module['namespace'].encode() for module in self._manifest
if "nodoc" not in module)
def getDocumentationLinks(self):
""" Parses the extension_api.json manifest and returns a dict of all
events and methods for every module, mapped to relative documentation links.
Returns:
A dict of methods/events => partial doc links for every module.
"""
api_dict = {}
for module in self._manifest:
api_dict.update(self._parseModuleDocLinksByKey(module, 'functions'))
api_dict.update(self._parseModuleDocLinksByKey(module, 'events'))
return api_dict
class SamplesManifest(object):
""" Represents a manifest file containing information about the sample
extensions available in the codebase. """
def __init__(self, base_sample_path, base_dir, api_manifest):
""" Reads through the filesystem and obtains information about any Chrome
extensions which exist underneath the specified folder.
Args:
base_sample_path: The directory under which to search for samples.
base_dir: The base directory samples will be referenced from.
api_manifest: An instance of the ApiManifest class, which will indicate
which API methods are available.
"""
self._base_dir = base_dir
manifest_paths = self._locateManifestsFromPath(base_sample_path)
self._manifest_data = self._parseManifestData(manifest_paths, api_manifest)
def _locateManifestsFromPath(self, path):
"""
Returns a list of paths to sample extension manifest.json files.
Args:
base_path: Base path in which to start the search.
Returns:
A list of paths below base_path pointing at manifest.json files.
"""
manifest_paths = []
for root, directories, files in sorted_walk(path):
if 'manifest.json' in files:
directories = [] # Don't go any further down this tree
manifest_paths.append(os.path.join(root, 'manifest.json'))
if '.svn' in directories:
directories.remove('.svn') # Don't go into SVN metadata directories
return manifest_paths
def _parseManifestData(self, manifest_paths, api_manifest):
""" Returns metadata about the sample extensions given their manifest
paths.
Args:
manifest_paths: A list of paths to extension manifests
api_manifest: An instance of the ApiManifest class, which will indicate
which API methods are available.
Returns:
Manifest data containing a list of samples and available API methods.
"""
api_method_dict = api_manifest.getDocumentationLinks()
api_methods = api_method_dict.keys()
samples = []
for path in manifest_paths:
sample = Sample(path, api_methods, self._base_dir)
# Don't render apps
if sample.is_app() == False:
samples.append(sample)
def compareSamples(sample1, sample2):
""" Compares two samples as a sort comparator, by name then path. """
value = cmp(sample1['name'].upper(), sample2['name'].upper())
if value == 0:
value = cmp(sample1['path'], sample2['path'])
return value
samples.sort(compareSamples)
manifest_data = {'samples': samples, 'api': api_method_dict}
return manifest_data
def writeToFile(self, path):
""" Writes the contents of this manifest file as a JSON-encoded text file.
Args:
path: The path to write the samples manifest file to.
"""
manifest_text = json.dumps(self._manifest_data, indent=2,
sort_keys=True, separators=(',', ': '))
output_path = os.path.realpath(path)
try:
output_file = open(output_path, 'w')
except IOError, msg:
raise Exception("Failed to write the samples manifest file."
"The specific error was: %s." % msg)
output_file.write(manifest_text)
output_file.close()
def writeZippedSamples(self):
""" For each sample in the current manifest, create a zip file with the
sample contents in the sample's parent directory if no zip exists, or
update the zip file if the sample has been updated.
Returns:
A set of paths representing zip files which have been modified.
"""
modified_paths = []
for sample in self._manifest_data['samples']:
path = sample.write_zip()
if path:
modified_paths.append(path)
return modified_paths
class Sample(dict):
""" Represents metadata about a Chrome extension sample.
Extends dict so that it can be easily JSON serialized.
"""
def __init__(self, manifest_path, api_methods, base_dir):
""" Initializes a Sample instance given a path to a manifest.
Args:
manifest_path: A filesystem path to a manifest file.
api_methods: A list of strings containing all possible Chrome extension
API calls.
base_dir: The base directory where this sample will be referenced from -
paths will be made relative to this directory.
"""
self._base_dir = base_dir
self._manifest_path = manifest_path
self._manifest = parse_json_file(self._manifest_path)
self._locale_data = self._parse_locale_data()
# The following calls set data which will be serialized when converting
# this object to JSON.
source_data = self._parse_source_data(api_methods)
self['api_calls'] = source_data['api_calls']
self['source_files'] = source_data['source_files']
self['source_hash'] = source_data['source_hash']
self['name'] = self._parse_name()
self['description'] = self._parse_description()
self['icon'] = self._parse_icon()
self['features'] = self._parse_features()
self['protocols'] = self._parse_protocols()
self['path'] = self._get_relative_path()
self['search_string'] = self._get_search_string()
self['id'] = hashlib.sha1(self['path']).hexdigest()
self['zip_path'] = self._get_relative_zip_path()
_FEATURE_ATTRIBUTES = (
'browser_action',
'page_action',
'background_page',
'options_page',
'plugins',
'theme',
'chrome_url_overrides'
)
""" Attributes that will map to "features" if their corresponding key is
present in the extension manifest. """
_SOURCE_FILE_EXTENSIONS = ('.html', '.json', '.js', '.css', '.htm')
""" File extensions to files which may contain source code."""
_ENGLISH_LOCALES = ['en_US', 'en', 'en_GB']
""" Locales from which translations may be used in the sample gallery. """
def _get_localized_manifest_value(self, key):
""" Returns a localized version of the requested manifest value.
Args:
key: The manifest key whose value the caller wants translated.
Returns:
If the supplied value exists and contains a __MSG_token__ value, this
method will resolve the appropriate translation and return the result.
If no token exists, the manifest value will be returned. If the key does
not exist, an empty string will be returned.
Raises:
Exception: If the localized value for the given token could not be found.
"""
if self._manifest.has_key(key):
if self._manifest[key][:6] == '__MSG_':
try:
return self._get_localized_value(self._manifest[key])
except Exception, msg:
raise Exception("Could not translate manifest value for key %s: %s" %
(key, msg))
else:
return self._manifest[key]
else:
return ''
def _get_localized_value(self, message_token):
""" Returns the localized version of the requested MSG bundle token.
Args:
message_token: A message bundle token like __MSG_extensionName__.
Returns:
The translated text corresponding to the token, with any placeholders
automatically resolved and substituted in.
Raises:
Exception: If a message bundle token is not found in the translations.
"""
placeholder_pattern = re.compile('\$(\w*)\$')
token = message_token[6:-2]
if self._locale_data.has_key(token):
message = self._locale_data[token]['message']
placeholder_match = placeholder_pattern.search(message)
if placeholder_match:
# There are placeholders in the translation - substitute them.
placeholder_name = placeholder_match.group(1)
placeholders = self._locale_data[token]['placeholders']
if placeholders.has_key(placeholder_name.lower()):
placeholder_value = placeholders[placeholder_name.lower()]['content']
placeholder_token = '$%s$' % placeholder_name
message = message.replace(placeholder_token, placeholder_value)
return message
else:
raise Exception('Could not find localized string: %s' % message_token)
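# Worked example (hypothetical bundle): given locale data
#   {"extName": {"message": "Sample by $AUTHOR$",
#                "placeholders": {"author": {"content": "Chromium"}}}}
# _get_localized_value('__MSG_extName__') slices the token down to
# 'extName' and returns 'Sample by Chromium'.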
def _get_relative_path(self):
""" Returns a relative path from the supplied base dir to the manifest dir.
This method is used because we may not be able to rely on os.path.relpath
which was introduced in Python 2.6 and only works on Windows and Unix.
Since the example extensions should always be subdirectories of the
base sample manifest path, we can get a relative path through a simple
string substitution.
Returns:
A relative directory path from the sample manifest's directory to the
directory containing this sample's manifest.json.
"""
real_manifest_path = os.path.realpath(self._manifest_path)
real_base_path = os.path.realpath(self._base_dir)
return real_manifest_path.replace(real_base_path, '')\
.replace('manifest.json', '')[1:]
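# E.g. with base_dir '/src/samples' and a manifest at
# '/src/samples/gmail/manifest.json' (hypothetical paths), this
# returns 'gmail/'.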
def _get_relative_zip_path(self):
""" Returns a relative path from the base dir to the sample's zip file.
Intended for locating the zip file for the sample in the samples manifest.
Returns:
A relative directory path form the sample manifest's directory to this
sample's zip file.
"""
zip_filename = self._get_zip_filename()
zip_relpath = os.path.dirname(os.path.dirname(self._get_relative_path()))
return os.path.join(zip_relpath, zip_filename)
def _get_search_string(self):
""" Constructs a string to be used when searching the samples list.
To make the implementation of the JavaScript-based search very direct, a
string is constructed containing the title, description, API calls, and
features that this sample uses, and is converted to uppercase. This makes
JavaScript sample searching very fast and easy to implement.
Returns:
An uppercase string containing information to match on for searching
samples on the client.
"""
search_terms = [
self['name'],
self['description'],
]
search_terms.extend(self['features'])
search_terms.extend(self['api_calls'])
search_string = ' '.join(search_terms).replace('"', '')\
.replace('\'', '')\
.upper()
return search_string
def _get_zip_filename(self):
""" Returns the filename to be used for a generated zip of the sample.
Returns:
A string in the form of "<dirname>.zip" where <dirname> is the name
of the directory containing this sample's manifest.json.
"""
sample_path = os.path.realpath(os.path.dirname(self._manifest_path))
sample_dirname = os.path.basename(sample_path)
return "%s.zip" % sample_dirname
def _parse_description(self):
""" Returns a localized description of the extension.
Returns:
A localized version of the sample's description.
"""
return self._get_localized_manifest_value('description')
def _parse_features(self):
""" Returns a list of features the sample uses.
Returns:
A list of features the extension uses, as determined by
self._FEATURE_ATTRIBUTES.
"""
features = set()
for feature_attr in self._FEATURE_ATTRIBUTES:
if self._manifest.has_key(feature_attr):
features.add(feature_attr)
if self._uses_popup():
features.add('popup')
if self._manifest.has_key('permissions'):
for permission in self._manifest['permissions']:
split = permission.split('://')
if (len(split) == 1):
features.add(split[0])
return sorted(features)
def _parse_icon(self):
""" Returns the path to the 128px icon for this sample.
Returns:
The path to the 128px icon if defined in the manifest, None otherwise.
"""
if (self._manifest.has_key('icons') and
self._manifest['icons'].has_key('128')):
return self._manifest['icons']['128']
else:
return None
def _parse_locale_data(self):
""" Parses this sample's locale data into a dict.
Because the sample gallery is in English, this method only looks for
translations as defined by self._ENGLISH_LOCALES.
Returns:
A dict containing the translation keys and corresponding English text
for this extension.
Raises:
Exception: If the messages file cannot be read, or if it is improperly
formatted JSON.
"""
en_messages = {}
extension_dir_path = os.path.dirname(self._manifest_path)
for locale in self._ENGLISH_LOCALES:
en_messages_path = os.path.join(extension_dir_path, '_locales', locale,
'messages.json')
if (os.path.isfile(en_messages_path)):
break
if (os.path.isfile(en_messages_path)):
try:
en_messages_file = open(en_messages_path, 'r')
except IOError, msg:
raise Exception("Failed to read %s: %s" % (en_messages_path, msg))
en_messages_contents = en_messages_file.read()
en_messages_file.close()
try:
en_messages = json.loads(en_messages_contents)
except ValueError, msg:
raise Exception("File %s has a syntax error: %s" %
(en_messages_path, msg))
return en_messages
def _parse_name(self):
""" Returns a localized name for the extension.
Returns:
A localized version of the sample's name.
"""
return self._get_localized_manifest_value('name')
def _parse_protocols(self):
""" Returns a list of protocols this extension requests permission for.
Returns:
A list of every unique protocol listed in the manifest's permissions.
"""
protocols = []
if self._manifest.has_key('permissions'):
for permission in self._manifest['permissions']:
split = permission.split('://')
if (len(split) == 2) and (split[0] not in protocols):
protocols.append(split[0] + "://")
return protocols
def _parse_source_data(self, api_methods):
""" Iterates over the sample's source files and parses data from them.
Parses any files in the sample directory with known source extensions
(as defined in self._SOURCE_FILE_EXTENSIONS). For each file, this method:
1. Stores a relative path from the manifest.json directory to the file.
2. Searches through the contents of the file for chrome.* API calls.
3. Calculates a SHA1 digest for the contents of the file.
Args:
api_methods: A list of strings containing the potential
API calls that the extension sample could be making.
Raises:
Exception: If any of the source files cannot be read.
Returns:
A dictionary containing the keys/values:
'api_calls' A sorted list of API calls the sample makes.
'source_files' A sorted list of paths to files the extension uses.
'source_hash' A hash of the individual file hashes.
"""
data = {}
source_paths = []
source_hashes = []
api_calls = set()
base_path = os.path.realpath(os.path.dirname(self._manifest_path))
for root, directories, files in sorted_walk(base_path):
if '.svn' in directories:
directories.remove('.svn') # Don't go into SVN metadata directories
for file_name in files:
ext = os.path.splitext(file_name)[1]
if ext in self._SOURCE_FILE_EXTENSIONS:
# Add the file path to the list of source paths.
fullpath = os.path.realpath(os.path.join(root, file_name))
path = fullpath.replace(base_path, '')[1:]
source_paths.append(path)
# Read the contents and parse out API calls.
try:
code_file = open(fullpath, "r")
except IOError, msg:
raise Exception("Failed to read %s: %s" % (fullpath, msg))
code_contents = unicode(code_file.read(), errors="replace")
code_file.close()
for method in api_methods:
if (code_contents.find(method) > -1):
api_calls.add(method)
# Get a hash of the file contents for zip file generation.
hash = hashlib.sha1(code_contents.encode("ascii", "replace"))
source_hashes.append(hash.hexdigest())
data['api_calls'] = sorted(api_calls)
data['source_files'] = sorted(source_paths)
data['source_hash'] = hashlib.sha1(''.join(source_hashes)).hexdigest()
return data
def _uses_background(self):
""" Returns true if the extension defines a background page. """
return self._manifest.has_key('background_page')
def _uses_browser_action(self):
""" Returns true if the extension defines a browser action. """
return self._manifest.has_key('browser_action')
def _uses_content_scripts(self):
""" Returns true if the extension uses content scripts. """
return self._manifest.has_key('content_scripts')
def _uses_options(self):
""" Returns true if the extension defines an options page. """
return self._manifest.has_key('options_page')
def _uses_page_action(self):
""" Returns true if the extension uses a page action. """
return self._manifest.has_key('page_action')
def _uses_popup(self):
""" Returns true if the extension defines a popup on a page or browser
action. """
has_b_popup = (self._uses_browser_action() and
self._manifest['browser_action'].has_key('popup'))
has_p_popup = (self._uses_page_action() and
self._manifest['page_action'].has_key('popup'))
return has_b_popup or has_p_popup
def is_app(self):
""" Returns true if the extension has an 'app' section in its manifest."""
return self._manifest.has_key('app')
def write_zip(self):
""" Writes a zip file containing all of the files in this Sample's dir."""
sample_path = os.path.realpath(os.path.dirname(self._manifest_path))
sample_dirname = os.path.basename(sample_path)
sample_parentpath = os.path.dirname(sample_path)
zip_filename = self._get_zip_filename()
zip_path = os.path.join(sample_parentpath, zip_filename)
# we pass zip_manifest_path to zipfile.getinfo(), which chokes on
# backslashes, so don't rely on os.path.join, use forward slash on
# all platforms.
zip_manifest_path = sample_dirname + '/manifest.json'
zipfile.ZipFile.debug = 3
if os.path.isfile(zip_path):
try:
old_zip_file = zipfile.ZipFile(zip_path, 'r')
except IOError, msg:
raise Exception("Could not read zip at %s: %s" % (zip_path, msg))
except zipfile.BadZipfile, msg:
raise Exception("File at %s is not a zip file: %s" % (zip_path, msg))
try:
info = old_zip_file.getinfo(zip_manifest_path)
hash = info.comment
if hash == self['source_hash']:
return None # Hashes match - no need to generate file
except KeyError, msg:
pass # The old zip file doesn't contain a hash - overwrite
finally:
old_zip_file.close()
zip_file = zipfile.ZipFile(zip_path, 'w')
try:
for root, dirs, files in sorted_walk(sample_path):
if '.svn' in dirs:
dirs.remove('.svn')
for file in files:
# Absolute path to the file to be added.
abspath = os.path.realpath(os.path.join(root, file))
# Relative path to store the file in under the zip.
relpath = sample_dirname + abspath.replace(sample_path, "")
zip_file.write(abspath, relpath)
if file == 'manifest.json':
info = zip_file.getinfo(zip_manifest_path)
info.comment = self['source_hash']
except RuntimeError, msg:
raise Exception("Could not write zip at %s: %s" % (zip_path, msg))
finally:
zip_file.close()
return self._get_relative_zip_path()
| bsd-3-clause |
myarjunar/QGIS | python/plugins/db_manager/db_plugins/postgis/plugins/versioning/dlg_versioning.py | 9 | 11805 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Versioning plugin for DB Manager
Description : Set up versioning support for a table
Date : Mar 12, 2012
copyright : (C) 2012 by Giuseppe Sucameli
email : brush.tyler@gmail.com
Based on PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QDialog, QDialogButtonBox, QMessageBox, QApplication
from .ui_DlgVersioning import Ui_DlgVersioning
from .....dlg_db_error import DlgDbError
from ....plugin import BaseError, Table
class DlgVersioning(QDialog, Ui_DlgVersioning):
def __init__(self, item, parent=None):
QDialog.__init__(self, parent)
self.item = item
self.setupUi(self)
self.db = self.item.database()
self.schemas = self.db.schemas()
self.hasSchemas = self.schemas is not None
self.buttonBox.accepted.connect(self.onOK)
self.buttonBox.helpRequested.connect(self.showHelp)
self.populateSchemas()
self.populateTables()
if isinstance(item, Table):
index = self.cboTable.findText(self.item.name)
if index >= 0:
self.cboTable.setCurrentIndex(index)
self.cboSchema.currentIndexChanged.connect(self.populateTables)
# updates of SQL window
self.cboSchema.currentIndexChanged.connect(self.updateSql)
self.cboTable.currentIndexChanged.connect(self.updateSql)
self.chkCreateCurrent.stateChanged.connect(self.updateSql)
self.editPkey.textChanged.connect(self.updateSql)
self.editStart.textChanged.connect(self.updateSql)
self.editEnd.textChanged.connect(self.updateSql)
self.updateSql()
def populateSchemas(self):
self.cboSchema.clear()
if not self.hasSchemas:
self.hideSchemas()
return
index = -1
for schema in self.schemas:
self.cboSchema.addItem(schema.name)
if hasattr(self.item, 'schema') and schema.name == self.item.schema().name:
index = self.cboSchema.count() - 1
self.cboSchema.setCurrentIndex(index)
def hideSchemas(self):
self.cboSchema.setEnabled(False)
def populateTables(self):
self.tables = []
schemas = self.db.schemas()
if schemas is not None:
schema_name = self.cboSchema.currentText()
matching_schemas = [x for x in schemas if x.name == schema_name]
tables = matching_schemas[0].tables() if len(matching_schemas) > 0 else []
else:
tables = self.db.tables()
self.cboTable.clear()
for table in tables:
if table.type == table.VectorType: # contains geometry column?
self.tables.append(table)
self.cboTable.addItem(table.name)
def get_escaped_name(self, schema, table, suffix):
name = self.db.connector.quoteId(u"%s%s" % (table, suffix))
schema_name = self.db.connector.quoteId(schema) if schema else None
return u"%s.%s" % (schema_name, name) if schema_name else name
def updateSql(self):
if self.cboTable.currentIndex() < 0 or len(self.tables) < self.cboTable.currentIndex():
return
self.table = self.tables[self.cboTable.currentIndex()]
self.schematable = self.table.quotedName()
self.current = self.chkCreateCurrent.isChecked()
self.colPkey = self.db.connector.quoteId(self.editPkey.text())
self.colStart = self.db.connector.quoteId(self.editStart.text())
self.colEnd = self.db.connector.quoteId(self.editEnd.text())
self.columns = [self.db.connector.quoteId(x.name) for x in self.table.fields()]
self.colOrigPkey = None
for constr in self.table.constraints():
if constr.type == constr.TypePrimaryKey:
self.origPkeyName = self.db.connector.quoteId(constr.name)
self.colOrigPkey = [self.db.connector.quoteId(x_y[1].name) for x_y in iter(list(constr.fields().items()))]
break
if self.colOrigPkey is None:
self.txtSql.setPlainText("Table doesn't have a primary key!")
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
return
elif len(self.colOrigPkey) > 1:
self.txtSql.setPlainText("Table has multicolumn primary key!")
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
return
# take the first (and only) column of the pkey
self.colOrigPkey = self.colOrigPkey[0]
# define view, function, rule and trigger names
self.view = self.get_escaped_name(self.table.schemaName(), self.table.name, "_current")
self.func_at_time = self.get_escaped_name(self.table.schemaName(), self.table.name, "_at_time")
self.func_update = self.get_escaped_name(self.table.schemaName(), self.table.name, "_update")
self.func_insert = self.get_escaped_name(self.table.schemaName(), self.table.name, "_insert")
self.rule_del = self.get_escaped_name(None, self.table.name, "_del")
self.trigger_update = self.get_escaped_name(None, self.table.name, "_update")
self.trigger_insert = self.get_escaped_name(None, self.table.name, "_insert")
sql = []
# modify table: add serial column, start time, end time
sql.append(self.sql_alterTable())
# add primary key to the table
sql.append(self.sql_setPkey())
sql.append(self.sql_currentView())
# add X_at_time, X_update, X_delete functions
sql.append(self.sql_functions())
# add insert, update trigger, delete rule
sql.append(self.sql_triggers())
# add _current view + updatable
# if self.current:
sql.append(self.sql_updatesView())
self.txtSql.setPlainText(u'\n\n'.join(sql))
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
return sql
def showHelp(self):
helpText = u"""In this dialog you can set up versioning support for a table. The table will be modified so that all changes will be recorded: there will be a column with start time and end time. Every row will have its start time, end time is assigned when the feature gets deleted. When a row is modified, the original data is marked with end time and new row is created. With this system, it's possible to get back to state of the table any time in history. When selecting rows from the table, you will always have to specify at what time do you want the rows."""
QMessageBox.information(self, "Help", helpText)
def sql_alterTable(self):
return u"ALTER TABLE %s ADD %s serial, ADD %s timestamp, ADD %s timestamp;" % (
self.schematable, self.colPkey, self.colStart, self.colEnd)
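# For a hypothetical table "public"."roads" with columns named
# "version_id"/"time_start"/"time_end" in the dialog, this yields:
#   ALTER TABLE "public"."roads" ADD "version_id" serial,
#       ADD "time_start" timestamp, ADD "time_end" timestamp;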
def sql_setPkey(self):
return u"ALTER TABLE %s DROP CONSTRAINT %s, ADD PRIMARY KEY (%s);" % (
self.schematable, self.origPkeyName, self.colPkey)
def sql_currentView(self):
cols = ",".join(self.columns)
return u"CREATE VIEW %(view)s AS SELECT %(cols)s FROM %(schematable)s WHERE %(end)s IS NULL;" % \
{'view': self.view, 'cols': cols, 'schematable': self.schematable, 'end': self.colEnd}
def sql_functions(self):
cols = ",".join(self.columns)
old_cols = ",".join([u"OLD." + x for x in self.columns])
sql = u"""
CREATE OR REPLACE FUNCTION %(func_at_time)s(timestamp)
RETURNS SETOF %(view)s AS
$$
SELECT %(cols)s FROM %(schematable)s WHERE
( SELECT CASE WHEN %(end)s IS NULL THEN (%(start)s <= $1) ELSE (%(start)s <= $1 AND %(end)s > $1) END );
$$
LANGUAGE 'sql';
CREATE OR REPLACE FUNCTION %(func_update)s()
RETURNS TRIGGER AS
$$
BEGIN
IF OLD.%(end)s IS NOT NULL THEN
RETURN NULL;
END IF;
IF NEW.%(end)s IS NULL THEN
INSERT INTO %(schematable)s (%(cols)s, %(start)s, %(end)s) VALUES (%(oldcols)s, OLD.%(start)s, current_timestamp);
NEW.%(start)s = current_timestamp;
END IF;
RETURN NEW;
END;
$$
LANGUAGE 'plpgsql';
CREATE OR REPLACE FUNCTION %(func_insert)s()
RETURNS trigger AS
$$
BEGIN
if NEW.%(start)s IS NULL then
NEW.%(start)s = now();
NEW.%(end)s = null;
end if;
RETURN NEW;
END;
$$
LANGUAGE 'plpgsql';""" % {'view': self.view, 'schematable': self.schematable, 'cols': cols, 'oldcols': old_cols,
'start': self.colStart, 'end': self.colEnd, 'func_at_time': self.func_at_time,
'func_update': self.func_update, 'func_insert': self.func_insert}
return sql
def sql_triggers(self):
return u"""
CREATE RULE %(rule_del)s AS ON DELETE TO %(schematable)s
DO INSTEAD UPDATE %(schematable)s SET %(end)s = current_timestamp WHERE %(pkey)s = OLD.%(pkey)s AND %(end)s IS NULL;
CREATE TRIGGER %(trigger_update)s BEFORE UPDATE ON %(schematable)s
FOR EACH ROW EXECUTE PROCEDURE %(func_update)s();
CREATE TRIGGER %(trigger_insert)s BEFORE INSERT ON %(schematable)s
FOR EACH ROW EXECUTE PROCEDURE %(func_insert)s();""" % \
{'rule_del': self.rule_del, 'trigger_update': self.trigger_update, 'trigger_insert': self.trigger_insert,
'func_update': self.func_update, 'func_insert': self.func_insert, 'schematable': self.schematable,
'pkey': self.colPkey, 'end': self.colEnd}
def sql_updatesView(self):
cols = ",".join(self.columns)
new_cols = ",".join([u"NEW." + x for x in self.columns])
assign_cols = ",".join([u"%s = NEW.%s" % (x, x) for x in self.columns])
return u"""
CREATE OR REPLACE RULE "_DELETE" AS ON DELETE TO %(view)s DO INSTEAD
DELETE FROM %(schematable)s WHERE %(origpkey)s = old.%(origpkey)s;
CREATE OR REPLACE RULE "_INSERT" AS ON INSERT TO %(view)s DO INSTEAD
INSERT INTO %(schematable)s (%(cols)s) VALUES (%(newcols)s);
CREATE OR REPLACE RULE "_UPDATE" AS ON UPDATE TO %(view)s DO INSTEAD
UPDATE %(schematable)s SET %(assign)s WHERE %(origpkey)s = NEW.%(origpkey)s;""" % {'view': self.view,
'schematable': self.schematable,
'cols': cols, 'newcols': new_cols,
'assign': assign_cols,
'origpkey': self.colOrigPkey}
def onOK(self):
# execute and commit the code
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
sql = u"\n".join(self.updateSql())
self.db.connector._execute_and_commit(sql)
except BaseError as e:
DlgDbError.showError(e, self)
return
finally:
QApplication.restoreOverrideCursor()
QMessageBox.information(self, "good!", "everything went fine!")
self.accept()
| gpl-2.0 |
cancro7/gem5 | src/arch/x86/isa/insts/general_purpose/compare_and_test/test.py | 91 | 2776 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop TEST_M_R
{
ld t1, seg, sib, disp
and t0, t1, reg, flags=(OF, SF, ZF, PF, CF)
};
def macroop TEST_P_R
{
rdip t7
ld t1, seg, riprel, disp
and t0, t1, reg, flags=(OF, SF, ZF, PF, CF)
};
def macroop TEST_R_R
{
and t0, reg, regm, flags=(OF, SF, ZF, PF, CF)
};
def macroop TEST_M_I
{
ld t1, seg, sib, disp
limm t2, imm
and t0, t1, t2, flags=(OF, SF, ZF, PF, CF)
};
def macroop TEST_P_I
{
rdip t7
ld t1, seg, riprel, disp
limm t2, imm
and t0, t1, t2, flags=(OF, SF, ZF, PF, CF)
};
def macroop TEST_R_I
{
limm t1, imm
and t0, reg, t1, flags=(OF, SF, ZF, PF, CF)
};
'''
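# Each macroop above ANDs its operands into t0, which gem5's x86 microcode
# treats as a discard register, so TEST only updates the flag bits
# (OF, SF, ZF, PF, CF) without storing a result -- e.g. TEST_R_I covers
# forms like 'test rax, 0x1' (hypothetical assembly).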
| bsd-3-clause |
Monithon/Monithon-2.0 | reports/models.py | 1 | 2079 |
from django.contrib.gis.db import models
from monitor.models import MonitoringTeam
from projects.models import Monitorable
from customforms.models import Form as CustomForm, FormFieldOption
from customforms.models import FormField
import json
class Report(models.Model):
author = models.ForeignKey(MonitoringTeam, related_name="reports", null=True, blank=True)
author_text = models.CharField(max_length=1000, null=True, blank=True)
project = models.ForeignKey(Monitorable, related_name="reports", null=True, blank=True)
title = models.TextField(null=True, blank=True)
datetime = models.DateTimeField(auto_now_add=True)
finalized = models.BooleanField(default = False)
validated = models.BooleanField(default = False)
position = models.GeometryField(null=True, blank=True)
class Meta:
ordering = ["datetime"]
def __str__(self):
return self.title
def geoj(self):
return json.dumps(json.loads(self.position.json))
def description(self):
for form in self.forms.all():
for field in form.fields.all():
if "Descr" in field.field.label:
return field.value
return ""
class ReportForm(models.Model):
report = models.ForeignKey(Report, related_name="forms")
form = models.ForeignKey(CustomForm)
class ReportFormField(models.Model):
report_form = models.ForeignKey(ReportForm, related_name ="fields")
field = models.ForeignKey(FormFieldOption)
value = models.TextField()
class ReportImage(models.Model):
report = models.ForeignKey(Report)
image = models.ImageField(upload_to="uploads")
class ReportLink(models.Model):
report = models.ForeignKey(Report, related_name="links")
link = models.URLField(max_length=1000)
def panel_body(self):
try:
if "youtu" in self.link:
the_vid = self.link
if ".be" in self.link:
the_vid = the_vid.split("youtu.be/")[1]
else:
the_vid = the_vid.split("v=")[1]
return """<div class='panel-body'><iframe class="col-lg-12" height="315" src="//www.youtube.com/embed/%s" frameborder="0" allowfullscreen></iframe></div>""" % the_vid
else:
return ""
except:
return "" | gpl-2.0 |
ethanyoung/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
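# rc4-md5 derives the effective RC4 key as MD5(key || iv), so the same
# pre-shared key produces a different keystream for every connection IV.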
return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
Stanford-Legal-Tech-Design/legaltech-rapidpro | temba/orgs/migrations/0001_initial.py | 2 | 12340 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CreditAlert',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('threshold', models.IntegerField(help_text='The threshold this alert was sent for')),
('created_by', models.ForeignKey(related_name=b'orgs_creditalert_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
('modified_by', models.ForeignKey(related_name=b'orgs_creditalert_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Invitation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('email', models.EmailField(help_text='The email to which we send the invitation of the viewer', max_length=75, verbose_name='Email')),
('secret', models.CharField(help_text='a unique code associated with this invitation', unique=True, max_length=64, verbose_name='Secret')),
('host', models.CharField(help_text='The host this invitation was created on', max_length=32)),
('user_group', models.CharField(default='V', max_length=1, verbose_name='User Role', choices=[('A', 'Administrator'), ('E', 'Editor'), ('V', 'Viewer')])),
('created_by', models.ForeignKey(related_name=b'orgs_invitation_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
('modified_by', models.ForeignKey(related_name=b'orgs_invitation_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('name', models.CharField(max_length=128)),
('iso_code', models.CharField(max_length=4)),
('created_by', models.ForeignKey(related_name=b'orgs_language_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
('modified_by', models.ForeignKey(related_name=b'orgs_language_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Org',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('plan', models.CharField(default='FREE', help_text='What plan your organization is on', max_length=16, verbose_name='Plan', choices=[('FREE', 'Free Plan'), ('TRIAL', 'Trial'), ('TIER_39', 'Bronze'), ('TIER1', 'Silver'), ('TIER2', 'Gold (Legacy)'), ('TIER3', 'Platinum (Legacy)'), ('TIER_249', 'Gold'), ('TIER_449', 'Platinum')])),
('plan_start', models.DateTimeField(help_text='When the user switched to this plan', verbose_name='Plan Start', auto_now_add=True)),
('stripe_customer', models.CharField(help_text='Our Stripe customer id for your organization', max_length=32, null=True, verbose_name='Stripe Customer', blank=True)),
('language', models.CharField(choices=[(b'en-us', b'English'), (b'pt-br', b'Portuguese'), (b'fr', b'French'), (b'es', b'Spanish')], max_length=64, blank=True, help_text='The main language used by this organization', null=True, verbose_name='Language')),
('timezone', models.CharField(max_length=64, verbose_name='Timezone')),
('date_format', models.CharField(default='D', help_text='Whether day comes first or month comes first in dates', max_length=1, verbose_name='Date Format', choices=[('D', 'DD-MM-YYYY'), ('M', 'MM-DD-YYYY')])),
('webhook', models.CharField(max_length=255, null=True, verbose_name='Webhook', blank=True)),
('webhook_events', models.IntegerField(default=0, help_text='Which type of actions will trigger webhook events.', verbose_name='Webhook Events')),
('msg_last_viewed', models.DateTimeField(auto_now_add=True, verbose_name='Message Last Viewed')),
('flows_last_viewed', models.DateTimeField(auto_now_add=True, verbose_name='Flows Last Viewed')),
('config', models.TextField(help_text='More Organization specific configuration', null=True, verbose_name='Configuration')),
('slug', models.SlugField(null=True, error_messages={b'unique': 'This slug is not available'}, max_length=255, blank=True, unique=True, verbose_name='Slug')),
('is_anon', models.BooleanField(default=False, help_text='Whether this organization anonymizes the phone numbers of contacts within it')),
('administrators', models.ManyToManyField(help_text='The administrators in your organization', related_name='org_admins', verbose_name='Administrators', to=settings.AUTH_USER_MODEL)),
('country', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='locations.AdminBoundary', help_text='The country this organization should map results for.', null=True)),
('created_by', models.ForeignKey(related_name=b'orgs_org_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
('editors', models.ManyToManyField(help_text='The editors in your organization', related_name='org_editors', verbose_name='Editors', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(related_name=b'orgs_org_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
('primary_language', models.ForeignKey(related_name='orgs', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='orgs.Language', help_text='The primary language will be used for contacts with no language preference.', null=True)),
('viewers', models.ManyToManyField(help_text='The viewers in your organization', related_name='org_viewers', verbose_name='Viewers', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TopUp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('price', models.IntegerField(help_text='The price paid for the messages in this top up (in cents)', verbose_name='Price Paid')),
('credits', models.IntegerField(help_text='The number of credits bought in this top up', verbose_name='Number of Credits')),
('expires_on', models.DateTimeField(help_text='The date that this top up will expire', verbose_name='Expiration Date')),
('stripe_charge', models.CharField(help_text='The Stripe charge id for this charge', max_length=32, null=True, verbose_name='Stripe Charge Id', blank=True)),
('comment', models.CharField(help_text='Any comment associated with this topup, used when we credit accounts', max_length=255, null=True, blank=True)),
('created_by', models.ForeignKey(related_name=b'orgs_topup_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
('modified_by', models.ForeignKey(related_name=b'orgs_topup_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
('org', models.ForeignKey(related_name='topups', to='orgs.Org', help_text='The organization that was topped up')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language', models.CharField(default='en-us', help_text='Your preferred language', max_length=8, choices=[(b'en-us', b'English'), (b'pt-br', b'Portuguese'), (b'fr', b'French'), (b'es', b'Spanish')])),
('tel', models.CharField(help_text='Phone number for testing and recording voice flows', max_length=16, null=True, verbose_name='Phone Number', blank=True)),
('user', models.ForeignKey(related_name='settings', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='language',
name='org',
field=models.ForeignKey(related_name='languages', verbose_name='Org', to='orgs.Org'),
preserve_default=True,
),
migrations.AddField(
model_name='invitation',
name='org',
field=models.ForeignKey(related_name='invitations', verbose_name='Org', to='orgs.Org', help_text='The organization to which the account is invited to view'),
preserve_default=True,
),
migrations.AddField(
model_name='creditalert',
name='org',
field=models.ForeignKey(help_text='The organization this alert was triggered for', to='orgs.Org'),
preserve_default=True,
),
]
| agpl-3.0 |
HashUnlimited/Einsteinium-Unlimited | qa/rpc-tests/bip68-112-113-p2p.py | 41 | 27358 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks, only 100 of which signal readiness, in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
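# Sanity check of the signalling arithmetic described above (our own
# illustrative addition, not part of the original test): regtest BIP9 uses
# 144-block periods with a 108-block threshold, i.e. 75% of the blocks in
# a period must signal to move from STARTED to LOCKED_IN.
assert 108 == 144 * 3 // 4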
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
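# Hedged helper (our own sketch, not used by the test itself): decode one of
# the nSequence values built above back into its BIP 68 lock type and value.
# The 0x0000ffff value mask and the 512-second granularity come from the
# BIP 68 specification; the function name is ours.
def decode_bip68(nsequence):
    if nsequence & seq_disable_flag:
        return (None, None)  # bit 31 set: relative lock-time disabled
    value = nsequence & 0x0000ffff  # low 16 bits carry the locktime value
    if nsequence & seq_type_flag:
        return ('time', value * 512)  # bit 22 set: units of 512 seconds
    return ('height', value)  # bit 22 clear: units of blocks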
def all_rlt_txs(txarray):
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so we must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
| mit |
mikhaelharswanto/lge-kernel-gproj | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no parent with that name is found,
then create the function as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
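# Minimal illustration of the CallTree API above (our own sketch, not part
# of the original pipeline): build a two-level tree by hand and render it
# the same way main() does.
def _calltree_example():
	root = CallTree("Root (Nowhere)", None, None)
	irq = root.calls("do_IRQ", "1038.512874")
	irq.calls("handle_irq", "1038.512880")
	return repr(root)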
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
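# Illustrative only (our own addition): a typical function-tracer line and
# the (calltime, callee, caller) tuple parseLine() extracts from it. Field
# widths in real trace files vary.
def _parseLine_example():
	sample = "          <idle>-0     [001]  1038.512874: do_IRQ <-ret_from_intr"
	assert parseLine(sample) == ("1038.512874", "do_IRQ", "ret_from_intr")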
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
40223119/w16b_test | static/Brython3.1.1-20150328-091302/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
pass
def formatter_field_name_split(fieldname):
"""split the argument as a field name"""
_list = []
# split the dotted field name directly rather than iterating the
# string character by character
for _item in fieldname.split('.'):
is_attr = False # fix me: attribute/index detection not implemented
if re.match('\d+$', _item):
_list.append((int(_item), is_attr))
else:
_list.append((_item, is_attr))
return _list[0][0], iter(_list[1:])
def formatter_parser(*args,**kw):
"""parse the argument as a format string"""
assert len(args)==1
assert isinstance(args[0], str)
_result=[]
for _match in re.finditer("([^{]*)?(\{[^}]*\})?", args[0]):
_pre, _fmt = _match.groups()
if _fmt is None:
_result.append((_pre, None, None, None))
elif _fmt == '{}':
_result.append((_pre, '', '', None))
else:
_m=re.match("\{([^!]*)!?(.*)?\}", _fmt)
_name = _m.group(1) # field name portion before any '!'
_flags = _m.group(2) # conversion flags following '!'
_result.append((_pre, _name, _flags, None))
return _result
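# Hedged usage sketch (our own addition): how the two helpers above
# decompose a simple format string and field name; the expected values
# assume the corrected group handling above.
if __name__ == "__main__":
    for part in formatter_parser("x={0.attr}"):
        # yields (literal_text, field_name, flags, None) tuples, e.g.
        # ('x=', '0.attr', '', None), plus a trailing empty literal
        print(part)
    first, rest = formatter_field_name_split("0.attr")
    print(first, list(rest))  # -> 0 [('attr', False)]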
| gpl-3.0 |
RevelSystems/django | tests/gis_tests/layermap/tests.py | 30 | 14579 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from copy import copy
from decimal import Decimal
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.gdal import HAS_GDAL
from django.db import connection
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.utils._os import upath
if HAS_GDAL:
from django.contrib.gis.utils.layermapping import (LayerMapping,
LayerMapError, InvalidDecimal, InvalidString, MissingForeignKey)
from django.contrib.gis.gdal import DataSource
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping)
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
# Lists holding what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
@skipUnless(HAS_GDAL, "LayerMapTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class LayerMapTest(TestCase):
def test_init(self):
"Testing LayerMapping initialization."
# Model field that does not exist.
bad1 = copy(city_mapping)
bad1['foobar'] = 'FooField'
# Shapefile field that does not exist.
bad2 = copy(city_mapping)
bad2['name'] = 'Nombre'
# Nonexistent geographic field type.
bad3 = copy(city_mapping)
bad3['point'] = 'CURVE'
# Incrementing through the bad mapping dictionaries and
# ensuring that a LayerMapError is raised.
for bad_map in (bad1, bad2, bad3):
with self.assertRaises(LayerMapError):
LayerMapping(City, city_shp, bad_map)
# A LookupError should be thrown for bogus encodings.
with self.assertRaises(LookupError):
LayerMapping(City, city_shp, city_mapping, encoding='foobar')
def test_simple_layermap(self):
"Test LayerMapping import of a simple point shapefile."
# Setting up for the LayerMapping.
lm = LayerMapping(City, city_shp, city_mapping)
lm.save()
# There should be three cities in the shape file.
self.assertEqual(3, City.objects.count())
# Opening up the shapefile, and verifying the values in each
# of the features made it to the model.
ds = DataSource(city_shp)
layer = ds[0]
for feat in layer:
city = City.objects.get(name=feat['Name'].value)
self.assertEqual(feat['Population'].value, city.population)
self.assertEqual(Decimal(str(feat['Density'])), city.density)
self.assertEqual(feat['Created'].value, city.dt)
# Comparing the geometries.
pnt1, pnt2 = feat.geom, city.point
self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
self.assertAlmostEqual(pnt1.y, pnt2.y, 5)
def test_layermap_strict(self):
"Testing the `strict` keyword, and import of a LineString shapefile."
# When the `strict` keyword is set an error encountered will force
# the importation to stop.
with self.assertRaises(InvalidDecimal):
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True, strict=True)
Interstate.objects.all().delete()
# This LayerMapping should work b/c `strict` is not set.
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True)
# Two interstate should have imported correctly.
self.assertEqual(2, Interstate.objects.count())
# Verifying the values in the layer w/the model.
ds = DataSource(inter_shp)
# Only the first two features of this shapefile are valid.
valid_feats = ds[0][:2]
for feat in valid_feats:
istate = Interstate.objects.get(name=feat['Name'].value)
if feat.fid == 0:
self.assertEqual(Decimal(str(feat['Length'])), istate.length)
elif feat.fid == 1:
# Everything but the first two decimal digits were truncated,
# because the Interstate model's `length` field has decimal_places=2.
self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
for p1, p2 in zip(feat.geom, istate.path):
self.assertAlmostEqual(p1[0], p2[0], 6)
self.assertAlmostEqual(p1[1], p2[1], 6)
def county_helper(self, county_feat=True):
"Helper function for ensuring the integrity of the mapped County models."
for name, n, st in zip(NAMES, NUMS, STATES):
# Should only be one record b/c of `unique` keyword.
c = County.objects.get(name=name)
self.assertEqual(n, len(c.mpoly))
self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
# Multiple records because `unique` was not set.
if county_feat:
qs = CountyFeat.objects.filter(name=name)
self.assertEqual(n, qs.count())
def test_layermap_unique_multigeometry_fk(self):
"Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
# All the following should work.
try:
# Telling LayerMapping that we want no transformations performed on the data.
lm = LayerMapping(County, co_shp, co_mapping, transform=False)
# Specifying the source spatial reference system via the `source_srs` keyword.
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
# Unique may take tuple or string parameters.
for arg in ('name', ('name', 'mpoly')):
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
except Exception:
self.fail('No exception should be raised for proper use of keywords.')
# Testing invalid params for the `unique` keyword.
for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
# No source reference system defined in the shapefile, should raise an error.
if connection.features.supports_transform:
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
# Passing in invalid ForeignKey mapping parameters -- must be a dictionary
# mapping for the model the ForeignKey points to.
bad_fk_map1 = copy(co_mapping)
bad_fk_map1['state'] = 'name'
bad_fk_map2 = copy(co_mapping)
bad_fk_map2['state'] = {'nombre': 'State'}
self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
# There exist no State models for the ForeignKey mapping to work -- should raise
# a MissingForeignKey exception (this error would be ignored if the `strict`
# keyword is not set).
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
# Now creating the state models so the ForeignKey mapping may work.
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# If a mapping is specified as a collection, all OGR fields that
# are not collections will be converted into them. For example,
# a Point column would be converted to MultiPoint. Other things being done
# w/the keyword args:
# `transform=False`: Specifies that no transform is to be done; this
# has the effect of ignoring the spatial reference check (because the
# county shapefile does not have implicit spatial reference info).
#
# `unique='name'`: Creates models on the condition that they have
# unique county names; geometries from each feature however will be
# appended to the geometry collection of the unique model. Thus,
# all of the various islands in Honolulu county will be in one
# database record with a MULTIPOLYGON type.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
lm.save(silent=True, strict=True)
# A reference that doesn't use the unique keyword; a new database record will
# created for each polygon.
lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
lm.save(silent=True, strict=True)
# The county helper is called to ensure integrity of County models.
self.county_helper()
def test_fid_range_step(self):
"Tests the `fid_range` keyword and the `step` keyword of .save()."
# Function for clearing out all the counties before testing.
def clear_counties():
County.objects.all().delete()
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# Initializing the LayerMapping object to use in these tests.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
# Bad feature id ranges should raise a type error.
bad_ranges = (5.0, 'foo', co_shp)
for bad in bad_ranges:
self.assertRaises(TypeError, lm.save, fid_range=bad)
# Step keyword should not be allowed w/`fid_range`.
fr = (3, 5) # layer[3:5]
self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
lm.save(fid_range=fr)
# Features IDs 3 & 4 are for Galveston County, Texas -- only
# one model is returned because the `unique` keyword was set.
qs = County.objects.all()
self.assertEqual(1, qs.count())
self.assertEqual('Galveston', qs[0].name)
# Features IDs 5 and beyond for Honolulu County, Hawaii, and
# FID 0 is for Pueblo County, Colorado.
clear_counties()
lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
# Only Pueblo & Honolulu counties should be present because of
# the `unique` keyword. Have to set `order_by` on this QuerySet
# or else MySQL will return a different ordering than the other dbs.
qs = County.objects.order_by('name')
self.assertEqual(2, qs.count())
hi, co = tuple(qs)
hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
self.assertEqual('Pueblo', co.name)
self.assertEqual(NUMS[co_idx], len(co.mpoly))
self.assertEqual('Honolulu', hi.name)
self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
# Testing the `step` keyword -- should get the same counties
# regardless of whether we use a step that divides equally, one that is odd,
# or that is larger than the dataset.
for st in (4, 7, 1000):
clear_counties()
lm.save(step=st, strict=True)
self.county_helper(county_feat=False)
def test_model_inheritance(self):
"Tests LayerMapping on inherited models. See #12093."
icity_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'point': 'POINT',
'dt': 'Created',
}
# Parent model has geometry field.
lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
lm1.save()
# Grandparent has geometry field.
lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
lm2.save()
self.assertEqual(6, ICity1.objects.count())
self.assertEqual(3, ICity2.objects.count())
def test_invalid_layer(self):
"Tests LayerMapping on invalid geometries. See #15378."
invalid_mapping = {'point': 'POINT'}
lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
source_srs=4326)
lm.save(silent=True)
def test_charfield_too_short(self):
mapping = copy(city_mapping)
mapping['name_short'] = 'Name'
lm = LayerMapping(City, city_shp, mapping)
with self.assertRaises(InvalidString):
lm.save(silent=True, strict=True)
def test_textfield(self):
"Tests that String content fits also in a TextField"
mapping = copy(city_mapping)
mapping['name_txt'] = 'Name'
lm = LayerMapping(City, city_shp, mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 3)
self.assertEqual(City.objects.get(name='Houston').name_txt, "Houston")
def test_encoded_name(self):
""" Test a layer containing utf-8-encoded name """
city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
lm = LayerMapping(City, city_shp, city_mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 1)
self.assertEqual(City.objects.all()[0].name, "Zürich")
class OtherRouter(object):
def db_for_read(self, model, **hints):
return 'other'
def db_for_write(self, model, **hints):
return self.db_for_read(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_migrate(self, db, app_label, **hints):
return True
@skipUnless(HAS_GDAL, "LayerMapRouterTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
@override_settings(DATABASE_ROUTERS=[OtherRouter()])
class LayerMapRouterTest(TestCase):
@unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
def test_layermapping_default_db(self):
lm = LayerMapping(City, city_shp, city_mapping)
self.assertEqual(lm.using, 'other')
| bsd-3-clause |
sumedhasingla/VTK | ThirdParty/Twisted/twisted/conch/test/test_recvline.py | 42 | 23851 | # -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.recvline} and fixtures for testing related
functionality.
"""
import sys, os
from twisted.conch.insults import insults
from twisted.conch import recvline
from twisted.python import reflect, components
from twisted.internet import defer, error
from twisted.trial import unittest
from twisted.cred import portal
from twisted.test.proto_helpers import StringTransport
class Arrows(unittest.TestCase):
def setUp(self):
self.underlyingTransport = StringTransport()
self.pt = insults.ServerProtocol()
self.p = recvline.HistoricRecvLine()
self.pt.protocolFactory = lambda: self.p
self.pt.factory = self
self.pt.makeConnection(self.underlyingTransport)
# self.p.makeConnection(self.pt)
def test_printableCharacters(self):
"""
When L{HistoricRecvLine} receives a printable character,
it adds it to the current line buffer.
"""
self.p.keystrokeReceived('x', None)
self.p.keystrokeReceived('y', None)
self.p.keystrokeReceived('z', None)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
def test_horizontalArrows(self):
"""
When L{HistoricRecvLine} receives an LEFT_ARROW or
RIGHT_ARROW keystroke it moves the cursor left or right
in the current line buffer, respectively.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.LEFT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
def test_newline(self):
"""
When L{HistoricRecvLine} receives a newline, it adds the current
line buffer to the end of its history buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('c')
kR('b')
kR('a')
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('\n')
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123', 'cba'), ()))
def test_verticalArrows(self):
"""
When L{HistoricRecvLine} receives UP_ARROW or DOWN_ARROW
keystrokes it moves the current index in the current history
buffer up or down, and resets the current line buffer to the
previous or next line in history, respectively for each.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc'), ('123',)))
self.assertEqual(self.p.currentLineBuffer(), ('123', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz',), ('abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('abc', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.UP_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
for i in range(4):
kR(self.pt.DOWN_ARROW)
self.assertEqual(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
def test_home(self):
"""
When L{HistoricRecvLine} receives a HOME keystroke it moves the
cursor to the beginning of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
self.assertEqual(self.p.currentLineBuffer(), ('', 'hello, world'))
def test_end(self):
"""
When L{HistoricRecvLine} receives an END keystroke it moves the cursor
to the end of the current line buffer.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
kR(self.pt.END)
self.assertEqual(self.p.currentLineBuffer(), ('hello, world', ''))
def test_backspace(self):
"""
When L{HistoricRecvLine} receives a BACKSPACE keystroke it deletes
the character immediately before the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('', 'y'))
kR(self.pt.BACKSPACE)
self.assertEqual(self.p.currentLineBuffer(), ('', 'y'))
def test_delete(self):
"""
When L{HistoricRecvLine} receives a DELETE keystroke, it
deletes the character immediately after the cursor.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('x', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.DELETE)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
def test_insert(self):
"""
When not in INSERT mode, L{HistoricRecvLine} inserts the typed
character at the cursor before the next character.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEqual(self.p.currentLineBuffer(), ('xyA', 'z'))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEqual(self.p.currentLineBuffer(), ('xyB', 'Az'))
def test_typeover(self):
"""
When in INSERT mode and upon receiving a keystroke with a printable
character, L{HistoricRecvLine} replaces the character at
the cursor with the typed character rather than inserting before.
Ah, the ironies of INSERT mode.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
kR(self.pt.INSERT)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEqual(self.p.currentLineBuffer(), ('xyA', ''))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEqual(self.p.currentLineBuffer(), ('xyB', ''))
def test_unprintableCharacters(self):
"""
When L{HistoricRecvLine} receives a keystroke for an unprintable
function key with no assigned behavior, the line buffer is unmodified.
"""
kR = lambda ch: self.p.keystrokeReceived(ch, None)
pt = self.pt
for ch in (pt.F1, pt.F2, pt.F3, pt.F4, pt.F5, pt.F6, pt.F7, pt.F8,
pt.F9, pt.F10, pt.F11, pt.F12, pt.PGUP, pt.PGDN):
kR(ch)
self.assertEqual(self.p.currentLineBuffer(), ('', ''))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.protocols import loopback
class EchoServer(recvline.HistoricRecvLine):
def lineReceived(self, line):
self.terminal.write(line + '\n' + self.ps[self.pn])
# An insults API for this would be nice.
left = "\x1b[D"
right = "\x1b[C"
up = "\x1b[A"
down = "\x1b[B"
insert = "\x1b[2~"
home = "\x1b[1~"
delete = "\x1b[3~"
end = "\x1b[4~"
backspace = "\x7f"
from twisted.cred import checkers
try:
from twisted.conch.ssh import userauth, transport, channel, connection, session
from twisted.conch.manhole_ssh import TerminalUser, TerminalSession, TerminalRealm, TerminalSessionTransport, ConchFactory
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
name = 'session'
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
channel.SSHChannel.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def channelOpen(self, data):
term = session.packRequest_pty_req("vt102", (self.height, self.width, 0, 0), '')
self.conn.sendRequest(self, 'pty-req', term)
self.conn.sendRequest(self, 'shell', '')
self._protocolInstance = self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs)
self._protocolInstance.factory = self
self._protocolInstance.makeConnection(self)
def closed(self):
self._protocolInstance.connectionLost(error.ConnectionDone())
def dataReceived(self, data):
self._protocolInstance.dataReceived(data)
class TestConnection(connection.SSHConnection):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
connection.SSHConnection.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def serviceStarted(self):
self.__channel = SessionChannel(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.openChannel(self.__channel)
def write(self, bytes):
return self.__channel.write(bytes)
class TestAuth(userauth.SSHUserAuthClient):
def __init__(self, username, password, *a, **kw):
userauth.SSHUserAuthClient.__init__(self, username, *a, **kw)
self.password = password
def getPassword(self):
return defer.succeed(self.password)
class TestTransport(transport.SSHClientTransport):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, username, password, width, height, *a, **kw):
# transport.SSHClientTransport.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.username = username
self.password = password
self.width = width
self.height = height
def verifyHostKey(self, hostKey, fingerprint):
return defer.succeed(True)
def connectionSecure(self):
self.__connection = TestConnection(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.requestService(
TestAuth(self.username, self.password, self.__connection))
def write(self, bytes):
return self.__connection.write(bytes)
class TestSessionTransport(TerminalSessionTransport):
def protocolFactory(self):
return self.avatar.conn.transport.factory.serverProtocol()
class TestSession(TerminalSession):
transportFactory = TestSessionTransport
class TestUser(TerminalUser):
pass
components.registerAdapter(TestSession, TestUser, session.ISession)
class LoopbackRelay(loopback.LoopbackRelay):
clearCall = None
def logPrefix(self):
return "LoopbackRelay(%r)" % (self.target.__class__.__name__,)
def write(self, bytes):
loopback.LoopbackRelay.write(self, bytes)
if self.clearCall is not None:
self.clearCall.cancel()
from twisted.internet import reactor
self.clearCall = reactor.callLater(0, self._clearBuffer)
def _clearBuffer(self):
self.clearCall = None
loopback.LoopbackRelay.clearBuffer(self)
class NotifyingExpectableBuffer(helper.ExpectableBuffer):
def __init__(self):
self.onConnection = defer.Deferred()
self.onDisconnection = defer.Deferred()
def connectionMade(self):
helper.ExpectableBuffer.connectionMade(self)
self.onConnection.callback(self)
def connectionLost(self, reason):
self.onDisconnection.errback(reason)
class _BaseMixin:
WIDTH = 80
HEIGHT = 24
def _assertBuffer(self, lines):
receivedLines = str(self.recvlineClient).splitlines()
expectedLines = lines + ([''] * (self.HEIGHT - len(lines) - 1))
self.assertEqual(len(receivedLines), len(expectedLines))
for i in range(len(receivedLines)):
self.assertEqual(
receivedLines[i], expectedLines[i],
str(receivedLines[max(0, i-1):i+1]) +
" != " +
str(expectedLines[max(0, i-1):i+1]))
def _trivialTest(self, input, output):
done = self.recvlineClient.expect("done")
self._testwrite(input)
def finished(ign):
self._assertBuffer(output)
return done.addCallback(finished)
class _SSHMixin(_BaseMixin):
def setUp(self):
if not ssh:
raise unittest.SkipTest("Crypto requirements missing, can't run historic recvline tests over ssh")
u, p = 'testuser', 'testpass'
rlm = TerminalRealm()
rlm.userFactory = TestUser
rlm.chainedProtocolFactory = lambda: insultsServer
ptl = portal.Portal(
rlm,
[checkers.InMemoryUsernamePasswordDatabaseDontUse(**{u: p})])
sshFactory = ConchFactory(ptl)
sshFactory.serverProtocol = self.serverProtocol
sshFactory.startFactory()
recvlineServer = self.serverProtocol()
insultsServer = insults.ServerProtocol(lambda: recvlineServer)
sshServer = sshFactory.buildProtocol(None)
clientTransport = LoopbackRelay(sshServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = insults.ClientProtocol(lambda: recvlineClient)
sshClient = TestTransport(lambda: insultsClient, (), {}, u, p, self.WIDTH, self.HEIGHT)
serverTransport = LoopbackRelay(sshClient)
sshClient.makeConnection(clientTransport)
sshServer.makeConnection(serverTransport)
self.recvlineClient = recvlineClient
self.sshClient = sshClient
self.sshServer = sshServer
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.sshClient.write(bytes)
from twisted.conch.test import test_telnet
class TestInsultsClientProtocol(insults.ClientProtocol,
test_telnet.TestProtocol):
pass
class TestInsultsServerProtocol(insults.ServerProtocol,
test_telnet.TestProtocol):
pass
class _TelnetMixin(_BaseMixin):
def setUp(self):
recvlineServer = self.serverProtocol()
insultsServer = TestInsultsServerProtocol(lambda: recvlineServer)
telnetServer = telnet.TelnetTransport(lambda: insultsServer)
clientTransport = LoopbackRelay(telnetServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = TestInsultsClientProtocol(lambda: recvlineClient)
telnetClient = telnet.TelnetTransport(lambda: insultsClient)
serverTransport = LoopbackRelay(telnetClient)
telnetClient.makeConnection(clientTransport)
telnetServer.makeConnection(serverTransport)
serverTransport.clearBuffer()
clientTransport.clearBuffer()
self.recvlineClient = recvlineClient
self.telnetClient = telnetClient
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.telnetClient.write(bytes)
try:
from twisted.conch import stdio
except ImportError:
stdio = None
class _StdioMixin(_BaseMixin):
def setUp(self):
# A memory-only terminal emulator, into which the server will
# write things and make other state changes. What ends up
# here is basically what a user would have seen on their
# screen.
testTerminal = NotifyingExpectableBuffer()
# An insults client protocol which will translate bytes
# received from the child process into keystroke commands for
# an ITerminalProtocol.
insultsClient = insults.ClientProtocol(lambda: testTerminal)
# A process protocol which will translate stdout and stderr
# received from the child process to dataReceived calls and
# error reporting on an insults client protocol.
processClient = stdio.TerminalProcessProtocol(insultsClient)
# Run twisted/conch/stdio.py with the name of a class
# implementing ITerminalProtocol. This class will be used to
# handle bytes we send to the child process.
exe = sys.executable
module = stdio.__file__
if module.endswith('.pyc') or module.endswith('.pyo'):
module = module[:-1]
args = [exe, module, reflect.qual(self.serverProtocol)]
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
from twisted.internet import reactor
clientTransport = reactor.spawnProcess(processClient, exe, args,
env=env, usePTY=True)
self.recvlineClient = self.testTerminal = testTerminal
self.processClient = processClient
self.clientTransport = clientTransport
# Wait for the process protocol and test terminal to become
# connected before proceeding. The former should always
# happen first, but it doesn't hurt to be safe.
return defer.gatherResults(filter(None, [
processClient.onConnection,
testTerminal.expect(">>> ")]))
def tearDown(self):
# Kill the child process. We're done with it.
try:
self.clientTransport.signalProcess("KILL")
except (error.ProcessExitedAlready, OSError):
pass
def trap(failure):
failure.trap(error.ProcessTerminated)
self.assertEqual(failure.value.exitCode, None)
self.assertEqual(failure.value.status, 9)
return self.testTerminal.onDisconnection.addErrback(trap)
def _testwrite(self, bytes):
self.clientTransport.write(bytes)
class RecvlineLoopbackMixin:
serverProtocol = EchoServer
def testSimple(self):
return self._trivialTest(
"first line\ndone",
[">>> first line",
"first line",
">>> done"])
def testLeftArrow(self):
return self._trivialTest(
insert + 'first line' + left * 4 + "xxxx\ndone",
[">>> first xxxx",
"first xxxx",
">>> done"])
def testRightArrow(self):
return self._trivialTest(
insert + 'right line' + left * 4 + right * 2 + "xx\ndone",
[">>> right lixx",
"right lixx",
">>> done"])
def testBackspace(self):
return self._trivialTest(
"second line" + backspace * 4 + "xxxx\ndone",
[">>> second xxxx",
"second xxxx",
">>> done"])
def testDelete(self):
return self._trivialTest(
"delete xxxx" + left * 4 + delete * 4 + "line\ndone",
[">>> delete line",
"delete line",
">>> done"])
def testInsert(self):
return self._trivialTest(
"third ine" + left * 3 + "l\ndone",
[">>> third line",
"third line",
">>> done"])
def testTypeover(self):
return self._trivialTest(
"fourth xine" + left * 4 + insert + "l\ndone",
[">>> fourth line",
"fourth line",
">>> done"])
def testHome(self):
return self._trivialTest(
insert + "blah line" + home + "home\ndone",
[">>> home line",
"home line",
">>> done"])
def testEnd(self):
return self._trivialTest(
"end " + left * 4 + end + "line\ndone",
[">>> end line",
"end line",
">>> done"])
class RecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, RecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run recvline tests over stdio"
class HistoricRecvlineLoopbackMixin:
serverProtocol = EchoServer
def testUpArrow(self):
return self._trivialTest(
"first line\n" + up + "\ndone",
[">>> first line",
"first line",
">>> first line",
"first line",
">>> done"])
def testDownArrow(self):
return self._trivialTest(
"first line\nsecond line\n" + up * 2 + down + "\ndone",
[">>> first line",
"first line",
">>> second line",
"second line",
">>> second line",
"second line",
">>> done"])
class HistoricRecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run historic recvline tests over stdio"
| bsd-3-clause |
jlubcke/csvpp | csvpp/csvpp.py | 1 | 1221 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from itertools import count
import sys
try:
# Python 2
from itertools import izip_longest
except ImportError:
# Python 3
from itertools import zip_longest as izip_longest
def csvpp(csv_input):
max_widths = []
max_indent = 0
for line in csv.reader(StringIO(csv_input)):
widths = [len(s.strip()) for s in line]
max_widths = list(map(max, izip_longest(max_widths, widths, fillvalue=0)))
indent = len(line[0]) - len(line[0].lstrip())
max_indent = max(max_indent, indent)
result = StringIO()
for line in csv.reader(StringIO(csv_input)):
result.write(u' ' * max_indent)
last_column = len(line) - 1
for value, max_width, column in zip(line, max_widths, count()):
value = value.strip()
result.write(u"" + value)
if column != last_column:
result.write(u", ")
result.write(u" " * (max_width - len(value)))
result.write(u'\n')
return result.getvalue()
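# Hedged example (editor addition): csvpp() pads each column to its widest
# stripped value, so "a,bb,c\nlonger,x,y\n" comes back as
#   a,      bb, c
#   longer, x,  y
# with every column starting at the same offset.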
def main():
csv_input = sys.stdin.read().decode('utf-8')
sys.stdout.write(csvpp(csv_input).encode('utf-8'))
| mit |
anshulkgupta/viznow | Mayank/blpapi_python3.5.5/blpapi/__init__.py | 2 | 2029 | # __init__.py
from __future__ import absolute_import
from .internals import CorrelationId
from .abstractsession import AbstractSession
from .constant import Constant, ConstantList
from .datetime import FixedOffset
from .datatype import DataType
from .element import Element
from .event import Event, EventQueue
from .eventdispatcher import EventDispatcher
from .eventformatter import EventFormatter
from .exception import *
from .identity import Identity
from .message import Message
from .name import Name
from .providersession import ProviderSession, ServiceRegistrationOptions
from .request import Request
from .resolutionlist import ResolutionList
from .schema import SchemaElementDefinition, SchemaStatus, SchemaTypeDefinition
from .service import Service
from .session import Session
from .sessionoptions import SessionOptions
from .subscriptionlist import SubscriptionList
from .topic import Topic
from .topiclist import TopicList
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| mit |
steedos/odoo7 | openerp/addons/project/__openerp__.py | 46 | 3184 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Management',
'version': '1.1',
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'category': 'Project Management',
'sequence': 8,
'summary': 'Projects, Tasks',
'images': [
'images/gantt.png',
'images/project_dashboard.jpeg',
'images/project_task_tree.jpeg',
'images/project_task.jpeg',
'images/project.jpeg',
'images/task_analysis.jpeg',
'images/project_kanban.jpeg',
'images/task_kanban.jpeg',
'images/task_stages.jpeg'
],
'depends': [
'base_setup',
'base_status',
'product',
'analytic',
'board',
'mail',
'resource',
'web_kanban'
],
'description': """
Track multi-level projects, tasks, work done on tasks
=====================================================
This application provides an operational project management system to organize your activities into tasks and plan the work you need to get them completed.
Gantt diagrams will give you a graphical representation of your project plans, as well as resources availability and workload.
Dashboard / Reports for Project Management will include:
--------------------------------------------------------
* My Tasks
* Open Tasks
* Tasks Analysis
* Cumulative Flow
""",
'data': [
'security/project_security.xml',
'wizard/project_task_delegate_view.xml',
'wizard/project_task_reevaluate_view.xml',
'security/ir.model.access.csv',
'project_data.xml',
'project_view.xml',
'process/task_process.xml',
'res_partner_view.xml',
'report/project_report_view.xml',
'report/project_cumulative.xml',
'board_project_view.xml',
'res_config_view.xml',
],
'demo': ['project_demo.xml'],
'test': [
'test/project_demo.yml',
'test/project_process.yml',
'test/task_process.yml',
],
'installable': True,
'auto_install': False,
'application': True,
'css': ['static/src/css/project.css'],
'js': ['static/src/js/project.js'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liyi193328/seq2seq | seq2seq/contrib/learn/session_run_hook.py | 144 | 1204 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file is deprecated. Use tensorflow.python.training.session_run_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
# pylint: disable=invalid-name
SessionRunHook = session_run_hook.SessionRunHook
SessionRunArgs = session_run_hook.SessionRunArgs
SessionRunContext = session_run_hook.SessionRunContext
SessionRunValues = session_run_hook.SessionRunValues
# pylint: enable=invalid-name
| apache-2.0 |
jensengrouppsu/rapid | rapid/pyqtgraph/graphicsItems/GraphicsLayout.py | 6 | 6134 | from ..Qt import QtGui, QtCore
from .. import functions as fn
from .GraphicsWidget import GraphicsWidget
## Must be imported at the end to avoid cyclic-dependency hell:
from .ViewBox import ViewBox
from .PlotItem import PlotItem
from .LabelItem import LabelItem
__all__ = ['GraphicsLayout']
class GraphicsLayout(GraphicsWidget):
"""
Used for laying out GraphicsWidgets in a grid.
This is usually created automatically as part of a :class:`GraphicsWindow <pyqtgraph.GraphicsWindow>` or :class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`.
"""
def __init__(self, parent=None, border=None):
GraphicsWidget.__init__(self, parent)
if border is True:
border = (100,100,100)
self.border = border
self.layout = QtGui.QGraphicsGridLayout()
self.setLayout(self.layout)
self.items = {} ## item: [(row, col), (row, col), ...] lists all cells occupied by the item
self.rows = {} ## row: {col1: item1, col2: item2, ...} maps cell location to item
self.currentRow = 0
self.currentCol = 0
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))
#def resizeEvent(self, ev):
#ret = GraphicsWidget.resizeEvent(self, ev)
#print self.pos(), self.mapToDevice(self.rect().topLeft())
#return ret
def setBorder(self, *args, **kwds):
"""
Set the pen used to draw border between cells.
See :func:`mkPen <pyqtgraph.mkPen>` for arguments.
"""
self.border = fn.mkPen(*args, **kwds)
self.update()
def nextRow(self):
"""Advance to next row for automatic item placement"""
self.currentRow += 1
self.currentCol = -1
self.nextColumn()
def nextColumn(self):
"""Advance to next available column
(generally only for internal use--called by addItem)"""
self.currentCol += 1
while self.getItem(self.currentRow, self.currentCol) is not None:
self.currentCol += 1
def nextCol(self, *args, **kargs):
"""Alias of nextColumn"""
return self.nextColumn(*args, **kargs)
def addPlot(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create a PlotItem and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`PlotItem.__init__ <pyqtgraph.PlotItem.__init__>`
Returns the created item.
"""
plot = PlotItem(**kargs)
self.addItem(plot, row, col, rowspan, colspan)
return plot
def addViewBox(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create a ViewBox and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`ViewBox.__init__ <pyqtgraph.ViewBox.__init__>`
Returns the created item.
"""
vb = ViewBox(**kargs)
self.addItem(vb, row, col, rowspan, colspan)
return vb
def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create a LabelItem with *text* and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`LabelItem.__init__ <pyqtgraph.LabelItem.__init__>`
Returns the created item.
To create a vertical label, use *angle* = -90.
"""
text = LabelItem(text, **kargs)
self.addItem(text, row, col, rowspan, colspan)
return text
def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create an empty GraphicsLayout and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`GraphicsLayout.__init__ <pyqtgraph.GraphicsLayout.__init__>`
Returns the created item.
"""
layout = GraphicsLayout(**kargs)
self.addItem(layout, row, col, rowspan, colspan)
return layout
def addItem(self, item, row=None, col=None, rowspan=1, colspan=1):
"""
Add an item to the layout and place it in the next available cell (or in the cell specified).
The item must be an instance of a QGraphicsWidget subclass.
"""
if row is None:
row = self.currentRow
if col is None:
col = self.currentCol
self.items[item] = []
for i in range(rowspan):
for j in range(colspan):
row2 = row + i
col2 = col + j
if row2 not in self.rows:
self.rows[row2] = {}
self.rows[row2][col2] = item
self.items[item].append((row2, col2))
self.layout.addItem(item, row, col, rowspan, colspan)
self.nextColumn()
def getItem(self, row, col):
"""Return the item in (*row*, *col*). If the cell is empty, return None."""
return self.rows.get(row, {}).get(col, None)
def boundingRect(self):
return self.rect()
def paint(self, p, *args):
if self.border is None:
return
p.setPen(fn.mkPen(self.border))
for i in self.items:
r = i.mapRectToParent(i.boundingRect())
p.drawRect(r)
def itemIndex(self, item):
for i in range(self.layout.count()):
if self.layout.itemAt(i).graphicsItem() is item:
return i
raise Exception("Could not determine index of item " + str(item))
def removeItem(self, item):
"""Remove *item* from the layout."""
ind = self.itemIndex(item)
self.layout.removeAt(ind)
self.scene().removeItem(item)
for r,c in self.items[item]:
del self.rows[r][c]
del self.items[item]
self.update()
def clear(self):
items = []
for i in list(self.items.keys()):
self.removeItem(i)
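# Hedged usage sketch (editor addition, not part of pyqtgraph): exercises the
# automatic cell placement documented above. Creating these items requires a
# running QApplication; the titles and the helper name are illustrative only.
def _graphics_layout_demo():
    layout = GraphicsLayout(border=(100, 100, 100))
    first = layout.addPlot(title="first")       # auto-placed in cell (0, 0)
    layout.addLabel("vertical", angle=-90)      # cell (0, 1); angle=-90 -> vertical label
    layout.nextRow()                            # advance placement to row 1
    second = layout.addPlot(col=0, colspan=2)   # row 1, spanning both columns
    return layout, first, second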
| mit |
amenonsen/ansible | lib/ansible/modules/storage/purestorage/purefa_smtp.py | 2 | 4503 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_smtp
version_added: '2.9'
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
short_description: Configure FlashArray SMTP settings
description:
- Set or erase configuration for the SMTP settings.
- If username/password are set this will always force a change as there is
no way to see if the password is different from the current SMTP configuration.
options:
state:
description:
- Set or delete SMTP configuration
default: present
type: str
choices: [ absent, present ]
password:
description:
- The SMTP password.
type: str
user:
description:
- The SMTP username.
type: str
relay_host:
description:
- IPv4 or IPv6 address or FQDN. A port number may be appended.
type: str
sender_domain:
description:
- Domain name.
type: str
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Delete existing SMTP settings
purefa_smtp:
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Set SMTP settings
purefa_smtp:
sender_domain: purestorage.com
password: account_password
user: smtp_account
relay_host: 10.2.56.78:2345
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def delete_smtp(module, array):
"""Delete SMTP settings"""
changed = True
if not module.check_mode:
try:
array.set_smtp(sender_domain='', username='', password='', relay_host='')
except Exception:
module.fail_json(msg='Delete SMTP settings failed')
module.exit_json(changed=changed)
def create_smtp(module, array):
"""Set SMTP settings"""
changed = True
current_smtp = array.get_smtp()
if not module.check_mode:
if module.params['sender_domain'] and current_smtp['sender_domain'] != module.params['sender_domain']:
try:
array.set_smtp(sender_domain=module.params['sender_domain'])
changed_sender = True
except Exception:
module.fail_json(msg='Set SMTP sender domain failed.')
else:
changed_sender = False
if module.params['relay_host'] and current_smtp['relay_host'] != module.params['relay_host']:
try:
array.set_smtp(relay_host=module.params['relay_host'])
changed_relay = True
except Exception:
module.fail_json(msg='Set SMTP relay host failed.')
else:
changed_relay = False
if module.params['user']:
try:
array.set_smtp(user_name=module.params['user'], password=module.params['password'])
changed_creds = True
except Exception:
module.fail_json(msg='Set SMTP username/password failed.')
else:
changed_creds = False
changed = bool(changed_sender or changed_relay or changed_creds)
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
sender_domain=dict(type='str'),
password=dict(type='str', no_log=True),
user=dict(type='str'),
relay_host=dict(type='str'),
))
required_together = [['user', 'password']]
module = AnsibleModule(argument_spec,
required_together=required_together,
supports_check_mode=True)
state = module.params['state']
array = get_system(module)
if state == 'absent':
delete_smtp(module, array)
elif state == 'present':
create_smtp(module, array)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
solintegra/addons | account_treasury_forecast/models/account_treasury_forecast_template.py | 31 | 3739 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api, _
class AccountTreasuryForecastTemplate(models.Model):
_name = 'account.treasury.forecast.template'
_description = 'Treasury Forecast Template'
name = fields.Char(string="Description", required=True)
recurring_line_ids = fields.One2many(
"account.treasury.forecast.line.template", "treasury_template_id",
string="Recurring Line", domain=[('line_type', '=', 'recurring')])
variable_line_ids = fields.One2many(
"account.treasury.forecast.line.template", "treasury_template_id",
string="Variable Line", domain=[('line_type', '=', 'variable')])
class AccountTreasuryForecastLineTemplate(models.Model):
_name = 'account.treasury.forecast.line.template'
_description = 'Treasury Forecast Line Template'
name = fields.Char(string="Description", required=True)
date = fields.Date(string="Date")
line_type = fields.Selection([('recurring', 'Recurring'),
('variable', 'Variable')],
string="Treasury Line Type")
partner_id = fields.Many2one("res.partner", string="Partner")
journal_id = fields.Many2one("account.journal", string="Journal",
domain=[("type", "=", "purchase")])
invoice_id = fields.Many2one("account.invoice", string="Invoice",
domain=[("type", "=", "in_invoice")])
amount = fields.Float(string="Amount",
digits_compute=dp.get_precision('Account'))
paid = fields.Boolean(string="Invoiced/Paid")
treasury_template_id = fields.Many2one(
"account.treasury.forecast.template", string="Treasury Template")
@api.one
@api.onchange('invoice_id')
def onchange_invoice(self):
if self.invoice_id:
self.journal_id = self.invoice_id.journal_id.id
self.partner_id = self.invoice_id.partner_id.id
self.amount = self.invoice_id.amount_total
self.date = self.invoice_id.date_invoice
self.paid = True
@api.multi
def create_invoice(self):
wiz_obj = self.env['wiz.create.invoice']
inv_wiz_values = {'partner_id': self.partner_id.id,
'journal_id': self.journal_id.id,
'description': self.name,
'amount': self.amount,
'line_id': self.id
}
wiz_id = wiz_obj.create(inv_wiz_values)
return {'name': _('Create Invoice'),
'type': 'ir.actions.act_window',
'res_model': 'wiz.create.invoice',
'view_type': 'form',
'view_mode': 'form',
'res_id': wiz_id.id,
'target': 'new',
}
| agpl-3.0 |
samuto/UnityOpenCV | opencv/tests/swig_python/highgui/cvShowImage.py | 3 | 1288 | #! /usr/bin/env python
"""
This script will test highgui's window functionality
"""
# name of this test and it's requirements
TESTNAME = "cvShowImage"
REQUIRED = ["cvLoadImagejpg", "cvNamedWindow"]
# needed for sys.exit(int) and .works file handling
import os
import sys
import works
# path to imagefiles we need
PREFIX=os.path.join(os.environ["srcdir"],"../../opencv_extra/testdata/python/images/")
# check requirements and delete old flag file, if it exists
if not works.check_files(REQUIRED,TESTNAME):
sys.exit(77)
# import the necessary things for OpenCV
from highgui import *
from cv import *
# defined window name
win_name = "testing..."
# we expect a window to be createable, thanks to 'cvNamedWindow.works'
cvNamedWindow(win_name, CV_WINDOW_AUTOSIZE)
# we expect the image to be loadable, thanks to 'cvLoadImage.works'
image = cvLoadImage(PREFIX+"cvShowImage.jpg")
if image is None:
print "(ERROR) Couldn't load image "+PREFIX+"cvShowImage.jpg"
sys.exit(1)
# try to show image in window
res = cvShowImage( win_name, image )
cvWaitKey(0)
if res == 0:
cvReleaseImage(image)
cvDestroyWindow(win_name)
sys.exit(1)
# destroy window
cvDestroyWindow(win_name)
# create flag file for following tests
works.set_file(TESTNAME)
# return 0 ('PASS')
sys.exit(0)
| gpl-3.0 |
takis/django | django/conf/locale/sr_Latn/formats.py | 1008 | 2011 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
viswimmer1/PythonGenerator | data/python_files/29978636/wpp_newusers.py | 1 | 44626 | import codecs
import datetime
import functools
import httplib2
import itertools
import logging
from lxml import html
import mwclient
from pprint import pprint
import time
import unittest
import urllib
import urlparse
import MySQLdb
import MySQLdb.cursors
from wpp_settings import WPPRY_USER, WPPRY_PW, WPPRY_DB, WPPRY_HOST, WPPRY_PORT
# http://stackoverflow.com/questions/2348317/how-to-write-a-pager-for-python-iterators/2350904#2350904
def grouper(iterable, page_size):
page= []
for item in iterable:
page.append( item )
if len(page) == page_size:
yield page
page= []
if len(page):
yield page
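# Hedged example (editor addition): list(grouper(range(5), 2)) yields pages
# [[0, 1], [2, 3], [4]] -- the final page is short when the input does not
# divide evenly by page_size.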
def filter_none_value(d):
return dict([(k,v) for (k,v) in d.items() if v is not None])
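# e.g. filter_none_value({'start': None, 'end': end_time}) -> {'end': end_time};
# used below to drop unset start/end bounds before they reach the MediaWiki API.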
# http://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
class CommonEqualityMixin(object):
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
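# e.g. two EndOfUserToken instances (below) with the same user_name and
# timestamp compare equal, because __eq__ compares class and __dict__ contents.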
class EndOfUserToken(CommonEqualityMixin):
def __init__(self,user_name,timestamp):
self.user_name = user_name
self.timestamp = timestamp
def __repr__(self):
return u"EndOfUserToken: " + unicode(self.user_name) + u"|" + unicode(self.timestamp)
def st_cmp(x,y):
if x is None and y is not None:
return -1 # x < y
elif y is None and x is not None:
return 1 # x > y
elif x is None and y is None:
return 0
elif x < y:
return -1
elif x == y:
return 0
else:
return 1
def et_cmp(x,y):
if x is None and y is not None:
return 1 # x > y
elif y is None and x is not None:
return -1 # x < y
elif x is None and y is None:
return 0
elif x < y:
return -1
elif x == y:
return 0
else:
return 1
def dt_in_range(dt, start, end):
""" is dt within the range from start to end (equality with start or end is considered in range; None for start or end means unspecified
Make sure all arguments are datetime.datetime"""
logging.debug("in dt_in_range...")
logging.debug("dt, start, end: %s | %s | %s " % (dt,start,end))
(dt2, start2, end2) = map(struct_time_to_datetime,(dt,start,end))
logging.debug("dt2, start2, end2: %s | %s | %s " % (dt2,start2,end2))
if start2 is not None:
start_condition = (dt2 >= start2)
else:
start_condition = True
if end2 is not None:
end_condition = (dt2 <= end2)
else:
end_condition = True
logging.debug("start_condition, end_condition: %s | %s " % (start_condition, end_condition))
return start_condition and end_condition
key_st_cmp = functools.cmp_to_key(st_cmp)
key_et_cmp = functools.cmp_to_key(et_cmp)
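# With these keys, min() over start times treats None as "no lower bound"
# (earliest) and max() over end times treats None as "no upper bound" (latest);
# contribs_for_users_2() below computes its page-wide search window that way.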
def struct_time_to_datetime(timestamp):
# don't touch None but allow it
if isinstance(timestamp, time.struct_time):
return datetime.datetime(*timestamp[:6])
else:
return timestamp
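# e.g. struct_time_to_datetime(time.gmtime(0)) -> datetime.datetime(1970, 1, 1, 0, 0);
# None and datetime.datetime inputs pass through unchanged.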
class TestComparison(unittest.TestCase):
def test_st_cmp(self):
self.assertEqual(st_cmp(datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1)), -1)
self.assertEqual(st_cmp(datetime.datetime(2010,9,10,0,0,2), datetime.datetime(2010,9,10,0,0,1)), 1)
self.assertEqual(st_cmp(datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,0)), 0)
self.assertEqual(st_cmp(None, datetime.datetime(2010,9,10,0,0,1)), -1)
self.assertEqual(st_cmp(datetime.datetime(2010,9,10,0,0,1), None), 1)
self.assertEqual(st_cmp(None, None), 0)
def test_start_times_sort(self):
start_times = [datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1), None]
sorted_start_times = sorted(start_times,cmp=st_cmp)
self.assertEqual(sorted_start_times, [None,datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1)])
def test_start_times_w_key(self):
start_times = [datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1), None]
sorted_start_times = sorted(start_times,key=key_st_cmp)
self.assertEqual(sorted_start_times, [None,datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1)])
def test_st_cmp_min_max(self):
start_times = [datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1), None]
self.assertEqual(min(start_times,key=key_st_cmp),None)
self.assertEqual(max(start_times,key=key_st_cmp),datetime.datetime(2010,9,10,0,0,1))
def test_et_cmp(self):
self.assertEqual(et_cmp(datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1)), -1)
self.assertEqual(et_cmp(datetime.datetime(2010,9,10,0,0,2), datetime.datetime(2010,9,10,0,0,1)), 1)
self.assertEqual(et_cmp(datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,0)), 0)
self.assertEqual(et_cmp(None, datetime.datetime(2010,9,10,0,0,1)), 1)
self.assertEqual(et_cmp(datetime.datetime(2010,9,10,0,0,1), None), -1)
self.assertEqual(et_cmp(None, None), 0)
def test_end_times_sort(self):
end_times = [datetime.datetime(2010,9,10,0,0,0), None, datetime.datetime(2010,9,10,0,0,1)]
sorted_end_times = sorted(end_times,cmp=et_cmp)
self.assertEqual(sorted_end_times, [datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1), None])
def test_end_times_w_key(self):
end_times = [datetime.datetime(2010,9,10,0,0,0), None, datetime.datetime(2010,9,10,0,0,1)]
sorted_end_times = sorted(end_times,key=key_et_cmp)
self.assertEqual(sorted_end_times, [datetime.datetime(2010,9,10,0,0,0), datetime.datetime(2010,9,10,0,0,1), None])
def test_et_cmp_min_max(self):
end_times = [datetime.datetime(2010,9,10,0,0,0), None, datetime.datetime(2010,9,10,0,0,1)]
self.assertEqual(min(end_times,key=key_et_cmp),datetime.datetime(2010,9,10,0,0,0))
self.assertEqual(max(end_times,key=key_et_cmp),None)
def test_dt_in_range(self):
# firmly in middle
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,9,10,0,0,0),start=datetime.datetime(2010,8,10,0,0,0),end=datetime.datetime(2010,10,10,0,0,0)),
True)
# dt = start
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,8,10,0,0,0),start=datetime.datetime(2010,8,10,0,0,0),end=datetime.datetime(2010,10,10,0,0,0)),
True)
# dt = end
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,10,10,0,0,0),start=datetime.datetime(2010,8,10,0,0,0),end=datetime.datetime(2010,10,10,0,0,0)),
True)
# dt < start
self.assertEqual(dt_in_range(dt=datetime.datetime(2009,9,10,0,0,0),start=datetime.datetime(2010,8,10,0,0,0),end=datetime.datetime(2010,10,10,0,0,0)),
False)
# dt > end
self.assertEqual(dt_in_range(dt=datetime.datetime(2011,9,10,0,0,0),start=datetime.datetime(2010,8,10,0,0,0),end=datetime.datetime(2010,10,10,0,0,0)),
False)
# start = None and dt < end
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,9,10,0,0,0),start=None,end=datetime.datetime(2010,10,10,0,0,0)),
True)
# end = None and dt > start
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,9,10,0,0,0),start=datetime.datetime(2009,9,10,0,0,0),end=None),
True)
# start, end None
self.assertEqual(dt_in_range(dt=datetime.datetime(2010,9,10,0,0,0),start=None,end=None),
True)
class wpp_db(object):
def __init__(self, user=WPPRY_USER, pw=WPPRY_PW, db=WPPRY_DB, host=WPPRY_HOST, port=WPPRY_PORT):
self.user = user
self.pw = pw
self.db = db
self.host = host
self.port = port
self.conn = MySQLdb.connect(host=self.host, port=self.port, user=self.user, passwd=self.pw, db=self.db, cursorclass=MySQLdb.cursors.DictCursor, use_unicode=True,
charset = "utf8")
def get_wpp_newusers(self):
GET_USERS_SQL = "SELECT * from wpp_newusers;"
cur = self.conn.cursor()
cur.execute(GET_USERS_SQL)
for user in cur:
yield user
def get_users_without_registration(self):
NO_REGISTRATION_SQL = "SELECT name FROM wpp_newusers WHERE registration is null and missing <> 1;"
cur = self.conn.cursor()
cur.execute(NO_REGISTRATION_SQL)
for user in cur:
yield user
def get_users_by_last_updated(self,dir="ASC"):
LAST_UPDATED_SQL = "SELECT name, last_updated, editcount FROM wpp_newusers WHERE missing <> 1 ORDER BY last_updated %s;" % (dir)
cur = self.conn.cursor()
cur.execute(LAST_UPDATED_SQL)
for user in cur:
yield user
def put_wpuser(self, name, emailable=None, blockedby=None, blockreason=None, missing=False, gender=None, editcount=None, registration=None, groups=None, record_created=None, last_updated=None,
rev_id=None, rev_title=None, rev_timestamp=None, invalid=False):
# check whether name already exists -- can this be done in SQL?
wpuser_fields = ["name", "emailable", "blockedby", "blockreason", "missing", "gender", "registration", "groups", "record_created", "last_updated", "editcount", "rev_id",
"rev_title", "rev_timestamp", "invalid"]
if record_created is None:
#record_created = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
record_created = datetime.datetime.utcnow()
if last_updated is None:
last_updated = record_created
# convert registration to datetime.datetime
if registration is not None:
registration = datetime.datetime.strptime(registration,"%Y-%m-%dT%H:%M:%SZ")
wpuser_values = [name, emailable, blockedby, blockreason, missing, gender, registration, groups, record_created, last_updated, editcount, rev_id, rev_title, rev_timestamp, invalid]
# I don't allow for the updating of rev_id, rev_title, or rev_timestamp -- a bit of kludge
update_fields = ["emailable", "blockedby", "blockreason", "missing", "gender", "registration", "groups", "last_updated", "editcount", "invalid"]
update_values = [emailable, blockedby, blockreason, missing, gender, registration, groups, last_updated, editcount, invalid]
#print wpuser_values
INSERT_USER_SQL = u"INSERT INTO wpp_newusers (%s) VALUES (%s) " % ( ", ".join(wpuser_fields), ", ".join(["%s" for k in range(len(wpuser_fields))]))
UPDATE_CLAUSE = "ON DUPLICATE KEY UPDATE " + ", ".join(map (lambda x: x+"=%s", update_fields))
INSERT_UPDATE_USER_SQL = INSERT_USER_SQL + UPDATE_CLAUSE + ";"
#print INSERT_UPDATE_USER_SQL
#print wpuser_values + update_values
cur = self.conn.cursor()
cur.execute(INSERT_UPDATE_USER_SQL, wpuser_values + update_values)
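# Editor note: the statement assembled above is a single MySQL upsert of the
# shape
#   INSERT INTO wpp_newusers (name, emailable, ...) VALUES (%s, %s, ...)
#   ON DUPLICATE KEY UPDATE emailable=%s, ...;
# so calling put_wpuser() again for an existing name updates the row in place
# (rev_id, rev_title and rev_timestamp are deliberately left out of the UPDATE).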
def get_stats(self):
GET_STATS_SQL = "SELECT count(*) as count, sum(editcount) as sum_editcount, sum(editcount)/count(*) as mean_edits_per_user, sum(emailable) as sum_emailable, min(rev_timestamp) as min_rev_timestamp , max(rev_timestamp) as max_rev_timestamp, max(editcount) as max_editcount, sum(timestampdiff(SECOND,registration,last_updated)) as sum_active_time, sum(timestampdiff(SECOND,registration,last_updated)) / sum(editcount)/(60*60*24) as mean_days_between_edit FROM wpp_newusers where missing<>1;"
cur = self.conn.cursor()
cur.execute(GET_STATS_SQL)
return cur.fetchone()
class wpp_db2(wpp_db):
""" subclassing wpp_db for now to help in development -- don't want to break wpp_db"""
def get_usercontribs(self):
GET_USERCONTRIBS_SQL = "SELECT * from wpp_usercontribs;"
cur = self.conn.cursor()
cur.execute(GET_USERCONTRIBS_SQL)
for contrib in cur:
yield contrib
def get_usercontrib_by_rev_id(self, rev_id):
pass
def put_usercontrib(self, rev_id, page_title=None, page_id=None, user_name=None, comment=None, parsedcomment=None,
minor=None, commenthidden=None, patrolled=None, timestamp=None, namespace=None, tags=None, record_created=None,
last_updated=None, missing=False):
#wpp_usercontribs_fields = ['rev_id', 'page_title', 'page_id', 'user_name', 'comment', 'parsedcomment', 'minor', 'patrolled', 'timestamp', 'namespace', 'tags', 'record_created', 'last_updated', 'missing']
if record_created is None:
record_created = datetime.datetime.utcnow()
if last_updated is None:
last_updated = record_created
# convert timestamp to datetime.datetime -- I think timestamp is a time.struct_time
if timestamp is not None:
if isinstance(timestamp,time.struct_time):
timestamp = datetime.datetime(*timestamp[:6])
wpp_usercontribs_dict = filter_none_value({'rev_id': rev_id, 'page_title': page_title, 'page_id': page_id, 'user_name': user_name,
'comment': comment, 'parsedcomment': parsedcomment, 'minor': minor, 'commenthidden':commenthidden,
'patrolled': patrolled, 'timestamp': timestamp, 'namespace': namespace, 'tags': tags, 'record_created': record_created,
'last_updated': last_updated, 'missing': missing})
# which fields are updatable: not rev_id, record_created
wpp_usercontribs_update_fields = filter_none_value({'page_title': page_title, 'page_id': page_id, 'user_name': user_name,
'comment': comment, 'parsedcomment': parsedcomment, 'minor': minor, 'commenthidden':commenthidden,
'patrolled': patrolled, 'timestamp': timestamp, 'namespace': namespace, 'tags': tags,
'last_updated': last_updated, 'missing': missing})
INSERT_USER_SQL = u"INSERT INTO wpp_usercontribs (%s) VALUES (%s) " % ( ", ".join(wpp_usercontribs_dict.keys()),
", ".join(["%s" for k in range(len(wpp_usercontribs_dict.keys()))]))
UPDATE_CLAUSE = "ON DUPLICATE KEY UPDATE " + ", ".join(map (lambda x: x+"=%s", wpp_usercontribs_update_fields.keys()))
INSERT_UPDATE_USER_SQL = INSERT_USER_SQL + UPDATE_CLAUSE + ";"
#print
#print
#print INSERT_UPDATE_USER_SQL
#print wpp_usercontribs_dict.values() + wpp_usercontribs_update_fields.values()
#print len(wpp_usercontribs_dict.values() + wpp_usercontribs_update_fields.values())
cur = self.conn.cursor()
cur.execute(INSERT_UPDATE_USER_SQL, wpp_usercontribs_dict.values() + wpp_usercontribs_update_fields.values())
def get_users_by_latest_usercontribs_timestamp_checked (self):
#LAST_UPDATED_SQL = "SELECT name, last_updated, latest_usercontribs_timestamp_checked from wpp_newusers WHERE missing <> 1 ORDER BY latest_usercontribs_timestamp_checked ASC;"
#LAST_UPDATED_SQL = """SELECT name, latest_usercontribs_timestamp_checked, last_updated FROM wpp_newusers WHERE (latest_usercontribs_timestamp_checked IS NOT NULL) AND (latest_usercontribs_timestamp_checked < last_updated) AND (missing <> 1) UNION ALL SELECT name, latest_usercontribs_timestamp_checked, last_updated FROM wpp_newusers WHERE (latest_usercontribs_timestamp_checked IS NULL) AND (missing <> 1);"""
LAST_UPDATED_SQL = """SELECT name, latest_usercontribs_timestamp_checked, last_updated FROM wpp_newusers WHERE (latest_usercontribs_timestamp_checked IS NULL) AND (missing <> 1) UNION ALL SELECT name, latest_usercontribs_timestamp_checked, last_updated FROM wpp_newusers WHERE (latest_usercontribs_timestamp_checked IS NOT NULL) AND (latest_usercontribs_timestamp_checked < last_updated) AND (missing <> 1);"""
cur = self.conn.cursor()
cur.execute(LAST_UPDATED_SQL)
for user in cur:
yield user
def update_latest_usercontribs_timestamp_checked(self,user_name,timestamp):
"""write the timestamp to the database"""
# timestamp has be of the form time.struct_time -- or I should check and allow both struct_time or datetime
UPDATE_SQL = "UPDATE wpp_newusers SET latest_usercontribs_timestamp_checked=%s WHERE name=%s"
cur = self.conn.cursor()
# convert timestamp to datetime.datetime
if isinstance(timestamp,time.struct_time):
timestamp = datetime.datetime(*timestamp[:6])
cur.execute(UPDATE_SQL, (timestamp,user_name))
def get_users_by_name(self,user_names):
"""user_names is an iteration of Wikipedia user names to pull from the database"""
GET_SQL = "SELECT * from wpp_newusers u WHERE u.name = %s;";
cur = self.conn.cursor()
for user_name in user_names:
try:
cur.execute(GET_SQL, user_name)
for user in cur:
yield user
except Exception, e:
pass
def get_users_with_editcount_fewer_contribs(self, limit=10000000):
GET_SQL = """SELECT sb.user_name as name, u.editcount, sb.num_contribs, u.last_updated, u.latest_usercontribs_timestamp_checked from (SELECT c.user_name, count(*) as num_contribs from wpp_usercontribs c GROUP BY c.user_name) as sb LEFT JOIN wpp_newusers u on sb.user_name = u.name WHERE sb.num_contribs > u.editcount LIMIT %s;"""
cur = self.conn.cursor()
cur.execute(GET_SQL,limit)
for user in cur:
yield user
def get_missing_users(self):
MISSING_USERS_SQL = "SELECT name from wpp_newusers where missing = 1;"
cur = self.conn.cursor()
cur.execute(MISSING_USERS_SQL)
for user in cur:
yield user
def user_contribs(contribs="newbie",dir=None,limit=500, tagfilter=None,target=None,namespace=None,year=None,month=None,offset=None):
# dir = None --> we start with the most recent and go back in time
# dir=prev --> we start with oldest contributions and return them oldest->newest
parameters = {"title":"Special:Contributions", "contribs":contribs, "limit":limit, "dir":dir, "tagfilter":tagfilter,
"target":target,"namespace":namespace,"year":year,"month":month, "offset":offset}
# get rid of any parameters that are blank
for (k,v) in parameters.items():
if v is None:
del parameters[k]
base_url = "http://en.wikipedia.org"
url = base_url + "/w/index.php?" + urllib.urlencode(parameters)
#print url
# now grab content of url
h = httplib2.Http()
#if dir is None: mw-nextlink holds the URL for the next page -- loop until no more mw-nextlink
#if dir is prev: mw-prevlink holds the URL for the following page -- loop until no more mw-prevlink
more_pages = True
while more_pages:
(resp, content) = h.request(url,"GET")
root = html.fromstring(content)
userlinks = root.xpath("""//*[contains(concat( " ", @class, " " ), concat( " ", "mw-userlink", " " ))]""")
if dir is None:
following_page = root.xpath("//a[@class='mw-nextlink']")
elif dir == "prev":
following_page = root.xpath("//a[@class='mw-prevlink']")
userlinks.reverse() # to get links in chronological order
if len(following_page) > 0:
url = base_url + following_page[0].attrib["href"]
else:
more_pages = False
for user in userlinks:
# get parent
li = user.getparent()
rev_data = dict()
rev_data["li"] = li
rev_data["name"] = user.text
li_children = li.getchildren()
a0 = li_children[0]
# check whether the first child is an anchor -- if it isn't then a deleted rev is represented and we skip (for now)
if (a0.tag == "a"):
# have to check whether the rev has been deleted
# e.g., <span class="history-deleted">07:25, 6 September 2010</span>
# extract the timestamp, title, revid from the first anchor
rev_data["timestamp"] = datetime.datetime.strptime(a0.text,"%H:%M, %d %B %Y")
href = urlparse.urlparse(a0.attrib["href"]).query
rev_data["title"] = urlparse.parse_qs(href)["title"][0]
rev_data["revid"] = urlparse.parse_qs(href)["oldid"][0]
yield rev_data
else:
continue # go to next user
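# Editor note: the first request built above looks roughly like
#   http://en.wikipedia.org/w/index.php?title=Special:Contributions&contribs=newbie&limit=500
# (parameter order may vary), and later pages are fetched by following the
# rendered mw-nextlink / mw-prevlink anchors rather than the MediaWiki API.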
def sample_users(offset=None, dir="prev",page_size=500, max_revs=50000, continue_from_rev_timestamp=None,db=None):
"""
if continue_from_rev_timestamp is not None, see whether it's max or min -- and set offset to the corresponding value
"""
user_set = set()
if db is None:
db = wpp_db()
stats = db.get_stats()
if continue_from_rev_timestamp == "max":
offset = stats["max_rev_timestamp"].strftime("%Y%m%d%H%M%S")
elif continue_from_rev_timestamp == "min":
offset = stats["min_rev_timestamp"].strftime("%Y%m%d%H%M%S")
for (i,data) in enumerate(itertools.islice(user_contribs(limit=page_size, dir="prev", offset=offset),max_revs)):
u = data["li"]
print i, data["name"], data["timestamp"], data["title"], data["revid"]
user_set.add(data["name"])
db.put_wpuser(name=data["name"], rev_id=data["revid"], rev_title=data["title"], rev_timestamp=data["timestamp"])
class wpp_newusers_updater(object):
def __init__(self, user="wppry", pw="wppry", db="wppry", host="127.0.0.1", port=3306):
self.db = wpp_db(user=user,pw=pw,db=db,host=host,port=port)
self.mw = mwclient.Site("en.wikipedia.org")
def update_users(self,users,max_to_update=100000):
page_size = 50 # the limit for the number of user ids to pass in.
for page in grouper(itertools.islice(users,max_to_update),page_size):
user_names = []
for user in page:
user_names.append(user["name"])
# "blockinfo|groups|editcount|registration|emailable|gender"
#print "user_names to query: ", user_names
user_data = list(self.mw.users(user_names,prop="blockinfo|groups|editcount|registration|emailable|gender"))
#pprint (user_data)
for d in user_data:
name = d["name"]
if d.has_key("invalid"):
invalid = True
missing = True
print "INVALID: ", name
self.db.put_wpuser(name=name,missing=missing, invalid=invalid)
elif d.has_key("missing"):
missing = True
print name, missing
self.db.put_wpuser(name=name,missing=missing)
else:
try:
missing = False
registration = d["registration"]
gender = d["gender"]
editcount = d["editcount"]
if d.has_key("emailable"):
emailable = True
else:
emailable = False
if d.has_key("blockedby"):
blockedby = d["blockedby"]
blockreason = d["blockreason"]
else:
blockedby = None
blockreason = None
if d.has_key("groups"):
groups = ", ".join(d["groups"])
else:
groups = None
print name, registration, gender, editcount, emailable, blockedby, blockreason, groups, missing
self.db.put_wpuser(name=name,emailable=emailable,blockedby=blockedby,blockreason=blockreason, missing=missing,
gender=gender,editcount=editcount,registration=registration,groups=groups)
except Exception, e:
print "ERROR: ", e
print name, d
raise e
def update_users_without_registration(self,max_to_update=10000):
users_without_reg = self.db.get_users_without_registration()
self.update_users(users_without_reg,max_to_update)
def update_users_by_lastupdate(self,max_to_update=10000):
users = self.db.get_users_by_last_updated()
self.update_users(users,max_to_update)
class wpp_usercontribs_updater(object):
def __init__(self, user=WPPRY_USER, pw=WPPRY_PW, db=WPPRY_DB, host=WPPRY_HOST, port=WPPRY_PORT):
self.db = wpp_db2(user=user,pw=pw,db=db,host=host,port=port)
self.mw = mwclient.Site("en.wikipedia.org")
def contribs_for_users(self,users, start='use_latest_usercontribs_timestamp_checked', end='use_wpp_newusers.last_updated',
add_EndOfUserToken=False):
"""iterator to produce usercontribs for users; if get_new_for_db is False, get contribs from beginning
What options do we have here?
* add_EndOfUserToken (default False) throw in EndOfUserToken into the stream to make it easier to recognize transition between users
* start can be {None | a specific datetime.datetime | 'use_latest_usercontribs_timestamp_checked' (default)}
* specify a common end for a search for all users (or none at all) or tie end for a search to the wpp_newusers.last_updated so
we can compare wpp_newusers.editcount directly end={None | a specific datetime.datetime | 'use_wpp_newusers.last_updated' (default)}
"""
# have a shared end_time for all users
nowish = datetime.datetime(*datetime.datetime.utcnow().timetuple()[:6])
for user in users:
name = user["name"].decode("UTF-8")
if start == 'use_latest_usercontribs_timestamp_checked':
if user["latest_usercontribs_timestamp_checked"] is not None:
start_time = user["latest_usercontribs_timestamp_checked"]
else:
start_time = None
elif start is None:
start_time = None
else:
start_time = start
if end is None:
# end_time = datetime.datetime.utcnow()
# since mwclient uses str(dt) to convert parameters and mediawiki doesn't like subseconds, truncate the microseconds in utcnow()
end_time = nowish
elif end == 'use_wpp_newusers.last_updated':
end_time = user["last_updated"]
else:
end_time = end
time_params = filter_none_value({'start':start_time, 'end':end_time})
contribs = self.user_contribs(user_name=name, **time_params)
#print "contribs_for_users: ", name, start_time, end_time
for contrib in contribs:
yield contrib
if add_EndOfUserToken:
yield EndOfUserToken(user_name=name, timestamp=end_time)
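# When add_EndOfUserToken is True, the resulting stream is: contrib, ...,
# EndOfUserToken(user A), contrib, ..., EndOfUserToken(user B);
# update_contribs_of_users() below uses the tokens to record how far each
# user's history has been checked.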
def normalize_contrib(self,contrib):
rev_id = contrib["revid"]
page_title = contrib["title"]
page_id = contrib["pageid"]
user_name = contrib["user"]
if contrib.has_key("commenthidden"):
comment = None
parsedcomment = None
commenthidden = True
else:
comment = contrib["comment"]
parsedcomment = contrib["parsedcomment"]
commenthidden = False
if contrib.has_key("minor"):
minor = True
else:
minor = False
timestamp = contrib["timestamp"]
# timestamp_formatted = datetime.datetime(*timestamp[:6]).isoformat()
namespace = contrib["ns"]
if contrib.has_key("tags"): # actually an array -- should map to a comma-separated string
tags = ", ".join(contrib["tags"])
else:
tags = None
return {'rev_id':rev_id, 'page_title':page_title, 'page_id':page_id, 'user_name':user_name, 'comment':comment,
'parsedcomment':parsedcomment, 'minor':minor, 'commenthidden':commenthidden, 'timestamp':timestamp,'namespace':namespace, 'tags':tags}
def contribs_for_users_2(self,users, start='use_latest_usercontribs_timestamp_checked', end='use_wpp_newusers.last_updated',
add_EndOfUserToken=False):
"""
Rewrite to use the fact that we can pass along more than one user to the API at a time.
iterator to produce usercontribs for users; if get_new_for_db is False, get contribs from beginning
What options do we have here?
* add_EndOfUserToken (default False) throw in EndOfUserToken into the stream to make it easier to recognize transition between users
* start can be {None | a specific datetime.datetime | 'use_latest_usercontribs_timestamp_checked' (default)}
* specify a common end for a search for all users (or none at all) or tie end for a search to the wpp_newusers.last_updated so
we can compare wpp_newusers.editcount directly end={None | a specific datetime.datetime | 'use_wpp_newusers.last_updated' (default)}
"""
logging.debug("in contribs_for_users_2")
# building this back up piece by piece
# page through users
page_size = 50 # the limit for the number of user ids to pass in.
dir = 'newer'
nowish = datetime.datetime(*datetime.datetime.utcnow().timetuple()[:6])
logging.debug ("nowish is: " + str(nowish))
for (page_num, page) in enumerate(grouper(users,page_size)):
logging.debug("page_num: %s" %(page_num))
user_names = []
if add_EndOfUserToken:
end_of_user_tokens = {}
# need to calculate the start_times and end_times for individual accounts
# for each page of user accounts, also need to calculate a page start_time and end_time
start_times = {}
end_times = {}
page_start_time = None
page_end_time = None
for user in page:
user_name = user["name"].decode("UTF-8")
user_names.append(user_name)
# calculate the start_time / end_time for the user
if start == 'use_latest_usercontribs_timestamp_checked':
if user["latest_usercontribs_timestamp_checked"] is not None:
start_times[user_name] = user["latest_usercontribs_timestamp_checked"]
else:
start_times[user_name] = None
elif start is None:
start_times[user_name] = None
else:
start_times[user_name] = start
if end is None:
# end_time = datetime.datetime.utcnow()
# since mwclient uses str(dt) to convert parameters and mediawiki doesn't like subseconds, truncate the microseconds in utcnow()
end_times[user_name] = nowish
elif end == 'use_wpp_newusers.last_updated':
end_times[user_name] = user["last_updated"]
else:
end_times[user_name] = end
if add_EndOfUserToken:
end_of_user_tokens[user_name] = EndOfUserToken(user_name=user_name, timestamp=end_times[user_name])
# calculate the page start_time and end_time
page_start_time = min(start_times.values(),key=key_st_cmp)
page_end_time = max(end_times.values(),key=key_et_cmp)
time_limits = filter_none_value({'start':page_start_time, 'end':page_end_time})
user_names_string = "|".join(user_names)
logging.debug("user_names_string: %s" % (user_names_string))
logging.debug("time_limits: %s" % (time_limits))
logging.debug("start_times: %s " % (start_times))
logging.debug("end_times: %s " % (end_times))
contribs = self.mw.usercontributions(user=user_names_string, prop="ids|title|timestamp|comment|parsedcomment|size|flags|tags",dir=dir, **time_limits)
current_user = None # track changes in users in the flow of contribs
for (m,contrib) in enumerate(contribs):
if current_user is None: # take care of initialization
current_user = contrib["user"]
if add_EndOfUserToken:
if contrib["user"] != current_user:
logging.debug("m, EndOfUserToken for %s" % (current_user.encode("UTF-8")))
yield end_of_user_tokens[current_user]
del(end_of_user_tokens[current_user])
# yield EndOfUserToken(user_name=current_user, timestamp=end_times[current_user])
current_user = contrib["user"]
logging.debug("m, set (2) current_user: %s" % (current_user.encode("UTF-8")))
# check whether contrib fits in timeframe before yielding the contrib
if dt_in_range(dt=contrib["timestamp"], start=start_times[current_user],end=end_times[current_user]):
logging.debug("m, contrib yielded %s %s %s %s %s" %(current_user, contrib["revid"], contrib["timestamp"], start_times[current_user], end_times[current_user]))
yield self.normalize_contrib(contrib)
# yield up any other EndOfUserTokens
if add_EndOfUserToken:
for token in end_of_user_tokens.values():
yield token
#yield EndOfUserToken(user_name=current_user, timestamp=end_times[current_user])
def update_contribs_of_users(self,users,max_to_update=1000000):
#contribs = self.contribs_for_users(users,add_EndOfUserToken=True)
contribs = self.contribs_for_users_2(users,add_EndOfUserToken=True)
for (m,contrib) in enumerate(itertools.islice(contribs,max_to_update)):
print m,
if isinstance(contrib,EndOfUserToken):
#update the wpp_newusers table to hold the timestamp of the search for the user
self.db.update_latest_usercontribs_timestamp_checked(contrib.user_name,contrib.timestamp)
else:
self.db.put_usercontrib(**contrib)
self.db.update_latest_usercontribs_timestamp_checked(contrib["user_name"],contrib["timestamp"])
def user_contribs(self,user_name,start=None,end=None,dir='newer'):
# are there other props to look at?
# if start is not specified, grab all the contribs
time_limits = filter_none_value({'start':start, 'end':end})
#print "in user_contribs (start,end): ", time_limits
contribs = self.mw.usercontributions(user=user_name, prop="ids|title|timestamp|comment|parsedcomment|size|flags|tags",dir=dir, **time_limits)
# ids, title, timestamp, comment, parsedcomment, size, flags, patrolled, tags
# currently don't have permission to get patrolled flag
for contrib in contribs:
#print contrib
rev_id = contrib["revid"]
page_title = contrib["title"]
page_id = contrib["pageid"]
user_name = contrib["user"]
if contrib.has_key("commenthidden"):
comment = None
parsedcomment = None
commenthidden = True
else:
comment = contrib["comment"]
parsedcomment = contrib["parsedcomment"]
commenthidden = False
if contrib.has_key("minor"):
minor = True
else:
minor = False
timestamp = contrib["timestamp"]
# timestamp_formatted = datetime.datetime(*timestamp[:6]).isoformat()
namespace = contrib["ns"]
if contrib.has_key("tags"): # actually an array -- should map to a comma-separated string
tags = ", ".join(contrib["tags"])
else:
tags = None
yield {'rev_id':rev_id, 'page_title':page_title, 'page_id':page_id, 'user_name':user_name, 'comment':comment,
'parsedcomment':parsedcomment, 'minor':minor, 'commenthidden':commenthidden, 'timestamp':timestamp,'namespace':namespace, 'tags':tags}
def update_usercontribs_by_lastupdate(self,max_to_update=1000000):
"""update `wpp_usercontribs` grouped by users. Generate a priority queue based on `latest_usercontribs_timestamp_checked`, starting with ones that
are `null` value followed by chronological order of `wpp_usercontribs`."""
user_queue = self.db.get_users_by_latest_usercontribs_timestamp_checked()
self.update_contribs_of_users(user_queue,max_to_update)
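# Driver sketch (connection values are placeholders; assumes these methods
# live on the updater class instantiated in __main__ below):
#   updater = wpp_newusers_updater(user="u", pw="p", db="wpp", host="127.0.0.1", port=3306)
#   updater.update_usercontribs_by_lastupdate(max_to_update=1000)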
class mwclient_demo(object):
def __init__(self, url="en.wikipedia.org"):
self.site = mwclient.Site(url)
def revisions_in_page(self, page_id, num_revs):
page = self.site.Pages[page_id]
revisions = page.revisions(diffto='prev',prop = 'ids|timestamp|flags|comment|user|content')
for rev in itertools.islice(revisions,num_revs):
pprint(rev)
def users(self,user_list,prop="blockinfo|groups|editcount|registration|emailable|gender"):
users_data = self.site.users(user_list,prop=prop)
return users_data
def user_contribs(self,user_id):
# are there other props to look at?
contribs = self.site.usercontributions(user=user_id, prop="ids|title|timestamp|comment|parsedcomment|size|flags|patrolled|tags")
# ids, title, timestamp, comment, parsedcomment, size, flags, patrolled, tags
for (m,contrib) in enumerate(contribs):
rev_id = contrib["revid"]
page_title = contrib["title"]
page_id = contrib["pageid"]
user_name = contrib["user"]
comment = contrib["comment"]
parsedcomment = contrib["parsedcomment"]
if contrib.has_key("minor"):
minor = True
else:
minor = False
if contrib.has_key("patrolled"):
patrolled = True
else:
patrolled = False
timestamp = contrib["timestamp"]
# timestamp_formatted = datetime.datetime(*timestamp[:6]).isoformat()
namespace = contrib["ns"]
tags = contrib["tags"]
print m, contrib
def user_contribs_html(self,user_id):
# get contributions for user
# usercontributions(self, user, start = None, end = None, dir = 'older', namespace = None, prop = None, show = None, limit = None)
contribs = self.site.usercontributions(user=user_id, prop="ids|title|timestamp|flags|comment|parsedcomment|size|tags|user")
f = codecs.open("mwclient_user_contrib.html",mode="wb", encoding="UTF-8")
header = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<title>mwclient user contrib test</title>
</head>"""
f.write(header)
f.write("<body>")
f.write("<table>")
for contrib in contribs:
title = contrib["title"]
revid = contrib["revid"]
comment = contrib["comment"]
parsedcomment = contrib["parsedcomment"]
timestamp = contrib["timestamp"]
timestamp_formatted = datetime.datetime(*timestamp[:6]).isoformat()
ns = contrib["ns"]
tags = contrib["tags"]
pageid = contrib["pageid"]
user = contrib["user"]
if contrib.has_key("minor"):
minor = True
else:
minor = False
# revision link e.g., http://en.wikipedia.org/w/index.php?title=Bach-Werke-Verzeichnis&oldid=383129991
rev_url = u"http://en.wikipedia.org/w/index.php?" + urllib.urlencode({"title":title.encode("UTF-8"), "oldid":revid})
rev_link = u"<a href='%s'>%s</a>" % (rev_url, timestamp_formatted)
# diff link http://en.wikipedia.org/w/index.php?title=Bach-Werke-Verzeichnis&diff=prev&oldid=383129991
diff_url = u"http://en.wikipedia.org/w/index.php?" + urllib.urlencode({"title":title.encode("UTF-8"), "diff":"prev", "oldid":revid})
diff_link = u"<a href='%s'>diff</a>" % (diff_url)
# page link http://en.wikipedia.org/wiki/Bach-Werke-Verzeichnis
page_url = "http://en.wikipedia.org/wiki/%s" % (title)
page_link = u"<a href='%s'>%s</a>" % (page_url, title)
# history link http://en.wikipedia.org/w/index.php?title=Bach-Werke-Verzeichnis&action=history
history_url = "http://en.wikipedia.org/w/index.php?" + urllib.urlencode({"title":title.encode("UTF-8"), "action":"history"})
history_link = "<a href='%s'>history</a>" % (history_url)
try:
#f.write(u" ".join(map(str,[rev_link, diff_link, history_link, pageid, revid, page_link, user, minor, u"<i>%s</i>" % (parsedcomment), "<br>"])))
f.write(u" ".join(map(unicode,[rev_link, diff_link, history_link, pageid, revid, page_link, user, minor, u"<i>%s</i>" % (parsedcomment), "<br>"])))
except Exception, e:
f.write("error writing %s <br>" % diff_link + str(e))
#can check for whether the contribution was minor
#pprint(contrib)
f.write("</body>")
f.close()
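# Manual check (writes mwclient_user_contrib.html to the working directory;
# mirrors the commented-out calls in __main__ below):
#   demo = mwclient_demo('en.wikipedia.org')
#   demo.user_contribs_html('RaymondYee')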
def test_grouper():
limit = 2002
page_size = 100
for page in grouper(range(1,limit), page_size):
for m in page:
print m,
print
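# With limit=2002 and page_size=100, range(1, 2002) holds 2001 values, so
# grouper yields twenty full pages plus a final partial page. If grouper
# follows the usual itertools recipe (an assumption -- it is defined earlier
# in this module), the last page is padded to page_size with a fillvalue.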
def test_db():
db = wpp_db()
stats = db.get_stats()
print stats
# fetch the user queue via the same helper used by update_usercontribs_by_lastupdate
users = db.get_users_by_latest_usercontribs_timestamp_checked()
for u in users:
print u
class edit_history_tracker (object):
def __init__(self):
pass
if __name__ == "__main__":
#test_grouper()
db1 = wpp_db()
#db1 = wpp_db(user="wpp_test", pw="wpp_test", db="wpp_test", host="127.0.0.1", port=3306)
sample_users(page_size=500,max_revs=100000,continue_from_rev_timestamp="max",db=db1)
# granite db
#db2 = wpp_db(user=WPPRY_USER, pw="3k3h1974", db="wppry", host="127.0.0.1", port=3307)
#sample_users(page_size=500,max_revs=100000,continue_from_rev_timestamp="max",db=db2)
# local updater
updater = wpp_newusers_updater(user=WPPRY_USER, pw=WPPRY_PW, db=WPPRY_DB, host=WPPRY_HOST, port=WPPRY_PORT)
#updater = wpp_newusers_updater(user="wpp_test", pw="wpp_test", db="wpp_test", host="127.0.0.1", port=3306)
updater.update_users_without_registration(200000)
updater.update_users_by_lastupdate(500000)
#granite updater -- depends on a tunnel
# ssh -fNg -L 3307:127.0.0.1:3306 raymond@granite.ischool.berkeley.edu
# mysql -h 127.0.0.1 -P 3307 -u root -p
#updater2 = wpp_newusers_updater(user="root", pw="3k3h1974", db="wppry", host="127.0.0.1", port=3307)
#updater2.update_users_without_registration(10000)
#updater2.update_users_by_lastupdate(5000)
#new_user_page()
#demo = mwclient_demo('en.wikipedia.org')
#demo.users(["RaymondYee", 'Biggles1000', 'Delta Trine'])
#demo.revisions_in_page("User:RaymondYee",10)
#demo.user_contribs_html("RaymondYee")
#test_db()
#new_user_page()
| gpl-2.0 |
pathompongoo/ThGovJobApp | env/lib/python2.7/site-packages/setuptools/tests/test_resources.py | 345 | 23973 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove
import os
import sys
import tempfile
import shutil
from unittest import TestCase
import pkg_resources
from pkg_resources import (parse_requirements, VersionConflict, parse_version,
Distribution, EntryPoint, Requirement, safe_version, safe_name,
WorkingSet)
from setuptools.command.easy_install import (get_script_header, is_sh,
nt_quote_arg)
from setuptools.compat import StringIO, iteritems
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
def safe_repr(obj, short=False):
""" copied from Python2.7"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < pkg_resources._MAX_LENGTH:
return result
return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...'
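# Example: safe_repr('x' * 10000, short=True) returns the first
# pkg_resources._MAX_LENGTH characters of the repr followed by ' [truncated]...'.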
class Metadata(pkg_resources.EmptyProvider):
"""Mock object to return metadata as if from an on-disk distribution"""
def __init__(self,*pairs):
self.metadata = dict(pairs)
def has_metadata(self,name):
return name in self.metadata
def get_metadata(self,name):
return self.metadata[name]
def get_metadata_lines(self,name):
return pkg_resources.yield_lines(self.get_metadata(name))
dist_from_fn = pkg_resources.Distribution.from_filename
class DistroTests(TestCase):
def testCollection(self):
# empty path should produce no distributions
ad = pkg_resources.Environment([], platform=None, python=None)
self.assertEqual(list(ad), [])
self.assertEqual(ad['FooPkg'],[])
ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))
# Name is in there now
self.assertTrue(ad['FooPkg'])
# But only 1 package
self.assertEqual(list(ad), ['foopkg'])
# Distributions sort by version
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
)
# Removing a distribution leaves sequence alone
ad.remove(ad['FooPkg'][1])
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.2']
)
# And inserting adds them in order
ad.add(dist_from_fn("FooPkg-1.9.egg"))
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
)
ws = WorkingSet([])
foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
req, = parse_requirements("FooPkg>=1.3")
# Nominal case: no distros on path, should yield all applicable
self.assertEqual(ad.best_match(req,ws).version, '1.9')
# If a matching distro is already installed, should return only that
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
# If the first matching distro is unsuitable, it's a version conflict
ws = WorkingSet([])
ws.add(foo12)
ws.add(foo14)
self.assertRaises(VersionConflict, ad.best_match, req, ws)
# If more than one match on the path, the first one takes precedence
ws = WorkingSet([])
ws.add(foo14)
ws.add(foo12)
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
def checkFooPkg(self,d):
self.assertEqual(d.project_name, "FooPkg")
self.assertEqual(d.key, "foopkg")
self.assertEqual(d.version, "1.3-1")
self.assertEqual(d.py_version, "2.4")
self.assertEqual(d.platform, "win32")
self.assertEqual(d.parsed_version, parse_version("1.3-1"))
def testDistroBasics(self):
d = Distribution(
"/some/path",
project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
)
self.checkFooPkg(d)
d = Distribution("/some/path")
self.assertEqual(d.py_version, sys.version[:3])
self.assertEqual(d.platform, None)
def testDistroParse(self):
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg")
self.checkFooPkg(d)
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg-info")
self.checkFooPkg(d)
def testDistroMetadata(self):
d = Distribution(
"/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
metadata = Metadata(
('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
)
)
self.checkFooPkg(d)
def distRequires(self, txt):
return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
def checkRequires(self, dist, txt, extras=()):
self.assertEqual(
list(dist.requires(extras)),
list(parse_requirements(txt))
)
def testDistroDependsSimple(self):
for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
self.checkRequires(self.distRequires(v), v)
def testResolve(self):
ad = pkg_resources.Environment([])
ws = WorkingSet([])
# Resolving no requirements -> nothing to install
self.assertEqual(list(ws.resolve([],ad)), [])
# Request something not in the collection -> DistributionNotFound
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
)
Foo = Distribution.from_filename(
"/foo_dir/Foo-1.2.egg",
metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
)
ad.add(Foo)
ad.add(Distribution.from_filename("Foo-0.9.egg"))
# Request thing(s) that are available -> list to activate
for i in range(3):
targets = list(ws.resolve(parse_requirements("Foo"), ad))
self.assertEqual(targets, [Foo])
list(map(ws.add,targets))
self.assertRaises(VersionConflict, ws.resolve,
parse_requirements("Foo==0.9"), ad)
ws = WorkingSet([]) # reset
# Request an extra that causes an unresolved dependency for "Baz"
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
)
Baz = Distribution.from_filename(
"/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
)
ad.add(Baz)
# Activation list now includes resolved dependency
self.assertEqual(
list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
)
# Requests for conflicting versions produce VersionConflict
self.assertRaises(VersionConflict,
ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
def testDistroDependsOptions(self):
d = self.distRequires("""
Twisted>=1.5
[docgen]
ZConfig>=2.0
docutils>=0.3
[fastcgi]
fcgiapp>=0.1""")
self.checkRequires(d,"Twisted>=1.5")
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
["docgen","fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
["fastcgi", "docgen"]
)
self.assertRaises(pkg_resources.UnknownExtra, d.requires, ["foo"])
class EntryPointTests(TestCase):
def assertfields(self, ep):
self.assertEqual(ep.name,"foo")
self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
self.assertEqual(ep.attrs, ("EntryPointTests",))
self.assertEqual(ep.extras, ("x",))
self.assertTrue(ep.load() is EntryPointTests)
self.assertEqual(
str(ep),
"foo = setuptools.tests.test_resources:EntryPointTests [x]"
)
def setUp(self):
self.dist = Distribution.from_filename(
"FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))
def testBasics(self):
ep = EntryPoint(
"foo", "setuptools.tests.test_resources", ["EntryPointTests"],
["x"], self.dist
)
self.assertfields(ep)
def testParse(self):
s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
ep = EntryPoint.parse(s, self.dist)
self.assertfields(ep)
ep = EntryPoint.parse("bar baz= spammity[PING]")
self.assertEqual(ep.name,"bar baz")
self.assertEqual(ep.module_name,"spammity")
self.assertEqual(ep.attrs, ())
self.assertEqual(ep.extras, ("ping",))
ep = EntryPoint.parse(" fizzly = wocka:foo")
self.assertEqual(ep.name,"fizzly")
self.assertEqual(ep.module_name,"wocka")
self.assertEqual(ep.attrs, ("foo",))
self.assertEqual(ep.extras, ())
def testRejects(self):
for ep in [
"foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
]:
try: EntryPoint.parse(ep)
except ValueError: pass
else: raise AssertionError("Should've been bad", ep)
def checkSubMap(self, m):
self.assertEqual(len(m), len(self.submap_expect))
for key, ep in iteritems(self.submap_expect):
self.assertEqual(repr(m.get(key)), repr(ep))
submap_expect = dict(
feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']),
feature3=EntryPoint('feature3', 'this.module', extras=['something'])
)
submap_str = """
# define features for blah blah
feature1 = somemodule:somefunction
feature2 = another.module:SomeClass [extra1,extra2]
feature3 = this.module [something]
"""
def testParseList(self):
self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
self.assertRaises(ValueError, EntryPoint.parse_group, "x",
["foo=baz", "foo=bar"])
def testParseMap(self):
m = EntryPoint.parse_map({'xyz':self.submap_str})
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
class RequirementsTests(TestCase):
def testBasics(self):
r = Requirement.parse("Twisted>=1.2")
self.assertEqual(str(r),"Twisted>=1.2")
self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
def testOrdering(self):
r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
self.assertEqual(r1,r2)
self.assertEqual(str(r1),str(r2))
self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")
def testBasicContains(self):
r = Requirement("Twisted", [('>=','1.2')], ())
foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
twist11 = Distribution.from_filename("Twisted-1.1.egg")
twist12 = Distribution.from_filename("Twisted-1.2.egg")
self.assertTrue(parse_version('1.2') in r)
self.assertTrue(parse_version('1.1') not in r)
self.assertTrue('1.2' in r)
self.assertTrue('1.1' not in r)
self.assertTrue(foo_dist not in r)
self.assertTrue(twist11 not in r)
self.assertTrue(twist12 in r)
def testAdvancedContains(self):
r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
self.assertTrue(v in r, (v,r))
for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
self.assertTrue(v not in r, (v,r))
def testOptionsAndHashing(self):
r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
self.assertEqual(r1,r2)
self.assertEqual(r1,r3)
self.assertEqual(r1.extras, ("foo","bar"))
self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized
self.assertEqual(hash(r1), hash(r2))
self.assertEqual(
hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
frozenset(["foo","bar"])))
)
def testVersionEquality(self):
r1 = Requirement.parse("foo==0.3a2")
r2 = Requirement.parse("foo!=0.3a4")
d = Distribution.from_filename
self.assertTrue(d("foo-0.3a4.egg") not in r1)
self.assertTrue(d("foo-0.3a1.egg") not in r1)
self.assertTrue(d("foo-0.3a4.egg") not in r2)
self.assertTrue(d("foo-0.3a2.egg") in r1)
self.assertTrue(d("foo-0.3a2.egg") in r2)
self.assertTrue(d("foo-0.3a3.egg") in r2)
self.assertTrue(d("foo-0.3a5.egg") in r2)
def testSetuptoolsProjectName(self):
"""
The setuptools project should implement the setuptools package.
"""
self.assertEqual(
Requirement.parse('setuptools').project_name, 'setuptools')
# setuptools 0.7 and higher means setuptools.
self.assertEqual(
Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')
class ParseTests(TestCase):
def testEmptyParse(self):
self.assertEqual(list(parse_requirements('')), [])
def testYielding(self):
for inp,out in [
([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
(['x\n\n','y'], ['x','y']),
]:
self.assertEqual(list(pkg_resources.yield_lines(inp)),out)
def testSplitting(self):
sample = """
x
[Y]
z
a
[b ]
# foo
c
[ d]
[q]
v
"""
self.assertEqual(list(pkg_resources.split_sections(sample)),
[(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
)
self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))
def testSafeName(self):
self.assertEqual(safe_name("adns-python"), "adns-python")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
self.assertNotEqual(safe_name("peak.web"), "peak-web")
def testSafeVersion(self):
self.assertEqual(safe_version("1.2-1"), "1.2-1")
self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
self.assertEqual(safe_version("peak.web"), "peak.web")
def testSimpleRequirements(self):
self.assertEqual(
list(parse_requirements('Twis-Ted>=1.2-1')),
[Requirement('Twis-Ted',[('>=','1.2-1')], ())]
)
self.assertEqual(
list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
[Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
)
self.assertEqual(
Requirement.parse("FooBar==1.99a3"),
Requirement("FooBar", [('==','1.99a3')], ())
)
self.assertRaises(ValueError,Requirement.parse,">=2.3")
self.assertRaises(ValueError,Requirement.parse,"x\\")
self.assertRaises(ValueError,Requirement.parse,"x==2 q")
self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
self.assertRaises(ValueError,Requirement.parse,"#")
def testVersionEquality(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertEqual(p1,p2, (s1,s2,p1,p2))
c('1.2-rc1', '1.2rc1')
c('0.4', '0.4.0')
c('0.4.0.0', '0.4.0')
c('0.4.0-0', '0.4-0')
c('0pl1', '0.0pl1')
c('0pre1', '0.0c1')
c('0.0.0preview1', '0c1')
c('0.0c1', '0-rc1')
c('1.2a1', '1.2.a.1')
c('1.2...a', '1.2a')
def testVersionOrdering(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertTrue(p1<p2, (s1,s2,p1,p2))
c('2.1','2.1.1')
c('2a1','2b0')
c('2a1','2.1')
c('2.3a1', '2.3')
c('2.1-1', '2.1-2')
c('2.1-1', '2.1.1')
c('2.1', '2.1pl4')
c('2.1a0-20040501', '2.1')
c('1.1', '02.1')
c('A56','B27')
c('3.2', '3.2.pl0')
c('3.2-1', '3.2pl1')
c('3.2pl1', '3.2pl1-1')
c('0.4', '4.0')
c('0.0.4', '0.4.0')
c('0pl1', '0.4pl1')
c('2.1.0-rc1','2.1.0')
c('2.1dev','2.1a0')
torture ="""
0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
0.77.2-1 0.77.1-1 0.77.0-1
""".split()
for p,v1 in enumerate(torture):
for v2 in torture[p+1:]:
c(v2,v1)
class ScriptHeaderTests(TestCase):
non_ascii_exe = '/Users/José/bin/python'
exe_with_spaces = r'C:\Program Files\Python33\python.exe'
def test_get_script_header(self):
if not sys.platform.startswith('java') or not is_sh(sys.executable):
# This test is for non-Jython platforms
expected = '#!%s\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/local/bin/python'),
expected)
expected = '#!%s -x\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/bin/python -x'),
expected)
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
candidate = get_script_header('#!/usr/bin/python',
executable=self.exe_with_spaces)
self.assertEqual(candidate, '#!"%s"\n' % self.exe_with_spaces)
def test_get_script_header_jython_workaround(self):
# This test doesn't work with Python 3 in some locales
if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
in (None, "C", "POSIX")):
return
class java:
class lang:
class System:
@staticmethod
def getProperty(property):
return ""
sys.modules["java"] = java
platform = sys.platform
sys.platform = 'java1.5.0_13'
stdout, stderr = sys.stdout, sys.stderr
try:
# A mock sys.executable that uses a shebang line (this file)
exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
self.assertEqual(
get_script_header('#!/usr/local/bin/python', executable=exe),
'#!/usr/bin/env %s\n' % exe)
# Ensure we generate what is basically a broken shebang line
# when there's options, with a warning emitted
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python -x',
executable=exe),
'#!%s -x\n' % exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
finally:
del sys.modules["java"]
sys.platform = platform
sys.stdout, sys.stderr = stdout, stderr
class NamespaceTests(TestCase):
def setUp(self):
self._ns_pkgs = pkg_resources._namespace_packages.copy()
self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-")
os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
self._prev_sys_path = sys.path[:]
sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))
def tearDown(self):
shutil.rmtree(self._tmpdir)
pkg_resources._namespace_packages = self._ns_pkgs.copy()
sys.path = self._prev_sys_path[:]
def _assertIn(self, member, container):
""" assertIn and assertTrue does not exist in Python2.3"""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(standardMsg)  # _formatMessage needs a msg argument that this helper does not take
def test_two_levels_deep(self):
"""
Test nested namespace packages
Create namespace packages in the following tree :
site-packages-1/pkg1/pkg2
site-packages-2/pkg1/pkg2
Check both are in the _namespace_packages dict and that their __path__
is correct
"""
sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
for site in ["site-pkgs", "site-pkgs2"]:
pkg1_init = open(os.path.join(self._tmpdir, site,
"pkg1", "__init__.py"), "w")
pkg1_init.write(ns_str)
pkg1_init.close()
pkg2_init = open(os.path.join(self._tmpdir, site,
"pkg1", "pkg2", "__init__.py"), "w")
pkg2_init.write(ns_str)
pkg2_init.close()
import pkg1
self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
try:
import pkg1.pkg2
except ImportError:
self.fail("Setuptools tried to import the parent namespace package")
# check the _namespace_packages dict
self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
# check the __path__ attribute contains both paths
self.assertEqual(pkg1.pkg2.__path__, [
os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")])
| gpl-3.0 |
Azure/WALinuxAgent | tests/utils/test_archive.py | 1 | 9301 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
import os
import shutil
import tempfile
import zipfile
from datetime import datetime, timedelta
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.utils import fileutil
from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, _MAX_ARCHIVED_STATES
from tests.tools import AgentTestCase, patch
debug = False
if os.environ.get('DEBUG') == '1':
debug = True
# Enable verbose logger to stdout
if debug:
logger.add_logger_appender(logger.AppenderType.STDOUT,
logger.LogLevel.VERBOSE)
class TestArchive(AgentTestCase):
def setUp(self):
prefix = "{0}_".format(self.__class__.__name__)
self.tmp_dir = tempfile.mkdtemp(prefix=prefix)
def tearDown(self):
if not debug and self.tmp_dir is not None:
shutil.rmtree(self.tmp_dir)
def _write_file(self, filename, contents=None):
full_name = os.path.join(self.tmp_dir, filename)
fileutil.mkdir(os.path.dirname(full_name))
with open(full_name, 'w') as file_handler:
data = contents if contents is not None else filename
file_handler.write(data)
return full_name
@property
def history_dir(self):
return os.path.join(self.tmp_dir, 'history')
@staticmethod
def _parse_archive_name(name):
# Name can be a directory or a zip
# '0000-00-00T00:00:00.000000_incarnation_0'
# '0000-00-00T00:00:00.000000_incarnation_0.zip'
timestamp_str, incarnation_ext = name.split("_incarnation_")
incarnation_no_ext = os.path.splitext(incarnation_ext)[0]
return timestamp_str, incarnation_no_ext
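# Example: _parse_archive_name('2000-01-01T00:00:00.000000_incarnation_0.zip')
# returns ('2000-01-01T00:00:00.000000', '0'); the directory form without the
# .zip extension parses identically.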
def test_archive00(self):
"""
StateFlusher should move all 'goal state' files to a new directory
under the history folder that is timestamped.
"""
temp_files = [
'GoalState.0.xml',
'Prod.0.manifest.xml',
'Prod.0.agentsManifest',
'Microsoft.Azure.Extensions.CustomScript.0.xml'
]
for temp_file in temp_files:
self._write_file(temp_file)
test_subject = StateFlusher(self.tmp_dir)
test_subject.flush()
self.assertTrue(os.path.exists(self.history_dir))
self.assertTrue(os.path.isdir(self.history_dir))
timestamp_dirs = os.listdir(self.history_dir)
self.assertEqual(1, len(timestamp_dirs))
timestamp_str, incarnation = self._parse_archive_name(timestamp_dirs[0])
self.assert_is_iso8601(timestamp_str)
timestamp = self.parse_isoformat(timestamp_str)
self.assert_datetime_close_to(timestamp, datetime.utcnow(), timedelta(seconds=30))
self.assertEqual("0", incarnation)
for temp_file in temp_files:
history_path = os.path.join(self.history_dir, timestamp_dirs[0], temp_file)
msg = "expected the temp file {0} to exist".format(history_path)
self.assertTrue(os.path.exists(history_path), msg)
def test_archive01(self):
"""
StateArchiver should archive all history directories by
1. Creating a .zip of a timestamped directory's files
2. Saving the .zip to /var/lib/waagent/history/
3. Deleting the timestamped directory
"""
temp_files = [
'GoalState.0.xml',
'Prod.0.manifest.xml',
'Prod.0.agentsManifest',
'Microsoft.Azure.Extensions.CustomScript.0.xml'
]
for current_file in temp_files:
self._write_file(current_file)
flusher = StateFlusher(self.tmp_dir)
flusher.flush()
test_subject = StateArchiver(self.tmp_dir)
test_subject.archive()
timestamp_zips = os.listdir(self.history_dir)
self.assertEqual(1, len(timestamp_zips))
zip_fn = timestamp_zips[0] # 2000-01-01T00:00:00.000000_incarnation_N.zip
timestamp_str, incarnation = self._parse_archive_name(zip_fn)
self.assert_is_iso8601(timestamp_str)
timestamp = self.parse_isoformat(timestamp_str)
self.assert_datetime_close_to(timestamp, datetime.utcnow(), timedelta(seconds=30))
self.assertEqual("0", incarnation)
zip_full = os.path.join(self.history_dir, zip_fn)
self.assert_zip_contains(zip_full, temp_files)
def test_archive02(self):
"""
StateArchiver should purge the MAX_ARCHIVED_STATES oldest files
or directories. The oldest timestamps are purged first.
This test case creates a mixture of archive files and directories.
It creates 6 more entries than MAX_ARCHIVED_STATES (count = 6 below) to
ensure that 6 archives are cleaned up. It asserts that the files and
directories are properly deleted from the disk.
"""
count = 6
total = _MAX_ARCHIVED_STATES + count
start = datetime.now()
timestamps = []
for i in range(0, total):
timestamp = start + timedelta(seconds=i)
timestamps.append(timestamp)
if i % 2 == 0:
filename = os.path.join('history', "{0}_incarnation_0".format(timestamp.isoformat()), 'Prod.0.manifest.xml')
else:
filename = os.path.join('history', "{0}_incarnation_0.zip".format(timestamp.isoformat()))
self._write_file(filename)
self.assertEqual(total, len(os.listdir(self.history_dir)))
test_subject = StateArchiver(self.tmp_dir)
test_subject.purge()
archived_entries = os.listdir(self.history_dir)
self.assertEqual(_MAX_ARCHIVED_STATES, len(archived_entries))
archived_entries.sort()
for i in range(0, _MAX_ARCHIVED_STATES):
timestamp = timestamps[i + count].isoformat()
if i % 2 == 0:
filename = "{0}_incarnation_0".format(timestamp)
else:
filename = "{0}_incarnation_0.zip".format(timestamp)
self.assertTrue(filename in archived_entries, "'{0}' is not in the list of unpurged entries".format(filename))
def test_archive03(self):
"""
All archives should be purged, both with the new naming (with incarnation number) and with the old naming.
"""
start = datetime.now()
timestamp1 = start + timedelta(seconds=5)
timestamp2 = start + timedelta(seconds=10)
dir_old = timestamp1.isoformat()
dir_new = "{0}_incarnation_1".format(timestamp2.isoformat())
archive_old = "{0}.zip".format(timestamp1.isoformat())
archive_new = "{0}_incarnation_1.zip".format(timestamp2.isoformat())
self._write_file(os.path.join("history", dir_old, "Prod.0.manifest.xml"))
self._write_file(os.path.join("history", dir_new, "Prod.1.manifest.xml"))
self._write_file(os.path.join("history", archive_old))
self._write_file(os.path.join("history", archive_new))
self.assertEqual(4, len(os.listdir(self.history_dir)), "Not all entries were archived!")
test_subject = StateArchiver(self.tmp_dir)
with patch("azurelinuxagent.common.utils.archive._MAX_ARCHIVED_STATES", 0):
test_subject.purge()
archived_entries = os.listdir(self.history_dir)
self.assertEqual(0, len(archived_entries), "Not all entries were purged!")
def test_archive04(self):
"""
The archive directory is created if it does not exist.
This failure was caught when .purge() was called before .archive().
"""
test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist'))
test_subject.purge()
@staticmethod
def parse_isoformat(timestamp_str):
return datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%S.%f')
@staticmethod
def assert_is_iso8601(timestamp_str):
try:
TestArchive.parse_isoformat(timestamp_str)
except:
raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(timestamp_str))
@staticmethod
def _total_seconds(delta):
"""
Compute the total_seconds for a timedelta because 2.6 does not have total_seconds.
"""
return (0.0 + delta.microseconds + (delta.seconds + delta.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6
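# Example: _total_seconds(timedelta(days=1, seconds=1)) == 86401.0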
def assert_datetime_close_to(self, time1, time2, within):
if time1 <= time2:
diff = time2 - time1
else:
diff = time1 - time2
secs = self._total_seconds(within - diff)
if secs < 0:
self.fail("the timestamps are outside of the tolerance of by {0} seconds".format(secs))
def assert_zip_contains(self, zip_filename, files):
ziph = None
try:
# contextmanager for zipfile.ZipFile doesn't exist for py2.6, manually closing it
ziph = zipfile.ZipFile(zip_filename, 'r')
zip_files = [x.filename for x in ziph.filelist]
for current_file in files:
self.assertTrue(current_file in zip_files, "'{0}' was not found in {1}".format(current_file, zip_filename))
finally:
if ziph is not None:
ziph.close()
| apache-2.0 |
WadeYuChen/django-oscar | sites/sandbox/apps/user/models.py | 43 | 2256 | """
Sample user/profile models for testing. These aren't enabled by default in the
sandbox
"""
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from oscar.core import compat
from oscar.apps.customer import abstract_models
class Profile(models.Model):
"""
Dummy profile model used for testing
"""
user = models.OneToOneField(compat.AUTH_USER_MODEL, related_name="profile")
MALE, FEMALE = 'M', 'F'
choices = (
(MALE, 'Male'),
(FEMALE, 'Female'))
gender = models.CharField(max_length=1, choices=choices,
verbose_name='Gender')
age = models.PositiveIntegerField(verbose_name='Age')
# A simple extension of the core User model for Django 1.5
try:
from django.contrib.auth.models import (
AbstractUser, BaseUserManager, AbstractBaseUser)
except ImportError:
pass
else:
class ExtendedUserModel(AbstractUser):
twitter_username = models.CharField(max_length=255, unique=True)
class CustomUserManager(BaseUserManager):
def create_user(self, email, password=None):
now = timezone.now()
email = BaseUserManager.normalize_email(email)
user = self.model(email=email, last_login=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
return self.create_user(email, password)
# A user model which doesn't extend AbstractUser
@python_2_unicode_compatible
class CustomUserModel(AbstractBaseUser):
name = models.CharField(max_length=255, blank=True)
email = models.EmailField(unique=True)
twitter_username = models.CharField(max_length=255, unique=True)
USERNAME_FIELD = 'email'
objects = CustomUserManager()
def __str__(self):
return self.email
def get_full_name(self):
return self.name
get_short_name = get_full_name
# A simple extension of the core Oscar User model
class ExtendedOscarUserModel(abstract_models.AbstractUser):
twitter_username = models.CharField(max_length=255, unique=True)
| bsd-3-clause |
boundarydevices/android_external_chromium_org | tools/telemetry/telemetry/timeline/slice.py | 8 | 2266 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event as timeline_event
class Slice(timeline_event.TimelineEvent):
"""A Slice represents an interval of time plus parameters associated
with that interval.
NOTE: The Sample class implements the same interface as
Slice. These must be kept in sync.
All time units are stored in milliseconds.
"""
def __init__(self, parent_thread, category, name, timestamp, duration=0,
thread_timestamp=None, thread_duration=None, args=None):
super(Slice, self).__init__(
category, name, timestamp, duration, thread_timestamp, thread_duration,
args)
self.parent_thread = parent_thread
self.parent_slice = None
self.sub_slices = []
self.did_not_finish = False
def AddSubSlice(self, sub_slice):
assert sub_slice.parent_slice == self
self.sub_slices.append(sub_slice)
def IterEventsInThisContainerRecrusively(self):
for sub_slice in self.sub_slices:
yield sub_slice
for sub_sub in sub_slice.IterEventsInThisContainerRecrusively():
yield sub_sub
@property
def self_time(self):
"""Time spent in this function less any time spent in child events."""
child_total = sum(
[e.duration for e in self.sub_slices])
return self.duration - child_total
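# Worked example: a slice of duration 10 with sub-slices of durations 2 and 3
# has self_time == 10 - (2 + 3) == 5.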
@property
def self_thread_time(self):
"""Thread (scheduled) time spent in this function less any thread time spent
in child events. Returns None if the slice or any of its children does not
have a thread_duration value.
"""
if not self.thread_duration:
return None
child_total = 0
for e in self.sub_slices:
if e.thread_duration is None:
return None
child_total += e.thread_duration
return self.thread_duration - child_total
def _GetSubSlicesRecursive(self):
for sub_slice in self.sub_slices:
for s in sub_slice.GetAllSubSlices():
yield s
yield sub_slice
def GetAllSubSlices(self):
return list(self._GetSubSlicesRecursive())
def GetAllSubSlicesOfName(self, name):
return [e for e in self.GetAllSubSlices() if e.name == name]
| bsd-3-clause |
durai145/youtube-dl | youtube_dl/extractor/stanfordoc.py | 173 | 3526 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
orderedSet,
unescapeHTML,
)
class StanfordOpenClassroomIE(InfoExtractor):
IE_NAME = 'stanfordoc'
IE_DESC = 'Stanford Open ClassRoom'
_VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
_TEST = {
'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
'md5': '544a9468546059d4e80d76265b0443b8',
'info_dict': {
'id': 'PracticalUnix_intro-environment',
'ext': 'mp4',
'title': 'Intro Environment',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('course') and mobj.group('video'): # A specific video
course = mobj.group('course')
video = mobj.group('video')
info = {
'id': course + '_' + video,
'uploader': None,
'upload_date': None,
}
baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
xmlUrl = baseUrl + video + '.xml'
mdoc = self._download_xml(xmlUrl, info['id'])
try:
info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError:
raise ExtractorError('Invalid metadata XML file')
return info
elif mobj.group('course'): # A course page
course = mobj.group('course')
info = {
'id': course,
'_type': 'playlist',
'uploader': None,
'upload_date': None,
}
coursepage = self._download_webpage(
url, info['id'],
note='Downloading course info page',
errnote='Unable to download course info page')
info['title'] = self._html_search_regex(
r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
info['description'] = self._html_search_regex(
r'(?s)<description>([^<]+)</description>',
coursepage, 'description', fatal=False)
links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
info['entries'] = [self.url_result(
'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
) for l in links]
return info
else: # Root page
info = {
'id': 'Stanford OpenClassroom',
'_type': 'playlist',
'uploader': None,
'upload_date': None,
}
info['title'] = info['id']
rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
rootpage = self._download_webpage(rootURL, info['id'],
errnote='Unable to download course info page')
links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
info['entries'] = [self.url_result(
'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
) for l in links]
return info
| unlicense |
vadimtk/chrome4sdp | tools/telemetry/telemetry/web_perf/metrics/single_event_unittest.py | 21 | 3135 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from collections import namedtuple
from telemetry.internal.results import page_test_results
from telemetry.page import page
from telemetry.web_perf.metrics import single_event
from telemetry.web_perf import timeline_interaction_record
TRACE_EVENT_NAME = 'FrameView::performLayout'
METRIC_NAME = 'layout'
FakeEventTuple = namedtuple(
'Event',
'start, end, name, duration, thread_duration, has_thread_timestamps')
Interaction = timeline_interaction_record.TimelineInteractionRecord
class SingleEventTestMetric(single_event._SingleEventMetric):
def __init__(self):
super(SingleEventTestMetric, self).__init__(TRACE_EVENT_NAME, METRIC_NAME)
def GetSingleEventMetrics(events, interactions):
results = page_test_results.PageTestResults()
results.WillRunPage(page.Page('file://blank.html'))
SingleEventTestMetric()._AddResultsInternal(events, interactions, results)
return dict((value.name, value.values) for value in
results.current_page_run.values)
def FakeEvent(start, end, name=TRACE_EVENT_NAME):
dur = end - start
return FakeEventTuple(start, end, name, dur, dur, True)
class SingleEventMetricUnitTest(unittest.TestCase):
def testSingleEventMetric(self):
events = [FakeEvent(0, 1),
FakeEvent(9, 11),
FakeEventTuple(10, 13, TRACE_EVENT_NAME, 3, 0, False),
FakeEvent(20, 24),
FakeEvent(21, 26),
FakeEvent(29, 35),
FakeEvent(30, 37),
FakeEvent(40, 48),
FakeEvent(41, 50),
FakeEvent(10, 13, name='something'),
FakeEvent(20, 24, name='FrameView::something'),
FakeEvent(30, 37, name='SomeThing::performLayout'),
FakeEvent(40, 48, name='something else')]
interactions = [Interaction('interaction', 10, 20),
Interaction('interaction', 30, 40)]
self.assertFalse(GetSingleEventMetrics(events, []))
self.assertFalse(GetSingleEventMetrics([], interactions))
# The first event starts before the first interaction, so it is ignored.
# The second event starts before the first interaction, so it is ignored.
# The third event starts during the first interaction, and its duration is
# 13 - 10 = 3.
# The fourth event starts during the first interaction, and its duration is
# 24 - 20 = 4.
# The fifth event starts between the two interactions, so it is ignored.
# The sixth event starts between the two interactions, so it is ignored.
# The seventh event starts during the second interaction, and its duration
# is 37 - 30 = 7.
# The eighth event starts during the second interaction, and its duration is
# 48 - 40 = 8.
# The ninth event starts after the last interaction, so it is ignored.
# The rest of the events have the wrong name, so they are ignored.
self.assertEqual({METRIC_NAME: [3, 4, 7, 8]}, GetSingleEventMetrics(
events, interactions))
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer_test.py | 158 | 2156 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class HybridLayerTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=3,
num_features=7,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
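# A full binary tree of depth d has 2**d - 1 nodes in total and 2**(d - 1)
# leaves, hence the two derived parameters above.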
def testLayerNums(self):
l1 = fully_connected.FullyConnectedLayer(self.params, 0, None)
self.assertEquals(l1.layer_num, 0)
l2 = fully_connected.FullyConnectedLayer(self.params, 1, None)
self.assertEquals(l2.layer_num, 1)
l3 = fully_connected.FullyConnectedLayer(self.params, 2, None)
self.assertEquals(l3.layer_num, 2)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
rosmo/ansible | test/units/modules/network/f5/test_bigip_ucs.py | 14 | 12793 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_ucs import ModuleParameters
from library.modules.bigip_ucs import ModuleManager
from library.modules.bigip_ucs import ArgumentSpec
from library.modules.bigip_ucs import V1Manager
from library.modules.bigip_ucs import V2Manager
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_ucs import ModuleParameters
from ansible.modules.network.f5.bigip_ucs import ModuleManager
from ansible.modules.network.f5.bigip_ucs import ArgumentSpec
from ansible.modules.network.f5.bigip_ucs import V1Manager
from ansible.modules.network.f5.bigip_ucs import V2Manager
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
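# Fixtures are cached per-path in fixture_data, so repeated load_fixture()
# calls for the same file reuse the stored value instead of re-reading disk;
# JSON content is decoded, anything else is returned as raw text.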
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
force=True,
include_chassis_level_config=True,
no_license=True,
no_platform_check=True,
passphrase="foobar",
reset_trust=True,
state='installed'
)
p = ModuleParameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.force is True
assert p.include_chassis_level_config is True
assert p.no_license is True
assert p.no_platform_check is True
assert p.passphrase == "foobar"
assert p.reset_trust is True
assert p.install_command == \
"tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs " \
"include-chassis-level-config no-license no-platform-check " \
"passphrase foobar reset-trust"
def test_module_parameters_false_ucs_booleans(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
include_chassis_level_config=False,
no_license=False,
no_platform_check=False,
reset_trust=False
)
p = ModuleParameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.include_chassis_level_config is False
assert p.no_license is False
assert p.no_platform_check is False
assert p.reset_trust is False
assert p.install_command == "tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs"
class TestV1Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
class TestV2Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
| gpl-3.0 |
sradanov/flyingpigeon | flyingpigeon/processes/wps_analogs.py | 1 | 8822 | from datetime import datetime, date
import types
import tempfile
import tarfile
import ocgis
from ocgis import RequestDataset
import datetime as dt
import os
from flyingpigeon import analogs
from pywps.Process import WPSProcess
import logging
class AnalogsProcess(WPSProcess):
def __init__(self):
# definition of this process
WPSProcess.__init__(self,
identifier = "analogs",
title="Days with analog pressure pattern",
version = "0.2",
metadata= [
{"title": "Institut Pierre Simon Laplace", "href": "https://www.ipsl.fr/en/"}
],
abstract="Search for days with analog pressure pattern",
statusSupported=True,
storeSupported=True
)
self.resource = self.addComplexInput(
identifier="resource",
title="Resource",
abstract="URL to netCDF file",
minOccurs=0,
maxOccurs=1000,
maxmegabites=5000,
formats=[{"mimeType":"application/x-netcdf"}],
)
self.experiment = self.addLiteralInput(
identifier="experiment",
title="Data experiment",
abstract="Choose the experiment",
default="NCEP",
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=['NCEP']
)
self.region = self.addLiteralInput(
identifier="region",
title="Select Region",
abstract="Select a predifined region",
default="NA",
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=['NA']
)
#self.bbox = self.addLiteralOutput(
#identifier="bbox",
#title="Bounding Box",
#abstract="This is a BBox: (minx,miny,maxx,maxy)",
#default="0,-90,180,90",
#type=type(''),
#)
self.dateSt = self.addLiteralInput(
identifier="dateSt",
title="Start date of analyse period",
abstract="This is a Date: 2013-07-15",
default="2014-07-15",
type=type(date(2014,7,15)),
minOccurs=1,
maxOccurs=1,
)
self.dateEn = self.addLiteralInput(
identifier="dateEn",
title="End date of analyse period",
abstract="This is a Date: 2013-12-31",
default="2013-12-31",
type=type(date(2013,12,31)),
minOccurs=1,
maxOccurs=1,
)
self.refSt = self.addLiteralInput(
identifier="refSt",
title="Start reference period",
abstract="Start YEAR of reference period",
default="1955-01-01",
type=type(date(1948, 1, 1)),
minOccurs=1,
maxOccurs=1,
)
self.refEn = self.addLiteralInput(
identifier="refEn",
title="End reference period",
abstract="End YEAR of reference period",
default="1957-12-31",
type=type(date(1958,12,31)),
minOccurs=1,
maxOccurs=1,
)
self.timewin = self.addLiteralInput(
identifier="timewin",
title="Time window",
abstract="Nr of days following the analog day",
default=1,
type=type(1),
minOccurs=0,
maxOccurs=1,
)
self.variable = self.addLiteralInput(
identifier="variable",
title="Variable",
abstract="Variable name in resource",
default='slp',
type=type(''),
minOccurs=0,
maxOccurs=1,
)
# self.seacyc = self.addLiteralInput(
# identifier="seacyc",
# title="Seasonal Cycle",
# abstract="normalized by the Seasonal Cycle",
# default=True,
# type=type(boolean),
# minOccurs=0,
# maxOccurs=1,
# )
# #seacyc=True,
# cycsmooth=91,
# nanalog=20,
# seasonwin=30,
# distfun='rms',
# calccor=True,
# silent=False)
# define the outputs
# self.ncout = self.addComplexOutput(
# identifier="ncout",
# title="netCDF inputfile",
# abstract="netCDF file of the ps valuels",
# formats=[{"mimeType":"application/netcdf"}],
# asReference=True,
# )
# self.tarout = self.addComplexOutput(
# identifier="tarout",
# title="Result tar file",
# abstract="Tar archive containing files with the analog dates",
# formats=[{"mimeType":"application/x-tar"}],
# asReference=True,
# )
self.config = self.addComplexOutput(
identifier="config",
title="Config File",
abstract="Config file used for the Fortran process",
formats=[{"mimeType":"text/plain"}],
asReference=True,
)
def execute(self):
self.status.set('execution started at : %s ' % dt.datetime.now() , 5)
refSt = self.getInputValues(identifier='refSt')
refEn = self.getInputValues(identifier='refEn')
dateSt = self.getInputValues(identifier='dateSt')
dateEn = self.getInputValues(identifier='dateEn')
refSt = dt.datetime.strptime(refSt[0],'%Y-%m-%d')
refEn = dt.datetime.strptime(refEn[0],'%Y-%m-%d')
dateSt = dt.datetime.strptime(dateSt[0],'%Y-%m-%d')
dateEn = dt.datetime.strptime(dateEn[0],'%Y-%m-%d')
timewin = 1 #int(self.getInputValues(identifier='timewin')[0])
start = min(refSt, refEn, dateSt, dateEn )
end = max(refSt, refEn, dateSt, dateEn )
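# The overall fetch period must span both the reference window and the
# analysis window, whichever order the four dates come in.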
archive = '/path/to/archive/file.nc'
simulation = '/path/to/simulation/file.nc'
output = '/path/to/output/file.nc'
files=[archive, simulation, output ]
config_file = analogs.get_configfile(files=files,
timewin=timewin,
varname='slp',
seacyc=True,
cycsmooth=91,
nanalog=20,
seasonwin=30,
distfun='rms',
calccor=True,
silent=False)
#self.ncout.setValue(ret)
self.config.setValue( config_file )
#self.tarout.setValue(tarout_file)
self.status.set('execution ended at : %s' % dt.datetime.now() , 100)
# uris = []
# (fp_tar, tarout_file) = tempfile.mkstemp(dir=".", suffix='.tar')
# tar = tarfile.open(tarout_file, "w")
# for y in range(start.year , end.year +1 , 1):
# url = 'http://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/slp.%i.nc' % (y)
# (fp_tf, tf ) = tempfile.mkstemp(dir=".", suffix=".nc")
# (fp_tf2, tf2 ) = tempfile.mkstemp(dir=".", suffix=".nc")
# #cmd = str('ncks -O -d lon,280.0,50.0 -d lat,22.5,70.0 %s %s' %( download(url), tf))
# cmd = ['ncks', '-O', '-d', 'lon,280.0,50.0', '-d', 'lat,22.5,70.0', download(url), tf]
# #os.system(cmd) # ["ls", "-l"])nc = wget.download(url)
# self.cmd(cmd, stdout=True)
# #cdo_cmd = 'cdo sellonlatbox,-80,50,22.5,70 %s %s ' % (tf, tf2)
# cdo_cmd = ['cdo', 'sellonlatbox,-80,50,22.5,70', tf, tf2]
# self.cmd(cdo_cmd, stdout=True)
# #os.system(cdo_cmd)
# uris.append(tf2)
# self.status.set('NCEP file year: %i downloaded' % (y) , 7)
# us = ocgis.util.helpers.get_sorted_uris_by_time_dimension(uris, variable=None) # for time sorting
# fname = str('slp_NOA_NCEP_%i_%i' % (start.year , end.year))
# self.status.set('download done for : %s ' % (fname) , 10)
# # ocgis specifications:
# # try:
# # if (self.getInputValues(identifier='region') == 'NOA'):
# #geom = [-80, 22.5, 50, 70.0 ] # [min x, min y, max x, max y].
# ocgis.env.DIR_OUTPUT = os.curdir
# rds = RequestDataset(us, 'slp')
# ops = ocgis.OcgOperations(dataset=rds, prefix=fname, output_format='nc', allow_empty=True, add_auxiliary_files=False)
# ret = ops.execute()
# fpath = '%s' % (ret)
# # tar.add(fpath , arcname = fpath.replace(os.curdir, ""))
# self.status.set('ocgis subset succeded for file : %s ' % (ret) , 15)
# ### run R file
# pf = str(os.path.dirname(os.path.abspath(__file__)))
# Rskript = os.path.join(pf + '/Rsrc/analogs.R')
# Rsource = os.path.join(pf + '/Rsrc/')
# logger.debug('found R skript : %s', Rskript)
# curdir = os.path.abspath(os.path.curdir)
# logger.debug('curdir : %s ' % (curdir))
# logger.debug('analogs.R : %s ' % (Rskript))
# os.mkdir(os.path.curdir+'/RoutDir/')
# RoutDir = os.path.join(os.path.curdir+'/RoutDir/')
# (fp_Rlog, Rlog) = tempfile.mkstemp(dir="./RoutDir/", suffix='.log')
# Rcmd = 'R --vanilla --args %s %s %s %i %i %s %s < %s > %s ' % (ret, dateSt.date(), dateEn.date(), refSt.year, refEn.year, Rsource, curdir, Rskript, Rlog )
# #Rcmd = ['R', '--vanilla', '--args', ret, str(dateSt.date()), str(dateEn.date()), str(refSt.year), str(refEn.year), Rsource, curdir,'<', Rskript,'>', Rlog]
# logging.debug('system call : %s ' % (Rcmd))
# # Call the R skript
# os.system(str(Rcmd))
# #self.cmd(Rcmd, stdout=False)
# tar.add(RoutDir) # , arcname = fpath.replace(os.curdir, ""))
# ##except Exception as e:
# ##self.show_status('failed for file : %s ' % ( e ) , 15)
# tar.close()
| apache-2.0 |
openweave/openweave-wdlc | backend/lib/nwv/proto_pool.py | 1 | 11246 | #
# Copyright (c) 2019-2020 Google LLC. All Rights Reserved.
# Copyright (c) 2016-2018 Nest Labs Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# This file effects a utility for pooling a set of protobuf files
# together to make them easier to work with.
#
# The utility is similar to 'google.protobuf.descriptor_pool' and
# does denormalizing and adds some features otherwise missing
# from that package.
#
"""Utility for pooling a set of protobuf files together.
* Does a bunch of denormalizing to make them easier to work with
* Similar to google.protobuf.descriptor_pool, but adds some missing features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inflection
from google.protobuf import descriptor_pb2
class ProtoDesc(object):
"""Base protobuf descriptor wrapper.
Attributes:
file: FileDesc of the file that contains this descriptor.
full_name: Fully-qualified name of the described entity.
location: Source code location info.
parent: Parent ProtoDesc
"""
def __init__(self, desc, full_name, file_desc, path, parent):
self._base_ = desc
self.full_name = full_name
self.file = file_desc
self.parent = parent
if self.file is None:
self.file = FileDesc(descriptor_pb2.FileDescriptorProto(), '<unknown>')
self.location = self.file.get_location(path)
# fill in an empty location if one can't be found, e.g. map entries
if not self.location:
self.location = descriptor_pb2.SourceCodeInfo.Location()
# Wrap base
def __getattr__(self, attr):
return getattr(self._base_, attr)
def __dir__(self):
return list(self.__dict__.keys()) + dir(self._base_)
# Just spit out the base as string
def __str__(self):
out = ''
out += str(self._base_)
return out
class FileDesc(ProtoDesc):
def __init__(self, desc, full_name):
self._locations = {str(l.path): l for l in desc.source_code_info.location}
super(FileDesc, self).__init__(desc, full_name, self, [], None)
# Get source_code_info location based on path
def get_location(self, path):
return self._locations.get(str(path))
class FieldDesc(ProtoDesc):
"""Base field descriptor wrapper."""
def __init__(self, desc, full_name, file_desc, path, parent):
super(FieldDesc, self).__init__(desc, full_name, file_desc, path, parent)
self.message_type = None
self.enum_type = None
self.message_dependencies = collections.OrderedDict()
self.enum_dependencies = collections.OrderedDict()
def is_oneof(self):
return self.HasField('oneof_index')
def oneof_label(self):
if self.is_oneof():
return self.parent.oneof_decl[self.oneof_index].name
# True if this field is a map
def is_map(self):
return (self.message_type and self.message_type.options and
self.message_type.options.map_entry)
class MessageDesc(ProtoDesc):
"""Base message descriptor wrapper."""
def __init__(self, desc, full_name, file_desc, path, parent=None):
super(MessageDesc, self).__init__(desc, full_name, file_desc, path, parent)
self.messages = collections.OrderedDict()
self.enums = collections.OrderedDict()
self.fields = collections.OrderedDict()
self.message_dependencies = collections.OrderedDict()
self.enum_dependencies = collections.OrderedDict()
self.is_map_entry = desc.options and desc.options.map_entry
def map_field(self):
if self.is_map_entry:
return self.parent.fields[inflection.underscore(
self.name[:-len('Entry')])]
return None
class EnumDesc(ProtoDesc):
"""Base enum descriptor wrapper."""
def __init__(self, desc, full_name, file_desc, path, parent=None):
super(EnumDesc, self).__init__(desc, full_name, file_desc, path, parent)
self.values = collections.OrderedDict()
class EnumValueDesc(ProtoDesc):
pass
class ProtoPool(object):
"""Pool for working with a set of proto files.
Vends ProtoDesc objects instead of plain protobuf descriptors.
"""
def __init__(self):
self._files = collections.OrderedDict()
self._messages = collections.OrderedDict()
self._enums = collections.OrderedDict()
self._fields = collections.OrderedDict()
def add_file_set(self, file_generator):
for file_desc in sorted(file_generator, key=lambda f: f.name):
self.add_file(file_desc)
self.update_dependencies()
def add_file(self, file_desc):
"""Add a protobuf file to the pool.
Args:
file_desc: A new protobuf file descriptor to add
Returns:
FileDesc: The wrapped file_desc
"""
full_name = normalize_type(file_desc.name).replace('/', '.').rsplit('.',
1)[0]
file_desc = self._files[file_desc.name] = FileDesc(file_desc, full_name)
msg_index = 0
for msg in file_desc.message_type:
self.add_message(msg, file_desc.package, file_desc, [
descriptor_pb2.FileDescriptorProto.MESSAGE_TYPE_FIELD_NUMBER,
msg_index
])
msg_index += 1
enum_index = 0
for enum in file_desc.enum_type:
self.add_enum(enum, file_desc.package, file_desc, [
descriptor_pb2.FileDescriptorProto.ENUM_TYPE_FIELD_NUMBER, enum_index
])
enum_index += 1
return file_desc
def add_message(self, msg, package, file_desc, path, parent=None):
"""Adds protobuf message descriptor to pool.
Args:
msg: A protobuf descriptor message
package: Package name for message
file_desc: A protobuf file descriptor
path: Source code info path (a list of field numbers and indexes).
parent: An optional parent ProtoDesc
Returns:
MessageDesc: The wrapped msg
"""
full_name = concat_name(package, msg.name)
msg_desc = MessageDesc(msg, full_name, file_desc, path, parent)
self._messages[full_name] = msg_desc
if parent:
parent.messages[msg.name] = msg_desc
nested_index = 0
for nested_msg in msg.nested_type:
nested_path = path + [
descriptor_pb2.DescriptorProto.NESTED_TYPE_FIELD_NUMBER, nested_index
]
self.add_message(nested_msg, full_name, file_desc, nested_path, msg_desc)
nested_index += 1
enum_index = 0
for enum in msg.enum_type:
enum_path = path + [
descriptor_pb2.DescriptorProto.ENUM_TYPE_FIELD_NUMBER, enum_index
]
self.add_enum(enum, full_name, file_desc, enum_path, msg_desc)
enum_index += 1
field_index = 0
for field in msg.field:
field_path = path + [
descriptor_pb2.DescriptorProto.FIELD_FIELD_NUMBER, field_index
]
self.add_field(field, full_name, file_desc, field_path, msg_desc)
field_index += 1
return msg_desc
def add_enum_value(self, enum_value, package, file_desc, path, parent):
full_name = concat_name(package, enum_value.name)
value_desc = EnumValueDesc(enum_value, full_name, file_desc, path, parent)
parent.values[enum_value.name] = value_desc
return value_desc
def add_enum(self, enum, package, file_desc, path, parent=None):
"""Add an enum to the pool.
Args:
enum: A protobuf enum description
package: Package name for the enum
file_desc: The file descriptor that contains the enum
path: A path list for the enum within the file
parent: Optional parent message
Returns:
EnumDesc: The wrapped enum
"""
full_name = concat_name(package, enum.name)
enum_desc = self._enums[full_name] = EnumDesc(enum, full_name, file_desc,
path, parent)
if parent:
parent.enums[enum.name] = enum_desc
value_index = 0
for value in enum.value:
value_path = path + [
descriptor_pb2.EnumDescriptorProto.VALUE_FIELD_NUMBER, value_index
]
self.add_enum_value(value, full_name, file_desc, value_path, enum_desc)
value_index += 1
return enum_desc
def add_field(self, field, package, file_desc, path, parent):
full_name = concat_name(package, field.name)
desc = FieldDesc(field, full_name, file_desc, path, parent)
self._fields[full_name] = desc
parent.fields[field.name] = desc
return desc
def update_field_types(self):
"""Fill in the message_type and enum_type on each field.
Must be run after everything is loaded into the pool
"""
for field in self._fields.values():
if field.type == descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE:
field.message_type = self.get_message(field.type_name)
elif field.type == descriptor_pb2.FieldDescriptorProto.TYPE_ENUM:
field.enum_type = self.get_enum(field.type_name)
def update_dependencies(self):
"""Fill in the message and enum dependencies for each message and field.
Must be run after everything is loaded into the pool.
"""
self.update_field_types()
change = True
# Keep trying until nothing changes
while change:
change = False
for field in self._fields.values():
orig_len = (
len(field.message_dependencies) + len(field.enum_dependencies))
if field.message_type:
field.message_dependencies[
field.message_type.full_name] = field.message_type
field.message_dependencies.update(
field.message_type.message_dependencies)
field.enum_dependencies.update(field.message_type.enum_dependencies)
elif field.enum_type:
field.enum_dependencies[field.enum_type.full_name] = field.enum_type
field.parent.message_dependencies.update(field.message_dependencies)
field.parent.enum_dependencies.update(field.enum_dependencies)
if orig_len != (
len(field.message_dependencies) + len(field.enum_dependencies)):
change = True
def get_file(self, type_name):
return self._files.get(normalize_type(type_name))
def get_message(self, type_name):
return self._messages.get(normalize_type(type_name))
def get_enum(self, type_name):
return self._enums.get(normalize_type(type_name))
def get_field(self, field_name):
return self._fields.get(normalize_type(field_name))
def get_files(self):
return list(self._files.values())
def get_messages(self):
return list(self._messages.values())
def get_enums(self):
return list(self._enums.values())
def get_fields(self):
return list(self._fields.values())
def normalize_type(type_name):
if type_name[0] == '.':
type_name = type_name[1:]
return type_name
def concat_name(package, name):
return package + '.' + name
def parent_name(type_name):
return normalize_type(type_name).rsplit('.', 1)[0]
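# --- Editor's sketch (not part of the original module) ---
# A minimal illustration of driving ProtoPool from a serialized
# FileDescriptorSet, e.g. one produced with
# protoc --include_source_info --descriptor_set_out=descriptors.pb foo.proto
# The file name 'descriptors.pb' and the looked-up type name are assumptions
# for illustration only.
def _example_build_pool(path='descriptors.pb'):
fds = descriptor_pb2.FileDescriptorSet()
with open(path, 'rb') as fh:
fds.ParseFromString(fh.read())
pool = ProtoPool()
pool.add_file_set(fds.file) # sorts files by name, resolves dependencies
return pool
# Usage: _example_build_pool().get_message('my.package.MyMessage')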
| apache-2.0 |
Devyani-Divs/pagure | pagure/ui/admin.py | 1 | 1932 | # -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from functools import wraps
import flask
import pagure.forms
from pagure import (APP, SESSION, LOG, cla_required, authenticated,
generate_gitolite_acls, generate_authorized_key_file,
is_admin, admin_session_timedout)
def admin_required(function):
""" Flask decorator to retrict access to admins of pagure.
"""
@wraps(function)
def decorated_function(*args, **kwargs):
""" Decorated function, actually does the work. """
if admin_session_timedout():
return flask.redirect(
flask.url_for('auth_login', next=flask.request.url))
elif not is_admin():
flask.flash('Access restricted', 'error')
return flask.redirect(flask.url_for('.index'))
return function(*args, **kwargs)
return decorated_function
# Application
@APP.route('/admin')
@admin_required
def admin_index():
""" Front page of the admin section of the application.
"""
form = pagure.forms.ConfirmationForm()
return flask.render_template(
'admin_index.html', form=form,
)
@APP.route('/admin/gitolite', methods=['POST'])
@admin_required
def admin_generate_acl():
""" Regenerate the gitolite ACL file. """
form = pagure.forms.ConfirmationForm()
if form.validate_on_submit():
generate_gitolite_acls()
flask.flash('Gitolite ACLs updated')
return flask.redirect(flask.url_for('admin_index'))
@APP.route('/admin/ssh', methods=['POST'])
@admin_required
def admin_refresh_ssh():
""" Regenerate the gitolite ACL file. """
form = pagure.forms.ConfirmationForm()
if form.validate_on_submit():
generate_authorized_key_file()
flask.flash('Authorized file updated')
return flask.redirect(flask.url_for('admin_index'))
| gpl-2.0 |
OpenBankProject/Hello-OBP-DirectLogin-Python | props/socgen.py | 1 | 1133 | # -*- coding: utf-8 -*-
"""
Settings for the hello scripts.
You most likely need to edit a few of them, e.g. BASE_URL and the
DirectLogin credentials.
"""
OUR_BANK = '00100'
USERNAME = '1000203893'
PASSWORD = '1000203893'
CONSUMER_KEY = 'bvldezvlnqj4mtva4jfktke4xliep0bt1xm44yxi'
CONSUMER_SECRET = 'fgwo35uhkroebasxlqgzjjcc0cf1yaujuynkwodz'
# API server URL
BASE_URL = 'https://socgen2-k-api.openbankproject.com'
API_VERSION = "v2.1.0"
# The API server will redirect your browser to this URL; it should be non-functional
# You will paste the redirect location here when running the script
CALLBACK_URI = 'http://127.0.0.1/cb'
# Our COUNTERPARTY account id (of the same currency)
OUR_COUNTERPARTY = '3806441b-bbdf-3c60-b2b3-14e2f645635f'
COUNTERPARTY_BANK = '00100'
# The following two fields are only used in API v2.1.0
OUR_COUNTERPARTY_ID = ''
OUR_COUNTERPARTY_IBAN = ''
# Our currency to use
OUR_CURRENCY = 'XAF'
# Our value to transfer
# Values below 1000 do not require a challenge request
OUR_VALUE = '0.01'
OUR_VALUE_LARGE = '1000.00'
PAYMENT_DESCRIPTION = 'Hello Payments v2.1!'
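# --- Editor's sketch (not part of the original settings file) ---
# A minimal illustration of how these settings are typically combined into an
# OBP DirectLogin request. The endpoint path, header format and `requests`
# dependency are assumptions based on the public DirectLogin docs; the hello
# scripts themselves may differ.
def _example_direct_login():
import requests # assumed third-party dependency
auth = 'DirectLogin username="%s", password="%s", consumer_key="%s"' % (
USERNAME, PASSWORD, CONSUMER_KEY)
response = requests.post(BASE_URL + '/my/logins/direct',
headers={'Authorization': auth})
return response.json()['token'] # token used to authorize later API calls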
| apache-2.0 |
cjcjameson/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/crashrecovery/test_suspendcheckpoint_crashrecovery_21_to_30.py | 2 | 7531 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.lib import local_path
from tinctest.models.scenario import ScenarioTestCase
from mpp.lib.PSQL import PSQL
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.filerep_util import Filerepe2e_Util
class SuspendcheckpointCrashrecoveryTestCase(ScenarioTestCase):
'''
Testing state of prepared transactions upon crash-recovery
@gucs gp_create_table_random_default_distribution=off
'''
def __init__(self, methodName):
self.gpfile = Gpfilespace()
self.filereputil = Filerepe2e_Util()
super(SuspendcheckpointCrashrecoveryTestCase,self).__init__(methodName)
def setUp(self):
super(SuspendcheckpointCrashrecoveryTestCase, self).setUp()
'''Create filespace '''
self.gpfile.create_filespace('filespace_test_a')
def tearDown(self):
''' Clean up the filespace created; reset the skip checkpoint fault '''
self.gpfile.drop_filespace('filespace_test_a')
port = os.getenv('PGPORT')
self.filereputil.inject_fault(f='checkpoint', y='reset', r='primary', o='0', p=port)
super(SuspendcheckpointCrashrecoveryTestCase, self).tearDown()
def test_crash_recovery_21_to_30(self):
'''
@note : Steps are the same as in Cdbfast and the previous tinc schedule
@param skip_state : skip checkpoint
@param cluster_state : sync/change_tracking/resync
@param ddl_type : create/drop
@fault_type : commit/abort.
@crash_type : gpstop_i/gpstop_a/failover_to_primary
@description: Test the state of prepared transactions upon crash-recovery.
Faults are used to suspend the transactions before segments flush commit/abort to xlog.
A crash followed by recovery is performed to evaluate the transaction state.
Steps:
0. Check the state of the cluster before proceeding the test execution
1. Run any fault 'skip checkpoint' before pre_sqls
2. Run pre_sqls if any
3. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state
4. Run trigger_sqls - these are the transactions which will be suspended
5. Crash and recover.
6. Run post_sqls to validate whether the transactions at step 4 are committed/aborted as expected
7. Recover and Validate using gpcheckcat and gpcheckmirrorseg
@data_provider data_types_provider
'''
test_num = self.test_data[0][0]+self.test_data[0][1]
tinctest.logger.info("\n ===============================================")
tinctest.logger.info("\n Starting New Test: %s " % test_num )
tinctest.logger.info("\n ===============================================")
pass_num = self.test_data[1][0]
cluster_state = self.test_data[1][1]
ddl_type = self.test_data[1][2]
test_type = self.test_data[1][3]
aborting_create_needed = self.test_data[1][4]
if test_type == 'abort':
test_dir = '%s_%s_tests' % ('abort', ddl_type)
elif aborting_create_needed == 'True':
test_dir = '%s_%s_%s_tests' % ('abort', ddl_type, 'needed')
else:
test_dir = '%s_%s_tests' % (test_type, ddl_type)
if aborting_create_needed == True and test_type == 'commit':
test_dir = 'abort_create_needed_tests'
elif aborting_create_needed == True and test_type == 'abort' :
test_dir = 'abort_abort_create_needed_tests'
tinctest.logger.info("TestDir == %s " % test_dir )
test_case_list0 = []
test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system')
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.set_faults_before_executing_pre_sqls', [cluster_state]))
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('mpp.gpdb.tests.storage.crashrecovery.%s.pre_sql.test_pre_sqls.TestPreSQLClass' % test_dir)
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.set_faults_before_executing_trigger_sqls', [pass_num, cluster_state, test_type, ddl_type, aborting_create_needed]))
self.test_case_scenario.append(test_case_list3)
test_case_list4 = []
test_case_list4.append('mpp.gpdb.tests.storage.crashrecovery.%s.trigger_sql.test_triggersqls.TestTriggerSQLClass' % test_dir)
test_case_list4.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.run_crash_and_recovery_fast', [test_dir, pass_num, cluster_state, test_type, ddl_type, aborting_create_needed]))
self.test_case_scenario.append(test_case_list4)
test_case_list5 = []
test_case_list5.append('mpp.gpdb.tests.storage.crashrecovery.%s.post_sql.test_postsqls.TestPostSQLClass' % test_dir)
self.test_case_scenario.append(test_case_list5)
test_case_list6 = []
test_case_list6.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.validate_system',[cluster_state]))
self.test_case_scenario.append(test_case_list6)
test_case_list7 = []
test_case_list7.append(('mpp.gpdb.tests.storage.crashrecovery.SuspendCheckpointCrashRecovery.backup_output_dir',[test_dir, test_num]))
self.test_case_scenario.append(test_case_list7)
@tinctest.dataProvider('data_types_provider')
def test_data_provider():
data = {
'21_abort_phase2_pass1_create_inchangetracking':[1,'change_tracking','create','abort',False]
,'22_abort_phase2_pass2_create_inchangetracking':[2,'change_tracking','create','abort',False]
,'23_abort_phase2_pass1_create_inresync':[1,'resync','create','abort',False]
,'24_abort_phase2_pass2_create_inresync':[2,'resync','create','abort',False]
,'25_commit_phase1_aborting_create_needed_insync':[0,'sync','create','commit',True]
,'26_commit_phase1_aborting_create_needed_inchangetracking':[0,'change_tracking','create','commit',True]
,'27_commit_phase1_aborting_create_needed_inresync':[0,'resync','create','commit',True]
,'28_commit_phase2_pass1_aborting_create_needed_insync':[1,'sync','create','commit',True]
,'29_commit_phase2_pass2_aborting_create_needed_insync':[2,'sync','create','commit',True]
,'30_commit_phase2_pass1_aborting_create_needed_inchangetracking':[1,'change_tracking','create','commit',True]
}
return data
| apache-2.0 |
subramani95/neutron | neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py | 3 | 2379 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_net_binding
Revision ID: 1341ed32cc1e
Revises: 4692d074d587
Create Date: 2013-02-26 01:28:29.182195
"""
# revision identifiers, used by Alembic.
revision = '1341ed32cc1e'
down_revision = '4692d074d587'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
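# This migration renames the tz_uuid column to phy_uuid and extends the
# binding_type enum with a new 'l3_ext' value; downgrade reverses both steps.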
new_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
name='nvp_network_bindings_binding_type')
old_type = sa.Enum('flat', 'vlan', 'stt', 'gre',
name='nvp_network_bindings_binding_type')
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('nvp_network_bindings', 'tz_uuid',
name='phy_uuid',
existing_type=sa.String(36),
existing_nullable=True)
migration.alter_enum('nvp_network_bindings', 'binding_type', new_type,
nullable=False)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.alter_column('nvp_network_bindings', 'phy_uuid',
name='tz_uuid',
existing_type=sa.String(36),
existing_nullable=True)
migration.alter_enum('nvp_network_bindings', 'binding_type', old_type,
nullable=False)
| apache-2.0 |
google-code-export/pyglet | examples/apple_remote_demo.py | 27 | 7618 | '''
A silly demonstration of how to use the Apple remote.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet.gl import *
import sys
class MainWindow(pyglet.window.Window):
def __init__(self):
super(MainWindow, self).__init__(visible=False)
self.set_caption('Apple Remote Example')
# Look for the Apple Remote device.
remote = pyglet.input.get_apple_remote()
if not remote:
print 'Apple IR Remote not available.'
sys.exit(0)
# Open the remote in exclusive mode so that pressing the remote
# buttons does not activate Front Row, change volume, etc. while
# the remote is being used by our program.
remote.open(self, exclusive=True)
# We push this class onto the remote's event handler stack so that
# the on_button_press and on_button_release methods which we define
# below will be called for the appropriate remote events.
remote.push_handlers(self)
self.carousel = Carousel()
self.setup_opengl()
pyglet.clock.schedule_interval(self.update, 1/60.0)
# Event handler for Apple Remote button press events.
# The button parameter is a string specifying the button that was pressed.
def on_button_press(self, button):
print 'on_button_press', button
if button == 'up':
self.carousel.scroll_up()
elif button == 'down':
self.carousel.scroll_down()
elif button == 'left':
self.carousel.step_left()
elif button == 'right':
self.carousel.step_right()
elif button == 'left_hold':
self.carousel.rotate_left()
elif button == 'right_hold':
self.carousel.rotate_right()
elif button == 'select' or button == 'select_hold':
self.carousel.swap_left()
elif button == 'menu' or button == 'menu_hold':
self.carousel.swap_right()
# Event handler for Apple Remote button release events.
# The button parameter is a string specifying the button that was released.
def on_button_release(self, button):
print 'on_button_release', button
if button == 'left_hold':
self.carousel.stop_rotating()
elif button == 'right_hold':
self.carousel.stop_rotating()
def on_draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
gluLookAt(0,3,-12,0,3,0,0,1,0)
self.carousel.draw()
def on_resize(self, width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
aspect = width / float(height)
glFrustum(-1,1,-1.8/aspect,0.2/aspect,1,100)
glMatrixMode(GL_MODELVIEW)
return pyglet.event.EVENT_HANDLED
def setup_opengl(self):
glClearColor(1,1,1,1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def update(self, dt):
self.carousel.update(dt)
class Carousel:
"""A rotating collection of labeled tiles."""
def __init__(self):
self.num_tiles = 14
self.index = 0
self.float_index = 0.0
self.float_increment = 1.0 / self.num_tiles
self.angle = 0
self.index_diff = 0
self.is_rotating = False
self.speed = 4 * self.num_tiles
# Create the tiles in the carousel.
self.tiles = []
colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,205,205), (128,0,128), (255,165,0)]
class Tile:
value = 0
color = [255,255,255]
for i in range(self.num_tiles):
tile = Tile()
tile.value = i % 26
tile.color = colors[i%len(colors)]
self.tiles.append(tile)
# Create glyphs for the characters displayed on the tiles.
font = pyglet.font.load('Courier', 64)
self.glyphs = font.get_glyphs('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
def scroll_up(self):
"""Increment the character displayed on the main tile."""
self.tiles[self.index].value = (self.tiles[self.index].value + 1) % 26
def scroll_down(self):
"""Decrement the character displayed on the main tile."""
self.tiles[self.index].value = (self.tiles[self.index].value - 1) % 26
def swap_left(self):
"""Swap the two left tiles."""
i = self.index
j = (self.index - 1) % self.num_tiles
self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]
def swap_right(self):
"""Swap the two right tiles."""
i = self.index
j = (self.index + 1) % self.num_tiles
self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]
def step_left(self):
"""Rotate the carousel one tile to the left."""
self.direction = -1
self.index_diff += 1.0
def step_right(self):
"""Rotate the carousel one tile to the right."""
self.direction = 1
self.index_diff += 1.0
def rotate_left(self):
"""Start the carousel rotating continuously to the left."""
self.is_rotating = True
self.direction = -1
def rotate_right(self):
"""Start the carousel rotating continuously to the right."""
self.is_rotating = True
self.direction = 1
def stop_rotating(self):
"""Stop continuous rotation and make sure we end up at a tile location."""
self.index_diff = round(self.float_index) - self.float_index
if self.index_diff < 0:
self.direction = -1
else:
self.direction = 1
self.index_diff = abs(self.index_diff)
def draw(self):
glPushMatrix()
glRotatef(-self.angle, 0, 1, 0)
for i in range(self.num_tiles):
self.draw_tile(i)
glPopMatrix()
def draw_tile(self, index):
angle = index * (360.0 / self.num_tiles)
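# Tiles are spaced evenly around the ring; the rotation below places the
# tile, and the counter-rotation keeps its face tracking the carousel angle.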
glPushMatrix()
glRotatef(angle,0,1,0)
glTranslatef(0,0,-7.5)
glRotatef(-angle+self.angle,0,1,0)
texture = self.glyphs[self.tiles[index].value].texture
vertex_list = pyglet.graphics.vertex_list(4, 'v2f', ('t3f', texture.tex_coords))
vertex_list.vertices[:] = [-1, -1, 1, -1, 1, 1, -1, 1]
# Draw tile background.
glColor3ub(*self.tiles[index].color)
vertex_list.draw(GL_QUADS)
# Draw tile label.
glBindTexture(texture.target, texture.id)
glEnable(texture.target)
glColor3ub(0,0,0)
vertex_list.vertices[:] = [.8, -.8, -.8, -.8, -.8, .8, .8, .8]
glTranslatef(0,0,-.01)
vertex_list.draw(GL_QUADS)
glDisable(texture.target)
glPopMatrix()
def update(self, dt):
if self.is_rotating or self.index_diff:
increment = self.direction * self.speed * self.float_increment * dt
self.float_index = (self.float_index + increment) % self.num_tiles
if self.index_diff:
self.index_diff -= abs(increment)
if self.index_diff < 0:
self.index_diff = 0
self.float_index = round(self.float_index) % self.num_tiles
self.index = int(self.float_index)
self.is_rotating = False
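# Map the fractional tile index onto degrees: one full revolution of the
# carousel corresponds to num_tiles steps of float_index.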
self.angle = (self.float_index / self.num_tiles) * 360
if __name__ == '__main__':
window = MainWindow()
window.clear()
window.flip()
window.set_visible(True)
pyglet.app.run()
| bsd-3-clause |
kybriainfotech/iSocioCRM | addons/hr_holidays/hr_holidays.py | 159 | 33482 | # -*- coding: utf-8 -*-
##################################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "Leave Type"
def get_days(self, cr, uid, ids, employee_id, context=None):
result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
virtual_remaining_leaves=0)) for id in ids)
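# Example shape of the returned mapping, one entry per requested status id:
# {status_id: {'max_leaves': 10.0, 'leaves_taken': 3.0,
# 'remaining_leaves': 7.0, 'virtual_remaining_leaves': 5.0}}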
holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
('state', 'in', ['confirm', 'validate1', 'validate']),
('holiday_status_id', 'in', ids)
], context=context)
for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
status_dict = result[holiday.holiday_status_id.id]
if holiday.type == 'add':
status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['max_leaves'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] += holiday.number_of_days_temp
elif holiday.type == 'remove': # number of days is negative
status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['leaves_taken'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] -= holiday.number_of_days_temp
return result
def _user_left_days(self, cr, uid, ids, name, args, context=None):
employee_id = False
if context and 'employee_id' in context:
employee_id = context['employee_id']
else:
employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if employee_ids:
employee_id = employee_ids[0]
if employee_id:
res = self.get_days(cr, uid, ids, employee_id, context=context)
else:
res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
return res
_columns = {
'name': fields.char('Leave Type', size=64, required=True, translate=True),
'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
}
_defaults = {
'color_name': 'red',
'active': True,
}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('employee_id',False):
# leave counts are based on employee_id and would be inaccurate without the correct employee
return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if not record.limit:
name = name + (' (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
res.append((record.id, name))
return res
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave at the second level (if the leave type needs a second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
'''Create an entry in the resource calendar leaves object when a holiday is validated.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
'''Remove the resource calendar leaves entries when a holiday is cancelled/removed.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
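# e.g. '2014-07-15 08:00:00' -> '2014-07-16 14:00:00' yields 1.25 days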
return diff_day
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If there are no date set for date_to, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
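# e.g. an 8-hour leave within one day gives diff_day ~ 0.33, floored to 0,
# so number_of_days_temp is stored as 1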
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
context = dict(context, mail_create_nolog=True)
if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
return super(hr_holidays, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
return super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.holidays_first_validate_notificate(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state':'validate'})
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.name or _('Leave Request'),
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
leave_ids = []
for emp in obj_emp.browse(cr, uid, emp_ids):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=None))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
def holidays_confirm(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If a category that created several holidays, cancel all related
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
# Raising a warning gives more user-friendly feedback than the default constraint error
raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.'))
return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
# if this user is an hr.manager, they should also handle second validations
if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
dom = ['|'] + dom + [('state', '=', 'validate1')]
return dom
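# Illustrative note (added): OpenERP domains use prefix notation, so for an
# hr manager the domain built above reads
#   ['|', '&', ('state', '=', 'confirm'), ('employee_id', 'in', empids),
#    ('state', '=', 'validate1')]
# i.e. "(confirmed AND requested by one of my employees) OR waiting for
# second validation".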
def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
self.message_post(cr, uid, [obj.id],
_("Request approved, waiting second validation."), context=context)
class resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
}
class hr_employee(osv.osv):
_inherit="hr.employee"
def create(self, cr, uid, vals, context=None):
# don't pass the value of remaining leaves if it's 0 at creation time, otherwise it would trigger the inverse
# function _set_remaining_days, which the system may not be configured for. Note that we don't have this problem on
# write because the clients only send the fields that have been modified.
if 'remaining_leaves' in vals and not vals['remaining_leaves']:
del(vals['remaining_leaves'])
return super(hr_employee, self).create(cr, uid, vals, context=context)
def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
employee = self.browse(cr, uid, empl_id, context=context)
diff = value - employee.remaining_leaves
type_obj = self.pool.get('hr.holidays.status')
holiday_obj = self.pool.get('hr.holidays')
# Search for the relevant leave status
status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
if len(status_ids) != 1:
raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
status_id = status_ids and status_ids[0] or False
if not status_id:
return False
if diff > 0:
leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
elif diff < 0:
raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
else:
return False
for sig in ('confirm', 'validate', 'second_validate'):
holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
return True
def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
sum(h.number_of_days) as days,
h.employee_id
from
hr_holidays h
join hr_holidays_status s on (s.id=h.holiday_status_id)
where
h.state='validate' and
s.limit=False and
h.employee_id in %s
group by h.employee_id""", (tuple(ids),))
res = cr.dictfetchall()
remaining = {}
for r in res:
remaining[r['employee_id']] = r['days']
for employee_id in ids:
if not remaining.get(employee_id):
remaining[employee_id] = 0.0
return remaining
def _get_leave_status(self, cr, uid, ids, name, args, context=None):
holidays_obj = self.pool.get('hr.holidays')
holidays_id = holidays_obj.search(cr, uid,
[('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
context=context)
result = {}
for id in ids:
result[id] = {
'current_leave_state': False,
'current_leave_id': False,
'leave_date_from':False,
'leave_date_to':False,
}
for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
result[holiday.employee_id.id]['current_leave_state'] = holiday.state
result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
return result
def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
Holidays = self.pool['hr.holidays']
return {
employee_id: Holidays.search_count(cr,uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
for employee_id in ids
}
_columns = {
'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Feandil/webapp-config | WebappConfig/filetype.py | 1 | 6545 | #!/usr/bin/python -O
#
# /usr/sbin/webapp-config
# Python script for managing the deployment of web-based
# applications
#
# Originally written for the Gentoo Linux distribution
#
# Copyright (c) 1999-2007 Authors
# Released under v2 of the GNU GPL
#
# Author(s) Stuart Herbert
# Renat Lumpau <rl03@gentoo.org>
# Gunnar Wrobel <wrobel@gentoo.org>
#
# ========================================================================
''' A class that returns the file type for a given path.'''
# ========================================================================
# Dependencies
# ------------------------------------------------------------------------
import re
from WebappConfig.debug import OUT
# ========================================================================
# Handler for File Types
# ------------------------------------------------------------------------
class FileType:
'''
A helper class to determine file and directory types.
The file type is determined based on two initial lists:
- a list of all files and directories owned by the config user
- a list of all files and directories owned by the server user
'''
def __init__(self,
config_owned,
server_owned,
server_owned_r,
virtual_files = 'virtual',
default_dirs = 'default-owned'):
'''
Populates the cache with the file types as provided by the
ebuild.
'''
self.__cache = {}
# Validity of entries are checked by the command line parser
self.__virtual_files = virtual_files
self.__default_dirs = default_dirs
# populate cache
for i in config_owned:
OUT.debug('Adding config-owned file', 8)
self.__cache[self.__fix(i)] = 'config-owned'
for i in server_owned:
if self.__fix(i) in list(self.__cache.keys()):
OUT.debug('Adding config-server-owned file', 8)
self.__cache[self.__fix(i)] = 'config-server-owned'
else:
OUT.debug('Adding server-owned file', 8)
self.__cache[self.__fix(i)] = 'server-owned'
for i in server_owned_r:
if self.__fix(i) in self.__cache.keys():
OUT.die('{} is at the same time recursively server-owned and {}: This case is not supported.'.format(self.__fix(i), self.__cache[self.__fix(i)]))
else:
OUT.debug('Adding recursively server-owned file', 8)
self.__cache[self.__fix(i).strip()] = 'server-owned-dir'
def filetype(self, filename, parent_type = ''):
'''
Inputs:
filename - the file that we need a decision about
parent_type - the type of the parent directory
returns one of these:
server-owned - file needs to be owned by the webserver user
(and needs to be a local copy)
config-owned - file needs to be owned by the config user
(and needs to be a local copy)
config-server-owned - Both the previous cases at the same time
virtual - we do not need a local copy of the file
NOTE:
Use get_dirtype(directory) for directories
NOTE:
the user can use --virtual-files on the command-line to change
what type virtual files are really reported as
'''
# remove any whitespace and trailing /
filename = self.__fix(filename)
# look for config-protected files in the cache
if filename in list(self.__cache.keys()):
# Check if parent type is recursive
if parent_type == 'server-owned-dir':
new_type = self.__cache[filename]
if new_type == 'config-owned':
return 'config-server-owned'
if new_type == 'server-owned':
OUT.warn('Configuration error: {} is marked server-owned twice'.format(filename))
return 'server-owned'
return self.__cache[filename]
# Check if parent type is recursive
if parent_type == 'server-owned-dir':
return 'server-owned'
# unspecified file (and thus virtual)
return self.__virtual_files
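# Hedged usage sketch (hypothetical file lists, not from any real ebuild):
#   >>> ft = FileType(['conf/config.php'], ['data/cache'], [])
#   >>> ft.filetype('conf/config.php')
#   'config-owned'
#   >>> ft.filetype('htdocs/index.php')   # unknown files fall back
#   'virtual'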
def dirtype(self, directory, parent_type = ''):
'''
Inputs:
directory - the directory that we need a decision about
parent_type - the type of the parent directory
returns one of these:
server-owned - dir needs to be owned by the webserver user
config-owned - dir needs to be owned by the config user
config-server-owned - Both the previous cases at the same time
server-owned-dir - Directory that contains file/dirs to be owned
by the webserver user
default-owned - we need a local copy, owned by root
NOTE:
Use get_filetype(filename) for files
NOTE:
the user can use --default-dirs on the command-line to change
what type default directories are really reported as
'''
# remove any whitespace and trailing /
directory = self.__fix(directory)
# check the cache
if directory in list(self.__cache.keys()):
# Check if parent type is recursive
if parent_type == 'server-owned-dir':
new_type = self.__cache[directory]
if new_type == 'config-owned':
OUT.die('This version does not support config dirs')
if new_type == 'server-owned':
OUT.warn('Configuration error: {} is marked server-owned twice'.format(directory))
return 'server-owned-dir'
return self.__cache[directory]
# Check if parent type is recursive
if parent_type == 'server-owned-dir':
return 'server-owned-dir'
# unspecified directories are default-owned
return self.__default_dirs
def __fix(self, filename):
''' Removes trailing slash and whitespace from a path '''
filename = filename.strip()
while filename and filename[-1] == '/':
filename = filename[:-1]
# Fix double slashes
filename = re.compile('/+').sub('/', filename)
return filename
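# Normalization sketch (illustrative; name-mangled access shown only to
# demonstrate the private helper):
#   >>> ft._FileType__fix('  /usr//share/app/ ')
#   '/usr/share/app'
# i.e. surrounding whitespace is stripped, trailing slashes are removed,
# and duplicated slashes are collapsed before cache lookups.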
| gpl-2.0 |
GeotrekCE/Geotrek-admin | geotrek/trekking/migrations/0005_auto_20181219_1524.py | 2 | 1413 | # Generated by Django 1.11.14 on 2018-12-19 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trekking', '0004_auto_20181127_1551'),
]
operations = [
migrations.RunSQL(
"DROP VIEW IF EXISTS o_v_poi, o_v_itineraire;",
migrations.RunSQL.noop
),
migrations.AlterField(
model_name='poi',
name='eid',
field=models.CharField(blank=True, db_column='id_externe', max_length=1024, null=True, verbose_name='External id'),
),
migrations.AlterField(
model_name='service',
name='eid',
field=models.CharField(blank=True, db_column='id_externe', max_length=1024, null=True, verbose_name='External id'),
),
migrations.AlterField(
model_name='trek',
name='eid',
field=models.CharField(blank=True, db_column='id_externe', max_length=1024, null=True, verbose_name='External id'),
),
migrations.AlterField(
model_name='trek',
name='eid2',
field=models.CharField(blank=True, db_column='id_externe2', max_length=1024, null=True, verbose_name='Second external id'),
),
migrations.RunSQL(
migrations.RunSQL.noop,
"DROP VIEW IF EXISTS o_v_poi, o_v_itineraire;"
),
]
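# Note (added for clarity, not part of the generated file): the two RunSQL
# operations bracket the AlterField steps so the dependent SQL views are
# dropped before the columns change in either direction -- the first pair
# drops the views on upgrade (no-op on downgrade), while the last pair
# drops them on downgrade (no-op on upgrade).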
| bsd-2-clause |
asajeffrey/servo | tests/wpt/web-platform-tests/webdriver/tests/refresh/user_prompts.py | 26 | 4131 | # META: timeout=long
import pytest
from webdriver.error import StaleElementReferenceException
from tests.support.inline import inline
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def refresh(session):
return session.transport.send(
"POST", "session/{session_id}/refresh".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<div id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = refresh(session)
assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
with pytest.raises(StaleElementReferenceException):
element.property("id")
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<div id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = refresh(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert element.property("id") == "foo"
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<div id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = refresh(session)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert element.property("id") == "foo"
return check_user_prompt_not_closed_but_exception
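# Summary sketch (inferred from the parametrized cases below, not from the
# WebDriver spec text itself): the unhandledPromptBehavior capability picks
# which fixture applies --
#   "accept" / "dismiss"  -> prompt closed, refresh succeeds
#   "... and notify"      -> prompt closed, "unexpected alert open" error
#   "ignore"              -> prompt kept open, "unexpected alert open" error
# and the default (no capability) behaves like "dismiss and notify".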
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window has been reloaded
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window has been reloaded
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
borisroman/vdsm | contrib/ivdsm.py | 5 | 1711 | #
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import pwd
import logging
import threading
import sys
def runVdsm(baseDir="/usr/share/vdsm/", configFilePath="/etc/vdsm/vdsm.conf",
loggerConfigurationPath='/etc/vdsm/logger.conf'):
"""
Start a VDSM instance in a new thread.
Return a tuple ``(ClientIF, Thread Running VDSM)``
"""
if pwd.getpwuid(os.geteuid())[0] != "vdsm":
raise Exception("You can't run vdsm with any user other then 'vdsm'.")
sys.path.append(baseDir)
from vdsm.config import config
from logging import config as lconfig
import clientIF
loggerConfFile = loggerConfigurationPath
lconfig.fileConfig(loggerConfFile, disable_existing_loggers=False)
log = logging.getLogger('vds')
config.read(configFilePath)
cif = clientIF.clientIF(log)
t = threading.Thread(target=cif.serve)
t.setDaemon(True)
t.start()
return (cif, t)
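# Hedged usage sketch (paths are the function defaults):
#   cif, thread = runVdsm()
#   # ... interact with the clientIF instance via `cif` ...
# The serving thread is started as a daemon, so the interpreter is free to
# exit while it is still running.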
| gpl-2.0 |
jymannob/Sick-Beard | lib/imdb/parser/sql/alchemyadapter.py | 57 | 19295 | """
parser.sql.alchemyadapter module (imdb.parser.sql package).
This module adapts the SQLAlchemy ORM to the internal mechanism.
Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
import logging
from sqlalchemy import *
from sqlalchemy import schema
try: from sqlalchemy import exc # 0.5
except ImportError: from sqlalchemy import exceptions as exc # 0.4
_alchemy_logger = logging.getLogger('imdbpy.parser.sql.alchemy')
try:
import migrate.changeset
HAS_MC = True
except ImportError:
HAS_MC = False
_alchemy_logger.warn('Unable to import migrate.changeset: Foreign ' \
'Keys will not be created.')
from imdb._exceptions import IMDbDataAccessError
from dbschema import *
# Used to convert table and column names.
re_upper = re.compile(r'([A-Z])')
# XXX: I'm not sure at all that this is the best method to connect
# to the database and bind that connection to every table.
metadata = MetaData()
# Maps our placeholders to SQLAlchemy's column types.
MAP_COLS = {
INTCOL: Integer,
UNICODECOL: UnicodeText,
STRINGCOL: String
}
class NotFoundError(IMDbDataAccessError):
"""Exception raised when Table.get(id) returns no value."""
pass
def _renameTable(tname):
"""Build the name of a table, as done by SQLObject."""
tname = re_upper.sub(r'_\1', tname)
if tname.startswith('_'):
tname = tname[1:]
return tname.lower()
def _renameColumn(cname):
"""Build the name of a column, as done by SQLObject."""
cname = cname.replace('ID', 'Id')
return _renameTable(cname)
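# Naming-convention sketch (illustrative, mirroring SQLObject's scheme):
#   >>> _renameTable('PersonInfo')
#   'person_info'
#   >>> _renameColumn('personID')
#   'person_id'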
class DNNameObj(object):
"""Used to access table.sqlmeta.columns[column].dbName (a string)."""
def __init__(self, dbName):
self.dbName = dbName
def __repr__(self):
return '<DNNameObj(dbName=%s) [id=%s]>' % (self.dbName, id(self))
class DNNameDict(object):
"""Used to access table.sqlmeta.columns (a dictionary)."""
def __init__(self, colMap):
self.colMap = colMap
def __getitem__(self, key):
return DNNameObj(self.colMap[key])
def __repr__(self):
return '<DNNameDict(colMap=%s) [id=%s]>' % (self.colMap, id(self))
class SQLMetaAdapter(object):
"""Used to access table.sqlmeta (an object with .table, .columns and
.idName attributes)."""
def __init__(self, table, colMap=None):
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def __getattr__(self, name):
if name == 'table':
return getattr(self.table, name)
if name == 'columns':
return DNNameDict(self.colMap)
if name == 'idName':
return self.colMap.get('id', 'id')
return None
def __repr__(self):
return '<SQLMetaAdapter(table=%s, colMap=%s) [id=%s]>' % \
(repr(self.table), repr(self.colMap), id(self))
class QAdapter(object):
"""Used to access table.q attribute (remapped to SQLAlchemy table.c)."""
def __init__(self, table, colMap=None):
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def __getattr__(self, name):
try: return getattr(self.table.c, self.colMap[name])
except KeyError, e: raise AttributeError("unable to get '%s'" % name)
def __repr__(self):
return '<QAdapter(table=%s, colMap=%s) [id=%s]>' % \
(repr(self.table), repr(self.colMap), id(self))
class RowAdapter(object):
"""Adapter for a SQLAlchemy RowProxy object."""
def __init__(self, row, table, colMap=None):
self.row = row
# FIXME: it's OBSCENE that 'table' should be passed from
# TableAdapter through ResultAdapter only to land here,
# where it's used to directly update a row item.
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
self.colMapKeys = colMap.keys()
def __getattr__(self, name):
try: return getattr(self.row, self.colMap[name])
except KeyError, e: raise AttributeError("unable to get '%s'" % name)
def __setattr__(self, name, value):
# FIXME: I can't even think about how much performances suffer,
# for this horrible hack (and it's used so rarely...)
# For sure something like a "property" to map column names
# to getter/setter functions would be much better, but it's
# not possible (or at least not easy) to build them for a
# single instance.
if name in self.__dict__.get('colMapKeys', ()):
# Trying to update a value in the database.
row = self.__dict__['row']
table = self.__dict__['table']
colMap = self.__dict__['colMap']
params = {colMap[name]: value}
table.update(table.c.id==row.id).execute(**params)
# XXX: minor bug: after a value is assigned with the
# 'rowAdapterInstance.colName = value' syntax, for some
# reason rowAdapterInstance.colName still returns the
# previous value (even if the database is updated).
# Fix it? I'm not even sure it's ever used.
return
# For every other attribute.
object.__setattr__(self, name, value)
def __repr__(self):
return '<RowAdapter(row=%s, table=%s, colMap=%s) [id=%s]>' % \
(repr(self.row), repr(self.table), repr(self.colMap), id(self))
class ResultAdapter(object):
"""Adapter for a SQLAlchemy ResultProxy object."""
def __init__(self, result, table, colMap=None):
self.result = result
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def count(self):
return len(self)
def __len__(self):
# FIXME: why sqlite returns -1? (that's wrooong!)
if self.result.rowcount == -1:
return 0
return self.result.rowcount
def __getitem__(self, key):
res = list(self.result)[key]
if not isinstance(key, slice):
# A single item.
return RowAdapter(res, self.table, colMap=self.colMap)
else:
# A (possible empty) list of items.
return [RowAdapter(x, self.table, colMap=self.colMap)
for x in res]
def __iter__(self):
for item in self.result:
yield RowAdapter(item, self.table, colMap=self.colMap)
def __repr__(self):
return '<ResultAdapter(result=%s, table=%s, colMap=%s) [id=%s]>' % \
(repr(self.result), repr(self.table),
repr(self.colMap), id(self))
class TableAdapter(object):
"""Adapter for a SQLAlchemy Table object, to mimic a SQLObject class."""
def __init__(self, table, uri=None):
"""Initialize a TableAdapter object."""
self._imdbpySchema = table
self._imdbpyName = table.name
self.connectionURI = uri
self.colMap = {}
columns = []
for col in table.cols:
# Column's parameters.
params = {'nullable': True}
params.update(col.params)
if col.name == 'id':
params['primary_key'] = True
if 'notNone' in params:
params['nullable'] = not params['notNone']
del params['notNone']
cname = _renameColumn(col.name)
self.colMap[col.name] = cname
colClass = MAP_COLS[col.kind]
colKindParams = {}
if 'length' in params:
colKindParams['length'] = params['length']
del params['length']
elif colClass is UnicodeText and col.index:
# XXX: limit length for UNICODECOLs that will have an index.
# this can result in name.name and title.title truncations!
colClass = Unicode
# Should work for most of the database servers.
length = 511
if self.connectionURI:
if self.connectionURI.startswith('mysql'):
# To stay compatible with MySQL 4.x.
length = 255
colKindParams['length'] = length
elif self._imdbpyName == 'PersonInfo' and col.name == 'info':
if self.connectionURI:
if self.connectionURI.startswith('ibm'):
# There are some entries longer than 32KB.
colClass = CLOB
# I really do hope that this space isn't wasted
# for each other shorter entry... <g>
colKindParams['length'] = 68*1024
colKind = colClass(**colKindParams)
if 'alternateID' in params:
# There's no need to handle them here.
del params['alternateID']
# Create a column.
colObj = Column(cname, colKind, **params)
columns.append(colObj)
self.tableName = _renameTable(table.name)
# Create the table.
self.table = Table(self.tableName, metadata, *columns)
self._ta_insert = self.table.insert()
self._ta_select = self.table.select
# Adapters for special attributes.
self.q = QAdapter(self.table, colMap=self.colMap)
self.sqlmeta = SQLMetaAdapter(self.table, colMap=self.colMap)
def select(self, conditions=None):
"""Return a list of results."""
result = self._ta_select(conditions).execute()
return ResultAdapter(result, self.table, colMap=self.colMap)
def get(self, theID):
"""Get an object given its ID."""
result = self.select(self.table.c.id == theID)
#if not result:
# raise NotFoundError, 'no data for ID %s' % theID
# FIXME: isn't this a bit risky? We can't check len(result),
# because sqlite returns -1...
# What about converting it to a list and getting the first item?
try:
return result[0]
except (KeyError, IndexError):  # list indexing above raises IndexError, not KeyError
raise NotFoundError('no data for ID %s' % theID)
def dropTable(self, checkfirst=True):
"""Drop the table."""
dropParams = {'checkfirst': checkfirst}
# Guess what? Another work-around for a ibm_db bug.
if self.table.bind.engine.url.drivername.startswith('ibm_db'):
del dropParams['checkfirst']
try:
self.table.drop(**dropParams)
except exc.ProgrammingError:
# As above: re-raise the exception, but only if it's not ibm_db.
if not self.table.bind.engine.url.drivername.startswith('ibm_db'):
raise
def createTable(self, checkfirst=True):
"""Create the table."""
self.table.create(checkfirst=checkfirst)
# Create indexes for alternateID columns (other indexes will be
# created later, at explicit request for performances reasons).
for col in self._imdbpySchema.cols:
if col.name == 'id':
continue
if col.params.get('alternateID', False):
self._createIndex(col, checkfirst=checkfirst)
def _createIndex(self, col, checkfirst=True):
"""Create an index for a given (schema) column."""
# XXX: indexLen is ignored in SQLAlchemy, and that means that
# indexes will be over the whole 255 chars strings...
# NOTE: don't use a dot as a separator, or DB2 will do
# nasty things.
idx_name = '%s_%s' % (self.table.name, col.index or col.name)
if checkfirst:
for index in self.table.indexes:
if index.name == idx_name:
return
idx = Index(idx_name, getattr(self.table.c, self.colMap[col.name]))
# XXX: beware that exc.OperationalError can be raised, in some
# strange circumstances; that's why the index name doesn't
# follow the SQLObject convention, but includes the table name:
# sqlite, for example, expects index names to be unique at
# db-level.
try:
idx.create()
except exc.OperationalError, e:
_alchemy_logger.warn('Skipping creation of the %s.%s index: %s' %
(self.sqlmeta.table, col.name, e))
def addIndexes(self, ifNotExists=True):
"""Create all required indexes."""
for col in self._imdbpySchema.cols:
if col.index:
self._createIndex(col, checkfirst=ifNotExists)
def addForeignKeys(self, mapTables, ifNotExists=True):
"""Create all required foreign keys."""
if not HAS_MC:
return
# It seems that there's no reason to prevent the creation of
# indexes for columns with FK constrains: if there's already
# an index, the FK index is not created.
countCols = 0
for col in self._imdbpySchema.cols:
countCols += 1
if not col.foreignKey:
continue
fks = col.foreignKey.split('.', 1)
foreignTableName = fks[0]
if len(fks) == 2:
foreignColName = fks[1]
else:
foreignColName = 'id'
foreignColName = mapTables[foreignTableName].colMap.get(
foreignColName, foreignColName)
thisColName = self.colMap.get(col.name, col.name)
thisCol = self.table.columns[thisColName]
foreignTable = mapTables[foreignTableName].table
foreignCol = getattr(foreignTable.c, foreignColName)
# Need to explicitly set an unique name, otherwise it will
# explode, if two cols points to the same table.
fkName = 'fk_%s_%s_%d' % (foreignTable.name, foreignColName,
countCols)
constrain = migrate.changeset.ForeignKeyConstraint([thisCol],
[foreignCol],
name=fkName)
try:
constrain.create()
except exc.OperationalError:
continue
def __call__(self, *args, **kwds):
"""To insert a new row with the syntax: TableClass(key=value, ...)"""
taArgs = {}
for key, value in kwds.items():
taArgs[self.colMap.get(key, key)] = value
self._ta_insert.execute(*args, **taArgs)
def __repr__(self):
return '<TableAdapter(table=%s) [id=%s]>' % (repr(self.table), id(self))
# Module-level "cache" for SQLObject classes, to prevent
# "Table 'tableName' is already defined for this MetaData instance" errors,
# when two or more connections to the database are made.
# XXX: is this the best way to act?
TABLES_REPOSITORY = {}
def getDBTables(uri=None):
"""Return a list of TableAdapter objects to be used to access the
database through the SQLAlchemy ORM. The connection uri is optional, and
can be used to tailor the db schema to specific needs."""
DB_TABLES = []
for table in DB_SCHEMA:
if table.name in TABLES_REPOSITORY:
DB_TABLES.append(TABLES_REPOSITORY[table.name])
continue
tableAdapter = TableAdapter(table, uri)
DB_TABLES.append(tableAdapter)
TABLES_REPOSITORY[table.name] = tableAdapter
return DB_TABLES
# Functions used to emulate SQLObject's logical operators.
def AND(*params):
"""Emulate SQLObject's AND."""
return and_(*params)
def OR(*params):
"""Emulate SQLObject's OR."""
return or_(*params)
def IN(item, inList):
"""Emulate SQLObject's IN."""
if not isinstance(item, schema.Column):
return OR(*[x == item for x in inList])
else:
return item.in_(inList)
def ISNULL(x):
"""Emulate SQLObject's ISNULL."""
# XXX: Should we use null()? Can null() be a global instance?
# XXX: Is it safe to test None with the == operator, in this case?
return x == None
def ISNOTNULL(x):
"""Emulate SQLObject's ISNOTNULL."""
return x != None
def CONTAINSSTRING(expr, pattern):
"""Emulate SQLObject's CONTAINSSTRING."""
return expr.like('%%%s%%' % pattern)
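# Combined usage sketch (hypothetical table/columns, for illustration only):
#   clause = AND(IN(table.q.kindID, [1, 2]),
#                ISNOTNULL(table.q.title),
#                CONTAINSSTRING(table.q.title, 'matrix'))
#   results = table.select(clause)
# which SQLAlchemy renders roughly as
#   kind_id IN (1, 2) AND title IS NOT NULL AND title LIKE '%matrix%'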
def toUTF8(s):
"""For some strange reason, sometimes SQLObject wants utf8 strings
instead of unicode; with SQLAlchemy we just return the unicode text."""
return s
class _AlchemyConnection(object):
"""A proxy for the connection object, required since _ConnectionFairy
uses __slots__."""
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def setConnection(uri, tables, encoding='utf8', debug=False):
"""Set connection for every table."""
# FIXME: why on earth MySQL requires an additional parameter,
# is well beyond my understanding...
if uri.startswith('mysql'):
if '?' in uri:
uri += '&'
else:
uri += '?'
uri += 'charset=%s' % encoding
params = {'encoding': encoding}
if debug:
params['echo'] = True
if uri.startswith('ibm_db'):
# Try to work-around a possible bug of the ibm_db DB2 driver.
params['convert_unicode'] = True
# XXX: is this the best way to connect?
engine = create_engine(uri, **params)
metadata.bind = engine
eng_conn = engine.connect()
if uri.startswith('sqlite'):
major = sys.version_info[0]
minor = sys.version_info[1]
if major > 2 or (major == 2 and minor > 5):
eng_conn.connection.connection.text_factory = str
# XXX: OH MY, THAT'S A MESS!
# We need to return a "connection" object, with the .dbName
# attribute set to the db engine name (e.g. "mysql"), .paramstyle
# set to the style of the parameters for query() calls, and the
# .module attribute set to a module (?) with .OperationalError and
# .IntegrityError attributes.
# Another attribute of "connection" is the getConnection() function,
# used to return an object with a .cursor() method.
connection = _AlchemyConnection(eng_conn.connection)
paramstyle = eng_conn.dialect.paramstyle
connection.module = eng_conn.dialect.dbapi
connection.paramstyle = paramstyle
connection.getConnection = lambda: connection.connection
connection.dbName = engine.url.drivername
return connection
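# Hedged connection sketch (the URI is an example value):
#   DB_TABLES = getDBTables('sqlite:///imdb.db')
#   conn = setConnection('sqlite:///imdb.db', DB_TABLES)
#   conn.dbName        # -> 'sqlite'
# getDBTables() should receive the same URI, so the schema tweaks (index
# lengths, CLOB columns, ...) match the engine setConnection binds to.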
| gpl-3.0 |
jasminelou/ggame-tutorials | ggame/pygamedeps.py | 228 | 12266 | def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
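# Usage sketch (doctest-style, illustrative):
#   >>> module_exists('sys')
#   True
#   >>> module_exists('no_such_module')
#   False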
# PYTHON 3 and PYGAME DEPENDENCIES
if module_exists('pygame'):
import pygame
class _body(object):
def __init__(self):
self.events = {}
def appendChild(self, obj):
self.child = obj
def bind(self, evt, action):
self.events[evt] = action
print("Binding {} to {}".format(evt, action))
class _document(object):
def __init__(self):
self.body = _body()
class _window(object):
def __init__(self):
self.document = _document()
self.animatex = 0
def open(self, s1, s2):
return self
def requestAnimationFrame(self, target):
if self.animatex < 10:
self.animatex += 1
target('dummy')
print("Animation frame")
class _Container(object):
def __init__(self):
pass
def destroy(self):
pass
class _Renderer(object):
def __init__(self, x, y, argsdict):
self.x = x
self.y = y
self.argsdict = argsdict
self.view = 'view'
print("Rendering created with {}x{} area".format(x, y))
def render(self, stage):
pass
class _GFX(object):
def __init__(self):
self.Container = _Container
self.autoDetectRenderer = _Renderer
window = _window()
GFX = _GFX()
#document = object()
def JSConstructor(cls):
return cls
def JSObject(obj):
return obj
class _GFX_Rectangle(pygame.Rect):
pass
GFX_Rectangle = _GFX_Rectangle
class _Texture(object):
def __init__(self, img='', crossdomain=False):
self.name = img
if not img == '':
self.img = pygame.image.load(img) # pygame surface
self.basewidth = self.img.get_width()
self.baseheight = self.img.get_height()
self.width = self.basewidth
self.height = self.baseheight
print("Texture from image {}, {}x{} pixels".format(img, self.basewidth, self.baseheight))
self.baserect = _GFX_Rectangle(0, 0, self.basewidth, self.baseheight)
self.framerect = self.baserect
@classmethod
def fromTexture(cls, texture, frame):
inst = cls()
inst.img = pygame.Surface((frame.width, frame.height))
inst.img.blit(texture.img, (0,0), frame)
inst.name = texture.name
inst.basewidth = texture.basewidth
inst.baseheight = texture.baseheight
inst.baserect = texture.baserect
inst.framerect = frame
inst.width = frame.width
inst.height = frame.height
print("Texture from base texture {}, {}x{} subframe {}x{}".format(inst.name, inst.basewidth, inst.baseheight, inst.framerect.width, inst.framerect.height))
return inst
def destroy(self):
try:
self.img.close()
print("Destroying an image")
except:
print("Destroying a non-image")
GFX_Texture = _Texture.fromTexture
GFX_Texture_fromImage = _Texture
class vector(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise KeyError
class GFX_Sprite(object):
def __init__(self, texture):
self.basetexture = texture
self.texture = self.basetexture
self.visible = True
self.pos = vector(0,0)
self.anch = vector(0,0)
self.scal = vector(1.0, 1.0)
self.width = texture.width
self.height = texture.height
self.rotation = 0.0
@property
def position(self):
return self.pos
@position.setter
def position(self, value):
self.pos.x = value[0]
self.pos.y = value[1]
@property
def anchor(self):
return self.anch
@anchor.setter
def anchor(self, value):
self.anch.x = value[0]
self.anch.y = value[1]
@property
def scale(self):
return self.scal
@scale.setter
def scale(self, value):
self.scal.x = value[0]
self.scal.y = value[1]
def destroy(self):
pass
class _GFX_Graphics(object):
def __init__(self):
self.clear()
def clear(self):
self.cleared = True
self.visible = True
self.lwidth = None
self.color = None
self.alpha = None
self.fillcolor = None
self.fillalpha = None
self.x = None
self.y = None
self.rwidth = None
self.rheight = None
self.radius = None
self.ehw = None
self.ehh = None
self.xto = None
self.yto = None
self.jpath = None
self.width = None
self.height = None
self.position = vector(0,0)
def destroy(self):
self.clear()
def clone(self):
clone = type(self)()
clone.cleared = self.cleared
clone.visible = self.visible
clone.lwidth = self.lwidth
clone.color = self.color
clone.alpha = self.alpha
clone.fillalpha = self.fillalpha
clone.fillcolor = self.fillcolor
clone.x = self.x
clone.y = self.y
clone.rwidth = self.rwidth
clone.rheight = self.rheight
clone.radius = self.radius
clone.ehw = self.ehw
clone.ehh = self.ehh
clone.xto = self.xto
clone.yto = self.yto
clone.jpath = self.jpath
clone.width = self.width
clone.height = self.height
clone.position = self.position
return clone
def lineStyle(self, width, color, alpha):
self.lwidth = width  # line width; plain 'width' would clash with the sprite extent
self.color = color
self.alpha = alpha
def beginFill(self, color, alpha):
self.fillcolor = color
self.fillalpha = alpha
def drawRect(self, x, y, w, h):
self.x = x
self.y = y
self.position = vector(x,y)
self.rwidth = w
self.rheight = h
self.width = w
self.height = h
self.cleared = False
print("Rectangle {}x{} at {},{}".format(w,h,x,y))
return self
def drawCircle(self, x, y, radius):
self.x = x
self.y = y
self.position = vector(x,y)
self.radius = radius
self.cleared = False
self.width = radius*2
self.height = radius*2
print("Circle, radius {} at {},{}".format(radius,x,y))
return self
def drawEllipse(self, x, y, hw, hh):
self.x = x
self.y = y
self.position = vector(x,y)
self.ehw = hw
self.ehh = hh
self.width = hw*2
self.height = hh*2
self.cleared = False
print("Ellipse, {}x{} at {},{}".format(hw,hh,x,y))
return self
def drawPolygon(self, jpath):
self.jpath = jpath
self.cleared = False
self.position = vector(jpath[0],jpath[1])
x = []
y = []
for i in range(0,len(jpath)-1,2):
x.append(jpath[i])
y.append(jpath[i+1])
self.width = max(x)-min(x)
self.height = max(y)-min(y)
print("Polygon")
return self
def moveTo(self, x, y):
self.x = x
self.y = y
self.position = vector(x,y)
return self
def lineTo(self, x, y):
self.xto = x
self.yto = y
self.width = abs(x)
self.height = abs(y)
self.cleared = False
print("Line from {},{} to {},{}".format(self.x, self.y, x, y))
return self
class _GFX_Text(object):
def __init__(self, text, styledict):
self.text = text
self.styledict = styledict
self.alpha = None
self.visible = None
self.width = 99
self.height = 99
self.position = vector(0,0)
print("Text: {} in {}".format(text, styledict['font']))
def clone(self):
clone = type(self)(self.text, self.styledict)
return clone
def destroy(self):
self.text = ''
GFX_Text = _GFX_Text
_globalGraphics = _GFX_Graphics()
GFX_Graphics = _globalGraphics
def GFX_DetectRenderer():
pass
class _SND_all(object):
def __init__(self):
pass
def stop(self):
print("Stopping all sounds")
class _SND(object):
def __init__(self):
self.all = _SND_all
SND = _SND()
class _SND_Sound(object):
def __init__(self, url):
self.url = url
print("Creating sound object {}".format(url))
def load(self):
pass
def play(self):
print("Playing sound object {}".format(self.url))
SND_Sound = _SND_Sound
class HwEvent(object):
evtmap = {2: 'keydown', 3: 'keyup', 4: 'mousemove', 5: 'mousedown', 6: 'mouseup'}
keymap = {304:16,
303:16,
306:17,
308:18,
301:20,
276:37,
273:38,
275:39,
274:40,
97:65,
98:66,
99:67,
100:68,
101:69,
102:70,
103:71,
104:72,
105:73,
106:74,
107:75,
108:76,
109:77,
110:78,
111:79,
112:80,
113:81,
114:82,
115:83,
116:84,
117:85,
118:86,
119:87,
120:88,
121:89,
122:90,
282:112,
283:113,
284:114,
285:115,
286:116,
287:117,
288:118,
289:119,
290:120,
291:121,
292:122,
293:123,
59:186,
61:187,
44:188,
46:190,
45:189,
47:191,
96:192,
92:220,
91:219,
93:221,
39:222}
def __init__(self, pevent):
self.type = HwEvent.evtmap.get(pevent.type, None)
if self.type in ['keydown', 'keyup']:
self.keyCode = HwEvent.keymap.get(pevent.key, pevent.key)
elif self.type in ['mousemove', 'mousedown', 'mouseup']:
self.wheelDelta = 0
if self.type != 'mousemove' and pevent.button == 5:
if self.type == 'mousedown':
self.wheelDelta = 1
else:
self.wheelDelta = -1
self.clientX = pevent.pos[0]
self.clientY = pevent.pos[1]
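# Mapping sketch (constants assumed from pygame 1.x): evtmap keys are
# pygame event types (2=KEYDOWN, 3=KEYUP, 4=MOUSEMOTION, 5=MOUSEBUTTONDOWN,
# 6=MOUSEBUTTONUP) and keymap translates pygame key codes into JavaScript
# keyCodes, e.g. pygame.K_LEFT (276) -> 37, so browser-oriented ggame code
# receives DOM-style events from this desktop shim.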
class GFX_Window(object):
def __init__(self, width, height, onclose):
pygame.init()
self._w = pygame.display.set_mode((width, height))
self.clock = pygame.time.Clock()
self.sprites = []
self.animatestarted = False
self.bindings = {}
self.onclose = onclose
self.stop = False
#self._w = window.open("", "")
#self._stage = JSConstructor(GFX.Container)()
#self._renderer = GFX.autoDetectRenderer(width, height, {'transparent':True})
#self._w.document.body.appendChild(self._renderer.view)
#self._w.onunload = onclose
def bind(self, evtspec, callback):
self.bindings[evtspec] = callback
def add(self, obj):
self.sprites.append(obj)
#self._stage.addChild(obj)
def remove(self, obj):
self.sprites.remove(obj)
#self._stage.removeChild(obj)
def animate(self, stepcallback):
# do stuff required to display
self._w.fill(pygame.Color('white'))
for s in self.sprites:
self._w.blit(s.texture.img, (s.pos.x, s.pos.y))
pygame.display.flip()
events = pygame.event.get()
for event in events:
hwevent = HwEvent(event)
if hwevent.type != None:
self.bindings[hwevent.type](hwevent)
if event.type == 12:
print("Close!")
self.onclose()
self.destroy()
self.stop = True
if not self.animatestarted:
self.animatestarted = True
while not self.stop:
self.clock.tick_busy_loop(30)
stepcallback(0)
#self._renderer.render(self._stage)
#self._w.requestAnimationFrame(stepcallback)
def destroy(self):
pass
#SND.all().stop()
#self._stage.destroy()
| mit |
66maintainer/66coin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
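# Input/output sketch (illustrative): given xgettext output such as
#   msgid "Loading addresses..."
#   msgstr ""
# parse_po returns [(['"Loading addresses..."'], ['""'])] -- msgid and
# msgstr stay as lists of still-quoted source lines, which is exactly what
# the writer loop below re-joins with '\n'.join(msgid).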
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
saadasad/droid | Media.py | 1 | 2157 |
def menu():
from ftplib import FTP
import os
import time
from pygame import mixer
import sched
ftp = FTP()
choice = ''
while(choice!='0'):
print('1. Music')
print('2. Take a Picture')
print('3. Text To Speech')
print('0. To Exit')
choice = input('Enter Choice : ')
if(choice == '1'):
print('All Music Files')
ftp.connect('192.168.43.1', 2121)
ftp.login(user='user', passwd='password')
ftp.cwd('/storage/emulated/0/com.hipipal.qpyplus/project/Music/')
data= []
files = ftp.nlst()
counter=1
for f in files:
print(counter,':',end='')
print(f)
counter = counter + 1
ftp.close()
choice = input('Enter Music File Number :')
index = int(choice)-1
print(index)
file = open('action.txt', 'w')
file.writelines('getMusic' + '\n')
musicNumber = files[index]
path = 'D:\Computer Science\Semester 4\Data Structures & Algorithms\Mobile Simulator Project\\' + musicNumber
print(path)
file.writelines(musicNumber + '\n')
file.close()
ftp.connect('192.168.43.1', 2121)
ftp.login(user='user', passwd='password')
ftp.cwd('/storage/emulated/0/com.hipipal.qpyplus/project/')
ftp.storbinary('STOR action.txt', open('action.txt', 'rb'))
path = musicNumber
print(path)
time.sleep(15)
mixer.init()
choice = ''
while(choice!=0):
choice = input('1. To Play and 2. To Pause and 0. To Exit: ')
if(choice=='1'):
mixer.music.load(musicNumber)
mixer.music.play()
elif(choice=='2'):
mixer.music.pause()
elif(choice == '0'):
break
ftp.close()
elif( choice == '2'):
print('Picture Taken!')
elif( choice == '3'):
speak=input('Enter Your Message : ')
file = open('action.txt', 'w')
file.writelines('speak' + '\n')
file.writelines(speak + '\n')
file.close()
ftp.connect('192.168.43.1', 2121)
ftp.login(user='user', passwd='password')
ftp.cwd('/storage/emulated/0/com.hipipal.qpyplus/project/')
ftp.storbinary('STOR action.txt', open('action.txt', 'rb'))
ftp.close()
break
print('1. Messages 2. Contacts')
print('3. Phone 4. Media')
print('0. Back ') | gpl-3.0 |
m-tmatma/svnmailer | src/lib/svnmailer/notifier/stdout.py | 1 | 3635 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0201
# pylint-version = 0.7.0
#
# Copyright 2004-2005 André Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Stdout notifier - mostly for debugging purposes
"""
__author__ = "André Malo"
__docformat__ = "restructuredtext en"
__all__ = ['getNotifier']
# global imports
import sys
from svnmailer.notifier import _text
def getNotifier(config, groupset):
""" Returns an initialized notifier or nothing
:Parameters:
- `config`: The svnmailer config
- `groupset`: The groupset to process
:Types:
- `config`: `svnmailer.settings._base.BaseSettings`
- `groupset`: ``list``
:return: The list of notifiers (containing 0 or 1 member)
:rtype: ``list``
"""
return [StdoutNotifier(config, groupset)]
class StdoutNotifier(_text.TextNotifier):
""" Writes all stuff to sys.stdout
:ivar _fp: alternate file like object (for testing purposes)
:type _fp: ``file``
"""
__implements__ = [_text.TextNotifier]
_fp = None
def run(self):
""" Print the notification to stdout """
from svnmailer import stream
fp = self._fp or sys.stdout
fp = stream.UnicodeStream(fp, out_enc = self._getOutputEnc(fp))
groups = self._groupset.groups
self.fp = fp
self._writePreamble(groups)
self._writeNotification()
def _writeNotification(self):
""" Writes the whole diff notification body """
from svnmailer.settings import MODES
mode = self._settings.runtime.mode
if mode == MODES.commit:
self.writeMetaData()
self.writePathList()
self.writeDiffList()
elif mode == MODES.propchange:
self.writeRevPropData()
elif mode in (MODES.lock, MODES.unlock):
self.writeLockData()
else:
raise AssertionError("Unknown runtime.mode %r" % (mode,))
def _writePreamble(self, groups):
""" Writes the stdout preamble for the selected groups
:param groups: The groups that are notified
:type groups: ``list``
"""
self.fp.write(
">>> Notification for the following group%s:\n %s\n\n" %
(["", "s"][len(groups) > 1],
",\n ".join(["[%s]" % group._name for group in groups]))
)
def _getOutputEnc(self, fp):
""" Returns the "proper" output encoding
If the output goes to a terminal, the method tries to get
the current locale encoding. UTF-8 is default and fallback
if anything fails.
:param fp: The file object written to
:type fp: ``file``
:return: The chosen encoding
:rtype: ``str``
"""
import os
enc = "utf-8"
try:
isatty = os.isatty(fp.fileno())
except AttributeError:
isatty = False
if isatty:
import locale
enc = locale.getpreferredencoding() or enc
return enc
| apache-2.0 |
subhrm/google-code-jam-solutions | solutions/2018/1B/A/a.py | 1 | 1033 | # Problem a
def solve(n, l, ip):
assert n < 30, "n should be less than 300"
s = sum(ip)
rem = n - s
pc = [round_pc(v / n) for v in ip]
res = sum(pc)
if rem == 0:
return res
extra = [round_pc(i / n) for i in range(rem + 1)]
for v, p in zip(ip, pc):
if rem == 0:
break
e = 1
while(True):
if round_pc((v + e) / n) > p:
break
e += 1
add = (round_pc((v + e) / n) - p)
if e <= rem:
if add >= extra[e]:
rem -= e
res += add
res += extra[rem]
return res
def round_pc(r):
# rounded percentage (round half up to a whole percent)
r = int(r * 1000)
last_int = r % 10
res = r // 10
if last_int >= 5:
res += 1
return res
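# Worked example (illustrative): round_pc(0.125) computes
# int(0.125 * 1000) = 125, whose last digit 5 rounds up, giving 13 --
# i.e. 12.5% becomes 13%, the round-half-up behaviour the solver exploits
# when distributing the remaining votes.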
if __name__ == "__main__":
t = int(input())
for ti in range(1, t + 1):
n, l = map(int, input().split(" "))
ip = list(map(int, input().split(" ")))
print("Case #{}: {}".format(ti, solve(n, l, ip)))
| mit |
Creworker/FreeCAD | src/App/FreeCADInit.py | 9 | 9855 | # FreeCAD init module
# (c) 2001 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
# imports the one and only
import FreeCAD
def InitApplications():
try:
import sys,os
except ImportError:
FreeCAD.PrintError("\n\nSeems the python standard libs are not installed, bailing out!\n\n")
raise
# Checking on FreeCAD module path ++++++++++++++++++++++++++++++++++++++++++
ModDir = FreeCAD.getHomePath()+'Mod'
ModDir = os.path.realpath(ModDir)
BinDir = FreeCAD.getHomePath()+'bin'
BinDir = os.path.realpath(BinDir)
LibDir = FreeCAD.getHomePath()+'lib'
LibDir = os.path.realpath(LibDir)
AddPath = FreeCAD.ConfigGet("AdditionalModulePaths").split(";")
HomeMod = FreeCAD.ConfigGet("UserAppData")+"Mod"
HomeMod = os.path.realpath(HomeMod)
MacroDir = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Macro").GetString("MacroPath")
MacroMod = os.path.realpath(MacroDir+"/Mod")
#print FreeCAD.getHomePath()
if os.path.isdir(FreeCAD.getHomePath()+'src\\Tools'):
sys.path.append(FreeCAD.getHomePath()+'src\\Tools')
# Searching for module dirs +++++++++++++++++++++++++++++++++++++++++++++++++++
# Use dict to handle duplicated module names
ModDict = {}
if os.path.isdir(ModDir):
ModDirs = os.listdir(ModDir)
for i in ModDirs: ModDict[i.lower()] = os.path.join(ModDir,i)
else:
Wrn ("No modules found in " + ModDir + "\n")
# Search for additional modules in the home directory
if os.path.isdir(HomeMod):
HomeMods = os.listdir(HomeMod)
for i in HomeMods: ModDict[i.lower()] = os.path.join(HomeMod,i)
# Search for additional modules in the macro directory
if os.path.isdir(MacroMod):
MacroMods = os.listdir(MacroMod)
for i in MacroMods:
key = i.lower()
if key not in ModDict: ModDict[key] = os.path.join(MacroMod,i)
# Search for additional modules in command line
for i in AddPath:
if os.path.isdir(i): ModDict[i] = i
#AddModPaths = App.ParamGet("System parameter:AdditionalModulePaths")
#Err( AddModPaths)
# add also this path so that all modules search for libraries
# they depend on first here
PathExtension = BinDir + os.pathsep
# prepend all module paths to Python search path
Log('Init: Searching for modules...\n')
FreeCAD.__path__ = ModDict.values()
for Dir in ModDict.values():
if ((Dir != '') & (Dir != 'CVS') & (Dir != '__init__.py')):
sys.path.insert(0,Dir)
PathExtension += Dir + os.pathsep
InstallFile = os.path.join(Dir,"Init.py")
if (os.path.exists(InstallFile)):
try:
#execfile(InstallFile)
exec open(InstallFile).read()
except Exception, inst:
Log('Init: Initializing ' + Dir + '... failed\n')
Err('During initialization the error ' + str(inst) + ' occurred in ' + InstallFile + '\n')
else:
Log('Init: Initializing ' + Dir + '... done\n')
else:
Log('Init: Initializing ' + Dir + '(Init.py not found)... ignore\n')
sys.path.insert(0,LibDir)
sys.path.insert(0,ModDir)
Log("Using "+ModDir+" as module path!\n")
# new paths must be prepended to avoid to load a wrong version of a library
try:
os.environ["PATH"] = PathExtension + os.environ["PATH"]
except KeyError:
os.environ["PATH"] = PathExtension
path = os.environ["PATH"].split(os.pathsep)
Log("System path after init:\n")
for i in path:
Log(" " + i + "\n")
# add MacroDir to path (RFE #0000504)
sys.path.append(MacroDir)
# add special path for MacOSX (bug #0000307)
import platform
if len(platform.mac_ver()[0]) > 0:
sys.path.append(os.path.expanduser('~/Library/Application Support/FreeCAD/Mod'))
# some often used shortcuts (for lazy people like me ;-)
App = FreeCAD
Log = FreeCAD.Console.PrintLog
Msg = FreeCAD.Console.PrintMessage
Err = FreeCAD.Console.PrintError
Wrn = FreeCAD.Console.PrintWarning
Log ('Init: starting App::FreeCADInit.py\n')
# init every application by importing Init.py
InitApplications()
FreeCAD.addImportType("FreeCAD document (*.FCStd)","FreeCAD")
# set to no gui, is overwritten by InitGui
App.GuiUp = 0
# fill up unit definitions
App.Units.NanoMetre = App.Units.Quantity('nm')
App.Units.MicroMetre = App.Units.Quantity('um')
App.Units.MilliMetre = App.Units.Quantity('mm')
App.Units.CentiMetre = App.Units.Quantity('cm')
App.Units.DeciMetre = App.Units.Quantity('dm')
App.Units.Metre = App.Units.Quantity('m')
App.Units.KiloMetre = App.Units.Quantity('km')
App.Units.Liter = App.Units.Quantity('l')
App.Units.MicroGram = App.Units.Quantity('ug')
App.Units.MilliGram = App.Units.Quantity('mg')
App.Units.Gram = App.Units.Quantity('g')
App.Units.KiloGram = App.Units.Quantity('kg')
App.Units.Ton = App.Units.Quantity('t')
App.Units.Second = App.Units.Quantity('s')
App.Units.Minute = App.Units.Quantity('min')
App.Units.Hour = App.Units.Quantity('h')
App.Units.Ampere = App.Units.Quantity('A')
App.Units.MilliAmpere = App.Units.Quantity('mA')
App.Units.KiloAmpere = App.Units.Quantity('kA')
App.Units.MegaAmpere = App.Units.Quantity('MA')
App.Units.Kelvin = App.Units.Quantity('K')
App.Units.MilliKelvin = App.Units.Quantity('mK')
App.Units.MicroKelvin = App.Units.Quantity('uK')
App.Units.Mole = App.Units.Quantity('mol')
App.Units.Candela = App.Units.Quantity('cd')
App.Units.Inch = App.Units.Quantity('in')
App.Units.Foot = App.Units.Quantity('ft')
App.Units.Thou = App.Units.Quantity('thou')
App.Units.Yard = App.Units.Quantity('yd')
App.Units.Mile = App.Units.Quantity('mi')
App.Units.Pound = App.Units.Quantity('lb')
App.Units.Ounce = App.Units.Quantity('oz')
App.Units.Stone = App.Units.Quantity('st')
App.Units.Hundredweights = App.Units.Quantity('cwt')
App.Units.Newton = App.Units.Quantity('N')
App.Units.KiloNewton = App.Units.Quantity('kN')
App.Units.MegaNewton = App.Units.Quantity('MN')
App.Units.MilliNewton = App.Units.Quantity('mN')
App.Units.Pascal = App.Units.Quantity('Pa')
App.Units.KiloPascal = App.Units.Quantity('kPa')
App.Units.MegaPascal = App.Units.Quantity('MPa')
App.Units.GigaPascal = App.Units.Quantity('GPa')
App.Units.PSI = App.Units.Quantity('psi')
App.Units.Watt = App.Units.Quantity('W')
App.Units.VoltAmpere = App.Units.Quantity('VA')
App.Units.Joule = App.Units.Quantity('J')
App.Units.NewtonMeter = App.Units.Quantity('Nm')
App.Units.VoltAmpereSecond = App.Units.Quantity('VAs')
App.Units.WattSecond = App.Units.Quantity('Ws')
App.Units.MPH = App.Units.Quantity('mi/h')
App.Units.KMH = App.Units.Quantity('km/h')
App.Units.Degree = App.Units.Quantity('deg')
App.Units.Radian = App.Units.Quantity('rad')
App.Units.Gon = App.Units.Quantity('gon')
App.Units.Length = App.Units.Unit(1)
App.Units.Area = App.Units.Unit(2)
App.Units.Volume = App.Units.Unit(3)
App.Units.Mass = App.Units.Unit(0,1)
App.Units.Angle = App.Units.Unit(0,0,0,0,0,0,0,1)
App.Units.TimeSpan = App.Units.Unit(0,0,1)
App.Units.Velocity = App.Units.Unit(1,0,-1)
App.Units.Acceleration = App.Units.Unit(1,0,-2)
App.Units.Temperature = App.Units.Unit(0,0,0,0,1)
App.Units.ElectricCurrent = App.Units.Unit(0,0,0,1)
App.Units.AmountOfSubstance = App.Units.Unit(0,0,0,0,0,1)
App.Units.LuminoseIntensity = App.Units.Unit(0,0,0,0,0,0,1)
App.Units.Stress = App.Units.Unit(-1,1,-2)
App.Units.Pressure = App.Units.Unit(-1,1,-2)
App.Units.Force = App.Units.Unit(1,1,-2)
App.Units.Work = App.Units.Unit(2,1,-2)
App.Units.Power = App.Units.Unit(2,1,-3)
# clean up namespace
del(InitApplications)
Log ('Init: App::FreeCADInit.py done\n')
| lgpl-2.1 |
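The `App.Units.Unit(...)` definitions above encode a physical dimension as exponents of the base quantities in a fixed positional order (length, mass, time, electric current, temperature, amount of substance, luminous intensity, angle), so `Unit(-1,1,-2)` reads kg*m^-1*s^-2, matching the shared definition of Stress and Pressure, while `Unit(1,1,-2)` is Force. A minimal standalone sketch of that exponent bookkeeping (the `Dim` class is a hypothetical illustration, not the FreeCAD Units API):

```python
# Hypothetical sketch of dimension-exponent bookkeeping, assuming the
# same positional order the Unit() calls above use: length, mass,
# time, current, temperature, amount of substance, luminosity, angle.
class Dim(object):
    BASES = ('m', 'kg', 's', 'A', 'K', 'mol', 'cd', 'rad')

    def __init__(self, *exps):
        # Pad trailing zeros, so Dim(1) is pure length like Unit(1).
        self.exps = tuple(exps) + (0,) * (len(self.BASES) - len(exps))

    def __mul__(self, other):
        return Dim(*(a + b for a, b in zip(self.exps, other.exps)))

    def __truediv__(self, other):
        return Dim(*(a - b for a, b in zip(self.exps, other.exps)))

    def __repr__(self):
        parts = ['%s^%d' % (b, e) for b, e in zip(self.BASES, self.exps) if e]
        return '*'.join(parts) or '1'

force = Dim(1, 1, -2)   # newton: kg*m/s^2, cf. Units.Force above
area = Dim(2)           # m^2, cf. Units.Area
print(force / area)     # m^-1*kg^1*s^-2, cf. Units.Pressure = Unit(-1,1,-2)
```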
edgarRd/incubator-airflow | airflow/migrations/versions/e3a246e0dc1_current_schema.py | 8 | 12374 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""current schema
Revision ID: e3a246e0dc1
Revises:
Create Date: 2015-08-18 16:35:00.883495
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'e3a246e0dc1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if 'connection' not in tables:
op.create_table(
'connection',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('conn_id', sa.String(length=250), nullable=True),
sa.Column('conn_type', sa.String(length=500), nullable=True),
sa.Column('host', sa.String(length=500), nullable=True),
sa.Column('schema', sa.String(length=500), nullable=True),
sa.Column('login', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(length=500), nullable=True),
sa.Column('port', sa.Integer(), nullable=True),
sa.Column('extra', sa.String(length=5000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'dag' not in tables:
op.create_table(
'dag',
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('is_paused', sa.Boolean(), nullable=True),
sa.Column('is_subdag', sa.Boolean(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('last_scheduler_run', sa.DateTime(), nullable=True),
sa.Column('last_pickled', sa.DateTime(), nullable=True),
sa.Column('last_expired', sa.DateTime(), nullable=True),
sa.Column('scheduler_lock', sa.Boolean(), nullable=True),
sa.Column('pickle_id', sa.Integer(), nullable=True),
sa.Column('fileloc', sa.String(length=2000), nullable=True),
sa.Column('owners', sa.String(length=2000), nullable=True),
sa.PrimaryKeyConstraint('dag_id')
)
if 'dag_pickle' not in tables:
op.create_table(
'dag_pickle',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pickle', sa.PickleType(), nullable=True),
sa.Column('created_dttm', sa.DateTime(), nullable=True),
sa.Column('pickle_hash', sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'import_error' not in tables:
op.create_table(
'import_error',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('filename', sa.String(length=1024), nullable=True),
sa.Column('stacktrace', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'job' not in tables:
op.create_table(
'job',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=True),
sa.Column('state', sa.String(length=20), nullable=True),
sa.Column('job_type', sa.String(length=30), nullable=True),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('latest_heartbeat', sa.DateTime(), nullable=True),
sa.Column('executor_class', sa.String(length=500), nullable=True),
sa.Column('hostname', sa.String(length=500), nullable=True),
sa.Column('unixname', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(
'job_type_heart',
'job',
['job_type', 'latest_heartbeat'],
unique=False
)
if 'known_event_type' not in tables:
op.create_table(
'known_event_type',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('know_event_type', sa.String(length=200), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'log' not in tables:
op.create_table(
'log',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.Column('dag_id', sa.String(length=250), nullable=True),
sa.Column('task_id', sa.String(length=250), nullable=True),
sa.Column('event', sa.String(length=30), nullable=True),
sa.Column('execution_date', sa.DateTime(), nullable=True),
sa.Column('owner', sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'sla_miss' not in tables:
op.create_table(
'sla_miss',
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('email_sent', sa.Boolean(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date')
)
if 'slot_pool' not in tables:
op.create_table(
'slot_pool',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pool', sa.String(length=50), nullable=True),
sa.Column('slots', sa.Integer(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('pool')
)
if 'task_instance' not in tables:
op.create_table(
'task_instance',
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('state', sa.String(length=20), nullable=True),
sa.Column('try_number', sa.Integer(), nullable=True),
sa.Column('hostname', sa.String(length=1000), nullable=True),
sa.Column('unixname', sa.String(length=1000), nullable=True),
sa.Column('job_id', sa.Integer(), nullable=True),
sa.Column('pool', sa.String(length=50), nullable=True),
sa.Column('queue', sa.String(length=50), nullable=True),
sa.Column('priority_weight', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date')
)
op.create_index(
'ti_dag_state',
'task_instance',
['dag_id', 'state'],
unique=False
)
op.create_index(
'ti_pool',
'task_instance',
['pool', 'state', 'priority_weight'],
unique=False
)
op.create_index(
'ti_state_lkp',
'task_instance',
['dag_id', 'task_id', 'execution_date', 'state'],
unique=False
)
if 'user' not in tables:
op.create_table(
'user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
if 'variable' not in tables:
op.create_table(
'variable',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('key', sa.String(length=250), nullable=True),
sa.Column('val', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('key')
)
if 'chart' not in tables:
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
if 'known_event' not in tables:
op.create_table(
'known_event',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('known_event_type_id', sa.Integer(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['known_event_type_id'],
['known_event_type.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
if 'xcom' not in tables:
op.create_table(
'xcom',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('key', sa.String(length=512), nullable=True),
sa.Column('value', sa.PickleType(), nullable=True),
sa.Column(
'timestamp',
sa.DateTime(),
default=func.now(),
nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('known_event')
op.drop_table('chart')
op.drop_table('variable')
op.drop_table('user')
op.drop_index('ti_state_lkp', table_name='task_instance')
op.drop_index('ti_pool', table_name='task_instance')
op.drop_index('ti_dag_state', table_name='task_instance')
op.drop_table('task_instance')
op.drop_table('slot_pool')
op.drop_table('sla_miss')
op.drop_table('log')
op.drop_table('known_event_type')
op.drop_index('job_type_heart', table_name='job')
op.drop_table('job')
op.drop_table('import_error')
op.drop_table('dag_pickle')
op.drop_table('dag')
op.drop_table('connection')
op.drop_table('xcom')
| apache-2.0 |
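The `upgrade()` above wraps every `create_table` in an inspector check, so the migration is safe to run against a database that already contains the pre-Alembic tables. A minimal sketch of the same create-only-if-missing guard against a throwaway in-memory SQLite engine (plain SQLAlchemy outside any Alembic context; the table definition is abbreviated to two columns):

```python
# Sketch of the create-only-if-missing guard used by upgrade() above,
# run standalone against in-memory SQLite (no Alembic involved).
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
sa.Table('connection', meta,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('conn_id', sa.String(250)))

if 'connection' not in Inspector.from_engine(engine).get_table_names():
    meta.create_all(engine)  # first run creates the table

# A second run is a no-op: the inspector now reports the table.
print(Inspector.from_engine(engine).get_table_names())  # ['connection']
```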
fin/froide | froide/publicbody/migrations/0001_initial.py | 5 | 9441 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.manager
import django.utils.timezone
from django.conf import settings
import django.db.models.deletion
import froide.publicbody.models
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='FoiLaw',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name='Name')),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('long_description', models.TextField(verbose_name='Website Text', blank=True)),
('created', models.DateField(null=True, verbose_name='Creation Date', blank=True)),
('updated', models.DateField(null=True, verbose_name='Updated Date', blank=True)),
('request_note', models.TextField(verbose_name='request note', blank=True)),
('meta', models.BooleanField(default=False, verbose_name='Meta Law')),
('letter_start', models.TextField(verbose_name='Start of Letter', blank=True)),
('letter_end', models.TextField(verbose_name='End of Letter', blank=True)),
('priority', models.SmallIntegerField(default=3, verbose_name='Priority')),
('url', models.CharField(max_length=255, verbose_name='URL', blank=True)),
('max_response_time', models.IntegerField(default=30, null=True, verbose_name='Maximal Response Time', blank=True)),
('max_response_time_unit', models.CharField(default=b'day', max_length=32, verbose_name='Unit of Response Time', blank=True, choices=[(b'day', 'Day(s)'), (b'working_day', 'Working Day(s)'), (b'month_de', 'Month(s) (DE)')])),
('refusal_reasons', models.TextField(verbose_name='Possible Refusal Reasons, one per line, e.g \xa7X.Y: Privacy Concerns', blank=True)),
('email_only', models.BooleanField(default=False, verbose_name='E-Mail only')),
('combined', models.ManyToManyField(to='publicbody.FoiLaw', verbose_name='Combined Laws', blank=True)),
],
options={
'verbose_name': 'Freedom of Information Law',
'verbose_name_plural': 'Freedom of Information Laws',
},
),
migrations.CreateModel(
name='Jurisdiction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name='Name')),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('hidden', models.BooleanField(default=False, verbose_name='Hidden')),
('rank', models.SmallIntegerField(default=1)),
],
options={
'verbose_name': 'Jurisdiction',
'verbose_name_plural': 'Jurisdictions',
},
),
migrations.CreateModel(
name='PublicBody',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name='Name')),
('other_names', models.TextField(default=b'', verbose_name='Other names', blank=True)),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('url', models.URLField(max_length=500, null=True, verbose_name='URL', blank=True)),
('depth', models.SmallIntegerField(default=0)),
('classification', models.CharField(max_length=255, verbose_name='Classification', blank=True)),
('classification_slug', models.SlugField(max_length=255, verbose_name='Classification Slug', blank=True)),
('email', models.EmailField(max_length=254, null=True, verbose_name='Email', blank=True)),
('contact', models.TextField(verbose_name='Contact', blank=True)),
('address', models.TextField(verbose_name='Address', blank=True)),
('website_dump', models.TextField(null=True, verbose_name='Website Dump', blank=True)),
('request_note', models.TextField(verbose_name='request note', blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created at')),
('updated_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated at')),
('confirmed', models.BooleanField(default=True, verbose_name='confirmed')),
('number_of_requests', models.IntegerField(default=0, verbose_name='Number of requests')),
('_created_by', models.ForeignKey(related_name='public_body_creators', on_delete=django.db.models.deletion.SET_NULL, default=1, blank=True, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Created by')),
('_updated_by', models.ForeignKey(related_name='public_body_updaters', on_delete=django.db.models.deletion.SET_NULL, default=1, blank=True, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Updated by')),
('jurisdiction', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Jurisdiction', blank=True, to='publicbody.Jurisdiction', null=True)),
('laws', models.ManyToManyField(to='publicbody.FoiLaw', verbose_name='Freedom of Information Laws')),
('parent', models.ForeignKey(related_name='children', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='publicbody.PublicBody', null=True)),
('root', models.ForeignKey(related_name='descendants', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='publicbody.PublicBody', null=True)),
('site', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=1, verbose_name='Site', to='sites.Site', null=True)),
],
options={
'ordering': ('name',),
'verbose_name': 'Public Body',
'verbose_name_plural': 'Public Bodies',
},
managers=[
('non_filtered_objects', django.db.models.manager.Manager()),
('published', froide.publicbody.models.PublicBodyManager()),
],
),
migrations.CreateModel(
name='PublicBodyTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100, verbose_name='Name')),
('slug', models.SlugField(unique=True, max_length=100, verbose_name='Slug')),
('is_topic', models.BooleanField(default=False, verbose_name='as topic')),
('rank', models.SmallIntegerField(default=0, verbose_name='rank')),
],
options={
'verbose_name': 'Public Body Tag',
'verbose_name_plural': 'Public Body Tags',
},
),
migrations.CreateModel(
name='TaggedPublicBody',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', models.ForeignKey(to='publicbody.PublicBody')),
('tag', models.ForeignKey(related_name='publicbodies', to='publicbody.PublicBodyTag')),
],
options={
'verbose_name': 'Tagged Public Body',
'verbose_name_plural': 'Tagged Public Bodies',
},
),
migrations.AddField(
model_name='publicbody',
name='tags',
field=taggit.managers.TaggableManager(to='publicbody.PublicBodyTag', through='publicbody.TaggedPublicBody', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
),
migrations.AddField(
model_name='foilaw',
name='jurisdiction',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Jurisdiction', blank=True, to='publicbody.Jurisdiction', null=True),
),
migrations.AddField(
model_name='foilaw',
name='mediator',
field=models.ForeignKey(related_name='mediating_laws', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='publicbody.PublicBody', null=True, verbose_name='Mediator'),
),
migrations.AddField(
model_name='foilaw',
name='site',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=1, verbose_name='Site', to='sites.Site', null=True),
),
]
| mit |
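The `migrations.swappable_dependency(settings.AUTH_USER_MODEL)` entry above is what lets the `ForeignKey` columns pointing at `settings.AUTH_USER_MODEL` resolve against whichever user model the project configures. A sketch of the same pattern in a hypothetical app's initial migration (illustrative model and field names):

```python
# Hypothetical initial migration showing the swappable-dependency
# pattern from above; 'Note' and 'author' are illustrative names.
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('author', models.ForeignKey(
                    to=settings.AUTH_USER_MODEL,
                    on_delete=django.db.models.deletion.CASCADE)),
            ],
        ),
    ]
```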
mpvoss/RickAndMortyWeatherTweets | env/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 515 | 5599 | from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested URL
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class SubjectAltNameWarning(SecurityWarning):
"Warned when connecting to a host with a certificate missing a SAN."
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class SNIMissingWarning(HTTPWarning):
"Warned when making a HTTPS request without SNI available."
pass
class DependencyWarning(HTTPWarning):
"""
Warned when an attempt is made to import a module with missing optional
dependencies.
"""
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
class ProxySchemeUnknown(AssertionError, ValueError):
"ProxyManager does not support the supplied scheme"
# TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
def __init__(self, scheme):
message = "Not supported proxy scheme %s" % scheme
super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
"Raised by assert_header_parsing, but we convert it to a log.warning statement."
def __init__(self, defects, unparsed_data):
message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
super(HeaderParsingError, self).__init__(message)
| mit |
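Of the leaf exceptions above, `MaxRetryError` is the one callers most often catch: it carries the requested `url` and wraps the last underlying failure (for example a `NewConnectionError`) in `reason`. A small usage sketch, assuming a standalone urllib3 install rather than this vendored copy; the host uses the reserved `.invalid` TLD so name resolution is guaranteed to fail:

```python
# Exhaust retries against an unresolvable host and inspect the error.
import urllib3
from urllib3.exceptions import MaxRetryError
from urllib3.util.retry import Retry

http = urllib3.PoolManager()
try:
    http.request('GET', 'http://no-such-host.invalid/',
                 retries=Retry(total=1))
except MaxRetryError as exc:
    print(exc.url)     # the requested URL
    print(exc.reason)  # the wrapped lower-level error
```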
hellhovnd/dentexchange | dentexchange/apps/employee/tests/test_public_questionnaire_detail_from_list_view.py | 2 | 2428 | # -*- coding:utf-8 -*-
import unittest
import mock
from django.http.response import Http404
from ..views import PublicQuestionnaireDetailFromListView
from ..models import EmployeeQuestionnaire
class PublicQuestionnaireDetailFromListViewTestCase(unittest.TestCase):
def test_model_should_reference_job_posting_model(self):
# setup
view = PublicQuestionnaireDetailFromListView()
# assert
self.assertEqual(id(EmployeeQuestionnaire), id(view.model))
def test_should_call_template_response_with_template(self):
# setup
view = PublicQuestionnaireDetailFromListView()
request = mock.Mock()
view.request = request
view.get_context_data = mock.Mock()
view.response_class = mock.Mock()
view.get_object = mock.Mock(return_value=EmployeeQuestionnaire())
template_name = \
'employee/public_questionnaire_detail_from_list.html'
# action
view.get(request)
# assert
self.assertEqual(1, view.response_class.call_count)
self.assertEqual(template_name,
view.response_class.call_args[1]['template'][0])
@mock.patch('employee.views.EmployeeQuestionnaire.objects.get')
def test_get_object_should_return_questionnaire_from_pk(self, get):
# setup
view = PublicQuestionnaireDetailFromListView()
pk = 1
request = mock.Mock()
request.GET = dict(pk=pk)
view.request = request
# action
returned_value = view.get_object()
# assert
self.assertDictEqual(dict(pk=pk), get.call_args[1])
self.assertEqual(id(get.return_value), id(returned_value))
@mock.patch('employee.views.EmployeeQuestionnaire')
def test_get_object_should_raise_http404_exception_when_pk_is_invalid(
self, questionnaire_class):
# setup
view = PublicQuestionnaireDetailFromListView()
pk = 1
request = mock.Mock()
request.GET = dict(pk=pk)
view.request = request
questionnaire_class.DoesNotExist = EmployeeQuestionnaire.DoesNotExist
questionnaire_class.objects.get.side_effect = \
EmployeeQuestionnaire.DoesNotExist
# action
with self.assertRaises(Http404):
view.get_object()
# assert
self.assertDictEqual(dict(pk=pk),
questionnaire_class.objects.get.call_args[1])
| bsd-3-clause |
vincecr0ft/hhntup | higgstautau/trigger/emulation.py | 4 | 4536 | from rootpy.tree.filtering import EventFilter
def update_trigger_trees(student, tool, name, file, tree):
"""
This method must be called when each new tree is loaded in the chain
"""
if not tool.passthrough and tool.year == 2011:
print "Loading current tree in the TriggerTool ..."
tool.trigger_tool_wrapper.loadMainTree(tree)
trigtree = file.Get('%sMeta/TrigConfTree' % name)
tool.trigger_tool_wrapper.loadMetaTree(trigtree)
class TauTriggerEmulation(EventFilter):
"""
Tau trigger emulation (only apply on MC)
"""
def __init__(self, year, passthrough=False, **kwargs):
if not passthrough:
from externaltools import TauTriggerEmulation as TTE
from ROOT import CoEPP
self.year = year
if year == 2011:
# emulation not required in 2012 yet since the SFs are wrt
# the default triggers
# initialize the trigger emulation tool
self.trigger_tool_wrapper = CoEPP.OfficialWrapper()
self.trigger_tool = CoEPP.TriggerTool()
self.trigger_tool.setWrapper(self.trigger_tool_wrapper)
trigger_config = TTE.get_resource(
'config_EF_DiTau.xml')
self.trigger_tool.setXMLFile(trigger_config)
self.trigger_tool.initializeFromXML()
self.trigger_A = self.trigger_tool.getTriggerChecked(
"EF_tau29_medium1_tau20_medium1_Hypo_00_02_42")
self.trigger_B = self.trigger_tool.getTriggerChecked(
"EF_tau29_medium1_tau20_medium1_Hypo_00_03_02")
self.trigger_C = self.trigger_tool.getTriggerChecked(
"EF_tau29T_medium1_tau20T_medium1_Hypo_00_03_02")
self.trigger_run_dict = {
180164: (self.trigger_A, 'EF_tau29_medium1_tau20_medium1'),
183003: (self.trigger_B, 'EF_tau29_medium1_tau20_medium1'),
186169: (self.trigger_B, 'EF_tau29_medium1_tau20_medium1'),
189751: (self.trigger_C, 'EF_tau29T_medium1_tau20T_medium1'),
}
self.passes = self.passes_11
self.finalize = self.finalize_11
super(TauTriggerEmulation, self).__init__(
passthrough=passthrough,
**kwargs)
def passes_11(self, event):
self.trigger_tool_wrapper.setEventNumber(event._entry.value)
trigger, triggername = self.trigger_run_dict[event.RunNumber]
trigger.switchOn()
self.trigger_tool.executeTriggers()
if trigger.passed():
if triggername == 'EF_tau29_medium1_tau20_medium1':
event.EF_tau29_medium1_tau20_medium1 = True
else:
event.EF_tau29T_medium1_tau20T_medium1 = True
# trigger matching
trig1 = trigger.getTrigger1() # EF_tau29(T)_medium1
trig2 = trigger.getTrigger2() # EF_tau20(T)_medium1
for tau in event.taus:
idx = -1
idx1 = trig1.matchIndex(tau.fourvect)
idx2 = trig2.matchIndex(tau.fourvect)
if idx1 == idx2 != -1:
idx = idx1
elif idx1 == -1 and idx2 > -1:
idx = idx2
elif idx2 == -1 and idx1 > -1:
idx = idx1
elif idx2 != idx1: # both >-1 and non-equal
# take index of closer one using dR
trigtau1TLV = self.trigger_tool.buildEFTauTLV(idx1)
trigtau2TLV = self.trigger_tool.buildEFTauTLV(idx2)
if trigtau1TLV.DeltaR(tau.fourvect) < trigtau2TLV.DeltaR(tau.fourvect):
idx = idx1
else:
idx = idx2
tau.trigger_match_index = idx
else:
if triggername == 'EF_tau29_medium1_tau20_medium1':
event.EF_tau29_medium1_tau20_medium1 = False
else:
event.EF_tau29T_medium1_tau20T_medium1 = False
trigger.switchOff()
return True
def finalize_11(self):
# turn on triggers so they show up as "active" in the report
self.trigger_A.switchOn()
self.trigger_B.switchOn()
self.trigger_C.switchOn()
# finalize the trigger_tool
self.trigger_tool.finalize()
self.trigger_tool.summary()
| gpl-3.0 |
tamihiro/grpc | src/python/grpcio/grpc/framework/core/_expiration.py | 20 | 6055 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""State and behavior for operation expiration."""
import time
from grpc.framework.core import _interfaces
from grpc.framework.core import _utilities
from grpc.framework.foundation import later
from grpc.framework.interfaces.base import base
class _ExpirationManager(_interfaces.ExpirationManager):
"""An implementation of _interfaces.ExpirationManager."""
def __init__(
self, commencement, timeout, maximum_timeout, lock, termination_manager,
transmission_manager):
"""Constructor.
Args:
commencement: The time in seconds since the epoch at which the operation
began.
timeout: A length of time in seconds to allow for the operation to run.
maximum_timeout: The maximum length of time in seconds to allow for the
operation to run despite what is requested via this object's
change_timeout method.
lock: The operation-wide lock.
termination_manager: The _interfaces.TerminationManager for the operation.
transmission_manager: The _interfaces.TransmissionManager for the
operation.
"""
self._lock = lock
self._termination_manager = termination_manager
self._transmission_manager = transmission_manager
self._commencement = commencement
self._maximum_timeout = maximum_timeout
self._timeout = timeout
self._deadline = commencement + timeout
self._index = None
self._future = None
def _expire(self, index):
def expire():
with self._lock:
if self._future is not None and index == self._index:
self._future = None
self._termination_manager.expire()
self._transmission_manager.abort(
_utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None))
return expire
def start(self):
self._index = 0
self._future = later.later(self._timeout, self._expire(0))
def change_timeout(self, timeout):
if self._future is not None and timeout != self._timeout:
self._future.cancel()
new_timeout = min(timeout, self._maximum_timeout)
new_index = self._index + 1
self._timeout = new_timeout
self._deadline = self._commencement + new_timeout
self._index = new_index
delay = self._deadline - time.time()
self._future = later.later(delay, self._expire(new_index))
if new_timeout != timeout:
self._transmission_manager.timeout(new_timeout)
def deadline(self):
return self._deadline
def terminate(self):
if self._future:
self._future.cancel()
self._future = None
self._deadline_index = None
def invocation_expiration_manager(
timeout, lock, termination_manager, transmission_manager):
"""Creates an _interfaces.ExpirationManager appropriate for front-side use.
Args:
timeout: A length of time in seconds to allow for the operation to run.
lock: The operation-wide lock.
termination_manager: The _interfaces.TerminationManager for the operation.
transmission_manager: The _interfaces.TransmissionManager for the
operation.
Returns:
An _interfaces.ExpirationManager appropriate for invocation-side use.
"""
expiration_manager = _ExpirationManager(
time.time(), timeout, timeout, lock, termination_manager,
transmission_manager)
expiration_manager.start()
return expiration_manager
def service_expiration_manager(
timeout, default_timeout, maximum_timeout, lock, termination_manager,
transmission_manager):
"""Creates an _interfaces.ExpirationManager appropriate for back-side use.
Args:
timeout: A length of time in seconds to allow for the operation to run. May
be None in which case default_timeout will be used.
default_timeout: The default length of time in seconds to allow for the
operation to run if the front-side customer has not specified such a value
(or if the value they specified is not yet known).
maximum_timeout: The maximum length of time in seconds to allow for the
operation to run.
lock: The operation-wide lock.
termination_manager: The _interfaces.TerminationManager for the operation.
transmission_manager: The _interfaces.TransmissionManager for the
operation.
Returns:
An _interfaces.ExpirationManager appropriate for service-side use.
"""
expiration_manager = _ExpirationManager(
time.time(), default_timeout if timeout is None else timeout,
maximum_timeout, lock, termination_manager, transmission_manager)
expiration_manager.start()
return expiration_manager
| bsd-3-clause |
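The `_expire(index)` closure above guards against stale timers: `change_timeout` cancels the pending future, bumps `_index`, and schedules a replacement, so an already-firing old callback sees a mismatched index under the lock and does nothing. A standalone sketch of that index-guard pattern, substituting `threading.Timer` for `later.later` (illustrative class, not the grpc code):

```python
# Index-guarded rescheduling, as in _ExpirationManager._expire above.
import threading

class Expirer(object):
    def __init__(self, timeout, on_expire):
        self._lock = threading.Lock()
        self._on_expire = on_expire
        self._index = 0
        self._timer = threading.Timer(timeout, self._expire(0))
        self._timer.start()

    def _expire(self, index):
        def expire():
            with self._lock:
                # A stale callback holds an old index and is a no-op.
                if self._timer is not None and index == self._index:
                    self._timer = None
                    self._on_expire()
        return expire

    def change_timeout(self, timeout):
        with self._lock:
            if self._timer is None:
                return
            self._timer.cancel()  # may already be firing; see guard above
            self._index += 1
            self._timer = threading.Timer(timeout,
                                          self._expire(self._index))
            self._timer.start()

e = Expirer(0.2, lambda: print('expired'))
e.change_timeout(0.05)  # only the rescheduled timer's callback runs
```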
rentongzhang/servo | tests/wpt/css-tests/tools/html5lib/utils/entities.py | 438 | 2734 | import json
import html5lib
def parse(path="html5ents.xml"):
return html5lib.parse(open(path), treebuilder="lxml")
def entity_table(tree):
return dict((entity_name("".join(tr[0].xpath(".//text()"))),
entity_characters(tr[1].text))
for tr in tree.xpath("//h:tbody/h:tr",
namespaces={"h":"http://www.w3.org/1999/xhtml"}))
def entity_name(inp):
return inp.strip()
def entity_characters(inp):
return "".join(codepoint_to_character(item)
for item in inp.split()
if item)
def codepoint_to_character(inp):
return ("\U000"+inp[2:]).decode("unicode-escape")
def make_tests_json(entities):
test_list = make_test_list(entities)
tests_json = {"tests":
[make_test(*item) for item in test_list]
}
return tests_json
def make_test(name, characters, good):
return {
"description":test_description(name, good),
"input":"&%s"%name,
"output":test_expected(name, characters, good)
}
def test_description(name, good):
with_semicolon = name.endswith(";")
semicolon_text = {True:"with a semi-colon",
False:"without a semi-colon"}[with_semicolon]
if good:
text = "Named entity: %s %s"%(name, semicolon_text)
else:
text = "Bad named entity: %s %s"%(name, semicolon_text)
return text
def test_expected(name, characters, good):
rv = []
if not good or not name.endswith(";"):
rv.append("ParseError")
rv.append(["Character", characters])
return rv
def make_test_list(entities):
tests = []
for entity_name, characters in entities.items():
if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
tests.append((entity_name, characters, True))
return sorted(tests)
def subentity_exists(entity_name, entities):
for i in range(1, len(entity_name)):
if entity_name[:-i] in entities:
return True
return False
def make_entities_code(entities):
entities_text = "\n".join(" \"%s\": u\"%s\","%(
name, entities[name].encode(
"unicode-escape").replace("\"", "\\\""))
for name in sorted(entities.keys()))
return """entities = {
%s
}"""%entities_text
def main():
entities = entity_table(parse())
tests_json = make_tests_json(entities)
json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
code = make_entities_code(entities)
open("entities_constants.py", "w").write(code)
if __name__ == "__main__":
main()
| mpl-2.0 |
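For a toy table containing only the `amp;` entity, `make_test_list` emits the good case plus a bad-entity case for the semicolon-less prefix (no separate `amp` entry exists to suppress it), and `make_test` expands each into a test dict. The expected output, worked by hand from the functions above:

```python
# Hand-worked expectation for entities = {'amp;': '&'}:
# make_test_list -> [('amp', '&amp', False), ('amp;', '&', True)]
bad = {
    'description': 'Bad named entity: amp without a semi-colon',
    'input': '&amp',
    'output': ['ParseError', ['Character', '&amp']],
}
good = {
    'description': 'Named entity: amp; with a semi-colon',
    'input': '&amp;',
    'output': [['Character', '&']],
}
```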
nikhilraog/boto | boto/mws/exception.py | 153 | 2396 | # Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import BotoServerError
from boto.mws.response import ResponseFactory
class ResponseErrorFactory(ResponseFactory):
def __call__(self, status, reason, body=None):
server = BotoServerError(status, reason, body=body)
supplied = self.find_element(server.error_code, '', ResponseError)
return supplied(status, reason, body=body)
class ResponseError(BotoServerError):
"""
Undefined response error.
"""
retry = False
def __repr__(self):
return '{0.__name__}({1.reason}: "{1.message}")' \
.format(self.__class__, self)
def __str__(self):
doc = self.__doc__ and self.__doc__.strip() + "\n" or ''
return '{1.__name__}: {0.reason} {2}\n{3}' \
'{0.message}'.format(self, self.__class__,
self.retry and '(Retriable)' or '', doc)
class RetriableResponseError(ResponseError):
retry = True
class InvalidParameterValue(ResponseError):
"""
One or more parameter values in the request is invalid.
"""
class InvalidParameter(ResponseError):
"""
One or more parameters in the request is invalid.
"""
class InvalidAddress(ResponseError):
"""
Invalid address.
"""
| mit |
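`ResponseErrorFactory` above resolves the server-reported error code to a matching exception subclass via `find_element`, falling back to the plain `ResponseError`. A standalone sketch of that dispatch-by-code idea (illustrative classes, not the boto factory machinery):

```python
# Map an error-code string to a subclass by name, with a fallback.
class ResponseError(Exception):
    retry = False

class InvalidParameterValue(ResponseError):
    pass

class RequestThrottled(ResponseError):
    retry = True  # retriable, like RetriableResponseError above

ERRORS = dict((cls.__name__, cls)
              for cls in ResponseError.__subclasses__())

def error_for(code, message):
    return ERRORS.get(code, ResponseError)(message)

exc = error_for('RequestThrottled', 'slow down')
print(type(exc).__name__, exc.retry)  # RequestThrottled True
```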
InterfaceMasters/ONL | components/all/vendor-config/imt/src/python/imt/__init__.py | 3 | 1335 | #!/usr/bin/python
############################################################
# <bsn.cl fy=2013 v=onl>
#
# Copyright 2013, 2014 Big Switch Networks, Inc.
# Copyright 2015 Interface Masters Technologies, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
# </bsn.cl>
############################################################
#
# OpenNetworkPlatform support for IMT(Interface Masters) platforms.
#
############################################################
from onl.platform.base import OpenNetworkPlatformBase, sysinfo
import struct
import time
class OpenNetworkPlatformIMT(OpenNetworkPlatformBase):
def manufacturer(self):
return "Interface Masters Technologies, Inc."
def _sys_info_dict(self):
return {
sysinfo.PRODUCT_NAME : "IMTNotImplemented",
}
| epl-1.0 |
Carmezim/tensorflow | tensorflow/tools/docs/build_docs_test.py | 24 | 1725 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run the python doc generator and fail if there are any broken links."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.platform import googletest
from tensorflow.python.platform import resource_loader
from tensorflow.tools.docs import generate_lib
class Flags(object):
resource_root = resource_loader.get_root_dir_with_all_resources()
src_dir = os.path.join(resource_root, 'third_party/tensorflow/docs_src')
base_dir = os.path.join(resource_root, 'third_party/tensorflow/')
output_dir = googletest.GetTempDir()
class BuildDocsTest(googletest.TestCase):
def testBuildDocs(self):
doc_generator = generate_lib.DocGenerator()
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
status = doc_generator.build(Flags())
if status:
self.fail('Found %s Errors!' % status)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
mohamed--abdel-maksoud/chromium.src | tools/telemetry/telemetry/image_processing/io/frame_generator.py | 52 | 1479 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import abc
class FrameReadError(Exception):
pass
class FrameGenerator(object):
""" Defines an interface for reading input frames.
Attributes:
_generator: A reference to the created generator.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
""" Initializes the FrameGenerator object. """
self._generator = self._CreateGenerator()
@abc.abstractmethod
def _CreateGenerator(self):
""" Creates a new generator.
Implemented in derived classes.
Raises:
FrameReadError: A error occurred in reading the frame.
"""
raise NotImplementedError
@property
def Generator(self):
""" Returns:
A reference to the created generator.
"""
return self._generator
@abc.abstractproperty
def CurrentTimestamp(self):
""" Returns:
float, The timestamp of the current frame in milliseconds.
"""
raise NotImplementedError
@abc.abstractproperty
def CurrentFrameNumber(self):
""" Returns:
int, The frame index of the current frame.
"""
raise NotImplementedError
@abc.abstractproperty
def Dimensions(self):
""" Returns:
The dimensions of the frame sequence as a tuple int (width, height).
This value should be constant across frames.
"""
raise NotImplementedError
| bsd-3-clause |
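A concrete subclass only needs `_CreateGenerator` plus the four abstract properties; the base `__init__` then builds the generator and exposes it through `Generator`. A minimal in-memory implementation sketch against the `FrameGenerator` above (hypothetical class, not part of telemetry):

```python
# Hypothetical concrete FrameGenerator serving a fixed frame list at a
# constant frame rate; assumes the FrameGenerator class defined above.
class ListFrameGenerator(FrameGenerator):
    def __init__(self, frames, fps=60.0, dimensions=(640, 480)):
        self._frames = frames
        self._ms_per_frame = 1000.0 / fps
        self._dimensions = dimensions
        self._current = -1
        super(ListFrameGenerator, self).__init__()  # builds _generator

    def _CreateGenerator(self):
        for i, frame in enumerate(self._frames):
            self._current = i
            yield frame
        raise FrameReadError('no more frames')

    @property
    def CurrentTimestamp(self):
        return self._current * self._ms_per_frame

    @property
    def CurrentFrameNumber(self):
        return self._current

    @property
    def Dimensions(self):
        return self._dimensions

gen = ListFrameGenerator(['f0', 'f1'])
print(next(gen.Generator))   # f0
print(gen.CurrentTimestamp)  # 0.0
```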