| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
import asyncio
import base64
import binascii
import json
import re
import uuid
import warnings
import zlib
from collections import deque
from collections.abc import Mapping, Sequence
from urllib.parse import parse_qsl, unquote, urlencode
from multidict import CIMultiDict
from .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)
from .helpers import CHAR, PY_35, PY_352, TOKEN, parse_mimetype, reify
from .http import HttpParser
from .payload import (BytesPayload, LookupError, Payload, StringPayload,
get_payload, payload_type)
__all__ = ('MultipartReader', 'MultipartWriter', 'BodyPartReader',
'BadContentDispositionHeader', 'BadContentDispositionParam',
'parse_content_disposition', 'content_disposition_filename')
class BadContentDispositionHeader(RuntimeWarning):
pass
class BadContentDispositionParam(RuntimeWarning):
pass
def parse_content_disposition(header):
def is_token(string):
return string and TOKEN >= set(string)
def is_quoted(string):
return string[0] == string[-1] == '"'
def is_rfc5987(string):
return is_token(string) and string.count("'") == 2
def is_extended_param(string):
return string.endswith('*')
def is_continuous_param(string):
pos = string.find('*') + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith('*') else string[pos:]
return substring.isdigit()
def unescape(text, *, chars=''.join(map(re.escape, CHAR))):
return re.sub('\\\\([{}])'.format(chars), '\\1', text)
if not header:
return None, {}
disptype, *parts = header.split(';')
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params = {}
while parts:
item = parts.pop(0)
if '=' not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split('=', 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, 'strict')
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
failed = True
if is_quoted(value):
failed = False
value = unescape(value[1:-1].lstrip('\\/'))
elif is_token(value):
failed = False
elif parts:
                # maybe just a ';' inside the filename; this fixes only
                # that one case, a proper fix needs a parser redesign
_value = '%s;%s' % (value, parts[0])
if is_quoted(_value):
parts.pop(0)
value = unescape(_value[1:-1].lstrip('\\/'))
failed = False
if failed:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
def content_disposition_filename(params, name='filename'):
name_suf = '%s*' % name
if not params:
return None
elif name_suf in params:
return params[name_suf]
elif name in params:
return params[name]
else:
parts = []
fnparams = sorted((key, value)
for key, value in params.items()
if key.startswith(name_suf))
for num, (key, value) in enumerate(fnparams):
_, tail = key.split('*', 1)
if tail.endswith('*'):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = ''.join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
return unquote(value, encoding, 'strict')
return value
class MultipartResponseWrapper(object):
"""Wrapper around the :class:`MultipartBodyReader` to take care about
underlying connection and close it when it needs in."""
def __init__(self, resp, stream):
self.resp = resp
self.stream = stream
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
def at_eof(self):
"""Returns ``True`` when all response data had been read.
:rtype: bool
"""
return self.resp.content.at_eof()
@asyncio.coroutine
def next(self):
"""Emits next multipart reader object."""
item = yield from self.stream.next()
if self.stream.at_eof():
yield from self.release()
return item
@asyncio.coroutine
def release(self):
"""Releases the connection gracefully, reading all the content
to the void."""
yield from self.resp.release()
class BodyPartReader(object):
"""Multipart reader for single body part."""
chunk_size = 8192
def __init__(self, boundary, headers, content):
self.headers = headers
self._boundary = boundary
self._content = content
self._at_eof = False
length = self.headers.get(CONTENT_LENGTH, None)
self._length = int(length) if length is not None else None
self._read_bytes = 0
self._unread = deque()
self._prev_chunk = None
self._content_eof = 0
self._cache = {}
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
@asyncio.coroutine
def next(self):
item = yield from self.read()
if not item:
return None
return item
@asyncio.coroutine
def read(self, *, decode=False):
"""Reads body part data.
        :param bool decode: Decodes data following the encoding
                            specified in the `Content-Encoding` header.
                            If the header is missing, the data remains
                            untouched.
:rtype: bytearray
"""
if self._at_eof:
return b''
data = bytearray()
while not self._at_eof:
data.extend((yield from self.read_chunk(self.chunk_size)))
if decode:
return self.decode(data)
return data
@asyncio.coroutine
def read_chunk(self, size=chunk_size):
"""Reads body part content chunk of the specified size.
:param int size: chunk size
:rtype: bytearray
"""
if self._at_eof:
return b''
if self._length:
chunk = yield from self._read_chunk_from_length(size)
else:
chunk = yield from self._read_chunk_from_stream(size)
self._read_bytes += len(chunk)
if self._read_bytes == self._length:
self._at_eof = True
if self._at_eof:
            crlf = yield from self._content.readline()
            assert b'\r\n' == crlf, \
                'reader did not read all the data or it is malformed'
return chunk
@asyncio.coroutine
def _read_chunk_from_length(self, size):
"""Reads body part content chunk of the specified size.
        The body part must have a `Content-Length` header with a proper value.
:param int size: chunk size
:rtype: bytearray
"""
assert self._length is not None, \
'Content-Length required for chunked read'
chunk_size = min(size, self._length - self._read_bytes)
chunk = yield from self._content.read(chunk_size)
return chunk
@asyncio.coroutine
def _read_chunk_from_stream(self, size):
"""Reads content chunk of body part with unknown length.
The `Content-Length` header for body part is not necessary.
:param int size: chunk size
:rtype: bytearray
"""
assert size >= len(self._boundary) + 2, \
            'Chunk size must be greater than or equal to boundary length + 2'
first_chunk = self._prev_chunk is None
if first_chunk:
self._prev_chunk = yield from self._content.read(size)
chunk = yield from self._content.read(size)
self._content_eof += int(self._content.at_eof())
assert self._content_eof < 3, "Reading after EOF"
window = self._prev_chunk + chunk
sub = b'\r\n' + self._boundary
if first_chunk:
idx = window.find(sub)
else:
idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
if idx >= 0:
# pushing boundary back to content
self._content.unread_data(window[idx:])
if size > idx:
self._prev_chunk = self._prev_chunk[:idx]
chunk = window[len(self._prev_chunk):idx]
if not chunk:
self._at_eof = True
result = self._prev_chunk
self._prev_chunk = chunk
return result
@asyncio.coroutine
def readline(self):
"""Reads body part by line by line.
:rtype: bytearray
"""
if self._at_eof:
return b''
if self._unread:
line = self._unread.popleft()
else:
line = yield from self._content.readline()
if line.startswith(self._boundary):
            # the very last boundary may not come with \r\n,
            # so use a single rule for all of them
sline = line.rstrip(b'\r\n')
boundary = self._boundary
last_boundary = self._boundary + b'--'
# ensure that we read exactly the boundary, not something alike
if sline == boundary or sline == last_boundary:
self._at_eof = True
self._unread.append(line)
return b''
else:
next_line = yield from self._content.readline()
if next_line.startswith(self._boundary):
line = line[:-2] # strip CRLF but only once
self._unread.append(next_line)
return line
@asyncio.coroutine
def release(self):
"""Like :meth:`read`, but reads all the data to the void.
:rtype: None
"""
if self._at_eof:
return
while not self._at_eof:
yield from self.read_chunk(self.chunk_size)
@asyncio.coroutine
def text(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body part contains text data.
:param str encoding: Custom text encoding. Overrides specified
in charset param of `Content-Type` header
:rtype: str
"""
data = yield from self.read(decode=True)
# see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA
# and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA
encoding = encoding or self.get_charset(default='utf-8')
return data.decode(encoding)
@asyncio.coroutine
def json(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body parts contains JSON data.
:param str encoding: Custom JSON encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return json.loads(data.decode(encoding))
@asyncio.coroutine
def form(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body parts contains form
urlencoded data.
:param str encoding: Custom form encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return parse_qsl(data.rstrip().decode(encoding),
keep_blank_values=True,
encoding=encoding)
def at_eof(self):
"""Returns ``True`` if the boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
def decode(self, data):
"""Decodes data according the specified `Content-Encoding`
or `Content-Transfer-Encoding` headers value.
Supports ``gzip``, ``deflate`` and ``identity`` encodings for
`Content-Encoding` header.
Supports ``base64``, ``quoted-printable``, ``binary`` encodings for
`Content-Transfer-Encoding` header.
:param bytearray data: Data to decode.
:raises: :exc:`RuntimeError` - if encoding is unknown.
:rtype: bytes
"""
if CONTENT_TRANSFER_ENCODING in self.headers:
data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
return self._decode_content(data)
return data
def _decode_content(self, data):
encoding = self.headers[CONTENT_ENCODING].lower()
if encoding == 'deflate':
return zlib.decompress(data, -zlib.MAX_WBITS)
elif encoding == 'gzip':
return zlib.decompress(data, 16 + zlib.MAX_WBITS)
elif encoding == 'identity':
return data
else:
raise RuntimeError('unknown content encoding: {}'.format(encoding))
def _decode_content_transfer(self, data):
encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
if encoding == 'base64':
return base64.b64decode(data)
elif encoding == 'quoted-printable':
return binascii.a2b_qp(data)
elif encoding in ('binary', '8bit', '7bit'):
return data
else:
raise RuntimeError('unknown content transfer encoding: {}'
''.format(encoding))
def get_charset(self, default=None):
"""Returns charset parameter from ``Content-Type`` header or default.
"""
ctype = self.headers.get(CONTENT_TYPE, '')
*_, params = parse_mimetype(ctype)
return params.get('charset', default)
@reify
def name(self):
"""Returns filename specified in Content-Disposition header or ``None``
if missed or header is malformed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, 'name')
@reify
def filename(self):
"""Returns filename specified in Content-Disposition header or ``None``
if missed or header is malformed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, 'filename')
@payload_type(BodyPartReader)
class BodyPartReaderPayload(Payload):
def __init__(self, value, *args, **kwargs):
super().__init__(value, *args, **kwargs)
params = {}
if value.name is not None:
params['name'] = value.name
if value.filename is not None:
            params['filename'] = value.filename
if params:
self.set_content_disposition('attachment', **params)
@asyncio.coroutine
def write(self, writer):
field = self._value
chunk = yield from field.read_chunk(size=2**16)
while chunk:
writer.write(field.decode(chunk))
chunk = yield from field.read_chunk(size=2**16)
class MultipartReader(object):
"""Multipart body reader."""
    #: Response wrapper, used when a multipart reader is constructed from a response.
response_wrapper_cls = MultipartResponseWrapper
    #: Multipart reader class, used to handle multipart/* body parts.
    #: None means type(self) is used.
multipart_reader_cls = None
#: Body part reader class for non multipart/* content types.
part_reader_cls = BodyPartReader
def __init__(self, headers, content):
self.headers = headers
self._boundary = ('--' + self._get_boundary()).encode()
self._content = content
self._last_part = None
self._at_eof = False
self._at_bof = True
self._unread = []
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
@classmethod
def from_response(cls, response):
"""Constructs reader instance from HTTP response.
:param response: :class:`~aiohttp.client.ClientResponse` instance
"""
obj = cls.response_wrapper_cls(response, cls(response.headers,
response.content))
return obj
def at_eof(self):
"""Returns ``True`` if the final boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
@asyncio.coroutine
def next(self):
"""Emits the next multipart body part."""
# So, if we're at BOF, we need to skip till the boundary.
if self._at_eof:
return
yield from self._maybe_release_last_part()
if self._at_bof:
yield from self._read_until_first_boundary()
self._at_bof = False
else:
yield from self._read_boundary()
if self._at_eof: # we just read the last boundary, nothing to do there
return
self._last_part = yield from self.fetch_next_part()
return self._last_part
@asyncio.coroutine
def release(self):
"""Reads all the body parts to the void till the final boundary."""
while not self._at_eof:
item = yield from self.next()
if item is None:
break
yield from item.release()
@asyncio.coroutine
def fetch_next_part(self):
"""Returns the next body part reader."""
headers = yield from self._read_headers()
return self._get_part_reader(headers)
def _get_part_reader(self, headers):
"""Dispatches the response by the `Content-Type` header, returning
suitable reader instance.
:param dict headers: Response headers
"""
ctype = headers.get(CONTENT_TYPE, '')
mtype, *_ = parse_mimetype(ctype)
if mtype == 'multipart':
if self.multipart_reader_cls is None:
return type(self)(headers, self._content)
return self.multipart_reader_cls(headers, self._content)
else:
return self.part_reader_cls(self._boundary, headers, self._content)
def _get_boundary(self):
mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])
assert mtype == 'multipart', 'multipart/* content type expected'
if 'boundary' not in params:
            raise ValueError('boundary missing in Content-Type: %s'
% self.headers[CONTENT_TYPE])
boundary = params['boundary']
if len(boundary) > 70:
raise ValueError('boundary %r is too long (70 chars max)'
% boundary)
return boundary
@asyncio.coroutine
def _readline(self):
if self._unread:
return self._unread.pop()
return (yield from self._content.readline())
@asyncio.coroutine
def _read_until_first_boundary(self):
while True:
chunk = yield from self._readline()
if chunk == b'':
raise ValueError("Could not find starting boundary %r"
% (self._boundary))
chunk = chunk.rstrip()
if chunk == self._boundary:
return
elif chunk == self._boundary + b'--':
self._at_eof = True
return
@asyncio.coroutine
def _read_boundary(self):
chunk = (yield from self._readline()).rstrip()
if chunk == self._boundary:
pass
elif chunk == self._boundary + b'--':
self._at_eof = True
epilogue = yield from self._readline()
next_line = yield from self._readline()
# the epilogue is expected and then either the end of input or the
# parent multipart boundary, if the parent boundary is found then
# it should be marked as unread and handed to the parent for
# processing
if next_line[:2] == b'--':
self._unread.append(next_line)
# otherwise the request is likely missing an epilogue and both
# lines should be passed to the parent for processing
# (this handles the old behavior gracefully)
else:
self._unread.extend([next_line, epilogue])
else:
raise ValueError('Invalid boundary %r, expected %r'
% (chunk, self._boundary))
@asyncio.coroutine
def _read_headers(self):
lines = [b'']
while True:
chunk = yield from self._content.readline()
chunk = chunk.strip()
lines.append(chunk)
if not chunk:
break
parser = HttpParser()
headers, *_ = parser.parse_headers(lines)
return headers
@asyncio.coroutine
def _maybe_release_last_part(self):
"""Ensures that the last read body part is read completely."""
if self._last_part is not None:
if not self._last_part.at_eof():
yield from self._last_part.release()
self._unread.extend(self._last_part._unread)
self._last_part = None
class MultipartWriter(Payload):
"""Multipart body writer."""
def __init__(self, subtype='mixed', boundary=None):
boundary = boundary if boundary is not None else uuid.uuid4().hex
try:
self._boundary = boundary.encode('us-ascii')
except UnicodeEncodeError:
            raise ValueError('boundary should contain ASCII-only chars')
ctype = 'multipart/{}; boundary="{}"'.format(subtype, boundary)
super().__init__(None, content_type=ctype)
self._parts = []
self._headers = CIMultiDict()
self._headers[CONTENT_TYPE] = self.content_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return iter(self._parts)
def __len__(self):
return len(self._parts)
@property
def boundary(self):
return self._boundary
def append(self, obj, headers=None):
if headers is None:
headers = CIMultiDict()
if isinstance(obj, Payload):
if obj.headers is not None:
obj.headers.update(headers)
else:
obj._headers = headers
self.append_payload(obj)
else:
try:
self.append_payload(get_payload(obj, headers=headers))
except LookupError:
raise TypeError
def append_payload(self, payload):
"""Adds a new body part to multipart writer."""
# content-type
if CONTENT_TYPE not in payload.headers:
payload.headers[CONTENT_TYPE] = payload.content_type
# compression
encoding = payload.headers.get(CONTENT_ENCODING, '').lower()
if encoding and encoding not in ('deflate', 'gzip', 'identity'):
raise RuntimeError('unknown content encoding: {}'.format(encoding))
if encoding == 'identity':
encoding = None
# te encoding
te_encoding = payload.headers.get(
CONTENT_TRANSFER_ENCODING, '').lower()
if te_encoding not in ('', 'base64', 'quoted-printable', 'binary'):
raise RuntimeError('unknown content transfer encoding: {}'
''.format(te_encoding))
if te_encoding == 'binary':
te_encoding = None
# size
size = payload.size
if size is not None and not (encoding or te_encoding):
payload.headers[CONTENT_LENGTH] = str(size)
# render headers
headers = ''.join(
[k + ': ' + v + '\r\n' for k, v in payload.headers.items()]
).encode('utf-8') + b'\r\n'
self._parts.append((payload, headers, encoding, te_encoding))
def append_json(self, obj, headers=None):
"""Helper to append JSON part."""
if headers is None:
headers = CIMultiDict()
data = json.dumps(obj).encode('utf-8')
self.append_payload(
BytesPayload(
data, headers=headers, content_type='application/json'))
def append_form(self, obj, headers=None):
"""Helper to append form urlencoded part."""
assert isinstance(obj, (Sequence, Mapping))
if headers is None:
headers = CIMultiDict()
if isinstance(obj, Mapping):
obj = list(obj.items())
data = urlencode(obj, doseq=True)
return self.append_payload(
StringPayload(data, headers=headers,
content_type='application/x-www-form-urlencoded'))
@property
def size(self):
"""Size of the payload."""
if not self._parts:
return 0
total = 0
for part, headers, encoding, te_encoding in self._parts:
if encoding or te_encoding or part.size is None:
return None
total += (
2 + len(self._boundary) + 2 + # b'--'+self._boundary+b'\r\n'
part.size + len(headers) +
2 # b'\r\n'
)
total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n'
return total
@asyncio.coroutine
def write(self, writer):
"""Write body."""
if not self._parts:
return
for part, headers, encoding, te_encoding in self._parts:
yield from writer.write(b'--' + self._boundary + b'\r\n')
yield from writer.write(headers)
if encoding or te_encoding:
w = MultipartPayloadWriter(writer)
if encoding:
w.enable_compression(encoding)
if te_encoding:
w.enable_encoding(te_encoding)
yield from part.write(w)
yield from w.write_eof()
else:
yield from part.write(writer)
yield from writer.write(b'\r\n')
yield from writer.write(b'--' + self._boundary + b'--\r\n')
class MultipartPayloadWriter:
def __init__(self, writer):
self._writer = writer
self._encoding = None
self._compress = None
def enable_encoding(self, encoding):
if encoding == 'base64':
self._encoding = encoding
self._encoding_buffer = bytearray()
elif encoding == 'quoted-printable':
self._encoding = 'quoted-printable'
def enable_compression(self, encoding='deflate'):
zlib_mode = (16 + zlib.MAX_WBITS
if encoding == 'gzip' else -zlib.MAX_WBITS)
self._compress = zlib.compressobj(wbits=zlib_mode)
@asyncio.coroutine
def write_eof(self):
if self._compress is not None:
chunk = self._compress.flush()
if chunk:
self._compress = None
yield from self.write(chunk)
if self._encoding == 'base64':
if self._encoding_buffer:
yield from self._writer.write(base64.b64encode(
self._encoding_buffer))
@asyncio.coroutine
def write(self, chunk):
if self._compress is not None:
if chunk:
chunk = self._compress.compress(chunk)
if not chunk:
return
if self._encoding == 'base64':
self._encoding_buffer.extend(chunk)
if self._encoding_buffer:
buffer = self._encoding_buffer
div, mod = divmod(len(buffer), 3)
enc_chunk, self._encoding_buffer = (
buffer[:div * 3], buffer[div * 3:])
if enc_chunk:
enc_chunk = base64.b64encode(enc_chunk)
yield from self._writer.write(enc_chunk)
elif self._encoding == 'quoted-printable':
yield from self._writer.write(binascii.b2a_qp(chunk))
else:
yield from self._writer.write(chunk)
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/aiohttp/multipart.py | Python | apache-2.0 | 30,252 |
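A minimal usage sketch for the reader and writer above, in the `yield from` style this aiohttp version targets; `resp` is assumed to be an `aiohttp.ClientResponse` with a `multipart/*` body:

```python
import asyncio

from aiohttp.multipart import MultipartReader, MultipartWriter


@asyncio.coroutine
def read_parts(resp):
    # `resp` is an assumed aiohttp.ClientResponse whose Content-Type is
    # multipart/*; from_response() wraps it so the connection is released
    # once the final boundary has been consumed.
    reader = MultipartReader.from_response(resp)
    while True:
        part = yield from reader.next()
        if part is None:  # final boundary reached
            break
        body = yield from part.read(decode=True)
        print(part.filename, len(body))


def build_body():
    # Each append_* helper renders its own part headers (Content-Type,
    # Content-Length) before the part is written out.
    writer = MultipartWriter('form-data')
    writer.append(b'raw bytes')
    writer.append_json({'key': 'value'})
    writer.append_form({'field': 'data'})
    return writer
```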
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from setuptools.command.test import test as TestCommand
import os
import sys
main_ns = {}
with open('swarmci/version.py') as f:
exec(f.read(), main_ns)
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
tox.cmdline(args=args)
setup(
name='swarmci',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=main_ns['__version__'],
description='CI extension leveraging Docker Swarm to enable parallel, distributed, isolated build tasks.',
long_description="",
# The project's main homepage.
url='https://github.com/ghostsquad/swarmci.git',
# Author details
author='Weston McNamee',
author_email='ghost.squadron@gmail.com',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Private :: Do Not Upload',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='docker swarm testing test build ci cd continuous deployment integration jenkins bamboo teamcity',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'PyYAML>=3.12',
'docker-py>=1.10.3'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
tests_require=['tox'],
cmdclass={'test': Tox},
)
| ghostsquad/swarmci | setup.py | Python | apache-2.0 | 3,378 |
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HLSStream
HLS_URL_FORMAT = "http://hls.goodgame.ru/hls/{0}{1}.m3u8"
QUALITIES = {
"1080p": "",
"720p": "_720",
"480p": "_480",
"240p": "_240"
}
_url_re = re.compile(r"http://(?:www\.)?goodgame\.ru/channel/(?P<user>\w+)")
_stream_re = re.compile(
    r"iframe frameborder=\"0\" width=\"100%\" height=\"100%\" src=\"http://goodgame\.ru/player(\d)?\?(\w+)\""
)
_ddos_re = re.compile(
    r"document\.cookie=\"(__DDOS_[^;]+)"
)
class GoodGame(Plugin):
@classmethod
    def can_handle_url(cls, url):
return _url_re.match(url)
def _check_stream(self, url):
res = http.get(url, acceptable_status=(200, 404))
if res.status_code == 200:
return True
def _get_streams(self):
headers = {
"Referer": self.url
}
res = http.get(self.url, headers=headers)
match = _ddos_re.search(res.text)
        if match:
headers["Cookie"] = match.group(1)
res = http.get(self.url, headers=headers)
match = _stream_re.search(res.text)
if not match:
return
stream_id = match.group(2)
streams = {}
for name, url_suffix in QUALITIES.items():
url = HLS_URL_FORMAT.format(stream_id, url_suffix)
if not self._check_stream(url):
continue
streams[name] = HLSStream(self.session, url)
return streams
__plugin__ = GoodGame
| chrippa/livestreamer | src/livestreamer/plugins/goodgame.py | Python | bsd-2-clause | 1,558 |
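A plugin like this is normally exercised through a Livestreamer session rather than instantiated directly; a short sketch (the channel name is hypothetical):

```python
from livestreamer import Livestreamer

session = Livestreamer()
# The session matches the URL against each plugin's can_handle_url();
# the channel name below is hypothetical.
streams = session.streams("http://goodgame.ru/channel/example")
if "720p" in streams:
    print(streams["720p"].url)
```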
# find a peer
def generate():
"""find a socket, which is connected to the specified port in thumb mode
Leaves socket in r0 reg.
argument:
port (int/str): specific port
backup:
r6: indicates found socket/file descriptor
"""
sc = """
findpeer_1:
sub r5, r5, r5
add r5, r5, #-1
mov r3, sp
looplabel_2:
mov sp, r3
add r5, r5, #1
mov r0, r5
movs r2, #4
push {r2}
mov r2, sp
add r1, sp, #32
sub r7, r7, r7
add r7, r7, #255
add r7, r7, #32
svc 1
cmp r0, #0
bne looplabel_2
mov r6, r5
"""
return sc
if __name__ == '__main__':
print generate()
| sigma-random/ARMSCGen | shellcodes/thumb/findpeer.py | Python | gpl-2.0 | 660 |
import h5py
import numpy as np
from os.path import expanduser
def load_data(filename=expanduser("~/data/CK/dataset_10708.h5")):
with h5py.File(filename,'r') as hf:
data = hf.get('dataset')
np_data = np.array(data)
return np_data
| dfdx/Faceless.jl | py/data.py | Python | mit | 260 |
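A minimal usage sketch for the loader above, assuming `py/data.py` is importable as `data`; the file path is hypothetical:

```python
# The .h5 file must contain an HDF5 dataset named 'dataset',
# which is what load_data() reads into a numpy array.
from data import load_data

arr = load_data("/tmp/example_dataset.h5")
print(arr.shape, arr.dtype)
```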
"""
VCS Link
########
Copyright (c) 2021 Nordic Semiconductor ASA
SPDX-License-Identifier: Apache-2.0
Introduction
============
This Sphinx extension can be used to obtain the VCS URL for a given Sphinx page.
This is useful, for example, when adding features like "Open on GitHub" on top
of pages. The extension installs a Jinja filter which can be used on the
template to obtain VCS page URLs.
Configuration options
=====================
- ``vcs_link_base_url``: Base URL used as a prefix for generated URLs.
- ``vcs_link_prefixes``: Mapping of pages (regex) <> VCS prefix.
- ``vcs_link_exclude``: List of pages (regex) that will not report a URL. Useful
for, e.g., auto-generated pages not in VCS.
"""
from functools import partial
import os
import re
from typing import Optional
from sphinx.application import Sphinx
__version__ = "0.1.0"
def vcs_link_get_url(app: Sphinx, pagename: str) -> Optional[str]:
"""Obtain VCS URL for the given page.
Args:
app: Sphinx instance.
pagename: Page name (path).
Returns:
VCS URL if applicable, None otherwise.
"""
if not os.path.isfile(app.env.project.doc2path(pagename)):
return None
for exclude in app.config.vcs_link_exclude:
if re.match(exclude, pagename):
return None
found_prefix = ""
for pattern, prefix in app.config.vcs_link_prefixes.items():
if re.match(pattern, pagename):
found_prefix = prefix
break
return "/".join(
[
app.config.vcs_link_base_url,
found_prefix,
app.env.project.doc2path(pagename, basedir=False),
]
)
def add_jinja_filter(app: Sphinx):
if app.builder.name != "html":
return
app.builder.templates.environment.filters["vcs_link_get_url"] = partial(
vcs_link_get_url, app
)
def setup(app: Sphinx):
app.add_config_value("vcs_link_base_url", "", "")
app.add_config_value("vcs_link_prefixes", {}, "")
app.add_config_value("vcs_link_exclude", [], "")
app.connect("builder-inited", add_jinja_filter)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| zephyrproject-rtos/zephyr | doc/_extensions/zephyr/vcs_link.py | Python | apache-2.0 | 2,232 |
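A sketch of wiring this extension into a Sphinx `conf.py`, using the three configuration options the docstring lists; the base URL and patterns below are illustrative, not taken from any real project:

```python
# conf.py (illustrative values only)
extensions = ["zephyr.vcs_link"]

# Prefix for all generated URLs.
vcs_link_base_url = "https://github.com/example/project/blob/main"

# Page-name regex -> path prefix inside the repository.
vcs_link_prefixes = {
    ".*": "doc",
}

# Auto-generated pages that have no counterpart in VCS.
vcs_link_exclude = [
    "reference/generated/.*",
]

# In an HTML template the registered Jinja filter is then available as:
#   {{ pagename | vcs_link_get_url }}
```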
# -*- coding: utf-8 -*-
#
# flask-mail documentation build configuration file, created by
# sphinx-quickstart on Fri May 28 11:39:14 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Mail'
copyright = u'2010, Dan Jacob'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'flask_small'
#html_theme = 'default'
html_theme_options = {
'index_logo': 'flask-mail.png',
'github_fork': None
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'flask-maildoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'flask-mail.tex', u'flask-mail Documentation',
u'Dan Jacob', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| Junnplus/flask-mail | docs/conf.py | Python | bsd-3-clause | 6,521 |
#! /usr/bin/env python
'''
this file will flatten a directory, no matter how large and complicated its directory structure is.
It links everything into a single dir, except the things you don't want it to; in this case the .fits, .cat, and .pkl files I don't really need.
'''
#adam-example# ./adam_flattened_linkable_dir.py /u/ki/awright/thiswork/eyes/
import sys
sys.path.append('/u/ki/awright/quick/pythons/')
from adam_quicktools_ArgCleaner import ArgCleaner
args=ArgCleaner(sys.argv)
#from import_tools import *
#SUBARUDIR="/u/ki/awright/data/"
#backup_main="/u/ki/awright/data/backup_files/"
import os,shutil
#os.walk
#adam_flattened_linkable_dir.py
maindir=args[0]
if not os.path.isdir(maindir):
print 'maindir='+maindir
raise Exception('you have to input a directory as an argument, not maindir='+maindir)
flatdir=maindir+'adam_flatten_linkable/'
flatdirplots=maindir+'adam_flatten_linkable/plots/'
justplots=1
if not justplots:
if os.path.isdir(flatdir):
os.system('rm -r %s' % (flatdir))
os.mkdir(flatdir)
os.mkdir(flatdirplots)
num_copied=0
num_plots_copied=0
followlinks=True
for root, dirs, files in os.walk(maindir,topdown=False, followlinks=followlinks):
if 'adam_flatten_linkable' in root:
continue
elif '.svn' in root:
continue
print '\n##### ', root,' #####'
#print dirs
fitsfiles=[]
catfiles=[]
pklfiles=[]
pngfiles=[]
files2link=[]
for fl in files:
if fl.endswith('.fits'):
fitsfiles.append(fl)
elif fl.endswith('.pkl'):
pklfiles.append(fl)
elif fl.endswith('.cat'):
catfiles.append(fl)
elif fl.endswith('.png'):
pngfiles.append(fl)
#elif os.path.islink(fl):
else:
files2link.append(fl)
link_locators=root.replace(maindir,'').split('/')
try:
link_locators.remove('')
except ValueError:
pass
#link_datetime="%.2i-%.2i-%.4i_at_%.2i-%.2i" % (tm_mon,tm_mday,tm_year,tm_hour,tm_min)
if not justplots:
link_location=flatdir+'__'.join(link_locators) #+"_"+link_datetime
for fl in files2link:
source_fl=os.path.join(root,fl)
dest_fl=link_location+'_NAME_'+fl
if os.path.islink(dest_fl):
print 'already exists:',dest_fl
print 'ln -s %s %s' % (source_fl,dest_fl)
num_copied+=1
os.symlink(source_fl,dest_fl)
link_location=flatdirplots+'__'.join(link_locators) #+"_"+link_datetime
for fl in pngfiles:
source_fl=os.path.join(root,fl)
dest_fl=link_location+'_NAME_'+fl
if os.path.islink(dest_fl):
print 'already exists:',dest_fl
print 'ln -s %s %s' % (source_fl,dest_fl)
num_plots_copied+=1
os.symlink(source_fl,dest_fl)
print 'copied:',num_copied,' to ',flatdir
print 'copied:',num_plots_copied,' to ',flatdirplots
| deapplegate/wtgpipeline | adam_flattened_linkable_dir.py | Python | mit | 2,616 |
# Generated by Django 2.0.8 on 2018-08-18 18:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0029_auto_20180818_1505'),
]
operations = [
migrations.AddField(
model_name='brandingperiod',
name='override_listings_root',
field=models.CharField(blank=True, default='', max_length=255),
),
]
| sussexstudent/falmer | falmer/events/migrations/0030_brandingperiod_override_listings_root.py | Python | mit | 432 |
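For reference, a sketch of the model state this migration encodes; the field is taken from the operation above, while everything else about the class is assumed:

```python
from django.db import models


class BrandingPeriod(models.Model):
    # Field added by migration 0030; the real model's other fields
    # are omitted here.
    override_listings_root = models.CharField(
        blank=True, default='', max_length=255)
```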
"""
storlever.lib.secucrity
~~~~~~~~~~~~~~~~
This module implements some security configuration for storlever.
:copyright: (c) 2014 by OpenSight (www.opensight.cn).
:license: AGPLv3, see LICENSE for more details.
"""
from pyramid.security import Allow, Everyone, Authenticated, DENY_ALL
class AclRootFactory(object):
__acl__ = [ (Allow, Everyone, 'api'),
(Allow, Authenticated, 'web'),
DENY_ALL]
def __init__(self, request):
pass
| OpenSight/StorLever | storlever/lib/security.py | Python | agpl-3.0 | 484 |
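The root factory only takes effect once it is handed to Pyramid's `Configurator`; a minimal registration sketch (the view is hypothetical):

```python
from pyramid.config import Configurator

from storlever.lib.security import AclRootFactory

config = Configurator(root_factory=AclRootFactory)
# With the ACL above, 'api'-guarded views are open to everyone while
# 'web'-guarded views require an authenticated principal.
config.add_view(lambda request: {"ok": True}, name="status",
                permission="web", renderer="json")
app = config.make_wsgi_app()
```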
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module contains energy functions that can be used
to re-configure an energy based pooler.
The methods are expected to be set
using `types.MethodType` or something similar.
Example:
> pooler = EnergyBasedPooler()
> pooler.energy = MethodType(energy_functions.numenta_extended, pooler)
"""
import numpy as np
from numpy import dot, exp, maximum
# The raw string is used because I don't want to escape special characters,
# so one can copy and paste the docstring into an environment which
# is able to display LaTex.
def numenta(self, x, y):
r"""
Numenta's energy:
$$
E(x,y) = - \sum_i y_i \cdot \exp( - b_i ) \cdot (\sum_j W_{ij} \ x_j ) + S(y)
$$
    where the size penalty is given by
$$
S(y) = \begin{cases}
0 & \text{if $\|y\| \leq w$, and} \\
+\infty & \text{otherwise,}
\end{cases}
$$
"""
pooler = self
W = pooler.connections.visible_to_hidden
H = pooler.connections.hidden_to_hidden
b = pooler.connections.hidden_bias
n, m = pooler.output_size, pooler.input_size
w = pooler.code_weight
size_penalty = 0 if dot(y,y) <= w else np.inf
energy = - dot( y , exp( - b ) * dot(W, x) )
return energy + size_penalty
# The raw string is used because I don't want to escape special characters,
# so one can copy and paste the docstring into an environment which
# is able to display LaTex.
def numenta_extended(self, x, y):
r"""
Numenta's energy extended with an additional term (the H-term)
to decorrelate pairwise column activity:
$$
E(x,y) = - \sum_i y_i \cdot \exp( - b_i - \sum_j H_{ij} \ y_j ) \cdot (\sum_j W_{ij} \ x_j ) + S(y)
$$
    where the size penalty is given by
$$
S(y) = \begin{cases}
0 & \text{if $\|y\| \leq w$, and} \\
+\infty & \text{otherwise,}
\end{cases}
$$
"""
pooler = self
W = pooler.connections.visible_to_hidden
H = pooler.connections.hidden_to_hidden
b = pooler.connections.hidden_bias
n, m = pooler.output_size, pooler.input_size
w = pooler.code_weight
size_penalty = 0 if dot(y,y) <= w else np.inf
energy = - dot( y , exp( - b - dot(H, y) ) * dot(W, x) )
return energy + size_penalty
# The raw string is used because I don't want to escape special characters,
# so one can copy and paste the docstring into an environment which
# is able to display LaTex.
def numenta_extended_no_size_penalty(self, x, y):
r"""
Numenta's energy with an additional term (the H-term)
to decorrelate pairwise column activity, but with NO size penalty:
$$
E(x,y) = - \sum_i y_i \cdot \exp( - b_i - \sum_j H_{ij} \ y_j ) \cdot (\sum_j W_{ij} \ x_j )
$$
"""
pooler = self
W = pooler.connections.visible_to_hidden
H = pooler.connections.hidden_to_hidden
b = pooler.connections.hidden_bias
n, m = pooler.output_size, pooler.input_size
w = pooler.code_weight
energy = - dot( y , exp( - b - dot(H, y) ) * dot(W, x) )
return energy
| ywcui1990/nupic.research | projects/energy_based_pooling/energy_based_models/energy_functions.py | Python | agpl-3.0 | 4,211 |
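As the module docstring says, these functions are rebound onto a pooler with `types.MethodType`; a sketch of that pattern, where `EnergyBasedPooler` and its attributes (`connections`, `input_size`, `output_size`, `code_weight`) are assumed from the surrounding project:

```python
from types import MethodType

import numpy as np

import energy_functions
# Assumed import path for the pooler class from the surrounding project.
from energy_based_models.energy_based_pooler import EnergyBasedPooler

pooler = EnergyBasedPooler()
# Rebind the pooler's energy to the extended variant defined above.
pooler.energy = MethodType(energy_functions.numenta_extended, pooler)

x = np.random.rand(pooler.input_size)   # input vector
y = np.zeros(pooler.output_size)        # candidate binary code
print(pooler.energy(x, y))
```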
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_account_info
short_description: Get info about the Vultr account.
description:
  - Get info about account balance, charges and payments.
version_added: "2.9"
author: "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Get Vultr account infos
vultr_account_info:
register: result
- name: Print the infos
debug:
var: result.vultr_account_info
'''
RETURN = r'''
---
vultr_api:
  description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_account_info:
description: Response from Vultr API
returned: success
type: complex
contains:
balance:
description: Your account balance.
returned: success
type: float
sample: -214.69
pending_charges:
description: Charges pending.
returned: success
type: float
sample: 57.03
last_payment_date:
description: Date of the last payment.
returned: success
type: str
sample: "2017-08-26 12:47:48"
last_payment_amount:
description: The amount of the last payment transaction.
returned: success
type: float
sample: -250.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrAccountInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrAccountInfo, self).__init__(module, "vultr_account_info")
self.returns = {
'balance': dict(convert_to='float'),
'pending_charges': dict(convert_to='float'),
'last_payment_date': dict(),
'last_payment_amount': dict(convert_to='float'),
}
def get_account_info(self):
return self.api_query(path="/v1/account/info")
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
account_info = AnsibleVultrAccountInfo(module)
result = account_info.get_result(account_info.get_account_info())
module.exit_json(**result)
if __name__ == '__main__':
main()
| pgmillon/ansible | lib/ansible/modules/cloud/vultr/vultr_account_info.py | Python | gpl-3.0 | 3,185 |
"""
Just run the Material command with a bunch of inputs to make sure
it works as expected
"""
import pyroomacoustics as pra
scat_test = {
"coeffs": [0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3],
"center_freqs": [125, 250, 500, 1000, 2000, 4000, 8000],
}
abs_test = {
"coeffs": [0.3, 0.4, 0.25, 0.11, 0.05, 0.03, 0.3],
"center_freqs": [125, 250, 500, 1000, 2000, 4000, 8000],
}
def test_material_e_float():
mat = pra.Material(0.3)
def test_material_es_float():
mat = pra.Material(0.3, 0.1)
def test_material_e_db():
mat = pra.Material("hard_surface")
def test_material_es_db():
mat = pra.Material("hard_surface", "rpg_qrd")
def test_material_e_dict():
mat = pra.Material(abs_test)
def test_material_es_dict():
mat = pra.Material(abs_test, scat_test)
def test_material_e_float_s_db():
mat = pra.Material(0.3, "rpg_skyline")
def test_material_e_db_s_float():
mat = pra.Material("brickwork", 0.1)
def test_material_e_float_s_dict():
mat = pra.Material(0.1, scat_test)
def test_material_e_dict_s_float():
mat = pra.Material(abs_test, 0.1)
def test_material_e_db_s_dict():
mat = pra.Material("brickwork", scat_test)
def test_material_e_dict_s_db():
mat = pra.Material(abs_test, "classroom_tables")
def test_dict_pairs():
materials = pra.make_materials(
ceiling=(0.25, 0.01),
floor=(0.5, 0.1),
east=(0.15, 0.15),
west=(0.07, 0.15),
north=(0.15, 0.15),
south=(0.10, 0.15),
)
assert isinstance(materials, dict)
def test_list_pairs():
materials = pra.make_materials(
(0.25, 0.01),
(0.5, 0.1),
(0.15, 0.15),
(0.07, 0.15),
(0.15, 0.15),
(0.10, 0.15),
)
assert isinstance(materials, list)
def test_dict_list_mix():
mat_list, mat_dict = pra.make_materials(
(0.25, 0.01),
abs_test,
"brickwork",
("brickwork", scat_test),
1.0,
ceilling=(abs_test, scat_test),
floor=(abs_test, "rpg_skyline"),
one=(0.10, 0.15),
)
assert isinstance(mat_list, list)
assert isinstance(mat_dict, dict)
def test_empty():
mat_list = pra.make_materials()
assert mat_list == []
if __name__ == "__main__":
test_material_e_float()
test_material_es_float()
    test_material_es_dict()
test_material_e_db()
test_material_es_db()
test_material_e_dict()
test_material_es_dict()
test_material_e_float_s_db()
test_material_e_db_s_float()
test_material_e_float_s_dict()
test_material_e_dict_s_float()
test_material_e_db_s_dict()
test_material_e_dict_s_db()
test_dict_pairs()
test_list_pairs()
test_dict_list_mix()
test_empty()
| LCAV/pyroomacoustics | pyroomacoustics/tests/test_materials.py | Python | mit | 2,742 |
import os
import logger
from cluster_helper import cluster as ipc
config_default = {"sample": [4, 2, None],
"group": [45, 4, 1],
"qc": [8, 1, None],
"pirna": [8, 1, None],
"report": [8, 1, 1]}
def get_cluster_view(args, num_jobs=None):
if not os.path.exists("ipython"):
os.mkdir("ipython")
os.mkdir("checkpoint")
if not num_jobs:
num_jobs = args.num_jobs
return ipc.cluster_view(args.scheduler, args.queue,
num_jobs, args.cores_per_job,
start_wait=args.timeout,
profile="ipython",
extra_params={"resources": args.resources,
"mem": args.memory_per_job,
"tag": "seqcluster",
"run_local": args.local})
def wait_until_complete(jobs):
return [j.get() for j in jobs]
def is_done(step):
if os.path.exists(os.path.join("checkpoint", step)):
return True
return False
def flag_done(step):
with open(os.path.join("checkpoint", step), "w") as handle:
handle.write("done")
def send_job(fn, data, args, step):
"""decide if send jobs with ipython or run locally"""
res = []
num_jobs = None
if not args.config:
resources = config_default
logger.my_logger.debug("doing %s" % step)
if step not in resources:
raise ValueError("step not in resources %s" % step)
else:
args.memory_per_job = resources[step][0]
args.cores_per_job = resources[step][1]
if resources[step][2]:
num_jobs = resources[step][2]
if args.parallel == "ipython":
if not is_done(step):
with get_cluster_view(args, num_jobs) as view:
for sample in data:
res.append(view.apply_async(fn, sample, args))
res = wait_until_complete(res)
flag_done(step)
return res
for sample in data:
res.append(fn(sample, args))
return res
| lpantano/seqcluster-helper | sqhelper/cluster.py | Python | mit | 2,131 |
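A sketch of calling `send_job` locally, assuming the module is importable as `sqhelper.cluster` (per the path above) and that `args` carries the attributes the function reads; with `parallel` not set to `"ipython"`, no cluster is started:

```python
from argparse import Namespace

from sqhelper import cluster


def process_sample(sample, args):
    # The worker must be importable/picklable so remote engines could run it.
    return sample.upper()


# config=None (falsy) selects the built-in config_default resource table;
# parallel="local" makes send_job run each sample in-process.
args = Namespace(parallel="local", config=None)
results = cluster.send_job(process_sample, ["s1", "s2"], args, "qc")
print(results)  # ['S1', 'S2']
```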
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QDial
class MyData:
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[0 for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data  # DON'T CALL THIS ATTRIBUTE "data", A METHOD ALREADY HAS THIS NAME (model.data(index, role))!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
                # The following line is necessary, e.g. to dynamically update the QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
def createEditor(self, parent, option, index):
editor = QDial(parent=parent)
editor.setRange(-5, 5)
editor.setNotchesVisible(True)
editor.setAutoFillBackground(True)
return editor
def setEditorData(self, editor, index):
value = int(index.data(Qt.EditRole)) # equivalent of value = index.model().data(index, Qt.EditRole)
editor.setValue(value)
def setModelData(self, editor, model, index):
value = editor.value()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a Python keyword; exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| jeremiedecock/snippets | python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_dial_widget.py | Python | mit | 3,962 |
"""
Support for Xiaomi Mi Flora BLE plant sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.miflora/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_MAC)
REQUIREMENTS = ['miflora==0.1.16']
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = 'adapter'
CONF_CACHE = 'cache_value'
CONF_FORCE_UPDATE = 'force_update'
CONF_MEDIAN = 'median'
CONF_RETRIES = 'retries'
CONF_TIMEOUT = 'timeout'
DEFAULT_ADAPTER = 'hci0'
DEFAULT_UPDATE_INTERVAL = 1200
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = 'Mi Flora'
DEFAULT_RETRIES = 2
DEFAULT_TIMEOUT = 10
# Sensor types are defined like: Name, units
SENSOR_TYPES = {
'temperature': ['Temperature', '°C'],
'light': ['Light intensity', 'lux'],
'moisture': ['Moisture', '%'],
'conductivity': ['Conductivity', 'µS/cm'],
'battery': ['Battery', '%'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MAC): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_RETRIES, default=DEFAULT_RETRIES): cv.positive_int,
vol.Optional(CONF_CACHE, default=DEFAULT_UPDATE_INTERVAL): cv.positive_int,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the MiFlora sensor."""
from miflora import miflora_poller
cache = config.get(CONF_CACHE)
poller = miflora_poller.MiFloraPoller(
config.get(CONF_MAC), cache_timeout=cache,
adapter=config.get(CONF_ADAPTER))
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
poller.ble_timeout = config.get(CONF_TIMEOUT)
poller.retries = config.get(CONF_RETRIES)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = SENSOR_TYPES[parameter][1]
prefix = config.get(CONF_NAME)
if len(prefix) > 0:
name = "{} {}".format(prefix, name)
devs.append(MiFloraSensor(
poller, parameter, name, unit, force_update, median))
add_devices(devs)
class MiFloraSensor(Entity):
"""Implementing the MiFlora sensor."""
def __init__(self, poller, parameter, name, unit, force_update, median):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._unit = unit
self._name = name
self._state = None
self.data = []
self._force_update = force_update
# Median is used to filter out outliers. median of 3 will filter
# single outliers, while median of 5 will filter double outliers
# Use median_count = 1 if no filtering is required.
self.median_count = median
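        # Worked example with median_count = 3: buffered readings
        # [21.0, 85.0, 21.5] sort to [21.0, 21.5, 85.0], so the middle value
        # 21.5 is reported and the single outlier 85.0 is filtered out.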
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
        This uses a rolling median over the last median_count values
        (3 by default) to filter out outliers.
"""
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except IOError as ioerr:
_LOGGER.info("Polling error %s", ioerr)
data = None
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
self.data.append(data)
else:
_LOGGER.info("Did not receive any data from Mi Flora sensor %s",
self.name)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if len(self.data) > 0:
self.data = self.data[1:]
else:
self._state = None
return
_LOGGER.debug("Data collected: %s", self.data)
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
else:
_LOGGER.debug("Not yet enough data for median calculation")
|
Duoxilian/home-assistant
|
homeassistant/components/sensor/miflora.py
|
Python
|
mit
| 5,190
|
# Copyright (c) 2017 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VMware VStorageObject driver
Volume driver based on VMware VStorageObject aka First Class Disk (FCD). This
driver requires a minimum vCenter version of 6.5.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import image_transfer
from oslo_vmware.objects import datastore
from oslo_vmware import vim_util
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import volumeops as vops
LOG = logging.getLogger(__name__)
@interface.volumedriver
class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
"""Volume driver based on VMware VStorageObject"""
# 1.0 - initial version based on vSphere 6.5 vStorageObject APIs
VERSION = '1.0.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "VMware_CI"
# minimum supported vCenter version
MIN_SUPPORTED_VC_VERSION = '6.5'
STORAGE_TYPE = 'vstorageobject'
def do_setup(self, context):
"""Any initialization the volume driver needs to do while starting.
:param context: The admin context.
"""
super(VMwareVStorageObjectDriver, self).do_setup(context)
self._storage_policy_enabled = False
self.volumeops.set_vmx_version('vmx-13')
def get_volume_stats(self, refresh=False):
"""Collects volume backend stats.
:param refresh: Whether to discard any cached values and force a full
refresh of stats.
:returns: dict of appropriate values.
"""
stats = super(VMwareVStorageObjectDriver, self).get_volume_stats(
refresh=refresh)
stats['storage_protocol'] = self.STORAGE_TYPE
return stats
def _select_ds_fcd(self, volume):
req = {}
req[hub.DatastoreSelector.SIZE_BYTES] = volume.size * units.Gi
(_host_ref, _resource_pool, summary) = self._select_datastore(req)
return summary.datastore
def _get_temp_image_folder(self, size_bytes, preallocated=False):
req = {}
req[hub.DatastoreSelector.SIZE_BYTES] = size_bytes
if preallocated:
req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = (
hub.DatastoreType.get_all_types() -
{hub.DatastoreType.VSAN, hub.DatastoreType.VVOL})
(host_ref, _resource_pool, summary) = self._select_datastore(req)
folder_path = vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH
dc_ref = self.volumeops.get_dc(host_ref)
self.volumeops.create_datastore_folder(
summary.name, folder_path, dc_ref)
return (dc_ref, summary, folder_path)
def _get_disk_type(self, volume):
extra_spec_disk_type = super(
VMwareVStorageObjectDriver, self)._get_disk_type(volume)
return vops.VirtualDiskType.get_virtual_disk_type(extra_spec_disk_type)
def create_volume(self, volume):
"""Create a new volume on the backend.
:param volume: Volume object containing specifics to create.
:returns: (Optional) dict of database updates for the new volume.
"""
disk_type = self._get_disk_type(volume)
ds_ref = self._select_ds_fcd(volume)
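        # volume.size is in GiB while create_fcd takes the size in MiB
        # (assumed here), hence the units.Ki (1024) multiplier below.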
fcd_loc = self.volumeops.create_fcd(
volume.name, volume.size * units.Ki, ds_ref, disk_type)
return {'provider_location': fcd_loc.provider_location()}
def _delete_fcd(self, provider_loc):
fcd_loc = vops.FcdLocation.from_provider_location(provider_loc)
self.volumeops.delete_fcd(fcd_loc)
def delete_volume(self, volume):
"""Delete a volume from the backend.
:param volume: The volume to delete.
"""
self._delete_fcd(volume.provider_location)
def initialize_connection(self, volume, connector, initiator_data=None):
"""Allow connection to connector and return connection info.
:param volume: The volume to be attached.
:param connector: Dictionary containing information about what is being
connected to.
:param initiator_data: (Optional) A dictionary of driver_initiator_data
objects with key-value pairs that have been
saved for this initiator by a driver in previous
initialize_connection calls.
:returns: A dictionary of connection information.
"""
fcd_loc = vops.FcdLocation.from_provider_location(
volume.provider_location)
connection_info = {'driver_volume_type': self.STORAGE_TYPE}
connection_info['data'] = {
'id': fcd_loc.fcd_id,
'ds_ref_val': fcd_loc.ds_ref_val,
'adapter_type': self._get_adapter_type(volume)
}
LOG.debug("Connection info for volume %(name)s: %(connection_info)s.",
{'name': volume.name, 'connection_info': connection_info})
return connection_info
def _validate_container_format(self, container_format, image_id):
if container_format and container_format != 'bare':
msg = _("Container format: %s is unsupported, only 'bare' "
"is supported.") % container_format
LOG.error(msg)
raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume.
:param context: Security/policy info for the request.
:param volume: The volume to create.
:param image_service: The image service to use.
:param image_id: The image identifier.
:returns: Model updates.
"""
metadata = image_service.show(context, image_id)
self._validate_disk_format(metadata['disk_format'])
self._validate_container_format(
metadata.get('container_format'), image_id)
properties = metadata['properties'] or {}
disk_type = properties.get('vmware_disktype',
vmdk.ImageDiskType.PREALLOCATED)
vmdk.ImageDiskType.validate(disk_type)
size_bytes = metadata['size']
dc_ref, summary, folder_path = self._get_temp_image_folder(
volume.size * units.Gi)
disk_name = volume.id
if disk_type in [vmdk.ImageDiskType.SPARSE,
vmdk.ImageDiskType.STREAM_OPTIMIZED]:
vmdk_path = self._create_virtual_disk_from_sparse_image(
context, image_service, image_id, size_bytes, dc_ref,
summary.name, folder_path, disk_name)
else:
vmdk_path = self._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, size_bytes, dc_ref,
summary.name, folder_path, disk_name,
vops.VirtualDiskAdapterType.LSI_LOGIC)
ds_path = datastore.DatastorePath.parse(
vmdk_path.get_descriptor_ds_file_path())
dc_path = self.volumeops.get_inventory_path(dc_ref)
vmdk_url = datastore.DatastoreURL(
'https', self.configuration.vmware_host_ip, ds_path.rel_path,
dc_path, ds_path.datastore)
fcd_loc = self.volumeops.register_disk(
str(vmdk_url), volume.name, summary.datastore)
return {'provider_location': fcd_loc.provider_location()}
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image.
:param context: Security/policy info for the request.
:param volume: The volume to copy.
:param image_service: The image service to use.
:param image_meta: Information about the image.
:returns: Model updates.
"""
self._validate_disk_format(image_meta['disk_format'])
fcd_loc = vops.FcdLocation.from_provider_location(
volume.provider_location)
hosts = self.volumeops.get_connected_hosts(fcd_loc.ds_ref())
host = vim_util.get_moref(hosts[0], 'HostSystem')
LOG.debug("Selected host: %(host)s for downloading fcd: %(fcd_loc)s.",
{'host': host, 'fcd_loc': fcd_loc})
attached = False
try:
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
backing = self._create_backing(volume, host, create_params)
self.volumeops.attach_fcd(backing, fcd_loc)
attached = True
vmdk_file_path = self.volumeops.get_vmdk_path(backing)
conf = self.configuration
image_transfer.upload_image(
context,
conf.vmware_image_transfer_timeout_secs,
image_service,
image_meta['id'],
volume.project_id,
session=self.session,
host=conf.vmware_host_ip,
port=conf.vmware_host_port,
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume.size * units.Gi,
image_name=image_meta['name'])
finally:
if attached:
self.volumeops.detach_fcd(backing, fcd_loc)
backing = self.volumeops.get_backing_by_uuid(volume.id)
if backing:
self._delete_temp_backing(backing)
def extend_volume(self, volume, new_size):
"""Extend the size of a volume.
:param volume: The volume to extend.
:param new_size: The new desired size of the volume.
"""
fcd_loc = vops.FcdLocation.from_provider_location(
volume.provider_location)
self.volumeops.extend_fcd(fcd_loc, new_size * units.Ki)
def _clone_fcd(self, provider_loc, name, dest_ds_ref,
disk_type=vops.VirtualDiskType.THIN):
fcd_loc = vops.FcdLocation.from_provider_location(provider_loc)
return self.volumeops.clone_fcd(name, fcd_loc, dest_ds_ref, disk_type)
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: Information for the snapshot to be created.
"""
ds_ref = self._select_ds_fcd(snapshot.volume)
cloned_fcd_loc = self._clone_fcd(
snapshot.volume.provider_location, snapshot.name, ds_ref)
return {'provider_location': cloned_fcd_loc.provider_location()}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: The snapshot to delete.
"""
self._delete_fcd(snapshot.provider_location)
def _extend_if_needed(self, fcd_loc, cur_size, new_size):
if new_size > cur_size:
self.volumeops.extend_fcd(fcd_loc, new_size * units.Ki)
def _create_volume_from_fcd(self, provider_loc, cur_size, volume):
ds_ref = self._select_ds_fcd(volume)
disk_type = self._get_disk_type(volume)
cloned_fcd_loc = self._clone_fcd(
provider_loc, volume.name, ds_ref, disk_type=disk_type)
self._extend_if_needed(cloned_fcd_loc, cur_size, volume.size)
return {'provider_location': cloned_fcd_loc.provider_location()}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
:param volume: The volume to be created.
:param snapshot: The snapshot from which to create the volume.
:returns: A dict of database updates for the new volume.
"""
return self._create_volume_from_fcd(
snapshot.provider_location, snapshot.volume.size, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: New Volume object
:param src_vref: Source Volume object
"""
return self._create_volume_from_fcd(
src_vref.provider_location, src_vref.size, volume)
|
j-griffith/cinder
|
cinder/volume/drivers/vmware/fcd.py
|
Python
|
apache-2.0
| 12,601
|
"""WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
whatever the type of "" is. For Python 2, that's a byte string; for Python 3,
it's a unicode string. But PEP 3333 says: "even if Python's str type is
actually Unicode "under the hood", the content of native strings must
still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
import cherrypy as _cherrypy
from cherrypy._cpcompat import BytesIO, bytestr, ntob, ntou, py3k, unicodestr
from cherrypy import _cperror
from cherrypy.lib import httputil
from cherrypy.lib import is_closable_iterator
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ.
"""
env1x = {}
url_encoding = environ[ntou('wsgi.url_encoding')]
for k, v in list(environ.items()):
if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]:
v = v.encode(url_encoding)
elif isinstance(v, unicodestr):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example::
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(RootApp,
domains={'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
})
cherrypy.tree.graft(vhost)
"""
default = None
"""Required. The default WSGI application."""
use_x_forwarded_host = True
"""If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying."""
domains = {}
"""A dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding WSGI application
will be called instead of the default. Note that you often need
separate entries for "example.com" and "www.example.com".
In addition, "Host" headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get("HTTP_X_FORWARDED_HOST", domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect:
ir = _sys.exc_info()[1]
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += "?" + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to
# already
new_uri = sn + ir.path
if ir.query_string:
new_uri += "?" + ir.query_string
if new_uri in redirections:
ir.request.close()
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = "GET"
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = BytesIO()
environ['CONTENT_LENGTH'] = "0"
environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(
self.nextapp,
environ,
start_response,
self.throws
)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(
self.nextapp, self.environ, self.start_response)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def __next__(self):
return self.trap(next, self.iter_response)
# todo: https://pythonhosted.org/six/#six.Iterator
if not py3k:
next = __next__
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except:
tb = _cperror.format_exc()
#print('trapped (started %s):' % self.started_response, tb)
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ""
s, h, b = _cperror.bare_error(tb)
if py3k:
# What fun.
s = s.decode('ISO-8859-1')
h = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in h]
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return ntob("").join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
self.cpapp = cpapp
try:
if not py3k:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
environ = downgrade_wsgi_ux_to_1x(environ)
self.environ = environ
self.run()
r = _cherrypy.serving.response
outstatus = r.output_status
if not isinstance(outstatus, bytestr):
raise TypeError("response.output_status is not a byte string.")
outheaders = []
for k, v in r.header_list:
if not isinstance(k, bytestr):
raise TypeError(
"response.header_list key %r is not a byte string." %
k)
if not isinstance(v, bytestr):
raise TypeError(
"response.header_list value %r is not a byte string." %
v)
outheaders.append((k, v))
if py3k:
# According to PEP 3333, when using Python 3, the response
# status and headers must be bytes masquerading as unicode;
# that is, they must be of type "str" but are restricted to
# code points in the "latin-1" set.
outstatus = outstatus.decode('ISO-8859-1')
outheaders = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in outheaders]
self.iter_response = iter(r.body)
self.write = start_response(outstatus, outheaders)
except:
self.close()
raise
def __iter__(self):
return self
def __next__(self):
return next(self.iter_response)
# todo: https://pythonhosted.org/six/#six.Iterator
if not py3k:
next = __next__
def close(self):
"""Close and de-reference the current request and response. (Core)"""
streaming = _cherrypy.serving.response.stream
self.cpapp.release_serving()
# We avoid the expense of examining the iterator to see if it's
# closable unless we are streaming the response, as that's the
# only situation where we are going to have an iterator which
# may not have been exhausted yet.
if streaming and is_closable_iterator(self.iter_response):
iter_close = self.iter_response.close
try:
iter_close()
except Exception:
_cherrypy.log(traceback=True, severity=40)
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('',
int(env('SERVER_PORT', 80) or -1),
env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1) or -1),
env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1")
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
if py3k:
# This isn't perfect; if the given PATH_INFO is in the
# wrong encoding, it may fail to match the appropriate config
# section URI. But meh.
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
new_enc = self.cpapp.find_config(self.environ.get('PATH_INFO', ''),
"request.uri_encoding", 'utf-8')
if new_enc.lower() != old_enc.lower():
# Even though the path and qs are unicode, the WSGI server
# is required by PEP 3333 to coerce them to ISO-8859-1
# masquerading as unicode. So we have to encode back to
# bytes and then decode again using the "correct" encoding.
try:
u_path = path.encode(old_enc).decode(new_enc)
u_qs = qs.encode(old_enc).decode(new_enc)
except (UnicodeEncodeError, UnicodeDecodeError):
# Just pass them through without transcoding and hope.
pass
else:
# Only set transcoded values if they both succeed.
path = u_path
qs = u_qs
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == "HTTP_":
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace("_", "-")
yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application."""
pipeline = [('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
"""A list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
constructor that takes an initial, positional 'nextapp' argument,
plus optional keyword arguments, and returns a WSGI application
(that takes environ and start_response arguments). The 'name' can
be any you choose, and will correspond to keys in self.config."""
head = None
"""Rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self."""
config = {}
"""A dict whose keys match names listed in the pipeline. Each
value is a further dict which will be passed to the corresponding
named WSGI callable (from the pipeline) as keyword arguments."""
response_class = AppResponse
"""The class to instantiate and return as the next app in the WSGI chain.
"""
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
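    # With the default pipeline, the chain memoized above is equivalent to
    #     head = ExceptionTrapper(InternalRedirector(self.tail))
    # built once on the first call and reused afterwards.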
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == "pipeline":
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == "response_class":
self.response_class = v
else:
name, arg = k.split(".", 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
|
xpavlus/parabaramba
|
venv/lib/python2.7/site-packages/cherrypy/_cpwsgi.py
|
Python
|
gpl-3.0
| 16,891
|
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes import _
from webnotes.utils import cstr, validate_email_add
from webnotes.model.doc import Document, addchild
from webnotes import session, msgprint
sql = webnotes.conn.sql
from controllers.selling_controller import SellingController
class DocType(SellingController):
def __init__(self, doc, doclist):
self.doc = doc
self.doclist = doclist
def onload(self):
self.add_communication_list()
def on_communication_sent(self, comm):
webnotes.conn.set(self.doc, 'status', 'Replied')
def check_status(self):
chk = sql("select status from `tabLead` where name=%s", self.doc.name)
chk = chk and chk[0][0] or ''
return cstr(chk)
def validate(self):
if self.doc.status == 'Lead Lost' and not self.doc.order_lost_reason:
msgprint("Please Enter Lost Reason under More Info section")
raise Exception
if self.doc.source == 'Campaign' and not self.doc.campaign_name and session['user'] != 'Guest':
msgprint("Please specify campaign name")
raise Exception
if self.doc.email_id:
if not validate_email_add(self.doc.email_id):
                msgprint('Please enter a valid email id.')
raise Exception
def on_update(self):
if self.doc.contact_date:
self.add_calendar_event()
self.check_email_id_is_unique()
def check_email_id_is_unique(self):
if self.doc.email_id:
# validate email is unique
email_list = webnotes.conn.sql("""select name from tabLead where email_id=%s""",
self.doc.email_id)
if len(email_list) > 1:
items = [e[0] for e in email_list if e[0]!=self.doc.name]
webnotes.msgprint(_("""Email Id must be unique, already exists for: """) + \
", ".join(items), raise_exception=True)
def add_calendar_event(self):
# delete any earlier event by this lead
sql("delete from tabEvent where ref_type='Lead' and ref_name=%s", self.doc.name)
# create new event
ev = Document('Event')
ev.owner = self.doc.lead_owner
ev.description = ('Contact ' + cstr(self.doc.lead_name)) + \
(self.doc.contact_by and ('. By : ' + cstr(self.doc.contact_by)) or '') + \
            (self.doc.remark and ('. To Discuss : ' + cstr(self.doc.remark)) or '')
ev.event_date = self.doc.contact_date
ev.event_hour = '10:00'
ev.event_type = 'Private'
ev.ref_type = 'Lead'
ev.ref_name = self.doc.name
ev.save(1)
event_user = addchild(ev, 'event_individuals', 'Event User')
event_user.person = self.doc.contact_by
event_user.save()
def get_sender(self, comm):
return webnotes.conn.get_value('Sales Email Settings',None,'email_id')
def on_trash(self):
webnotes.conn.sql("""update tabCommunication set lead=null where lead=%s""", self.doc.name)
webnotes.conn.sql("""update `tabSupport Ticket` set lead='' where lead=%s""",
self.doc.name)
|
gangadhar-kadam/church-erpnext
|
selling/doctype/lead/lead.py
|
Python
|
agpl-3.0
| 3,535
|
#Module amara
__version__ = '1.0'
|
rsachetto/harpia
|
harpia/amara/__init__.py
|
Python
|
gpl-2.0
| 34
|
"""
models of catalog are..
"""
from django.db import models
from django.forms import ModelForm
import useraccounts
from django.contrib.auth.models import User
from django.http import HttpResponse
from mptt.models import MPTTModel, TreeForeignKey
import mptt.fields
from django.core.exceptions import ValidationError
import datetime
from tinymce.models import HTMLField
from librehatti.config import _BUYER
from librehatti.config import _DELIVERY_ADDRESS
from librehatti.config import _IS_DEBIT
from librehatti.config import _PURCHASED_ITEMS
from librehatti.config import _QTY
from librehatti.config import _REFERENCE
from librehatti.config import _REFERENCE_DATE
"""
This class defines the name of a category and its parent category for a product
"""
class mCategory(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', blank=True, null=True)
class Meta:
verbose_name_plural = "Categories"
def __unicode__(self):
return unicode(self.name)
class Unit(models.Model):
unit = models.CharField(max_length=100)
def __unicode__(self):
return '%s' % (self.unit)
class Category(MPTTModel):
name = models.CharField(max_length=100)
parent = TreeForeignKey('self', null=True, blank=True, \
related_name="children")
unit = models.ForeignKey(Unit, null=True, blank=True)
class MPTTMeta:
order_insertion_by = ['name']
def __unicode__(self):
return '%s' % self.name
"""
This class defines the name of a product, its category, the price of each
item of that product, and the organisation with which the user deals
"""
class Product(models.Model):
name = models.CharField(max_length=100)
category = mptt.fields.TreeForeignKey(Category, related_name="products")
price_per_unit = models.IntegerField(blank=True,null=True)
organisation = models.ForeignKey('useraccounts.AdminOrganisations')
def __unicode__(self):
return self.name
"""
This class defines the features of product
"""
class Attributes(models.Model):
name = models.CharField(max_length=200)
is_number = models.BooleanField(default = True)
is_string = models.BooleanField(default = False)
class Meta:
verbose_name_plural = "Attributes"
def __unicode__(self):
return self.name
"""
This class defines the details about the user and organisation, along with
the total discount and payment of a job, and the mode of payment
"""
class ModeOfPayment(models.Model):
method = models.CharField(max_length=25)
class Meta:
verbose_name_plural = "Modes of payment"
def __unicode__(self):
return self.method
"""
This class defines the type and value of taxes, and their validity as
given by a start date and an end date
"""
class Surcharge(models.Model):
tax_name = models.CharField(max_length=200)
value = models.FloatField()
taxes_included = models.BooleanField(default = False)
tax_effected_from = models.DateField(null = True)
tax_valid_till = models.DateField(null = True)
Remark = models.CharField(max_length=1000, null = True)
def __unicode__(self):
return self.tax_name
class PurchaseOrder(models.Model):
buyer = models.ForeignKey(User,verbose_name= _BUYER)
is_debit = models.BooleanField(default = False, verbose_name = _IS_DEBIT)
reference = models.CharField(max_length=200, verbose_name=_REFERENCE)
reference_date = models.DateField(blank=True, null=True, verbose_name=_REFERENCE_DATE)
delivery_address = models.CharField(max_length=500, blank=True, null=True,\
verbose_name = _DELIVERY_ADDRESS)
organisation = models.ForeignKey('useraccounts.AdminOrganisations', default=1)
date_time = models.DateField(auto_now_add=True)
purchase_order_time = models.TimeField(auto_now_add=True)
total_discount = models.IntegerField(default = 0)
tds = models.IntegerField(default = 0)
mode_of_payment = models.ForeignKey(ModeOfPayment)
cheque_dd_number = models.CharField(max_length=50, blank=True)
cheque_dd_date = models.DateField(max_length=50, blank=True, null=True)
type_of_service = models.ForeignKey('useraccounts.OrganisationType')
is_active = models.BooleanField(default = True)
    def save(self, *args, **kwargs):
        surcharges = Surcharge.objects.filter(taxes_included=1)
        if not surcharges:
            raise ValidationError('No Active Taxes. Unable to add Order')
        from librehatti.voucher.models import FinancialSession
        now = datetime.datetime.now()
        financialsession = FinancialSession.objects.\
            values('id', 'session_start_date', 'session_end_date')
        # The order may only be saved when the current date falls inside a
        # financial session.
        session_id = None
        for value in financialsession:
            start_date = value['session_start_date']
            end_date = value['session_end_date']
            if start_date <= now.date() <= end_date:
                session_id = value['id']
        if session_id is None:
            raise ValidationError('No Current Financial Session')
        super(PurchaseOrder, self).save(*args, **kwargs)
def __unicode__(self):
return '%s' % (self.id)
class PurchasedItem(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder)
price_per_unit = models.IntegerField()
qty = models.IntegerField(verbose_name = _QTY)
price = models.IntegerField()
item = models.ForeignKey(Product)
def save(self, *args, **kwargs):
try:
if self.purchase_order:
self.price = self.price_per_unit * self.qty
super(PurchasedItem, self).save(*args, **kwargs)
except:
raise ValidationError('No Active Taxes. Unable to add Items')
def __unicode__(self):
return '%s' % (self.item) + ' - ' '%s' % (self.purchase_order)
class Meta:
verbose_name = _PURCHASED_ITEMS
verbose_name_plural = _PURCHASED_ITEMS
"""
This class defines the features, value of product
"""
class Catalog(models.Model):
attribute = models.ForeignKey(Attributes)
value = models.CharField(max_length=200)
product = models.ForeignKey(Product)
def __unicode__(self):
return self.attribute.name
"""
This class defines the taxes applied on the purchase order
"""
class TaxesApplied(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder)
surcharge = models.ForeignKey(Surcharge)
tax = models.IntegerField()
def __unicode__(self):
return "%s" % (self.surcharge)
"""
This class defines the grand total of the purchase order
"""
class Bill(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder)
delivery_charges = models.IntegerField()
total_cost = models.IntegerField()
totalplusdelivery = models.IntegerField()
total_tax = models.IntegerField()
grand_total = models.IntegerField()
amount_received = models.IntegerField()
class HeaderFooter(models.Model):
header = HTMLField()
footer = HTMLField()
is_active = models.BooleanField(default = False)
    def save(self, *args, **kwargs):
        # Keep at most one HeaderFooter active: deactivate any currently
        # active rows before saving an active one.
        if self.is_active:
            HeaderFooter.objects.filter(is_active=1).update(is_active=0)
        super(HeaderFooter, self).save(*args, **kwargs)
def __unicode__(self):
return '%s' % (self.id)
class Meta:
verbose_name_plural = "Header and Footer"
class SurchargePaid(models.Model):
surcharge = models.ForeignKey(Surcharge)
value = models.IntegerField()
date = models.DateField(auto_now_add = True)
def __unicode__(self):
        return '%s paid on %s' % (self.surcharge, self.date)
class ChangeRequest(models.Model):
purchase_order_of_session = models.IntegerField()
from librehatti.voucher.models import FinancialSession
session = models.ForeignKey(FinancialSession)
previous_total = models.IntegerField()
new_total = models.IntegerField()
description = models.CharField(max_length=100)
initiator = models.CharField(max_length=50)
initiation_date = models.DateField(auto_now_add = True)
class RequestSurchargeChange(models.Model):
change_request = models.ForeignKey(ChangeRequest)
surcharge = models.ForeignKey(TaxesApplied)
previous_value = models.IntegerField()
new_value = models.IntegerField()
class RequestStatus(models.Model):
change_request = models.ForeignKey(ChangeRequest)
confirmed = models.BooleanField(default=False)
cancelled = models.BooleanField(default=False)
request_response = models.DateField(null = True)
class NonPaymentOrder(models.Model):
buyer = models.ForeignKey(User,verbose_name= _BUYER)
reference = models.CharField(max_length=200, verbose_name=_REFERENCE)
reference_date = models.DateField(verbose_name=_REFERENCE_DATE)
date = models.DateField(auto_now_add=True)
delivery_address = models.CharField(max_length=500, blank=True, null=True,\
verbose_name = _DELIVERY_ADDRESS)
item_type = models.CharField(max_length = 200)
def __unicode__(self):
return '%s' % (self.id)
|
s-monisha/LibreHatti
|
src/librehatti/catalog/models.py
|
Python
|
gpl-2.0
| 9,306
|
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from troveclient.v1 import client
from openstack_dashboard.api import base
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
LOG = logging.getLogger(__name__)
@memoized
def troveclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
trove_url = base.url_for(request, 'database')
c = client.Client(request.user.username,
request.user.token.id,
project_id=request.user.project_id,
auth_url=trove_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = trove_url
return c
def instance_list(request, marker=None):
page_size = utils.get_page_size(request)
return troveclient(request).instances.list(limit=page_size, marker=marker)
def instance_get(request, instance_id):
return troveclient(request).instances.get(instance_id)
def instance_delete(request, instance_id):
return troveclient(request).instances.delete(instance_id)
def instance_create(request, name, volume, flavor, databases=None,
users=None, restore_point=None, nics=None,
datastore=None, datastore_version=None):
# TODO(dklyle): adding conditional to support trove without volume
# support for now until API supports checking for volume support
if volume > 0:
volume_params = {'size': volume}
else:
volume_params = None
return troveclient(request).instances.create(
name,
flavor,
volume=volume_params,
databases=databases,
users=users,
restorePoint=restore_point,
nics=nics,
datastore=datastore,
datastore_version=datastore_version)
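# An illustrative call (name, flavor, and datastore values are hypothetical):
#
#     instance_create(request, 'db1', volume=1, flavor='m1.small',
#                     datastore='mysql', datastore_version='5.6')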
def instance_resize_volume(request, instance_id, size):
return troveclient(request).instances.resize_volume(instance_id, size)
def instance_resize(request, instance_id, flavor_id):
return troveclient(request).instances.resize_instance(instance_id,
flavor_id)
def instance_backups(request, instance_id):
return troveclient(request).instances.backups(instance_id)
def instance_restart(request, instance_id):
return troveclient(request).instances.restart(instance_id)
def database_list(request, instance_id):
return troveclient(request).databases.list(instance_id)
def database_delete(request, instance_id, db_name):
return troveclient(request).databases.delete(instance_id, db_name)
def backup_list(request):
return troveclient(request).backups.list()
def backup_get(request, backup_id):
return troveclient(request).backups.get(backup_id)
def backup_delete(request, backup_id):
return troveclient(request).backups.delete(backup_id)
def backup_create(request, name, instance_id, description=None,
parent_id=None):
return troveclient(request).backups.create(name, instance_id,
description, parent_id)
def flavor_list(request):
return troveclient(request).flavors.list()
def flavor_get(request, flavor_id):
return troveclient(request).flavors.get(flavor_id)
def users_list(request, instance_id):
return troveclient(request).users.list(instance_id)
def user_delete(request, instance_id, user):
return troveclient(request).users.delete(instance_id, user)
def user_list_access(request, instance_id, user):
return troveclient(request).users.list_access(instance_id, user)
def datastore_list(request):
return troveclient(request).datastores.list()
def datastore_version_list(request, datastore):
return troveclient(request).datastore_versions.list(datastore)
|
orbitfp7/horizon
|
openstack_dashboard/api/trove.py
|
Python
|
apache-2.0
| 4,577
|
"""Code for parsing with bulk tagging forms."""
import hashlib
import logging
import re
import sys
VERIFIED = "verified"
QUESTION = "???"
DUPLICATE = "duplicate"
DELETE = "delete"
TALB_MISMATCH = "TALB mismatch"
# Example:
# a7314af4 [ 1 ] 192 12 Mer de Noms
LINE_RE = re.compile(r"([0-9a-f]{8})\s+\[([^\]]*)\]\s+(\d+)\s+\d+\s+(.+)",
re.UNICODE)
# Example match is between >> <<:
# >>00123--------- <<A Perfect Circle
NEW_SECTION_RE = re.compile(r"^\d{5}-{9}\s(.*)", re.UNICODE)
def _update(this_batch, artist, results):
dup_resolver = {}
for dir_hash, code, bitrate_str, talb in this_batch:
code = code.strip()
bitrate = int(bitrate_str)
save = False
if not code:
results[dir_hash] = (VERIFIED, artist, talb)
elif code.lower() == "x":
results[dir_hash] = (DELETE, artist, talb)
elif code == "?":
results[dir_hash] = (QUESTION, artist, talb)
elif code in dup_resolver:
(other_dir_hash, other_bitrate,
artist, other_talb) = dup_resolver[code]
if talb != other_talb:
                # Note that this will produce strange results in the presence
                # of repeated mismatches. Oh well.
msg = '"%s" vs. "%s"' % tuple(sorted([talb, other_talb]))
results[dir_hash] = (TALB_MISMATCH, artist, talb, msg)
results[other_dir_hash] = (TALB_MISMATCH,
artist, other_talb, msg)
logging.warn("TALB mismatch: %s", msg)
elif other_bitrate < bitrate:
results[other_dir_hash] = (DUPLICATE, artist, talb)
save = True
else:
results[dir_hash] = (DUPLICATE, artist, talb)
else:
save = True
if save:
dup_resolver[code] = (dir_hash, bitrate, artist, talb)
# Move anything left in our dup_resolver dict into results.
for dir_hash, _, artist, talb in dup_resolver.itervalues():
if dir_hash not in results:
results[dir_hash] = (VERIFIED, artist, talb)
def get_path_hash(full_dirpath):
"""Map a full directory name to a bulk tagging form hash key."""
return hashlib.md5(full_dirpath).hexdigest()[:8]
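# The 8-character keys in the form (e.g. "a7314af4" in the LINE_RE example
# above) are produced by this truncated-MD5 mapping of the directory path.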
def parse_file(file_obj):
"""Parse a human-editted bulk tagging form, and return a dict.
Args:
file_obj: A file-like object to read the form from.
Returns:
A dict mapping hashed paths to (action code, artist, album, ...) tuples.
"""
results = {}
this_batch = []
artist = None
for line in file_obj:
match = NEW_SECTION_RE.match(line)
if match:
_update(this_batch, artist, results)
artist = match.group(1)
this_batch = []
continue
line = line.strip()
match = LINE_RE.search(line)
if match:
this_batch.append([x.strip() for x in match.groups()])
elif line:
logging.info("Skipping line %r\n", line)
_update(this_batch, artist, results)
return results
|
chirpradio/chirpradio-machine
|
chirp/library/bulk_tagging_form.py
|
Python
|
apache-2.0
| 3,136
|
import contextlib
import unittest
import fin.patch
import sentiment.feed
import sentiment.models
import sentiment.testutil
class FeedTest(unittest.TestCase):
def basic_tweet(self):
return {
"id": 1,
"user_handle": "@one",
"followers": 2,
"message": "Test",
"sentiment": 0.1,
}
def test_with_no_new_tweets(self):
sentiment.feed.handle_new_tweets([])
def test_adding_simple_tweet(self):
with sentiment.testutil.temp_db():
sentiment.feed.handle_new_tweets([self.basic_tweet()])
users = list(sentiment.models.User.select())
self.assertEqual(len(users), 1)
self.assertEqual(users[0].handle, "@one")
self.assertEqual(users[0].followers, 2)
tweets = list(sentiment.models.Tweet.select())
self.assertEqual(len(tweets), 1)
self.assertEqual(tweets[0].message, "Test")
self.assertEqual(tweets[0].sentiment, 0.1)
self.assertEqual(tweets[0].message_id, 1)
self.assertEqual(tweets[0].seen_count, 1)
self.assertEqual(tweets[0].contains_keyword, False)
def test_adding_duplicate(self):
with sentiment.testutil.temp_db():
sentiment.feed.handle_new_tweets(
[self.basic_tweet(), self.basic_tweet()])
# Should only be one user
self.assertEqual(sentiment.models.User.select().count(), 1)
            # Should also only be one tweet
tweets = list(sentiment.models.Tweet.select())
self.assertEqual(len(tweets), 1)
# but seen count is now two
self.assertEqual(tweets[0].seen_count, 2)
def test_feed_failures(self):
        # example.com is reserved (RFC 2606), so 404.example.com should never
        # resolve. Replace the feed URL with a well-formed but non-existent
        # URL and check that there is no traceback, but that an error is
        # reported.
with fin.patch.patch(sentiment.feed,
"FEED_URL",
"http://404.example.com"):
self.assertIn(
"[Errno",
sentiment.feed.fetch_tweets())
|
stestagg/al-sentiment
|
sentiment/feed_test.py
|
Python
|
mit
| 2,206
|
from cmd import foo
print(foo)
|
github/codeql
|
python/ql/test/library-tests/PointsTo/regressions/wrong/module-imports/conflict-stdlib/code-invalid-package-name/test_ok.py
|
Python
|
mit
| 31
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** https://bitbucket.org/MakeHuman/makehuman/
**Authors:** Thomas Larsson, Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2014
**Licensing:** AGPL3 (http://www.makehuman.org/doc/node/the_makehuman_application.html)
This file is part of MakeHuman (www.makehuman.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
**Coding Standards:** See http://www.makehuman.org/node/165
Abstract
--------
Geometry export
"""
import math
import numpy as np
import log
from progress import Progress
#----------------------------------------------------------------------
# library_geometry
#----------------------------------------------------------------------
def writeLibraryGeometry(fp, meshes, config, shapes=None):
progress = Progress(len(meshes), None)
fp.write('\n <library_geometries>\n')
for mIdx,mesh in enumerate(meshes):
if shapes is None:
shape = None
else:
shape = shapes[mIdx]
writeGeometry(fp, mesh, config, shape)
progress.step()
fp.write(' </library_geometries>\n')
def rotateCoord(coord, config):
if config.meshOrientation == 'yUpFaceZ':
pass
elif config.meshOrientation == 'yUpFaceX':
# z,y,-x
coord = np.dstack((coord[:,2],coord[:,1],-coord[:,0]))[0]
elif config.meshOrientation == 'zUpFaceNegY':
# x,z,-y
coord = np.dstack((coord[:,0],-coord[:,2],coord[:,1]))[0]
elif config.meshOrientation == 'zUpFaceX':
# z,x,y
coord = np.dstack((coord[:,2],coord[:,0],coord[:,1]))[0]
return coord
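# Worked example: with meshOrientation == 'yUpFaceX', a vertex at (1, 2, 3)
# is remapped to (z, y, -x) = (3, 2, -1).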
def writeGeometry(fp, mesh, config, shapes=None):
progress = Progress()
progress(0)
coord = mesh.coord + config.offset
coord = rotateCoord(coord, config)
nVerts = len(coord)
fp.write('\n' +
' <geometry id="%sMesh" name="%s">\n' % (mesh.name,mesh.name) +
' <mesh>\n' +
' <source id="%s-Position">\n' % mesh.name +
' <float_array count="%d" id="%s-Position-array">\n' % (3*nVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in coord]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Position-array" stride="3">\n' % (nVerts,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.2)
# Normals
if config.useNormals:
mesh.calcNormals()
vnorm = rotateCoord(mesh.vnorm, config)
nNormals = len(mesh.vnorm)
fp.write(
' <source id="%s-Normals">\n' % mesh.name +
' <float_array count="%d" id="%s-Normals-array">\n' % (3*nNormals,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(no)) for no in vnorm]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Normals-array" stride="3">\n' % (nNormals,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.35)
# UV coordinates
nUvVerts = len(mesh.texco)
fp.write(
' <source id="%s-UV">\n' % mesh.name +
' <float_array count="%d" id="%s-UV-array">\n' % (2*nUvVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f " % tuple(uv)) for uv in mesh.texco]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-UV-array" stride="2">\n' % (nUvVerts,mesh.name) +
' <param type="float" name="S"></param>\n' +
' <param type="float" name="T"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.5, 0.7)
# Faces
fp.write(
' <vertices id="%s-Vertex">\n' % mesh.name +
' <input semantic="POSITION" source="#%s-Position"/>\n' % mesh.name +
' </vertices>\n')
checkFaces(mesh, nVerts, nUvVerts)
progress(0.7, 0.9)
writePolylist(fp, mesh, config)
progress(0.9, 0.99)
fp.write(
' </mesh>\n' +
' </geometry>\n')
if shapes is not None:
shaprog = Progress(len(shapes))
for name,shape in shapes:
writeShapeKey(fp, name, shape, mesh, config)
shaprog.step()
progress(1)
def writeShapeKey(fp, name, shape, mesh, config):
if len(shape.verts) == 0:
log.debug("Shapekey %s has zero verts. Ignored" % name)
return
progress = Progress()
# Verts
progress(0)
target = mesh.coord.copy()
target[:] += config.offset
target[shape.verts] += shape.data[np.s_[...]]
target = rotateCoord(config.scale*target, config)
nVerts = len(target)
fp.write(
' <geometry id="%sMeshMorph_%s" name="%s">\n' % (mesh.name, name, name) +
' <mesh>\n' +
' <source id="%sMeshMorph_%s-positions">\n' % (mesh.name, name) +
' <float_array id="%sMeshMorph_%s-positions-array" count="%d">\n' % (mesh.name, name, 3*nVerts) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in target]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor source="#%sMeshMorph_%s-positions-array" count="%d" stride="3">\n' % (mesh.name, name, nVerts) +
' <param name="X" type="float"/>\n' +
' <param name="Y" type="float"/>\n' +
' <param name="Z" type="float"/>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.3)
# Polylist
nFaces = len(mesh.fvert)
fp.write(
' <vertices id="%sMeshMorph_%s-vertices">\n' % (mesh.name, name) +
' <input semantic="POSITION" source="#%sMeshMorph_%s-positions"/>\n' % (mesh.name, name) +
' </vertices>\n' +
' <polylist count="%d">\n' % nFaces +
' <input semantic="VERTEX" source="#%sMeshMorph_%s-vertices" offset="0"/>\n' % (mesh.name, name) +
#' <input semantic="NORMAL" source="#%sMeshMorph_%s-normals" offset="1"/>\n' % (mesh.name, name) +
' <vcount>')
fp.write( ''.join(["4 " for fv in mesh.fvert]) )
fp.write('\n' +
' </vcount>\n' +
' <p>')
fp.write( ''.join([("%d %d %d %d " % tuple(fv)) for fv in mesh.fvert]) )
fp.write('\n' +
' </p>\n' +
' </polylist>\n' +
' </mesh>\n' +
' </geometry>\n')
progress(1)
#
# writePolylist(fp, mesh, config):
#
def writePolylist(fp, mesh, config):
progress = Progress(2)
nFaces = len(mesh.fvert)
fp.write(
' <polylist count="%d">\n' % nFaces +
' <input offset="0" semantic="VERTEX" source="#%s-Vertex"/>\n' % mesh.name)
if config.useNormals:
fp.write(
' <input offset="1" semantic="NORMAL" source="#%s-Normals"/>\n' % mesh.name +
' <input offset="2" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name +
' <vcount>')
else:
fp.write(
' <input offset="1" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name +
' <vcount>')
fp.write( ''.join(["4 " for fv in mesh.fvert]) )
fp.write('\n' +
' </vcount>\n'
' <p>')
progress.step()
for fn,fv in enumerate(mesh.fvert):
fuv = mesh.fuvs[fn]
if config.useNormals:
fp.write( ''.join([("%d %d %d " % (fv[n], fv[n], fuv[n])) for n in range(4)]) )
else:
fp.write( ''.join([("%d %d " % (fv[n], fuv[n])) for n in range(4)]) )
fp.write(
' </p>\n' +
' </polylist>\n')
progress.step()
#
# checkFaces(mesh, nVerts, nUvVerts):
#
def checkFaces(mesh, nVerts, nUvVerts):
    # Sanity check before writing the polylist: every face's vertex and UV
    # index must be in range. Indices are 0-based, so any index equal to or
    # greater than the count is invalid.
    for fn,fvs in enumerate(mesh.fvert):
        for n,vn in enumerate(fvs):
            uv = mesh.fuvs[fn][n]
            if vn >= nVerts:
                raise NameError("v %d >= %d" % (vn, nVerts))
            if uv >= nUvVerts:
                raise NameError("uv %d >= %d" % (uv, nUvVerts))
|
naturalness/unnaturalcode
|
unnaturalcode/testdata/makehuman/makehuman/plugins/9_export_collada/dae_geometry.py
|
Python
|
agpl-3.0
| 9,944
|
from django.utils.datastructures import SortedDict
from xml.etree import ElementTree
import datetime
def response_dict(tree):
if isinstance(tree, basestring):
tree = ElementTree.fromstring(tree)
d = SortedDict()
for child in list(tree):
if len(list(child)):
d[child.tag] = response_dict(child)
else:
d[child.tag] = child.text
return d
def twilio_date(d):
parts = d.split()
date_string = ' '.join(parts[:-1])
format = "%a, %d %b %Y %H:%M:%S"
return datetime.datetime.strptime(date_string, format)
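# Example: twilio_date("Sat, 14 Aug 2010 01:37:05 +0000") ignores the
# trailing offset and returns datetime.datetime(2010, 8, 14, 1, 37, 5).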
def twilio_offset(d):
    offset = d.split()[-1]
    sign = -1 if offset.startswith('-') else 1
    hours = int(offset[-4:-2])
    minutes = int(offset[-2:])
    # Apply the sign to both components, so "-0530" means -(5h 30m).
    return sign * datetime.timedelta(hours=hours, minutes=minutes)
|
gdoermann/django-twilio
|
twil/util.py
|
Python
|
gpl-2.0
| 730
|
import unittest
from data_structures.stack import Stack
class testPop(unittest.TestCase):
def setUp(self):
self.stack = Stack()
def testEmptyList(self):
self.assertRaises(IndexError, self.stack.pop)
def testListOfOne(self):
self.stack = Stack(1)
self.assertEqual(self.stack.pop().val, 1)
self.stack.push("Hello")
self.assertEqual(self.stack.pop().val, "Hello")
def testLongList(self):
self.stack = Stack(10, 11, 12, 13, 14)
self.assertEqual(self.stack.pop().val, 14)
self.assertEqual(self.stack.pop().val, 13)
def tearDown(self):
self.stack = None
class testPush(unittest.TestCase):
def setUp(self):
self.stack = Stack(10, 11, 12, 13, 14)
    def testEmptyList(self):
self.stack = Stack()
self.stack.push(10)
self.assertEqual(self.stack.head.val, 10)
def testListOfOne(self):
self.stack = Stack()
self.stack.push(10)
self.stack.push(11)
self.assertEqual(self.stack.head.val, 11)
def testLongList(self):
self.stack.push(15)
self.assertEqual(self.stack.head.val, 15)
if __name__ == "__main__":
unittest.main()
|
markcharyk/data-structures
|
tests/stack_tests.py
|
Python
|
mit
| 1,215
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Plan'
db.create_table(u'membership_plan', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
('for_employer', self.gf('django.db.models.fields.BooleanField')(default=True)),
('content_description', self.gf('tinymce.models.HTMLField')()),
))
db.send_create_signal(u'membership', ['Plan'])
# Adding model 'PlanPrice'
db.create_table(u'membership_planprice', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('plan', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Plan'])),
('price', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=9, decimal_places=2)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('is_free', self.gf('django.db.models.fields.BooleanField')(default=False)),
('duration_magnitude', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('duration_unit', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('total_allowed_job_postings', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
))
db.send_create_signal(u'membership', ['PlanPrice'])
# Adding model 'Membership'
db.create_table(u'membership_membership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('customer_id', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('cc_last4', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
('plan_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.PlanPrice'])),
('coupon_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Coupon'], null=True, blank=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200)),
('zip_code', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=0)),
('city', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=100)),
('country', self.gf('django.db.models.fields.CharField')(max_length=2)),
('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('remaining_job_postings', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
))
db.send_create_signal(u'membership', ['Membership'])
# Adding model 'Coupon'
db.create_table(u'membership_coupon', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=10, blank=True)),
('discount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('claimed_by', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True, blank=True)),
))
db.send_create_signal(u'membership', ['Coupon'])
def backwards(self, orm):
# Deleting model 'Plan'
db.delete_table(u'membership_plan')
# Deleting model 'PlanPrice'
db.delete_table(u'membership_planprice')
# Deleting model 'Membership'
db.delete_table(u'membership_membership')
# Deleting model 'Coupon'
db.delete_table(u'membership_coupon')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'membership.coupon': {
'Meta': {'object_name': 'Coupon'},
'claimed_by': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
'discount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'membership.membership': {
'Meta': {'object_name': 'Membership'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'cc_last4': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'coupon_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['membership.Coupon']", 'null': 'True', 'blank': 'True'}),
'customer_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'plan_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['membership.PlanPrice']"}),
'remaining_job_postings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '0'})
},
u'membership.plan': {
'Meta': {'ordering': "['pk']", 'object_name': 'Plan'},
'content_description': ('tinymce.models.HTMLField', [], {}),
'for_employer': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'membership.planprice': {
'Meta': {'object_name': 'PlanPrice'},
'duration_magnitude': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'duration_unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_free': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['membership.Plan']"}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '9', 'decimal_places': '2'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'total_allowed_job_postings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
}
}
complete_apps = ['membership']
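# To apply this initial migration with South (illustrative; command names per
# South's standard workflow):
#
#   python manage.py migrate membership 0001_initial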
|
hellhound/dentexchange
|
dentexchange/apps/membership/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 11,269
|
#-------------------------------------------------------------------------------
# Name: Circle and Sphere Generator
# Purpose: Generates a file with circles and spheres for an assignment
#
# Author: Mr. Seidel
#
# Created: 08/10/2015
# Copyright: (c) Mr. Andrew Seidel 2015
# Licence: GPL 3.0
#-------------------------------------------------------------------------------
import random
random.seed()
numberOfObjects = int(random.random() * 1000) + 1000
outputString = str(numberOfObjects) + "\n"
for i in range(numberOfObjects):
x = int(random.random() * 1000) - 500
y = int(random.random() * 500) - 250
z = int(random.random() * 250) - 125
radius = int(random.random() * 50) + 25
if (int(random.random() * 100) % 2 == 0):
outputString += "Sphere:" + str(radius) + ":" + str(x) + "," + str(y) + "," + str(z) + "\n"
else:
outputString += "Circle:" + str(radius) + ":" + str(x) + "," + str(y) + ",0" + "\n"
outputString += "*"
# Use write() rather than writelines(): passing a plain string to writelines()
# iterates it character by character. Also avoid shadowing the builtin 'file'.
with open("input.txt", "w") as output_file:
    output_file.write(outputString)
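# A run emits one header line with the object count, then lines like the
# following (values are random; these are illustrative), terminated by "*":
#
#   Sphere:51:-12,100,-30
#   Circle:30:240,-115,0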
|
mrseidel-classes/ICS4U-Code
|
files/generateInputText.py
|
Python
|
gpl-3.0
| 1,072
|
import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template2",
path_list=[[TestAction.create_volume, "volume1","=scsi"], \
[TestAction.delete_volume, "volume1"], \
[TestAction.create_volume, "volume2","=scsi"], \
[TestAction.delete_volume, "volume2"], \
[TestAction.create_volume, "volume3","=scsi"], \
[TestAction.delete_volume, "volume3"], \
[TestAction.create_volume, "volume4","=scsi"], \
[TestAction.delete_volume, "volume4"], \
[TestAction.create_volume, "volume5","=scsi"], \
[TestAction.delete_volume, "volume5"], \
[TestAction.create_volume, "volume6","=scsi"], \
[TestAction.delete_volume, "volume6"], \
[TestAction.create_volume, "volume7","=scsi"], \
[TestAction.delete_volume, "volume7"], \
[TestAction.create_volume, "volume8","=scsi"], \
[TestAction.delete_volume, "volume8"], \
[TestAction.migrate_vm, "vm1"], \
[TestAction.migrate_vm, "vm1"], \
[TestAction.create_volume, "volume9","=scsi"], \
[TestAction.attach_volume, "vm1", "volume9"], \
[TestAction.create_volume_backup, "volume9", "backup1"], \
[TestAction.detach_volume, "volume9"], \
[TestAction.create_volume_snapshot, "vm1-root", "snapshot1"], \
[TestAction.create_data_vol_template_from_volume, "volume9", "image1"],\
[TestAction.attach_volume, "vm1", "volume9"], \
[TestAction.detach_volume, "volume9"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.reinit_vm, "vm1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.attach_volume, "vm1", "volume9"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.use_volume_backup, "backup1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.reboot_vm, "vm1"]])
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multihosts/volumes/paths/path112.py
|
Python
|
apache-2.0
| 1,747
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
#: Whether the price expiration should be calculated per product
#: instead of per shop
SHUUP_DISCOUNTS_PER_PRODUCT_EXPIRATION_DATES = False
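# Like other Shuup settings, this can be overridden in a project's Django
# settings module and read back through it, e.g. (illustrative):
#
#   from django.conf import settings
#   per_product = getattr(
#       settings, "SHUUP_DISCOUNTS_PER_PRODUCT_EXPIRATION_DATES", False)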
|
shoopio/shoop
|
shuup/discounts/settings.py
|
Python
|
agpl-3.0
| 397
|
"""URLs to run the tests."""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import ListView
from ..models import Review
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^review-listing/', ListView.as_view(model=Review),
name='review_list'),
url(r'^admin/', include(admin.site.urls)),
url(r'^umedia/', include('user_media.urls')),
url(r'^review/', include('review.urls')),
)
|
un33k/django-review
|
review/tests/urls.py
|
Python
|
mit
| 476
|
from flask import render_template, flash, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user
from . import loans
from forms import LoanApplicationForm, ApproveLoan, RepayLoan
from ..models import db
from ..models import Loan, User
from datetime import date
@loans.route('/new_loan', methods=['GET', 'POST'])
@login_required
def request_loan():
if not current_user.is_borrower:
abort(403)
elif not current_user.is_approved:
abort(404)
elif current_user.is_owing:
        flash("You cannot request a new loan if you're still due!")
return redirect(url_for('loans.view'))
elif current_user.has_requested_loan:
        flash("You cannot request a new loan if your last loan hasn't been approved!")
return redirect(url_for('loans.view'))
else:
form = LoanApplicationForm()
if form.validate_on_submit():
loan = Loan(loan_amt=form.loan_amt.data,
user=current_user._get_current_object())
if loan.loan_amt > loan.user.max_credit_amt:
                flash('You can only borrow up to a maximum of %s' %
loan.user.max_credit_amt)
return redirect(url_for('loans.request_loan'))
            loan.is_requested = True  # was 'loans.is_requested'; setting the flag on the blueprint was almost certainly a typo
loan.user.has_requested_loan = True
db.session.add(loan)
db.session.commit()
            flash(
                'Success. Your loan application has been submitted. View it below.')
return redirect(url_for('loans.view'))
return render_template('loans/request_loan.html',
form=form, title="New Loan")
@loans.route('/view_history')
@login_required
def view():
if not current_user.is_borrower:
abort(403)
if not current_user.is_approved:
abort(404)
else:
loans = (Loan.query
.filter(Loan.user_id == current_user.id)
.order_by(Loan.requested_on.desc())
).all()
return render_template('loans/view.html',
                               loans=loans, title="My Loan Requests")
@loans.route('/view_payment_history')
@login_required
def view_payment_history():
if not current_user.is_borrower:
abort(403)
if not current_user.is_approved:
abort(404)
else:
loans = (Loan.query
.filter(Loan.user_id == current_user.id)
.order_by(Loan.requested_on.desc())
).all()
return render_template('loans/view-payments.html',
                               loans=loans, title="My Loan Requests")
# View for when the user is creditworthy and can now borrow.
@loans.route('/repay/loan/<id>', methods=['GET', 'POST'])
@login_required
def repay_loan(id):
if not current_user.is_borrower:
abort(403)
loan = Loan.query.filter_by(id=id).first()
if loan is None:
abort(404)
if not loan.is_approved:
        flash("You cannot repay a loan that hasn't been approved")
return redirect(url_for('loans.view'))
else:
form = RepayLoan()
if current_user.is_borrower and form.validate_on_submit():
loan.my_funds = form.my_funds.data
flash('Your payment has been received. Please wait while we confirm it.')
return redirect(url_for('loans.view'))
return render_template('loans/repay-loan.html', form=form, loan=loan)
@loans.route('/clear/loan/balance/<id>', methods=['GET', 'POST'])
@login_required
def clear_loan_balance(id):
if not current_user.is_borrower:
abort(403)
loan = Loan.query.filter_by(id=id).first()
if loan is None:
abort(404)
if not loan.is_approved:
        flash("You cannot repay a loan that hasn't been approved")
return redirect(url_for('loans.view'))
form = RepayLoan()
if current_user.is_borrower and form.validate_on_submit():
loan.my_funds = form.my_funds.data
flash('Your payment has been received. Please wait while we confirm it.')
return redirect(url_for('loans.view'))
return render_template('loans/repay-loan.html', form=form, loan=loan)
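# These views hang off the `loans` blueprint imported above; a typical app
# factory would wire it up roughly like this (illustrative, not from this repo):
#
#   from app.loans import loans as loans_blueprint
#   app.register_blueprint(loans_blueprint, url_prefix='/loans')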
|
Kimanicodes/wananchi
|
app/loans/views.py
|
Python
|
mit
| 4,187
|
from __future__ import division
import numpy as np
import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
import sys
import pylab
import re
import file_loader as load
import matplotlib.patheffects
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid.anchored_artists import AnchoredSizeBar
import os
## WIP!!
#reads a special kind of data file printed by protein_microscopy.cpp
#format is:
# --------
# proteinType1 boxName1 n(t=0) n(t=1) n(t=2) ...
# proteinType1 boxName2 n(t=0) n(t=1) n(t=2) ...
# proteinType1 boxName3 n(t=0) n(t=1) n(t=2) ...
#
# proteinType2 boxName1 n(t=0) n(t=1) n(t=2) ...
# proteinType2 boxName2 n(t=0) n(t=1) n(t=2) ...
# proteinType2 boxName3 n(t=0) n(t=1) n(t=2) ...
# --------
#where n(t) is the number of proteins of one type at time t
#opens the file and grabs a particular line matching proteinType and
#boxName. returns list of protein counts at each time.
#box-plot--p-0.50-0.50-0.00-0.00-15.00-exact.dat
sim_type = load.sim_type
def returnData(boxName,proteinType):
#open the data file, grab the line with the correct protein type and box partition, load it as a [string] (so we can use list comprehensions)
filename = job_string + 'box-plot.dat'
with open(filename,"r") as boxData:
proteinsOverTime = [line for line in boxData if (proteinType in line) and (boxName in line)]
#format the string so that it is a list of numbers (split on tab, pop off keywords and newlines, convert str -> float)
proteinsOverTime = proteinsOverTime[0].split('\t')
proteinsOverTime = proteinsOverTime[2:-1]
proteinsOverTime = [float(i) for i in proteinsOverTime]
return proteinsOverTime
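# For example, a line in box-plot.dat might look like this (tab-separated;
# the protein/box names and counts here are hypothetical):
#
#   D_ND<TAB>Right<TAB>120<TAB>118<TAB>115<TAB>...<TAB>\n
#
# so returnData('Right', 'D_ND') would yield [120.0, 118.0, 115.0, ...].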
#takes input format:
#["boxName1-proteinType1","boxName1-proteinType2","boxName2-proteinType1"...]
#(box name first, matching how plotNameList_D/_E are built in main()). will
#return a list of lists in the stacking order specified by the input
#(first entry is at the bottom).
def stackData(plotList):
#parse the input
tempList = []
for proteinData in plotList:
        boxName, protein = proteinData.split('-')  # entries are "box-protein"; the old names were swapped (harmless, since returnData only does substring tests)
tempList += [returnData(boxName,protein)]
#"stack" the lists
stackedPlotList = [tempList[0]]
for i in range(1,len(tempList)):
stackedPlotList += [[j+k for (j,k) in zip(stackedPlotList[i-1],tempList[i])]]
output = np.array(stackedPlotList)
return output/output[len(output[:,0])-1, 0] # normalize output as a fraction of total
def find_period(f):
"""
Find the period of a function that is described by the input
array f, and return indices for a start and end range for one
period. If we cannot find the period accurately, just return
the entire range.
"""
f = -f
# first we look at the fft to get a guess at the period (probably
# not *too* accurate or too bad).
fk = np.fft.fft(f)
fk[0] = 0
kmax = 1
fkmax = np.abs(fk[:int(len(fk)/2)]).max()
for i in xrange(1,int(len(fk)/2)):
if np.abs(fk[i]) == fkmax:
kmax = i
break
#print 'kmax is', kmax
period_estimate = len(f)/kmax
#plt.plot(np.abs(fk))
#plt.figure()
if kmax < 5:
return (0, len(f))
# now we locate the final minimum of the function.
lastmin = len(f)-2
while f[lastmin] > f[lastmin+1] or f[lastmin] > f[lastmin-1]:
lastmin -= 1
# and last (but not least), we locate the second-to-last
# (penultimate) minimum, which should have a very similar value to
# the final minimum.
penultimate_min = lastmin - int(period_estimate*.7)
while f[penultimate_min] > f[penultimate_min+1] or f[penultimate_min] > f[penultimate_min-1] or np.abs(f[penultimate_min]/f[lastmin]-1) > 0.01:
penultimate_min -= 1
#return (0, len(f) - 1)
if penultimate_min < 0:
return (0, len(f))
return (penultimate_min, lastmin)
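# A quick illustration of find_period's contract (synthetic input, not from
# this script's data files):
#
#   f = np.cos(np.linspace(0, 20*np.pi, 1000))   # ten full periods
#   start, end = find_period(f)                  # indices spanning ~one period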
def main():
filename = job_string + 'box-plot.dat'
print "loading ",filename
with open(filename, "r") as boxData:
fileLines = boxData.readlines()
#get number of boxes and protein types. little hokey but it works. in boxData.readlines(), there is exactly one '\n' newline string
    #for each protein type block. therefore, the number of protein types is equal to the number of times "\n" appears by itself in the list.
numProteinTypes = len([line for line in fileLines if line=="\n"])
numNewLines = numProteinTypes
#it follows that the total number of lines in the data file, minus the number of blank lines in the data file, is equal to
#the number of protein types * the number of box types. divide by number of protein types to get number of box types.
numBoxes = (len(fileLines) - numNewLines)/numProteinTypes
#grab the names of the proteins used, and the names of the boxes
proteinTypeList = []
boxList = []
for line in fileLines:
if (line != "\n"):
proteinTypeList += [line.split("\t")[0]]
boxList += [line.split("\t")[1]]
#prune duplicates
proteinTypeList = list(set(proteinTypeList))
boxList = list(set(boxList))
#generate list of proteinType and box combinations to feed into stackData
plotNameList_D = []
plotNameList_E = []
numProteinTypes_D = 0
numProteinTypes_E = 0
for box in boxList:
for proteinType in proteinTypeList:
if "D_" in proteinType:
plotNameList_D += ["%s-%s"%(box,proteinType)]
if "E_" in proteinType:
plotNameList_E += ["%s-%s"%(box,proteinType)]
#print ""
#print "plotNameList before ", plotNameList_D, "\n"
new_plotNameList_D = [0]*len(plotNameList_D)
P_Ord = [3,0,2,1,7,4,6,5,11,8,10,9]
if load.f_param4 == '97.00':
P_Ord = [3,0,2,1,11,8,10,9,15,12,14,13,7,4,6,5]
if load.f_param4 == '96.00':
P_Ord = [15,12,14,13,3,0,2,1,7,4,6,5,11,8,10,9]
for i in range(len(P_Ord)):
new_plotNameList_D[i] = plotNameList_D[P_Ord[i]]
for i in range(len(plotNameList_D)):
plotNameList_D[i] = new_plotNameList_D[i]
#print "plotNameList after ",plotNameList_D,"\n"
plotProteinLabels = ['MinD:ATP (cyto)','MinD:ATP (mem)','MinE:MinD:ATP','MinD:ADP (cyto)']
#pass plotNameList through stackData to generate the list of line data to be plotted
plotCurveList_D = stackData(plotNameList_D)
plotCurveList_E = stackData(plotNameList_E)
#get a time axis for the plot from the length of one of the data sets we have
    difD = 2.5 # diffusion constant, (um)^2 s^-1
time_step = .1*load.dx*load.dx/difD #sec
    print_denominator = 1000 #This matches the C++ code, so things are formatted the same here.
box_time_step = time_step*print_denominator
timeAxis = np.linspace(0,box_time_step*len(plotCurveList_D[0]),len(plotCurveList_D[0]))
#begin messy code (to deal with matplotlib) - don't judge me
# start_time_as_frac_of_ten = float(sys.argv[8])
# end_time_as_frac_of_ten = float(sys.argv[9])
# tot_time = float(len(plotCurveList_D[0]))*box_time_step
start = int(float(sys.argv[8])/box_time_step)#int(tot_time*start_time_as_frac_of_ten/10.0/box_time_step)
end = int(float(sys.argv[9])/box_time_step)#int(tot_time*end_time_as_frac_of_ten/10.0/box_time_step)
# (start, end) = find_period(plotCurveList_D[len(plotCurveList_D)-2])
# (start, end) = find_period(np.array(returnData(boxList[len(boxList)-1], 'D_ND')))
# start = end - 2*(end-start)
# end = int(tot_time/box_time_step)
# if (load.f_shape == 'triangle') or (load.f_param4 == '95.00') or (load.f_param4 == '94.00'):
# start = 0
# end = int(tot_time*.5/box_time_step)
# if ((load.f_param2 == '14.00' or load.f_param2 == '5.50') and load.f_param4 == '99.00'):
# start = 0
# end = int(tot_time/box_time_step)
# periods_file = open('periods.txt','a')
# periods_file.write('Box period= '+str(box_time_step*(end-start)) +' for simulation '+load.f_shape+
# ' '+load.f_param1+' '+load.f_param2+' '+load.f_param3+' '+load.f_param4+' '+
# load.f_param5+', with start = '+str(start*box_time_step)+' and end = '+str(end*box_time_step)+'\n')
# periods_file.close()
# print useful coordination data
period = timeAxis[end-1] - timeAxis[start]
print 'period is', period
firsttime = timeAxis[start]
while firsttime > 9*period:
firsttime -= period
print 'early start time is', firsttime
print 'and end time is ',firsttime+period
print 'and file numbers are', firsttime*2, 'and', (firsttime+period)*2
# now offset time so it starts at zero
timeAxis = timeAxis - timeAxis[start]
#print set(plotCurveList_D[1]).union(set(plotCurveList_D[2]))
#get num on each plot
for proteinType in proteinTypeList:
if "D_" in proteinType:
numProteinTypes_D += 1
if "E_" in proteinType:
numProteinTypes_E +=1
# plot scales. colors limited for now.
# colorScale = ["b","g","r","c","m","y"]
# The tuples elements here are the amount of R,G,B in the color, respectively, on a scale 0-1
col_amount = 1.0
colorScale = ["b",(0.0,0.0,col_amount),(col_amount,0.0,0.0),(0.0,col_amount,0.0),"m","y"]
# alphaScale_D = [n/numProteinTypes for n in range(0,numProteinTypes_D+1)]
alphaScale_D = [0.1,0.25,0.50,1.00]
alphaScale_E = [n/numProteinTypes for n in range(0,numProteinTypes_E+1)]
#generate the plot
#f, (bax,sectionax) = plt.subplots(1, 2)
bax = plt.subplot2grid((2,5), (0,0), colspan=4, rowspan=2)
sectionax = plt.subplot2grid((2,5), (0,4), colspan=1,rowspan=2)
# first plot the section data...
filename = job_string + 'sections.dat'
sectiondata = np.loadtxt(filename)
def plot_sections(sectionax, sectiondata):
dx = load.dx
x = np.arange(sectiondata.shape[1]*1.0)*dx
y = np.arange(sectiondata.shape[0]*1.0)*dx
X,Y = np.meshgrid(x,y)
inmembrane = np.zeros_like(sectiondata)
inmembrane[sectiondata>0] = 1.0
xmax = X[sectiondata>0].max()
xmin = X[sectiondata>0].min()
ymax = Y[sectiondata>0].max()
ymin = Y[sectiondata>0].min()
ymean = (Y*inmembrane).sum()/inmembrane.sum()
xmean = (X*inmembrane).sum()/inmembrane.sum()
yweighted = (Y*sectiondata).sum()/sectiondata.sum()
xweighted = (X*sectiondata).sum()/sectiondata.sum()
levels = [0.5, 1.5, 2.5, 3.5, 4.5]
mycolors = ["w","g","r","m","c","y"]
for i in xrange(min(4, len(boxList))):
if boxList[i] == 'Right':
mycolors[1] = colorScale[i]
if boxList[i] == 'Mid':
mycolors[2] = colorScale[i]
if boxList[i] == 'Left':
mycolors[3] = colorScale[i]
mycolors = colorScale[1:]
if load.f_param4 == '97.00':
mycolors = ['g','r','m','c']
if load.f_param4 == '96.00':
#rightup = 2, rightdown = 1, leftup = 4, leftdown = 3
mycolors = ['g','r','c','m']
#print mycolors
# here we rotate so that the order of sections will match the
# box plot.
xdir, ydir = xweighted - xmean, yweighted - ymean
xdir, ydir = xdir/np.sqrt(xdir**2+ydir**2), ydir/np.sqrt(xdir**2+ydir**2)
extraxspace = 0.5
extrayspace = 0
Yrotated = X*xdir + Y*ydir
Xrotated = X*ydir - Y*xdir
sectionax.contourf(Xrotated, Yrotated, sectiondata, levels=levels, colors=mycolors)
xmin = Xrotated[sectiondata>0].min()
xmax = Xrotated[sectiondata>0].max()
ymin = Yrotated[sectiondata>0].min()
ymax = Yrotated[sectiondata>0].max()
sectionax.set_xlim(xmin-extraxspace, xmax)
sectionax.set_ylim(ymin-extrayspace, ymax)
sectionax.set_aspect('equal')
sectionax.set_frame_on(False)
sectionax.axes.get_xaxis().set_visible(False)
sectionax.axes.get_yaxis().set_visible(False)
sectionax.add_artist(AnchoredSizeBar(
sectionax.transData,
2.13, # length of the bar in the data reference
"2.13$\mu$", # label of the bar
# bbox_to_anchor=(0.,0.,1.,1.),
loc=8, # 'best', # location (lower right)
pad=-(ymax-ymin)/2.0 -.4, borderpad=0.25, sep=3,
frameon=False
))
plot_sections(sectionax, sectiondata)
section_names = ['Bottom Section','Center Section','Top Section']
if load.f_param4 == '97.00':
section_names = ['Lower Section','Middle Left Section','Middle Right Section','Upper Section']
# section_names = ['rightup','mid','left','rightdown']
if load.f_param4 == '96.00':
section_names = ['Lower Left Section','Lower Right Section','Upper Left Section','Upper Right Section']
# section_names = ['rightdown','rightup','leftdown','leftup']
font=FontProperties()
font.set_family('serif')
text_adjust = -.2*box_time_step*(end-start)
j=0
k=0
for i in range(len(plotCurveList_D[:,0])):
if i%(numProteinTypes_D)==0:
j+=1
k=0
if i==0:
bax.plot(timeAxis[start:end],
plotCurveList_D[i, start:end],
color=colorScale[j],alpha=alphaScale_D[k])
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .1*np.floor(i/numProteinTypes_D)
if load.f_param4 == '97.00' or load.f_param4 == '96.00':
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .07*np.floor(i/numProteinTypes_D)
y_label = (plotCurveList_D[i, start+int(1/box_time_step)])/2.0
bax.annotate('%s'%plotProteinLabels[i],xy=(1,y_label),xytext=(text_adjust,y_text_label),
fontsize=7,
fontproperties=font,
arrowprops=dict(facecolor='black',shrink=0.05, width=.3, headwidth=5.))
bax.fill_between(timeAxis[start:end],
[0 for x in range(len(timeAxis))[start:end]],
plotCurveList_D[i, start:end],
alpha=alphaScale_D[k],facecolor=colorScale[j])
elif i!=0:
if i == 1:
k+=1
bax.plot(timeAxis[start:end],
plotCurveList_D[i,start:end],
color=colorScale[j],alpha=alphaScale_D[k])
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .1*np.floor(i/numProteinTypes_D)
y_label = (plotCurveList_D[i, start+int(1/box_time_step)] + plotCurveList_D[i-1, start+int(1/box_time_step)])/2.0
if load.f_param4 == '97.00' or load.f_param4 == '96.00':
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .07*np.floor(i/numProteinTypes_D)
bax.annotate('%s'%plotProteinLabels[i%numProteinTypes_D],xy=(1,y_label),xytext=(text_adjust,y_text_label),
fontsize=7,
fontproperties=font,
arrowprops=dict(facecolor='black',shrink=0.05, width=.3, headwidth=5.))
if (i+1)%(numProteinTypes_D)==0:
bax.text(-0.2,y_text_label+.04,section_names[int(np.floor(i/numProteinTypes_D))],transform=bax.transAxes,fontsize=9,fontproperties=font,)
bax.fill_between(timeAxis[start:end],
plotCurveList_D[i-1, start:end],
plotCurveList_D[i, start:end],
alpha=alphaScale_D[k],facecolor=colorScale[j])
k+=1
bax.set_xlim(timeAxis[start],timeAxis[end-1])
print timeAxis[start]
bax.set_xticklabels(np.arange(timeAxis[start]+100, timeAxis[end-1]+100, 0.5))
#bax.set_xticklabels(np.arange(0, 10, 0.5))
bax.get_yaxis().set_visible(False)
bax.set_ylim(0, 1)
bax.set_title("MinD protein counts over time")
bax.set_xlabel("Time (s)")
rax = bax.twinx()
rax.set_ylabel('Fraction of proteins in each stage and section',labelpad=-15)
rax.yaxis.set_ticklabels([0,"","","","",1.0])
#bax.set_ylabel("Fraction of proteins")
# 'A', xy=(Az, Ax), xytext=(1.2,-3.5),
# path_effects=texteff,
# arrowprops=dict(shrink=0.01, width=1,
# headwidth=hw, path_effects=arroweff))
#bax.legend(plotNameList_D,bbox_to_anchor=(0.3,-0.05,1.0,1.0),loc=4,prop={'size':8}).draw_frame(False)
print load.print_string("box-plot_D","")
plt.savefig(load.print_string("box-plot_D",""))
#plt.show()
plt.figure()
#f, (bax,sectionax) = plt.subplots(1, 2)
bax = plt.subplot2grid((2,5), (0,0), colspan=4, rowspan=2)
sectionax = plt.subplot2grid((2,5), (0,4), colspan=1,rowspan=2)
# First plot the section data...
plot_sections(sectionax, sectiondata)
j=0
k=0
for i in range(len(plotCurveList_E)):
if i%(numProteinTypes_E)==0:
j+=1
k=0
if i==0:
bax.plot(timeAxis[start:end],plotCurveList_E[i][start:end],color=colorScale[j],alpha=alphaScale_E[k])
bax.fill_between(timeAxis[start:end],[0 for x in range(len(timeAxis))[start:end]],plotCurveList_E[i][start:end],alpha=alphaScale_E[k],facecolor=colorScale[j])
elif i!=0:
bax.plot(timeAxis[start:end],plotCurveList_E[i][start:end],color=colorScale[j],alpha=alphaScale_E[k])
bax.fill_between(timeAxis[start:end],plotCurveList_E[i-1][start:end],plotCurveList_E[i][start:end],alpha=alphaScale_E[k],facecolor=colorScale[j])
#print "i is ",i," || k is", k," || j is",j
k+=1
bax.set_xlim(timeAxis[start],timeAxis[end-1])
bax.set_ylim(0, 1)
bax.set_title("MinE protein counts over time")
bax.set_xlabel("Time (s)")
bax.set_ylabel("Fraction of proteins")
bax.legend(plotNameList_E,bbox_to_anchor=(0.3,-0.05,1.0,1.0),loc="lower right",prop={'size':8}).draw_frame(False)
plt.savefig(load.print_string("box-plot_E",""))
return 0
job_string = "data/shape-%s/%s-%s-%s-%s-%s-%s/" % (load.f_shape,load.f_param1,load.f_param2,
load.f_param3,load.f_param4,load.f_param5,sim_type)
print job_string
print sim_type
p = re.compile('[.]')
job_string = p.sub('_',job_string)
dir_name = job_string + 'plots'
if not os.path.exists(dir_name):
print "making directory "+dir_name+" because doesnt exist"
os.makedirs(dir_name)
if __name__ == '__main__':
main()
|
droundy/protein
|
pyplots/box_plot.py
|
Python
|
mit
| 18,421
|
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,sys,json,time,xbmc
from resources.lib.libraries import control
from resources.lib.libraries import subtitles
from resources.lib.libraries import bookmarks
from resources.lib.libraries import trakt
class player(xbmc.Player):
def __init__ (self):
xbmc.Player.__init__(self)
def run(self, content, name, url, year, imdb, tvdb, meta):
if control.window.getProperty('PseudoTVRunning') == 'True':
return control.player.play(url, control.item(path=url))
self.getVideoInfo(content, name, year, imdb, tvdb)
if self.folderPath.startswith('plugin://') and not meta == None:
poster, thumb, meta = self.getMeta(meta)
else:
poster, thumb, meta = self.getLibraryMeta()
item = control.item(path=url, iconImage='DefaultVideo.png', thumbnailImage=thumb)
item.setInfo(type='Video', infoLabels = meta)
try: item.setArt({'poster': poster, 'tvshow.poster': poster, 'season.poster': poster})
except: pass
item.setProperty('Video', 'true')
item.setProperty('IsPlayable', 'true')
control.player.play(url, item)
for i in range(0, 240):
if self.isPlayingVideo(): break
xbmc.sleep(1000)
while self.isPlayingVideo():
try: self.totalTime = self.getTotalTime()
except: pass
try: self.currentTime = self.getTime()
except: pass
xbmc.sleep(1000)
control.window.clearProperty('script.trakt.ids')
time.sleep(5)
def getVideoInfo(self, content, name, year, imdb, tvdb):
try:
self.loadingTime = time.time()
self.totalTime = 0 ; self.currentTime = 0
self.folderPath = control.infoLabel('Container.FolderPath')
self.name = name ; self.year = year ; self.content = content
self.file = self.name + '.strm'
self.file = self.file.translate(None, '\/:*?"<>|').strip('.')
self.imdb = 'tt' + imdb if imdb.isdigit() else imdb
self.tvdb = tvdb if not tvdb == None else '0'
except:
pass
try:
if self.content == 'movie':
self.title = re.compile('(.+?) [(]\d{4}[)]$').findall(self.name)[0]
elif self.content == 'episode':
self.tvshowtitle, self.season, self.episode = re.compile('(.+?) S(\d*)E(\d*)$').findall(self.name)[0]
self.season, self.episode = '%01d' % int(self.season), '%01d' % int(self.episode)
self.file2 = '%s (%s) S%02dE%02d.strm' % (self.tvshowtitle.translate(None, '\/:*?"<>|'), self.year, int(self.season), int(self.episode))
except:
pass
try:
if control.setting('resume_playback') == 'true':
self.offset = bookmarks.getBookmark(self.name, self.imdb)
if self.offset == '0': raise Exception()
minutes, seconds = divmod(float(self.offset), 60) ; hours, minutes = divmod(minutes, 60)
yes = control.yesnoDialog('%s %02d:%02d:%02d' % (control.lang(30461).encode('utf-8'), hours, minutes, seconds), '', '', self.name, control.lang(30463).encode('utf-8'), control.lang(30462).encode('utf-8'))
if yes: self.offset = '0'
except:
pass
try:
if self.content == 'movie':
control.window.setProperty('script.trakt.ids', json.dumps({'imdb': self.imdb}))
elif self.content == 'episode':
control.window.setProperty('script.trakt.ids', json.dumps({'tvdb': self.tvdb}))
except:
pass
def getMeta(self, meta):
try:
meta = json.loads(meta)
poster = meta['poster'] if 'poster' in meta else '0'
thumb = meta['thumb'] if 'thumb' in meta else poster
if poster == '0': poster = control.addonPoster()
return (poster, thumb, meta)
except:
poster, thumb, meta = '', '', {'title': self.name}
return (poster, thumb, meta)
def getLibraryMeta(self):
try:
if self.content == 'movie':
meta = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["title", "originaltitle", "year", "genre", "studio", "country", "runtime", "rating", "votes", "mpaa", "director", "writer", "plot", "plotoutline", "tagline", "thumbnail", "file"]}, "id": 1}' % (self.year, str(int(self.year)+1), str(int(self.year)-1)))
meta = unicode(meta, 'utf-8', errors='ignore')
meta = json.loads(meta)['result']['movies']
meta = [i for i in meta if i['file'].endswith(self.file)][0]
for k, v in meta.iteritems():
if type(v) == list:
try: meta[k] = str(' / '.join([i.encode('utf-8') for i in v]))
except: meta[k] = ''
else:
try: meta[k] = str(v.encode('utf-8'))
except: meta[k] = str(v)
self.DBID = meta['movieid'] ; poster = thumb = meta['thumbnail']
meta = {'title': meta['title'], 'originaltitle': meta['originaltitle'], 'year': meta['year'], 'genre': meta['genre'], 'studio' : meta['studio'], 'country' : meta['country'], 'duration' : meta['runtime'], 'rating': meta['rating'], 'votes': meta['votes'], 'mpaa': meta['mpaa'], 'director': meta['director'], 'writer': meta['writer'], 'plot': meta['plot'], 'plotoutline': meta['plotoutline'], 'tagline': meta['tagline']}
elif self.content == 'episode':
meta = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["title", "season", "episode", "showtitle", "firstaired", "runtime", "rating", "director", "writer", "plot", "thumbnail", "file"]}, "id": 1}' % (self.season, self.episode))
meta = unicode(meta, 'utf-8', errors='ignore')
meta = json.loads(meta)['result']['episodes']
match = [i for i in meta if i['file'].endswith(self.file2)]
match += [i for i in meta if i['file'].endswith(self.file)]
meta = match[0]
for k, v in meta.iteritems():
if type(v) == list:
try: meta[k] = str(' / '.join([i.encode('utf-8') for i in v]))
except: meta[k] = ''
else:
try: meta[k] = str(v.encode('utf-8'))
except: meta[k] = str(v)
self.DBID = meta['episodeid'] ; thumb = meta['thumbnail'] ; showtitle = meta['showtitle']
meta = {'title': meta['title'], 'season' : meta['season'], 'episode': meta['episode'], 'tvshowtitle': meta['showtitle'], 'premiered' : meta['firstaired'], 'duration' : meta['runtime'], 'rating': meta['rating'], 'director': meta['director'], 'writer': meta['writer'], 'plot': meta['plot']}
poster = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter": {"field": "title", "operator": "is", "value": "%s"}, "properties": ["thumbnail"]}, "id": 1}' % showtitle)
poster = unicode(poster, 'utf-8', errors='ignore')
poster = json.loads(poster)['result']['tvshows'][0]['thumbnail']
return (poster, thumb, meta)
except:
poster, thumb, meta = '', '', {'title': self.name}
return (poster, thumb, meta)
def setWatchedStatus(self):
if self.content == 'movie':
try:
control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.DBID))
if not self.folderPath.startswith('plugin://'): control.refresh()
except:
pass
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
metaget.get_meta('movie', self.title ,year=self.year)
metaget.change_watched(self.content, '', self.imdb, season='', episode='', year='', watched=7)
except:
pass
try:
if trakt.getTraktAddonMovieInfo() == False: trakt.markMovieAsWatched(self.imdb)
trakt.syncMovies()
except:
pass
elif self.content == 'episode':
try:
control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.DBID))
if not self.folderPath.startswith('plugin://'): control.refresh()
except:
pass
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
metaget.get_meta('tvshow', self.tvshowtitle, imdb_id=self.imdb)
metaget.get_episode_meta(self.tvshowtitle, self.imdb, self.season, self.episode)
metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched=7)
except:
pass
try:
if trakt.getTraktAddonEpisodeInfo() == False: trakt.markEpisodeAsWatched(self.tvdb, self.season, self.episode)
trakt.syncTVShows()
except:
pass
def onPlayBackStarted(self):
for i in range(0, 200):
if control.condVisibility('Window.IsActive(busydialog)') == 1: control.idle()
else: break
control.sleep(100)
if control.setting('playback_info') == 'true':
elapsedTime = '%s %s %s' % (control.lang(30464).encode('utf-8'), int((time.time() - self.loadingTime)), control.lang(30465).encode('utf-8'))
control.infoDialog(elapsedTime, heading=self.name)
try:
if self.offset == '0': raise Exception()
self.seekTime(float(self.offset))
except:
pass
try:
if not control.setting('subtitles') == 'true': raise Exception()
try: subtitle = subtitles.get(self.name, self.imdb, self.season, self.episode)
except: subtitle = subtitles.get(self.name, self.imdb, '', '')
except:
pass
def onPlayBackStopped(self):
try:
bookmarks.deleteBookmark(self.name, self.imdb)
ok = int(self.currentTime) > 180 and (self.currentTime / self.totalTime) <= .92
if ok: bookmarks.addBookmark(self.currentTime, self.name, self.imdb)
except:
pass
try:
ok = self.currentTime / self.totalTime >= .8
if ok: self.setWatchedStatus()
except:
pass
def onPlayBackEnded(self):
self.onPlayBackStopped()
|
azumimuo/family-xbmc-addon
|
plugin.video.specto/resources/lib/libraries/player.py
|
Python
|
gpl-2.0
| 12,057
|
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Package Manager: Manages packages (no shit)
"""
from pybombs import pb_logging
from pybombs.pb_exception import PBException
from pybombs.config_manager import config_manager
from pybombs import recipe
from pybombs import packagers
from pybombs.utils import utils
INSTALL_TYPES = ("any", "source", "binary")
class PackageManagerCache(object):
" Remember what's installed and installable "
def __init__(self):
# Dict: key == package name, value == boolean install status
# If key doesn't exist, we don't know the install/installable status
self.known_installable = {}
# Dict install_type -> dict: package name -> install status
self.known_installed = {k: {} for k in INSTALL_TYPES}
PACKAGE_MANAGER_CACHE = PackageManagerCache()
def _get_valid_install_type(install_type):
" The return value is a valid install type. "
if install_type is None:
return "any"
assert install_type in INSTALL_TYPES
return install_type
class PackageManager(object):
"""
Meta-package manager. This will determine, according to our system
and the configuration, who takes care of managing packages and
then dispatches specific package managers. For example, this might
dispatch an apt backend on Ubuntu and Debian systems, or a
yum/dnf backend on Fedora systems.
"""
def __init__(self,):
# Set up logger:
self.log = pb_logging.logger.getChild("PackageManager")
self.cfg = config_manager
self.pmc = PACKAGE_MANAGER_CACHE
self.prefix_available = self.cfg.get_active_prefix().prefix_dir is not None
# Create a source package manager
if self.prefix_available:
self.src = packagers.Source()
self.prefix = self.cfg.get_active_prefix()
else:
self.log.debug("No prefix specified. Skipping source package manager.")
self.src = None
# Create sorted list of binary package managers
self.binary_pkgrs = packagers.filter_available_packagers(
self.cfg.get('packagers'),
packagers.__dict__.values(),
self.log
)
# Now we can use self.binary_pkgrs, in order, for our commands.
def check_package_flag(self, pkgname, flag):
"""
See if package 'pkgname' has 'flag' set (return the boolean value
of that flag if yes, or None otherwise).
"""
return bool(
self.cfg.get_package_flags(
pkgname,
recipe.get_recipe(pkgname).category
).get(flag)
)
def get_packagers(self, pkgname, install_type=None, ignore_pkg_flag=False):
"""
Return a valid list of packagers for a given package.
This will take care of cases where e.g. a source packager is
required (and then only return that).
"""
install_type = _get_valid_install_type(install_type)
force_build = not ignore_pkg_flag and self.check_package_flag(pkgname, 'forcebuild')
if force_build:
self.log.debug("Package {pkg} is requesting a source build.".format(pkg=pkgname))
if install_type == "source" or (install_type == "any" and force_build):
return [self.src,]
if install_type == "binary" or self.src is None:
if force_build:
self.log.debug(
"Returning no packagers -- package is requesting source build, but binary build is requested."
)
return []
return self.binary_pkgrs
# if install_type == "any":
return [self.src,] + self.binary_pkgrs
def exists(self, name, return_pkgr_name=False):
"""
Check to see if this package is available on this platform.
Returns True or a version string if yes, False if not.
If return_pkgr_name is True, it'll return a list of packagers that
can install this package.
"""
if not return_pkgr_name and name in self.pmc.known_installable:
self.log.obnoxious("{0} has cached installable-status: {1}".format(
name, self.pmc.known_installable.get(name)
))
return True
self.log.debug("Checking if package {0} is installable...".format(name))
if self.check_package_flag(name, 'forceinstalled'):
self.log.debug("Package {0} is forced to state 'installed'.".format(name))
return ['force-installed'] if return_pkgr_name else True
r = recipe.get_recipe(name)
pkgrs = []
for pkgr in self.get_packagers(name):
pkg_version = pkgr.exists(r)
if pkg_version is None or not pkg_version:
continue
else:
self.pmc.known_installable[name] = True
if return_pkgr_name:
pkgrs.append(pkgr.name)
else:
return pkg_version
if return_pkgr_name and len(pkgrs):
self.pmc.known_installable[name] = True
return pkgrs
self.log.debug("Package {0} is not installable.".format(name))
self.pmc.known_installable[name] = False
return False
def installed(self, name, return_pkgr_name=False, install_type=None, ignore_pkg_flag=False):
"""
Check to see if this recipe is installed (identified by its name).
If not, return False. If yes, return value depends on return_pkgr_name
and is either a list of packager name that installed it, or a version
string (if the version string can't be determined, returns True instead).
ignore_pkg_flag is passed to get_packagers().
"""
install_type = _get_valid_install_type(install_type)
if not return_pkgr_name and name in self.pmc.known_installed.get(install_type, {}):
self.log.obnoxious("{0} has cached installed-status: {1}".format(
name, self.pmc.known_installed.get(install_type, {}).get(name)
))
return self.pmc.known_installed.get(install_type, {}).get(name)
self.log.debug("Checking if package {0} is installed...".format(name))
if self.check_package_flag(name, 'forceinstalled'):
self.log.debug("Package {0} is forced to state 'installed'.".format(name))
# TODO maybe we can figure out a version string
return ['force-installed'] if return_pkgr_name else True
r = recipe.get_recipe(name)
pkgrs = []
for pkgr in self.get_packagers(name, install_type, ignore_pkg_flag):
pkg_version = pkgr.installed(r)
if pkg_version is None or not pkg_version:
continue
else:
self.pmc.known_installed[install_type][name] = True
if return_pkgr_name:
pkgrs.append(pkgr.name)
else:
return pkg_version
if return_pkgr_name and len(pkgrs):
return pkgrs
self.pmc.known_installed[install_type][name] = False
self.log.debug("Package {0} is not installed.".format(name))
return False
def install(
self,
name,
install_type=None,
static=False,
verify=False,
fail_silently=False
):
"""
Install the given package. Returns True if successful, False otherwise.
- install_type: Either "binary", "source" or "any" (default).
"any" will pick either binary or source based on various rules, but
will not try both.
- static: If True, will require a source build.
The 'static' option is then set for the package
- verify: If True, a verification test is run after installation
(e.g. run unit tests, exact behaviour depends on recipe)
- fail_silently: If True, no error is produced when a package can't
be installed (will still return False though)
"""
install_type = _get_valid_install_type(install_type)
self.log.debug("install({0}, install_type={1}, static={2})".format(name, install_type, static))
if self.check_package_flag(name, 'forceinstalled'):
self.log.debug("Package {0} is assumed installed.".format(name))
# TODO maybe we can figure out a version string
return True
if static and install_type == "binary":
if not fail_silently:
self.log.error('Binary packager for static build was requested.')
return False
if install_type == "any":
if static:
install_type = "source"
else:
install_type = "binary"
pkgrs = self.get_packagers(name, install_type)
if len(pkgrs) == 0:
if fail_silently:
return False
self.log.error("Can't find any packagers to install {0}".format(name))
raise PBException("No packager available for package {0}".format(name))
if install_type == "source":
if self.installed(name, install_type="binary", ignore_pkg_flag=True):
self.log.warn(
"A source build for package {0} was requested, but binary install was found!".format(name)
)
if not utils.confirm("Install {0} from source despite binary install available?".format(name)):
return False
pkg_optional = self.check_package_flag(name, 'optional')
install_result = self._std_package_operation(
name,
'install',
pkgrs,
verify=verify,
static=static,
)
if not install_result and pkg_optional:
if install_type == "binary":
return False
self.log.warn("Optional package {0} failed to install. Will pretend as if it had worked.".format(name))
self.pmc.known_installed[install_type][name] = True
return True
self.pmc.known_installed[install_type][name] = bool(install_result)
return install_result
def update(self, name, verify=False, install_type=None):
"""
Update the given package. Returns True if successful, False otherwise.
"""
return self._std_package_operation(
name,
'update',
self.get_packagers(name, install_type),
verify=verify,
)
def uninstall(self, name):
"""
Uninstall the given package.
Returns True if successful, False otherwise.
"""
return self._std_package_operation(
name,
'uninstall',
self.get_packagers(name),
)
def _std_package_operation(self, name, operation, pkgrs, verify=False, **kwargs):
"""
Standard package operation: Try an operation on all packagers.
"""
rec = recipe.get_recipe(name)
for pkgr in pkgrs:
self.log.debug("Using packager {0}".format(pkgr.name))
try:
result = getattr(pkgr, operation)(rec, **kwargs)
if result:
if verify and not pkgr.verify(rec):
self.log.warn("Package reported successful {0}, but verification failed.".format(operation))
continue
return True
except PBException as ex:
self.log.error(
"Something went wrong while trying to {0} {1} using {2}: {3}".format(
operation, name, pkgr.name, str(ex).strip()
)
)
return False
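# A minimal usage sketch (assumes a configured PyBOMBS environment with recipes
# available; the package name below is illustrative):
#
#   pm = PackageManager()
#   if not pm.installed("gnuradio"):
#       pm.install("gnuradio", install_type="any", verify=True)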
|
namccart/pybombs
|
pybombs/package_manager.py
|
Python
|
gpl-3.0
| 12,528
|
#!/usr/bin/env python
"""Batch Load Claims Messages associated with Classification plus Automod Parameters.
Synopsis
========
:samp:`load.py {description.csv}...`
Description
===========
Reads the CSV-format descriptions, either from stdin or files supplied as command-line
arguments. Each file contains rows which provide three separate things:
- name of a file with claim(s) to parse (or the claim itself),
- classification parameters for the claims,
- automod constraints for the claims *[XXX this may be derivable from the claim]*.
This program calls the claim load service, and writes a log of success
and failures.
Before this program can be run, an extract must be used to get the X12N claims
out of the source application's HIPAA gateway. The extract process uses a
request ``.csv`` file, and writes the X12N files, plus the description ``.csv`` file.
The typical use case for this loader is as follows::
claimExtract request.csv | load.py
Options
=======
:file:`description.csv` : CSV-format files which identify the claims to load.
If omitted, :file:`stdin` is read.
Environment Variables
=====================
:envvar:`DJANGO_SETTINGS_MODULE` is the Django Settings Module that defines the
database and other runtime environment parameters.
CSV FILE FORMAT
===============
The claim description file has the following format. Either the columns MUST be in
the following order, or the first row MUST have these column titles.
- :samp:`CLAIM-ID`. This is the unique claim ID which will be assigned.
- :samp:`BENEFIT` The Benefit ID to assign to this claim. This is checked against the
TOS/Benefit list.
- :samp:`TYPE-OF-SERVICE` The Type of Service ID to assign to this claim. This is checked against the
TOS/Benefit list.
- :samp:`LOCATION` The codes are ALB, BUF, CE and CW.
Descriptions are Albany, Buffalo, Central NY West, and Central NY East.
- :samp:`TYPE` The codes are I, P, O or D.
Descriptions are In-Patient, Professional, Out-Patient and Dental.
- :samp:`SECONDARY` The codes are M and R.
Descriptions are Medicare and Regular.
- :samp:`GENDER` This is used to define an automod constraint.
Codes are M, F and U.
- :samp:`AGE-FROM` This is used to define an automod constraint.
- :samp:`AGE-TO` This is used to define an automod constraint.
- :samp:`CLAIM-FILE` If present, this is the file that contains the X12 message for this claim.
If omitted, the `CLAIM-TEXT` column must be used to provide
the actual X12N message.
- :samp:`CLAIM-TEXT` If present, this is the text of the X12 message for this claim.
If omitted, the `CLAIM-FILE` column must be used to provide the
name of a file with the actual X12N message.
Other columns are permitted in this file. They are ignored. For example, the following
additional column is often used.
- :samp:`GWID` This is the HIPAA Gateway Transaction ID for the claim, used to retrieve it
from FACETS.
"""
import X12.file
import logging, sys
import xmlrpclib
import csv
wsClaims= xmlrpclib.ServerProxy( "http://slott:slott@localhost:18000/RPC2/claim", allow_none=True )
def loadClaims( claimIter, claimId, properties=None, constraints=None ):
"""Load a collection of claims, all of which have a common set of properties
and automod constraints.
:param claimIter: An iterator over some claims, a list or a file will work.
Often an :mod:`X12.file` is used because it streamlines the file reading.
:param claimId: The base claim id. If multiple claims are loaded, then
claims after the first will have "-*n*" appended to the claim id string.
:param properties: A dict with Claim Properties.
:param constraints: A dict with Automod constraints.
"""
log= logging.getLogger( "web.claims_837.loadClaims" )
count= 0
good= 0
error= 0
id= claimId
for claim in claimIter:
count += 1
log.info( "Parsing claim %d", count )
try:
status= wsClaims.load( claim, id, properties, constraints )
if status[0] == "OK":
good += 1
else:
log.error( status )
error += 1
except xmlrpclib.ProtocolError, e:
log.error( str(e) )
error += 1
id= "%s-%d" % ( claimId, count )
log.info( "Count %d", count )
log.info( "Good %d", good )
log.info( "Error %d", error )
def loadClaimAndDescription( csvFile ):
"""Load a set of claims provided in a CSV-format file.
Each row of the CSV file describes one (rarely multiple) claim.
It provides a complete set of Properties and AutomodConstraints for
the claim.
:param csvFile: an open file (or file-like object) that can be used
by csv.DictReader to get claims, properties and constraints.
"""
log= logging.getLogger( "web.claims_837.loadClaimAndDescription" )
csvReader= csv.DictReader( csvFile )
for row in csvReader:
# If Claim Text: put this in a simple list
if row.has_key("CLAIM-TEXT") and row["CLAIM-TEXT"] is not None:
claims= [ row["CLAIM-TEXT"] ]
# elif Claim File: use X12.file to read the file
elif row.has_key("CLAIM-FILE") and row["CLAIM-FILE"] is not None:
claims= X12.file.genClaims( row["CLAIM-FILE"] )
# else: log a warning -- no claim present
else:
log.warning( "Row %r has no claim" % ( row, ) )
continue
# Build Properties dict
propCols= ( "BENEFIT", "TYPE-OF-SERVICE", "LOCATION", "TYPE", "SECONDARY" )
properties= dict( [ (k,row[k]) for k in propCols ] )
# Build Constraints dict
consCols= ( "GENDER", "AGE-FROM", "AGE-TO" )
constraints= dict( [ (k,row[k]) for k in consCols ] )
# load claims
claimId= row["CLAIM-ID"]
loadClaims( claims, claimId, properties, constraints )
if __name__ == "__main__":
logging.basicConfig( stream=sys.stderr, level=logging.INFO )
#loadClaims( X12.file.genClaims(r"..\..\test\837-example.txt") )
#loadClaims( X12.file.genClaims(r"..\..\test\837I-Examples.txt") )
with open(r"..\..\test\test_description.csv","rb") as claims:
loadClaimAndDescription( claims )
|
sbuss/TigerShark
|
web/claims_837/load.py
|
Python
|
bsd-3-clause
| 6,553
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_atws
----------------------------------
Tests for `atws` module.
"""
import sys
import unittest
from contextlib import contextmanager
import atws
query_test_output=u'<?xml version="1.0" ?>\n<queryxml>\n\t<entity>Ticket</entity>\n\t<query>\n\t\t<field>\n\t\t\tStatus\n\t\t\t<expression op="NotEqual">5</expression>\n\t\t</field>\n\t\t<condition>\n\t\t\t<condition operator="OR">\n\t\t\t\t<field>\n\t\t\t\t\tIssueType\n\t\t\t\t\t<expression op="GreaterThan">345</expression>\n\t\t\t\t</field>\n\t\t\t</condition>\n\t\t</condition>\n\t</query>\n</queryxml>\n'
class TestAtws(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
    def test_000_zone_lookup_failure(self):
        try:
            _ = atws.connect(username='failed@toresolve.com',
                             password='notright')
        except ValueError as e:
            assert 'failed@toresolve.com failed to resolve to a zone' in str(e)
        else:
            self.fail('connect() should raise a ValueError for an '
                      'unresolvable zone')
def test_001_query_building_output(self):
query = atws.Query('Ticket')
query.WHERE('Status', query.NotEqual, 5)
query.open_bracket()
query.OR('IssueType', query.GreaterThan, 345)
query_output = query.pretty_print()
assert repr(query_test_output) == repr(query_output)
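# For reference, pretty_print() above serializes the query into the queryxml
# document held in query_test_output. A standalone sketch of the same DSL:
#
#     query = atws.Query('Ticket')
#     query.WHERE('Status', query.NotEqual, 5)
#     print(query.pretty_print())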
if __name__ == '__main__':
sys.exit(unittest.main())
|
MattParr/python-atws
|
tests/test_atws.py
|
Python
|
mit
| 1,407
|
#!/usr/bin/env python
# Copyright (c) 2015, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""High availability objects to configure HA for a firewall or Panorama"""
# import modules
import logging
import inspect
import xml.etree.ElementTree as ET
import pan.xapi
from pandevice import getlogger, isstring
from pandevice.base import PanObject, PanDevice, Root, MEMBER, ENTRY
from pandevice.base import VarPath as Var
from pandevice import network, firewall
import pandevice.errors as err
logger = getlogger(__name__)
class HighAvailabilityInterface(PanObject):
"""Base class for high availability interface classes
Do not instantiate this class. Use its subclasses.
"""
HA_SYNC = False
# TODO: Support encryption
def __init__(self, *args, **kwargs):
# Store the 'port' variable
# This is necessary because 'port' is a property
# so that self.old_port can work correctly
# XXX: better to remove the need for old_port in a future version
try:
args = list(args)
port = args.pop(2)
except IndexError:
port = kwargs.pop("port", None)
if type(self) == HighAvailabilityInterface:
raise AssertionError("Do not instantiate a HighAvailabilityInterface. Please use a subclass.")
super(HighAvailabilityInterface, self).__init__(*args, **kwargs)
self._port = port
# This is used by setup_interface method to remove old interfaces
self.old_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
if hasattr(self, "_port"):
if value != self._port:
self.old_port = self._port
self._port = value
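    # For example (interface names illustrative): assigning a new port records
    # the old one so delete_old_interface() can clean it up afterwards:
    #
    #     ha1.port = 'ethernet1/6'        # old_port becomes the previous port
    #     ha1.setup_interface()           # configure the new data interface
    #     ha1.delete_old_interface()      # remove the superseded one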
@classmethod
def variables(cls):
return (
Var("ip-address"),
Var("netmask"),
Var("port"),
Var("gateway"),
Var("link-speed"),
Var("link-duplex"),
)
def setup_interface(self):
"""Setup the data interface as an HA interface
Use this method to automatically convert the data interface
to 'ha' mode. This must be done *before* this HA interface
is created on the firewall.
"""
pandevice = self.nearest_pandevice()
if pandevice is None:
return None
if isstring(self.port):
intname = self.port
else:
intname = str(self.port)
intconfig_needed = False
inttype = None
if intname.startswith("ethernet"):
intprefix = "ethernet"
inttype = network.EthernetInterface
intconfig_needed = True
elif intname.startswith("ae"):
intprefix = "ae"
inttype = network.AggregateInterface
intconfig_needed = True
elif intname.startswith("management"):
self.link_speed = None
self.link_duplex = None
if intconfig_needed:
apply_needed = False
interface = pandevice.find(intname, (network.EthernetInterface, network.AggregateInterface))
if interface is None:
interface = pandevice.add(inttype(name=intname, mode="ha"))
apply_needed = True
elif interface.mode != "ha":
interface.mode = "ha"
apply_needed = True
if inttype == network.EthernetInterface:
if self.link_speed is not None:
# Transfer the link_speed to the eth interface
if interface.link_speed != self.link_speed:
interface.link_speed = self.link_speed
apply_needed = True
if self.link_duplex is not None:
# Transfer the link_duplex to the eth interface
if interface.link_duplex != self.link_duplex:
interface.link_duplex = self.link_duplex
apply_needed = True
self.link_speed = None
self.link_duplex = None
if apply_needed:
interface.apply()
return interface
def delete_old_interface(self):
"""Delete the data interface previously used by this HA interface
Use this if the 'port' of an HA interface was changed and the old
interface needs to be cleaned up.
"""
if self.old_port is not None:
self.delete_interface(self.old_port)
self.old_port = None
def delete_interface(self, interface=None, pan_device=None):
"""Delete the data interface used by this HA interface
Args:
interface (HighAvailabilityInterface): The HA interface (HA1, HA2, etc)
pan_device (PanDevice): The PanDevice object to apply the change
"""
if pan_device is None:
pan_device = self.nearest_pandevice()
if pan_device is None:
return None
port = interface if interface is not None else self.port
if isstring(port):
intname = port
else:
intname = str(port)
if intname.startswith("ethernet"):
interface = pan_device.find(intname, network.EthernetInterface)
if interface is None:
# Already deleted
return
elif interface.mode == "ha":
interface.delete()
elif intname.startswith("ae"):
interface = pan_device.find(intname, network.AggregateInterface)
if interface is None:
# Already deleted
return
elif interface.mode == "ha":
interface.mode = "tap"
interface.apply()
class HA1(HighAvailabilityInterface):
"""HA1 interface
Args:
ip-address (str): IP of the interface
netmask (str): Netmask of the interface
port (str): Interface to use for this HA interface (eg. ethernet1/5)
gateway (str): Default gateway of the interface
link_speed (str): Link speed
link_duplex (str): Link duplex
"""
# TODO: Encryption
XPATH = "/interface/ha1"
@classmethod
def variables(cls):
return super(HA1, HA1).variables() + (
Var("monitor-hold-time", vartype="int"),
)
class HA1Backup(HighAvailabilityInterface):
"""HA1 Backup interface
Args:
ip-address (str): IP of the interface
netmask (str): Netmask of the interface
port (str): Interface to use for this HA interface (eg. ethernet1/5)
gateway (str): Default gateway of the interface
link_speed (str): Link speed
link_duplex (str): Link duplex
"""
XPATH = "/interface/ha1-backup"
class HA2(HighAvailabilityInterface):
"""HA2 interface
Args:
ip-address (str): IP of the interface
netmask (str): Netmask of the interface
port (str): Interface to use for this HA interface (eg. ethernet1/5)
gateway (str): Default gateway of the interface
link_speed (str): Link speed
link_duplex (str): Link duplex
"""
XPATH = "/interface/ha2"
class HA2Backup(HighAvailabilityInterface):
"""HA2 Backup interface
Args:
ip-address (str): IP of the interface
netmask (str): Netmask of the interface
port (str): Interface to use for this HA interface (eg. ethernet1/5)
gateway (str): Default gateway of the interface
link_speed (str): Link speed
link_duplex (str): Link duplex
"""
XPATH = "/interface/ha2-backup"
class HA3(HighAvailabilityInterface):
"""HA3 interface
Args:
port (str): Interface to use for this HA interface (eg. ethernet1/5)
"""
XPATH = "/interface/ha3"
@classmethod
def variables(cls):
return (
Var("port"),
)
class HighAvailability(PanObject):
"""High availability configuration base object
All high availability configuration is in this object or is a child of this object
Args:
enabled (bool): Enable HA (Default: True)
group_id (int): The group identifier
description (str): Description for HA pairing
config_sync (bool): Enabled configuration synchronization (Default: True)
peer_ip (str): HA Peer's HA1 IP address
mode (str): Mode of HA: 'active-passive' or 'active-active' (Default: 'active-passive')
passive_link_state (str): Passive link state
state_sync (bool): Enabled state synchronization (Default: False)
ha2_keepalive (bool): Enable HA2 keepalives
ha2_keepalive_action (str): HA2 keepalive action
ha2_keepalive_threshold (int): HA2 keepalive threshold
"""
ROOT = Root.DEVICE
XPATH = "/deviceconfig/high-availability"
HA_SYNC = False
CHILDTYPES = (
"ha.HA1",
"ha.HA1Backup",
"ha.HA2",
"ha.HA2Backup",
"ha.HA3",
)
ACTIVE_PASSIVE = "active-passive"
ACTIVE_ACTIVE = "active-active"
@classmethod
def variables(cls):
return (
# Enabled flag
Var("enabled", vartype="bool", default=True),
# Group
Var("group", "group_id", vartype="entry", default=(1,)),
Var("{{group_id}}/description"),
Var("{{group_id}}/configuration-synchronization/enabled", "config_sync", vartype="bool"),
Var("{{group_id}}/peer-ip"),
# HA Mode (A/P, A/A)
Var("{{group_id}}/mode/(active-passive|active-active)", "mode", default="active-passive"),
Var("{{group_id}}/mode/{{mode}}/passive-link-state"),
# State Synchronization
Var("{{group_id}}/state-synchronization/enabled", "state_sync", vartype="bool", default=False),
# HA2 Keep-alive
Var("{{group_id}}/state-synchronization/ha2-keep-alive/enabled", "ha2_keepalive", vartype="bool"),
Var("{{group_id}}/state-synchronization/ha2-keep-alive/action", "ha2_keepalive_action"),
Var("{{group_id}}/state-synchronization/ha2-keep-alive/threshold", "ha2_keepalive_threshold", vartype="int"),
Var("interface", vartype="none"),
Var("interface/ha1", vartype="none"),
Var("interface/ha1-backup", vartype="none"),
Var("interface/ha2", vartype="none"),
Var("interface/ha2-backup", vartype="none"),
Var("interface/ha3", vartype="none"),
)
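# A minimal end-to-end sketch (addresses, credentials and the interface name
# are illustrative; setup_interface() must run before the HA config exists):
#
#     fw = firewall.Firewall('10.0.0.1', api_username='admin',
#                            api_password='secret')
#     ha = fw.add(HighAvailability(peer_ip='10.0.0.2'))
#     ha1 = ha.add(HA1('10.0.1.1', '255.255.255.0', 'ethernet1/5'))
#     ha1.setup_interface()
#     ha.apply()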
|
PaloAltoNetworks/terraform-templates
|
pan_guard_duty/lambda_code/pandevice/ha.py
|
Python
|
apache-2.0
| 11,239
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from heronpy.api.stream import Grouping
from integration_test.src.python.integration_test.core import TestTopologyBuilder
from integration_test.src.python.integration_test.common.bolt import CountAggregatorBolt
from integration_test.src.python.integration_test.common.bolt import WordCountBolt
from integration_test.src.python.integration_test.common.spout import ABSpout
def fields_grouping_builder(topology_name, http_server_url):
builder = TestTopologyBuilder(topology_name, http_server_url)
ab_spout = builder.add_spout("ab-spout", ABSpout, 1, max_executions=400)
count_bolt = builder.add_bolt("count-bolt", WordCountBolt,
inputs={ab_spout: Grouping.fields('word')}, par=2)
builder.add_bolt("sum-bolt", CountAggregatorBolt,
inputs={count_bolt: Grouping.NONE}, par=1)
return builder.create_topology()
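# A sketch of invoking this builder directly (the topology name and server URL
# are placeholders for values supplied by the integration test harness):
#
#     topology = fields_grouping_builder('fields_grouping_test',
#                                        'http://localhost:8080')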
|
twitter/heron
|
integration_test/src/python/integration_test/topology/fields_grouping/fields_grouping.py
|
Python
|
apache-2.0
| 1,757
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for volumes."""
import datetime
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
LOG = logging.getLogger(__name__)
quota_opts = [
cfg.IntOpt('quota_volumes',
default=10,
help='Number of volumes allowed per project'),
cfg.IntOpt('quota_snapshots',
default=10,
help='Number of volume snapshots allowed per project'),
cfg.IntOpt('quota_gigabytes',
default=1000,
help='Total amount of storage, in gigabytes, allowed '
'for volumes and snapshots per project'),
cfg.IntOpt('reservation_expire',
default=86400,
help='Number of seconds until a reservation expires'),
cfg.IntOpt('until_refresh',
default=0,
help='Count of reservations until usage is refreshed'),
cfg.IntOpt('max_age',
default=0,
help='Number of seconds between subsequent usage refreshes'),
cfg.StrOpt('quota_driver',
default='cinder.quota.DbQuotaDriver',
help='Default driver to use for quota checks'),
cfg.BoolOpt('use_default_quota_class',
default=True,
help='Enables or disables use of default quota class '
'with default quota.'), ]
CONF = cfg.CONF
CONF.register_opts(quota_opts)
class DbQuotaDriver(object):
"""Driver to perform check to enforcement of quotas.
Also allows to obtain quota information.
The default driver utilizes the local database.
"""
def get_by_project(self, context, project_id, resource_name):
"""Get a specific quota by project."""
return db.quota_get(context, project_id, resource_name)
def get_by_class(self, context, quota_class, resource_name):
"""Get a specific quota by quota class."""
return db.quota_class_get(context, quota_class, resource_name)
def get_default(self, context, resource):
"""Get a specific default quota for a resource."""
default_quotas = db.quota_class_get_default(context)
return default_quotas.get(resource.name, resource.default)
def get_defaults(self, context, resources):
"""Given a list of resources, retrieve the default quotas.
Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas,
if it exists.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
"""
quotas = {}
default_quotas = {}
if CONF.use_default_quota_class:
default_quotas = db.quota_class_get_default(context)
for resource in resources.values():
if resource.name not in default_quotas:
LOG.deprecated(_("Default quota for resource: %(res)s is set "
"by the default quota flag: quota_%(res)s, "
"it is now deprecated. Please use the "
"default quota class for default "
"quota.") % {'res': resource.name})
quotas[resource.name] = default_quotas.get(resource.name,
resource.default)
return quotas
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
"""Given list of resources, retrieve the quotas for given quota class.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param quota_class: The name of the quota class to return
quotas for.
:param defaults: If True, the default value will be reported
if there is no specific value for the
resource.
"""
quotas = {}
default_quotas = {}
class_quotas = db.quota_class_get_all_by_name(context, quota_class)
if defaults:
default_quotas = db.quota_class_get_default(context)
for resource in resources.values():
if resource.name in class_quotas:
quotas[resource.name] = class_quotas[resource.name]
continue
if defaults:
quotas[resource.name] = default_quotas.get(resource.name,
resource.default)
return quotas
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True,
usages=True):
"""Given a list of resources, retrieve the quotas for the given
project.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param project_id: The ID of the project to return quotas for.
:param quota_class: If project_id != context.project_id, the
quota class cannot be determined. This
parameter allows it to be specified. It
will be ignored if project_id ==
context.project_id.
:param defaults: If True, the quota class value (or the
default value, if there is no value from the
quota class) will be reported if there is no
specific value for the resource.
:param usages: If True, the current in_use and reserved counts
will also be returned.
"""
quotas = {}
project_quotas = db.quota_get_all_by_project(context, project_id)
if usages:
project_usages = db.quota_usage_get_all_by_project(context,
project_id)
# Get the quotas for the appropriate class. If the project ID
# matches the one in the context, we use the quota_class from
# the context, otherwise, we use the provided quota_class (if
# any)
if project_id == context.project_id:
quota_class = context.quota_class
if quota_class:
class_quotas = db.quota_class_get_all_by_name(context, quota_class)
else:
class_quotas = {}
default_quotas = self.get_defaults(context, resources)
for resource in resources.values():
# Omit default/quota class values
if not defaults and resource.name not in project_quotas:
continue
quotas[resource.name] = dict(
limit=project_quotas.get(
resource.name,
class_quotas.get(resource.name,
default_quotas[resource.name])),
)
# Include usages if desired. This is optional because one
# internal consumer of this interface wants to access the
# usages directly from inside a transaction.
if usages:
usage = project_usages.get(resource.name, {})
quotas[resource.name].update(
in_use=usage.get('in_use', 0),
reserved=usage.get('reserved', 0), )
return quotas
def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
"""A helper method which retrieves the quotas for specific resources.
This specific resource is identified by keys, and which apply to the
current context.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param keys: A list of the desired quotas to retrieve.
:param has_sync: If True, indicates that the resource must
have a sync attribute; if False, indicates
that the resource must NOT have a sync
attribute.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
# Filter resources
if has_sync:
sync_filt = lambda x: hasattr(x, 'sync')
else:
sync_filt = lambda x: not hasattr(x, 'sync')
desired = set(keys)
sub_resources = dict((k, v) for k, v in resources.items()
if k in desired and sync_filt(v))
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
unknown = desired - set(sub_resources.keys())
raise exception.QuotaResourceUnknown(unknown=sorted(unknown))
# Grab and return the quotas (without usages)
quotas = self.get_project_quotas(context, sub_resources,
project_id,
context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
# If project_id is None, then we use the project_id in context
if project_id is None:
project_id = context.project_id
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys(),
has_sync=False, project_id=project_id)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
overs = [key for key, val in values.items()
if quotas[key] >= 0 and quotas[key] < val]
if overs:
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
def reserve(self, context, resources, deltas, expire=None,
project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
synchronization function--this method checks quotas against
current usage and the desired deltas.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it does not have a usage
synchronization function.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation UUIDs which were created.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param deltas: A dictionary of the proposed delta changes.
:param expire: An optional parameter specifying an expiration
time for the reservations. If it is a simple
number, it is interpreted as a number of
seconds and added to the current time; if it is
a datetime.timedelta object, it will also be
added to the current time. A datetime.datetime
object will be interpreted as the absolute
expiration time. If None is specified, the
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
# Set up the reservation expiration
if expire is None:
expire = CONF.reservation_expire
if isinstance(expire, (int, long)):
expire = datetime.timedelta(seconds=expire)
if isinstance(expire, datetime.timedelta):
expire = timeutils.utcnow() + expire
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)
# If project_id is None, then we use the project_id in context
if project_id is None:
project_id = context.project_id
# Get the applicable quotas.
# NOTE(Vek): We're not worried about races at this point.
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
has_sync=True, project_id=project_id)
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
# which means access to the session. Since the
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
CONF.until_refresh, CONF.max_age,
project_id=project_id)
def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
# If project_id is None, then we use the project_id in context
if project_id is None:
project_id = context.project_id
db.reservation_commit(context, reservations, project_id=project_id)
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
# If project_id is None, then we use the project_id in context
if project_id is None:
project_id = context.project_id
db.reservation_rollback(context, reservations, project_id=project_id)
def destroy_all_by_project(self, context, project_id):
"""Destroy all that is associated with a project.
This includes quotas, usages and reservations.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
"""
db.quota_destroy_all_by_project(context, project_id)
def expire(self, context):
"""Expire reservations.
Explores all currently existing reservations and rolls back
any that have expired.
:param context: The request context, for access checks.
"""
db.reservation_expire(context)
class BaseResource(object):
"""Describe a single resource for quota checking."""
def __init__(self, name, flag=None):
"""Initializes a Resource.
:param name: The name of the resource, i.e., "volumes".
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
self.name = name
self.flag = flag
def quota(self, driver, context, **kwargs):
"""Given a driver and context, obtain the quota for this resource.
:param driver: A quota driver.
:param context: The request context.
:param project_id: The project to obtain the quota value for.
If not provided, it is taken from the
context. If it is given as None, no
project-specific quota will be searched
for.
:param quota_class: The quota class corresponding to the
project, or for which the quota is to be
looked up. If not provided, it is taken
from the context. If it is given as None,
no quota class-specific quota will be
searched for. Note that the quota class
defaults to the value in the context,
which may not correspond to the project if
project_id is not the same as the one in
the context.
"""
# Get the project ID
project_id = kwargs.get('project_id', context.project_id)
# Ditto for the quota class
quota_class = kwargs.get('quota_class', context.quota_class)
# Look up the quota for the project
if project_id:
try:
return driver.get_by_project(context, project_id, self.name)
except exception.ProjectQuotaNotFound:
pass
# Try for the quota class
if quota_class:
try:
return driver.get_by_class(context, quota_class, self.name)
except exception.QuotaClassNotFound:
pass
# OK, return the default
return driver.get_default(context, self)
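    # A sketch of the lookup order above -- project quota, then quota-class
    # quota, then configured default (flag and project names illustrative):
    #
    #     vol = BaseResource('volumes', flag='quota_volumes')
    #     limit = vol.quota(driver, ctx, project_id='tenant-1')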
@property
def default(self):
"""Return the default value of the quota."""
return CONF[self.flag] if self.flag else -1
class ReservableResource(BaseResource):
"""Describe a reservable resource."""
def __init__(self, name, sync, flag=None):
"""Initializes a ReservableResource.
Reservable resources are those resources which directly
correspond to objects in the database, i.e., volumes, gigabytes,
etc. A ReservableResource must be constructed with a usage
synchronization function, which will be called to determine the
current counts of one or more resources.
The usage synchronization function will be passed three
arguments: an admin context, the project ID, and an opaque
session object, which should in turn be passed to the
underlying database function. Synchronization functions
should return a dictionary mapping resource names to the
current in_use count for those resources; more than one
resource and resource count may be returned. Note that
synchronization functions may be associated with more than one
ReservableResource.
:param name: The name of the resource, i.e., "volumes".
:param sync: A dbapi methods name which returns a dictionary
to resynchronize the in_use count for one or more
resources, as described above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
super(ReservableResource, self).__init__(name, flag=flag)
self.sync = sync
class AbsoluteResource(BaseResource):
"""Describe a non-reservable resource."""
pass
class CountableResource(AbsoluteResource):
"""Describe a resource where counts aren't based only on the project ID."""
def __init__(self, name, count, flag=None):
"""Initializes a CountableResource.
Countable resources are those resources which directly
correspond to objects in the database, i.e., volumes, gigabytes,
etc., but for which a count by project ID is inappropriate. A
CountableResource must be constructed with a counting
function, which will be called to determine the current counts
of the resource.
The counting function will be passed the context, along with
the extra positional and keyword arguments that are passed to
Quota.count(). It should return an integer specifying the
count.
Note that this counting is not performed in a transaction-safe
manner. This resource class is a temporary measure to provide
required functionality, until a better approach to solving
this problem can be evolved.
:param name: The name of the resource, i.e., "volumes".
:param count: A callable which returns the count of the
resource. The arguments passed are as described
above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
super(CountableResource, self).__init__(name, flag=flag)
self.count = count
class VolumeTypeResource(ReservableResource):
"""ReservableResource for a specific volume type."""
def __init__(self, part_name, volume_type):
"""Initializes a VolumeTypeResource.
:param part_name: The kind of resource, i.e., "volumes".
:param volume_type: The volume type for this resource.
"""
self.volume_type_name = volume_type['name']
self.volume_type_id = volume_type['id']
name = "%s_%s" % (part_name, self.volume_type_name)
super(VolumeTypeResource, self).__init__(name, "_sync_%s" % part_name)
class QuotaEngine(object):
"""Represent the set of recognized quotas."""
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
if not quota_driver_class:
quota_driver_class = CONF.quota_driver
if isinstance(quota_driver_class, basestring):
quota_driver_class = importutils.import_object(quota_driver_class)
self._resources = {}
self._driver = quota_driver_class
def __contains__(self, resource):
return resource in self.resources
def register_resource(self, resource):
"""Register a resource."""
self._resources[resource.name] = resource
def register_resources(self, resources):
"""Register a list of resources."""
for resource in resources:
self.register_resource(resource)
def get_by_project(self, context, project_id, resource_name):
"""Get a specific quota by project."""
return self._driver.get_by_project(context, project_id, resource_name)
def get_by_class(self, context, quota_class, resource_name):
"""Get a specific quota by quota class."""
return self._driver.get_by_class(context, quota_class, resource_name)
def get_default(self, context, resource):
"""Get a specific default quota for a resource."""
return self._driver.get_default(context, resource)
def get_defaults(self, context):
"""Retrieve the default quotas.
:param context: The request context, for access checks.
"""
return self._driver.get_defaults(context, self.resources)
def get_class_quotas(self, context, quota_class, defaults=True):
"""Retrieve the quotas for the given quota class.
:param context: The request context, for access checks.
:param quota_class: The name of the quota class to return
quotas for.
:param defaults: If True, the default value will be reported
if there is no specific value for the
resource.
"""
return self._driver.get_class_quotas(context, self.resources,
quota_class, defaults=defaults)
def get_project_quotas(self, context, project_id, quota_class=None,
defaults=True, usages=True):
"""Retrieve the quotas for the given project.
:param context: The request context, for access checks.
:param project_id: The ID of the project to return quotas for.
:param quota_class: If project_id != context.project_id, the
quota class cannot be determined. This
parameter allows it to be specified.
:param defaults: If True, the quota class value (or the
default value, if there is no value from the
quota class) will be reported if there is no
specific value for the resource.
:param usages: If True, the current in_use and reserved counts
will also be returned.
"""
return self._driver.get_project_quotas(context, self.resources,
project_id,
quota_class=quota_class,
defaults=defaults,
usages=usages)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
For countable resources, invokes the count() function and
returns its result. Arguments following the context and
resource are passed directly to the count function declared by
the resource.
:param context: The request context, for access checks.
:param resource: The name of the resource, as a string.
"""
# Get the resource
res = self.resources.get(resource)
if not res or not hasattr(res, 'count'):
raise exception.QuotaResourceUnknown(unknown=[resource])
return res.count(context, *args, **kwargs)
def limit_check(self, context, project_id=None, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction. The
values to check are given as keyword arguments, where the key
identifies the specific quota limit to check, and the value is
the proposed value.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
return self._driver.limit_check(context, self.resources, values,
project_id=project_id)
def reserve(self, context, expire=None, project_id=None, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
synchronization function--this method checks quotas against
current usage and the desired deltas. The deltas are given as
keyword arguments, and current usage and other reservations
are factored into the quota check.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it does not have a usage
synchronization function.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation UUIDs which were created.
:param context: The request context, for access checks.
:param expire: An optional parameter specifying an expiration
time for the reservations. If it is a simple
number, it is interpreted as a number of
seconds and added to the current time; if it is
a datetime.timedelta object, it will also be
added to the current time. A datetime.datetime
object will be interpreted as the absolute
expiration time. If None is specified, the
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
reservations = self._driver.reserve(context, self.resources, deltas,
expire=expire,
project_id=project_id)
LOG.debug("Created reservations %s" % reservations)
return reservations
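    # The expire argument accepts three equivalent forms; for example
    # (deltas illustrative), each of these reserves with a one-day expiry:
    #
    #     QUOTAS.reserve(ctx, volumes=1, gigabytes=10, expire=86400)
    #     QUOTAS.reserve(ctx, volumes=1, gigabytes=10,
    #                    expire=datetime.timedelta(days=1))
    #     QUOTAS.reserve(ctx, volumes=1, gigabytes=10,
    #                    expire=timeutils.utcnow() + datetime.timedelta(days=1))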
def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
try:
self._driver.commit(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations %s") % reservations)
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
        :param project_id: Specify the project_id if the current
                           context is an admin context acting on
                           another tenant's quotas.
"""
try:
self._driver.rollback(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%s") % reservations)
def destroy_all_by_project(self, context, project_id):
"""Destroy all quotas, usages, and reservations associated with a
project.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
"""
self._driver.destroy_all_by_project(context, project_id)
def expire(self, context):
"""Expire reservations.
Explores all currently existing reservations and rolls back
any that have expired.
:param context: The request context, for access checks.
"""
self._driver.expire(context)
def add_volume_type_opts(self, context, opts, volume_type_id):
"""Add volume type resource options.
Adds elements to the opts hash for volume type quotas.
If a resource is being reserved ('gigabytes', etc) and the volume
type is set up for its own quotas, these reservations are copied
into keys for 'gigabytes_<volume type name>', etc.
:param context: The request context, for access checks.
:param opts: The reservations options hash.
:param volume_type_id: The volume type id for this reservation.
"""
if not volume_type_id:
return
# NOTE(jdg): set inactive to True in volume_type_get, as we
# may be operating on a volume that was created with a type
# that has since been deleted.
volume_type = db.volume_type_get(context, volume_type_id, True)
for quota in ('volumes', 'gigabytes', 'snapshots'):
if quota in opts:
vtype_quota = "%s_%s" % (quota, volume_type['name'])
opts[vtype_quota] = opts[quota]
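    # For example (values illustrative): with opts == {'gigabytes': 10} and a
    # volume type named 'gold', this adds opts['gigabytes_gold'] = 10 so the
    # per-type quota is reserved alongside the global one.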
@property
def resource_names(self):
return sorted(self.resources.keys())
@property
def resources(self):
return self._resources
class VolumeTypeQuotaEngine(QuotaEngine):
"""Represent the set of all quotas."""
@property
def resources(self):
"""Fetches all possible quota resources."""
result = {}
# Global quotas.
argses = [('volumes', '_sync_volumes', 'quota_volumes'),
('snapshots', '_sync_snapshots', 'quota_snapshots'),
('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ]
for args in argses:
resource = ReservableResource(*args)
result[resource.name] = resource
# Volume type quotas.
volume_types = db.volume_type_get_all(context.get_admin_context(),
False)
for volume_type in volume_types.values():
for part_name in ('volumes', 'gigabytes', 'snapshots'):
resource = VolumeTypeResource(part_name, volume_type)
result[resource.name] = resource
return result
def register_resource(self, resource):
raise NotImplementedError(_("Cannot register resource"))
def register_resources(self, resources):
raise NotImplementedError(_("Cannot register resources"))
QUOTAS = VolumeTypeQuotaEngine()
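# Typical reserve/commit flow against the module-level engine (a sketch only;
# `ctx` is a request context and `size` an illustrative gigabyte count):
#
#     reservations = QUOTAS.reserve(ctx, volumes=1, gigabytes=size)
#     try:
#         pass  # create the volume here
#     except Exception:
#         QUOTAS.rollback(ctx, reservations)
#         raise
#     else:
#         QUOTAS.commit(ctx, reservations)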
|
theanalyst/cinder
|
cinder/quota.py
|
Python
|
apache-2.0
| 36,564
|
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.sites.models import Site
from django.http import (HttpResponseRedirect, Http404,
HttpResponsePermanentRedirect)
from django.views.generic.base import TemplateResponseMixin, View, TemplateView
from django.views.generic.edit import FormView
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as auth_logout
from django.shortcuts import redirect
from django.views.decorators.debug import sensitive_post_parameters
from django.utils.decorators import method_decorator
from ..exceptions import ImmediateHttpResponse
from ..utils import get_form_class, get_request_param
from .utils import (get_next_redirect_url, complete_signup,
get_login_redirect_url, perform_login,
passthrough_next_redirect_url)
from .forms import AddEmailForm, ChangePasswordForm
from .forms import LoginForm, ResetPasswordKeyForm
from .forms import ResetPasswordForm, SetPasswordForm, SignupForm, UserTokenForm
from .utils import sync_user_email_addresses
from .models import EmailAddress, EmailConfirmation
from . import signals
from . import app_settings
from .adapter import get_adapter
sensitive_post_parameters_m = method_decorator(
sensitive_post_parameters('password', 'password1', 'password2'))
def _ajax_response(request, response, form=None):
if request.is_ajax():
if (isinstance(response, HttpResponseRedirect)
or isinstance(response, HttpResponsePermanentRedirect)):
redirect_to = response['Location']
else:
redirect_to = None
response = get_adapter().ajax_response(request,
response,
form=form,
redirect_to=redirect_to)
return response
class RedirectAuthenticatedUserMixin(object):
def dispatch(self, request, *args, **kwargs):
# WORKAROUND: https://code.djangoproject.com/ticket/19316
self.request = request
# (end WORKAROUND)
if request.user.is_authenticated():
redirect_to = self.get_authenticated_redirect_url()
response = HttpResponseRedirect(redirect_to)
return _ajax_response(request, response)
else:
response = super(RedirectAuthenticatedUserMixin,
self).dispatch(request,
*args,
**kwargs)
return response
def get_authenticated_redirect_url(self):
redirect_field_name = self.redirect_field_name
return get_login_redirect_url(self.request,
url=self.get_success_url(),
redirect_field_name=redirect_field_name)
class AjaxCapableProcessFormViewMixin(object):
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
response = self.form_valid(form)
else:
response = self.form_invalid(form)
return _ajax_response(self.request, response, form=form)
class LoginView(RedirectAuthenticatedUserMixin,
AjaxCapableProcessFormViewMixin,
FormView):
form_class = LoginForm
template_name = "account/login.html"
success_url = None
redirect_field_name = "next"
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(LoginView, self).dispatch(request, *args, **kwargs)
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'login', self.form_class)
def form_valid(self, form):
success_url = self.get_success_url()
try:
return form.login(self.request, redirect_url=success_url)
except ImmediateHttpResponse as e:
return e.response
def get_success_url(self):
# Explicitly passed ?next= URL takes precedence
ret = (get_next_redirect_url(self.request,
self.redirect_field_name)
or self.success_url)
return ret
def get_context_data(self, **kwargs):
ret = super(LoginView, self).get_context_data(**kwargs)
signup_url = passthrough_next_redirect_url(self.request,
reverse("account_signup"),
self.redirect_field_name)
redirect_field_value = get_request_param(self.request,
self.redirect_field_name)
ret.update({"signup_url": signup_url,
"site": Site.objects.get_current(),
"redirect_field_name": self.redirect_field_name,
"redirect_field_value": redirect_field_value})
return ret
login = LoginView.as_view()
class CloseableSignupMixin(object):
template_name_signup_closed = "account/signup_closed.html"
def dispatch(self, request, *args, **kwargs):
# WORKAROUND: https://code.djangoproject.com/ticket/19316
self.request = request
# (end WORKAROUND)
try:
if not self.is_open():
return self.closed()
except ImmediateHttpResponse as e:
return e.response
return super(CloseableSignupMixin, self).dispatch(request,
*args,
**kwargs)
def is_open(self):
return get_adapter().is_open_for_signup(self.request)
def closed(self):
response_kwargs = {
"request": self.request,
"template": self.template_name_signup_closed,
}
return self.response_class(**response_kwargs)
class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin,
AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/signup.html"
form_class = SignupForm
redirect_field_name = "next"
success_url = None
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
return super(SignupView, self).dispatch(request, *args, **kwargs)
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'signup', self.form_class)
def get_success_url(self):
# Explicitly passed ?next= URL takes precedence
ret = (get_next_redirect_url(self.request,
self.redirect_field_name)
or self.success_url)
return ret
def form_valid(self, form):
user = form.save(self.request)
return complete_signup(self.request, user,
app_settings.EMAIL_VERIFICATION,
self.get_success_url())
def get_context_data(self, **kwargs):
form = kwargs['form']
form.fields["email"].initial = self.request.session \
.get('account_verified_email', None)
ret = super(SignupView, self).get_context_data(**kwargs)
login_url = passthrough_next_redirect_url(self.request,
reverse("account_login"),
self.redirect_field_name)
redirect_field_name = self.redirect_field_name
redirect_field_value = get_request_param(self.request,
redirect_field_name)
ret.update({"login_url": login_url,
"redirect_field_name": redirect_field_name,
"redirect_field_value": redirect_field_value})
return ret
signup = SignupView.as_view()
class ConfirmEmailView(TemplateResponseMixin, View):
def get_template_names(self):
if self.request.method == 'POST':
return ["account/email_confirmed.html"]
else:
return ["account/email_confirm.html"]
def get(self, *args, **kwargs):
try:
self.object = self.get_object()
if app_settings.CONFIRM_EMAIL_ON_GET:
return self.post(*args, **kwargs)
except Http404:
self.object = None
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
self.object = confirmation = self.get_object()
confirmation.confirm(self.request)
get_adapter().add_message(self.request,
messages.SUCCESS,
'account/messages/email_confirmed.txt',
{'email': confirmation.email_address.email})
if app_settings.LOGIN_ON_EMAIL_CONFIRMATION:
resp = self.login_on_confirm(confirmation)
if resp is not None:
return resp
        # Deliberately not done -- allauth doesn't touch is_active, so
        # that a sysadmin can use it to block users et al.
#
# user = confirmation.email_address.user
# user.is_active = True
# user.save()
redirect_url = self.get_redirect_url()
if not redirect_url:
ctx = self.get_context_data()
return self.render_to_response(ctx)
return redirect(redirect_url)
def login_on_confirm(self, confirmation):
"""
        Simply logging the user in may become a security issue. If you
        do not take proper care (e.g. if you don't purge used email
        confirmations), a malicious person who got hold of the link
        will be able to log in over and over again, and the user will
        be unable to do anything about it. Even restoring their own
        mailbox security will not help, as the links will still work.
        Password reset is different: that mechanism only works as long
        as the attacker has access to the mailbox. Once they no longer
        have access, they cannot issue a password request and intercept
        it. Furthermore, every place where the links are recorded (log
        files, even Google Analytics) suddenly needs to be secured.
        Purging the email confirmation once confirmed changes the
        behavior -- users will no longer be able to confirm repeatedly
        (in case they forgot that they already clicked the mail).
        All in all, we opted for storing the user that is in the process
        of signing up in the session, to avoid all of the above. This
        may not work 100% of the time (the user may close the browser
        and lose the session), but at least we're secure.
"""
user_pk = self.request.session.pop('account_user', None)
user = confirmation.email_address.user
if user_pk == user.pk and self.request.user.is_anonymous():
return perform_login(self.request,
user,
app_settings.EmailVerificationMethod.NONE)
return None
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
try:
return queryset.get(key=self.kwargs["key"].lower())
except EmailConfirmation.DoesNotExist:
raise Http404()
def get_queryset(self):
qs = EmailConfirmation.objects.all_valid()
qs = qs.select_related("email_address__user")
return qs
def get_context_data(self, **kwargs):
ctx = kwargs
ctx["confirmation"] = self.object
return ctx
def get_redirect_url(self):
return get_adapter().get_email_confirmation_redirect_url(self.request)
confirm_email = ConfirmEmailView.as_view()
class EmailView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/email.html"
form_class = AddEmailForm
success_url = reverse_lazy('account_email')
def get_form_class(self):
return get_form_class(app_settings.FORMS, 'add_email', self.form_class)
def dispatch(self, request, *args, **kwargs):
sync_user_email_addresses(request.user)
return super(EmailView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(EmailView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
email_address = form.save(self.request)
get_adapter().add_message(self.request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': form.cleaned_data["email"]})
signals.email_added.send(sender=self.request.user.__class__,
request=self.request,
user=self.request.user,
email_address=email_address)
return super(EmailView, self).form_valid(form)
def post(self, request, *args, **kwargs):
res = None
if "action_add" in request.POST:
res = super(EmailView, self).post(request, *args, **kwargs)
elif request.POST.get("email"):
if "action_send" in request.POST:
res = self._action_send(request)
elif "action_remove" in request.POST:
res = self._action_remove(request)
elif "action_primary" in request.POST:
res = self._action_primary(request)
res = res or HttpResponseRedirect(reverse('account_email'))
# Given that we bypassed AjaxCapableProcessFormViewMixin,
        # we'll have to invoke it manually...
res = _ajax_response(request, res)
return res
def _action_send(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get(
user=request.user,
email=email,
)
get_adapter().add_message(request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
email_address.send_confirmation(request)
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def _action_remove(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get(
user=request.user,
email=email
)
if email_address.primary:
get_adapter().add_message(request,
messages.ERROR,
'account/messages/'
'cannot_delete_primary_email.txt',
{"email": email})
else:
email_address.delete()
signals.email_removed.send(sender=request.user.__class__,
request=request,
user=request.user,
email_address=email_address)
get_adapter().add_message(request,
messages.SUCCESS,
'account/messages/email_deleted.txt',
{"email": email})
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def _action_primary(self, request, *args, **kwargs):
email = request.POST["email"]
try:
email_address = EmailAddress.objects.get(
user=request.user,
email=email,
)
            # Not using primary=True here -- a slightly different
            # variation: don't require the new address to be verified
            # unless we are moving away from a verified address, i.e.
            # ignore the constraint if the previous primary email
            # address is not verified.
if not email_address.verified and \
EmailAddress.objects.filter(user=request.user,
verified=True).exists():
get_adapter().add_message(request,
messages.ERROR,
'account/messages/'
'unverified_primary_email.txt')
else:
# Sending the old primary address to the signal
# adds a db query.
try:
from_email_address = EmailAddress.objects \
.get(user=request.user, primary=True)
except EmailAddress.DoesNotExist:
from_email_address = None
email_address.set_as_primary()
get_adapter() \
.add_message(request,
messages.SUCCESS,
'account/messages/primary_email_set.txt')
signals.email_changed \
.send(sender=request.user.__class__,
request=request,
user=request.user,
from_email_address=from_email_address,
to_email_address=email_address)
return HttpResponseRedirect(self.get_success_url())
except EmailAddress.DoesNotExist:
pass
def get_context_data(self, **kwargs):
ret = super(EmailView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['add_email_form'] = ret.get('form')
# (end NOTE)
return ret
email = login_required(EmailView.as_view())
class PasswordChangeView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_change.html"
form_class = ChangePasswordForm
success_url = reverse_lazy("account_change_password")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'change_password',
self.form_class)
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
if not request.user.has_usable_password():
return HttpResponseRedirect(reverse('account_set_password'))
return super(PasswordChangeView, self).dispatch(request, *args,
**kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
get_adapter().add_message(self.request,
messages.SUCCESS,
'account/messages/password_changed.txt')
signals.password_changed.send(sender=self.request.user.__class__,
request=self.request,
user=self.request.user)
return super(PasswordChangeView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordChangeView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['password_change_form'] = ret.get('form')
# (end NOTE)
return ret
password_change = login_required(PasswordChangeView.as_view())
class PasswordSetView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_set.html"
form_class = SetPasswordForm
success_url = reverse_lazy("account_set_password")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'set_password',
self.form_class)
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
if request.user.has_usable_password():
return HttpResponseRedirect(reverse('account_change_password'))
return super(PasswordSetView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordSetView, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
get_adapter().add_message(self.request,
messages.SUCCESS,
'account/messages/password_set.txt')
signals.password_set.send(sender=self.request.user.__class__,
request=self.request, user=self.request.user)
return super(PasswordSetView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordSetView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['password_set_form'] = ret.get('form')
# (end NOTE)
return ret
password_set = login_required(PasswordSetView.as_view())
class PasswordResetView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_reset.html"
form_class = ResetPasswordForm
success_url = reverse_lazy("account_reset_password_done")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'reset_password',
self.form_class)
def form_valid(self, form):
form.save(self.request)
return super(PasswordResetView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(PasswordResetView, self).get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret['password_reset_form'] = ret.get('form')
# (end NOTE)
return ret
password_reset = PasswordResetView.as_view()
class PasswordResetDoneView(TemplateView):
template_name = "account/password_reset_done.html"
password_reset_done = PasswordResetDoneView.as_view()
class PasswordResetFromKeyView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_reset_from_key.html"
form_class = ResetPasswordKeyForm
success_url = reverse_lazy("account_reset_password_from_key_done")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'reset_password_from_key',
self.form_class)
def dispatch(self, request, uidb36, key, **kwargs):
self.request = request
self.key = key
# (Ab)using forms here to be able to handle errors in XHR #890
token_form = UserTokenForm(data={'uidb36': uidb36, 'key': key})
if not token_form.is_valid():
response = self.render_to_response(
self.get_context_data(token_fail=True)
)
return _ajax_response(self.request, response, form=token_form)
else:
self.reset_user = token_form.reset_user
return super(PasswordResetFromKeyView, self).dispatch(request,
uidb36,
key,
**kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordResetFromKeyView, self).get_form_kwargs()
kwargs["user"] = self.reset_user
kwargs["temp_key"] = self.key
return kwargs
def form_valid(self, form):
form.save()
get_adapter().add_message(self.request,
messages.SUCCESS,
'account/messages/password_changed.txt')
signals.password_reset.send(sender=self.reset_user.__class__,
request=self.request,
user=self.reset_user)
return super(PasswordResetFromKeyView, self).form_valid(form)
password_reset_from_key = PasswordResetFromKeyView.as_view()
class PasswordResetFromKeyDoneView(TemplateView):
template_name = "account/password_reset_from_key_done.html"
password_reset_from_key_done = PasswordResetFromKeyDoneView.as_view()
class LogoutView(TemplateResponseMixin, View):
template_name = "account/logout.html"
redirect_field_name = "next"
def get(self, *args, **kwargs):
if app_settings.LOGOUT_ON_GET:
return self.post(*args, **kwargs)
if not self.request.user.is_authenticated():
return redirect(self.get_redirect_url())
ctx = self.get_context_data()
return self.render_to_response(ctx)
def post(self, *args, **kwargs):
url = self.get_redirect_url()
if self.request.user.is_authenticated():
self.logout()
return redirect(url)
def logout(self):
get_adapter().add_message(self.request,
messages.SUCCESS,
'account/messages/logged_out.txt')
auth_logout(self.request)
def get_context_data(self, **kwargs):
ctx = kwargs
redirect_field_value = get_request_param(self.request,
self.redirect_field_name)
ctx.update({
"redirect_field_name": self.redirect_field_name,
"redirect_field_value": redirect_field_value})
return ctx
def get_redirect_url(self):
return (get_next_redirect_url(self.request,
self.redirect_field_name)
or get_adapter().get_logout_redirect_url(self.request))
logout = LogoutView.as_view()
class AccountInactiveView(TemplateView):
template_name = 'account/account_inactive.html'
account_inactive = AccountInactiveView.as_view()
class EmailVerificationSentView(TemplateView):
template_name = 'account/verification_sent.html'
email_verification_sent = EmailVerificationSentView.as_view()
|
ldgarcia/django-allauth
|
allauth/account/views.py
|
Python
|
mit
| 26,390
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import sys
from io import BytesIO, TextIOWrapper
import yaml
import yaml.reader
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.parsing.convert_bool import boolean
class AnsibleTextIOWrapper(TextIOWrapper):
def write(self, s):
super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
def find_globals(g, tree):
"""Uses AST to find globals in an ast tree"""
for child in tree:
if hasattr(child, 'body') and isinstance(child.body, list):
find_globals(g, child.body)
elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
g.add(child.name)
continue
elif isinstance(child, ast.Assign):
try:
g.add(child.targets[0].id)
except (IndexError, AttributeError):
pass
elif isinstance(child, ast.Import):
g.add(child.names[0].name)
elif isinstance(child, ast.ImportFrom):
for name in child.names:
g_name = name.asname or name.name
if g_name == '*':
continue
g.add(g_name)
class CaptureStd():
"""Context manager to handle capturing stderr and stdout"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
def get(self):
"""Return ``(stdout, stderr)``"""
return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
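# Illustrative usage sketch (not part of the original file): CaptureStd
# swaps sys.stdout/sys.stderr for in-memory buffers and restores them on
# exit; get() returns both captured streams as bytes.
#
#     with CaptureStd() as capture:
#         print('hello')
#     stdout, stderr = capture.get()
#     assert b'hello' in stdout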
def parse_yaml(value, lineno, module, name, load_all=False):
traces = []
errors = []
data = None
if load_all:
loader = yaml.safe_load_all
else:
loader = yaml.safe_load
try:
data = loader(value)
if load_all:
data = list(data)
except yaml.MarkedYAMLError as e:
e.problem_mark.line += lineno - 1
e.problem_mark.name = '%s.%s' % (module, name)
errors.append({
'msg': '%s is not valid YAML' % name,
'line': e.problem_mark.line + 1,
'column': e.problem_mark.column + 1
})
traces.append(e)
except yaml.reader.ReaderError as e:
traces.append(e)
# TODO: Better line/column detection
errors.append({
'msg': ('%s is not valid YAML. Character '
'0x%x at position %d.' % (name, e.character, e.position)),
'line': lineno
})
except yaml.YAMLError as e:
traces.append(e)
errors.append({
'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
'line': lineno
})
return data, errors, traces
def is_empty(value):
"""Evaluate null like values excluding False"""
if value is False:
return False
return not bool(value)
def compare_unordered_lists(a, b):
"""Safe list comparisons
Supports:
- unordered lists
- unhashable elements
"""
return len(a) == len(b) and all(x in b for x in a)
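# Illustrative examples (added for clarity, not in the original): the helper
# compares lists regardless of order and tolerates unhashable elements such
# as dicts, which a set-based comparison could not handle.
#
#     compare_unordered_lists([1, {'a': 1}], [{'a': 1}, 1])  # True
#     compare_unordered_lists([1, 2], [1, 2, 3])             # False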
class NoArgsAnsibleModule(AnsibleModule):
"""AnsibleModule that does not actually load params. This is used to get access to the
methods within AnsibleModule without having to fake a bunch of data
"""
def _load_params(self):
self.params = {}
|
mheap/ansible
|
test/sanity/validate-modules/utils.py
|
Python
|
gpl-3.0
| 4,467
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ApproveDisplayVideo360AdvertiserLinkProposal
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-analytics-admin
# [START analyticsadmin_v1alpha_generated_AnalyticsAdminService_ApproveDisplayVideo360AdvertiserLinkProposal_sync]
from google.analytics import admin_v1alpha
def sample_approve_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ApproveDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
response = client.approve_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
# [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_ApproveDisplayVideo360AdvertiserLinkProposal_sync]
|
googleapis/python-analytics-admin
|
samples/generated_samples/analyticsadmin_v1alpha_generated_analytics_admin_service_approve_display_video360_advertiser_link_proposal_sync.py
|
Python
|
apache-2.0
| 1,697
|
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref,create_session,Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
import json
from datetime import datetime, date
##table declaration
from openedoo import config
class Query(object):
def __init__(self,SQLALCHEMY_DATABASE_URI=None,database_name=None,db_uri=None):
#SQLALCHEMY_DATABASE_URI = sql_uri
#self.config_uri = db_uri
#self.engine = create_engine(SQLALCHEMY_DATABASE_URI)
#self.database_name = database_name
        self.config_uri = config.DB_URI  # stored on self so version()/raw()/create_database() can reuse it
self.engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
self.database_name = config.database_name
self.Base = declarative_base()
self.metadata = MetaData(bind=self.engine)
self.auto_map = automap_base()
def select_db(self,tables,column,page=0,page_size=None,**value_column):
        '''Equivalent to SELECT * FROM tables WHERE column = value_column; does not support ORDER BY or JOIN.'''
try:
session = sessionmaker()
session.configure(bind=self.engine)
self.Base.metadata.create_all(self.engine)
s = session()
if ('value' in value_column):
kueridb = s.query(tables).filter(column == value_column['value'])
else:
kueridb = s.query(tables)
            if page_size is not None:
                kueridb = kueridb.limit(page_size)
            if page != 0:
                kueridb = kueridb.offset(page*page_size)
            list1 = list(s.execute(kueridb))
            self.engine.dispose()
return list1
except Exception as e:
return False
def update_db(self,tables,column,value_column,dict_update):
'''for update row in tables'''
#namatable = namatable
session = sessionmaker()
session.configure(bind=self.engine)
self.Base.metadata.create_all(self.engine)
s = session()
try:
s.query(tables).filter(column==value_column).update(dict_update)
s.commit()
self.engine.dispose()
return True
except Exception as e:
return e
def delete_db(self,tables,data):
try:
session = sessionmaker()
session.configure(bind=self.engine)
self.Base.metadata.create_all(self.engine)
s = session()
self.Base.metadata.create_all(self.engine)
jack = s.query(tables).get(data)
s.delete(jack)
s.commit()
self.engine.dispose()
return True
except Exception as e:
return False
def insert_db(self,new):
try:
Session = sessionmaker(bind=self.engine)
session = Session()
self.Base.metadata.create_all(self.engine)
session.add(new)
session.commit()
self.engine.dispose()
return True
except Exception as e:
return False
def create_database(self,database_name):
try:
engine_new = create_engine(self.config_uri)
connection_engine = engine_new.connect()
connection_engine.execute("commit")
connection_engine.execute("create database {database}".format(database=database_name))
connection_engine.close()
return True
except Exception as e:
message = {'message':'database exist'}
return message
def drop_table(self,name_table):
sql = text('DROP TABLE IF EXISTS {name_table};'.format(name_table=name_table))
result = self.engine.execute(sql)
return result
def version(self):
query = 'SELECT VERSION()'
        connection = create_engine(self.config_uri).connect()
result = connection.execute(query)
data = []
for value in result:
row = dict(value)
data.append(row)
return data
def raw(self,query=None):
        if query is None:
            return "query syntax is None"
        connection = create_engine(self.config_uri).connect()
result = connection.execute(query)
data = []
for value in result:
row = dict(value)
data.append(row)
return data
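# Illustrative usage sketch (not part of the original module). `User` below
# is an assumed declarative model mapped to the configured database; any
# mapped table object would do.
#
#     q = Query()
#     rows = q.select_db(User, User.name, page=0, page_size=10, value='alice')
#     q.update_db(User, User.name, 'alice', {'name': 'bob'})
#     print q.raw('SELECT VERSION()')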
|
openedoo/openedoo
|
openedoo/core/db/__init__.py
|
Python
|
mit
| 3,656
|
# -*- coding: utf-8 -*-
"""
pafy.py.
Python library to download YouTube content and retrieve metadata
https://github.com/np1/pafy
Copyright (C) 2013-2014 np1
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
__version__ = "0.3.74"
__author__ = "np1"
__license__ = "LGPLv3"
import re
import os
import sys
import time
import json
import logging
early_py_version = sys.version_info[:2] < (2, 7)
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401,I0011
from urllib.request import build_opener
from urllib.error import HTTPError, URLError
from urllib.parse import parse_qs, unquote_plus, urlencode, urlparse
uni, pyver = str, 3
else:
from urllib2 import build_opener, HTTPError, URLError
from urllib import unquote_plus, urlencode
from urlparse import parse_qs, urlparse
uni, pyver = unicode, 2
import youtube_dl
if os.environ.get("pafydebug") == "1":
logging.basicConfig(level=logging.DEBUG)
dbg = logging.debug
def parseqs(data):
""" parse_qs, return unicode. """
if type(data) == uni:
return parse_qs(data)
elif pyver == 3:
data = data.decode("utf8")
data = parse_qs(data)
else:
data = parse_qs(data)
out = {}
for k, v in data.items():
k = k.decode("utf8")
out[k] = [x.decode("utf8") for x in v]
data = out
return data
def fetch_decode(url, encoding=None):
""" Fetch url and decode. """
try:
req = g.opener.open(url)
except HTTPError as e:
if e.getcode() == 503:
time.sleep(.5)
return fetch_decode(url, encoding)
else:
raise e
ct = req.headers['content-type']
if encoding:
return req.read().decode(encoding)
elif "charset=" in ct:
dbg("charset: %s", ct)
encoding = re.search(r"charset=([\w-]+)\s*(:?;|$)", ct).group(1)
return req.read().decode(encoding)
else:
dbg("encoding unknown")
return req.read()
def new(url, basic=True, gdata=False, signature=True, size=False,
callback=None):
""" Return a new pafy instance given a url or video id.
NOTE: The signature argument has been deprecated and now has no effect,
it will be removed in a future version.
Optional arguments:
basic - fetch basic metadata and streams
gdata - fetch gdata info (upload date, description, category)
size - fetch the size of each stream (slow)(decrypts urls if needed)
callback - a callback function to receive status strings
If any of the first three above arguments are False, those data items will
be fetched only when first called for.
The defaults are recommended for most cases. If you wish to create
many video objects at once, you may want to set basic to False, eg:
        video = pafy.new(url, basic=False)
This will be quick because no http requests will be made on initialisation.
Setting size to True will override the basic argument and force basic data
to be fetched too (basic data is required to obtain Stream objects).
"""
if not signature:
logging.warning("signature argument has no effect and will be removed"
" in a future version.")
return Pafy(url, basic, gdata, signature, size, callback)
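# Illustrative sketch (added for clarity; the video id below is an assumed
# placeholder). Metadata is fetched lazily when basic=False:
#
#     video = new("https://www.youtube.com/watch?v=dQw4w9WgXcQ", basic=False)
#     print(video.title)  # triggers the basic metadata fetch on first access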
def get_video_gdata(video_id):
""" Return json string containing video metadata from gdata api. """
new.callback("Fetching video gdata")
query = {'part': 'id,snippet,statistics',
'maxResults': 1,
'id': video_id,
'key': g.api_key}
url = g.urls['gdata'] + '?' + urlencode(query)
gdata = fetch_decode(url) # unicode
dbg("Fetched video gdata")
new.callback("Fetched video gdata")
return gdata
def extract_video_id(url):
""" Extract the video id from a url, return video id as str. """
idregx = re.compile(r'[\w-]{11}$')
url = str(url)
if idregx.match(url):
return url # ID of video
if '://' not in url:
url = '//' + url
parsedurl = urlparse(url)
if parsedurl.netloc in ('youtube.com', 'www.youtube.com'):
query = parse_qs(parsedurl.query)
if 'v' in query and idregx.match(query['v'][0]):
return query['v'][0]
elif parsedurl.netloc in ('youtu.be', 'www.youtu.be'):
vidid = parsedurl.path.split('/')[-1] if parsedurl.path else ''
if idregx.match(vidid):
return vidid
err = "Need 11 character video id or the URL of the video. Got %s"
raise ValueError(err % url)
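# For illustration (not in the original): accepted input forms include a bare
# 11-character id, a youtube.com watch URL and a youtu.be short URL.
#
#     extract_video_id("dQw4w9WgXcQ")                                  # -> "dQw4w9WgXcQ"
#     extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  # -> "dQw4w9WgXcQ"
#     extract_video_id("https://youtu.be/dQw4w9WgXcQ")                 # -> "dQw4w9WgXcQ"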
class g(object):
""" Class for holding constants needed throughout the module. """
urls = {
'gdata': "https://www.googleapis.com/youtube/v3/videos",
'watchv': "http://www.youtube.com/watch?v=%s",
'vidcat': "https://www.googleapis.com/youtube/v3/videoCategories",
'playlist': ('http://www.youtube.com/list_ajax?'
'style=json&action_get_list=1&list=%s'),
'thumb': "http://i.ytimg.com/vi/%s/default.jpg",
'bigthumb': "http://i.ytimg.com/vi/%s/sddefault.jpg",
'bigthumbhd': "http://i.ytimg.com/vi/%s/hddefault.jpg",
}
api_key = "AIzaSyCIM4EzNqi1in22f4Z3Ru3iYvLaY8tc3bo"
user_agent = "pafy " + __version__
lifespan = 60 * 60 * 5 # 5 hours
opener = build_opener()
opener.addheaders = [('User-Agent', user_agent)]
cache = {}
ydl_opts = {'quiet': True, 'prefer_insecure': True, 'no_warnings':True}
def remux(infile, outfile, quiet=False, muxer="ffmpeg"):
""" Remux audio. """
from subprocess import call, STDOUT
muxer = muxer if isinstance(muxer, str) else "ffmpeg"
for tool in set([muxer, "ffmpeg", "avconv"]):
cmd = [tool, "-y", "-i", infile, "-acodec", "copy", "-vn", outfile]
try:
with open(os.devnull, "w") as devnull:
call(cmd, stdout=devnull, stderr=STDOUT)
except OSError:
dbg("Failed to remux audio using %s", tool)
else:
os.unlink(infile)
dbg("remuxed audio file using %s" % tool)
if not quiet:
sys.stdout.write("\nAudio remuxed.\n")
break
else:
logging.warning("audio remux failed")
os.rename(infile, outfile)
def cache(name):
""" Returns a sub-cache dictionary under which global key, value pairs
can be stored. Regardless of whether a dictionary already exists for
the given name, the sub-cache is returned by reference.
"""
if name not in g.cache:
g.cache[name] = {}
return g.cache[name]
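# Illustrative sketch (not in the original): sub-caches are returned by
# reference, so mutations are visible through g.cache.
#
#     c = cache('categories')
#     c['10'] = {'title': 'Music', 'updated': time.time()}
#     assert cache('categories') is c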
def prune_files(path, prefix="", age_max=3600 * 24 * 14, count_max=4):
""" Remove oldest files from path that start with prefix.
    Remove files older than age_max; leave a maximum of count_max files.
"""
tempfiles = []
if not os.path.isdir(path):
return
for f in os.listdir(path):
filepath = os.path.join(path, f)
if os.path.isfile(filepath) and f.startswith(prefix):
age = time.time() - os.path.getmtime(filepath)
if age > age_max:
os.unlink(filepath)
else:
tempfiles.append((filepath, age))
tempfiles = sorted(tempfiles, key=lambda x: x[1], reverse=True)
for f in tempfiles[:-count_max]:
os.unlink(f[0])
class Stream(object):
""" YouTube video stream class. """
_fsize = None
def __init__(self, info, parent):
""" Set initial values. """
self._info = info
self._parent = parent
self._filename = self.generate_filename()
def generate_filename(self, meta=False):
""" Generate filename. """
ok = re.compile(r'[^/]')
if os.name == "nt":
ok = re.compile(r'[^\\/:*?"<>|]')
filename = "".join(x if ok.match(x) else "_" for x in self.title)
if meta:
filename += "-%s-%s" % (self._parent.videoid, self.itag)
filename += "." + self.extension
return filename
@property
def rawbitrate(self):
""" Return raw bitrate value. """
return self._info.get('abr', 0) * 1024
@property
def threed(self):
""" Return bool, True if stream is 3D. """
#TODO Figure out how to implement this with youtube-dl
return False
@property
def itag(self):
""" Return itag value of stream. """
return self._info['format_id']
@property
def resolution(self):
""" Return resolution of stream as str. 0x0 if audio. """
height = self._info.get('height') or 0
width = self._info.get('width') or 0
return str(width) + 'x' + str(height)
@property
def dimensions(self):
""" Return dimensions of stream as tuple. (0, 0) if audio. """
height = self._info.get('height') or 0
width = self._info.get('width') or 0
return width, height
@property
def quality(self):
""" Return quality of stream (bitrate or resolution).
eg, 128k or 640x480 (str)
"""
if self.rawbitrate:
quality = self.bitrate
else:
quality = self.resolution
return quality
@property
def title(self):
""" Return YouTube video title as a string. """
return self._parent.title
@property
def extension(self):
""" Return appropriate file extension for stream (str).
Possible values are: 3gp, m4a, m4v, mp4, webm, ogg
"""
return self._info['ext']
@property
def bitrate(self):
""" Return bitrate of an audio stream. """
return str(self._info.get('abr', 0)) + 'k'
@property
def mediatype(self):
""" Return mediatype string (normal, audio or video).
(normal means a stream containing both video and audio.)
"""
if (self._info.get('acodec') != 'none' and
self._info.get('vcodec') == 'none'):
return 'audio'
elif (self._info.get('acodec') == 'none' and
self._info.get('vcodec') != 'none'):
return 'video'
else:
return 'normal'
@property
def notes(self):
""" Return additional notes regarding the stream format. """
return self._info.get('format_note') or ''
@property
def filename(self):
""" Return filename of stream; derived from title and extension. """
return self._filename
@property
def url(self):
""" Return the url, decrypt if required. """
return self._info.get('url')
@property
def url_https(self):
""" Return https url. """
return self.url.replace("http://", "https://")
def __repr__(self):
""" Return string representation. """
out = "%s:%s@%s" % (self.mediatype, self.extension, self.quality)
return out
def get_filesize(self):
""" Return filesize of the stream in bytes. Set member variable. """
# Faster method
if 'filesize' in self._info:
return self._info['filesize']
# Fallback
if not self._fsize:
try:
dbg("Getting stream size")
cl = "content-length"
self._fsize = int(g.opener.open(self.url).headers[cl])
dbg("Got stream size")
except (AttributeError, HTTPError, URLError):
self._fsize = 0
return self._fsize
def cancel(self):
""" Cancel an active download. """
if self._active:
self._active = False
return True
def download(self, filepath="", quiet=False, callback=lambda *x: None,
meta=False, remux_audio=False):
""" Download. Use quiet=True to supress output. Return filename.
Use meta=True to append video id and itag to generated filename
Use remax_audio=True to remux audio file downloads
"""
# pylint: disable=R0912,R0914
# Too many branches, too many local vars
savedir = filename = ""
if filepath and os.path.isdir(filepath):
savedir, filename = filepath, self.generate_filename()
elif filepath:
savedir, filename = os.path.split(filepath)
else:
filename = self.generate_filename(meta=meta)
filepath = os.path.join(savedir, filename)
temp_filepath = filepath + ".temp"
status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} '
'KB/s]. ETA: [{:.0f} secs]')
if early_py_version:
status_string = (' {0:} Bytes [{1:.2%}] received. Rate:'
' [{2:4.0f} KB/s]. ETA: [{3:.0f} secs]')
response = g.opener.open(self.url)
total = int(response.info()['Content-Length'].strip())
chunksize, bytesdone, t0 = 16384, 0, time.time()
fmode, offset = "wb", 0
if os.path.exists(temp_filepath):
if os.stat(temp_filepath).st_size < total:
offset = os.stat(temp_filepath).st_size
fmode = "ab"
outfh = open(temp_filepath, fmode)
if offset:
# partial file exists, resume download
resuming_opener = build_opener()
resuming_opener.addheaders = [('User-Agent', g.user_agent),
("Range", "bytes=%s-" % offset)]
response = resuming_opener.open(self.url)
bytesdone = offset
self._active = True
while self._active:
chunk = response.read(chunksize)
outfh.write(chunk)
elapsed = time.time() - t0
bytesdone += len(chunk)
if elapsed:
rate = ((bytesdone - offset) / 1024) / elapsed
eta = (total - bytesdone) / (rate * 1024)
else: # Avoid ZeroDivisionError
rate = 0
eta = 0
progress_stats = (bytesdone, bytesdone * 1.0 / total, rate, eta)
if not chunk:
outfh.close()
break
if not quiet:
status = status_string.format(*progress_stats)
sys.stdout.write("\r" + status + ' ' * 4 + "\r")
sys.stdout.flush()
if callback:
callback(total, *progress_stats)
if self._active:
if remux_audio and self.mediatype == "audio":
remux(temp_filepath, filepath, quiet=quiet, muxer=remux_audio)
else:
os.rename(temp_filepath, filepath)
return filepath
else: # download incomplete, return temp filepath
outfh.close()
return temp_filepath
class Pafy(object):
""" Class to represent a YouTube video. """
funcmap = {} # keep functions as a class variable
def __init__(self, video_url, basic=True, gdata=False,
signature=True, size=False, callback=None):
""" Set initial values. """
self.version = __version__
self.videoid = extract_video_id(video_url)
self.watchv_url = g.urls['watchv'] % self.videoid
new.callback = callback or (lambda x: None)
self._have_basic = False
self._have_gdata = False
self._description = None
self._likes = None
self._dislikes = None
self._category = None
self._published = None
self._username = None
self._ydl_info = None
self._streams = []
self._oggstreams = []
self._m4astreams = []
self._allstreams = []
self._videostreams = []
self._audiostreams = []
self._title = None
self._rating = None
self._length = None
self._author = None
self._duration = None
self._keywords = None
self._bigthumb = None
self._viewcount = None
self._bigthumbhd = None
self._mix_pl = None
self.expiry = None
self.playlist_meta = None
if basic:
self._fetch_basic()
if gdata:
self._fetch_gdata()
if size:
for s in self.allstreams:
# pylint: disable=W0104
s.get_filesize()
def _fetch_basic(self):
""" Fetch basic data and streams. """
if self._have_basic:
return
with youtube_dl.YoutubeDL(g.ydl_opts) as ydl:
try:
self._ydl_info = ydl.extract_info(self.videoid, download=False)
# Turn into an IOError since that is what pafy previously raised
except youtube_dl.utils.DownloadError as e:
raise IOError(str(e).replace('YouTube said', 'Youtube says'))
new.callback("Fetched video info")
self._title = self._ydl_info['title']
self._author = self._ydl_info['uploader']
self._rating = self._ydl_info['average_rating']
self._length = self._ydl_info['duration']
self._viewcount = self._ydl_info['view_count']
self._likes = self._ydl_info['like_count']
self._dislikes = self._ydl_info['dislike_count']
self._username = self._ydl_info['uploader_id']
self._category = self._ydl_info['categories'][0]
if self._ydl_info['height'] >= 480:
self._bigthumb = g.urls['bigthumb'] % self.videoid
if self._ydl_info['height'] >= 720:
self._bigthumbhd = g.urls['bigthumbhd'] % self.videoid
self.expiry = time.time() + g.lifespan
self._have_basic = True
def _fetch_gdata(self):
""" Extract gdata values, fetch gdata if necessary. """
if self._have_gdata:
return
gdata = get_video_gdata(self.videoid)
item = json.loads(gdata)['items'][0]
snippet = item['snippet']
self._published = uni(snippet['publishedAt'])
self._description = uni(snippet["description"])
self._keywords = [uni(i) for i in snippet['tags']]
self._have_gdata = True
def _process_streams(self):
""" Create Stream object lists from internal stream maps. """
if not self._have_basic:
self._fetch_basic()
allstreams = [Stream(z, self) for z in self._ydl_info['formats']]
self._streams = [i for i in allstreams if i.mediatype == 'normal']
self._audiostreams = [i for i in allstreams if i.mediatype == 'audio']
self._videostreams = [i for i in allstreams if i.mediatype == 'video']
self._m4astreams = [i for i in allstreams if i.extension == 'm4a']
self._oggstreams = [i for i in allstreams if i.extension == 'ogg']
self._allstreams = allstreams
def __repr__(self):
""" Print video metadata. Return utf8 string. """
if self._have_basic:
keys = "Title Author ID Duration Rating Views Thumbnail"
keys = keys.split(" ")
keywords = ", ".join(self.keywords)
info = {"Title": self.title,
"Author": self.author,
"Views": self.viewcount,
"Rating": self.rating,
"Duration": self.duration,
"ID": self.videoid,
"Thumbnail": self.thumb}
nfo = "\n".join(["%s: %s" % (k, info.get(k, "")) for k in keys])
else:
nfo = "Pafy object: %s [%s]" % (self.videoid,
self.title[:45] + "..")
return nfo.encode("utf8", "replace") if pyver == 2 else nfo
@property
def streams(self):
""" The streams for a video. Returns list."""
self._fetch_basic()
return self._streams
@property
def allstreams(self):
""" All stream types for a video. Returns list. """
if not self._allstreams:
self._process_streams()
return self._allstreams
@property
def audiostreams(self):
""" Return a list of audio Stream objects. """
if not self._audiostreams:
self._process_streams()
return self._audiostreams
@property
def videostreams(self):
""" The video streams for a video. Returns list. """
if not self._videostreams:
self._process_streams()
return self._videostreams
@property
def oggstreams(self):
""" Return a list of ogg encoded Stream objects. """
if not self._oggstreams:
self._process_streams()
return self._oggstreams
@property
def m4astreams(self):
""" Return a list of m4a encoded Stream objects. """
if not self._m4astreams:
self._process_streams()
return self._m4astreams
@property
def title(self):
""" Return YouTube video title as a string. """
if not self._title:
self._fetch_basic()
return self._title
@property
def author(self):
""" The uploader of the video. Returns str. """
if not self._author:
self._fetch_basic()
return self._author
@property
def rating(self):
""" Rating for a video. Returns float. """
if not self._rating:
self._fetch_basic()
return self._rating
@property
def length(self):
""" Length of a video in seconds. Returns int. """
if not self._length:
self._fetch_basic()
return self._length
@property
def viewcount(self):
""" Number of views for a video. Returns int. """
if not self._viewcount:
self._fetch_basic()
return self._viewcount
@property
def bigthumb(self):
""" Large thumbnail image url. Returns str. """
self._fetch_basic()
return self._bigthumb
@property
def bigthumbhd(self):
""" Extra large thumbnail image url. Returns str. """
self._fetch_basic()
return self._bigthumbhd
@property
def thumb(self):
""" Thumbnail image url. Returns str. """
return g.urls['thumb'] % self.videoid
@property
def duration(self):
""" Duration of a video (HH:MM:SS). Returns str. """
if not self._length:
self._fetch_basic()
self._duration = time.strftime('%H:%M:%S', time.gmtime(self._length))
self._duration = uni(self._duration)
return self._duration
@property
def keywords(self):
""" Return keywords as list of str. """
if not self._keywords:
self._fetch_gdata()
return self._keywords
@property
def category(self):
""" YouTube category of the video. Returns string. """
if not self._category:
self._fetch_gdata()
return self._category
@property
def description(self):
""" Description of the video. Returns string. """
if not self._description:
self._fetch_gdata()
return self._description
@property
def username(self):
""" Return the username of the uploader. """
if not self._username:
self._fetch_basic()
return self._username
@property
def published(self):
""" The upload date and time of the video. Returns string. """
if not self._published:
self._fetch_gdata()
return self._published.replace(".000Z", "").replace("T", " ")
@property
def likes(self):
""" The number of likes for the video. Returns int. """
if not self._likes:
self._fetch_basic()
return self._likes
@property
def dislikes(self):
""" The number of dislikes for the video. Returns int. """
if not self._dislikes:
self._fetch_basic()
return self._dislikes
@property
def mix(self):
""" The playlist for the related YouTube mix. Returns a dict containing Pafy objects. """
if self._mix_pl is None:
try:
self._mix_pl = get_playlist("RD" + self.videoid)
except IOError:
return None
return self._mix_pl
def _getbest(self, preftype="any", ftypestrict=True, vidonly=False):
"""
Return the highest resolution video available.
Select from video-only streams if vidonly is True
"""
streams = self.videostreams if vidonly else self.streams
if not streams:
return None
def _sortkey(x, key3d=0, keyres=0, keyftype=0):
""" sort function for max(). """
key3d = "3D" not in x.resolution
keyres = int(x.resolution.split("x")[0])
keyftype = preftype == x.extension
strict = (key3d, keyftype, keyres)
nonstrict = (key3d, keyres, keyftype)
return strict if ftypestrict else nonstrict
r = max(streams, key=_sortkey)
if ftypestrict and preftype != "any" and r.extension != preftype:
return None
else:
return r
def getbestvideo(self, preftype="any", ftypestrict=True):
"""
Return the best resolution video-only stream.
set ftypestrict to False to return a non-preferred format if that
has a higher resolution
"""
return self._getbest(preftype, ftypestrict, vidonly=True)
def getbest(self, preftype="any", ftypestrict=True):
"""
Return the highest resolution video+audio stream.
set ftypestrict to False to return a non-preferred format if that
has a higher resolution
"""
return self._getbest(preftype, ftypestrict, vidonly=False)
def getbestaudio(self, preftype="any", ftypestrict=True):
""" Return the highest bitrate audio Stream object."""
if not self.audiostreams:
return None
def _sortkey(x, keybitrate=0, keyftype=0):
""" Sort function for max(). """
keybitrate = int(x.rawbitrate)
keyftype = preftype == x.extension
strict, nonstrict = (keyftype, keybitrate), (keybitrate, keyftype)
return strict if ftypestrict else nonstrict
r = max(self.audiostreams, key=_sortkey)
if ftypestrict and preftype != "any" and r.extension != preftype:
return None
else:
return r
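    # Illustrative sketch (added for clarity; the video id is an assumed
    # placeholder). A typical "pick the best stream and download it" flow:
    #
    #     video = new("dQw4w9WgXcQ")
    #     best = video.getbest(preftype="mp4")  # None if no mp4 and ftypestrict=True
    #     audio = video.getbestaudio(preftype="m4a", ftypestrict=False)
    #     if best:
    #         best.download(quiet=False)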
def populate_from_playlist(self, pl_data):
""" Populate Pafy object with items fetched from playlist data. """
self._title = pl_data.get("title")
self._author = pl_data.get("author")
self._length = int(pl_data.get("length_seconds", 0))
self._rating = pl_data.get("rating", 0.0)
self._viewcount = "".join(re.findall(r"\d", pl_data.get("views", "0")))
self._viewcount = int(self._viewcount)
self._thumb = pl_data.get("thumbnail")
self._description = pl_data.get("description")
self.playlist_meta = pl_data
def get_categoryname(cat_id):
""" Returns a list of video category names for one category ID. """
timestamp = time.time()
cat_cache = cache('categories')
cached = cat_cache.get(cat_id, {})
if cached.get('updated', 0) > timestamp - g.lifespan:
return cached.get('title', 'unknown')
# call videoCategories API endpoint to retrieve title
url = g.urls['vidcat']
query = {'id': cat_id,
'part': 'snippet',
'key': g.api_key}
url += "?" + urlencode(query)
catinfo = json.loads(fetch_decode(url))
try:
for item in catinfo.get('items', []):
title = item.get('snippet', {}).get('title', 'unknown')
cat_cache[cat_id] = {'title':title, 'updated':timestamp}
return title
cat_cache[cat_id] = {'updated':timestamp}
return 'unknown'
except Exception:
raise IOError("Error fetching category name for ID %s" % cat_id)
def set_categories(categories):
""" Take a dictionary mapping video category IDs to name and retrieval
time. All items are stored into cache node 'videoCategories', but
for the ones with a retrieval time too long ago, the v3 API is queried
before.
"""
timestamp = time.time()
idlist = [cid for cid, item in categories.items()
if item.get('updated', 0) < timestamp - g.lifespan]
if len(idlist) > 0:
url = g.urls['vidcat']
query = {'id': ','.join(idlist),
'part': 'snippet',
'key': g.api_key}
url += "?" + urlencode(query)
catinfo = json.loads(fetch_decode(url))
try:
for item in catinfo.get('items', []):
cid = item['id']
title = item.get('snippet', {}).get('title', 'unknown')
categories[cid] = {'title':title, 'updated':timestamp}
except Exception:
raise IOError("Error fetching category name for IDs %s" % idlist)
cache('categories').update(categories)
def load_cache(newcache):
"""Loads a dict into pafy's internal cache."""
set_categories(newcache.get('categories', {}))
def dump_cache():
"""Returns pafy's cache for storing by program."""
return g.cache
def get_playlist(playlist_url, basic=False, gdata=False, signature=True,
size=False, callback=lambda x: None):
""" Return a dict containing Pafy objects from a YouTube Playlist.
The returned Pafy objects are initialised using the arguments to
get_playlist() in the manner documented for pafy.new()
"""
# pylint: disable=R0914
# too many local vars
# Normal playlists start with PL, Mixes start with RD + first video ID
idregx = re.compile(r'((?:RD|PL)[-_0-9a-zA-Z]+)$')
playlist_id = None
if idregx.match(playlist_url):
playlist_id = playlist_url # ID of video
if '://' not in playlist_url:
playlist_url = '//' + playlist_url
parsedurl = urlparse(playlist_url)
if parsedurl.netloc in ('youtube.com', 'www.youtube.com'):
query = parse_qs(parsedurl.query)
if 'list' in query and idregx.match(query['list'][0]):
playlist_id = query['list'][0]
if not playlist_id:
err = "Unrecognized playlist url: %s"
raise ValueError(err % playlist_url)
url = g.urls["playlist"] % playlist_id
try:
allinfo = fetch_decode(url) # unicode
allinfo = json.loads(allinfo)
    except Exception:
raise IOError("Error fetching playlist %s" % playlist_url)
# playlist specific metadata
playlist = dict(
playlist_id=playlist_id,
likes=allinfo.get('likes'),
title=allinfo.get('title'),
author=allinfo.get('author'),
dislikes=allinfo.get('dislikes'),
description=allinfo.get('description'),
items=[]
)
# playlist items specific metadata
for v in allinfo['video']:
vid_data = dict(
added=v.get('added'),
is_cc=v.get('is_cc'),
is_hd=v.get('is_hd'),
likes=v.get('likes'),
title=v.get('title'),
views=v.get('views'),
rating=v.get('rating'),
author=v.get('author'),
user_id=v.get('user_id'),
privacy=v.get('privacy'),
start=v.get('start', 0.0),
dislikes=v.get('dislikes'),
duration=v.get('duration'),
comments=v.get('comments'),
keywords=v.get('keywords'),
thumbnail=v.get('thumbnail'),
cc_license=v.get('cc_license'),
category_id=v.get('category_id'),
description=v.get('description'),
encrypted_id=v.get('encrypted_id'),
time_created=v.get('time_created'),
time_updated=v.get('time_updated'),
length_seconds=v.get('length_seconds'),
end=v.get('end', v.get('length_seconds'))
)
try:
pafy_obj = new(vid_data['encrypted_id'],
basic=basic,
gdata=gdata,
signature=signature,
size=size,
callback=callback)
except IOError as e:
callback("%s: %s" % (v['title'], e.message))
continue
pafy_obj.populate_from_playlist(vid_data)
playlist['items'].append(dict(pafy=pafy_obj,
playlist_meta=vid_data))
callback("Added video: %s" % v['title'])
return playlist
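# Illustrative sketch (not in the original; the playlist id is an assumed
# placeholder). get_playlist returns playlist metadata plus lazily
# initialised Pafy objects:
#
#     pl = get_playlist("PLabcdefghijklmnop")
#     print(pl['title'])
#     first = pl['items'][0]['pafy']   # created with basic=False by default
#     print(first.title)               # pre-populated from playlist metadata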
def set_api_key(key):
"""Sets the api key to be used with youtube."""
g.api_key = key
|
girish946/pafy
|
pafy/pafy.py
|
Python
|
lgpl-3.0
| 33,300
|
# -*- coding: utf-8 -*-
# Parallel Processing Teaching Toolkit
# PyOpenCL - Example 07
# Multiple Kernels Execution
# https://github.com/javierip/parallel-processing-teaching-toolkit
import pyopencl as cl
import numpy as np
import time  # for measuring the running times
VECTOR_SIZE = 50000 # Elements of vector
# Create four random vectors
a_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
b_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
c_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
d_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
# Create empty vectors for results
res_host_1= np.zeros(VECTOR_SIZE).astype(np.float32)
res_host_2= np.zeros(VECTOR_SIZE).astype(np.float32)
res_host= np.zeros(VECTOR_SIZE).astype(np.float32)
res_gpu_host= np.zeros(VECTOR_SIZE).astype(np.float32)
# Create CL context
platform = cl.get_platforms()[0]
device = platform.get_devices()[0]  # get the first available device (not necessarily a GPU)
print "Running: ", platform
print "On GPU: ", device
ctx = cl.Context([device])
queue = cl.CommandQueue(ctx)
# Transfer host (CPU) memory to device (GPU) memory
mf = cl.mem_flags
a_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
b_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
c_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=c_host)
d_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=d_host)
# Kernel code
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a_gpu, __global const float *b_gpu, __global float *res_gpu) {
int gid = get_global_id(0);
res_gpu[gid] = a_gpu[gid] + b_gpu[gid];
}
__kernel void multi(__global const float *a_gpu, __global const float *b_gpu, __global float *res_gpu) {
int gid = get_global_id(0);
res_gpu[gid] = a_gpu[gid] * b_gpu[gid];
}
__kernel void div(__global const float *a_gpu, __global const float *b_gpu, __global float *res_gpu) {
int gid = get_global_id(0);
res_gpu[gid] = a_gpu[gid] / b_gpu[gid];
}
""").build()
# Create empty gpu array for the result
res_gpu_1 = cl.Buffer(ctx, mf.WRITE_ONLY, a_host.nbytes)
res_gpu_2 = cl.Buffer(ctx, mf.WRITE_ONLY, a_host.nbytes)
res_gpu = cl.Buffer(ctx, mf.WRITE_ONLY, a_host.nbytes)
tic = time.time()
# Operation using the GPU - enqueue the kernels on the device
prg.multi(queue, a_host.shape, None, a_gpu, b_gpu, res_gpu_1)
prg.div(queue, a_host.shape, None, c_gpu, d_gpu, res_gpu_2)
prg.sum(queue, a_host.shape, None, res_gpu_1, res_gpu_2, res_gpu)
queue.finish()  # kernel launches are asynchronous; wait for completion so the timing is meaningful
time_gpu = time.time() - tic
# Copy the result from device (GPU) memory back to the host (CPU)
res_gpu_host = np.empty_like(a_host)
cl.enqueue_copy(queue, res_gpu_host, res_gpu)
tic=time.time()
#Operation using the cpu
for i in range(0,VECTOR_SIZE):
res_host_1[i]=a_host[i]*b_host[i]
for i in range(0,VECTOR_SIZE):
res_host_2[i]=c_host[i]/d_host[i]
for i in range(0,VECTOR_SIZE):
res_host[i]=res_host_1[i]+res_host_2[i]
time_cpu=time.time()-tic
# Print the results
print "-" * 80
print "CHECK :" #0 = GOOD
print "-" * 80
print res_gpu_host-res_host
print "-" * 80
print "Vector (a*b+c/d)"
print "Vector Size:", VECTOR_SIZE
print "Time CPU:", time_cpu
print "Time GPU:", time_gpu
|
javierip/parallel-processing-teaching-toolkit
|
04-GPU-accelerators/04-PyOpenCL/07-multi_kernel/multi_kernel.py
|
Python
|
apache-2.0
| 3,109
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Disabled API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
ALIAS = 'os-flavor-disabled'
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class FlavorDisabledController(wsgi.Controller):
def _extend_flavors(self, req, flavors):
for flavor in flavors:
db_flavor = req.get_db_flavor(flavor['id'])
key = "%s:disabled" % FlavorDisabled.alias
flavor[key] = db_flavor['disabled']
def _show(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
if 'flavor' in resp_obj.obj:
resp_obj.attach(xml=FlavorDisabledTemplate())
self._extend_flavors(req, [resp_obj.obj['flavor']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends(action='create')
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
resp_obj.attach(xml=FlavorsDisabledTemplate())
self._extend_flavors(req, list(resp_obj.obj['flavors']))
class FlavorDisabled(extensions.V3APIExtensionBase):
"""Support to show the disabled status of a flavor."""
name = "FlavorDisabled"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/%s/api/v3" % ALIAS
version = 1
def get_controller_extensions(self):
controller = FlavorDisabledController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
def get_resources(self):
return []
def make_flavor(elem):
elem.set('{%s}disabled' % FlavorDisabled.namespace,
'%s:disabled' % FlavorDisabled.alias)
class FlavorDisabledTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavor', selector='flavor')
make_flavor(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
FlavorDisabled.alias: FlavorDisabled.namespace})
class FlavorsDisabledTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavors')
elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
make_flavor(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
FlavorDisabled.alias: FlavorDisabled.namespace})
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
nova/api/openstack/compute/plugins/v3/flavor_disabled.py
|
Python
|
apache-2.0
| 3,157
|
data['2012'].mean().plot(kind='bar')
|
jorisvandenbossche/2015-EuroScipy-pandas-tutorial
|
snippets/07 - Case study - air quality data54.py
|
Python
|
bsd-2-clause
| 36
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Versioning plugin for DB Manager
Description : Set up versioning support for a table
Date : Mar 12, 2012
copyright : (C) 2012 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QAction, QApplication
from qgis.PyQt.QtGui import QIcon
# The load function is called when the "db" database or either one of its
# child db objects (table or schema) is selected by the user.
# @param db is the selected database
# @param mainwindow is the DBManager mainwindow
def load(db, mainwindow):
# add the action to the DBManager menu
action = QAction(QIcon(), QApplication.translate("DBManagerPlugin", "&Change Logging…"), db)
mainwindow.registerAction(action, QApplication.translate("DBManagerPlugin", "&Table"), run)
# The run function is called once the user clicks the action registered in
# the load function above from the DBManager menu/toolbar.
# @param item is the selected db item (either db, schema or table)
# @param action is the clicked action on the DBManager menu/toolbar
# @param mainwindow is the DBManager mainwindow
def run(item, action, mainwindow):
from .dlg_versioning import DlgVersioning
dlg = DlgVersioning(item, mainwindow)
QApplication.restoreOverrideCursor()
try:
dlg.exec_()
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
|
jef-n/QGIS
|
python/plugins/db_manager/db_plugins/postgis/plugins/versioning/__init__.py
|
Python
|
gpl-2.0
| 2,260
|
# -*- coding: utf-8 -*-
from thefuck.rules.grep_recursive import match, get_new_command
from thefuck.types import Command
def test_match():
assert match(Command('grep blah .', 'grep: .: Is a directory'))
assert match(Command(u'grep café .', 'grep: .: Is a directory'))
assert not match(Command('', ''))
def test_get_new_command():
assert get_new_command(Command('grep blah .', '')) == 'grep -r blah .'
assert get_new_command(Command(u'grep café .', '')) == u'grep -r café .'
|
SimenB/thefuck
|
tests/rules/test_grep_recursive.py
|
Python
|
mit
| 503
|
# coding: utf-8
# (c) 2015-09-26 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
import datetime
import numpy as np
from scipy.interpolate import Rbf
import romspy
class Dataset():
def __init__(self, ncfile, mapfile=None, grdfile=None):
self.ncfile = ncfile
if mapfile is not None:
self.mapfile = mapfile
else:
self.mapfile = 'deg_OsakaBayMap_okada.bln'
self.grdfile = grdfile
self.nc = netCDF4.Dataset(self.ncfile, 'a')
self.X = None
self.Y = None
self.X2 = None
self.Y2 = None
def print_time(self, which='ends', name='ocean_time'):
print "\nprint_time(which={}, name={}, tunit={})".format(which, name, romspy.JST)
nc = self.nc
if which == 'ends':
t = len(nc.dimensions[name])
start = nc.variables[name][0]
end = nc.variables[name][t-1]
print netCDF4.num2date(start, romspy.JST), 0
print netCDF4.num2date(end, romspy.JST), t-1
elif which == 'all':
time = nc.variables[name][:]
for t in range(len(time)):
print netCDF4.num2date(time[t], romspy.JST), t
else:
print 'You should select "ends" or "all"'
def print_varname(self, ndim=None):
print '\nprint_varname(ndim={})'.format(ndim)
if ndim is not None:
for vname in self.nc.variables.keys():
if self.nc.variables[vname].ndim == ndim:
print vname,
print ''
else:
print self.nc.variables.keys()
def get_varname(self, ndim=None):
if ndim is not None:
varnames = []
for vname in self.nc.variables.keys():
if self.nc.variables[vname].ndim == ndim:
varnames.append(vname)
return varnames
else:
return self.nc.variables.keys()
def get_xy(self, method, step=1):
"""
        Return x/y coordinates: the grid as-is for velocity plots, and a grid
        shifted by half a cell for contour-type plots.
        Created 2015-11-08
"""
if self.X is None:
if self.grdfile is not None:
grd = netCDF4.Dataset(self.grdfile, 'r')
else:
grd = self.nc
x_rho = grd.variables['lon_rho'][0,:]
y_rho = grd.variables['lat_rho'][:,0]
X, Y = np.meshgrid(x_rho, y_rho)
self.X = X - 0.5 * (x_rho[1] - x_rho[0])
self.Y = Y - 0.5 * (y_rho[1] - y_rho[0])
self.X2 = X
self.Y2 = Y
if method == 'pcolor':
return self.X, self.Y
else:
return self.X2[::step, ::step], self.Y2[::step, ::step]
def get_time(self, time):
if type(time) == datetime.datetime:
t = netCDF4.date2num(time, romspy.JST)
ocean_time = self.nc.variables['ocean_time'][:]
t = np.where(ocean_time==t)[0][0]
elif type(time) == int:
t = time
time = netCDF4.num2date(self.nc.variables['ocean_time'][t], romspy.JST)
        else:
            raise TypeError('time must be datetime.datetime or int, not {}'.format(type(time)))
return t, time
def hview(self, vname, **kw):
"""
        2015-11-08: added ax_heatmap and ax_vecmap to support vector plots
"""
time = kw.pop('time', -1)
k = kw.pop('k', 20)
cff = kw.pop('cff', None)
method = kw.pop('method', 'pcolor')
unit = kw.pop('unit', 'g')
levels = kw.pop('levels', None)
if cff is None:
cff = romspy.unit2cff(vname, unit)
if levels is None:
levels = romspy.levels(vname, unit)
print 'cff={}'.format(cff)
if vname == 'velocity':
var = self.nc.variables['u']
else:
var = self.nc.variables[vname]
if var.ndim > 2:
t, dtime = self.get_time(time)
if vname == 'velocity':
self.add_quiver(vname, t, k, **kw)
elif method == 'limit':
self.add_pcolor_limit(vname, t, k, **kw)
else:
if 'rbf' in method:
self.add_contourf_rbf(vname, t, k, cff, levels, **kw)
if 'pcolor' in method:
self.add_pcolor(vname, t, k, cff, levels, **kw)
if 'contour' in method:
self.add_contour(vname, t, k, cff, levels, **kw)
if 'fill' in method:
self.add_contourf(vname, t, k, cff, levels, **kw)
if self.mapfile is not None:
romspy.basemap(self.mapfile)
if k == 20:
plt.text(135.25, 34.25, 'surface layer')
elif k == 1:
plt.text(135.25, 34.25, 'bottom layer')
if var.ndim == 2:
            plt.title('Model domain & bathymetry')
elif ('avg' in self.ncfile) or ('dia' in self.ncfile):
time_string = datetime.datetime.strftime(dtime,'%Y-%m')
plt.title('Average ({})'.format(time_string))
else:
plt.title(datetime.datetime.strftime(dtime,'%Y-%m-%d %H:%M:%S'))
return plt.gca()
def add_pcolor(self, vname, t, k, cff, levels, **kw):
"""
        Return the handle of the pcolor plot.
        Created 2015-11-08
"""
cblabel = kw.pop('cblabel', vname)
X, Y = self.get_xy('pcolor')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
var2d = np.ma.array(var2d)
inf = var2d > 10**20
zero = var2d <= 0
var2d[inf] = np.ma.masked
var2d[zero] = np.ma.masked
ax = plt.gca()
if levels is not None:
P = ax.pcolor(X, Y, var2d, vmin=levels[0], vmax=levels[-1])
else:
P = ax.pcolor(X, Y, var2d)
cbar = plt.colorbar(P)
cbar.ax.set_ylabel(cblabel)
return P
def add_pcolor_limit(self, vname, t, k, **kw):
"""
        Created 2015-11-30
"""
X, Y = self.get_xy('pcolor')
if vname == 'PO4':
cblabel = kw.pop('cblabel', '{}-limitation'.format(vname))
K_PO4 = kw.pop('K_PO4')
var2d = self.nc.variables[vname][t,k-1,:,:] * K_PO4
var2d = 1.0 / (1.0 + var2d)
elif vname in ['NH4', 'NO3']:
cblabel = kw.pop('cblabel', 'DIN-limitation')
K_NH4 = kw.pop('K_NH4')
K_NO3 = kw.pop('K_NO3')
cff1 = self.nc.variables['NH4'][t,k-1,:,:] * K_NH4
cff2 = self.nc.variables['NO3'][t,k-1,:,:] * K_NO3
inhNH4 = 1.0 / (1.0 + cff1)
L_NH4 = cff1 / (1.0 + cff1)
L_NO3 = cff2 * inhNH4 / (1.0 + cff2)
var2d = L_NH4 + L_NO3
else:
raise AssertionError('vname must be NH4, NO3 or PO4.')
ax = plt.gca()
P = ax.pcolor(X, Y, var2d, vmin=0, vmax=1)
cbar = plt.colorbar(P)
cbar.ax.set_ylabel(cblabel)
return P
def add_contour(self, vname, t, k, cff, levels, **kw):
"""
        Return the handle of the contour plot.
        Created 2015-11-08
"""
fmt = kw.pop('fmt', '%i')
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
ax = plt.gca()
if levels is not None:
C = ax.contour(X, Y, var2d, levels, colors='w', extend=extend)
else:
C = ax.contour(X, Y, var2d, colors='w')
        if fmt != 'off':  # 'is not' would test identity, not string equality
            C.clabel(fmt=fmt, colors='k')  # ,fontsize=9)
return C
def add_contourf(self, vname, t, k, cff, levels, **kw):
"""
        Return the handle of the filled contour plot.
        Created 2015-11-08
"""
cblabel = kw.pop('cblabel', vname)
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
var2d = np.ma.array(var2d)
inf = var2d > 10**20
zero = var2d <= 0
var2d[inf] = np.ma.masked
var2d[zero] = np.ma.masked
print var2d
ax = plt.gca()
if levels is not None:
F = ax.contourf(X, Y, var2d, levels, extend=extend)
else:
F = ax.contourf(X, Y, var2d)
CB = plt.colorbar(F)
CB.ax.set_ylabel(cblabel)
return F
def add_contourf_rbf(self, vname, t, k, cff, levels, **kw):
"""
"""
cblabel = kw.pop('cblabel', vname)
extend = kw.pop('extend', 'max')
X, Y = self.get_xy('contour')
var = self.nc.variables[vname]
if var.ndim == 4:
var2d = var[t,k-1,:,:] * cff
elif var.ndim == 3:
var2d = var[t,:,:] * cff
else:
var2d = var[:,:] * cff
mask1 = (0 <= var2d)
mask2 = (var2d < 10**20)
var1d = var2d[mask1 & mask2].flatten()
x1d = X[mask1 & mask2].flatten()
y1d = Y[mask1 & mask2].flatten()
print var1d, x1d, y1d
rbf = Rbf(x1d, y1d, var1d)
var2d = rbf(X, Y)
ax = plt.gca()
if levels is not None:
F = ax.contourf(X, Y, var2d, levels, extend=extend)
else:
F = ax.contourf(X, Y, var2d)
CB = plt.colorbar(F)
CB.ax.set_ylabel(cblabel)
return F
def add_quiver(self, vname, t, k, **kw):
"""
        Return the handle of the quiver (vector) plot.
        Created 2015-11-08
"""
step = kw.pop('step', 3)
scale = kw.pop('scale', 5)
X, Y = self.get_xy('quiver', step)
if 'u_eastward' in self.nc.variables.keys():
u = self.nc.variables['u_eastward'][t,k-1,::step,::step]
v = self.nc.variables['v_northward'][t,k-1,::step,::step]
else:
u = self.nc.variables['u'][t,k-1,::step,::step]
v = self.nc.variables['v'][t,k-1,::step,::step]
ax = plt.gca()
print X.shape, Y.shape, u.shape, v.shape
if 'u_eastward' in self.nc.variables.keys():
Q = ax.quiver(X, Y, u, v, units='width', angles='xy', scale=scale)
else:
Q = ax.quiver(X[:-1,:], Y[:-1,:], u[:-1,:], v, units='width', angles='xy', scale=scale)
plt.quiverkey(Q, 0.9, 0.1, 1.0/scale, '1 m/s')
return Q
|
okadate/romspy
|
romspy/hview/obsolate/his.py
|
Python
|
mit
| 10,872
|
def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
"AudioStreamOGGVorbis",
]
def get_doc_path():
return "doc_classes"
|
Paulloz/godot
|
modules/stb_vorbis/config.py
|
Python
|
mit
| 200
|
from inspect import currentframe
from inspect import getouterframes
from calmjs import runtime
class BadSimpleRuntime(runtime.DriverRuntime, runtime.Runtime):
def entry_point_load_validated(self, entry_point):
# skip the rest of the checks.
return entry_point.load()
def init_argparser(self, argparser):
level = len(getouterframes(currentframe()))
if level > self.recursionlimit:
# turns out we need to emulate this to keep pypy from
# blowing up coverage reporting; it also makes it die
# quicker, and the emulation works well enough.
raise RuntimeError('maximum recursion depth exceeded')
super(BadSimpleRuntime, self).init_argparser(argparser)
class FakeBootstrapRuntime(runtime.BootstrapRuntime):
pass
fake_bootstrap = FakeBootstrapRuntime()
|
calmjs/calmjs
|
src/calmjs/testing/module3/runtime.py
|
Python
|
gpl-2.0
| 878
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Punctuator model."""
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.tasks.mt import model as mt_model
class RNMTModel(mt_model.RNMTModel):
"""The MT model with an inference graph for punctuator."""
def Inference(self):
"""Constructs the inference subgraphs.
Returns:
dict: ``{'subgraph_name': (fetches, feeds)}``
"""
subgraphs = dict()
with tf.name_scope('inference'):
subgraphs['default'] = self._InferenceSubgraph_Default()
return subgraphs
def _InferenceSubgraph_Default(self):
with tf.name_scope('inference'):
src_strings = tf.placeholder(tf.string, shape=[None])
_, src_ids, src_paddings = self.input_generator.tokenizer.StringsToIds(
src_strings, self.input_generator.params.source_max_length)
src_input_map = py_utils.NestedMap(ids=src_ids, paddings=src_paddings)
encoder_outputs = self.enc.FPropDefaultTheta(src_input_map)
decoder_outs = self.dec.BeamSearchDecode(encoder_outputs)
topk_hyps = decoder_outs.topk_hyps
topk_ids = decoder_outs.topk_ids
topk_lens = decoder_outs.topk_lens
# topk_lens - 1 to remove the EOS id.
topk_decoded = self.input_generator.tokenizer.IdsToStrings(
topk_ids, topk_lens - 1)
topk_decoded = tf.reshape(topk_decoded, tf.shape(topk_hyps))
feeds = py_utils.NestedMap({'src_strings': src_strings})
fetches = py_utils.NestedMap({
'src_ids': src_ids,
'topk_decoded': topk_decoded,
'topk_scores': decoder_outs.topk_scores,
'topk_hyps': topk_hyps,
})
return fetches, feeds
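# Note (not part of the original file): the (fetches, feeds) pair follows
# the usual TF1 convention -- a caller would feed 'src_strings' and fetch,
# e.g., 'topk_decoded' via session.run() on the exported inference graph.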
|
tensorflow/lingvo
|
lingvo/tasks/punctuator/model.py
|
Python
|
apache-2.0
| 2,347
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class ReferenciasSolicitantes(models.Model):
_name = 'propuestas.referencias_solicitantes'
propuesta_id = fields.One2many('propuestas.propuestas', "referencias_sol_id", string="Propuesta")
codigo_referencias = fields.Char(string='Código de las Referencias', required=True)
nombres = fields.Char(string='Nombres del Avalista', required=True)
apellidos = fields.Char(string='Apellidos del Avalista', required=True)
ci = fields.Integer(string='CI del Avalista', required=True)
direccion_habitacion = fields.Char(string='Direccion de Habitacion', required=True)
telefono_fijo = fields.Integer(string='Telefono Fijo', required=True)
telefono_celular = fields.Integer(string='Telefono Celular', required=True)
codigo_solicitante = fields.Char(string='Código del Solicitante', required=True)
|
sani-coop/tinjaca
|
addons/propuestas_old/models/referencias_solicitante.py
|
Python
|
gpl-2.0
| 894
|
""" Tests for commerce views. """
import json
import ddt
import mock
from django.urls import reverse
from nose.plugins.attrib import attr
from course_modes.models import CourseMode
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class UserMixin(object):
""" Mixin for tests involving users. """
def setUp(self):
super(UserMixin, self).setUp()
self.user = UserFactory()
def _login(self):
""" Log into LMS. """
self.client.login(username=self.user.username, password='test')
@attr(shard=1)
@ddt.ddt
class ReceiptViewTests(UserMixin, ModuleStoreTestCase):
""" Tests for the receipt view. """
def setUp(self):
"""
Add a user and a course
"""
super(ReceiptViewTests, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password='test')
self.course = CourseFactory.create(
org='edX',
course='900',
run='test_run'
)
def test_login_required(self):
""" The view should redirect to the login page if the user is not logged in. """
self.client.logout()
response = self.client.post(reverse('commerce:checkout_receipt'))
self.assertEqual(response.status_code, 302)
def post_to_receipt_page(self, post_data):
""" DRY helper """
response = self.client.post(reverse('commerce:checkout_receipt'), params={'basket_id': 1}, data=post_data)
self.assertEqual(response.status_code, 200)
return response
def test_user_verification_status_success(self):
"""
Test user verification status. If the user's enrollment in the course uses a verified mode
(e.g. Verified or Professional), then verification is required.
"""
# Enroll as verified in the course with the current user.
CourseEnrollment.enroll(self.user, self.course.id, mode=CourseMode.VERIFIED)
response = self.client.get(reverse('commerce:user_verification_status'), data={'course_id': self.course.id})
json_data = json.loads(response.content)
self.assertEqual(json_data['is_verification_required'], True)
# Enroll as honor in the course with the current user.
CourseEnrollment.enroll(self.user, self.course.id, mode=CourseMode.HONOR)
response = self.client.get(reverse('commerce:user_verification_status'), data={'course_id': self.course.id})
json_data = json.loads(response.content)
self.assertEqual(json_data['is_verification_required'], False)
def test_user_verification_status_failure(self):
"""
Test user verification status failure. The view should return HttpResponseBadRequest (400) if the course id is missing.
"""
response = self.client.get(reverse('commerce:user_verification_status'))
self.assertEqual(response.status_code, 400)
@ddt.data('decision', 'reason_code', 'signed_field_names', None)
def test_is_cybersource(self, post_key):
"""
Ensure the view uses three specific POST keys to detect a request initiated by Cybersource.
"""
self._login()
post_data = {'decision': 'REJECT', 'reason_code': '200', 'signed_field_names': 'dummy'}
if post_key is not None:
# a key will be missing; we will not expect the receipt page to handle a cybersource decision
del post_data[post_key]
expected_pattern = r"<title>(\s+)Receipt"
else:
expected_pattern = r"<title>(\s+)Payment Failed"
response = self.post_to_receipt_page(post_data)
self.assertRegexpMatches(response.content, expected_pattern)
@ddt.data('ACCEPT', 'REJECT', 'ERROR')
def test_cybersource_decision(self, decision):
"""
Ensure the view renders a page appropriately depending on the Cybersource decision.
"""
self._login()
post_data = {'decision': decision, 'reason_code': '200', 'signed_field_names': 'dummy'}
expected_pattern = r"<title>(\s+)Receipt" if decision == 'ACCEPT' else r"<title>(\s+)Payment Failed"
response = self.post_to_receipt_page(post_data)
self.assertRegexpMatches(response.content, expected_pattern)
@ddt.data(True, False)
@mock.patch('lms.djangoapps.commerce.views.is_user_payment_error')
def test_cybersource_message(self, is_user_message_expected, mock_is_user_payment_error):
"""
Ensure that the page displays the right message for the reason_code (it
may be a user error message or a system error message).
"""
mock_is_user_payment_error.return_value = is_user_message_expected
self._login()
response = self.post_to_receipt_page({'decision': 'REJECT', 'reason_code': '99', 'signed_field_names': 'dummy'})
self.assertTrue(mock_is_user_payment_error.called)
self.assertEqual(mock_is_user_payment_error.call_args[0][0], '99')
user_message = "There was a problem with this transaction"
system_message = "A system error occurred while processing your payment"
self.assertRegexpMatches(response.content, user_message if is_user_message_expected else system_message)
self.assertNotRegexpMatches(response.content, user_message if not is_user_message_expected else system_message)
@with_comprehensive_theme("edx.org")
def test_hide_nav_header(self):
self._login()
post_data = {'decision': 'ACCEPT', 'reason_code': '200', 'signed_field_names': 'dummy'}
response = self.post_to_receipt_page(post_data)
# Verify that the header navigation links are hidden for the edx.org version
self.assertNotContains(response, "How it Works")
self.assertNotContains(response, "Find courses")
self.assertNotContains(response, "Schools & Partners")
|
Stanford-Online/edx-platform
|
lms/djangoapps/commerce/tests/test_views.py
|
Python
|
agpl-3.0
| 6,121
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==========================
Comparing two data sources
==========================
The Comparator component tests two incoming streams to see if the items they
contain match (pass an equality test).
Example Usage
-------------
Compares contents of two files and prints "MISMATCH!" whenever one is found::
class DetectFalse(component):
def main(self):
while 1:
yield 1
if self.dataReady("inbox"):
if not self.recv("inbox"):
print ("MISMATCH!")
Graphline( file1 = RateControlledFileReader(filename="file 1", ...),
file2 = RateControlledFileReader(filename="file 2", ...),
compare = Comparator(),
fdetect = DetectFalse(),
output = ConsoleEchoer(),
linkages = {
("file1","outbox") : ("compare","inA"),
("file2","outbox") : ("compare","inB"),
("compare", "outbox") : ("fdetect", "inbox"),
("fdetect", "outbox") : ("output", "inbox"),
},
).run()
How does it work?
-----------------
The component simply waits until there is data ready on both its "inA" and "inB"
inboxes, then takes an item from each and compares them. The result of the
comparison is sent to the "outbox" outbox.
If data is available at neither, or only one, of the two inboxes, then the
component will wait indefinitely until data is available on both.
If a producerFinished or shutdownMicroprocess message is received on the
"control" inbox, then a producerFinished message is sent out of the "signal"
outbox and the component terminates.
The comparison is done by the combine() method. This method returns the result
of a simple equality test of the two arguments.
You could always subclass this component and reimplement the combine() method to
perform different functions (for example, an 'adder').
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
class Comparator(component):
"""\
Comparator() -> new Comparator component.
Compares items received on "inA" inbox with items received on "inB" inbox.
For each pair, outputs True if items compare equal, otherwise False.
"""
Inboxes = { "inbox" : "NOT USED",
"control" : "NOT USED",
"inA" : "Source 'A' of items to compare",
"inB" : "Source 'B' of items to compare",
}
Outboxes = { "outbox" : "Result of comparison",
"signal" : "NOT USED",
}
def combine(self, valA, valB):
"""\
Returns result of (valA == valB)
Reimplement this method to change the type of comparison from equality testing.
"""
return valA == valB
def mainBody(self):
"""Main loop body."""
if self.dataReady("inA") and self.dataReady("inB"):
self.send(self.combine(self.recv("inA"),self.recv("inB")))
if self.dataReady("control"):
mes = self.recv("control")
if isinstance(mes, shutdownMicroprocess) or isinstance(mes,producerFinished):
self.send(producerFinished(), "signal")
return 0
return 1
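# A minimal sketch of the subclassing suggested in the module docstring
# (not part of the original file): reimplement combine() to turn the
# component into an 'adder' that outputs the sum of each pair instead
# of an equality test.
class Adder(Comparator):
    """Outputs valA + valB for each pair received on "inA" and "inB"."""
    def combine(self, valA, valB):
        return valA + valB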
import Kamaelia.Support.Deprecate as Deprecate
comparator = Deprecate.makeClassStub(
Comparator,
"Use Kamaelia.Util.Comparator:Comparator instead of Kamaelia.Util.Comparator:comparator",
"WARN"
)
__kamaelia_components__ = ( Comparator, )
|
bbc/kamaelia
|
Code/Python/Kamaelia/Kamaelia/Util/Comparator.py
|
Python
|
apache-2.0
| 4,528
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Voropp(MakefilePackage):
"""Voro++ is a open source software library for the computation of the
Voronoi diagram, a widely-used tessellation that has applications in many
scientific fields."""
homepage = "http://math.lbl.gov/voro++/about.html"
url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"
version('0.4.6', '2338b824c3b7b25590e18e8df5d68af9')
def edit(self, spec, prefix):
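# Voro++ ships a hand-written config.mk rather than autoconf, so point
# its hard-coded compiler and install prefix at Spack's choices.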
filter_file(r'CC=g\+\+',
'CC={0}'.format(self.compiler.cxx),
'config.mk')
filter_file(r'PREFIX=/usr/local',
'PREFIX={0}'.format(self.prefix),
'config.mk')
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/voropp/package.py
|
Python
|
lgpl-2.1
| 1,943
|
from __future__ import print_function
import inspect
from pdb import pm
from miasm.core.sembuilder import SemBuilder
from miasm.core.locationdb import LocationDB
import miasm.expression.expression as m2_expr
# Test classes
class IR(object):
def __init__(self, loc_db):
self.loc_db = loc_db
IRDst = m2_expr.ExprId("IRDst", 32)
def get_next_instr(self, _):
return m2_expr.LocKey(0)
def get_next_loc_key(self, _):
return m2_expr.LocKey(0)
class Instr(object):
mode = 32
# Test
sb = SemBuilder(m2_expr.__dict__)
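# SemBuilder rewrites the decorated function's AST into Miasm IR: plain
# assignments become expression assignments, i32()/i16()/i8() build sized
# integer constants, and if/else lowers to IRDst branches across generated
# blocks (a rough description inferred from the test below, not from the
# SemBuilder source).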
@sb.parse
def test(Arg1, Arg2, Arg3):
"Test docstring"
Arg1 = Arg2
value1 = Arg2
value2 = Arg3 + i32(4) - ExprMem(Arg1, 32)
Arg3 = Arg3 if Arg2 + value1 else i32(0) + value2
tmpvar = 'myop'(i32(2))
Arg2 = ('myopsize%d' % Arg1.size)(tmpvar, Arg1)
alias = Arg1[:24]
if not Arg1:
Arg2 = Arg3
else:
alias = {i16(4), i8(5)}
a = m2_expr.ExprId('A', 32)
b = m2_expr.ExprId('B', 32)
c = m2_expr.ExprId('C', 32)
loc_db = LocationDB()
ir = IR(loc_db)
instr = Instr()
res = test(ir, instr, a, b, c)
print("[+] Returned:")
print(res)
print("[+] DocString:", test.__doc__)
print("[+] Cur instr:")
for statement in res[0]:
print(statement)
print("[+] Blocks:")
for irb in res[1]:
print(irb.loc_key)
for assignblk in irb:
for expr in assignblk:
print(expr)
print()
|
serpilliere/miasm
|
test/core/sembuilder.py
|
Python
|
gpl-2.0
| 1,414
|
import sublime
import sublime_plugin
import re
from Expression import expression
class Base(sublime_plugin.EventListener):
def _check_value(self, value, operator, operand):
try:
if operator == sublime.OP_EQUAL:
return value == operand
elif operator == sublime.OP_NOT_EQUAL:
return value != operand
elif operator == sublime.OP_REGEX_MATCH:
return value != None and re.match(operand, value) != None
elif operator == sublime.OP_NOT_REGEX_MATCH:
return value == None or re.match(operand, value) == None
elif operator == sublime.OP_REGEX_CONTAINS:
return value != None and re.search(operand, value) != None
elif operator == sublime.OP_NOT_REGEX_CONTAINS:
return value == None or re.search(operand, value) == None
else:
raise Exception('Unsupported operator: ' + str(operator))
except Exception as error:
print('Failed to check context', operand, value, error)
raise error
def _check_sel(self, name, callback, view, key, operator, operand, match_all):
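# Generic per-selection context check: with match_all every selection
# must pass; otherwise the first selection's result decides.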
if key != name:
return None
result = True
for sel in view.sel():
value = callback(view, sel)
result = self._check_value(value, operator, operand)
if not match_all:
return result
if not result:
return False
return True
def _check(self, name, callback, view, key, operator, operand, match_all):
if key != name:
return None
value = callback(view)
return self._check_value(value, operator, operand)
|
shagabutdinov/sublime-context
|
base.py
|
Python
|
mit
| 1,570
|
#!/usr/bin/python
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.hypervisor.hv_lxc"""
import unittest
from ganeti import constants
from ganeti import objects
from ganeti import hypervisor
from ganeti.hypervisor import hv_lxc
import testutils
class TestConsole(unittest.TestCase):
def test(self):
instance = objects.Instance(name="lxc.example.com",
primary_node="node199-uuid")
node = objects.Node(name="node199", uuid="node199-uuid")
cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, node, {}, {})
self.assertTrue(cons.Validate())
self.assertEqual(cons.kind, constants.CONS_SSH)
self.assertEqual(cons.host, node.name)
self.assertEqual(cons.command[-1], instance.name)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
apyrgio/snf-ganeti
|
test/py/ganeti.hypervisor.hv_lxc_unittest.py
|
Python
|
bsd-2-clause
| 2,126
|
"""empty message
Revision ID: 56f7e11642a6
Revises: None
Create Date: 2015-12-27 14:22:10.457236
"""
# revision identifiers, used by Alembic.
revision = '56f7e11642a6'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('addresses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('street', sa.String(), nullable=True),
sa.Column('city', sa.String(), nullable=True),
sa.Column('state', sa.String(length=2), nullable=True),
sa.Column('zipcode', sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tags',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=30), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_tags_name'), 'tags', ['name'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('social_id', sa.String(length=64), nullable=False),
sa.Column('nickname', sa.String(length=64), nullable=False),
sa.Column('email', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('social_id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('cats',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('birthdate', sa.DateTime(), nullable=True),
sa.Column('variety', sa.String(), nullable=True),
sa.Column('female', sa.Boolean(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('image', sa.String(), nullable=True),
sa.Column('owner_id', sa.Integer(), nullable=True),
sa.Column('address_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['address_id'], ['addresses.id'], ),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('availability',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('start', sa.DateTime(), nullable=True),
sa.Column('end', sa.DateTime(), nullable=True),
sa.Column('cat_id', sa.Integer(), nullable=True),
sa.Column('create_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('reservation_taken', sa.DateTime(), nullable=True),
sa.Column('host_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['cat_id'], ['cats.id'], ),
sa.ForeignKeyConstraint(['host_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_availability_host_id'), 'availability', ['host_id'], unique=False)
op.create_table('xref_cat_tag',
sa.Column('tag_id', sa.Integer(), nullable=False),
sa.Column('cat_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['cat_id'], ['cats.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),
sa.PrimaryKeyConstraint('tag_id', 'cat_id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=64), nullable=True),
sa.Column('body', sa.String(), nullable=True),
sa.Column('start', sa.DateTime(), nullable=True),
sa.Column('end', sa.DateTime(), nullable=True),
sa.Column('display', sa.Boolean(), nullable=True),
sa.Column('reservation_id', sa.Integer(), nullable=True),
sa.Column('smokingEnviron', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['reservation_id'], ['availability.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('posts')
op.drop_table('xref_cat_tag')
op.drop_index(op.f('ix_availability_host_id'), table_name='availability')
op.drop_table('availability')
op.drop_table('cats')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_tags_name'), table_name='tags')
op.drop_table('tags')
op.drop_table('addresses')
### end Alembic commands ###
|
jasonseminara/Cattr
|
migrations/versions/56f7e11642a6_.py
|
Python
|
mit
| 4,408
|
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2011 Brett Smith <tanktarta@blueyonder.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Manages the UI for editing a single macro.
"""
import gnome15.g15locale as g15locale
_ = g15locale.get_translation("gnome15").ugettext
import g15globals
import g15profile
import util.g15scheduler as g15scheduler
import util.g15gconf as g15gconf
import util.g15icontools as g15icontools
import g15uinput
import g15devices
import g15driver
import g15keyio
import g15actions
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import os
import pango
import gconf
import logging
logger = logging.getLogger(__name__)
# Key validation constants
IN_USE = "in-use"
RESERVED_FOR_ACTION = "reserved"
NO_KEYS = "no-keys"
OK = "ok"
class G15MacroEditor():
def __init__(self, parent=None):
"""
Constructor. Create a new macro editor. You must call set_driver()
and set_macro() after construction to populate the macro key buttons
and the other fields.
"""
self.__gconf_client = gconf.client_get_default()
self.__widget_tree = gtk.Builder()
self.__widget_tree.set_translation_domain("g15-macroeditor")
self.__widget_tree.add_from_file(os.path.join(g15globals.ui_dir, "macro-editor.ui"))
self.__window = self.__widget_tree.get_object("EditMacroDialog")
if self.__window is not None and parent is not None:
self.__window.set_transient_for(parent)
self.adjusting = False
self.editing_macro = None
self.selected_profile = None
self.memory_number = 1
self.close_button = None
# Private
self.__text_buffer = None
self.__rows = None
self.__driver = None
self.__key_buttons = None
self.__load_objects()
self.__load_actions()
self.__create_macro_info_bar()
self.__macro_save_timer = None
# Connect signal handlers
self.__widget_tree.connect_signals(self)
def run(self):
self.__window.run()
self.__window.hide()
def set_driver(self, driver):
"""
Set the driver to use for this macro. This allows the full set of
available keys (and other capabilities) to be determined.
Keyword arguments:
driver -- driver
"""
self.__driver = driver
def set_macro(self, macro):
"""
Set the macro to edit. Note, set_driver must have been called first
so it knows which macro keys are available for the model
in question.
Keyword arguments:
macro -- macro to edit
"""
if self.__driver is None:
raise Exception("No driver set. Cannot set macro")
self.adjusting = True
try:
self.editing_macro = macro
self.selected_profile = macro.profile
self.memory_number = macro.memory
self.__widget_tree.get_object("KeyBox").set_sensitive(not self.selected_profile.read_only)
keys_frame = self.__widget_tree.get_object("KeysFrame")
self.__allow_combination.set_active(len(self.editing_macro.keys) > 1)
# Build the G-Key selection widget
if self.__rows:
keys_frame.remove(self.__rows)
self.__rows = gtk.VBox()
self.__rows.set_spacing(4)
self.__key_buttons = []
for row in self.__driver.get_key_layout():
hbox = gtk.HBox()
hbox.set_spacing(4)
for key in row:
key_name = g15driver.get_key_names([ key ])
g_button = gtk.ToggleButton(" ".join(key_name))
g_button.key = key
key_active = key in self.editing_macro.keys
g_button.set_active(key_active)
self.__set_button_style(g_button)
g_button.connect("toggled", self._toggle_key, key, self.editing_macro)
self.__key_buttons.append(g_button)
hbox.pack_start(g_button, True, True)
self.__rows.pack_start(hbox, False, False)
keys_frame.add(self.__rows)
keys_frame.show_all()
# Set the activation mode
for index, (activate_on_id, activate_on_name) in enumerate(self.__activate_on_combo.get_model()):
if activate_on_id == self.editing_macro.activate_on:
self.__activate_on_combo.set_active(index)
# Set the repeat mode
for index, (repeat_mode_id, repeat_mode_name) in enumerate(self.__repeat_mode_combo.get_model()):
if repeat_mode_id == self.editing_macro.repeat_mode:
self.__repeat_mode_combo.set_active(index)
# Set the type of macro
for index, (macro_type, macro_type_name) in enumerate(self.__map_type_model):
if macro_type == self.editing_macro.type:
self.__mapped_key_type_combo.set_active(index)
self.__set_available_options()
# Set the other details
for index, row in enumerate(self.__map_type_model):
if row[0] == self.editing_macro.type:
self.__mapped_key_type_combo.set_active(index)
break
self.__load_keys()
if self.editing_macro.type in [ g15profile.MACRO_MOUSE, g15profile.MACRO_JOYSTICK, g15profile.MACRO_DIGITAL_JOYSTICK, g15profile.MACRO_KEYBOARD ]:
for index, row in enumerate(self.__mapped_key_model):
if self.__mapped_key_model[index][0] == self.editing_macro.macro:
self.__select_tree_row(self.__uinput_tree, index)
break
elif self.editing_macro.type == g15profile.MACRO_ACTION:
for index, row in enumerate(self.__action_model):
if self.__action_model[index][0] == self.editing_macro.macro:
self.__select_tree_row(self.__action_tree, index)
break
self.__text_buffer = gtk.TextBuffer()
self.__text_buffer.connect("changed", self._macro_script_changed)
self.__macro_script.set_buffer(self.__text_buffer)
self.__turbo_rate.get_adjustment().set_value(self.editing_macro.repeat_delay)
self.__memory_bank_label.set_text("M%d" % self.memory_number)
self.__macro_name_field.set_text(self.editing_macro.name)
self.__override_default_repeat.set_active(self.editing_macro.repeat_delay != -1)
if self.editing_macro.type == g15profile.MACRO_SIMPLE:
self.__simple_macro.set_text(self.editing_macro.macro)
else:
self.__simple_macro.set_text("")
if self.editing_macro.type == g15profile.MACRO_COMMAND:
cmd = self.editing_macro.macro
background = False
if cmd.endswith("&"):
cmd = cmd[:-1]
background = True
elif cmd == "":
background = True
self.__command.set_text(cmd)
self.__run_in_background.set_active(background)
else:
self.__run_in_background.set_active(False)
self.__command.set_text("")
if self.editing_macro.type == g15profile.MACRO_SCRIPT:
self.__text_buffer.set_text(self.editing_macro.macro)
else:
self.__text_buffer.set_text("")
self.__check_macro(self.editing_macro.keys)
self.__macro_name_field.grab_focus()
finally:
self.adjusting = False
self.editing_macro.name = self.__macro_name_field.get_text()
self.__set_available_options()
"""
Event handlers
"""
def _override_default_repeat_changed(self, widget):
if not self.adjusting:
sel = widget.get_active()
if sel:
self.editing_macro.repeat_delay = 0.1
self.__turbo_rate.get_adjustment().set_value(0.1)
self.__save_macro(self.editing_macro)
self.__set_available_options()
else:
self.editing_macro.repeat_delay = -1.0
self.__set_available_options()
self.__save_macro(self.editing_macro)
def _macro_script_changed(self, text_buffer):
self.editing_macro.macro = text_buffer.get_text(text_buffer.get_start_iter(), text_buffer.get_end_iter())
self.__save_macro(self.editing_macro)
def _show_script_editor(self, widget):
editor = G15MacroScriptEditor(self.__gconf_client, self.__driver, self.editing_macro, self.__window)
if editor.run():
self.__text_buffer.set_text(self.editing_macro.macro)
self.__save_macro(self.editing_macro)
def _turbo_changed(self, widget):
if not self.adjusting:
self.editing_macro.repeat_delay = widget.get_value()
self.__save_macro(self.editing_macro)
def _repeat_mode_selected(self, widget):
if not self.adjusting:
self.editing_macro.repeat_mode = widget.get_model()[widget.get_active()][0]
self.__save_macro(self.editing_macro)
self.__set_available_options()
def _mapped_key_type_changed(self, widget):
if not self.adjusting:
key = self.__map_type_model[widget.get_active()][0]
self.editing_macro.type = key
self.editing_macro.macro = ""
self.adjusting = True
try:
self.__load_keys()
finally:
self.adjusting = False
self.__select_tree_row(self.__uinput_tree, 0)
self.set_macro(self.editing_macro)
self.__set_available_options()
def _clear_filter(self, widget):
self.__filter.set_text("")
def _filter_changed(self, widget):
try:
self.adjusting = True
self.__load_keys()
finally:
self.adjusting = False
self._key_selected(None)
def _simple_macro_changed(self, widget):
self.editing_macro.macro = widget.get_text()
self.__save_macro(self.editing_macro)
def _command_changed(self, widget):
self.__save_command()
def _browse_for_command(self, widget):
dialog = gtk.FileChooserDialog(_("Open.."),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
file_filter = gtk.FileFilter()
file_filter.set_name(_("All files"))
file_filter.add_pattern("*")
dialog.add_filter(file_filter)
response = dialog.run()
while gtk.events_pending():
gtk.main_iteration(False)
if response == gtk.RESPONSE_OK:
self.__command.set_text(dialog.get_filename())
dialog.destroy()
return False
def _run_in_background_changed(self, widget):
if not self.adjusting:
self.__save_command()
def _allow_combination_changed(self, widget):
if not self.adjusting and not self.__allow_combination.get_active():
for button in self.__key_buttons:
if len(self.editing_macro.keys) > 1:
button.set_active(False)
self.__check_macro(self.editing_macro.keys)
def _macro_name_changed(self, widget):
self.editing_macro.name = widget.get_text()
self.__save_macro(self.editing_macro)
def _toggle_key(self, widget, key, macro):
"""
Event handler invoked when one of the macro key buttons is pressed.
"""
keys = list(macro.keys)
if key in keys:
keys.remove(key)
else:
if not self.adjusting and not self.__allow_combination.get_active():
for button in self.__key_buttons:
if button != widget:
self.adjusting = True
try :
button.set_active(False)
finally:
self.adjusting = False
for ikey in keys:
if ikey != key:
keys.remove(ikey)
keys.append(key)
if not self.selected_profile.are_keys_in_use(self.editing_macro.activate_on,
self.memory_number, keys,
exclude=[self.editing_macro]):
if self.__macro_name_field.get_text() == "" or self.__macro_name_field.get_text().startswith("Macro "):
new_name = " ".join(g15driver.get_key_names(keys))
self.editing_macro.name = _("Macro %s") % new_name
self.__macro_name_field.set_text(self.editing_macro.name)
macro.set_keys(keys)
self.__set_button_style(widget)
if not self.adjusting:
self.__check_macro(keys)
self.__save_macro(self.editing_macro)
def _key_selected(self, widget):
if not self.adjusting:
(model, path) = self.__uinput_tree.get_selection().get_selected()
if path is not None:
key = model[path][0]
self.editing_macro.macro = key
self.__save_macro(self.editing_macro)
def _action_selected(self, widget):
if not self.adjusting:
(model, path) = self.__action_tree.get_selection().get_selected()
if path:
key = model[path][0]
self.editing_macro.macro = key
self.__save_macro(self.editing_macro)
def _activate_on_changed(self, widget):
if not self.adjusting:
self.editing_macro.set_activate_on(widget.get_model()[widget.get_active()][0])
self.__save_macro(self.editing_macro)
if self.editing_macro.activate_on == g15driver.KEY_STATE_HELD:
self.__repeat_mode_combo.set_active(0)
self.__set_available_options()
self.__check_macro(list(self.editing_macro.keys))
"""
Private
"""
def __save_command(self):
macrotext = self.__command.get_text()
if self.__run_in_background.get_active():
macrotext += "&"
self.editing_macro.macro = macrotext
self.__save_macro(self.editing_macro)
def __select_tree_row(self, tree, row):
tree_iter = tree.get_model().iter_nth_child(None, row)
if tree_iter:
tree_path = tree.get_model().get_path(tree_iter)
tree.get_selection().select_path(tree_path)
tree.scroll_to_cell(tree_path)
def __save_macro(self, macro):
"""
Schedule saving of the macro in 2 seconds. This may be called again
before the 2 seconds are up, in which case the timer will reset.
Keyword arguments:
macro -- macro to save
"""
if not self.adjusting:
if self.__macro_save_timer is not None:
self.__macro_save_timer.cancel()
self.__macro_save_timer = g15scheduler.schedule("SaveMacro", 2, self.__do_save_macro, macro)
def __do_save_macro(self, macro):
"""
Actually save the macro. This should not be called directly.
Keyword arguments:
macro -- macro to save
"""
if self.__validate_macro(macro.keys) in [ OK, RESERVED_FOR_ACTION ] :
logger.info("Saving macro %s", macro.name)
macro.save()
def __load_actions(self):
self.__action_model.clear()
for action in g15actions.actions:
self.__action_model.append([action, action])
def __load_objects(self):
"""
Load references to the various components contained in the Glade file.
"""
self.__macro_script = self.__widget_tree.get_object("MacroScript")
self.__map_type_model = self.__widget_tree.get_object("MapTypeModel")
self.__mapped_key_model = self.__widget_tree.get_object("MappedKeyModel")
self.__mapped_key_type_combo = self.__widget_tree.get_object("MappedKeyTypeCombo")
self.__map_type_model = self.__widget_tree.get_object("MapTypeModel")
self.__simple_macro = self.__widget_tree.get_object("SimpleMacro")
self.__command = self.__widget_tree.get_object("Command")
self.__run_in_background = self.__widget_tree.get_object("RunInBackground")
self.__browse_for_command = self.__widget_tree.get_object("BrowseForCommand")
self.__allow_combination = self.__widget_tree.get_object("AllowCombination")
self.__macro_name_field = self.__widget_tree.get_object("MacroNameField")
self.__macro_warning_box = self.__widget_tree.get_object("MacroWarningBox")
self.__memory_bank_label = self.__widget_tree.get_object("MemoryBankLabel")
self.__uinput_box = self.__widget_tree.get_object("UinputBox")
self.__command_box = self.__widget_tree.get_object("CommandBox")
self.__script_box = self.__widget_tree.get_object("ScriptBox")
self.__simple_box = self.__widget_tree.get_object("SimpleBox")
self.__action_box = self.__widget_tree.get_object("ActionBox")
self.__uinput_tree = self.__widget_tree.get_object("UinputTree")
self.__action_tree = self.__widget_tree.get_object("ActionTree")
self.__action_model = self.__widget_tree.get_object("ActionModel")
self.__repeat_mode_combo = self.__widget_tree.get_object("RepeatModeCombo")
self.__repetition_frame = self.__widget_tree.get_object("RepetitionFrame")
self.__turbo_rate = self.__widget_tree.get_object("TurboRate")
self.__turbo_box = self.__widget_tree.get_object("TurboBox")
self.__filter = self.__widget_tree.get_object("Filter")
self.__override_default_repeat = self.__widget_tree.get_object("OverrideDefaultRepeat")
self.__activate_on_combo = self.__widget_tree.get_object("ActivateOnCombo")
self.__show_script_editor = self.__widget_tree.get_object("ShowScriptEditor")
def __load_keys(self):
"""
Load the available keys for the selected macro type
"""
sel_type = self.__get_selected_type()
filter_text = self.__filter.get_text().strip().lower()
if g15profile.is_uinput_type(sel_type):
(model, path) = self.__uinput_tree.get_selection().get_selected()
sel = None
if path:
sel = model[path][0]
model.clear()
found = False
for n, v in g15uinput.get_buttons(sel_type):
if len(filter_text) == 0 or filter_text in n.lower():
model.append([n, v])
if n == sel:
self.__select_tree_row(self.__uinput_tree, len(model))
found = True
(model, path) = self.__uinput_tree.get_selection().get_selected()
if not found and len(model) > 0:
self.__select_tree_row(self.__uinput_tree, 0)
def __get_selected_type(self):
"""
Get the selected macro type
"""
return self.__map_type_model[self.__mapped_key_type_combo.get_active()][0]
def __set_available_options(self):
"""
Set the sensitive state of various components based on the current
selection of other components.
"""
sel_type = self.__get_selected_type()
uinput_type = g15profile.is_uinput_type(sel_type)
opposite_state = g15driver.KEY_STATE_UP if \
self.editing_macro.activate_on == \
g15driver.KEY_STATE_HELD else \
g15driver.KEY_STATE_HELD
key_conflict = self.selected_profile.get_macro(opposite_state, \
self.editing_macro.memory,
self.editing_macro.keys) is not None
self.__uinput_tree.set_sensitive(uinput_type)
self.__run_in_background.set_sensitive(sel_type == g15profile.MACRO_COMMAND)
self.__command.set_sensitive(sel_type == g15profile.MACRO_COMMAND)
self.__browse_for_command.set_sensitive(sel_type == g15profile.MACRO_COMMAND)
self.__simple_macro.set_sensitive(sel_type == g15profile.MACRO_SIMPLE)
self.__macro_script.set_sensitive(sel_type == g15profile.MACRO_SCRIPT)
self.__action_tree.set_sensitive(sel_type == g15profile.MACRO_ACTION)
self.__activate_on_combo.set_sensitive(not uinput_type and not key_conflict)
self.__repeat_mode_combo.set_sensitive(self.__activate_on_combo.get_active() != 2)
self.__override_default_repeat.set_sensitive(self.editing_macro.repeat_mode != g15profile.NO_REPEAT)
self.__turbo_box.set_sensitive(self.editing_macro.repeat_mode != g15profile.NO_REPEAT and self.__override_default_repeat.get_active())
self.__simple_box.set_visible(sel_type == g15profile.MACRO_SIMPLE)
self.__command_box.set_visible(sel_type == g15profile.MACRO_COMMAND)
self.__action_box.set_visible(sel_type == g15profile.MACRO_ACTION)
self.__script_box.set_visible(sel_type == g15profile.MACRO_SCRIPT)
self.__show_script_editor.set_visible(sel_type == g15profile.MACRO_SCRIPT)
self.__uinput_box.set_visible(uinput_type)
def __validate_macro(self, keys):
"""
Validate the list of keys, checking whether they are in use or reserved
for an action, and that some have actually been supplied.
Keyword arguments:
keys -- list of keys to validate
"""
if len(keys) > 0:
reserved = g15devices.are_keys_reserved(self.__driver.get_model_name(), keys)
in_use = self.selected_profile.are_keys_in_use(self.editing_macro.activate_on,
self.memory_number,
keys,
exclude=[self.editing_macro])
if in_use:
return IN_USE
elif reserved:
return RESERVED_FOR_ACTION
else:
return OK
else:
return NO_KEYS
def __check_macro(self, keys):
"""
Check whether the keys provided are valid for the current state, e.g.
check if another macro or action is using them. Note, this still
allows the change to happen; it will just show a warning and prevent
the window from being closed while the conflict remains.
"""
val = self.__validate_macro(keys)
if val == IN_USE:
self.__macro_infobar.set_message_type(gtk.MESSAGE_ERROR)
self.__macro_warning_label.set_text(_("This key combination is already in use with " + \
"another macro. Please choose a different key or combination of keys"))
self.__macro_infobar.set_visible(True)
self.__macro_infobar.show_all()
if self.close_button is not None:
self.close_button.set_sensitive(False)
elif val == RESERVED_FOR_ACTION:
self.__macro_infobar.set_message_type(gtk.MESSAGE_WARNING)
self.__macro_warning_label.set_text(_("This key combination is reserved for use with an action. You " + \
"may use it, but the results are undefined."))
self.__macro_infobar.set_visible(True)
self.__macro_infobar.show_all()
if self.close_button is not None:
self.close_button.set_sensitive(True)
elif val == NO_KEYS:
self.__macro_infobar.set_message_type(gtk.MESSAGE_WARNING)
self.__macro_warning_label.set_text(_("You have not chosen a macro key to assign the action to."))
self.__macro_infobar.set_visible(True)
self.__macro_infobar.show_all()
if self.close_button is not None:
self.close_button.set_sensitive(False)
else:
self.__macro_infobar.set_visible(False)
if self.close_button is not None:
self.close_button.set_sensitive(True)
def __create_macro_info_bar(self):
"""
Creates a component for displaying information about the current
macro, such as conflicts. The component is added to a placeholder in
the Glade file.
"""
self.__macro_infobar = gtk.InfoBar()
self.__macro_infobar.set_size_request(-1, -1)
self.__macro_warning_label = gtk.Label()
self.__macro_warning_label.set_line_wrap(True)
self.__macro_warning_label.set_width_chars(60)
content = self.__macro_infobar.get_content_area()
content.pack_start(self.__macro_warning_label, True, True)
self.__macro_warning_box.pack_start(self.__macro_infobar, True, True)
self.__macro_infobar.set_visible(False)
def __set_button_style(self, button):
"""
Alter the button style based on whether it is active or not
Keyword arguments:
button -- button widget
"""
font = pango.FontDescription("Sans 10")
if button.get_use_stock():
label = button.child.get_children()[1]
elif isinstance(button.child, gtk.Label):
label = button.child
else:
raise ValueError("button does not have a label")
if button.get_active():
font.set_weight(pango.WEIGHT_HEAVY)
else:
font.set_weight(pango.WEIGHT_MEDIUM)
label.modify_font(font)
OP_ICONS = { 'delay' : 'gtk-media-pause',
'press' : 'gtk-go-down',
'upress' : 'gtk-go-down',
'release' : 'gtk-go-up',
'urelease' : 'gtk-go-up',
'execute' : 'gtk-execute',
'label' : 'gtk-underline',
'wait' : 'gtk-stop',
'goto' : [ 'stock_media-prev','media-skip-backward','gtk-media-previous' ] }
class G15MacroScriptEditor():
def __init__(self, gconf_client, driver, editing_macro, parent = None):
self.__gconf_client = gconf_client
self.__driver = driver
self.__clipboard = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
self.__recorder = g15keyio.G15KeyRecorder(self.__driver)
self.__recorder.on_stop = self._on_stop_record
self.__recorder.on_add = self._on_record_add
self.__widget_tree = gtk.Builder()
self.__widget_tree.set_translation_domain("g15-macroeditor")
self.__widget_tree.add_from_file(os.path.join(g15globals.ui_dir, "script-editor.ui"))
self._load_objects()
if parent is not None:
self.__window.set_transient_for(parent)
self._load_key_presses()
self._configure_widgets()
self._add_info_box()
self.set_macro(editing_macro)
self._set_available()
# Connect signal handlers
self.__widget_tree.connect_signals(self)
# Configure defaults
self.__output_delays.set_active(g15gconf.get_bool_or_default(self.__gconf_client, "/apps/gnome15/script_editor/record_delays", True))
self.__emit_uinput.set_active(g15gconf.get_bool_or_default(self.__gconf_client, "/apps/gnome15/script_editor/emit_uinput", False))
self.__recorder.output_delays = self.__output_delays.get_active()
self.__recorder.emit_uinput = self.__emit_uinput.get_active()
def set_macro(self, macro):
self.__editing_macro = macro
self.__macros = self.__editing_macro.macro.split("\n")
self.__recorder.clear()
self._rebuild_model()
self._set_available()
def _rebuild_model(self):
self.__script_model.clear()
for macro_text in self.__macros:
split = macro_text.split(" ")
op = split[0].lower()
if len(split) > 1:
val = " ".join(split[1:])
if op in OP_ICONS:
icon = OP_ICONS[op]
icon_path = g15icontools.get_icon_path(icon, 24)
self.__script_model.append([gtk.gdk.pixbuf_new_from_file(icon_path), val, op, True])
self._validate_script()
def _validate_script(self):
msg = self._do_validate_script()
if msg:
self._show_message(gtk.MESSAGE_ERROR, msg)
self.__save_button.set_sensitive(False)
else:
self.__infobar.hide_all()
self.__save_button.set_sensitive(True)
def _do_validate_script(self):
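# Two passes over the script model: labels must be unique; then every
# press needs a matching release (and vice versa), gotos must target an
# existing label, and nothing may be left pressed at the end.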
labels = []
for _,val,op,_ in self.__script_model:
if op == "label":
if val in labels:
return "Label <b>%s</b> is defined more than once" % val
labels.append(val)
pressed = {}
for _,val,op,_ in self.__script_model:
if op == "press" or op == "upress":
if val in pressed:
return "More than one key press of <b>%s</b> before a release" % val
pressed[val] = True
elif op == "release" or op == "urelease":
if not val in pressed:
return "Release of <b>%s</b> before it was pressed" % val
del pressed[val]
elif op == "goto":
if not val in labels:
return "Goto <b>%s</b> uses a label that doesn't exist" % val
if len(pressed) > 0:
return "The script leaves <b>%s</b> pressed on completion" % ",".join(pressed.keys())
return None
def run(self):
response = self.__window.run()
self.__window.hide()
if response == gtk.RESPONSE_OK:
buf = ""
for p in self.__macros:
if not buf == "":
buf += "\n"
buf += p
self.__editing_macro.macro = buf
return True
def _add_info_box(self):
self.__infobar = gtk.InfoBar()
self.__infobar.set_size_request(-1, 32)
self.__warning_label = gtk.Label()
self.__warning_label.set_size_request(400, -1)
self.__warning_label.set_line_wrap(True)
self.__warning_label.set_alignment(0.0, 0.0)
self.__warning_image = gtk.Image()
content = self.__infobar.get_content_area()
content.pack_start(self.__warning_image, False, False)
content.pack_start(self.__warning_label, True, True)
self.__info_box_area.pack_start(self.__infobar, False, False)
self.__infobar.hide_all()
def _show_message(self, message_type, text):
print "Showing message",text
self.__infobar.set_message_type(message_type)
self.__warning_label.set_text(text)
self.__warning_label.set_use_markup(True)
if message_type == gtk.MESSAGE_WARNING:
self.__warning_image.set_from_stock(gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_DIALOG)
# self.main_window.check_resize()
self.__infobar.show_all()
def _load_objects(self):
self.__window = self.__widget_tree.get_object("EditScriptDialog")
self.__script_model = self.__widget_tree.get_object("ScriptModel")
self.__script_tree = self.__widget_tree.get_object("ScriptTree")
self.__set_value_dialog = self.__widget_tree.get_object("SetValueDialog")
self.__set_value = self.__widget_tree.get_object("SetValue")
self.__edit_selected_values = self.__widget_tree.get_object("EditSelectedValues")
self.__delay_adjustment = self.__widget_tree.get_object("DelayAdjustment")
self.__command = self.__widget_tree.get_object("Command")
self.__label = self.__widget_tree.get_object("Label")
self.__goto_label_model = self.__widget_tree.get_object("GotoLabelModel")
self.__goto_label = self.__widget_tree.get_object("GotoLabel")
self.__key_press_model = self.__widget_tree.get_object("KeyPressModel")
self.__record_key = self.__widget_tree.get_object("RecordKey")
self.__emit_uinput = self.__widget_tree.get_object("EmitUInput")
self.__output_delays = self.__widget_tree.get_object("OutputDelays")
self.__record_button = self.__widget_tree.get_object("RecordButton")
self.__stop_button = self.__widget_tree.get_object("StopButton")
self.__record_status = self.__widget_tree.get_object("RecordStatus")
self.__scrip_editor_popup = self.__widget_tree.get_object("ScriptEditorPopup")
self.__info_box_area = self.__widget_tree.get_object("InfoBoxArea")
self.__save_button = self.__widget_tree.get_object("SaveButton")
self.__wait_combo = self.__widget_tree.get_object("WaitCombo")
self.__wait_model = self.__widget_tree.get_object("WaitModel")
def _load_key_presses(self):
self.__key_press_model.clear()
if self.__emit_uinput.get_active():
for n, v in g15uinput.get_buttons(g15uinput.KEYBOARD):
self.__key_press_model.append([n])
else:
for n in g15keyio.get_keysyms():
self.__key_press_model.append([n])
def _configure_widgets(self):
self.__script_tree.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
tree_selection = self.__script_tree.get_selection()
tree_selection.connect("changed", self._on_selection_changed)
def _on_tree_button_press(self, treeview, event):
if event.button == 3:
x = int(event.x)
y = int(event.y)
time = event.time
tree_selection = self.__script_tree.get_selection()
if tree_selection.count_selected_rows() < 2:
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, _, _ = pthinfo
treeview.grab_focus()
treeview.set_cursor( path, col, 0)
self.__script_editor_popup.popup(None, None, None, event.button, time)
return True
def _on_cut(self, widget):
self._on_copy(widget)
tree_selection = self.__script_tree.get_selection()
_, selected_paths = tree_selection.get_selected_rows()
for p in reversed(selected_paths):
del self.__macros[p[0]]
self._rebuild_model()
def _on_copy(self, widget):
tree_selection = self.__script_tree.get_selection()
model, selected_paths = tree_selection.get_selected_rows()
buf = ""
for p in selected_paths:
if not buf == "":
buf += "\n"
buf += self._format_row(model[p])
self.__clipboard.set_text(buf)
def _on_paste(self, widget):
self.__clipboard.request_text(self._clipboard_text_received)
def _clipboard_text_received(self, clipboard, text, data):
i = self._get_insert_index()
if text:
for macro_text in text.split("\n"):
split = macro_text.split(" ")
op = split[0].lower()
if len(split) > 1:
val = split[1]
if op in OP_ICONS:
self.__macros.insert(i, macro_text)
i += 1
self._rebuild_model()
def _on_record_add(self, pr, key):
gobject.idle_add(self._set_available)
def _on_selection_changed(self, widget):
self.__edit_selected_values.set_sensitive(self._unique_selected_types() == 1)
def _unique_selected_types(self):
tree_selection = self.__script_tree.get_selection()
model, selected_paths = tree_selection.get_selected_rows()
t = {}
for p in selected_paths:
op = model[p][2]
t[op] = ( t[op] if op in t else 0 ) + 1
return len(t)
def _on_emit_uinput_toggled(self, widget):
self.__recorder.emit_uinput = widget.get_active()
self.__gconf_client.set_bool("/apps/gnome15/script_editor/emit_uinput", widget.get_active())
def _on_deselect_all(self, widget):
self.__script_tree.get_selection().unselect_all()
def _on_edit_selected_values_activate(self, widget):
self.__set_value_dialog.set_transient_for(self.__window)
response = self.__set_value_dialog.run()
self.__set_value_dialog.hide()
if response == gtk.RESPONSE_OK:
tree_selection = self.__script_tree.get_selection()
model, selected_paths = tree_selection.get_selected_rows()
for p in selected_paths:
self.__macros[p[0]] = self._format_row(model[p], self.__set_value.get_text())
self._rebuild_model()
def _format_row(self, row, value = None):
return "%s %s" % (self._format_op(row[2]),value if value is not None else row[1])
def _format_op(self, op):
return op[:1].upper() + op[1:]
def _on_browse_command(self, widget):
dialog = gtk.FileChooserDialog("Choose Command..",
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_transient_for(self.__window)
dialog.set_filename(self.__command.get_text())
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
self.__command.set_text(dialog.get_filename())
def _on_new_goto(self, widget):
self.__goto_label_model.clear()
for _,val,op,_ in self.__script_model:
if op == "label":
self.__goto_label_model.append([val])
if not self.__goto_label.get_active() >= 0 and len(self.__goto_label_model) > 0:
self.__goto_label.set_active(0)
dialog = self.__widget_tree.get_object("AddGotoDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
self._insert_macro("%s %s" % ( self._format_op("goto"), self.__goto_label_model[self.__goto_label.get_active()][0]))
def _on_new_label(self, widget):
dialog = self.__widget_tree.get_object("AddLabelDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
self._insert_macro("%s %s" % ( self._format_op("label"), self.__label.get_text()))
def _on_new_execute(self, widget):
dialog = self.__widget_tree.get_object("AddExecuteDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
self._insert_macro("%s %s" % ( self._format_op("execute"), self.__command.get_text()))
def _on_new_wait(self, widget):
dialog = self.__widget_tree.get_object("AddWaitDialog")
dialog.set_transient_for(self.__window)
if not self.__wait_combo.get_active() >= 0 and len(self.__wait_model) > 0:
self.__wait_combo.set_active(0)
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
self._insert_macro("%s %s" % ( self._format_op("wait"), self.__wait_model[self.__wait_combo.get_active()][0]))
def _on_add_delay(self, widget):
dialog = self.__widget_tree.get_object("AddDelayDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
self._stop_recorder()
if response == gtk.RESPONSE_OK:
self._insert_macro("%s %s" % ( self._format_op("delay"), int(self.__delay_adjustment.get_value())) )
def _on_rows_reordered(self, model, path, iter, new_order):
print "reorder"
# The model will have been updated, so update our text base list from that
for index,row in enumerate(self._script.model):
x = self._format_row(row)
print x
self.__macros[index] = x
self._rebuild_model()
def _get_insert_index(self):
tree_selection = self.__script_tree.get_selection()
_, selected_paths = tree_selection.get_selected_rows()
return len(self.__script_model) if len(selected_paths) == 0 else selected_paths[0][0] + 1
def _on_start_record_button(self, widget):
self.__recorder.start_record()
self._set_available()
def _set_available(self):
self.__record_button.set_sensitive(not self.__recorder.is_recording())
self.__stop_button.set_sensitive(self.__recorder.is_recording())
ops = len(self.__recorder.script)
        if self.__recorder.is_recording():
            status = _("Now recording (%d) operations") % ops
        elif ops > 0:
            status = _("Will insert %d operations") % ops
        else:
            status = ""
        self.__record_status.set_text(status)
def _on_stop_record_button(self, widget):
self.__recorder.stop_record()
def _on_stop_record(self, recorder):
gobject.idle_add(self._set_available)
def _stop_recorder(self):
if self.__recorder.is_recording():
self.__recorder.stop_record()
def _on_output_delays_changed(self, widget):
self.__recorder.output_delays = widget.get_active()
self.__gconf_client.set_bool("/apps/gnome15/script_editor/record_delays", self.__recorder.output_delays)
def _on_record(self, widget):
self.__recorder.clear()
self._set_available()
dialog = self.__widget_tree.get_object("RecordDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
if self.__recorder.is_recording():
self.__recorder.stop_record()
if response == gtk.RESPONSE_OK:
i = self._get_insert_index()
            for op, value in self.__recorder.script:
                macro_text = "%s %s" % (self._format_op(op), value)
                self.__macros.insert(i, macro_text)
                i += 1
self._rebuild_model()
def _insert_macro(self, macro_text):
i = self._get_insert_index()
self.__macros.insert(i, macro_text)
self._rebuild_model()
def _on_remove_macro_operations(self, widget):
dialog = self.__widget_tree.get_object("RemoveMacroOperationsDialog")
dialog.set_transient_for(self.__window)
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_OK:
tree_selection = self.__script_tree.get_selection()
_, selected_paths = tree_selection.get_selected_rows()
for p in reversed(selected_paths):
del self.__macros[p[0]]
self._rebuild_model()
def _on_value_edited(self, widget, path, value):
self.__macros[int(path)] = self._format_row(self.__script_model[int(path)], value)
self._rebuild_model()
def _on_select_all_key_operations(self, widget):
self._select_by_op([ "press", "release", "upress", "urelease" ])
def _on_select_all_key_presses(self, widget):
self._select_by_op(["press", "upress" ])
def _on_select_all_key_releases(self, widget):
self._select_by_op(["release", "urelease"])
def _on_select_all_commands(self, widget):
self._select_by_op("execute")
def _on_select_all(self, widget):
self.__script_tree.get_selection().select_all()
def _on_select_all_delays(self, widget):
self._select_by_op("delay")
def _on_macro_operation_cursor_changed(self, widget):
pass
# tree_selection = self.__script_tree.get_selection()
# _, selected_path = tree_selection.get_selected_rows()
# if len(selected_path) == 1:
# selected_index = selected_path[0][0]
# _,val,op,_ = self.__script_model[selected_index]
# print op,val
#
# if op == "press":
# for i in range(selected_index + 1, len(self.__macros)):
# _,row_val,row_op,_ = self.__script_model[i]
# if row_op == "delay":
# self._select_row(i)
# elif row_op == "release" and val == row_val:
# self._select_row(i)
#
# if i + 1 < len(self.__script_model) and \
# self.__script_model[i + 1][2] == "delay":
# self._select_row(i + 1)
#
# break
# elif op == "release":
# if selected_index + 1 < len(self.__script_model) and \
# self.__script_model[selected_index + 1][2] == "delay":
# self._select_row(selected_index + 1)
#
# for i in range(selected_index - 1, 0, -1):
# _,row_val,row_op,_ = self.__script_model[i]
# if row_op == "delay":
# self._select_row(i)
# elif row_op == "press" and val == row_val:
# self._select_row(i)
# break
def _select_by_op(self, show_ops):
tree_selection = self.__script_tree.get_selection()
tree_selection.unselect_all()
for idx, row in enumerate(self.__script_model):
_,_,op,_ = row
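            # `and` binds tighter than `or`: match rows whose op is in the
            # given list, or equal to the single op string.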
if isinstance(show_ops, list) and op in show_ops or op == show_ops:
tree_selection.select_path(self.__script_model.get_path(self.__script_model.get_iter_from_string("%d" % idx)))
def _select_row(self, row):
self.__script_tree.get_selection().select_path(self.__script_model.get_path(self.__script_model.get_iter_from_string("%d" % row)))
if __name__ == "__main__":
me = G15MacroEditor()
if (me.window):
me.window.connect("destroy", gtk.main_quit)
me.window.run()
me.window.hide()
|
madscientist42/gnome15
|
src/gnome15/g15macroeditor.py
|
Python
|
gpl-3.0
| 48,542
|
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.datasets.samples_generator import make_blobs
np.random.seed(42)
# digits = load_digits()
# data = scale(digits.data)
# n_samples, n_features = data.shape
# n_digits = len(np.unique(digits.target))
# labels = digits.target
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
data, labels = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=999)
n_digits = 3
sample_size = 300
# print("n_digits: %d, \t n_samples %d, \t n_features %d"
# % (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
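# Fit the estimator and print wall-clock time, inertia and clustering
# quality metrics computed against the ground-truth labels.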
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
# pca = PCA(n_components=n_digits).fit(data)
# bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
# name="PCA-based",
# data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
# reduced_data = PCA(n_components=2).fit_transform(data)
reduced_data = data
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min(), reduced_data[:, 0].max()
y_min, y_max = reduced_data[:, 1].min(), reduced_data[:, 1].max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on sample blob data\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.savefig("kmeans-demo.png")
plt.clf()
|
CS6780/approx-clustering
|
src/kmeans-demo.py
|
Python
|
mit
| 3,803
|
import os
import platform
import shutil
import stat
import unittest
import unittest.mock
from queue import Queue
from tempfile import mkdtemp
from tests.BearTestHelper import generate_skip_decorator
from bears.vcs.git.GitCommitBear import GitCommitBear
from coala_utils.string_processing.Core import escape
from coalib.misc.Shell import run_shell_command
from coalib.settings.ConfigurationGathering import get_config_directory
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
@generate_skip_decorator(GitCommitBear)
class GitCommitBearTest(unittest.TestCase):
@staticmethod
def run_git_command(*args, stdin=None):
run_shell_command(" ".join(("git",) + args), stdin)
@staticmethod
def git_commit(msg):
# Use stdin mode from git, since -m on Windows cmd does not support
# multiline messages.
GitCommitBearTest.run_git_command("commit",
"--allow-empty",
"--allow-empty-message",
"--file=-",
stdin=msg)
def run_uut(self, *args, **kwargs):
"""
Runs the unit-under-test (via `self.uut.run()`) and collects the
messages of the yielded results as a list.
:param args: Positional arguments to forward to the run function.
:param kwargs: Keyword arguments to forward to the run function.
:return: A list of the message strings.
"""
return list(result.message for result in self.uut.run(*args, **kwargs))
def setUp(self):
self.msg_queue = Queue()
self.section = Section("")
self.uut = GitCommitBear(None, self.section, self.msg_queue)
self._old_cwd = os.getcwd()
self.gitdir = mkdtemp()
os.chdir(self.gitdir)
self.run_git_command("init")
self.run_git_command("config", "user.email coala@coala-analyzer.io")
self.run_git_command("config", "user.name coala")
@staticmethod
def _windows_rmtree_remove_readonly(func, path, excinfo):
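        # Windows leaves files such as git objects read-only; clear the
        # read-only bit and retry the removal that failed.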
os.chmod(path, stat.S_IWRITE)
func(path)
def tearDown(self):
os.chdir(self._old_cwd)
if platform.system() == "Windows":
onerror = self._windows_rmtree_remove_readonly
else:
onerror = None
shutil.rmtree(self.gitdir, onerror=onerror)
def test_check_prerequisites(self):
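        # Temporarily monkey-patch shutil.which to simulate git being
        # absent and then present, restoring the real function afterwards.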
_shutil_which = shutil.which
try:
shutil.which = lambda *args, **kwargs: None
self.assertEqual(GitCommitBear.check_prerequisites(),
"git is not installed.")
shutil.which = lambda *args, **kwargs: "path/to/git"
self.assertTrue(GitCommitBear.check_prerequisites())
finally:
shutil.which = _shutil_which
def test_get_metadata(self):
metadata = GitCommitBear.get_metadata()
self.assertEqual(
metadata.name,
"<Merged signature of 'run', 'check_shortlog', 'check_body'>")
# Test if at least one parameter of each signature is used.
self.assertIn("allow_empty_commit_message", metadata.optional_params)
self.assertIn("shortlog_length", metadata.optional_params)
self.assertIn("body_line_length", metadata.optional_params)
def test_git_failure(self):
# In this case use a reference to a non-existing commit, so just try
# to log all commits on a newly created repository.
self.assertEqual(self.run_uut(), [])
git_error = self.msg_queue.get().message
self.assertEqual(git_error[:4], "git:")
self.assertTrue(self.msg_queue.empty())
def test_empty_message(self):
self.git_commit("")
self.assertEqual(self.run_uut(),
["HEAD commit has no message."])
self.assertTrue(self.msg_queue.empty())
self.assertEqual(self.run_uut(allow_empty_commit_message=True),
[])
self.assertTrue(self.msg_queue.empty())
@unittest.mock.patch("bears.vcs.git.GitCommitBear.run_shell_command",
return_value=("one-liner-message\n", ""))
def test_pure_oneliner_message(self, patch):
self.assertEqual(self.run_uut(), [])
self.assertTrue(self.msg_queue.empty())
def test_shortlog_checks_length(self):
self.git_commit("Commit messages that nearly exceed default limit..")
self.assertEqual(self.run_uut(), [])
self.assertTrue(self.msg_queue.empty())
self.assertEqual(self.run_uut(shortlog_length=17),
["Shortlog of HEAD commit is 33 character(s) "
"longer than the limit (50 > 17)."])
self.assertTrue(self.msg_queue.empty())
self.git_commit("Add a very long shortlog for a bad project history.")
self.assertEqual(self.run_uut(),
["Shortlog of HEAD commit is 1 character(s) longer "
"than the limit (51 > 50)."])
self.assertTrue(self.msg_queue.empty())
def test_shortlog_checks_shortlog_trailing_period(self):
self.git_commit("Shortlog with dot.")
self.assertEqual(self.run_uut(shortlog_trailing_period=True), [])
self.assertEqual(self.run_uut(shortlog_trailing_period=False),
["Shortlog of HEAD commit contains a period at end."])
self.assertEqual(self.run_uut(shortlog_trailing_period=None), [])
self.git_commit("Shortlog without dot")
self.assertEqual(
self.run_uut(shortlog_trailing_period=True),
["Shortlog of HEAD commit contains no period at end."])
self.assertEqual(self.run_uut(shortlog_trailing_period=False), [])
self.assertEqual(self.run_uut(shortlog_trailing_period=None), [])
def test_shortlog_wip_check(self):
self.git_commit("[wip] Shortlog")
self.assertEqual(self.run_uut(shortlog_wip_check=False), [])
self.assertEqual(self.run_uut(shortlog_wip_check=True),
["This commit seems to be marked as work in progress "
"and should not be used in production. Treat "
"carefully."])
self.assertEqual(self.run_uut(shortlog_wip_check=None), [])
self.git_commit("Shortlog as usual")
self.assertEqual(self.run_uut(shortlog_wip_check=True), [])
def test_shortlog_checks_imperative(self):
self.git_commit("tag: Add shortlog in imperative")
self.assertNotIn("Shortlog of HEAD commit isn't imperative mood, "
"bad words are 'Add'",
self.run_uut())
self.git_commit("Added invalid shortlog")
self.assertIn("Shortlog of HEAD commit isn't imperative mood, "
"bad words are 'Added'",
self.run_uut())
self.git_commit("Adding another invalid shortlog")
self.assertIn("Shortlog of HEAD commit isn't imperative mood, "
"bad words are 'Adding'",
self.run_uut())
self.git_commit("Added another invalid shortlog")
self.assertNotIn("Shortlog of HEAD commit isn't imperative mood, "
"bad words are 'Added'",
self.run_uut(shortlog_imperative_check=False))
def test_shortlog_checks_regex(self):
pattern = ".*?: .*[^.]"
self.git_commit("tag: message")
self.assertEqual(self.run_uut(shortlog_regex=pattern), [])
self.git_commit("tag: message invalid.")
self.assertEqual(
self.run_uut(shortlog_regex=pattern),
["Shortlog of HEAD commit does not match given regex."])
self.git_commit("SuCkS cOmPleTely")
self.assertEqual(
self.run_uut(shortlog_regex=pattern),
["Shortlog of HEAD commit does not match given regex."])
# Check for full-matching.
pattern = "abcdefg"
self.git_commit("abcdefg")
self.assertEqual(self.run_uut(shortlog_regex=pattern), [])
self.git_commit("abcdefgNO MATCH")
self.assertEqual(
self.run_uut(shortlog_regex=pattern),
["Shortlog of HEAD commit does not match given regex."])
def test_body_checks(self):
self.git_commit(
"Commits message with a body\n\n"
"nearly exceeding the default length of a body, but not quite. "
"haaaaaands")
self.assertEqual(self.run_uut(), [])
self.assertTrue(self.msg_queue.empty())
self.git_commit("Shortlog only")
self.assertEqual(self.run_uut(), [])
self.assertTrue(self.msg_queue.empty())
# Force a body.
self.git_commit("Shortlog only ...")
self.assertEqual(self.run_uut(force_body=True),
["No commit message body at HEAD."])
self.assertTrue(self.msg_queue.empty())
# Miss a newline between shortlog and body.
self.git_commit("Shortlog\nOops, body too early")
self.assertEqual(self.run_uut(),
["No newline between shortlog and body at HEAD."])
self.assertTrue(self.msg_queue.empty())
# And now too long lines.
self.git_commit("Shortlog\n\n"
"This line is ok.\n"
"This line is by far too long (in this case).\n"
"This one too, blablablablablablablablabla.")
self.assertEqual(self.run_uut(body_line_length=41),
["Body of HEAD commit contains too long lines."])
self.assertTrue(self.msg_queue.empty())
def test_different_path(self):
no_git_dir = mkdtemp()
self.git_commit("Add a very long shortlog for a bad project history.")
os.chdir(no_git_dir)
# When section doesn't have a project_dir
self.assertEqual(self.run_uut(), [])
git_error = self.msg_queue.get().message
self.assertEqual(git_error[:4], "git:")
# when section does have a project_dir
self.section.append(Setting("project_dir", escape(self.gitdir, '\\')))
self.assertEqual(self.run_uut(),
["Shortlog of HEAD commit is 1 character(s) longer "
"than the limit (51 > 50)."])
self.assertEqual(get_config_directory(self.section),
self.gitdir)
os.chdir(self.gitdir)
os.rmdir(no_git_dir)
|
chriscoyfish/coala-bears
|
tests/vcs/git/GitCommitBearTest.py
|
Python
|
agpl-3.0
| 10,610
|
from .core import *
from .fancy import *
from .recurrent import *
from .tensor import *
|
csxeba/ReSkiv
|
brainforge/layers/__init__.py
|
Python
|
gpl-3.0
| 88
|
exec(open('../../../src/scope_manager.py', 'r').read())
t = [0] * 10
output = [0] * 10
def t_map(t):
def λ(x):
return x
t_ta = [0] * 10
s_context()
t_ti = 0
while (t_ti < len(t)):
t_ta[t_ti] = λ(t[t_ti])
t_ti = (t_ti + 1)
r_context()
return t_ta
output = t_map(t)
|
zambonin/UFSC-INE5426
|
test/valid/functional/10.py
|
Python
|
gpl-3.0
| 329
|
# -*- coding: utf-8 -*-
from pyramid.view import view_config
from pyramid.response import Response
import json
from pyramid.renderers import render_to_response
class APIError(Exception):
def __init__(self, code, status, message):
self.code = code
self.status = status
self.message = message
class HTMLError(Exception):
def __init__(self, code, message, description):
self.code = code
self.message = message
self.description = description
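# Pyramid exception views: raising APIError anywhere in view code yields a
# JSON error response, while HTMLError is rendered with the error template.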
@view_config(context=APIError)
def json_exception_view(exc, request):
s = json.dumps({
"status": exc.status,
"message": exc.message,
})
response = Response(s)
response.content_type = "application/json"
response.status_int = exc.code
return response
@view_config(context=HTMLError)
def html_exception_view(exc, request):
response = render_to_response("../templates/error.html", {
"description": exc.description,
"message": exc.message,
}, request)
response.status_int = exc.code
return response
|
ActiDoo/gamification-engine
|
gengine/base/errors.py
|
Python
|
mit
| 1,065
|
import glob
import os
import subprocess
from distutils.core import Command
from setuptools import setup, find_packages
class CheckVersion(Command):
description = 'Confirm that the stored package version is correct'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
        with open('FlowAnal/data/ver') as f:
stored_version = f.read().strip()
git_version = subprocess.check_output(
['git', 'describe', '--tags', '--dirty']).strip()
assert stored_version == git_version
print 'the current version is', stored_version
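# Record the output of `git describe` in the package data file, removing the
# temporary file again if the command fails.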
subprocess.call(
('git describe --tags --dirty > FlowAnal/data/ver.tmp'
'&& mv FlowAnal/data/ver.tmp FlowAnal/data/ver '
'|| rm -f FlowAnal/data/ver.tmp'),
shell=True, stderr=open(os.devnull, "w"))
from FlowAnal import __version__
package_data = glob.glob('data/*')
params = {'author': ['David Ng', 'Daniel Herman'],
'author_email': ['ngdavid@uw.edu', 'hermands@uw.edu'],
'description': 'Analysis of clinical flow cytometry designed for hematopathology',
'name': 'FlowAnal',
'packages': find_packages(),
'package_dir': {'FlowAnal': 'FlowAnal'},
'entry_points': {
'console_scripts': ['runme = FlowAnal.scripts.main:main']
},
'version': __version__,
'package_data': {'FlowAnal': package_data},
'test_suite': 'tests',
'cmdclass': {'check_version': CheckVersion}
}
setup(**params)
|
davidpng/FCS_Database
|
setup.py
|
Python
|
gpl-3.0
| 1,597
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
def params():
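    # Bump up the font sizes, enable LaTeX text rendering and return a dict
    # of plotting keyword arguments (capsize/fmt/ecolor) used elsewhere.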
plotpar = {'axes.labelsize': 18,
'font.size': 18,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
return {'capsize':0, 'fmt':'k.', 'ecolor':'.8'}
class colours(object):
def __init__(self):
self.orange = '#FF9933'
self.lightblue = '#66CCCC'
self.blue = '#0066CC'
self.pink = '#FF33CC'
self.turquoise = '#3399FF'
self.lightgreen = '#99CC99'
self.green = '#009933'
self.maroon = '#CC0066'
self.purple = '#9933FF'
self.red = '#CC0000'
self.lilac = '#CC99FF'
|
RuthAngus/K2rotation
|
code/params.py
|
Python
|
mit
| 777
|
# -*- coding: utf-8 -*-
# ConcreteImplementor 1/2
class DrawingAPI1(object):
def draw_circle(self, x, y, radius):
print('API1.circle at {}:{} radius {}'.format(x, y, radius))
# ConcreteImplementor 2/2
class DrawingAPI2(object):
def draw_circle(self, x, y, radius):
print('API2.circle at {}:{} radius {}'.format(x, y, radius))
# Refined Abstraction
class CircleShape(object):
def __init__(self, x, y, radius, drawing_api):
self._x = x
self._y = y
self._radius = radius
self._drawing_api = drawing_api
# low-level i.e. Implementation specific
def draw(self):
self._drawing_api.draw_circle(self._x, self._y, self._radius)
# high-level i.e. Abstraction specific
def scale(self, pct):
self._radius *= pct
def main():
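    # Client code depends only on the CircleShape abstraction; the concrete
    # drawing API behind it can vary independently.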
shapes = (
CircleShape(1, 2, 3, DrawingAPI1()),
CircleShape(5, 7, 11, DrawingAPI2())
)
for shape in shapes:
shape.scale(2.5)
shape.draw()
if __name__ == '__main__':
main()
|
JiangKlijna/design-pattern
|
BridgePattern/Bridge.py
|
Python
|
apache-2.0
| 1,031
|
from datetime import datetime as dt
import numpy as np
import pytest
from arctic.date import DateRange, CLOSED_OPEN, mktz
from arctic.exceptions import NoDataFoundException
def test_delete(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
},
]
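    # A chunk size of 1 stores each tick in its own chunk, so the date-range
    # delete below removes only part of the written data.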
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
deleted = tickstore_lib.delete('SYM')
assert deleted.deleted_count == 2
with pytest.raises(NoDataFoundException):
tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
# Delete with a date-range
tickstore_lib.write('SYM', DUMMY_DATA)
deleted = tickstore_lib.delete(
'SYM',
DateRange(
dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
dt(2013, 1, 2, tzinfo=mktz('Europe/London'))
)
)
assert deleted.deleted_count == 1
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
def test_delete_daterange(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 2, 1, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
# Delete with a date-range
deleted = tickstore_lib.delete(
'SYM',
DateRange(
dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
dt(2013, 2, 1, tzinfo=mktz('Europe/London')),
CLOSED_OPEN
)
)
assert deleted.deleted_count == 1
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
|
manahl/arctic
|
tests/integration/tickstore/test_ts_delete.py
|
Python
|
lgpl-2.1
| 2,042
|
#!/usr/bin/env python
import re
import os
import sys
from translate import Translator
from tempfile import NamedTemporaryFile
try:
import urllib2 as request
from urllib import quote
except ImportError:
from urllib import request
from urllib.parse import quote
from tk import calc_tk
text = os.environ.get('POPCLIP_TEXT', 'hello world')
destlang = os.environ.get('POPCLIP_OPTION_DESTLANG', 'zh-CN')
ttslang = os.environ.get('POPCLIP_OPTION_TTSLANG', 'en')
tts = os.environ.get('POPCLIP_OPTION_TTS', '1')
translator = Translator(to_lang=destlang)
translation = translator.translate(text.replace('\n', ' '))
if sys.getdefaultencoding() != "utf-8":
translation = translation.encode('utf-8')
sys.stdout.write(translation)
def split_trunks(text):
''' stolen from ``bettertranslate`` '''
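    # The TTS request is sent one trunk at a time, so keep every trunk under
    # 200 characters, splitting on punctuation first and on spaces as a
    # fallback.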
text = text.replace('\n', '')
sentences = re.split(r'([\,\.\;]+)\s*', text)
trunks = []
for idx, sen in enumerate(sentences):
if trunks and len(trunks[-1]) + 1 + len(sen) < 200:
trunks[-1] += (' ' if trunks[-1][-1] in ',.;' else '') + sen
continue
arr = []
tmp = ''
for w in re.split(' ', sen):
if len(tmp) + 1 + len(w) < 200:
tmp += (' ' if tmp else '') + w
else:
arr.append(tmp)
assert len(w) < 200, 'word too long'
tmp = w
arr.append(tmp)
trunks.extend(arr)
return trunks
if tts == '1':
f = NamedTemporaryFile(delete=False)
for text in split_trunks(text):
r = request.Request(
url=('http://translate.google.com/translate_tts'
'?tl=%s&ie=UTF-8&client=t&tk=%s&q=%s') % (ttslang, calc_tk(text), quote(text, '')),
headers={'User-Agent': 'Mozilla/5.0',
'Referer': 'https://translate.google.com/'})
f.write(request.urlopen(r).read())
f.close()
os.system('afplay ' + f.name)
os.unlink(f.name)
|
scturtle/GoodTranslate
|
goodtranslate.py
|
Python
|
unlicense
| 1,981
|
"""Base module for the DLI Reports app.
Author: Logan Gore
This module creates the app and initializes all startup code.
"""
# System imports
import os
import sys
# Flask imports
from flask import Flask
from flask import flash
from flask import redirect
from flask import render_template
from flask import url_for
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_login import current_user
from flask_login import LoginManager
from flask_wtf.csrf import CsrfProtect
# Other imports
from htmlmin.main import minify
ENVIRON_KEYS = [
'DLI_REPORTS_ADMIN_PASSWORD',
'DLI_REPORTS_SITE_URL',
'DLI_REPORTS_DEV_EMAIL',
]
# Check for environment variables. Exit if they are not set properly
for key in ENVIRON_KEYS:
    if key not in os.environ:
        sys.stderr.write('Error! Environment variables not set up properly.\n')
        sys.stderr.write('Missing variable: {}\n'.format(key))
        sys.exit(1)
# Define the web app
sys.stdout.write('Creating Flask app...')
sys.stdout.flush()
app = Flask(__name__)
sys.stdout.write('Done\n')
# Configurations for the app
sys.stdout.write('Loading config from object...')
sys.stdout.flush()
app.config.from_object('config')
sys.stdout.write('Done\n')
# Define the database
sys.stdout.write('Defining SQLAlchemy database...')
sys.stdout.flush()
db = SQLAlchemy(app)
sys.stdout.write('Done\n')
# Create the login manager
sys.stdout.write('Creating login manager...')
sys.stdout.flush()
login_manager = LoginManager(app)
login_manager.login_view = "/auth/login"
sys.stdout.write('Done\n')
# Configure Flask-Mail
sys.stdout.write('Configuring Mail Server...')
sys.stdout.flush()
mail = Mail(app)
sys.stdout.write('Done\n')
# Enable CSRF protection
sys.stdout.write('Enabling CSRF protection...')
sys.stdout.flush()
csrf = CsrfProtect(app)
sys.stdout.write('Done\n')
# Register error handlers
sys.stdout.write('Registering error handlers...')
sys.stdout.flush()
@app.errorhandler(404)
def not_found(error):
"""Render the default 404 template"""
return render_template('404.html', error=error), 404
@app.errorhandler(500)
def server_error(error):
"""Redirect to the bugsplat page"""
return redirect(url_for('admin.bugsplat', error=error))
sys.stdout.write('Done\n')
# Minify sent HTML strings
sys.stdout.write('Loading HTML minifier...')
@app.after_request
def response_minify(response):
"""Minify HTML response to decrease bandwidth"""
if response.content_type == u'text/html; charset=utf-8':
response.set_data(minify(response.get_data(as_text=True)))
return response
sys.stdout.write('Done\n')
# Set up the tracker for users to see where requests are coming from
sys.stdout.write('Loading user request tracking callback...')
@app.after_request
def user_tracking_callback(response):
"""Print out the name of the user that made this request"""
sys.stdout.write('\tFollowing request made by: ')
if current_user.is_authenticated:
sys.stdout.write('{}\n'.format(current_user.email))
else:
sys.stdout.write('Anonymous Guest\n')
return response
sys.stdout.write('Done\n')
# Define form error handler
sys.stdout.write('Creating form error handler...')
sys.stdout.flush()
def flash_form_errors(form):
"""Flash form errors to the user"""
for field, errors in form.errors.items():
for error in errors:
flash(
"%s: %s" % (getattr(form, field).label.text, error),
"alert-danger",
)
sys.stdout.write('Done\n')
# Import all blueprints from controllers
sys.stdout.write('Importing blueprints from controllers...')
sys.stdout.flush()
from dli_app.controllers import mod_default
from dli_app.mod_account.controllers import mod_account
from dli_app.mod_admin.controllers import mod_admin
from dli_app.mod_auth.controllers import mod_auth
from dli_app.mod_reports.controllers import mod_reports
from dli_app.mod_wiki.controllers import mod_wiki
sys.stdout.write('Done\n')
# Register blueprints
sys.stdout.write('Registering blueprint modules...')
sys.stdout.flush()
app.register_blueprint(mod_default)
app.register_blueprint(mod_account)
app.register_blueprint(mod_admin)
app.register_blueprint(mod_auth)
app.register_blueprint(mod_reports)
app.register_blueprint(mod_wiki)
sys.stdout.write('Done\n')
sys.stdout.write('\nApp done loading.\n')
|
gorel/dli-reports
|
dli_app/__init__.py
|
Python
|
mit
| 4,361
|
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple
from unittest import mock
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_add_submessage, do_delete_messages, internal_send_private_message
from zerver.lib.retention import (
archive_messages,
clean_archived_data,
get_realms_and_streams_for_archiving,
move_messages_to_archive,
restore_all_data_from_archive,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import queries_captured
from zerver.lib.upload import create_attachment
from zerver.models import (
ArchivedAttachment,
ArchivedMessage,
ArchivedReaction,
ArchivedSubMessage,
ArchivedUserMessage,
ArchiveTransaction,
Attachment,
Message,
Reaction,
Realm,
Stream,
SubMessage,
UserMessage,
get_realm,
get_stream,
get_system_bot,
get_user_profile_by_email,
)
# Class with helper functions useful for testing archiving of reactions:
from zerver.tests.test_reactions import EmojiReactionBase
from zerver.tornado.event_queue import send_event
ZULIP_REALM_DAYS = 30
MIT_REALM_DAYS = 100
class RetentionTestingBase(ZulipTestCase):
def _get_usermessage_ids(self, message_ids: List[int]) -> List[int]:
return list(UserMessage.objects.filter(message_id__in=message_ids).values_list('id', flat=True))
def _verify_archive_data(self, expected_message_ids: List[int],
expected_usermessage_ids: List[int]) -> None:
self.assertEqual(
set(ArchivedMessage.objects.values_list('id', flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(ArchivedUserMessage.objects.values_list('id', flat=True)),
set(expected_usermessage_ids),
)
# Archived Messages and UserMessages should have been removed from the normal tables:
self.assertEqual(Message.objects.filter(id__in=expected_message_ids).count(), 0)
self.assertEqual(UserMessage.objects.filter(id__in=expected_usermessage_ids).count(), 0)
def _verify_restored_data(self, expected_message_ids: List[int],
expected_usermessage_ids: List[int]) -> None:
# Check that the data was restored:
self.assertEqual(
set(Message.objects.filter(id__in=expected_message_ids).values_list('id', flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(UserMessage.objects.filter(id__in=expected_usermessage_ids).values_list('id', flat=True)),
set(expected_usermessage_ids),
)
# The Messages and UserMessages should still be in the archive - we don't delete them.
self.assertEqual(
set(ArchivedMessage.objects.values_list('id', flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(ArchivedUserMessage.objects.values_list('id', flat=True)),
set(expected_usermessage_ids),
)
class ArchiveMessagesTestingBase(RetentionTestingBase):
def setUp(self) -> None:
super().setUp()
self.zulip_realm = get_realm('zulip')
self.mit_realm = get_realm('zephyr')
self._set_realm_message_retention_value(self.zulip_realm, ZULIP_REALM_DAYS)
self._set_realm_message_retention_value(self.mit_realm, MIT_REALM_DAYS)
# Set publication date of all existing messages to "now", so that we have full
# control over what's expired and what isn't.
Message.objects.all().update(date_sent=timezone_now())
def _set_realm_message_retention_value(self, realm: Realm, retention_period: Optional[int]) -> None:
realm.message_retention_days = retention_period
realm.save()
def _set_stream_message_retention_value(self, stream: Stream, retention_period: Optional[int]) -> None:
stream.message_retention_days = retention_period
stream.save()
def _change_messages_date_sent(self, msgs_ids: List[int], date_sent: datetime) -> None:
Message.objects.filter(id__in=msgs_ids).update(date_sent=date_sent)
def _make_mit_messages(self, message_quantity: int, date_sent: datetime) -> Any:
# send messages from mit.edu realm and change messages pub date
sender = self.mit_user('espuser')
recipient = self.mit_user('starnine')
msg_ids = [self.send_personal_message(sender, recipient)
for i in range(message_quantity)]
self._change_messages_date_sent(msg_ids, date_sent)
return msg_ids
def _send_cross_realm_personal_message(self) -> int:
# Send message from bot to users from different realm.
bot_email = 'notification-bot@zulip.com'
get_user_profile_by_email(bot_email)
zulip_user = self.example_user("hamlet")
msg_id = internal_send_private_message(
realm=self.zulip_realm,
sender=get_system_bot(bot_email),
recipient_user=zulip_user,
content='test message',
)
assert msg_id is not None
return msg_id
def _make_expired_zulip_messages(self, message_quantity: int) -> List[int]:
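        # Backdate a slice of existing Zulip-realm messages past the realm's
        # retention limit so that archive_messages() treats them as expired.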
msg_ids = list(Message.objects.order_by('id').filter(
sender__realm=self.zulip_realm).values_list('id', flat=True)[3:3 + message_quantity])
self._change_messages_date_sent(
msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS+1),
)
return msg_ids
def _send_messages_with_attachments(self) -> Dict[str, int]:
user_profile = self.example_user("hamlet")
sample_size = 10
host = user_profile.realm.host
realm_id = get_realm("zulip").id
dummy_files = [
('zulip.txt', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt', sample_size),
('temp_file.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py', sample_size),
('abc.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py', sample_size),
]
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
self.subscribe(user_profile, "Denmark")
body = ("Some files here ... [zulip.txt](http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)" +
" http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py.... Some more...." +
" http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py").format(id=realm_id, host=host)
expired_message_id = self.send_stream_message(user_profile, "Denmark", body)
actual_message_id = self.send_stream_message(user_profile, "Denmark", body)
othello = self.example_user('othello')
other_message_id = self.send_stream_message(othello, "Denmark", body)
self._change_messages_date_sent([expired_message_id], timezone_now() - timedelta(days=MIT_REALM_DAYS + 1))
return {'expired_message_id': expired_message_id, 'actual_message_id': actual_message_id,
'other_user_message_id': other_message_id}
class TestArchiveMessagesGeneral(ArchiveMessagesTestingBase):
def test_no_expired_messages(self) -> None:
archive_messages()
self.assertEqual(ArchivedUserMessage.objects.count(), 0)
self.assertEqual(ArchivedMessage.objects.count(), 0)
def test_expired_messages_in_each_realm(self) -> None:
"""General test for archiving expired messages properly with
multiple realms involved"""
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS+1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS-1))
# Change some Zulip messages to be expired:
expired_zulip_msg_ids = list(Message.objects.order_by('id').filter(
sender__realm=self.zulip_realm).values_list('id', flat=True)[3:10])
self._change_messages_date_sent(
expired_zulip_msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS+1),
)
expired_msg_ids = expired_mit_msg_ids + expired_zulip_msg_ids
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages()
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
def test_expired_messages_in_one_realm(self) -> None:
"""Test with a retention policy set for only the MIT realm"""
self._set_realm_message_retention_value(self.zulip_realm, None)
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS+1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS-1))
        # Change the date_sent of some Zulip messages; the realm has no retention
        # policy, so they shouldn't get archived.
zulip_msg_ids = list(Message.objects.order_by('id').filter(
sender__realm=self.zulip_realm).values_list('id', flat=True)[3:10])
self._change_messages_date_sent(
zulip_msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS+1),
)
# Only MIT has a retention policy:
expired_msg_ids = expired_mit_msg_ids
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages()
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
self._set_realm_message_retention_value(self.zulip_realm, ZULIP_REALM_DAYS)
def test_different_stream_realm_policies(self) -> None:
verona = get_stream("Verona", self.zulip_realm)
hamlet = self.example_user("hamlet")
msg_id = self.send_stream_message(hamlet, "Verona", "test")
usermsg_ids = self._get_usermessage_ids([msg_id])
self._change_messages_date_sent([msg_id], timezone_now() - timedelta(days=2))
# Don't archive if stream's retention policy set to -1:
self._set_realm_message_retention_value(self.zulip_realm, 1)
self._set_stream_message_retention_value(verona, -1)
archive_messages()
self._verify_archive_data([], [])
# Don't archive if stream and realm have no retention policy:
self._set_realm_message_retention_value(self.zulip_realm, None)
self._set_stream_message_retention_value(verona, None)
archive_messages()
self._verify_archive_data([], [])
# Archive if stream has a retention policy set:
self._set_realm_message_retention_value(self.zulip_realm, None)
self._set_stream_message_retention_value(verona, 1)
archive_messages()
self._verify_archive_data([msg_id], usermsg_ids)
def test_cross_realm_personal_message_archiving(self) -> None:
"""Check that cross-realm personal messages get correctly archived. """
msg_ids = [self._send_cross_realm_personal_message() for i in range(1, 7)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
# Make the message expired on the recipient's realm:
self._change_messages_date_sent(msg_ids, timezone_now() - timedelta(ZULIP_REALM_DAYS+1))
archive_messages()
self._verify_archive_data(msg_ids, usermsg_ids)
def test_archiving_interrupted(self) -> None:
""" Check that queries get rolled back to a consistent state
if archiving gets interrupted in the middle of processing a chunk. """
expired_msg_ids = self._make_expired_zulip_messages(7)
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
# Insert an exception near the end of the archiving process of a chunk:
with mock.patch("zerver.lib.retention.delete_messages", side_effect=Exception):
with self.assertRaises(Exception):
archive_messages(chunk_size=1000) # Specify large chunk_size to ensure things happen in a single batch
# Archiving code has been executed, but because we got an exception, things should have been rolled back:
self._verify_archive_data([], [])
self.assertEqual(
set(Message.objects.filter(id__in=expired_msg_ids).values_list('id', flat=True)),
set(expired_msg_ids),
)
self.assertEqual(
set(UserMessage.objects.filter(id__in=expired_usermsg_ids).values_list('id', flat=True)),
set(expired_usermsg_ids),
)
def test_archive_message_tool(self) -> None:
"""End-to-end test of the archiving tool, directly calling
archive_messages."""
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS+1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS-1))
# Change some Zulip messages to be expired:
expired_zulip_msg_ids = self._make_expired_zulip_messages(7)
expired_crossrealm_msg_id = self._send_cross_realm_personal_message()
# Make the message expired in the recipient's realm:
self._change_messages_date_sent(
[expired_crossrealm_msg_id],
timezone_now() - timedelta(ZULIP_REALM_DAYS+1),
)
expired_msg_ids = expired_mit_msg_ids + expired_zulip_msg_ids + [expired_crossrealm_msg_id]
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages(chunk_size=2) # Specify low chunk_size to test batching.
# Make sure we archived what needed:
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
def test_archiving_attachments(self) -> None:
"""End-to-end test for the logic for archiving attachments. This test
is hard to read without first reading _send_messages_with_attachments"""
msgs_ids = self._send_messages_with_attachments()
# First, confirm deleting the oldest message
# (`expired_message_id`) creates ArchivedAttachment objects
# and associates that message ID with them, but does not
# delete the Attachment object.
archive_messages()
self.assertEqual(ArchivedAttachment.objects.count(), 3)
self.assertEqual(
list(ArchivedAttachment.objects.distinct('messages__id').values_list('messages__id',
flat=True)),
[msgs_ids['expired_message_id']],
)
self.assertEqual(Attachment.objects.count(), 3)
# Now make `actual_message_id` expired too. We still don't
# delete the Attachment objects.
self._change_messages_date_sent([msgs_ids['actual_message_id']],
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1))
archive_messages()
self.assertEqual(Attachment.objects.count(), 3)
# Finally, make the last message mentioning those attachments
# expired. We should now delete the Attachment objects and
# each ArchivedAttachment object should list all 3 messages.
self._change_messages_date_sent([msgs_ids['other_user_message_id']],
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1))
archive_messages()
self.assertEqual(Attachment.objects.count(), 0)
self.assertEqual(ArchivedAttachment.objects.count(), 3)
self.assertEqual(
list(ArchivedAttachment.objects.distinct('messages__id').order_by('messages__id').values_list(
'messages__id', flat=True)),
sorted(msgs_ids.values()),
)
restore_all_data_from_archive()
# Attachments should have been restored:
self.assertEqual(Attachment.objects.count(), 3)
self.assertEqual(ArchivedAttachment.objects.count(), 3) # Archived data doesn't get deleted by restoring.
self.assertEqual(
list(Attachment.objects.distinct('messages__id').order_by('messages__id').values_list(
'messages__id', flat=True)),
sorted(msgs_ids.values()),
)
def test_restoring_and_rearchiving(self) -> None:
expired_msg_ids = self._make_mit_messages(
7,
timezone_now() - timedelta(days=MIT_REALM_DAYS+1),
)
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages(chunk_size=4)
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
transactions = ArchiveTransaction.objects.all()
self.assertEqual(len(transactions), 2) # With chunk_size 4, there should be 2 transactions
restore_all_data_from_archive()
transactions[0].refresh_from_db()
transactions[1].refresh_from_db()
self.assertTrue(transactions[0].restored)
self.assertTrue(transactions[1].restored)
archive_messages(chunk_size=10)
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
transactions = ArchiveTransaction.objects.order_by("id")
self.assertEqual(len(transactions), 3)
archived_messages = ArchivedMessage.objects.filter(id__in=expired_msg_ids)
# Check that the re-archived messages are correctly assigned to the new transaction:
for message in archived_messages:
self.assertEqual(message.archive_transaction_id, transactions[2].id)
class TestArchivingSubMessages(ArchiveMessagesTestingBase):
def test_archiving_submessages(self) -> None:
expired_msg_ids = self._make_expired_zulip_messages(2)
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
do_add_submessage(
realm=self.zulip_realm,
sender_id=cordelia.id,
message_id=expired_msg_ids[0],
msg_type='whatever',
content='{"name": "alice", "salary": 20}',
)
do_add_submessage(
realm=self.zulip_realm,
sender_id=hamlet.id,
message_id=expired_msg_ids[0],
msg_type='whatever',
content='{"name": "john", "salary": 30}',
)
do_add_submessage(
realm=self.zulip_realm,
sender_id=cordelia.id,
message_id=expired_msg_ids[1],
msg_type='whatever',
content='{"name": "jack", "salary": 10}',
)
submessage_ids = list(
SubMessage.objects.filter(message_id__in=expired_msg_ids).values_list('id', flat=True),
)
self.assertEqual(len(submessage_ids), 3)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 3)
archive_messages()
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 0)
self.assertEqual(
set(ArchivedSubMessage.objects.filter(id__in=submessage_ids).values_list('id', flat=True)),
set(submessage_ids),
)
restore_all_data_from_archive()
self.assertEqual(
set(SubMessage.objects.filter(id__in=submessage_ids).values_list('id', flat=True)),
set(submessage_ids),
)
class TestArchivingReactions(ArchiveMessagesTestingBase, EmojiReactionBase):
def test_archiving_reactions(self) -> None:
expired_msg_ids = self._make_expired_zulip_messages(2)
self.post_zulip_reaction(expired_msg_ids[0], 'hamlet')
self.post_zulip_reaction(expired_msg_ids[0], 'cordelia')
self.post_zulip_reaction(expired_msg_ids[1], 'hamlet')
reaction_ids = list(
Reaction.objects.filter(message_id__in=expired_msg_ids).values_list('id', flat=True),
)
self.assertEqual(len(reaction_ids), 3)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 3)
archive_messages()
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 0)
self.assertEqual(
set(ArchivedReaction.objects.filter(id__in=reaction_ids).values_list('id', flat=True)),
set(reaction_ids),
)
restore_all_data_from_archive()
self.assertEqual(
set(Reaction.objects.filter(id__in=reaction_ids).values_list('id', flat=True)),
set(reaction_ids),
)
class MoveMessageToArchiveBase(RetentionTestingBase):
def setUp(self) -> None:
super().setUp()
self.sender = self.example_user('hamlet')
self.recipient = self.example_user('cordelia')
def _create_attachments(self) -> None:
sample_size = 10
realm_id = get_realm("zulip").id
dummy_files = [
('zulip.txt', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt', sample_size),
('temp_file.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py', sample_size),
('abc.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py', sample_size),
('hello.txt', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt', sample_size),
('new.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py', sample_size),
]
user_profile = self.example_user('hamlet')
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
def _assert_archive_empty(self) -> None:
self.assertFalse(ArchivedUserMessage.objects.exists())
self.assertFalse(ArchivedMessage.objects.exists())
self.assertFalse(ArchivedAttachment.objects.exists())
class MoveMessageToArchiveGeneral(MoveMessageToArchiveBase):
def test_personal_messages_archiving(self) -> None:
msg_ids = [self.send_personal_message(self.sender, self.recipient)
for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_move_messages_to_archive_with_realm_argument(self) -> None:
realm = get_realm("zulip")
msg_ids = [self.send_personal_message(self.sender, self.recipient)
for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids, realm=realm)
self._verify_archive_data(msg_ids, usermsg_ids)
archive_transaction = ArchiveTransaction.objects.last()
self.assertEqual(archive_transaction.realm, realm)
def test_stream_messages_archiving(self) -> None:
msg_ids = [self.send_stream_message(self.sender, "Verona")
for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_archiving_messages_second_time(self) -> None:
msg_ids = [self.send_stream_message(self.sender, "Verona")
for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
with self.assertRaises(Message.DoesNotExist):
move_messages_to_archive(message_ids=msg_ids)
def test_archiving_messages_multiple_realms(self) -> None:
"""
Verifies that move_messages_to_archive works correctly
if called on messages in multiple realms.
"""
iago = self.example_user("iago")
othello = self.example_user("othello")
cordelia = self.lear_user("cordelia")
king = self.lear_user("king")
zulip_msg_ids = [self.send_personal_message(iago, othello)
for i in range(0, 3)]
leary_msg_ids = [self.send_personal_message(cordelia, king)
for i in range(0, 3)]
msg_ids = zulip_msg_ids + leary_msg_ids
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_archiving_messages_with_attachment(self) -> None:
self._create_attachments()
realm_id = get_realm("zulip").id
host = get_realm("zulip").host
body1 = """Some files here ...[zulip.txt](
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
""".format(id=realm_id, host=host)
body2 = """Some files here
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt ...
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt ....
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py ....
""".format(id=realm_id, host=host)
msg_ids = [
self.send_personal_message(self.sender, self.recipient, body1),
self.send_personal_message(self.sender, self.recipient, body2),
]
attachment_id_to_message_ids: Dict[int, List[int]] = {}
attachment_ids = list(
Attachment.objects.filter(messages__id__in=msg_ids).values_list("id", flat=True),
)
for attachment_id in attachment_ids:
attachment_id_to_message_ids[attachment_id] = list(
Message.objects.filter(attachment__id=attachment_id).values_list("id", flat=True),
)
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
self.assertFalse(Attachment.objects.exists())
archived_attachment_ids = list(
ArchivedAttachment.objects.filter(messages__id__in=msg_ids).values_list("id", flat=True),
)
self.assertEqual(set(attachment_ids), set(archived_attachment_ids))
for attachment_id in archived_attachment_ids:
self.assertEqual(
set(attachment_id_to_message_ids[attachment_id]),
set(ArchivedMessage.objects.filter(
archivedattachment__id=attachment_id).values_list("id", flat=True)),
)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
restored_attachment_ids = list(
Attachment.objects.filter(messages__id__in=msg_ids).values_list("id", flat=True),
)
self.assertEqual(set(attachment_ids), set(restored_attachment_ids))
for attachment_id in restored_attachment_ids:
self.assertEqual(
set(attachment_id_to_message_ids[attachment_id]),
set(Message.objects.filter(attachment__id=attachment_id).values_list("id", flat=True)),
)
def test_archiving_message_with_shared_attachment(self) -> None:
# Make sure that attachments still in use in other messages don't get deleted:
self._create_attachments()
realm_id = get_realm("zulip").id
host = get_realm("zulip").host
body = """Some files here ...[zulip.txt](
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py ...
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py ....
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt ....
""".format(id=realm_id, host=host)
msg_id = self.send_personal_message(self.sender, self.recipient, body)
# Simulate a reply with the same contents.
reply_msg_id = self.send_personal_message(
from_user=self.recipient,
to_user=self.sender,
content=body,
)
usermsg_ids = self._get_usermessage_ids([msg_id])
attachment_ids = list(
Attachment.objects.filter(messages__id=msg_id).values_list("id", flat=True),
)
self._assert_archive_empty()
# Archive one of the messages:
move_messages_to_archive(message_ids=[msg_id])
self._verify_archive_data([msg_id], usermsg_ids)
# Attachments shouldn't have been deleted, as the second message links to them:
self.assertEqual(Attachment.objects.count(), 5)
self.assertEqual(
set(ArchivedAttachment.objects.filter(messages__id=msg_id).values_list("id", flat=True)),
set(attachment_ids),
)
# Restore the first message:
restore_all_data_from_archive()
# Archive the second:
move_messages_to_archive(message_ids=[reply_msg_id])
        # The restored message still links to the Attachments, so they shouldn't be deleted:
self.assertEqual(Attachment.objects.count(), 5)
# Archive the first message again:
move_messages_to_archive(message_ids=[msg_id])
# Now the attachment should have been deleted:
self.assertEqual(Attachment.objects.count(), 0)
# Restore everything:
restore_all_data_from_archive()
self.assertEqual(
set(Attachment.objects.filter(messages__id=msg_id).values_list("id", flat=True)),
set(attachment_ids),
)
class MoveMessageToArchiveWithSubMessages(MoveMessageToArchiveBase):
def test_archiving_message_with_submessages(self) -> None:
msg_id = self.send_stream_message(self.sender, "Verona")
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
do_add_submessage(
realm=get_realm('zulip'),
sender_id=cordelia.id,
message_id=msg_id,
msg_type='whatever',
content='{"name": "alice", "salary": 20}',
)
do_add_submessage(
realm=get_realm('zulip'),
sender_id=hamlet.id,
message_id=msg_id,
msg_type='whatever',
content='{"name": "john", "salary": 30}',
)
submessage_ids = list(
SubMessage.objects.filter(message_id=msg_id).values_list('id', flat=True),
)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 2)
move_messages_to_archive(message_ids=[msg_id])
self.assertEqual(
set(ArchivedSubMessage.objects.filter(message_id=msg_id).values_list("id", flat=True)),
set(submessage_ids),
)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 0)
restore_all_data_from_archive()
self.assertEqual(
set(SubMessage.objects.filter(id__in=submessage_ids).values_list('id', flat=True)),
set(submessage_ids),
)
class MoveMessageToArchiveWithReactions(MoveMessageToArchiveBase, EmojiReactionBase):
def test_archiving_message_with_reactions(self) -> None:
msg_id = self.send_stream_message(self.sender, "Verona")
self.post_zulip_reaction(msg_id, 'hamlet')
self.post_zulip_reaction(msg_id, 'cordelia')
reaction_ids = list(
Reaction.objects.filter(message_id=msg_id).values_list('id', flat=True),
)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 2)
move_messages_to_archive(message_ids=[msg_id])
self.assertEqual(
set(ArchivedReaction.objects.filter(message_id=msg_id).values_list("id", flat=True)),
set(reaction_ids),
)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 0)
restore_all_data_from_archive()
self.assertEqual(
set(Reaction.objects.filter(id__in=reaction_ids).values_list('id', flat=True)),
set(reaction_ids),
)
class TestCleaningArchive(ArchiveMessagesTestingBase):
def test_clean_archived_data(self) -> None:
self._make_expired_zulip_messages(7)
archive_messages(chunk_size=2) # Small chunk size to have multiple transactions
transactions = list(ArchiveTransaction.objects.all())
for transaction in transactions[0:-1]:
transaction.timestamp = timezone_now() - timedelta(
days=settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS + 1)
transaction.save()
message_ids_to_clean = list(ArchivedMessage.objects.filter(
archive_transaction__in=transactions[0:-1]).values_list('id', flat=True))
clean_archived_data()
remaining_transactions = list(ArchiveTransaction.objects.all())
self.assertEqual(len(remaining_transactions), 1)
# All transactions except the last one were deleted:
self.assertEqual(remaining_transactions[0].id, transactions[-1].id)
# And corresponding ArchivedMessages should have been deleted:
self.assertFalse(ArchivedMessage.objects.filter(id__in=message_ids_to_clean).exists())
self.assertFalse(ArchivedUserMessage.objects.filter(message_id__in=message_ids_to_clean).exists())
for message in ArchivedMessage.objects.all():
self.assertEqual(message.archive_transaction_id, remaining_transactions[0].id)
class TestGetRealmAndStreamsForArchiving(ZulipTestCase):
def fix_ordering_of_result(self, result: List[Tuple[Realm, List[Stream]]]) -> None:
"""
        This is a helper for giving the structure returned by get_realms_and_streams_for_archiving
a consistent ordering.
"""
# Sort the list of tuples by realm id:
result.sort(key=lambda x: x[0].id)
# Now we sort the lists of streams in each tuple:
for realm, streams_list in result:
streams_list.sort(key=lambda stream: stream.id)
def simple_get_realms_and_streams_for_archiving(self) -> List[Tuple[Realm, List[Stream]]]:
"""
This is an implementation of the function we're testing, but using the obvious,
unoptimized algorithm. We can use this for additional verification of correctness,
by comparing the output of the two implementations.
"""
result = []
for realm in Realm.objects.all():
if realm.message_retention_days is not None:
streams = Stream.objects.filter(realm=realm).exclude(message_retention_days=-1)
result.append((realm, list(streams)))
else:
streams = Stream.objects.filter(realm=realm).exclude(message_retention_days__isnull=True) \
.exclude(message_retention_days=-1)
if streams.exists():
result.append((realm, list(streams)))
return result
def test_get_realms_and_streams_for_archiving(self) -> None:
zulip_realm = get_realm("zulip")
zulip_realm.message_retention_days = 10
zulip_realm.save()
verona = get_stream("Verona", zulip_realm)
verona.message_retention_days = -1 # Block archiving for this stream
verona.save()
denmark = get_stream("Denmark", zulip_realm)
denmark.message_retention_days = 1
denmark.save()
zephyr_realm = get_realm("zephyr")
zephyr_realm.message_retention_days = None
zephyr_realm.save()
self.make_stream("normal stream", realm=zephyr_realm)
archiving_blocked_zephyr_stream = self.make_stream("no archiving", realm=zephyr_realm)
archiving_blocked_zephyr_stream.message_retention_days = -1
archiving_blocked_zephyr_stream.save()
archiving_enabled_zephyr_stream = self.make_stream("with archiving", realm=zephyr_realm)
archiving_enabled_zephyr_stream.message_retention_days = 1
archiving_enabled_zephyr_stream.save()
Realm.objects.create(string_id="no_archiving", invite_required=False, message_retention_days=None)
empty_realm_with_archiving = Realm.objects.create(string_id="with_archiving", invite_required=False,
message_retention_days=1)
        # We construct a list representing what the result of get_realms_and_streams_for_archiving
        # should look like. One nuisance is that the ordering of the elements in the result structure
        # is not deterministic, so we use a helper to order both structures in a consistent manner.
        # This wouldn't be necessary if Python had a true "unordered list" data structure. Set
        # doesn't do the job, because it requires elements to be hashable.
expected_result = [
(zulip_realm, list(Stream.objects.filter(realm=zulip_realm).exclude(id=verona.id))),
(zephyr_realm, [archiving_enabled_zephyr_stream]),
(empty_realm_with_archiving, []),
]
self.fix_ordering_of_result(expected_result)
simple_algorithm_result = self.simple_get_realms_and_streams_for_archiving()
self.fix_ordering_of_result(simple_algorithm_result)
result = get_realms_and_streams_for_archiving()
self.fix_ordering_of_result(result)
self.assert_length(result, len(expected_result))
self.assertEqual(result, expected_result)
self.assert_length(result, len(simple_algorithm_result))
self.assertEqual(result, simple_algorithm_result)
class TestDoDeleteMessages(ZulipTestCase):
def test_do_delete_messages_multiple(self) -> None:
realm = get_realm("zulip")
cordelia = self.example_user('cordelia')
message_ids = [self.send_stream_message(cordelia, "Denmark", str(i)) for i in range(0, 10)]
messages = Message.objects.filter(id__in=message_ids)
with queries_captured() as queries:
do_delete_messages(realm, messages)
self.assertFalse(Message.objects.filter(id__in=message_ids).exists())
self.assert_length(queries, 18)
archived_messages = ArchivedMessage.objects.filter(id__in=message_ids)
self.assertEqual(archived_messages.count(), len(message_ids))
self.assertEqual(len({message.archive_transaction_id for message in archived_messages}), 1)
def test_old_event_format_processed_correctly(self) -> None:
"""
do_delete_messages used to send events with users in dict format {"id": <int>}.
        We have a block in process_notification to deal with that old format, which should be
        deleted in a later release. This test is meant to ensure the correctness of that block.
"""
realm = get_realm("zulip")
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
message_id = self.send_personal_message(cordelia, hamlet)
message = Message.objects.get(id=message_id)
event = {
'type': 'delete_message',
'sender': message.sender.email,
'sender_id': message.sender_id,
'message_id': message.id,
'message_type': "private",
'recipient_id': message.recipient_id,
}
move_messages_to_archive([message_id])
        # We only send the event to verify that no exception is thrown - as one would be if the
        # block in process_notification handling this old format of "users to notify" were incorrect.
send_event(realm, event, [{"id": cordelia.id}, {"id": hamlet.id}])
|
shubhamdhama/zulip
|
zerver/tests/test_retention.py
|
Python
|
apache-2.0
| 40,723
|
"""KitnIRC - A Python IRC Bot Framework."""
import logging
from kitnirc import client
from kitnirc import events
from kitnirc import modular
from kitnirc import user
__version__ = "0.3.1"
# Prevents output of "no handler found" if no other log handlers are added
_log = logging.getLogger("kitnirc")
_log.addHandler(logging.NullHandler())
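# A consuming application that wants to see kitnirc's log output would attach
# its own handler; a minimal sketch (only the standard logging module is
# assumed, nothing kitnirc-specific):
#
#     import logging
#     logging.basicConfig(level=logging.INFO)  # kitnirc's logs propagate here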
__all__ = [
"client",
"events",
"modular",
"user",
]
# vim: set ts=4 sts=4 sw=4 et:
|
ayust/kitnirc
|
kitnirc/__init__.py
|
Python
|
mit
| 444
|
# -*- coding: utf-8 -*-
# Originally written by Kevin Breen (@KevTheHermit):
# https://github.com/kevthehermit/RATDecoders/blob/master/BlueBanana.py
import string
from zipfile import ZipFile
from io import StringIO
from Crypto.Cipher import AES
def decrypt_aes(key, data):
    # BlueBanana encrypts its config with AES in ECB mode; make that explicit.
    cipher = AES.new(key, AES.MODE_ECB)
    return cipher.decrypt(data)
def decrypt_conf(conFile):
key1 = '15af8sd4s1c5s511'
key2 = '4e3f5a4c592b243f'
first = decrypt_aes(key1, conFile.decode('hex'))
second = decrypt_aes(key2, first[:-16].decode('hex'))
return second
def extract_config(raw_conf):
conf = {}
    clean = ''.join(x for x in raw_conf if x in string.printable)
    fields = clean.split('<separator>')
conf['Domain'] = fields[0]
conf['Password'] = fields[1]
conf['Port1'] = fields[2]
conf['Port2'] = fields[3]
    if len(fields) > 4:
        conf['InstallName'] = fields[4]
    if len(fields) > 5:
        conf['JarName'] = fields[5]
return conf
def config(data):
new_zip = StringIO(data)
with ZipFile(new_zip) as zip_handle:
for name in zip_handle.namelist():
# This file contains the encrypted config.
if name == 'config.txt':
conf_data = zip_handle.read(name)
if conf_data:
raw_conf = decrypt_conf(conf_data)
conf = extract_config(raw_conf)
return conf
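# Illustrative invocation of the extractor above (the path is invented;
# `config` expects the raw bytes of a BlueBanana sample whose embedded zip
# archive contains config.txt):
#
#     with open('sample.jar', 'rb') as f:
#         print(config(f.read()))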
|
kevthehermit/viper
|
viper/modules/rats/bluebanana.py
|
Python
|
bsd-3-clause
| 1,334
|
from openerp.tests.common import TransactionCase
class TestPropagateDocuments(TransactionCase):
def setUp(self):
super(TestPropagateDocuments, self).setUp()
self.Requisition = self.env['purchase.requisition']
self.requisition = self.Requisition.new({})
self.supplier = self.browse_ref('base.res_partner_1')
self.doc1 = self.browse_ref('purchase_transport_document.CMR')
self.doc2 = self.browse_ref(
'purchase_transport_document.bill_of_lading')
def test_it_propagates_no_documents(self):
order_data = self.Requisition._prepare_purchase_order(self.requisition,
self.supplier)
self.assertFalse(order_data.get('transport_document_ids'))
def test_it_propagates_one_document(self):
self.requisition.transport_document_ids = self.doc1
order_data = self.Requisition._prepare_purchase_order(self.requisition,
self.supplier)
self.assertEqual(
order_data['transport_document_ids'],
[(4, self.doc1.id)]
)
def test_it_propagates_two_documents(self):
self.requisition.transport_document_ids = self.doc1 | self.doc2
order_data = self.Requisition._prepare_purchase_order(self.requisition,
self.supplier)
self.assertEqual(
order_data['transport_document_ids'],
[(4, self.doc1.id), (4, self.doc2.id)]
)
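# Note on the assertions above: in the OpenERP/Odoo ORM, the relational
# command tuple (4, id) means "link the existing record with this id", so
# _prepare_purchase_order is expected to emit one such command per transport
# document carried by the requisition.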
|
SerpentCS/purchase-workflow
|
purchase_requisition_transport_document/tests/test_generate_po.py
|
Python
|
agpl-3.0
| 1,592
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, base64, re
import hashlib
import mimetypes
from frappe.utils import get_site_path, get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint
from frappe import _
from frappe import conf
from copy import copy
import urllib
class MaxFileSizeReachedError(frappe.ValidationError): pass
def get_file_url(file_data_name):
data = frappe.db.get_value("File", file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def upload():
# get record details
dt = frappe.form_dict.doctype
dn = frappe.form_dict.docname
folder = frappe.form_dict.folder
file_url = frappe.form_dict.file_url
filename = frappe.form_dict.filename
is_private = cint(frappe.form_dict.is_private)
if not filename and not file_url:
frappe.msgprint(_("Please select a file or url"),
raise_exception=True)
# save
if frappe.form_dict.filedata:
filedata = save_uploaded(dt, dn, folder, is_private)
elif file_url:
filedata = save_url(file_url, filename, dt, dn, folder, is_private)
comment = {}
if dt and dn:
comment = frappe.get_doc(dt, dn).add_comment("Attachment",
_("Added {0}").format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="icon icon-lock text-warning"></i>' if filedata.is_private else "",
"file_url": filedata.file_url.replace("#", "%23") if filedata.file_name else filedata.file_url,
"is_private": filedata.is_private,
"file_name": filedata.file_name or filedata.file_url
})))
return {
"name": filedata.name,
"file_name": filedata.file_name,
"file_url": filedata.file_url,
"is_private":filedata.is_private,
"comment": comment.as_dict() if comment else {}
}
def save_uploaded(dt, dn, folder, is_private):
	fname, content = get_uploaded_content()
	if content:
		return save_file(fname, content, dt, dn, folder, is_private=is_private)
	else:
		raise Exception("No file content was uploaded")
def save_url(file_url, filename, dt, dn, folder, is_private):
# if not (file_url.startswith("http://") or file_url.startswith("https://")):
# frappe.msgprint("URL must start with 'http://' or 'https://'")
# return None, None
file_url = urllib.unquote(file_url)
f = frappe.get_doc({
"doctype": "File",
"file_url": file_url,
"file_name": filename,
"attached_to_doctype": dt,
"attached_to_name": dn,
"folder": folder,
"is_private": is_private
})
f.flags.ignore_permissions = True
try:
		f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_uploaded_content():
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
frappe.uploaded_filename = frappe.form_dict.filename
return frappe.uploaded_filename, frappe.uploaded_content
else:
frappe.msgprint(_('No file attached'))
return None, None
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, unicode):
filename = unicode(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
# TODO fix this
file_url = save_file(filename, content, doctype, name, decode=True).get("file_url")
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content:
content = re.sub('<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
def save_file(fname, content, dt, dn, folder=None, decode=False, is_private=0):
if decode:
if isinstance(content, unicode):
content = content.encode("utf-8")
if "," in content:
content = content.split(",")[1]
content = base64.b64decode(content)
file_size = check_max_file_size(content)
content_hash = get_content_hash(content)
content_type = mimetypes.guess_type(fname)[0]
fname = get_file_name(fname, content_hash[-6:])
file_data = get_file_data_from_hash(content_hash, is_private=is_private)
if not file_data:
call_hook_method("before_write_file", file_size=file_size)
write_file_method = get_hook_method('write_file', fallback=save_file_on_filesystem)
file_data = write_file_method(fname, content, content_type=content_type, is_private=is_private)
file_data = copy(file_data)
file_data.update({
"doctype": "File",
"attached_to_doctype": dt,
"attached_to_name": dn,
"folder": folder,
"file_size": file_size,
"content_hash": content_hash,
"is_private": is_private
})
f = frappe.get_doc(file_data)
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_file_data_from_hash(content_hash, is_private=0):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s", (content_hash, is_private)):
b = frappe.get_doc('File', name)
return {k:b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def save_file_on_filesystem(fname, content, content_type=None, is_private=0):
fpath = write_file(content, fname, is_private)
if is_private:
file_url = "/private/files/{0}".format(fname)
else:
file_url = "/files/{0}".format(fname)
return {
'file_name': os.path.basename(fpath),
'file_url': file_url
}
def get_max_file_size():
return conf.get('max_file_size') or 10485760
def check_max_file_size(content):
max_file_size = get_max_file_size()
file_size = len(content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def write_file(content, fname, is_private=0):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=is_private)
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
with open(os.path.join(file_path.encode('utf-8'), fname.encode('utf-8')), 'w+') as f:
f.write(content)
return get_files_path(fname, is_private=is_private)
def remove_all(dt, dn):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid, dt, dn)
except Exception, e:
if e.args[0]!=1054: raise # (temp till for patched)
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {"file_url": file_url,
"attached_to_doctype": doctype, "attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid)
def remove_file(fid, attached_to_doctype=None, attached_to_name=None):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def delete_file_data_content(doc, only_thumbnail=False):
method = get_hook_method('delete_file_data_content', fallback=delete_file_from_filesystem)
method(doc, only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(doc, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(doc.thumbnail_url)
else:
delete_file(doc.file_url)
delete_file(doc.thumbnail_url)
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def get_file(fname):
"""Returns [`file_name`, `content`] for given file name `fname`"""
file_path = get_file_path(fname)
# read the file
with open(encode(file_path), 'r') as f:
content = f.read()
return [file_path.rsplit("/", 1)[-1], content]
def get_file_path(file_name):
"""Returns file path from given file name"""
f = frappe.db.sql("""select file_url from `tabFile`
where name=%s or file_name=%s""", (file_name, file_name))
if f:
file_name = f[0][0]
file_path = file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
else:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def get_content_hash(content):
return hashlib.md5(content).hexdigest()
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
n_records = frappe.db.sql("select name from `tabFile` where file_name=%s", fname)
if len(n_records) > 0 or os.path.exists(encode(get_files_path(fname))):
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
return fname
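# Illustrative behaviour of the duplicate-name handling above (values
# invented; the suffix is the last 6 characters of the content hash
# computed in save_file):
#
#     get_file_name("report.pdf", "a1b2c3")
#     # -> "reporta1b2c3.pdf" if "report.pdf" already exists in tabFile
#     #    or on disk, otherwise "report.pdf" unchanged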
|
indautgrp/frappe
|
frappe/utils/file_manager.py
|
Python
|
mit
| 11,027
|
#!/usr/bin/python
# Copyright (c) 2015 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Retrieve facts about available OpenStack instance flavors. By default,
facts about ALL flavors are retrieved. Filters can be applied to get
facts for only matching flavors. For example, you can filter on the
amount of RAM available to the flavor, or the number of virtual CPUs
available to the flavor, or both. When specifying multiple filters,
*ALL* filters must match on a flavor before that flavor is returned as
a fact.
notes:
- This module creates a new top-level C(openstack_flavors) fact, which
contains a list of unsorted flavors.
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- A flavor name. Cannot be used with I(ram) or I(vcpus).
required: false
default: None
ram:
description:
- "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
- "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
- "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
required: false
default: false
vcpus:
description:
- A string used for filtering flavors based on the number of virtual
CPUs desired. Format is the same as the I(ram) parameter.
required: false
default: false
limit:
description:
- Limits the number of flavors returned. All matching flavors are
returned by default.
required: false
default: None
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: dictionary
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
ram=dict(required=False, default=None),
vcpus=dict(required=False, default=None),
limit=dict(required=False, default=None, type='int'),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'ram'],
['name', 'vcpus'],
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
vcpus = module.params['vcpus']
ram = module.params['ram']
limit = module.params['limit']
try:
cloud = shade.openstack_cloud(**module.params)
if name:
flavors = cloud.search_flavors(filters={'name': name})
else:
flavors = cloud.list_flavors()
filters = {}
if vcpus:
filters['vcpus'] = vcpus
if ram:
filters['ram'] = ram
if filters:
flavors = cloud.range_search(flavors, filters)
if limit is not None:
flavors = flavors[:limit]
module.exit_json(changed=False,
ansible_facts=dict(openstack_flavors=flavors))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
ritzk/ansible-modules-extras
|
cloud/openstack/os_flavor_facts.py
|
Python
|
gpl-3.0
| 6,598
|
import zlib as _zlib
from .abc import Codec
from .compat import ndarray_copy, ensure_contiguous_ndarray
class Zlib(Codec):
"""Codec providing compression using zlib via the Python standard library.
Parameters
----------
level : int
Compression level.
"""
codec_id = 'zlib'
def __init__(self, level=1):
self.level = level
def encode(self, buf):
# normalise inputs
buf = ensure_contiguous_ndarray(buf)
# do compression
return _zlib.compress(buf, self.level)
# noinspection PyMethodMayBeStatic
def decode(self, buf, out=None):
# normalise inputs
buf = ensure_contiguous_ndarray(buf)
if out is not None:
out = ensure_contiguous_ndarray(out)
# do decompression
dec = _zlib.decompress(buf)
# handle destination - Python standard library zlib module does not
# support direct decompression into buffer, so we have to copy into
# out if given
return ndarray_copy(dec, out)
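# Minimal round-trip sketch for the codec above. It assumes numpy is
# installed alongside numcodecs; guarded so it only runs when this module
# is executed directly.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    import numpy as np
    codec = Zlib(level=5)
    data = np.arange(100, dtype='i4')
    compressed = codec.encode(data)      # compressed bytes
    restored = codec.decode(compressed)  # decompressed bytes (no out given)
    assert np.frombuffer(restored, dtype='i4').tolist() == data.tolist()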
|
zarr-developers/numcodecs
|
numcodecs/zlib.py
|
Python
|
mit
| 1,052
|
__version__ = '0.1.0'
default_app_config = 'quill.apps.QuillConfig'
|
gsmke/django-quill
|
quill/__init__.py
|
Python
|
bsd-3-clause
| 68
|
from .distances import *
from .staticmaps import *
|
ercas/route_distances
|
route_distances/__init__.py
|
Python
|
mit
| 51
|
# -*- coding: utf-8 -*-
# Copyright (c) 2007 The PIDA Project
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
def any_f():
pass
def any_f2(a):
pass
@any_f2
def dec_f(a):
pass
dec_f = any_f2(dec_f)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
fermat618/pida
|
pida-plugins/python/test_python.py
|
Python
|
gpl-2.0
| 1,271
|
from django.db import models
class Municipality(models.Model):
''' Gemeinde '''
key = models.CharField(max_length=8) # Gemeindeschluessel
name = models.CharField(max_length=200) # Anschrift
city = models.CharField(max_length=200) # Ort
zipcode = models.CharField(max_length=5) # PLZ
street = models.CharField(max_length=200)
lat = models.CharField(max_length=32) # Breitengrad
lng = models.CharField(max_length=32) # Laengengrad
state = models.CharField(max_length=32) # Bundesland
def __unicode__(self):
return self.name
class Meta(object):
ordering = ('key',)
class RegistrationOffice(models.Model):
''' Einwohnermeldeamt '''
name = models.CharField(max_length=200) # Anschrift
city = models.CharField(max_length=200) # Ort
zipcode = models.CharField(max_length=5) # PLZ
street = models.CharField(max_length=200)
lat = models.CharField(max_length=32) # Breitengrad
lng = models.CharField(max_length=32) # Laengengrad
state = models.CharField(max_length=32) # Bundesland
def __unicode__(self):
return self.name
class Meta(object):
ordering = ('city',)
class Zipcode(models.Model):
''' Postleitzahl '''
zipcode = models.CharField(max_length=5)
municipalities = models.ManyToManyField(
Municipality, related_name='zipcodes')
registrationoffices = models.ManyToManyField(
RegistrationOffice, related_name='zipcodes')
def __unicode__(self):
return self.zipcode
class Meta(object):
ordering = ('zipcode',)
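# Illustrative ORM usage for the many-to-many links above (values invented,
# and a configured Django project is assumed):
#
#     z = Zipcode.objects.get(zipcode='10115')
#     z.municipalities.all()        # all Gemeinden sharing this PLZ
#     z.registrationoffices.all()   # all Einwohnermeldeaemter for this PLZ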
|
jbspeakr/datensparsam
|
dtnsprsm/apps/api/models.py
|
Python
|
mit
| 1,599
|
from django.apps import AppConfig
from watson import search as watson
class ItemsAppConfig(AppConfig):
name = "items"
def ready(self):
ItemModel = self.get_model("ItemPost")
watson.register(ItemModel, fields=["title", "booklist_set__bookname", ])
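# With ItemPost registered above, a full-text query would look roughly like
# this (illustrative sketch of django-watson usage; query text invented):
#
#     results = watson.search("some book title")
#     hits = [entry.object for entry in results]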
|
deadlylaid/book_connect
|
wef/items/apps.py
|
Python
|
mit
| 275
|
"""
WSGI config for pjlong_home project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pjlong_home.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
pjlong/pjlong-home
|
pjlong_home/wsgi.py
|
Python
|
mit
| 397
|
class Solution:
    def countAndSay(self, n):
        arr = ["1"]
        for i in range(2, n + 1):
            prev = arr[i - 2]
            res = ""
            count = 1
            # Run-length encode prev: emit "<count><digit>" for each run.
            for j in range(1, len(prev)):
                if prev[j] == prev[j - 1]:
                    count += 1
                else:
                    res += str(count) + prev[j - 1]
                    count = 1
            res += str(count) + prev[-1]
            arr.append(res)
        return arr[n - 1]
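# Reference check (illustrative): the count-and-say sequence begins
# "1", "11", "21", "1211", "111221", ...
if __name__ == '__main__':
    assert Solution().countAndSay(4) == "1211"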
|
zuun77/givemegoogletshirts
|
leetcode/python/38_count-and-say.py
|
Python
|
apache-2.0
| 226
|
""""
Module with an abstract factory class.
"""
from abc import ABCMeta, abstractmethod
from abs_data import AbsData
class AbsFileParserFactory(metaclass=ABCMeta):
""""
Factory class.
"""
@abstractmethod
def create_data(self, identifier: str, content: str) -> AbsData:
"""
Instantiates an object derived from the AbsData class.
"""
pass
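# A hypothetical concrete factory illustrating the contract above
# (TxtData is an invented AbsData subclass, not part of this project):
#
#     class TxtFileParserFactory(AbsFileParserFactory):
#         def create_data(self, identifier: str, content: str) -> AbsData:
#             return TxtData(identifier, content)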
|
fpbfabio/dblp_data_processing
|
abs_file_parser_factory.py
|
Python
|
apache-2.0
| 394
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Giorgio Casaburi and Greg Caporaso"
__copyright__ = "Copyright 2013, The miMAP project"
__credits__ = "Giorgio Casaburi", "Greg Caporaso"
__license__ = "GPL"
__version__ = "0.0.0-dev"
__maintainer__ = "Giorgio Casaburi"
__email__ = "casaburi@ceinge.unina.it"
from shutil import rmtree
from os.path import join, split, splitext, exists
from glob import glob
from tempfile import gettempdir
from pyqi.util import pyqi_system_call, remove_files
from pyqi.core.command import (Command, CommandIn, CommandOut,
ParameterCollection)
class biomtocorediversityanalyses(Command):
    BriefDescription = "This command runs core diversity analyses using a biom table as input (i.e. the output from the fasta_to_closed_reference_otu_picking.py script)"
    LongDescription = "A command for running core diversity analyses in order to obtain alpha and beta diversity from a miRNAs biom table. Alpha diversity is computed with the observed species metric, and beta diversity with the Bray-Curtis metric. THIS CODE IS CURRENTLY UNTESTED. YOU SHOULD NOT USE THIS VERSION OF THE CODE. THIS MESSAGE WILL BE REMOVED WHEN TESTS ARE ADDED."
CommandIns = ParameterCollection([
CommandIn(Name='input_file', DataType=str,
Description='directory containing the input biom table', Required=True),
CommandIn(Name='output_dir', DataType=str,
Description='the path where the output of core diversity analysis should be written', Required=True),
CommandIn(Name='mapping_file', DataType=str,
Description='the path where the mapping file is located', Required=True),
CommandIn(Name='sampling_depth', DataType=int,
Description='Sequencing depth to use for even sub-sampling and maximum rarefaction depth. You should review the output of print_biom_table_summary.py on the miRNAs biom table to decide on this value', Required=True),
CommandIn(Name='jobs_to_start', DataType=int,
Description='the number of jobs you want to run in parallel', Default=1),
CommandIn(Name='category', DataType=str,
Description='The metadata category or categories to compare (i.e. column headers in the mapping file)', Required = False)
])
CommandOuts = ParameterCollection([
CommandOut(Name='status', DataType=str,
Description='the final result'),
CommandOut(Name='error', DataType=str,
Description='the error result')
])
    # Qiime is required to be installed by the user so that every script can be called on the command line within the user's $HOME.
# Scripts included in Qiime
core_diversity_analyses_path = "core_diversity_analyses.py"
# Temporary folder to store the files:
temp_dir = gettempdir()
verbose = True
def run(self, **kwargs):
input_fp = kwargs['input_file']
output_dir = kwargs['output_dir']
#Mapping file
mapping_file_fp = kwargs['mapping_file']
input_mapping_file_pattern = join(mapping_file_fp,'mapping_file.txt')
temp_files_to_remove = []
temp_dirs_to_remove = []
input_filename = split(input_fp)[1]
input_basename = splitext(input_filename)[0]
#Create and call the core_diversity_analysis.py command and run it using a miRNAs biom table
command = "%s -i %s -m %s -e %s -o %s -a -O %s -c %s --suppress_otu_category_significance --nonphylogenetic_diversity" % (self.core_diversity_analyses_path, input_fp, mapping_file_fp, int(kwargs["sampling_depth"]), output_dir, int(kwargs["jobs_to_start"]), str(kwargs["category"]))
if self.verbose:
print command
stdout, stderr, ret_val = pyqi_system_call(command)
if ret_val != 0:
return {"status":ret_val,
"error":stderr}
        # clean up (to do)
        return {"status": "Success", "error": stderr}
CommandConstructor = biomtocorediversityanalyses
|
gregcaporaso/miMAP
|
biom_to_core_diversity_analyses.py
|
Python
|
bsd-3-clause
| 4,207
|
##########################################################################
# Ganga Project. http://cern.ch/ganga
#
# $Id$
##########################################################################
from GangaCore.Core.exceptions import ApplicationConfigurationError
from GangaCore.GPIDev.Base.Proxy import isType
from GangaCore.GPIDev.Schema import Schema, Version, SimpleItem
from GangaCore.GPIDev.Adapters.ISplitter import ISplitter
from GangaCore.GPIDev.Lib.Dataset import GangaDataset
class GangaDatasetSplitter(ISplitter):
""" Split job based on files given in GangaDataset inputdata field """
_name = "GangaDatasetSplitter"
_schema = Schema(Version(1, 0), {
'files_per_subjob': SimpleItem(defvalue=5, doc='the number of files per subjob', typelist=[int]),
'maxFiles': SimpleItem(defvalue=-1,
doc='Maximum number of files to use in a masterjob (None or -1 = all files)',
typelist=[int, None]),
})
def split(self, job):
subjobs = []
if not job.inputdata or not isType(job.inputdata, GangaDataset):
raise ApplicationConfigurationError(
"No GangaDataset given for GangaDatasetSplitter")
# find the full file list
full_list = []
for f in job.inputdata.files:
if f.containsWildcards():
# we have a wildcard so grab the subfiles
for sf in f.getSubFiles(process_wildcards=True):
full_list.append(sf)
else:
# no wildcards so just add the file
full_list.append(f)
if len(full_list) == 0:
raise ApplicationConfigurationError(
"GangaDatasetSplitter couldn't find any files to split over")
masterType = type(job.inputdata)
# split based on all the sub files
fid = 0
subjobs = []
filesToRun = len(full_list)
if not self.maxFiles == -1:
filesToRun = min(self.maxFiles, filesToRun)
while fid < filesToRun:
j = self.createSubjob(job)
j.inputdata = masterType()
j.inputdata.treat_as_inputfiles = job.inputdata.treat_as_inputfiles
for sf in full_list[fid:fid + self.files_per_subjob]:
j.inputdata.files.append(sf)
fid += self.files_per_subjob
subjobs.append(j)
return subjobs
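# Hedged usage sketch from a Ganga session (the Job/GangaDataset/LocalFile
# construction below is illustrative and untested here):
#
#     splitter = GangaDatasetSplitter()
#     splitter.files_per_subjob = 10
#     splitter.maxFiles = 100          # -1 means "use all files"
#     j = Job(splitter=splitter)
#     j.inputdata = GangaDataset(files=[LocalFile('data_*.root')])
#     j.submit()                       # subjobs are created by split()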
|
ganga-devs/ganga
|
ganga/GangaCore/Lib/Splitters/GangaDatasetSplitter.py
|
Python
|
gpl-3.0
| 2,451
|
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import errno
import hashlib
import json
import os
import shutil
import tempfile as sys_tempfile
import unittest
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import TestCase, client
from django.test import override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlquote
from django.utils.six import BytesIO, StringIO
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
MEDIA_ROOT = sys_tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE_CLASSES=())
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super(FileUploadTests, cls).setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super(FileUploadTests, cls).tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
tdir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile
with file(suffix=".file1", dir=tdir) as file1, file(suffix=".file2", dir=tdir) as file2:
file1.write(b'a' * (2 ** 21))
file1.seek(0)
file2.write(b'a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
for key in list(post_data):
try:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()
response = self.client.post('/verify/', post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
payload = client.FakePayload("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
'Content-Type: application/octet-stream',
'Content-Transfer-Encoding: base64',
'']))
payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
payload.write('--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo_content/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['file'], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload(
"Big data" * 68000, encode=base64.encodestring)
def test_unicode_file_name(self):
tdir = sys_tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tdir, True)
# This file contains chinese symbols and an accented char in the name.
with open(os.path.join(tdir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
post_data = {
'file_unicode': file1,
}
response = self.client.post('/unicode_name/', post_data)
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % urlquote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % urlquote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkey business here to
        # construct a malicious payload with an invalid file name (containing
        # os.sep or os.pardir). This is similar to what an attacker would need
        # to do when trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-syle.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = client.FakePayload()
for i, name in enumerate(scary_file_names):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
# The filenames should have been sanitized by the time it got to the view.
received = json.loads(response.content.decode('utf-8'))
for i, name in enumerate(scary_file_names):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = 'f' * 300
cases = [
# field name, filename, expected
('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
('no_extension', long_str, long_str[:255]),
('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
]
payload = client.FakePayload()
for name, filename, _ in cases:
payload.write("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="{}"; filename="{}"',
'Content-Type: application/octet-stream',
'',
'Oops.',
''
]).format(name, filename))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
result = json.loads(response.content.decode('utf-8'))
for name, _, expected in cases:
got = result[name]
self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
self.assertLess(len(got), 256,
"Got a long file name (%s characters)." % len(got))
def test_file_content(self):
tdir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra", dir=tdir) as no_content_type, \
file(suffix=".ctype_extra", dir=tdir) as simple_file:
no_content_type.write(b'no content')
no_content_type.seek(0)
simple_file.write(b'text content')
simple_file.seek(0)
simple_file.content_type = 'text/plain'
string_io = StringIO('string content')
bytes_io = BytesIO(b'binary content')
response = self.client.post('/echo_content/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
'string': string_io,
'binary': bytes_io,
})
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['no_content_type'], 'no content')
self.assertEqual(received['simple_file'], 'text content')
self.assertEqual(received['string'], 'string content')
self.assertEqual(received['binary'], 'binary content')
def test_content_type_extra(self):
"""Uploaded files may have content type parameters available."""
tdir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra", dir=tdir) as no_content_type, file(suffix=".ctype_extra", dir=tdir) as simple_file:
no_content_type.write(b'something')
no_content_type.seek(0)
simple_file.write(b'something')
simple_file.seek(0)
simple_file.content_type = 'text/plain; test-key=test_value'
response = self.client.post('/echo_content_type_extra/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
})
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['no_content_type'], {})
self.assertEqual(received['simple_file'], {'test-key': 'test_value'})
def test_truncated_multipart_handled_gracefully(self):
"""
If passed an incomplete multipart message, MultiPartParser does not
attempt to read beyond the end of the stream, and simply will handle
the part that can be parsed gracefully.
"""
payload_str = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="foo.txt"',
'Content-Type: application/octet-stream',
'',
'file contents'
'--' + client.BOUNDARY + '--',
'',
])
payload = client.FakePayload(payload_str[:-10])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
got = json.loads(self.client.request(**r).content.decode('utf-8'))
self.assertEqual(got, {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
'CONTENT_LENGTH': 0,
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(b''),
}
got = json.loads(self.client.request(**r).content.decode('utf-8'))
self.assertEqual(got, {})
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
smallfile.write(b'a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile.write(b'a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
response = self.client.post('/quota/', {'f': smallfile})
got = json.loads(response.content.decode('utf-8'))
self.assertIn('f', got)
# Large files don't go through.
response = self.client.post("/quota/", {'f': bigfile})
got = json.loads(response.content.decode('utf-8'))
self.assertNotIn('f', got)
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
file.write(b'a' * (2 ** 21))
file.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
self.assertRaises(
AttributeError,
self.client.post,
'/quota/broken/',
{'f': file}
)
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
file1.write(b'a' * (2 ** 23))
file1.seek(0)
file2.write(b'a' * (2 * 2 ** 18))
file2.seek(0)
file2a.write(b'a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/getlist_count/', {
'file1': file1,
'field1': 'test',
'field2': 'test3',
'field3': 'test5',
'field4': 'test6',
'field5': 'test7',
'file2': (file2, file2a)
})
got = json.loads(response.content.decode('utf-8'))
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
def test_fileuploads_closed_at_request_end(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/t/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# Check that the files got actually parsed.
self.assertTrue(hasattr(request, '_files'))
file = request._files['file']
self.assertTrue(file.closed)
files = request._files.getlist('file2')
self.assertTrue(files[0].closed)
self.assertTrue(files[1].closed)
def test_no_parsing_triggered_by_fd_closing(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/f/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# Check that the fd closing logic doesn't trigger parsing of the stream
self.assertFalse(hasattr(request, '_files'))
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
This can happen if something -- i.e. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info)
request.POST # evaluate
return ret
        # Maybe this is a little more complicated than it needs to be; but if
        # the django.test.client.FakePayload.read() implementation changes then
        # this test would fail. So we need to know exactly what kind of error
        # it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload(b'a').read(2)
except Exception as err:
reference_error = err
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
try:
self.client.post('/upload_errors/', post_data)
except reference_error.__class__ as err:
self.assertFalse(
str(err) == str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in file uploads."
)
except Exception as err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
def test_filename_case_preservation(self):
"""
The storage backend shouldn't mess with the case of the filenames
uploaded.
"""
# Synthesize the contents of a file upload with a mixed case filename
# so we don't have to carry such a file in the Django tests source code
# tree.
vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
post_data = [
'--%(boundary)s',
'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
'Content-Type: application/octet-stream',
'',
'file contents\n'
'',
'--%(boundary)s--\r\n',
]
response = self.client.post(
'/filename_case/',
'\r\n'.join(post_data) % vars,
'multipart/form-data; boundary=%(boundary)s' % vars
)
self.assertEqual(response.status_code, 200)
id = int(response.content)
obj = FileModel.objects.get(pk=id)
# The name of the file uploaded and the file stored in the server-side
# shouldn't differ.
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(TestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
@classmethod
def setUpClass(cls):
super(DirectoryCreationTests, cls).setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super(DirectoryCreationTests, cls).tearDownClass()
def setUp(self):
self.obj = FileModel()
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(MEDIA_ROOT, 0o500)
self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
try:
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'))
except OSError as err:
self.assertEqual(err.errno, errno.EACCES)
except Exception:
self.fail("OSError [Errno %s] not raised." % errno.EACCES)
def test_not_a_directory(self):
"""The correct IOError is raised when the upload directory name exists but isn't a directory"""
# Create a file with the upload directory name
open(UPLOAD_TO, 'wb').close()
self.addCleanup(os.remove, UPLOAD_TO)
with self.assertRaises(IOError) as exc_info:
with SimpleUploadedFile('foo.txt', b'x') as file:
self.obj.testfile.save('foo.txt', file)
# The test needs to be done on a specific string as IOError
# is raised even without the patch (just not early enough)
self.assertEqual(exc_info.exception.args[0],
"%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': '1'
}, StringIO('x'), [], 'utf-8')
def test_rfc2231_parsing(self):
test_data = (
(b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***"),
(b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html"),
(b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
|
iambibhas/django
|
tests/file_uploads/tests.py
|
Python
|
bsd-3-clause
| 22,802
|
import sys
from ctypes import create_string_buffer
from ._libsoc import (
BITS_8, BITS_16, BPW_ERROR,
MODE_0, MODE_1, MODE_2, MODE_3, MODE_ERROR, api
)
PY3 = sys.version_info >= (3, 0)
class SPI(object):
def __init__(self, spidev_device, chip_select, mode, speed, bpw):
if not isinstance(spidev_device, int):
raise TypeError('Invalid spi device id must be an "int"')
if not isinstance(chip_select, int):
raise TypeError('Invalid spi chip select must be an "int"')
if mode not in (MODE_0, MODE_1, MODE_2, MODE_3):
raise ValueError('Invalid mode: %d' % mode)
if not isinstance(speed, int):
raise TypeError('Invalid speed must be an "int"')
if bpw not in (BITS_8, BITS_16):
raise ValueError('Invalid bits per word: %d' % bpw)
self.device = spidev_device
self.chip = chip_select
self.mode = mode
self.speed = speed
self.bpw = bpw
self._spi = None
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, traceback):
self.close()
def open(self):
assert self._spi is None
self._spi = api.libsoc_spi_init(self.device, self.chip)
if self._spi == 0:
raise IOError('Unable to open spi device(%d)' % self.device)
self.set_mode(self.mode)
if self.get_mode() != self.mode:
raise IOError('Set mode incorrectly')
self.set_speed(self.speed)
if self.get_speed() != self.speed:
raise IOError('Set speed incorrectly')
self.set_bits_per_word(self.bpw)
if self.get_bits_per_word() != self.bpw:
raise IOError('Set bits per word incorrectly')
def close(self):
if self._spi:
api.libsoc_spi_free(self._spi)
self._spi = None
    @staticmethod
    def set_debug(enabled):
v = 0
if enabled:
v = 1
api.libsoc_set_debug(v)
def set_bits_per_word(self, bpw):
if bpw not in (BITS_8, BITS_16):
raise ValueError('Invalid bits per word: %d' % bpw)
self.bpw = bpw
api.libsoc_spi_set_bits_per_word(self._spi, self.bpw)
def get_bits_per_word(self):
b = api.libsoc_spi_get_bits_per_word(self._spi)
if b == BPW_ERROR:
raise IOError('bits per word not recognized')
return b
def set_mode(self, mode):
assert self._spi is not None
if mode not in (MODE_0, MODE_1, MODE_2, MODE_3):
raise ValueError('Invalid mode: %d' % mode)
self.mode = mode
api.libsoc_spi_set_mode(self._spi, self.mode)
def get_mode(self):
m = api.libsoc_spi_get_mode(self._spi)
if m == MODE_ERROR:
raise IOError('mode not recognized')
return m
def set_speed(self, speed):
if not isinstance(speed, int):
raise TypeError('Invalid speed must be an "int"')
self.speed = speed
api.libsoc_spi_set_speed(self._spi, self.speed)
def get_speed(self):
s = api.libsoc_spi_get_speed(self._spi)
if s == -1:
raise IOError('failed reading speed')
return s
def read(self, num_bytes):
assert num_bytes > 0
buff = create_string_buffer(num_bytes)
if api.libsoc_spi_read(self._spi, buff, num_bytes) == -1:
raise IOError('Error reading spi device')
return buff.raw
def write(self, byte_array):
assert len(byte_array) > 0
if PY3:
buff = bytes(byte_array)
else:
buff = ''.join(map(chr, byte_array))
api.libsoc_spi_write(self._spi, buff, len(buff))
def rw(self, num_bytes, byte_array):
assert num_bytes > 0
assert len(byte_array) > 0
rbuff = create_string_buffer(num_bytes)
if PY3:
wbuff = bytes(byte_array)
else:
wbuff = ''.join(map(chr, byte_array))
if api.libsoc_spi_rw(self._spi, wbuff, rbuff, num_bytes) != 0:
raise IOError('Error rw spi device')
return rbuff.raw
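# Minimal usage sketch (device/chip numbers and the command byte are
# illustrative; MODE_0 and BITS_8 come from ._libsoc as imported above):
#
#     with SPI(spidev_device=0, chip_select=0, mode=MODE_0,
#              speed=1000000, bpw=BITS_8) as spi:
#         spi.write([0x9F])        # e.g. a flash "read JEDEC ID" command
#         response = spi.read(3)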
|
janick/libsoc
|
bindings/python/spi.py
|
Python
|
lgpl-2.1
| 4,256
|
import hashlib
import num.elip as elip
import num.enc as enc
def clockbase():
"""
    256 bit hex: four packed doubles (4 x 16 hex chars) from clock (process time) + time (UTC epoch time)
Note: not enough clock precision on Linuxes to be unique between two immediate calls
"""
from struct import pack
from time import time, clock
return pack('<dddd', clock(), time(), clock(), time()).encode('hex')
def clockrnd():
"""
512 bit int: random delay while hashing data,
return result of 192-1725 time-based hashes.
execution time on 2.8GHz Core2: 1.8-15.7ms
"""
loopcount = 64 + int(hashlib.sha256(clockbase()).hexdigest()[:3], 16)/8 # 64-575 loops, random
hash1 = hash2 = int(clockbase()+clockbase(), 16)
for i in xrange(loopcount):
hash1 ^= int(hashlib.sha512(clockbase() + hashlib.sha512(clockbase()).hexdigest()).hexdigest(), 16)
hash2 ^= int(hashlib.sha512((hex(hash1)) + ('%d' % hash1)).hexdigest(), 16)
return hash1 ^ hash2
def entropy(coords):
"""
512 bit random number from mouse co-ords and timer
"""
hashes = clockrnd()
x = []
y = []
    for coord in coords:
hashes ^= clockrnd()
for char in str(coord[0]):
x.append(char)
for char in str(coord[1]):
y.append(char)
hashes ^= clockrnd()
    mouse = enc.sxor(x, y)
return hashes ^ int(hashlib.sha512(str(mouse)*8).hexdigest(), 16)
def randomKey(entropy):
"""
256 bit number from equally strong urandom, user entropy, and timer parts
"""
if entropy.bit_length() < 250:
print('Insufficient entropy parameter to generate key')
return False
from random import SystemRandom
osrndi = SystemRandom()
entstr = enc.encode(entropy, 16) + enc.encode(osrndi.getrandbits(512), 256) + str(clockrnd())
osrnd = SystemRandom(entstr)
privkey = 0
    while privkey < 1 or privkey >= elip.N:  # valid EC keys lie in [1, N-1]
privkey = enc.decode(hashlib.sha256(enc.encode(osrnd.getrandbits(512), 256)).digest(), 256) ^ osrnd.getrandbits(256)
for lbit in xrange(clockrnd() % 64 + 64):
clockstr = hex(clockrnd()) + str(clockrnd()) + entstr
# Slice a moving 256 bit window out of SHA512
clock32 = hashlib.sha512(clockstr).digest()[1+(lbit % 29): 33+(lbit % 29)]
randhash = hashlib.sha512(enc.encode(osrnd.getrandbits(512), 256)).digest()[0+(lbit % 31): 32+(lbit % 31)]
privkey ^= enc.decode(randhash, 256) ^ enc.decode(clock32, 256) ^ osrndi.getrandbits(256)
osrnd = SystemRandom(hashlib.sha512(clock32 + randhash + entstr).digest()) # reseed
return privkey
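if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: derive a key from
    # synthetic coordinates. Real callers should pass genuinely collected
    # mouse movement; this fixed list only illustrates the call shape.
    fake_coords = [(i * 7 % 311, i * 13 % 479) for i in xrange(64)]
    print(randomKey(entropy(fake_coords)))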
|
inuitwallet/bippy
|
num/rand.py
|
Python
|
mit
| 2,418
|
#!/usr/bin/env python3
import json
import fileinput
import argparse
import os
import time
from batman import batman
from alfred import alfred
from rrddb import rrd
from nodedb import NodeDB
from d3mapbuilder import D3MapBuilder
# Force encoding to UTF-8
import locale # Ensures that subsequent open()s
locale.getpreferredencoding = lambda _=None: 'UTF-8' # are UTF-8 encoded.
import sys
#sys.stdin = open('/dev/stdin', 'r')
#sys.stdout = open('/dev/stdout', 'w')
#sys.stderr = open('/dev/stderr', 'w')
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aliases',
help='read aliases from FILE',
action='append',
metavar='FILE')
parser.add_argument('-m', '--mesh', action='append',
help='batman mesh interface')
parser.add_argument('-A', '--alfred', action='store_true',
help='retrieve aliases from alfred')
parser.add_argument('-d', '--destination-directory', action='store',
                    help='destination directory for generated files', required=True)
args = parser.parse_args()
options = vars(args)
db = NodeDB(int(time.time()))
if options['mesh']:
for mesh_interface in options['mesh']:
bm = batman(mesh_interface)
db.parse_vis_data(bm.vis_data(options['alfred']))
        for gw in bm.gateway_list():
            db.mark_gateways([gw['mac']])
else:
bm = batman()
db.parse_vis_data(bm.vis_data(batadv_vis=True))
for gw in bm.gateway_list():
db.mark_gateways([gw['mac']])
if options['aliases']:
    for aliases in options['aliases']:
        with open(aliases) as f:
            db.import_aliases(json.load(f))
if options['alfred']:
af = alfred()
db.import_aliases(af.aliases())
db.load_state("state.json")
# remove nodes that have been offline for more than 10 days
db.prune_offline(time.time() - 10*86400)
db.add_alfred_versions()
db.dump_state("state.json")
scriptdir = os.path.dirname(os.path.realpath(__file__))
m = D3MapBuilder(db)
# Write nodes json to a temporary file first
with open(options['destination_directory'] + '/nodes.json.new', 'w') as nodes_json:
    nodes_json.write(m.build())
# Move to destination; os.rename() is atomic on POSIX, so readers never see a partial file
os.rename(options['destination_directory'] + '/nodes.json.new',
          options['destination_directory'] + '/nodes.json')
rrd_db = rrd(scriptdir + "/nodedb/", options['destination_directory'] + "/nodes")
rrd_db.update_database(db)
#rrd_db.update_images()
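# --- Usage sketch, not part of the original script ---
# Hedged example invocation; the interface name, alias file, and output
# directory are assumptions:
#
#   ./bat2nodes.py -m bat0 -A -a aliases.json -d /var/www/map
#
# The nodes.json.new-then-os.rename() sequence above is the standard atomic
# replace pattern: rename() within one POSIX filesystem is atomic, so map
# clients always read either the old file or the complete new one.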
|
ff-kbu/ffmap-backend
|
bat2nodes.py
|
Python
|
bsd-3-clause
| 2,385
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for working with django_openstack.
"""
from django import template
from django.conf import settings
register = template.Library()
class SiteBrandingNode(template.Node):
def render(self, context):
return settings.SITE_BRANDING
@register.tag
def site_branding(parser, token):
return SiteBrandingNode()
# TODO(jeffjapan): This is just an assignment tag version of the above, replace
# when the dashboard is upgraded to a django version that
# supports the @assignment_tag decorator syntax instead.
class SaveBrandingNode(template.Node):
def __init__(self, var_name):
self.var_name = var_name
def render(self, context):
context[self.var_name] = settings.SITE_BRANDING
return ""
@register.tag
def save_site_branding(parser, token):
tagname = token.contents.split()
return SaveBrandingNode(tagname[-1])
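# Hedged modernization sketch, not part of the original module: on
# Django >= 1.9, simple_tag supports "{% tag as var %}" assignment natively,
# so the TODO above collapses to a single registration (kept commented out
# to avoid clashing with the tags already registered above):
#
# @register.simple_tag(name='site_branding')
# def site_branding_simple():
#     return settings.SITE_BRANDING
#
# Template usage: {% site_branding %} or {% site_branding as brand %}{{ brand }}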
|
canarie/openstack-dashboard
|
django-openstack/src/django_openstack/templatetags/templatetags/django_openstack_tags.py
|
Python
|
apache-2.0
| 1,694
|
data = [
{
"name": "post_problem",
"url": "/api/problems/",
"method": "post",
"payload": {
"token": "ADMIN@TOKEN",
"title": "problem A",
"score_type": 0
},
"files": {
"pdf": "./api/problem/problem.pdf"
},
"response_status": 200,
"response_data": {
"msg": {"id": 1, "score_type": 0, "title": "problem A", "executes": [], "testdata": [], "verdict": {"execute_type_id": 2, "id": 1, "file_name": "main.cpp",}}
}
},
{
"name": "post_clarification",
"url": "/api/clarifications/",
"method": "post",
"payload": {
"token": "ADMIN@TOKEN",
"problem_id": 1,
"question": "How are you"
},
"response_status": 200,
"response_data":{
"msg": {"reply_type": 0, "user_id": 1, "question": "How are you", "problem_id": 1, "reply": "", "id": 1}
}
},
{
"name": "post_clarification_general",
"url": "/api/clarifications/",
"method": "post",
"payload": {
"token": "ADMIN@TOKEN",
"problem_id": 0,
"question": "How are you"
},
"response_status": 200,
"response_data":{
"msg": {"reply_type": 0, "user_id": 1, "question": "How are you", "problem_id": 0, "reply": "", "id": 2}
}
},
{
"name": "post_clarification_no_exist",
"url": "/api/clarifications/",
"method": "post",
"payload": {
"token": "ADMIN@TOKEN",
"problem_id": 2,
"question": "How are you"
},
"response_status": 404,
"response_data":{
"msg": "Not Found"
}
},
{
"name": "put_clarification_no_reply",
"url": "/api/clarifications/1/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"reply_type": 1,
"reply": ""
},
"response_status": 400,
"response_data":{
"msg": 'value of reply: "" should not be empty value'
}
},
{
"name": "put_clarification",
"url": "/api/clarifications/1/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"reply_type": 1,
"reply": "I'm fine."
},
"response_status": 200,
"response_data":{
"msg": {"problem_id": 1, "user_id": 1, "question": "How are you", "id": 1, "reply_type": 1, "reply": "I'm fine."}
}
},
{
"name": "put_clarification_again",
"url": "/api/clarifications/1/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"reply_type": 1,
"reply": "I'm fine."
},
"response_status": 403,
"response_data":{
"msg": "Permission Denied"
}
},
{
"name": "put_clarification_no_exist",
"url": "/api/clarifications/999/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"reply_type": 1,
"reply": "I'm fine."
},
"response_status": 404,
"response_data":{
"msg": "Not Found"
}
}
]
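# Hedged driver sketch, not part of the original fixture: one way a harness
# could replay these cases. 'base_url', the admin-token substitution, and the
# use of the 'requests' library are assumptions about the surrounding harness.
def run_cases(base_url, admin_token):
    import requests  # local import: the fixture itself needs no dependencies
    for case in data:
        payload = dict(case['payload'], token=admin_token)
        files = {name: open(path, 'rb')
                 for name, path in case.get('files', {}).items()}
        resp = getattr(requests, case['method'])(
            base_url + case['url'], data=payload, files=files)
        assert resp.status_code == case['response_status'], case['name']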
|
Tocknicsu/nctuoj_contest
|
test/api/clarification/clarification.py
|
Python
|
apache-2.0
| 3,334
|
# from datasets.digit.svm import SVM as model
from datasets.digit.dataset import load_data
from datasets.digit.model import SimpleClassifier as model
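# Hedged usage sketch, not part of the original module: the aliases above let
# training code swap classifiers by editing a single import line. The call
# signatures below are assumptions about load_data() and the model class:
#
# from datasets.digit import load_data, model
# train_set, test_set = load_data()
# clf = model()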
|
hoxmark/TDT4501-Specialization-Project
|
reinforcement/datasets/digit/__init__.py
|
Python
|
mit
| 151