repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
cwadding/sensit-python | sensit/http_client/request_handler.py | 2 | 1451 | import urllib
import json
# RequestHandler takes care of encoding the request body into format given by options
class RequestHandler():
    """Encodes a request body into the format named by request['request_type'].

    Supported encodings: 'json', 'form' (the default), and 'raw'.
    """

    @staticmethod
    def renderKey(parents):
        """Render a nested key path as a form-style key.

        For example ['a', 'b', 'c'] becomes 'a[b][c]'.
        """
        rendered = ''
        for position, part in enumerate(parents):
            template = "%s" if position == 0 else "[%s]"
            rendered += template % part
        return rendered

    @staticmethod
    def urlencode(data, parents=None, pairs=None):
        """Flatten nested dicts/lists into a flat dict of form-encoded keys.

        Recurses through `data`, accumulating the key path in `parents` and
        the flattened result in `pairs`.
        """
        pairs = {} if pairs is None else pairs
        parents = [] if parents is None else parents
        if isinstance(data, dict):
            children = data.items()
        elif isinstance(data, list):
            children = enumerate(data)
        else:
            # Leaf value: record it under its rendered key path.
            pairs[RequestHandler.renderKey(parents)] = data
            return pairs
        for key, value in children:
            RequestHandler.urlencode(value, parents + [key], pairs)
        return pairs

    @staticmethod
    def set_body(request):
        """Encode request['data'] in place and set the matching content-type.

        Mutates and returns the same request dict; the 'request_type' key is
        always removed before returning.
        """
        encoding = request.get('request_type', 'form')
        if encoding == 'json':
            # Encoding request body into JSON format
            request['data'] = json.dumps(request['data'])
            request['headers']['content-type'] = 'application/json'
        elif encoding == 'form':
            # Encoding body into form-urlencoded format
            request['data'] = RequestHandler.urlencode(request['data'])
            request['headers']['content-type'] = 'application/x-www-form-urlencoded'
        elif encoding == 'raw':
            # Raw bodies carry no content type.
            request['headers'].pop('content-type', None)
        request.pop('request_type', None)
        return request
| mit |
imsparsh/python-for-android | python-modules/twisted/twisted/conch/ssh/common.py | 56 | 2643 | # -*- test-case-name: twisted.conch.test.test_ssh -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Common functions for the SSH classes.
Maintainer: Paul Swartz
"""
import struct, warnings
try:
from Crypto import Util
except ImportError:
warnings.warn("PyCrypto not installed, but continuing anyways!",
RuntimeWarning)
from twisted.python import randbytes
def NS(t):
    """
    net string: prefix t with its length as a 4-byte big-endian integer.
    """
    length_prefix = struct.pack('!L', len(t))
    return length_prefix + t
def getNS(s, count=1):
    """
    get net string(s): parse `count` length-prefixed strings out of s and
    return them as a tuple, followed by the unparsed remainder of s.
    """
    parsed = []
    offset = 0
    for _ in range(count):
        (length,) = struct.unpack('!L', s[offset:offset + 4])
        parsed.append(s[offset + 4:offset + 4 + length])
        offset += 4 + length
    return tuple(parsed) + (s[offset:],)
def MP(number):
    """Encode a non-negative integer as an SSH mpint: a 4-byte big-endian
    length followed by the minimal big-endian byte representation. Zero is
    encoded as a zero length with no payload.

    NOTE(review): Python 2 only -- byte strings are `str` here and
    Util.number comes from PyCrypto.
    """
    if number==0: return '\000'*4
    assert number>0
    bn = Util.number.long_to_bytes(number)
    # If the high bit is set the value would read as negative in the signed
    # mpint encoding, so prepend a zero byte to keep it positive.
    if ord(bn[0])&128:
        bn = '\000' + bn
    return struct.pack('>L',len(bn)) + bn
def getMP(data, count=1):
    """
    Get multiple precision integer out of the string. A multiple precision
    integer is stored as a 4-byte length followed by length bytes of the
    integer. If count is specified, get count integers out of the string.
    The return value is a tuple of count integers followed by the rest of
    the data.
    """
    mp = []
    c = 0  # running offset into data
    for i in range(count):
        # 4-byte big-endian length prefix, then that many payload bytes.
        length, = struct.unpack('>L',data[c:c+4])
        mp.append(Util.number.bytes_to_long(data[c+4:c+4+length]))
        c += 4 + length
    return tuple(mp) + (data[c:],)
def _MPpow(x, y, z):
    """return the MP version of (x**y)%z

    Uses the three-argument pow for efficient modular exponentiation.
    """
    modexp = pow(x, y, z)
    return MP(modexp)
def ffs(c, s):
    """
    first from second
    goes through the first list, looking for items in the second, returns the first one
    (returns None when nothing matches)
    """
    return next((item for item in c if item in s), None)
# Preserve the pure-Python implementations under *_py names so that
# install() can swap in the gmpy-backed versions while the originals
# remain reachable.
getMP_py = getMP
MP_py = MP
_MPpow_py = _MPpow
pyPow = pow  # the builtin pow, captured before install() replaces it
def _fastgetMP(data, count=1):
    """gmpy-backed drop-in replacement for getMP (same wire format).

    NOTE(review): Python 2 only -- relies on the `long` builtin and gmpy.
    """
    mp = []
    c = 0  # running offset into data
    for i in range(count):
        length = struct.unpack('!L', data[c:c+4])[0]
        # The payload is reversed and a zero byte appended before handing it
        # to gmpy.mpz(..., 256) -- presumably gmpy's base-256 form is
        # little-endian and the trailing zero keeps the value non-negative;
        # verify against the gmpy documentation.
        mp.append(long(gmpy.mpz(data[c + 4:c + 4 + length][::-1] + '\x00', 256)))
        c += length + 4
    return tuple(mp) + (data[c:],)
def _fastMP(i):
    """gmpy-backed drop-in replacement for MP: length-prefixed big-endian
    encoding built by reversing gmpy's binary() output."""
    i2 = gmpy.mpz(i).binary()[::-1]
    return struct.pack('!L', len(i2)) + i2
def _fastMPpow(x, y, z=None):
    """gmpy-backed drop-in replacement for _MPpow: MP-encode pow(x, y, z).

    Uses pyPow (the saved builtin) since install() may have replaced pow.
    """
    r = pyPow(gmpy.mpz(x),y,z).binary()[::-1]
    return struct.pack('!L', len(r)) + r
def _fastpow(x, y, z=None):
    """pow() replacement that promotes the base to a gmpy mpz first."""
    return pyPow(gmpy.mpz(x), y, z)
def install():
    """Swap this module's MP helpers -- and the builtin pow -- for the
    gmpy-backed fast versions. Called only when gmpy imports successfully."""
    global getMP, MP, _MPpow
    getMP = _fastgetMP
    MP = _fastMP
    _MPpow = _fastMPpow
    # Replacing the builtin pow affects every module in the process.
    __builtins__['pow'] = _fastpow # evil evil
# Try to enable the gmpy fast paths; silently fall back to the pure-Python
# implementations when gmpy is not installed.
try:
    import gmpy
    install()
except ImportError:
    pass
| apache-2.0 |
MarshedOut/android_external_skia | tools/skpdiff/skpdiff_server.py | 161 | 24230 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import BaseHTTPServer
import json
import os
import os.path
import re
import subprocess
import sys
import tempfile
import urllib2
# Grab the script path because that is where all the static assets are
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Find the tools directory for python imports
TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
# Find the root of the skia trunk for finding skpdiff binary
SKIA_ROOT_DIR = os.path.dirname(TOOLS_DIR)
# Find the default location of gm expectations
DEFAULT_GM_EXPECTATIONS_DIR = os.path.join(SKIA_ROOT_DIR, 'expectations', 'gm')
# Imports from within Skia
if TOOLS_DIR not in sys.path:
sys.path.append(TOOLS_DIR)
GM_DIR = os.path.join(SKIA_ROOT_DIR, 'gm')
if GM_DIR not in sys.path:
sys.path.append(GM_DIR)
import gm_json
import jsondiff
# A simple dictionary of file name extensions to MIME types. The empty string
# entry is used as the default when no extension was given or if the extension
# has no entry in this dictionary.
MIME_TYPE_MAP = {'': 'application/octet-stream',
'html': 'text/html',
'css': 'text/css',
'png': 'image/png',
'js': 'application/javascript',
'json': 'application/json'
}
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
SKPDIFF_INVOKE_FORMAT = '{} --jsonp=false -o {} -f {} {}'
def get_skpdiff_path(user_path=None):
    """Find the skpdiff binary.
    @param user_path If none, searches in Release and Debug out directories of
       the skia root. If set, checks that the path is a real file and
       returns it.
    """
    # Use the user given path, or try out some good default paths.
    if user_path:
        possible_paths = [user_path]
    else:
        possible_paths = []
        for build_type in ('Release', 'Debug'):
            for binary_name in ('skpdiff', 'skpdiff.exe'):
                possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out',
                                                   build_type, binary_name))
    # Use the first path that actually points to the binary
    skpdiff_path = next((candidate for candidate in possible_paths
                         if os.path.isfile(candidate)), None)
    # If skpdiff was not found, print out diagnostic info for the user.
    if skpdiff_path is None:
        print('Could not find skpdiff binary. Either build it into the ' +
              'default directory, or specify the path on the command line.')
        print('skpdiff paths tried:')
        for possible_path in possible_paths:
            print(' ', possible_path)
    return skpdiff_path
def download_file(url, output_path):
    """Download the file at url and place it in output_path.

    @param url The URL to fetch.
    @param output_path The local file path to write the response body to.
    """
    reader = urllib2.urlopen(url)
    try:
        with open(output_path, 'wb') as writer:
            writer.write(reader.read())
    finally:
        # The original never closed the response, leaking the connection;
        # urllib2 responses are not context managers in Python 2, so close
        # explicitly.
        reader.close()
def download_gm_image(image_name, image_path, hash_val):
    """Download the gm result into the given path.
    @param image_name The GM file name, for example imageblur_gpu.png.
    @param image_path Path to place the image.
    @param hash_val The hash value of the image.
    """
    # Nothing to fetch without a hash.
    if hash_val is None:
        return
    # The test name is the first capture group of the image filename pattern.
    test_name = IMAGE_FILENAME_RE.match(image_name).group(1)
    # Calculate the URL of the requested image
    image_url = gm_json.CreateGmActualUrl(
        test_name, gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_val)
    # Download the image as requested
    download_file(image_url, image_path)
def get_image_set_from_skpdiff(skpdiff_records):
    """Get the set of all images references in the given records.
    @param skpdiff_records An array of records, which are dictionary objects.
    """
    all_paths = set()
    for record in skpdiff_records:
        all_paths.add(record['baselinePath'])
        all_paths.add(record['testPath'])
    return frozenset(all_paths)
def set_expected_hash_in_json(expected_results_json, image_name, hash_value):
    """Set the expected hash for the object extracted from
    expected-results.json. Note that this only work with bitmap-64bitMD5 hash
    types.
    @param expected_results_json The Python dictionary with the results to
       modify.
    @param image_name The name of the image to set the hash of.
    @param hash_value The hash to set for the image.
    """
    expected_results = expected_results_json[gm_json.JSONKEY_EXPECTEDRESULTS]
    if image_name not in expected_results:
        # No entry for this image yet: create one with a single allowed
        # digest of the bitmap-64bitMD5 type.
        digest = [gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_value]
        expected_results[image_name] = {
            gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: [digest]
        }
    else:
        # Overwrite the hash value of the first allowed digest in place.
        digests = expected_results[image_name][
            gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS]
        digests[0][1] = hash_value
def get_head_version(path):
    """Get the version of the file at the given path stored inside the HEAD of
    the git repository. It is returned as a string.
    @param path The path of the file whose HEAD is returned. It is assumed the
       path is inside a git repo rooted at SKIA_ROOT_DIR.
    """
    # git-show will not work with absolute paths. This ensures we give it a path
    # relative to the skia root. This path also has to use forward slashes, even
    # on windows.
    git_path = os.path.relpath(path, SKIA_ROOT_DIR).replace('\\', '/')
    show_cmd = ['git', 'show', 'HEAD:' + git_path]
    git_show_proc = subprocess.Popen(show_cmd, stdout=subprocess.PIPE)
    # When invoked outside a shell, git will output the last committed version
    # of the file directly to stdout.
    stdout_data, _ = git_show_proc.communicate()
    return stdout_data
class GMInstance:
    """Information about a GM test result on a specific device:
    - device_name = the name of the device that rendered it
    - image_name = the GM test name and config
    - expected_hash = the current expected hash value
    - actual_hash = the actual hash value
    - is_rebaselined = True if actual_hash is what is currently in the expected
      results file, False otherwise.
    """
    def __init__(self,
                 device_name, image_name,
                 expected_hash, actual_hash,
                 is_rebaselined):
        # Plain value holder; all attributes are described in the class
        # docstring above. ExpectationsManager later attaches
        # expected_image_path / actual_image_path after downloading images.
        self.device_name = device_name
        self.image_name = image_name
        self.expected_hash = expected_hash
        self.actual_hash = actual_hash
        self.is_rebaselined = is_rebaselined
class ExpectationsManager:
    """Builds and maintains the comparison between expected GM results (from
    git HEAD) and updated results (on disk), downloads the differing images,
    runs skpdiff over them, and applies rebaselines back to the expected
    results files."""
    def __init__(self, expectations_dir, expected_name, updated_name,
                 skpdiff_path):
        """
        @param expectations_dir The directory to traverse for results files.
           This should resemble expectations/gm in the Skia trunk.
        @param expected_name The name of the expected result files. These
           are in the format of expected-results.json.
        @param updated_name The name of the updated expected result files.
           Normally this matches --expectations-filename-output for the
           rebaseline.py tool.
        @param skpdiff_path The path used to execute the skpdiff command.
        """
        self._expectations_dir = expectations_dir
        self._expected_name = expected_name
        self._updated_name = updated_name
        self._skpdiff_path = skpdiff_path
        # Eagerly compute the full comparison (downloads images, runs
        # skpdiff) so that skpdiff_records is ready before serving.
        self._generate_gm_comparison()
    def _generate_gm_comparison(self):
        """Generate all the data needed to compare GMs:
        - determine which GMs changed
        - download the changed images
        - compare them with skpdiff
        """
        # Get the expectations and compare them with actual hashes
        self._get_expectations()
        # Create a temporary file tree that makes sense for skpdiff to operate
        # on. We take the realpath of the new temp directory because some OSs
        # (*cough* osx) put the temp directory behind a symlink that gets
        # resolved later down the pipeline and breaks the image map.
        image_output_dir = os.path.realpath(tempfile.mkdtemp('skpdiff'))
        expected_image_dir = os.path.join(image_output_dir, 'expected')
        actual_image_dir = os.path.join(image_output_dir, 'actual')
        os.mkdir(expected_image_dir)
        os.mkdir(actual_image_dir)
        # Download expected and actual images that differed into the temporary
        # file tree.
        self._download_expectation_images(expected_image_dir, actual_image_dir)
        # Invoke skpdiff with our downloaded images and place its results in the
        # temporary directory.
        self._skpdiff_output_path = os.path.join(image_output_dir,
                                                 'skpdiff_output.json')
        skpdiff_cmd = SKPDIFF_INVOKE_FORMAT.format(self._skpdiff_path,
                                                   self._skpdiff_output_path,
                                                   expected_image_dir,
                                                   actual_image_dir)
        os.system(skpdiff_cmd)
        self._load_skpdiff_output()
    def _get_expectations(self):
        """Fills self._expectations with GMInstance objects for each test whose
        expectation is different between the following two files:
        - the local filesystem's updated results file
        - git's head version of the expected results file
        """
        differ = jsondiff.GMDiffer()
        self._expectations = []
        for root, dirs, files in os.walk(self._expectations_dir):
            for expectation_file in files:
                # There are many files in the expectations directory. We only
                # care about expected results.
                if expectation_file != self._expected_name:
                    continue
                # Get the name of the results file, and be sure there is an
                # updated result to compare against. If there is not, there is
                # no point in diffing this device.
                expected_file_path = os.path.join(root, self._expected_name)
                updated_file_path = os.path.join(root, self._updated_name)
                if not os.path.isfile(updated_file_path):
                    continue
                # Always get the expected results from git because we may have
                # changed them in a previous instance of the server.
                expected_contents = get_head_version(expected_file_path)
                updated_contents = None
                with open(updated_file_path, 'rb') as updated_file:
                    updated_contents = updated_file.read()
                # Read the expected results on disk to determine what we've
                # already rebaselined.
                commited_contents = None
                with open(expected_file_path, 'rb') as expected_file:
                    commited_contents = expected_file.read()
                # Find all expectations that did not match.
                expected_diff = differ.GenerateDiffDictFromStrings(
                    expected_contents,
                    updated_contents)
                # Generate a set of images that have already been rebaselined
                # onto disk.
                rebaselined_diff = differ.GenerateDiffDictFromStrings(
                    expected_contents,
                    commited_contents)
                rebaselined_set = set(rebaselined_diff.keys())
                # The name of the device corresponds to the name of the folder
                # we are in.
                device_name = os.path.basename(root)
                # Store old and new versions of the expectation for each GM
                # NOTE(review): dict.iteritems is Python 2 only.
                for image_name, hashes in expected_diff.iteritems():
                    self._expectations.append(
                        GMInstance(device_name, image_name,
                                   hashes['old'], hashes['new'],
                                   image_name in rebaselined_set))
    def _load_skpdiff_output(self):
        """Loads the results of skpdiff and annotates them with whether they
        have already been rebaselined or not. The resulting data is store in
        self.skpdiff_records."""
        self.skpdiff_records = None
        with open(self._skpdiff_output_path, 'rb') as skpdiff_output_file:
            self.skpdiff_records = json.load(skpdiff_output_file)['records']
        # self.image_map is built by _download_expectation_images and maps an
        # image path to (is_actual, GMInstance); index [1] is the GMInstance.
        for record in self.skpdiff_records:
            record['isRebaselined'] = self.image_map[record['baselinePath']][1].is_rebaselined
    def _download_expectation_images(self, expected_image_dir, actual_image_dir):
        """Download the expected and actual images for the _expectations array.
        @param expected_image_dir The directory to download expected images
           into.
        @param actual_image_dir The directory to download actual images into.
        """
        # Maps a downloaded image path to (is_actual, GMInstance).
        image_map = {}
        # Look through expectations and download their images.
        for expectation in self._expectations:
            # Build appropriate paths to download the images into.
            expected_image_path = os.path.join(expected_image_dir,
                                               expectation.device_name + '-' +
                                               expectation.image_name)
            actual_image_path = os.path.join(actual_image_dir,
                                             expectation.device_name + '-' +
                                             expectation.image_name)
            print('Downloading %s for device %s' % (
                expectation.image_name, expectation.device_name))
            # Download images
            download_gm_image(expectation.image_name,
                              expected_image_path,
                              expectation.expected_hash)
            download_gm_image(expectation.image_name,
                              actual_image_path,
                              expectation.actual_hash)
            # Annotate the expectations with where the images were downloaded
            # to.
            expectation.expected_image_path = expected_image_path
            expectation.actual_image_path = actual_image_path
            # Map the image paths back to the expectations.
            image_map[expected_image_path] = (False, expectation)
            image_map[actual_image_path] = (True, expectation)
        self.image_map = image_map
    def _set_expected_hash(self, device_name, image_name, hash_value):
        """Set the expected hash for the image of the given device. This always
        writes directly to the expected results file of the given device
        @param device_name The name of the device to write the hash to.
        @param image_name The name of the image whose hash to set.
        @param hash_value The value of the hash to set.
        """
        # Retrieve the expected results file as it is in the working tree
        json_path = os.path.join(self._expectations_dir, device_name,
                                 self._expected_name)
        expectations = gm_json.LoadFromFile(json_path)
        # Set the specified hash.
        set_expected_hash_in_json(expectations, image_name, hash_value)
        # Write it out to disk using gm_json to keep the formatting consistent.
        gm_json.WriteToFile(expectations, json_path)
    def commit_rebaselines(self, rebaselines):
        """Sets the expected results file to use the hashes of the images in
        the rebaselines list. If a expected result image is not in rebaselines
        at all, the old hash will be used.
        @param rebaselines A list of image paths to use the hash of.
        """
        # Reset all expectations to their old hashes because some of them may
        # have been set to the new hash by a previous call to this function.
        for expectation in self._expectations:
            expectation.is_rebaselined = False
            self._set_expected_hash(expectation.device_name,
                                    expectation.image_name,
                                    expectation.expected_hash)
        # Take all the images to rebaseline
        for image_path in rebaselines:
            # Get the metadata about the image at the path.
            is_actual, expectation = self.image_map[image_path]
            expectation.is_rebaselined = is_actual
            # Rebaselining an actual image adopts its hash; rebaselining an
            # expected image keeps the old hash.
            expectation_hash = expectation.actual_hash if is_actual else \
                expectation.expected_hash
            # Write out that image's hash directly to the expected results file.
            self._set_expected_hash(expectation.device_name,
                                    expectation.image_name,
                                    expectation_hash)
        # Refresh the served records so isRebaselined reflects this commit.
        self._load_skpdiff_output()
class SkPDiffHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that serves the viewer's static assets, the in-memory
    skpdiff records, and the whitelisted GM images, plus the
    commit_rebaselines POST endpoint."""
    def send_file(self, file_path):
        """Send the file at file_path with a MIME type guessed from its
        extension, or a 404 if the file does not exist."""
        # Grab the extension if there is one
        extension = os.path.splitext(file_path)[1]
        if len(extension) >= 1:
            extension = extension[1:]
        # Determine the MIME type of the file from its extension
        mime_type = MIME_TYPE_MAP.get(extension, MIME_TYPE_MAP[''])
        # Open the file and send it over HTTP
        if os.path.isfile(file_path):
            with open(file_path, 'rb') as sending_file:
                self.send_response(200)
                self.send_header('Content-type', mime_type)
                self.end_headers()
                self.wfile.write(sending_file.read())
        else:
            self.send_error(404)
    def serve_if_in_dir(self, dir_path, file_path):
        """Serve file_path only if it resolves to a file inside dir_path;
        returns True if the file was served."""
        # Determine if the file exists relative to the given dir_path AND exists
        # under the dir_path. This is to prevent accidentally serving files
        # outside the directory intended using symlinks, or '../'.
        real_path = os.path.normpath(os.path.join(dir_path, file_path))
        if os.path.commonprefix([real_path, dir_path]) == dir_path:
            if os.path.isfile(real_path):
                self.send_file(real_path)
                return True
        return False
    def do_GET(self):
        """Serve viewer assets, the computed skpdiff records, or whitelisted
        GM images."""
        # Simple rewrite rule of the root path to 'viewer.html'
        if self.path == '' or self.path == '/':
            self.path = '/viewer.html'
        # The [1:] chops off the leading '/'
        file_path = self.path[1:]
        # Handle skpdiff_output.json manually because it was processed by the
        # server when it was started and does not exist as a file.
        if file_path == 'skpdiff_output.json':
            self.send_response(200)
            self.send_header('Content-type', MIME_TYPE_MAP['json'])
            self.end_headers()
            # Add JSONP padding to the JSON because the web page expects it. It
            # expects it because it was designed to run with or without a web
            # server. Without a web server, the only way to load JSON is with
            # JSONP.
            skpdiff_records = self.server.expectations_manager.skpdiff_records
            self.wfile.write('var SkPDiffRecords = ')
            json.dump({'records': skpdiff_records}, self.wfile)
            self.wfile.write(';')
            return
        # Attempt to send static asset files first.
        if self.serve_if_in_dir(SCRIPT_DIR, file_path):
            return
        # WARNING: Serving any file the user wants is incredibly insecure. Its
        # redeeming quality is that we only serve gm files on a white list.
        if self.path in self.server.image_set:
            self.send_file(self.path)
            return
        # If no file to send was found, just give the standard 404
        self.send_error(404)
    def do_POST(self):
        """Handle the commit_rebaselines RPC posted by the viewer page."""
        if self.path == '/commit_rebaselines':
            content_length = int(self.headers['Content-length'])
            request_data = json.loads(self.rfile.read(content_length))
            rebaselines = request_data['rebaselines']
            self.server.expectations_manager.commit_rebaselines(rebaselines)
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write('{"success":true}')
            return
        # If we have no handler for this path, give 'em the 404
        self.send_error(404)
def run_server(expectations_manager, port=8080):
    """Start a blocking HTTP server on localhost serving the skpdiff viewer.

    @param expectations_manager ExpectationsManager whose skpdiff records and
       images are served and rebaselined.
    @param port Local TCP port to listen on.
    """
    # It's important to parse the results file so that we can make a set of
    # images that the web page might request.
    skpdiff_records = expectations_manager.skpdiff_records
    image_set = get_image_set_from_skpdiff(skpdiff_records)
    # Do not bind to interfaces other than localhost because the server will
    # attempt to serve files relative to the root directory as a last resort
    # before 404ing. This means all of your files can be accessed from this
    # server, so DO NOT let this server listen to anything but localhost.
    server_address = ('127.0.0.1', port)
    http_server = BaseHTTPServer.HTTPServer(server_address, SkPDiffHandler)
    # The handler reads these attributes off the server instance.
    http_server.image_set = image_set
    http_server.expectations_manager = expectations_manager
    print('Navigate thine browser to: http://{}:{}/'.format(*server_address))
    http_server.serve_forever()
def main():
    """Parse command-line options, locate the skpdiff binary, build the
    expectations comparison, and serve the viewer until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', metavar='PORT',
                        type=int,
                        default=8080,
                        help='port to bind the server to; ' +
                             'defaults to %(default)s',
                        )
    parser.add_argument('--expectations-dir', metavar='EXPECTATIONS_DIR',
                        default=DEFAULT_GM_EXPECTATIONS_DIR,
                        help='path to the gm expectations; ' +
                             'defaults to %(default)s'
                        )
    parser.add_argument('--expected',
                        metavar='EXPECTATIONS_FILE_NAME',
                        default='expected-results.json',
                        help='the file name of the expectations JSON; ' +
                             'defaults to %(default)s'
                        )
    parser.add_argument('--updated',
                        metavar='UPDATED_FILE_NAME',
                        default='updated-results.json',
                        help='the file name of the updated expectations JSON;' +
                             ' defaults to %(default)s'
                        )
    parser.add_argument('--skpdiff-path', metavar='SKPDIFF_PATH',
                        default=None,
                        help='the path to the skpdiff binary to use; ' +
                             'defaults to out/Release/skpdiff or out/Default/skpdiff'
                        )
    args = vars(parser.parse_args())  # Convert args into a python dict
    # Make sure we have access to an skpdiff binary
    skpdiff_path = get_skpdiff_path(args['skpdiff_path'])
    if skpdiff_path is None:
        sys.exit(1)
    # Print out the paths of things for easier debugging
    print('script dir :', SCRIPT_DIR)
    print('tools dir :', TOOLS_DIR)
    print('root dir :', SKIA_ROOT_DIR)
    print('expectations dir :', args['expectations_dir'])
    print('skpdiff path :', skpdiff_path)
    expectations_manager = ExpectationsManager(args['expectations_dir'],
                                               args['expected'],
                                               args['updated'],
                                               skpdiff_path)
    run_server(expectations_manager, port=args['port'])

if __name__ == '__main__':
    main()
| bsd-3-clause |
ahmadio/edx-platform | common/djangoapps/student/tests/test_login.py | 55 | 25107 | '''
Tests for student activation and login
'''
import json
import unittest
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponseBadRequest, HttpResponse
import httpretty
from mock import patch
from social.apps.django_app.default.models import UserSocialAuth
from external_auth.models import ExternalAuthMap
from student.tests.factories import UserFactory, RegistrationFactory, UserProfileFactory
from student.views import login_oauth_token
from third_party_auth.tests.utils import (
ThirdPartyOAuthTestMixin,
ThirdPartyOAuthTestMixinFacebook,
ThirdPartyOAuthTestMixinGoogle
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LoginTest(TestCase):
'''
Test student.views.login_user() view
'''
def setUp(self):
super(LoginTest, self).setUp()
# Create one user and save it to the database
self.user = UserFactory.build(username='test', email='test@edx.org')
self.user.set_password('test_password')
self.user.save()
# Create a registration for the user
RegistrationFactory(user=self.user)
# Create a profile for the user
UserProfileFactory(user=self.user)
# Create the test client
self.client = Client()
cache.clear()
# Store the login url
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response('test@edx.org', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', u'test@edx.org'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response('test@edx.org', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test@edx.org'])
def test_login_success_unicode_email(self):
unicode_email = u'test' + unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(unicode_email, 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_login_fail_no_user_exists(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
def test_login_fail_incorrect_email_with_advanced_security(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response('test@edx.org', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'test@edx.org', u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response('test@edx.org', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test@edx.org'])
def test_login_not_activated(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_unicode_email(self):
unicode_email = u'test@edx.org' + unichr(40960)
response, mock_audit_log = self._login_response(unicode_email, 'test_password')
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = u'test_password' + unichr(1972)
response, mock_audit_log = self._login_response('test@edx.org', unicode_password)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'test@edx.org', u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
# Check that the version is set
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
# Check that the username and email are set
self.assertEqual(user_info["username"], self.user.username)
self.assertEqual(user_info["email"], self.user.email)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
self.assertRedirects(response, "/")
    @patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
    def test_logout_logging_no_pii(self):
        """With PII squelching enabled, the logout audit entry must not
        contain the username ('test')."""
        response, _ = self._login_response('test@edx.org', 'test_password')
        self._assert_response(response, success=True)
        logout_url = reverse('logout')
        with patch('student.models.AUDIT_LOG') as mock_audit_log:
            response = self.client.post(logout_url)
        self.assertEqual(response.status_code, 302)
        self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
        self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
    def test_login_ratelimited_success(self):
        """Failed attempts below the rate limit must not block a later
        valid login."""
        # Try (and fail) logging in with fewer attempts than the limit of 30
        # and verify that you can still successfully log in afterwards.
        for i in xrange(20):
            password = u'test_password{0}'.format(i)
            response, _audit_log = self._login_response('test@edx.org', password)
            self._assert_response(response, success=False)
        # now try logging in with a valid password
        response, _audit_log = self._login_response('test@edx.org', 'test_password')
        self._assert_response(response, success=True)
    def test_login_ratelimited(self):
        """Exceeding the failed-attempt limit must rate-limit further logins."""
        # try logging in 30 times, the default limit in the number of failed
        # login attempts in one 5 minute period before the rate gets limited
        for i in xrange(30):
            password = u'test_password{0}'.format(i)
            self._login_response('test@edx.org', password)
        # check to see if this response indicates that this was ratelimited
        response, _audit_log = self._login_response('test@edx.org', 'wrong_password')
        self._assert_response(response, success=False, value='Too many failed login attempts')
    @patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
    def test_single_session(self):
        """With PREVENT_CONCURRENT_LOGINS, a second login must invalidate
        the first client's session."""
        creds = {'email': 'test@edx.org', 'password': 'test_password'}
        client1 = Client()
        client2 = Client()
        response = client1.post(self.url, creds)
        self._assert_response(response, success=True)
        # Reload the user from the database
        self.user = UserFactory.FACTORY_FOR.objects.get(pk=self.user.pk)
        # The active session key is recorded on the user profile's metadata.
        self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
        # second login should log out the first
        response = client2.post(self.url, creds)
        self._assert_response(response, success=True)
        try:
            # this test can be run with either lms or studio settings
            # since studio does not have a dashboard url, we should
            # look for another url that is login_required, in that case
            url = reverse('dashboard')
        except NoReverseMatch:
            url = reverse('upload_transcripts')
        response = client1.get(url)
        # client1 will be logged out
        self.assertEqual(response.status_code, 302)
    @patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
    def test_single_session_with_url_not_having_login_required_decorator(self):
        """Concurrent-session enforcement must also apply to views without
        the @login_required decorator (e.g. logout)."""
        # accessing logout url as it does not have login-required decorator it will avoid redirect
        # and go inside the enforce_single_login
        creds = {'email': 'test@edx.org', 'password': 'test_password'}
        client1 = Client()
        client2 = Client()
        response = client1.post(self.url, creds)
        self._assert_response(response, success=True)
        self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
        # second login should log out the first
        response = client2.post(self.url, creds)
        self._assert_response(response, success=True)
        url = reverse('logout')
        response = client1.get(url)
        self.assertEqual(response.status_code, 302)
    def test_change_enrollment_400(self):
        """
        Tests that a 400 in change_enrollment doesn't lead to a 404
        and in fact just logs in the user without incident
        """
        # add this post param to trigger a call to change_enrollment
        extra_post_params = {"enrollment_action": "enroll"}
        with patch('student.views.change_enrollment') as mock_change_enrollment:
            mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
            response, _ = self._login_response(
                'test@edx.org',
                'test_password',
                extra_post_params=extra_post_params,
            )
        # Login still succeeds; the failed enrollment only suppresses the
        # post-login redirect.
        response_content = json.loads(response.content)
        self.assertIsNone(response_content["redirect_url"])
        self._assert_response(response, success=True)
    def test_change_enrollment_200_no_redirect(self):
        """
        Tests "redirect_url" is None if change_enrollment returns a HttpResponse
        with no content
        """
        # add this post param to trigger a call to change_enrollment
        extra_post_params = {"enrollment_action": "enroll"}
        with patch('student.views.change_enrollment') as mock_change_enrollment:
            # An empty 200 response carries no redirect target.
            mock_change_enrollment.return_value = HttpResponse()
            response, _ = self._login_response(
                'test@edx.org',
                'test_password',
                extra_post_params=extra_post_params,
            )
        response_content = json.loads(response.content)
        self.assertIsNone(response_content["redirect_url"])
        self._assert_response(response, success=True)
    def _login_response(self, email, password, patched_audit_log='student.views.AUDIT_LOG', extra_post_params=None):
        """
        Post the login info and return ``(response, mock_audit_log)``.

        ``patched_audit_log`` names the AUDIT_LOG object that is mocked while
        the request runs, so callers can assert on logging behaviour.
        ``extra_post_params`` (dict) is merged into the POST body if given.
        """
        post_params = {'email': email, 'password': password}
        if extra_post_params is not None:
            post_params.update(extra_post_params)
        with patch(patched_audit_log) as mock_audit_log:
            result = self.client.post(self.url, post_params)
        return result, mock_audit_log
    def _assert_response(self, response, success=None, value=None):
        '''
        Assert that the response had status 200 and returned a valid
        JSON-parseable dict.
        If success is provided, assert that the response had that
        value for 'success' in the JSON dict.
        If value is provided, assert that the response contained that
        value for 'value' in the JSON dict (substring match).
        '''
        self.assertEqual(response.status_code, 200)
        try:
            response_dict = json.loads(response.content)
        except ValueError:
            self.fail("Could not parse response content as JSON: %s"
                      % str(response.content))
        if success is not None:
            self.assertEqual(response_dict['success'], success)
        if value is not None:
            msg = ("'%s' did not contain '%s'" %
                   (str(response_dict['value']), str(value)))
            self.assertTrue(value in response_dict['value'], msg)
    def _assert_audit_log(self, mock_audit_log, level, log_strings):
        """
        Check that the audit log has received the expected call as its last call.
        Only the format string (first positional argument) is inspected.
        """
        method_calls = mock_audit_log.method_calls
        name, args, _kwargs = method_calls[-1]
        self.assertEquals(name, level)
        self.assertEquals(len(args), 1)
        format_string = args[0]
        for log_string in log_strings:
            self.assertIn(log_string, format_string)
    def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
        """
        Check that none of the given strings appear in the audit log's
        last call (the original docstring was copy-pasted from
        _assert_audit_log and described the opposite behaviour).
        """
        method_calls = mock_audit_log.method_calls
        name, args, _kwargs = method_calls[-1]
        self.assertEquals(name, level)
        self.assertEquals(len(args), 1)
        format_string = args[0]
        for log_string in log_strings:
            self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
    """
    Tests how login_user() interacts with ExternalAuth, in particular Shib
    """
    def setUp(self):
        """Create one open course, one Shib-gated course, and two users:
        one with an ExternalAuthMap (Shib) and one without."""
        super(ExternalAuthShibTest, self).setUp()
        # NOTE(review): self.user appears to be created by the base
        # ModuleStoreTestCase.setUp() -- confirm against the base class.
        self.course = CourseFactory.create(
            org='Stanford',
            number='456',
            display_name='NO SHIB',
            user_id=self.user.id,
        )
        # enrollment_domain marks this course as requiring Shib auth.
        self.shib_course = CourseFactory.create(
            org='Stanford',
            number='123',
            display_name='Shib Only',
            enrollment_domain='shib:https://idp.stanford.edu/',
            user_id=self.user.id,
        )
        self.user_w_map = UserFactory.create(email='withmap@stanford.edu')
        self.extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                       external_email='withmap@stanford.edu',
                                       external_domain='shib:https://idp.stanford.edu/',
                                       external_credentials="",
                                       user=self.user_w_map)
        self.user_w_map.save()
        self.extauth.save()
        self.user_wo_map = UserFactory.create(email='womap@gmail.com')
        self.user_wo_map.save()
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_login_page_redirect(self):
        """
        Tests that when a shib user types their email address into the login page, they get redirected
        to the shib login.
        """
        response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertEqual(obj, {
            'success': False,
            'redirect': reverse('shib-login'),
        })
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_login_required_dashboard(self):
        """
        Tests redirects to when @login_required to dashboard, which should always be the normal login,
        since there is no course context
        """
        response = self.client.get(reverse('dashboard'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], 'http://testserver/login?next=/dashboard')
    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_externalauth_login_required_course_context(self):
        """
        Tests the redirects when visiting course-specific URL with @login_required.
        Should vary by course depending on its enrollment_domain
        """
        TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()])  # pylint: disable=invalid-name
        noshib_response = self.client.get(TARGET_URL, follow=True)
        self.assertEqual(noshib_response.redirect_chain[-1],
                         ('http://testserver/login?next={url}'.format(url=TARGET_URL), 302))
        self.assertContains(noshib_response, ("Sign in or Register | {platform_name}"
                                              .format(platform_name=settings.PLATFORM_NAME)))
        self.assertEqual(noshib_response.status_code, 200)
        TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()])  # pylint: disable=invalid-name
        shib_response = self.client.get(**{'path': TARGET_URL_SHIB,
                                           'follow': True,
                                           'REMOTE_USER': self.extauth.external_id,
                                           'Shib-Identity-Provider': 'https://idp.stanford.edu/'})
        # Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
        # The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
        # won't test its contents
        self.assertEqual(shib_response.redirect_chain[-3],
                         ('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))
        self.assertEqual(shib_response.redirect_chain[-2],
                         ('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))
        self.assertEqual(shib_response.status_code, 200)
@httpretty.activate
class LoginOAuthTokenMixin(ThirdPartyOAuthTestMixin):
    """
    Mixin with tests for the login_oauth_token view. A TestCase that includes
    this must define the following:
    BACKEND: The name of the backend from python-social-auth
    USER_URL: The URL of the endpoint that the backend retrieves user data from
    UID_FIELD: The field in the user data that the backend uses as the user id
    """
    def setUp(self):
        super(LoginOAuthTokenMixin, self).setUp()
        self.url = reverse(login_oauth_token, kwargs={"backend": self.BACKEND})
    def _assert_error(self, response, status_code, error):
        """Assert the given status code and error body, and that the
        social-auth pipeline left no partial state in the session.
        (The original docstring claimed a fixed 400 status, but callers
        also pass 401.)"""
        self.assertEqual(response.status_code, status_code)
        self.assertEqual(json.loads(response.content), {"error": error})
        self.assertNotIn("partial_pipeline", self.client.session)
    def test_success(self):
        """A valid provider token logs the linked user in (204)."""
        self._setup_provider_response(success=True)
        response = self.client.post(self.url, {"access_token": "dummy"})
        self.assertEqual(response.status_code, 204)
        self.assertEqual(self.client.session['_auth_user_id'], self.user.id)  # pylint: disable=no-member
    def test_invalid_token(self):
        """A token rejected by the provider yields 401 invalid_token."""
        self._setup_provider_response(success=False)
        response = self.client.post(self.url, {"access_token": "dummy"})
        self._assert_error(response, 401, "invalid_token")
    def test_missing_token(self):
        """Omitting access_token yields 400 invalid_request."""
        response = self.client.post(self.url)
        self._assert_error(response, 400, "invalid_request")
    def test_unlinked_user(self):
        """A valid token for an account with no social-auth link yields 401."""
        UserSocialAuth.objects.all().delete()
        self._setup_provider_response(success=True)
        response = self.client.post(self.url, {"access_token": "dummy"})
        self._assert_error(response, 401, "invalid_token")
    def test_get_method(self):
        """Only POST is allowed; GET yields 405."""
        response = self.client.get(self.url, {"access_token": "dummy"})
        self.assertEqual(response.status_code, 405)
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestFacebook(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinFacebook, TestCase):
    """Tests login_oauth_token with the Facebook backend.

    All test logic lives in LoginOAuthTokenMixin; this class only binds
    the backend configuration."""
    pass
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestGoogle(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinGoogle, TestCase):
    """Tests login_oauth_token with the Google backend.

    All test logic lives in LoginOAuthTokenMixin; this class only binds
    the backend configuration."""
    pass
| agpl-3.0 |
pombredanne/teamwork | exts/sphinxcontrib/bibtex/__init__.py | 38 | 4471 | # -*- coding: utf-8 -*-
"""
Sphinx Interface
~~~~~~~~~~~~~~~~
.. autofunction:: setup
.. autofunction:: init_bibtex_cache
.. autofunction:: purge_bibtex_cache
.. autofunction:: process_citations
.. autofunction:: process_citation_references
.. autofunction:: check_duplicate_labels
"""
import docutils.nodes
from sphinxcontrib.bibtex.cache import Cache
from sphinxcontrib.bibtex.nodes import bibliography
from sphinxcontrib.bibtex.roles import CiteRole
from sphinxcontrib.bibtex.directives import BibliographyDirective
from sphinxcontrib.bibtex.transforms import BibliographyTransform
def init_bibtex_cache(app):
    """Make sure ``app.env.bibtex_cache`` exists.

    A fresh :class:`~sphinxcontrib.bibtex.cache.Cache` is attached to the
    build environment the first time the builder is initialized; an
    existing cache (e.g. restored from a pickled environment) is kept.

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    """
    env = app.env
    if not hasattr(env, "bibtex_cache"):
        env.bibtex_cache = Cache()
def purge_bibtex_cache(app, env, docname):
    """Drop every piece of cached bibtex information for *docname*.

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    :param env: The sphinx build environment.
    :type env: :class:`sphinx.environment.BuildEnvironment`
    :param docname: Name of the document being purged.
    :type docname: :class:`str`
    """
    cache = env.bibtex_cache
    cache.purge(docname)
def process_citations(app, doctree, docname):
    """Replace the label of every citation node by its assigned label.

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    :param doctree: The document tree.
    :type doctree: :class:`docutils.nodes.document`
    :param docname: The document name.
    :type docname: :class:`str`
    """
    for citation in doctree.traverse(docutils.nodes.citation):
        # The first child of a citation node is its label; its text is the key.
        key = citation[0].astext()
        try:
            label = app.env.bibtex_cache.get_label_from_key(key)
        except KeyError:
            app.warn("could not relabel citation [%s]" % key)
        else:
            citation[0] = docutils.nodes.label('', label)
def process_citation_references(app, doctree, docname):
    """Replace the text of citation reference nodes by actual labels.

    Sphinx has already turned citation_reference nodes into plain
    reference nodes by this point, so those are what we traverse.

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    :param doctree: The document tree.
    :type doctree: :class:`docutils.nodes.document`
    :param docname: The document name.
    :type docname: :class:`str`
    """
    for ref in doctree.traverse(docutils.nodes.reference):
        first = ref[0]
        # Skip sphinx [source] links produced by the viewcode extension.
        if (isinstance(first, docutils.nodes.Element)
                and 'viewcode-link' in first['classes']):
            continue
        text = first.astext()
        if not (text.startswith('[') and text.endswith(']')):
            continue
        key = text[1:-1]
        try:
            label = app.env.bibtex_cache.get_label_from_key(key)
        except KeyError:
            app.warn("could not relabel citation reference [%s]" % key)
        else:
            ref[0] = docutils.nodes.Text('[' + label + ']')
def check_duplicate_labels(app, env):
    """Warn about citation labels that were assigned to more than one key.

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    :param env: The sphinx build environment.
    :type env: :class:`sphinx.environment.BuildEnvironment`
    """
    seen = {}
    for info in env.bibtex_cache.bibliographies.values():
        for key, label in info.labels.items():
            if label in seen:
                app.warn(
                    "duplicate label for keys %s and %s"
                    % (key, seen[label]))
            else:
                seen[label] = key
def setup(app):
    """Set up the bibtex extension:

    * register directives
    * register nodes
    * register roles
    * register transforms
    * connect events to functions

    :param app: The sphinx application.
    :type app: :class:`sphinx.application.Sphinx`
    """
    app.add_directive("bibliography", BibliographyDirective)
    app.add_role("cite", CiteRole())
    app.add_node(bibliography)
    app.add_transform(BibliographyTransform)
    # Event handlers, connected in the same order as before.
    handlers = (
        ("builder-inited", init_bibtex_cache),
        ("doctree-resolved", process_citations),
        ("doctree-resolved", process_citation_references),
        ("env-purge-doc", purge_bibtex_cache),
        ("env-updated", check_duplicate_labels),
    )
    for event, callback in handlers:
        app.connect(event, callback)
| gpl-2.0 |
bjzhang/xen_arm | tools/python/xen/xend/osdep.py | 26 | 7850 | #!/usr/bin/env python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
import os
import commands
# Platform capability tables, keyed by the OS name from os.uname()[0]
# (resolved once via _get() at the bottom of this module).
# Whether xend can safely be restarted automatically on this OS.
_xend_autorestart = {
    "NetBSD": True,
    "Linux": True,
    "SunOS": False,
}
# Per-OS override for the vif hotplug script (fallback is "vif-bridge").
_vif_script = {
    "SunOS": "vif-vnic"
}
# Per-OS override for the tap interface script.
_tapif_script = {
    "Linux": "no",
}
# Locations of the Linux balloon driver interface (procfs and sysfs variants).
PROC_XEN_BALLOON = '/proc/xen/balloon'
SYSFS_XEN_MEMORY = '/sys/devices/system/xen_memory/xen_memory0'
def _linux_balloon_stat_proc(label):
    """Returns the value for the named label, or None if an error occurs."""
    # Map xend's stat names onto the labels printed by /proc/xen/balloon;
    # each line looks like "Current allocation:     262144 kB".
    xend2linux_labels = { 'current'      : 'Current allocation',
                          'target'       : 'Requested target',
                          'low-balloon'  : 'Low-mem balloon',
                          'high-balloon' : 'High-mem balloon',
                          'limit'        : 'Xen hard limit' }
    f = file(PROC_XEN_BALLOON, 'r')
    try:
        for line in f:
            keyvalue = line.split(':')
            if keyvalue[0] == xend2linux_labels[label]:
                values = keyvalue[1].split()
                if values[0].isdigit():
                    return int(values[0])
                else:
                    # Non-numeric value: treat as an error.
                    return None
        # Label not present in the file.
        return None
    finally:
        f.close()
def _linux_balloon_stat_sysfs(label):
    """Returns the value for the named label from sysfs, or None on error.

    Reads /sys/devices/system/xen_memory/xen_memory0/<node> for the node
    corresponding to *label*.
    """
    sysfiles = { 'target'       : 'target_kb',
                 'current'      : 'info/current_kb',
                 'low-balloon'  : 'info/low_kb',
                 'high-balloon' : 'info/high_kb',
                 'limit'        : 'info/hard_limit_kb' }
    name = os.path.join(SYSFS_XEN_MEMORY, sysfiles[label])
    f = file(name, 'r')
    try:
        # sysfs nodes hold a single line containing the value in kB.
        val = f.read().strip()
    finally:
        # Always close the handle; the original version leaked it on
        # every call (and on the non-digit early-return path).
        f.close()
    if val.isdigit():
        return int(val)
    return None
def _linux_balloon_stat(label):
    """Return balloon statistic *label* via whichever interface (procfs
    or sysfs) the running kernel exposes, or None if neither exists."""
    sources = ((PROC_XEN_BALLOON, _linux_balloon_stat_proc),
               (SYSFS_XEN_MEMORY, _linux_balloon_stat_sysfs))
    for path, impl in sources:
        if os.access(path, os.F_OK):
            return impl(label)
    return None
def _netbsd_balloon_stat(label):
    """Returns the value for the named label, or None if an error occurs."""
    import commands
    # Map xend stat names to sysctl nodes; stats that NetBSD does not
    # expose are mapped to None and yield None below.
    xend2netbsd_labels = { 'current'      : 'kern.xen.balloon.current',
                           'target'       : 'kern.xen.balloon.target',
                           'low-balloon'  : None,
                           'high-balloon' : None,
                           'limit'        : None }
    cmdarg = xend2netbsd_labels[label]
    if cmdarg is None:
        return None
    cmd = "/sbin/sysctl " + cmdarg
    # sysctl prints "name=value"; parse out the numeric value.
    sysctloutput = commands.getoutput(cmd)
    (name, value) = sysctloutput.split('=')
    return int(value)
def _solaris_balloon_stat(label):
    """Returns the value for the named label, or None if an error occurs."""
    import fcntl
    import array
    # Balloon driver device node and its ioctl command numbers
    # (0x4241 is ASCII 'BA', followed by the command index).
    DEV_XEN_BALLOON = '/dev/xen/balloon'
    BLN_IOCTL_CURRENT = 0x42410001
    BLN_IOCTL_TARGET = 0x42410002
    BLN_IOCTL_LOW = 0x42410003
    BLN_IOCTL_HIGH = 0x42410004
    BLN_IOCTL_LIMIT = 0x42410005
    label_to_ioctl = { 'current'      : BLN_IOCTL_CURRENT,
                       'target'       : BLN_IOCTL_TARGET,
                       'low-balloon'  : BLN_IOCTL_LOW,
                       'high-balloon' : BLN_IOCTL_HIGH,
                       'limit'        : BLN_IOCTL_LIMIT }
    f = file(DEV_XEN_BALLOON, 'r')
    try:
        # One unsigned long, filled in by the driver (mutable buffer flag=1).
        values = array.array('L', [0])
        if fcntl.ioctl(f.fileno(), label_to_ioctl[label], values, 1) == 0:
            return values[0]
        else:
            return None
    finally:
        f.close()
# Per-OS balloon-stat implementations; Linux is the fallback default
# passed to _get() at the bottom of this module.
_balloon_stat = {
    "SunOS": _solaris_balloon_stat,
    "NetBSD": _netbsd_balloon_stat,
}
def _linux_get_cpuinfo():
    """Parse /proc/cpuinfo into a dict mapping cpu number -> {field: value}."""
    cpuinfo = {}
    f = file('/proc/cpuinfo', 'r')
    try:
        # p is the processor number of the stanza currently being parsed;
        # -1 means no "processor" line has been seen yet.
        p = -1
        d = {}
        for line in f:
            keyvalue = line.split(':')
            if len(keyvalue) != 2:
                continue
            key = keyvalue[0].strip()
            val = keyvalue[1].strip()
            if key == 'processor':
                # Start of a new stanza: store the previous one, if any.
                if p != -1:
                    cpuinfo[p] = d
                p = int(val)
                d = {}
            else:
                d[key] = val
        # Store the final stanza.
        cpuinfo[p] = d
        return cpuinfo
    finally:
        f.close()
def _solaris_get_cpuinfo():
    """Build a Linux-style cpuinfo dict {cpu number: {field: value}} from
    the output of kstat's cpu_info module."""
    cpuinfo = {}
    # call kstat to extrace specific cpu_info output
    cmd = "/usr/bin/kstat -p -c misc -m cpu_info"
    kstatoutput = commands.getoutput (cmd)
    # walk each line
    for kstatline in kstatoutput.split('\n'):
        # split the line on
        # module:cpu #:module#:name value
        (module, cpunum, combo, namevalue) = kstatline.split (":")
        # check to see if this cpunum is already a key. If not,
        # initialize an empty hash table
        if not cpuinfo.has_key (int(cpunum)):
            cpuinfo[int(cpunum)] = {}
        # split the namevalue output on whitespace
        data = namevalue.split()
        # the key will be data[0]
        key = data[0]
        # check the length of the data list. If it's larger than
        # 2, join the rest of the list together with a space.
        # Otherwise, value is just data[1]
        if len (data) > 2:
            value = ' '.join (data[1:])
        else:
            value = data[1]
        # add this key/value pair to the cpuhash
        cpuinfo[int(cpunum)][key] = value
    # Translate Solaris tokens into what Xend expects
    for key in cpuinfo.keys():
        cpuinfo[key]["flags"] = ""
        cpuinfo[key]["model name"] = cpuinfo[key]["brand"]
        cpuinfo[key]["cpu MHz"] = cpuinfo[key]["clock_MHz"]
    # return the hash table
    return cpuinfo
def _netbsd_get_cpuinfo():
    """Build a minimal cpuinfo dict (one mostly-empty entry per CPU) from
    the hw.ncpu sysctl; NetBSD exposes no per-CPU details here."""
    import commands
    cpuinfo = {}
    cmd = "/sbin/sysctl hw.ncpu"
    sysctloutput = commands.getoutput(cmd)
    (name, ncpu) = sysctloutput.split('=')
    for i in range(int(ncpu)):
        if not cpuinfo.has_key(i):
            cpuinfo[i] = {}
    # Translate NetBSD tokens into what xend expects
    for key in cpuinfo.keys():
        cpuinfo[key]['flags'] = ""
        cpuinfo[key]['vendor_id'] = ""
        cpuinfo[key]['model name'] = ""
        cpuinfo[key]['stepping'] = ""
        cpuinfo[key]['cpu MHz'] = 0
    return cpuinfo
# Per-OS cpuinfo implementations; Linux is the fallback default
# passed to _get() at the bottom of this module.
_get_cpuinfo = {
    "SunOS": _solaris_get_cpuinfo,
    "NetBSD": _netbsd_get_cpuinfo
}
def _default_prefork(name):
    """No-op prefork hook for platforms without process contracts."""
    pass
def _default_postfork(ct, abandon=False):
    """No-op postfork hook; *ct* and *abandon* are ignored off Solaris."""
    pass
# call this for long-running processes that should survive a xend
# restart
def _solaris_prefork(name):
    """Activate a Solaris process contract template named *name*.

    Returns the value from process.activate(); presumably a contract
    handle consumed by _solaris_postfork -- confirm in xen.lowlevel.process.
    """
    from xen.lowlevel import process
    return process.activate(name)
def _solaris_postfork(ct, abandon=False):
    """Clear the contract template *ct* after forking; optionally abandon
    the most recently created contract so the child survives xend."""
    from xen.lowlevel import process
    process.clear(ct)
    if abandon:
        process.abandon_latest()
# Dispatch tables for the prefork/postfork hooks; only Solaris needs
# real implementations, everything else falls back to the no-ops.
_get_prefork = {
    "SunOS": _solaris_prefork
}
_get_postfork = {
    "SunOS": _solaris_postfork
}
def _get(var, default=None):
return var.get(os.uname()[0], default)
# Resolve the per-OS implementation for the current platform once at
# import time; the rest of xend uses these module-level names.
xend_autorestart = _get(_xend_autorestart)
vif_script = _get(_vif_script, "vif-bridge")
tapif_script = _get(_tapif_script)
lookup_balloon_stat = _get(_balloon_stat, _linux_balloon_stat)
get_cpuinfo = _get(_get_cpuinfo, _linux_get_cpuinfo)
prefork = _get(_get_prefork, _default_prefork)
postfork = _get(_get_postfork, _default_postfork)
| gpl-2.0 |
nuncjo/odoo | openerp/report/render/odt2odt/odt2odt.py | 443 | 2265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
class odt2odt(object):
    """Render an ODT element tree by evaluating the template expressions
    embedded in its text and tail strings, using *localcontext* for
    variable lookups (delegated to rml2pdf's utils helpers)."""
    def __init__(self, odt, localcontext):
        self.localcontext = localcontext
        self.etree = odt
        self._node = None
    def render(self):
        """Return a processed deep copy of the source element tree."""
        def process_text(node, new_node):
            # Append a processed deep copy of each child of *node* to
            # *new_node*, substituting template expressions in text/tail.
            for child in utils._child_get(node, self):
                new_child = copy.deepcopy(child)
                new_node.append(new_child)
                new_child.text = utils._process_text(self, child.text)
                new_child.tail = utils._process_text(self, child.tail)
                if len(child):
                    # Clear the copied grandchildren so they can be re-added
                    # in processed form.  Iterate over a snapshot: the old
                    # code removed elements while iterating the node itself,
                    # which skips every other child and left duplicated,
                    # unprocessed children in the output.
                    for n in list(new_child):
                        new_child.remove(n)
                    process_text(child, new_child)
        self._node = copy.deepcopy(self.etree)
        # Strip the copied children (snapshot again, same reason as above);
        # process_text re-adds processed copies.
        for n in list(self._node):
            self._node.remove(n)
        process_text(self.etree, self._node)
        return self._node
def parseNode(node, localcontext=None):
    """Evaluate the template expressions in *node* and return the result.

    :param node: source ODT element tree
    :param localcontext: evaluation context for template expressions;
        defaults to an empty dict.
    """
    # Use None as the default: the original mutable ``{}`` default would
    # be shared between calls if the renderer ever mutated it.
    if localcontext is None:
        localcontext = {}
    r = odt2odt(node, localcontext)
    return r.render()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mzdaniel/oh-mainline | vendor/packages/scrapy/scrapy/core/downloader/handlers/__init__.py | 19 | 1193 | """Download handlers for different schemes"""
from scrapy.exceptions import NotSupported, NotConfigured
from scrapy.utils.httpobj import urlparse_cached
from scrapy.conf import settings
from scrapy.utils.misc import load_object
class DownloadHandlers(object):
    """Maps URL schemes to the download handlers configured via the
    DOWNLOAD_HANDLERS_BASE and DOWNLOAD_HANDLERS settings."""
    def __init__(self):
        # scheme -> bound download_request callable
        self._handlers = {}
        # scheme -> reason string for handlers that failed to configure
        self._notconfigured = {}
        handlers = settings.get('DOWNLOAD_HANDLERS_BASE')
        handlers.update(settings.get('DOWNLOAD_HANDLERS', {}))
        for scheme, clspath in handlers.iteritems():
            cls = load_object(clspath)
            try:
                dh = cls()
            except NotConfigured, ex:
                # Remember why, so download_request can report a useful error.
                self._notconfigured[scheme] = str(ex)
            else:
                self._handlers[scheme] = dh.download_request
    def download_request(self, request, spider):
        """Dispatch *request* to the handler registered for its URL scheme.

        Raises NotSupported when no handler is available for the scheme.
        """
        scheme = urlparse_cached(request).scheme
        try:
            handler = self._handlers[scheme]
        except KeyError:
            msg = self._notconfigured.get(scheme, \
                'no handler available for that scheme')
            raise NotSupported("Unsupported URL scheme '%s': %s" % (scheme, msg))
        return handler(request, spider)
| agpl-3.0 |
juanyaw/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/types.py | 84 | 2124 | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.
# Basic built-in types, captured via type() where no literal name exists.
NoneType = type(None)
TypeType = type
ObjectType = object
IntType = int
LongType = long
FloatType = float
BooleanType = bool
# complex may be absent if the interpreter was built without complex support.
try:
    ComplexType = complex
except NameError:
    pass
StringType = str
# StringTypes is already outdated.  Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)".  But
# we keep around for compatibility with Python 2.2.
try:
    UnicodeType = unicode
    StringTypes = (StringType, UnicodeType)
except NameError:
    StringTypes = (StringType,)
BufferType = buffer
TupleType = tuple
ListType = list
DictType = DictionaryType = dict
# Throwaway definitions below (_f, _g, _C, _x) exist only so that their
# runtime types can be captured; they are deleted at the end of the module.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)         # Same as FunctionType
CodeType = type(_f.func_code)
def _g():
    yield 1
GeneratorType = type(_g())
class _C:
    def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m)         # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType
ModuleType = type(sys)
FileType = file
XRangeType = xrange
# Raise and catch an exception to get live traceback/frame objects.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
    del tb
SliceType = slice
EllipsisType = type(Ellipsis)
DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)
del sys, _f, _g, _C, _x                           # Not for export
| apache-2.0 |
divio/django | tests/check_framework/test_templates.py | 288 | 1403 | from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
    """Tests for the E001 system check: a TEMPLATES entry may not combine
    APP_DIRS=True with an explicit 'loaders' option."""
    # A TEMPLATES setting that violates the check (both APP_DIRS and loaders).
    TEMPLATES_APP_DIRS_AND_LOADERS = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'OPTIONS': {
                'loaders': ['django.template.loaders.filesystem.Loader'],
            },
        },
    ]
    @property
    def func(self):
        """The check function under test, imported lazily on each access."""
        from django.core.checks.templates import check_setting_app_dirs_loaders
        return check_setting_app_dirs_loaders
    @override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
    def test_app_dirs_and_loaders(self):
        """
        Error if template loaders are specified and APP_DIRS is True.
        """
        self.assertEqual(self.func(None), [E001])
    def test_app_dirs_removed(self):
        """No error once APP_DIRS is removed from the offending entry."""
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['APP_DIRS']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])
    def test_loaders_removed(self):
        """No error once the loaders option is removed."""
        TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
        del TEMPLATES[0]['OPTIONS']['loaders']
        with self.settings(TEMPLATES=TEMPLATES):
            self.assertEqual(self.func(None), [])
| bsd-3-clause |
aurarad/auroracoin | test/functional/rpc_getblockstats.py | 1 | 6843 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(DigiByteTestFramework):
start_height = 101
max_stat_pos = 2
    def add_options(self, parser):
        """Add CLI flags for (re)generating the JSON fixture this test uses."""
        parser.add_argument('--gen-test-data', dest='gen_test_data',
                            default=False, action='store_true',
                            help='Generate test data')
        parser.add_argument('--test-data', dest='test_data',
                            default='data/rpc_getblockstats.json',
                            action='store', metavar='FILE',
                            help='Test data file')
    def set_test_params(self):
        """Run a single node on a fresh regtest chain."""
        self.num_nodes = 1
        self.setup_clean_chain = True
    def get_stats(self):
        """Return getblockstats results for each block under test."""
        return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
    def generate_test_data(self, filename):
        """Mine a deterministic chain with a few wallet transactions and dump
        the raw blocks, mocktime, and expected getblockstats output to
        *filename* (run with --gen-test-data to regenerate the fixture)."""
        # Fixed mocktime keeps block hashes and stats reproducible.
        mocktime = 1525107225
        self.nodes[0].setmocktime(mocktime)
        self.nodes[0].generate(101)
        address = self.nodes[0].get_deterministic_priv_key().address
        self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
        self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
        self.nodes[0].settxfee(amount=0.003)
        self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.expected_stats = self.get_stats()
        # Serialize every block from genesis to tip (hex form, verbosity 0).
        blocks = []
        tip = self.nodes[0].getbestblockhash()
        blockhash = None
        height = 0
        while tip != blockhash:
            blockhash = self.nodes[0].getblockhash(height)
            blocks.append(self.nodes[0].getblock(blockhash, 0))
            height += 1
        to_dump = {
            'blocks': blocks,
            'mocktime': int(mocktime),
            'stats': self.expected_stats,
        }
        with open(filename, 'w', encoding="utf8") as f:
            json.dump(to_dump, f, sort_keys=True, indent=2)
    def load_test_data(self, filename):
        """Load the JSON fixture and replay its blocks onto the node."""
        with open(filename, 'r', encoding="utf8") as f:
            d = json.load(f)
            blocks = d['blocks']
            mocktime = d['mocktime']
            self.expected_stats = d['stats']
        # Set the timestamps from the file so that the nodes can get out of Initial Block Download
        self.nodes[0].setmocktime(mocktime)
        self.sync_all()
        for b in blocks:
            self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
    # Standard test-framework entry point: main() drives setup/run/teardown.
    GetblockstatsTest().main()
| mit |
ondrejkajinek/pyGrim | pygrim/components/connectors/redis_conn.py | 1 | 2516 | # coding: utf8
from logging import getLogger
from string import strip as string_strip
log = getLogger("jsonrpc.connectors.redis")
def connect_redis(config, section="session:args:"):
    """Create a StrictRedis connection from configuration values.

    Reads the following keys under ``section`` from ``config``:
        host      server where redis is running (required)
        port      TCP port, defaults to 6379
        database  db number (required)
        password  optional password, defaults to None

    Returns a ``redis.StrictRedis`` instance, or ``None`` when the
    ``redis`` package is not importable (the error is logged).
    """
    try:
        import redis
    except ImportError:
        # Fixed typo in the log message ("pyckage" -> "package") and made
        # the None return explicit for readability.
        log.exception("Cannot import python package: redis")
        return None
    return redis.StrictRedis(
        host=config.get(section + "host"),
        port=config.getint(section + "port", 6379),
        db=config.getint(section + "database"),
        password=config.get(section + "password", None)
    )
def connect_redis_sentinel(config, section="session:args:"):
    """Connect to redis through a Sentinel cluster.

    Reads from ``config`` under ``section``: a comma-separated
    ``sentinels`` list of host:port pairs, ``socket_timeout``,
    ``database``, optional ``password`` and the ``master_group_name``
    to ask the sentinels for.

    Returns a SentinelWrapper proxying the discovered master connection,
    or None when the redis package is missing. Raises RuntimeError for
    fewer than two configured sentinels.
    """
    try:
        from redis.sentinel import Sentinel
    except ImportError:
        # NOTE(review): "pyckage" typo in this runtime log message.
        log.exception("Cannot import python pyckage: redis")
        return
    class SentinelWrapper(object):
        # Thin proxy: every unknown attribute lookup is forwarded to the
        # master connection discovered from the sentinels.
        def __init__(self, sentinel_conn, master_group_name):
            self.sentinel_conn = sentinel_conn
            self.master_group_name = master_group_name
            self._discover()
        def _discover(self):
            self.master = self.sentinel_conn.master_for(
                self.master_group_name
            )
            # we don't need a slave connection ;-)
            # self.slave = self.sentinel_conn.slave_for(
            #     self.master_group_name
            # )
        def __getattr__(self, attr):
            return getattr(self.master, attr)
    # Parse "host:port, host:port, ..." into stripped (host, port) pairs.
    # NOTE(review): Python 2 only (`from string import strip`); ports remain
    # strings here — presumably redis-py accepts that, confirm.
    sentinel_hosts = tuple(
        map(string_strip, i.split(":", 1))
        for i
        in config.get(section + "sentinels", "").split(",")
        if i.strip()
    )
    sh_len = len(sentinel_hosts)
    if sh_len == 0:
        raise RuntimeError("No sentinel configured")
    elif sh_len == 1:
        raise RuntimeError("Only one sentinel configured")
    elif sh_len == 2:
        # Two sentinels cannot form a reliable quorum; warn but continue.
        log.warning(
            "Connecting to 2 sentinels"
            " -> DANGER –> "
            "for more informations read the docs"
        )
    sentinel_obj = Sentinel(
        sentinel_hosts,
        socket_timeout=config.getfloat(section + "socket_timeout", 0.1),
        db=config.getint(section + "database"),
        password=config.get(section + "password", None)
    )
    master_group_name = config.get(section + "master_group_name")
    return SentinelWrapper(sentinel_obj, master_group_name)
| mit |
sup95/zulip | zerver/tests/webhooks/test_stash.py | 4 | 1113 | # -*- coding: utf-8 -*-
from six import text_type
from zerver.lib.test_helpers import WebhookTestCase
class StashHookTests(WebhookTestCase):
    """End-to-end check of the Atlassian Stash webhook integration using a
    recorded `push` payload fixture."""
    STREAM_NAME = 'stash'
    URL_TEMPLATE = u"/api/v1/external/stash?stream={stream}"
    def test_stash_message(self):
        # type: () -> None
        """
        Messages are generated by Stash on a `git push`.
        The subject describes the repo and Stash "project". The
        content describes the commits pushed.
        """
        expected_subject = u"Secret project/Operation unicorn: master"
        expected_message = """`f259e90` was pushed to **master** in **Secret project/Operation unicorn** with:
* `f259e90`: Updating poms ..."""
        self.send_and_test_stream_message('push', expected_subject, expected_message,
                                          content_type="application/x-www-form-urlencoded",
                                          **self.api_auth(self.TEST_USER_EMAIL))
    def get_body(self, fixture_name):
        # type: (text_type) -> text_type
        # Fixtures live under the "stash" directory as JSON files.
        return self.fixture_data("stash", fixture_name, file_type="json")
| apache-2.0 |
thiagopnts/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorservo.py | 3 | 9810 | import base64
import hashlib
import httplib
import json
import os
import subprocess
import tempfile
import threading
import traceback
import urlparse
import uuid
from collections import defaultdict
from mozprocess import ProcessHandler
from .base import (ExecutorException,
Protocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter,
WdspecExecutor, WebDriverProtocol)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
from ..wpttest import WdspecResult, WdspecSubtestResult
from ..webdriver_server import ServoDriverServer
from .executormarionette import WdspecRun
pytestrunner = None
webdriver = None
extra_timeout = 5 # seconds
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def make_hosts_file():
    """Write the static web-platform.test hosts mapping to a new temp file.

    Returns the path of the created file; callers are responsible for
    deleting it (see the executors' teardown methods).
    """
    fd, path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w")
    try:
        handle.write(hosts_text)
    finally:
        handle.close()
    return path
class ServoTestharnessExecutor(ProcessTestExecutor):
    """Runs testharness.js tests by launching one Servo process per test and
    scraping the result JSON from an "ALERT: RESULT:" line on its output."""
    convert_result = testharness_result_converter
    def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
                 pause_after_test=False, **kwargs):
        ProcessTestExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.pause_after_test = pause_after_test
        self.result_data = None
        self.result_flag = None
        self.protocol = Protocol(self, browser)
        # Temporary hosts file mapping *.web-platform.test to localhost.
        self.hosts_path = make_hosts_file()
    def teardown(self):
        # Best-effort removal of the temporary hosts file.
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        ProcessTestExecutor.teardown(self)
    def do_test(self, test):
        """Run a single test in a fresh Servo process; return (result, subtests)."""
        self.result_data = None
        self.result_flag = threading.Event()
        args = [
            "--hard-fail", "-u", "Servo/wptrunner",
            "-Z", "replace-surrogates", "-z", self.test_url(test),
        ]
        for stylesheet in self.browser.user_stylesheets:
            args += ["--user-stylesheet", stylesheet]
        for pref, value in test.environment.get('prefs', {}).iteritems():
            args += ["--pref", "%s=%s" % (pref, value)]
        if self.browser.ca_certificate_path:
            args += ["--certificate-path", self.browser.ca_certificate_path]
        args += self.browser.binary_args
        debug_args, command = browser_command(self.binary, args, self.debug_info)
        self.command = command
        if self.pause_after_test:
            # Drop -z (close after load) so the browser stays open for inspection.
            self.command.remove("-z")
        self.command = debug_args + self.command
        env = os.environ.copy()
        env["HOST_FILE"] = self.hosts_path
        env["RUST_BACKTRACE"] = "1"
        if not self.interactive:
            self.proc = ProcessHandler(self.command,
                                       processOutputLine=[self.on_output],
                                       onFinish=self.on_finish,
                                       env=env,
                                       storeOutput=False)
            self.proc.run()
        else:
            self.proc = subprocess.Popen(self.command, env=env)
        try:
            timeout = test.timeout * self.timeout_multiplier
            # Now wait to get the output we expect, or until we reach the timeout
            if not self.interactive and not self.pause_after_test:
                wait_timeout = timeout + 5
                self.result_flag.wait(wait_timeout)
            else:
                wait_timeout = None
                self.proc.wait()
            proc_is_running = True
            if self.result_flag.is_set():
                if self.result_data is not None:
                    result = self.convert_result(test, self.result_data)
                else:
                    # Output ended without a RESULT line: treat as a crash.
                    self.proc.wait()
                    result = (test.result_cls("CRASH", None), [])
                    proc_is_running = False
            else:
                result = (test.result_cls("TIMEOUT", None), [])
            if proc_is_running:
                if self.pause_after_test:
                    self.logger.info("Pausing until the browser exits")
                    self.proc.wait()
                else:
                    self.proc.kill()
        except KeyboardInterrupt:
            self.proc.kill()
            raise
        return result
    def on_output(self, line):
        # Servo reports the test result via a JS alert; everything else is
        # forwarded to the log (or stdout in interactive mode).
        prefix = "ALERT: RESULT: "
        line = line.decode("utf8", "replace")
        if line.startswith(prefix):
            self.result_data = json.loads(line[len(prefix):])
            self.result_flag.set()
        else:
            if self.interactive:
                print line
            else:
                self.logger.process_output(self.proc.pid,
                                           line,
                                           " ".join(self.command))
    def on_finish(self):
        # Process exited: unblock do_test even if no result was seen.
        self.result_flag.set()
class TempFilename(object):
    """Context manager that hands out a unique file path under *directory*.

    On exit the path is unlinked; it is fine if the caller never actually
    created a file there (the resulting OSError is swallowed).
    """
    def __init__(self, directory):
        self.directory = directory
        self.path = None

    def __enter__(self):
        unique_name = str(uuid.uuid4())
        self.path = os.path.join(self.directory, unique_name)
        return self.path

    def __exit__(self, *args, **kwargs):
        try:
            os.unlink(self.path)
        except OSError:
            # Nothing was ever written to the path; nothing to clean up.
            pass
class ServoRefTestExecutor(ProcessTestExecutor):
    """Runs reftests by invoking Servo once per screenshot with --output
    pointing at a temporary file, then handing the base64-encoded image to
    the shared RefTestImplementation for comparison."""
    convert_result = reftest_result_converter
    def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
                 screenshot_cache=None, debug_info=None, pause_after_test=False,
                 **kwargs):
        ProcessTestExecutor.__init__(self,
                                     browser,
                                     server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = Protocol(self, browser)
        self.screenshot_cache = screenshot_cache
        self.implementation = RefTestImplementation(self)
        # Screenshots are written into this private temp directory.
        self.tempdir = tempfile.mkdtemp()
        # Temporary hosts file mapping *.web-platform.test to localhost.
        self.hosts_path = make_hosts_file()
    def teardown(self):
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        os.rmdir(self.tempdir)
        ProcessTestExecutor.teardown(self)
    def screenshot(self, test, viewport_size, dpi):
        """Render |test| to an image and return (success, data).

        On success data is the base64-encoded screenshot; on failure it is
        a (status, message) tuple such as ("CRASH", None).
        """
        full_url = self.test_url(test)
        with TempFilename(self.tempdir) as output_path:
            debug_args, command = browser_command(
                self.binary,
                [
                    "--hard-fail", "--exit",
                    "-u", "Servo/wptrunner",
                    "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
                    "--output=%s" % output_path, full_url
                ] + self.browser.binary_args,
                self.debug_info)
            for stylesheet in self.browser.user_stylesheets:
                command += ["--user-stylesheet", stylesheet]
            for pref, value in test.environment.get('prefs', {}).iteritems():
                command += ["--pref", "%s=%s" % (pref, value)]
            command += ["--resolution", viewport_size or "800x600"]
            if self.browser.ca_certificate_path:
                command += ["--certificate-path", self.browser.ca_certificate_path]
            if dpi:
                command += ["--device-pixel-ratio", dpi]
            # Run ref tests in headless mode
            command += ["-z"]
            self.command = debug_args + command
            env = os.environ.copy()
            env["HOST_FILE"] = self.hosts_path
            env["RUST_BACKTRACE"] = "1"
            if not self.interactive:
                self.proc = ProcessHandler(self.command,
                                           processOutputLine=[self.on_output],
                                           env=env)
                try:
                    self.proc.run()
                    timeout = test.timeout * self.timeout_multiplier + 5
                    rv = self.proc.wait(timeout=timeout)
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            else:
                self.proc = subprocess.Popen(self.command,
                                             env=env)
                try:
                    rv = self.proc.wait()
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            # rv is None when wait() timed out above.
            if rv is None:
                self.proc.kill()
                return False, ("EXTERNAL-TIMEOUT", None)
            if rv != 0 or not os.path.exists(output_path):
                return False, ("CRASH", None)
            with open(output_path) as f:
                # Might need to strip variable headers or something here
                data = f.read()
            return True, base64.b64encode(data)
    def do_test(self, test):
        # Delegate the actual screenshot comparison to RefTestImplementation.
        result = self.implementation.run_test(test)
        return self.convert_result(test, result)
    def on_output(self, line):
        line = line.decode("utf8", "replace")
        if self.interactive:
            print line
        else:
            self.logger.process_output(self.proc.pid,
                                       line,
                                       " ".join(self.command))
class ServoDriverProtocol(WebDriverProtocol):
    # WebDriver protocol variant that launches servodriver as its server.
    server_cls = ServoDriverServer
class ServoWdspecExecutor(WdspecExecutor):
    # wdspec executor wired to talk to Servo through ServoDriverProtocol.
    protocol_cls = ServoDriverProtocol
| mpl-2.0 |
jaruba/chromium.src | chrome/common/extensions/docs/server2/table_of_contents_renderer.py | 98 | 2000 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from extensions_paths import PRIVATE_TEMPLATES
from file_system import FileNotFoundError
class TableOfContentsRenderer(object):
  '''Renders a table of contents pulled from a list of DocumentSections
  returned from document_parser.ParseDocument.
  This performs key functionality of DocumentRenderer, pulled into its own
  class for testability.
  '''
  def __init__(self,
               host_file_system,
               compiled_fs_factory,
               template_renderer):
    self._templates = compiled_fs_factory.ForTemplates(host_file_system)
    self._template_renderer = template_renderer

  def Render(self, sections):
    '''Renders a list of DocumentSections |sections| and returns a tuple
    (text, warnings).
    '''
    path = '%stable_of_contents.html' % PRIVATE_TEMPLATES
    try:
      table_of_contents_template = self._templates.GetFromFile(path).Get()
    except FileNotFoundError:
      return '', ['%s not found' % path]

    def make_toc_items(entries):
      # Recursively convert parsed section entries into template-friendly
      # dicts; the 'id' attribute becomes the link target rather than a
      # rendered attribute.
      return [{
        'attributes': [{'key': key, 'value': val}
                       for key, val in entry.attributes.iteritems()
                       if key != 'id'],
        'link': entry.attributes.get('id', ''),
        'subheadings': make_toc_items(entry.entries),
        'title': entry.name,
      } for entry in entries]

    toc_items = []
    for section in sections:
      items_for_section = make_toc_items(section.structure)
      # Mark the first item of each later section so the template can draw
      # a separator between top-level sections.
      if toc_items and items_for_section:
        items_for_section[0]['separator'] = True
      toc_items.extend(items_for_section)

    # Reuse the template fetched (and error-checked) above rather than
    # fetching it from the file system a second time.
    # NOTE(review): data_sources=('partials') is a plain string, not a
    # 1-tuple — confirm the renderer intends a string before changing it.
    return self._template_renderer.Render(
        table_of_contents_template,
        None,  # no request
        data_sources=('partials'),
        additional_context={'items': toc_items})
| bsd-3-clause |
minrk/dask | dask/array/tests/test_array_core.py | 1 | 28628 | from __future__ import absolute_import, division, print_function
from operator import add
from toolz import merge
from toolz.curried import identity
import dask
import dask.array as da
from dask.array.core import *
from dask.utils import raises, ignoring, tmpfile
inc = lambda x: x + 1
def test_getem():
assert getem('X', (2, 3), shape=(4, 6)) == \
{('X', 0, 0): (getarray, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getarray, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getarray, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getarray, 'X', (slice(0, 2), slice(3, 6)))}
def test_top():
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_rec_concatenate():
x = np.array([1, 2])
assert rec_concatenate([[x, x, x], [x, x, x]]).shape == (2, 6)
x = np.array([[1, 2]])
assert rec_concatenate([[x, x, x], [x, x, x]]).shape == (2, 6)
def eq(a, b):
    """Equality helper for these tests: compares declared dtypes first,
    then values (allclose for numerics, elementwise == otherwise).

    Dask arrays are materialised with the synchronous scheduler, so the
    comparison also validates the lazily declared ``_dtype`` against the
    other operand's dtype.
    """
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    # A dtype mismatch fails even when the values agree.
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        # Non-numeric data: fall through to (possibly elementwise) equality.
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(np.dot(x, o), rec_concatenate(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert eq(rec_concatenate(out), x.T + 1)
def test_transpose():
    """transpose on a chunked dask array must match numpy's transpose."""
    source = np.arange(240).reshape((4, 6, 10))
    chunked = da.from_array(source, (2, 3, 4))
    permutation = (2, 0, 1)
    assert eq(chunked.transpose(permutation), source.transpose(permutation))
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape)
assert a.numblocks == (10, 10)
assert a._keys() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
    # A 10x10 array in 3x3 chunks leaves a ragged 1-wide remainder chunk.
    a = Array({}, 'x', chunks=(3, 3), shape=(10, 10))
    assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_suppoorts_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape)
assert set(concat(a._keys())) == set([('x', i, 0) for i in range(100//10)])
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60))
assert dx._keys() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=())
assert d._keys() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3))
assert eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.dask[(s.name, 0, 1, 0)] == (getarray, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getarray, ('C', 1, 0),
(None, colon, colon))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getarray, ('B', 1, 0),
(colon, None, colon))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.dask[(s2.name, 0, 1, 0)] == (getarray, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getarray, ('C', 1, 1),
(colon, colon, None))
assert raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == \
stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
assert get(s.dask, s._keys())[0][0].shape == (1, 1)
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, shape=(4, 6), chunks=(2, 3))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert set(b.dask.keys()).issubset(y.dask.keys())
assert concatenate([a, b, c], axis=-1).chunks == \
concatenate([a, b, c], axis=1).chunks
assert raises(ValueError, lambda: concatenate([a, b, c], axis=2))
def test_take():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(np.take(x, 3, axis=0), take(a, 3, axis=0))
assert eq(np.take(x, [3, 4, 5], axis=-1), take(a, [3, 4, 5], axis=-1))
assert raises(ValueError, lambda: take(a, 3, axis=2))
def test_binops():
a = Array(dict((('a', i), '') for i in range(3)),
'a', chunks=((10, 10, 10),))
b = Array(dict((('b', i), '') for i in range(3)),
'b', chunks=((10, 10, 10),))
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert result.dask[('c', 0)][1] == ('a', 0)
f = result.dask[('c', 0)][0]
assert f(10) == 100
def test_isnull():
x = np.array([1, np.nan])
a = from_array(x, chunks=(2,))
with ignoring(ImportError):
assert eq(isnull(a), np.isnan(x))
assert eq(notnull(a), ~np.isnan(x))
def test_isclose():
x = np.array([0, np.nan, 1, 1.5])
y = np.array([1e-9, np.nan, 1, 2])
a = from_array(x, chunks=(2,))
b = from_array(y, chunks=(2,))
assert eq(da.isclose(a, b, equal_nan=True),
np.isclose(x, y, equal_nan=True))
def test_elemwise_on_scalars():
x = np.arange(10)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 2
assert eq(a.sum()**2, x.sum()**2)
x = np.arange(11)
a = from_array(x, chunks=(5,))
assert len(a._keys()) == 3
assert eq(a, x)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert eq(c, x + 1)
c = a + b
assert eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
assert eq(expr, (3 / x * y)**2 > 5)
c = exp(a)
assert eq(c, np.exp(x))
assert eq(abs(-a), a)
assert eq(a, +x)
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert eq(y['a'], x['a'])
assert eq(y[['b', 'a']], x[['b', 'a']])
def test_reductions():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(7, 7))
assert eq(a.sum(), x.sum())
assert eq(a.sum(axis=1), x.sum(axis=1))
assert eq(a.sum(axis=1, keepdims=True), x.sum(axis=1, keepdims=True))
assert eq(a.mean(), x.mean())
assert eq(a.var(axis=(1, 0)), x.var(axis=(1, 0)))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
assert eq(a.std(axis=0, keepdims=True), x.std(axis=0, keepdims=True))
def test_tensordot():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(tensordot(a, b, axes=1), np.tensordot(x, y, axes=1))
assert eq(tensordot(a, b, axes=(1, 0)), np.tensordot(x, y, axes=(1, 0)))
# assert (tensordot(a, a).chunks
# == tensordot(a, a, axes=((1, 0), (0, 1))).chunks)
# assert eq(tensordot(a, a), np.tensordot(x, x))
def test_dot_method():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
y = np.arange(200).reshape((20, 10))
b = from_array(y, chunks=(5, 5))
assert eq(a.dot(b), x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
b = from_array(a, chunks=(5, 5))
assert eq(b.vnorm(), np.linalg.norm(a))
assert eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
def test_choose():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
assert eq(choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))
assert eq(choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))
def test_where():
x = np.random.randint(10, size=(15, 16))
d = from_array(x, chunks=(4, 5))
y = np.random.randint(10, size=15)
e = from_array(y, chunks=(4,))
assert eq(where(d > 5, d, 0), np.where(x > 5, x, 0))
assert eq(where(d > 5, d, -e[:, None]), np.where(x > 5, x, -y[:, None]))
def test_coarsen():
x = np.random.randint(10, size=(24, 24))
d = from_array(x, chunks=(4, 8))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(np.sum, d, {0: 2, 1: 4}))
assert eq(chunk.coarsen(np.sum, x, {0: 2, 1: 4}),
coarsen(da.sum, d, {0: 2, 1: 4}))
def test_insert():
x = np.random.randint(10, size=(10, 10))
a = from_array(x, chunks=(5, 5))
y = np.random.randint(10, size=(5, 10))
b = from_array(y, chunks=(4, 4))
assert eq(np.insert(x, 0, -1, axis=0), insert(a, 0, -1, axis=0))
assert eq(np.insert(x, 3, -1, axis=-1), insert(a, 3, -1, axis=-1))
assert eq(np.insert(x, 5, -1, axis=1), insert(a, 5, -1, axis=1))
assert eq(np.insert(x, -1, -1, axis=-2), insert(a, -1, -1, axis=-2))
assert eq(np.insert(x, [2, 3, 3], -1, axis=1),
insert(a, [2, 3, 3], -1, axis=1))
assert eq(np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),
insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0))
assert eq(np.insert(x, slice(1, 4), -1, axis=1),
insert(a, slice(1, 4), -1, axis=1))
assert eq(np.insert(x, [2] * 3 + [5] * 2, y, axis=0),
insert(a, [2] * 3 + [5] * 2, b, axis=0))
assert eq(np.insert(x, 0, y[0], axis=1),
insert(a, 0, b[0], axis=1))
assert raises(NotImplementedError, lambda: insert(a, [4, 2], -1, axis=0))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=2))
assert raises(IndexError, lambda: insert(a, [3], -1, axis=-3))
def test_multi_insert():
z = np.random.randint(10, size=(1, 2))
c = from_array(z, chunks=(1, 2))
assert eq(np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),
insert(insert(c, [0, 1], -1, axis=0), [1], -1, axis=1))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
assert eq(chunk.broadcast_to(x, shape),
broadcast_to(a, shape))
assert raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
assert raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_full():
    # full() must honour an explicit, uneven chunk specification.
    d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
    assert d.chunks == ((2, 1), (2, 2))
    assert eq(d, np.full((3, 4), 2))
def test_map_blocks():
inc = lambda x: x + 1
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert eq(e, x + 1)
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
def func(block, block_id=None):
return np.ones_like(block) * sum(block_id)
d = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert eq(d, expected)
def test_fromfunction():
def f(x, y):
return x + y
d = fromfunction(f, shape=(5, 5), chunks=(2, 2), dtype='f8')
assert eq(d, np.fromfunction(f, shape=(5, 5)))
def test_from_function_requires_block_args():
x = np.arange(10)
assert raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert d.name in repr(d)
assert str(d.shape) in repr(d)
assert str(d.chunks) in repr(d)
assert str(d._dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert eq(d[..., 1], x[..., 1])
assert eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert eq(d[np.arange(8)], x)
assert eq(d[np.ones(8, dtype=bool)], x)
def test_dtype():
    # The lazily declared dtype must agree with the computed result's dtype.
    d = da.ones((4, 4), chunks=(2, 2))
    assert d.dtype == d.compute().dtype
    assert (d * 1.0).dtype == (d + 1.0).compute().dtype
    assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
    # 10 split into chunks of 4 -> (4, 4, 2); a None blockshape must raise.
    assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
    assert raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
def test_compute():
    # compute() evaluates several lazy arrays at once, and also a single one.
    d = da.ones((4, 4), chunks=(2, 2))
    a, b = d + 1, d + 2
    A, B = compute(a, b)
    assert eq(A, d + 1)
    assert eq(B, d + 2)
    A, = compute(a)
    assert eq(A, d + 1)
def test_coerce():
    # One-element dask arrays must coerce to the builtin scalar types.
    d = da.from_array(np.array([1]), chunks=(1,))
    with dask.set_options(get=dask.get):
        assert bool(d)
        assert int(d)
        assert float(d)
        assert complex(d)
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
store([a, b], [at, bt])
assert (at == 2).all()
assert (bt == 3).all()
assert raises(ValueError, lambda: store([a], [at, bt]))
assert raises(ValueError, lambda: store(at, at))
assert raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_to_hdf5():
    # Round-trip through HDF5; silently skipped when h5py is unavailable.
    try:
        import h5py
    except ImportError:
        return
    x = da.ones((4, 4), chunks=(2, 2))
    with tmpfile('.hdf5') as fn:
        x.to_hdf5(fn, '/x')
        with h5py.File(fn) as f:
            d = f['/x']
            assert eq(d[:], x)
            # Chunking is preserved in the HDF5 dataset.
            assert d.chunks == (2, 2)


def test_np_array_with_zero_dimensions():
    # np.array() on a 0-d dask result materializes the scalar.
    d = da.ones((4, 4), chunks=(2, 2))
    assert eq(np.array(d.sum()), np.array(d.compute().sum()))


def test_unique():
    d = da.from_array(np.array([1, 2, 4, 4, 5, 2]), chunks=(3,))
    x = np.array([1, 2, 4, 4, 5, 2])
    d = da.from_array(x, chunks=(3,))
    assert eq(da.unique(d), np.unique(x))


def test_dtype_complex():
    # Lazy `_dtype` tracking must agree with numpy's dtype-promotion rules
    # across arithmetic, reductions, ufuncs and structured arrays.
    x = np.arange(24).reshape((4, 6)).astype('f4')
    y = np.arange(24).reshape((4, 6)).astype('i8')
    z = np.arange(24).reshape((4, 6)).astype('i2')

    a = da.from_array(x, chunks=(2, 3))
    b = da.from_array(y, chunks=(2, 3))
    c = da.from_array(z, chunks=(2, 3))

    def eq(a, b):
        # Local dtype-comparison helper; deliberately shadows the
        # module-level array-comparison `eq` within this test.
        return (isinstance(a, np.dtype) and
                isinstance(b, np.dtype) and
                str(a) == str(b))

    assert eq(a._dtype, x.dtype)
    assert eq(b._dtype, y.dtype)

    assert eq((a + 1)._dtype, (x + 1).dtype)
    assert eq((a + b)._dtype, (x + y).dtype)
    assert eq(a.T._dtype, x.T.dtype)
    assert eq(a[:3]._dtype, x[:3].dtype)
    assert eq((a.dot(b.T))._dtype, (x.dot(y.T)).dtype)

    assert eq(stack([a, b])._dtype, np.vstack([x, y]).dtype)
    assert eq(concatenate([a, b])._dtype, np.concatenate([x, y]).dtype)

    assert eq(b.std()._dtype, y.std().dtype)
    assert eq(c.sum()._dtype, z.sum().dtype)
    # NOTE(review): the next three compare a dask result's _dtype against
    # the *same* dask result's .dtype -- they look tautological; the numpy
    # counterparts (x.min(), y.std(), x.argmin()) were probably intended.
    assert eq(a.min()._dtype, a.min().dtype)
    assert eq(b.std()._dtype, b.std().dtype)
    assert eq(a.argmin(axis=0)._dtype, a.argmin(axis=0).dtype)

    assert eq(da.sin(z)._dtype, np.sin(c).dtype)
    assert eq(da.exp(b)._dtype, np.exp(y).dtype)
    assert eq(da.floor(a)._dtype, np.floor(x).dtype)
    assert eq(da.isnan(b)._dtype, np.isnan(y).dtype)
    with ignoring(ImportError):
        assert da.isnull(b)._dtype == 'bool'
        assert da.notnull(b)._dtype == 'bool'

    # Structured (record) arrays: field selection preserves field dtypes.
    x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
    d = da.from_array(x, chunks=(1,))

    assert eq(d['text']._dtype, x['text'].dtype)
    assert eq(d[['numbers', 'text']]._dtype, x[['numbers', 'text']].dtype)


def test_astype():
    x = np.ones(5, dtype='f4')
    d = da.from_array(x, chunks=(2,))

    assert d.astype('i8')._dtype == 'i8'
    assert eq(d.astype('i8'), x.astype('i8'))
def test_arithmetic():
    """Element-wise operators and numpy-compatible ufuncs on dask arrays
    produce the same results as numpy on the backing data.

    ``x``/``a`` are float32, ``y``/``b`` int64, ``z``/``c`` int32, so the
    assertions also exercise mixed-dtype promotion.
    """
    x = np.arange(5).astype('f4') + 2
    y = np.arange(5).astype('i8') + 2
    z = np.arange(5).astype('i4') + 2
    a = da.from_array(x, chunks=(2,))
    b = da.from_array(y, chunks=(2,))
    c = da.from_array(z, chunks=(2,))

    # array <op> array
    assert eq(a + b, x + y)
    assert eq(a * b, x * y)
    assert eq(a - b, x - y)
    assert eq(a / b, x / y)
    assert eq(b & b, y & y)
    assert eq(b | b, y | y)
    assert eq(b ^ b, y ^ y)
    assert eq(a // b, x // y)
    assert eq(a ** b, x ** y)
    assert eq(a % b, x % y)
    assert eq(a > b, x > y)
    assert eq(a < b, x < y)
    assert eq(a >= b, x >= y)
    assert eq(a <= b, x <= y)
    assert eq(a == b, x == y)
    assert eq(a != b, x != y)

    # array <op> scalar
    assert eq(a + 2, x + 2)
    assert eq(a * 2, x * 2)
    assert eq(a - 2, x - 2)
    assert eq(a / 2, x / 2)
    assert eq(b & True, y & True)
    assert eq(b | True, y | True)
    assert eq(b ^ True, y ^ True)
    assert eq(a // 2, x // 2)
    assert eq(a ** 2, x ** 2)
    assert eq(a % 2, x % 2)
    assert eq(a > 2, x > 2)
    assert eq(a < 2, x < 2)
    assert eq(a >= 2, x >= 2)
    assert eq(a <= 2, x <= 2)
    assert eq(a == 2, x == 2)
    assert eq(a != 2, x != 2)

    # scalar <op> array (reflected operators)
    assert eq(2 + b, 2 + y)
    assert eq(2 * b, 2 * y)
    assert eq(2 - b, 2 - y)
    assert eq(2 / b, 2 / y)
    assert eq(True & b, True & y)
    assert eq(True | b, True | y)
    assert eq(True ^ b, True ^ y)
    assert eq(2 // b, 2 // y)
    assert eq(2 ** b, 2 ** y)
    assert eq(2 % b, 2 % y)
    assert eq(2 > b, 2 > y)
    assert eq(2 < b, 2 < y)
    assert eq(2 >= b, 2 >= y)
    assert eq(2 <= b, 2 <= y)
    assert eq(2 == b, 2 == y)
    assert eq(2 != b, 2 != y)

    # unary operators (the original repeated the ~(a == b) assertion twice;
    # the duplicate has been removed)
    assert eq(-a, -x)
    assert eq(abs(a), abs(x))
    assert eq(~(a == b), ~(x == y))

    # ufuncs
    assert eq(da.logaddexp(a, b), np.logaddexp(x, y))
    assert eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
    assert eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
    assert eq(da.exp(b), np.exp(y))
    assert eq(da.log(a), np.log(x))
    assert eq(da.log10(a), np.log10(x))
    assert eq(da.log1p(a), np.log1p(x))
    assert eq(da.expm1(b), np.expm1(y))
    assert eq(da.sqrt(a), np.sqrt(x))
    assert eq(da.square(a), np.square(x))

    assert eq(da.sin(a), np.sin(x))
    assert eq(da.cos(b), np.cos(y))
    assert eq(da.tan(a), np.tan(x))
    assert eq(da.arcsin(b/10), np.arcsin(y/10))
    assert eq(da.arccos(b/10), np.arccos(y/10))
    assert eq(da.arctan(b/10), np.arctan(y/10))
    assert eq(da.arctan2(b*10, a), np.arctan2(y*10, x))
    assert eq(da.hypot(b, a), np.hypot(y, x))
    assert eq(da.sinh(a), np.sinh(x))
    assert eq(da.cosh(b), np.cosh(y))
    assert eq(da.tanh(a), np.tanh(x))
    assert eq(da.arcsinh(b*10), np.arcsinh(y*10))
    assert eq(da.arccosh(b*10), np.arccosh(y*10))
    assert eq(da.arctanh(b/10), np.arctanh(y/10))
    assert eq(da.deg2rad(a), np.deg2rad(x))
    assert eq(da.rad2deg(a), np.rad2deg(x))

    assert eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
    assert eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
    assert eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
    assert eq(da.logical_not(a < 1), np.logical_not(x < 1))

    # Bug fix: the reference side previously operated on the *dask* arrays
    # (np.maximum(a, 5 - a), ...), so these assertions compared dask with
    # dask and verified nothing.  Compare against the numpy inputs instead.
    assert eq(da.maximum(a, 5 - a), np.maximum(x, 5 - x))
    assert eq(da.minimum(a, 5 - a), np.minimum(x, 5 - x))
    assert eq(da.fmax(a, 5 - a), np.fmax(x, 5 - x))
    assert eq(da.fmin(a, 5 - a), np.fmin(x, 5 - x))

    assert eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
    assert eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
    assert eq(da.isfinite(a), np.isfinite(x))
    assert eq(da.isinf(a), np.isinf(x))
    assert eq(da.isnan(a), np.isnan(x))
    assert eq(da.signbit(a - 3), np.signbit(x - 3))
    assert eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
    assert eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
    assert eq(da.ldexp(c, c), np.ldexp(z, z))
    assert eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
    assert eq(da.floor(a * 0.5), np.floor(x * 0.5))
    assert eq(da.ceil(a), np.ceil(x))
    assert eq(da.trunc(a / 2), np.trunc(x / 2))
    assert eq(da.degrees(b), np.degrees(y))
    assert eq(da.radians(a), np.radians(x))

    assert eq(da.rint(a + 0.3), np.rint(x + 0.3))
    assert eq(da.fix(a - 2.5), np.fix(x - 2.5))

    assert eq(da.angle(a + 1j), np.angle(x + 1j))
    assert eq(da.real(a + 1j), np.real(x + 1j))
    assert eq(da.imag(a + 1j), np.imag(x + 1j))

    assert eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
    assert eq(da.fabs(b), np.fabs(y))
    # Bug fix: the reference for da.sign was np.fabs in the original,
    # which only passed by coincidence of the chosen inputs.
    assert eq(da.sign(b - 2), np.sign(y - 2))

    # Ufuncs that return two outputs.
    l1, l2 = da.frexp(a)
    r1, r2 = np.frexp(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    l1, l2 = da.modf(a)
    r1, r2 = np.modf(x)
    assert eq(l1, r1)
    assert eq(l2, r2)

    assert eq(da.around(a, -1), np.around(x, -1))
def test_reductions():
    # Full-array reductions (including the NaN-aware variants) match numpy.
    x = np.arange(5).astype('f4')
    a = da.from_array(x, chunks=(2,))

    assert eq(da.all(a), np.all(x))
    assert eq(da.any(a), np.any(x))
    assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
    assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
    assert eq(da.max(a), np.max(x))
    assert eq(da.mean(a), np.mean(x))
    assert eq(da.min(a), np.min(x))
    assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
    assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
    assert eq(da.nanmax(a), np.nanmax(x))
    assert eq(da.nanmin(a), np.nanmin(x))
    assert eq(da.nansum(a), np.nansum(x))
    assert eq(da.nanvar(a), np.nanvar(x))
    assert eq(da.nanstd(a), np.nanstd(x))


def test_optimize():
    # optimize() returns a pruned graph that still contains all output keys.
    x = np.arange(5).astype('f4')
    a = da.from_array(x, chunks=(2,))
    expr = a[1:4] + 1
    result = optimize(expr.dask, expr._keys())
    assert isinstance(result, dict)
    assert all(key in result for key in expr._keys())


def test_slicing_with_non_ndarrays():
    # Any object exposing dtype/shape/__getitem__ can back a dask array.
    class ARangeSlice(object):
        # Lazy slice: converts to an ndarray on demand via __array__.
        def __init__(self, start, stop):
            self.start = start
            self.stop = stop

        def __array__(self):
            return np.arange(self.start, self.stop)

    class ARangeSlicable(object):
        # Minimal duck-typed "array-like": dtype, shape and slice access.
        dtype = 'i8'

        def __init__(self, n):
            self.n = n

        @property
        def shape(self):
            return (self.n,)

        def __getitem__(self, key):
            return ARangeSlice(key[0].start, key[0].stop)

    x = da.from_array(ARangeSlicable(10), chunks=(4,))

    assert eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())


def test_getarray():
    # getarray() coerces np.matrix results back to a plain ndarray.
    assert type(getarray(np.matrix([[1]]), 0)) == np.ndarray


def test_squeeze():
    x = da.ones((10, 1), chunks=(3, 1))

    assert eq(x.squeeze(), x.compute().squeeze())
    # Squeezing drops the length-1 axis but keeps the other axis' chunks.
    assert x.squeeze().chunks == ((3, 3, 3, 1),)


def test_size():
    x = da.ones((10, 2), chunks=(3, 1))
    assert x.size == np.array(x).size


def test_nbytes():
    x = da.ones((10, 2), chunks=(3, 1))
    assert x.nbytes == np.array(x).nbytes


def test_Array_normalizes_dtype():
    # Python types like `int` are normalized to np.dtype instances.
    x = da.ones((3,), chunks=(1,), dtype=int)
    assert isinstance(x.dtype, np.dtype)


def test_args():
    # An Array can be reconstructed from its serialized _args tuple.
    x = da.ones((10, 2), chunks=(3, 1), dtype='i4') + 1
    y = Array(*x._args)
    assert eq(x, y)


def test_from_array_with_lock():
    # lock=True creates one shared lock used by every chunk-getter task;
    # an explicit lock object may be shared between several arrays.
    x = np.arange(10)
    d = da.from_array(x, chunks=5, lock=True)

    tasks = [v for k, v in d.dask.items() if k[0] == d.name]

    assert isinstance(tasks[0][3], type(Lock()))
    assert len(set(task[3] for task in tasks)) == 1
    assert eq(d, x)

    lock = Lock()
    e = da.from_array(x, chunks=5, lock=lock)
    f = da.from_array(x, chunks=5, lock=lock)

    assert eq(e + f, x + x)
| bsd-3-clause |
rbrito/pkg-youtube-dl | youtube_dl/extractor/dotsub.py | 51 | 3079 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
class DotsubIE(InfoExtractor):
    """Extractor for dotsub.com video pages.

    Metadata comes from the site's JSON API; the media URL comes either
    from that metadata, from the page's <source> tag, or (for embedded
    third-party players such as Vimeo) from the player setup JSON.
    """
    _VALID_URL = r'https?://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://dotsub.com/view/9c63db2a-fa95-4838-8e6e-13deafe47f09',
        'md5': '21c7ff600f545358134fea762a6d42b6',
        'info_dict': {
            'id': '9c63db2a-fa95-4838-8e6e-13deafe47f09',
            'ext': 'flv',
            'title': 'MOTIVATION - "It\'s Possible" Best Inspirational Video Ever',
            'description': 'md5:41af1e273edbbdfe4e216a78b9d34ac6',
            'thumbnail': 're:^https?://dotsub.com/media/9c63db2a-fa95-4838-8e6e-13deafe47f09/p',
            'duration': 198,
            'uploader': 'liuxt',
            'timestamp': 1385778501.104,
            'upload_date': '20131130',
            'view_count': int,
        }
    }, {
        'url': 'https://dotsub.com/view/747bcf58-bd59-45b7-8c8c-ac312d084ee6',
        'md5': '2bb4a83896434d5c26be868c609429a3',
        'info_dict': {
            'id': '168006778',
            'ext': 'mp4',
            'title': 'Apartments and flats in Raipur the white symphony',
            'description': 'md5:784d0639e6b7d1bc29530878508e38fe',
            'thumbnail': 're:^https?://dotsub.com/media/747bcf58-bd59-45b7-8c8c-ac312d084ee6/p',
            'duration': 290,
            'timestamp': 1476767794.2809999,
            'upload_date': '20161018',
            'uploader': 'parthivi001',
            'uploader_id': 'user52596202',
            'view_count': int,
        },
        'add_ie': ['Vimeo'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Title, duration, uploader etc. come from the metadata API.
        info = self._download_json(
            'https://dotsub.com/api/media/%s/metadata' % video_id, video_id)
        video_url = info.get('mediaURI')

        if not video_url:
            # No direct media URI: scrape the page for a player source URL.
            webpage = self._download_webpage(url, video_id)
            video_url = self._search_regex(
                [r'<source[^>]+src="([^"]+)"', r'"file"\s*:\s*\'([^\']+)'],
                webpage, 'video url', default=None)
        info_dict = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
        }

        if not video_url:
            # Still nothing: the page embeds a third-party player (e.g.
            # Vimeo) -- defer to it with a url_transparent result.
            setup_data = self._parse_json(self._html_search_regex(
                r'(?s)data-setup=([\'"])(?P<content>(?!\1).+?)\1',
                webpage, 'setup data', group='content'), video_id)
            info_dict = {
                '_type': 'url_transparent',
                'url': setup_data['src'],
            }

        # Merge in the API metadata regardless of where the URL came from.
        info_dict.update({
            'title': info['title'],
            'description': info.get('description'),
            'thumbnail': info.get('screenshotURI'),
            'duration': int_or_none(info.get('duration'), 1000),
            'uploader': info.get('user'),
            'timestamp': float_or_none(info.get('dateCreated'), 1000),
            'view_count': int_or_none(info.get('numberOfViews')),
        })

        return info_dict
| unlicense |
oberstet/autobahn-python | examples/twisted/websocket/wxpython/client.py | 3 | 5425 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import wx
import json
from pprint import pprint
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class MyFrame(wx.Frame):
    """
    Our UI frame to show.

    This is taken from http://www.wxpython.org/test7.py.html and modified
    for sending events via WebSocket.
    """

    def __init__(self, app):
        # First, call the base class' __init__ method to create the frame
        wx.Frame.__init__(self, None, -1, "wxPython/Autobahn WebSocket Demo")

        self._app = app

        # Associate some events with methods of this class
        self.Bind(wx.EVT_MOVE, self.OnMove)

        # Add a panel and some controls to display the size and position
        panel = wx.Panel(self, -1)
        label1 = wx.StaticText(panel, -1, "WebSocket messages received:")
        label2 = wx.StaticText(panel, -1, "Window position:")
        self.sizeCtrl = wx.TextCtrl(panel, -1, "", style=wx.TE_READONLY)
        self.posCtrl = wx.TextCtrl(panel, -1, "", style=wx.TE_READONLY)
        self.panel = panel

        # Use some sizers for layout of the widgets
        sizer = wx.FlexGridSizer(2, 2, 5, 5)
        sizer.Add(label1)
        sizer.Add(self.sizeCtrl)
        sizer.Add(label2)
        sizer.Add(self.posCtrl)
        border = wx.BoxSizer()
        border.Add(sizer, 0, wx.ALL, 15)
        panel.SetSizerAndFit(border)
        self.Fit()

    # This method is called by the System when the window is moved,
    # because of the association above.
    def OnMove(self, event):
        # Echo the new position in the UI and, when connected, send it
        # to the WebSocket server as a JSON event.
        pos = event.GetPosition()
        self.posCtrl.SetValue("%s, %s" % (pos.x, pos.y))
        if self._app._factory:
            proto = self._app._factory._proto
            if proto:
                evt = {'x': pos.x, 'y': pos.y}
                msg = json.dumps(evt).encode('utf8')
                proto.sendMessage(msg)


class MyClientProtocol(WebSocketClientProtocol):
    """
    Our protocol for WebSocket client connections.
    """

    def onOpen(self):
        print("WebSocket connection open.")

        # the WebSocket connection is open. we store ourselves on the
        # factory object, so that we can access this protocol instance
        # from wxPython, e.g. to use sendMessage() for sending WS msgs
        ##
        self.factory._proto = self
        # Count of messages received, shown in the UI frame.
        self._received = 0

    def onMessage(self, payload, isBinary):
        # a WebSocket message was received. now interpret it, possibly
        # accessing the wxPython app `self.factory._app` or our
        # single UI frame `self.factory._app._frame`
        ##
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))
        self._received += 1
        frame = self.factory._app._frame
        frame.sizeCtrl.SetValue("{}".format(self._received))

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))

        # the WebSocket connection is gone. clear the reference to ourselves
        # on the factory object. when accessing this protocol instance from
        # wxPython, always check if the ref is None. only use it when it's
        # not None (which means, we are actually connected)
        ##
        self.factory._proto = None


class MyClientFactory(WebSocketClientFactory):
    """
    Our factory for WebSocket client connections.
    """

    protocol = MyClientProtocol

    def __init__(self, url, app):
        WebSocketClientFactory.__init__(self, url)
        self._app = app
        # Currently-connected protocol instance, or None when disconnected.
        self._proto = None


if __name__ == '__main__':

    import sys

    from twisted.internet import wxreactor
    # The wx-aware reactor must be installed before `reactor` is imported.
    wxreactor.install()

    from twisted.internet import reactor
    from twisted.python import log
    log.startLogging(sys.stdout)

    app = wx.App(False)
    app._factory = None
    app._frame = MyFrame(app)
    app._frame.Show()
    reactor.registerWxApp(app)

    app._factory = MyClientFactory("ws://127.0.0.1:9000", app)
    reactor.connectTCP("127.0.0.1", 9000, app._factory)

    reactor.run()
| mit |
algorithm-ninja/cmsocial | cmsocial/scripts/duplicate_contest.py | 1 | 5312 | # -*- coding: utf-8 -*-
import argparse
import sys
from cms.db import SessionGen, Task, Contest, Participation, User, engine, Dataset, Testcase, \
Statement, Attachment, Manager, SubmissionFormatElement
from cmsocial.db import SocialTask, SocialContest, SocialParticipation, SocialUser, Lesson, \
LessonTask
from sqlalchemy.orm.session import make_transient
from sqlalchemy import or_, Sequence
verbose = False
def apply_edit_list(obj, edit_list):
    """Apply a list of attribute edits to *obj*.

    Each edit is a pair-like item ``(attribute_name, fn)``; the attribute
    is set to ``fn(obj)``, evaluated against the object's current state.
    """
    for edit in edit_list:
        attr, make_value = edit[0], edit[1]
        setattr(obj, attr, make_value(obj))
def apply_filter(query, cls, filter_fun, filter_list):
    """Narrow a SQLAlchemy query on *cls*.

    filter_fun:  callable(query) -> query, for joins / custom conditions.
    filter_list: {column_name: [allowed values]} turned into IN() filters.
    """
    # NOTE: iteritems() -- this module is Python 2 (see the print
    # statements elsewhere in the file).
    query = filter_fun(query)
    for col, values in filter_list.iteritems():
        query = query.filter(getattr(cls, col).in_(values))
    return query
cls_count = dict()
def recursive_clone(session, cls, tree, flt, edit, backedit):
cls_count[cls] = cls_count.get(cls, 0) + 1
indegree = 0
for k, v in tree.iteritems():
for info in v:
if info[0] == cls:
indegree += 1
if cls_count[cls] < indegree:
return []
objects = session.query(cls)
flt_info = flt.get(cls, (lambda x: x, dict()))
objects = apply_filter(objects, cls, flt_info[0], flt_info[1])
objects = objects.order_by(cls.id).all()
if verbose:
print "Cloning %d %s" % (len(objects), cls.__name__)
backeditable_cols = []
for k, v in backedit.iteritems():
for bked in v:
if bked[0] == cls:
backeditable_cols.append(bked[1])
old_values_tmp = dict((bc, dict()) for bc in backeditable_cols)
idmap = dict()
for obj in objects:
session.expunge(obj)
make_transient(obj)
old_id = obj.id
apply_edit_list(obj, edit.get(cls, []))
if obj.id == old_id:
obj.id = None
for bc in backeditable_cols:
old_values_tmp[bc][old_id] = getattr(obj, bc)
setattr(obj, bc, None)
session.add(obj)
session.flush()
idmap[old_id] = obj.id
old_values = dict((bc, dict()) for bc in backeditable_cols)
for bc in backeditable_cols:
for oid in old_values_tmp[bc]:
old_values[bc][idmap[oid]] = old_values_tmp[bc][oid]
backeditlist = []
for child in tree.get(cls, []):
ccls = child[0]
cflt_info = flt.get(ccls, (lambda x: x, dict()))
cflt_info[1][child[1]] = idmap.keys()
flt[ccls] = cflt_info
cedit = edit.get(ccls, [])
cedit.append((child[1], lambda x: idmap.get(getattr(x, child[1]))))
edit[ccls] = cedit
backeditlist += recursive_clone(session, ccls, tree, flt, edit, backedit)
extra_edits = []
id_to_val = dict()
for edl in backeditlist:
if edl[0] == cls:
oldv = old_values.get(edl[1])
extra_edits.append((edl[1], lambda x: edl[2][oldv[x.id]]))
if len(extra_edits) > 0:
for obj in objects:
apply_edit_list(obj, extra_edits)
session.add(obj)
session.flush()
bked = backedit.get(cls, [])
for clscol in bked:
backeditlist.append((clscol[0], clscol[1], idmap))
return backeditlist
def main():
    """Parse command-line arguments and clone the requested contest.

    The run is a dry run (rolled back) unless -r/--for-real is passed.
    """
    parser = argparse.ArgumentParser(description="Clone a contest")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("-r", "--for-real", action="store_true")
    parser.add_argument("old_contest")
    parser.add_argument("new_contest")
    args, _ = parser.parse_known_args()
    old_contest = args.old_contest
    new_contest = args.new_contest
    global verbose
    verbose = args.verbose
    dryrun = not args.for_real

    # Parent class -> [(child class, foreign-key column), ...]; defines
    # which related rows are cloned together with the contest.
    clone_tree = {
        Contest: [
            (SocialContest, "id"),
            (Participation, "contest_id"),
            (Task, "contest_id"),
            (Lesson, "contest_id")],
        Participation: [(SocialParticipation, "id")],
        Task: [
            (Dataset, "task_id"),
            (LessonTask, "task_id"),
            (Statement, "task_id"),
            (Attachment, "task_id"),
            (SocialTask, "id"),
            (SubmissionFormatElement, "task_id")
        ],
        Dataset: [(Testcase, "dataset_id"), (Manager, "dataset_id")],
        Lesson: [(LessonTask, "lesson_id")]
    }
    # Row selection: only the source contest, and only participations
    # whose user or participation has access_level 0 (admins).
    clone_filter = {
        Contest: (lambda x: x, {"name": [old_contest]}),
        Participation: (lambda x:
            x.join(SocialParticipation).join(User).join(SocialUser)\
            .filter(or_(SocialParticipation.access_level == 0, SocialUser.access_level == 0)),
            dict())
    }
    # Column rewrites applied to the cloned rows.
    clone_edit = {
        Contest: [("name", lambda x: new_contest)],
        Task: [("name", lambda x: x.name + "_dup")],
        SocialTask: [("access_level", lambda x: 0)],
        Lesson: [("access_level", lambda x: 0)],
        SocialParticipation: [("score", lambda x: 0)]
    }
    # Task.active_dataset_id must be remapped once Datasets are cloned.
    clone_backedit = {Dataset: [(Task, "active_dataset_id")]}
    with SessionGen() as session:
        with session.no_autoflush:
            recursive_clone(session, Contest, clone_tree, clone_filter, clone_edit, clone_backedit)
        if dryrun:
            print "Dry run requested, rolling back changes."
            session.rollback()
        else:
            print "Everything OK, committing..."
            session.commit()
| agpl-3.0 |
schlueter/ansible | lib/ansible/modules/network/nxos/nxos_vrf.py | 11 | 14549 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages global VRF configuration.
description:
- This module provides declarative management of VRFs
on CISCO NXOS network devices.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Cisco NX-OS creates the default VRF by itself. Therefore,
you're not allowed to use default as I(vrf) name in this module.
- C(vrf) name must be shorter than 32 chars.
  - VRF names are not case sensitive in NX-OS. However, the name is stored
    just as it is entered by the user and it will not be changed again
    unless the VRF is removed and re-created. E.g. C(vrf=NTC) will create
    a VRF named NTC, but running it again with C(vrf=ntc) will not cause
    a configuration change.
options:
name:
description:
- Name of VRF to be managed.
required: true
aliases: [vrf]
admin_state:
description:
- Administrative state of the VRF.
required: false
default: up
choices: ['up','down']
vni:
description:
- Specify virtual network identifier. Valid values are Integer
or keyword 'default'.
required: false
default: null
version_added: "2.2"
rd:
description:
- VPN Route Distinguisher (RD). Valid values are a string in
one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or
IPV4:NN); the keyword 'auto', or the keyword 'default'.
required: false
default: null
version_added: "2.2"
interfaces:
description:
- List of interfaces to check the VRF has been
configured correctly.
version_added: 2.5
aggregate:
description: List of VRFs definitions.
version_added: 2.5
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
version_added: 2.5
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present','absent']
description:
description:
- Description of the VRF.
required: false
default: null
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
'''
EXAMPLES = '''
- name: Ensure ntc VRF exists on switch
nxos_vrf:
name: ntc
description: testing
state: present
- name: Aggregate definition of VRFs
nxos_vrf:
aggregate:
- { name: test1, description: Testing, admin_state: down }
- { name: test2, interfaces: Ethernet1/2 }
- name: Aggregate definitions of VRFs with Purge
nxos_vrf:
aggregate:
- { name: ntc1, description: purge test1 }
- { name: ntc2, description: purge test2 }
state: present
purge: yes
- name: Delete VRFs exist on switch
nxos_vrf:
aggregate:
- { name: ntc1 }
- { name: ntc2 }
state: absent
- name: Assign interfaces to VRF declaratively
nxos_vrf:
name: test1
interfaces:
- Ethernet2/3
- Ethernet2/5
- name: Ensure VRF is tagged with interface Ethernet2/5 only (Removes from Ethernet2/3)
nxos_vrf:
name: test1
interfaces:
- Ethernet2/5
- name: Delete VRF
nxos_vrf:
name: ntc
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample:
- vrf context ntc
- no shutdown
- interface Ethernet1/2
- no switchport
- vrf member test2
'''
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.network.common.utils import remove_default_spec
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    return next((item for item in lst if item['name'] == name), None)
def execute_show_command(command, module):
    """Run one show command on the device.

    Structured JSON output is requested except for `show run` commands,
    which only support plain text.
    """
    if 'show run' not in command:
        output = 'json'
    else:
        output = 'text'
    cmds = [{
        'command': command,
        'output': output,
    }]
    body = run_commands(module, cmds)
    return body


def get_existing_vrfs(module):
    """Return [{'name': vrf_name}, ...] for every VRF on the device.

    Any parsing problem yields an empty list rather than an error.
    """
    objs = list()
    command = "show vrf all"
    try:
        body = execute_show_command(command, module)[0]
    except IndexError:
        return list()
    try:
        vrf_table = body['TABLE_vrf']['ROW_vrf']
    except (TypeError, IndexError, KeyError):
        return list()

    # NX-OS returns a dict for a single VRF, a list of dicts for several.
    if isinstance(vrf_table, list):
        for vrf in vrf_table:
            obj = {}
            obj['name'] = vrf['vrf_name']
            objs.append(obj)

    elif isinstance(vrf_table, dict):
        obj = {}
        obj['name'] = vrf_table['vrf_name']
        objs.append(obj)

    return objs
def map_obj_to_commands(updates, module):
    """Build the NX-OS CLI commands that move the device from ``have``
    (current state) to ``want`` (desired state).

    ``updates`` is a ``(want, have)`` tuple; both elements are lists of
    VRF dicts as produced by map_params_to_obj / map_config_to_obj.
    With ``purge`` enabled, VRFs present on the device but absent from
    ``want`` are removed (except 'default' and 'management').
    """
    commands = list()
    want, have = updates
    state = module.params['state']
    purge = module.params['purge']

    for w in want:
        name = w['name']
        description = w['description']
        vni = w['vni']
        rd = w['rd']
        admin_state = w['admin_state']
        interfaces = w.get('interfaces') or []
        state = w['state']
        del w['state']

        obj_in_have = search_obj_in_list(name, have)

        if state == 'absent' and obj_in_have:
            commands.append('no vrf context {0}'.format(name))

        elif state == 'present':
            if not obj_in_have:
                # VRF does not exist yet: create it with all requested
                # properties in one configuration block.
                commands.append('vrf context {0}'.format(name))

                if rd and rd != '':
                    commands.append('rd {0}'.format(rd))

                if description:
                    commands.append('description {0}'.format(description))

                if vni and vni != '':
                    commands.append('vni {0}'.format(vni))

                if admin_state == 'up':
                    commands.append('no shutdown')
                elif admin_state == 'down':
                    commands.append('shutdown')

                if commands:
                    # Bug fix: the original code checked ``have.get('vni')``
                    # here, but ``have`` is a *list* of VRF dicts, so any
                    # play setting ``vni`` on a new VRF crashed with
                    # AttributeError.  Since this VRF does not exist yet
                    # (obj_in_have is None) there is no old vni to remove.
                    commands.append('exit')

                if interfaces:
                    for i in interfaces:
                        commands.append('interface {0}'.format(i))
                        commands.append('no switchport')
                        commands.append('vrf member {0}'.format(name))

            else:
                if interfaces:
                    if not obj_in_have['interfaces']:
                        # VRF exists but has no member interfaces yet.
                        for i in interfaces:
                            commands.append('vrf context {0}'.format(name))
                            commands.append('exit')
                            commands.append('interface {0}'.format(i))
                            commands.append('no switchport')
                            commands.append('vrf member {0}'.format(name))

                    elif set(interfaces) != set(obj_in_have['interfaces']):
                        # Add wanted-but-missing interfaces ...
                        missing_interfaces = list(set(interfaces) - set(obj_in_have['interfaces']))
                        for i in missing_interfaces:
                            commands.append('vrf context {0}'.format(name))
                            commands.append('exit')
                            commands.append('interface {0}'.format(i))
                            commands.append('no switchport')
                            commands.append('vrf member {0}'.format(name))

                        # ... and remove configured-but-unwanted ones.
                        superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(interfaces))
                        for i in superfluous_interfaces:
                            commands.append('vrf context {0}'.format(name))
                            commands.append('exit')
                            commands.append('interface {0}'.format(i))
                            commands.append('no switchport')
                            commands.append('no vrf member {0}'.format(name))

    if purge:
        existing = get_existing_vrfs(module)
        if existing:
            for h in existing:
                if h['name'] in ('default', 'management'):
                    # Built-in VRFs are never purged.
                    pass
                else:
                    obj_in_want = search_obj_in_list(h['name'], want)
                    if not obj_in_want:
                        commands.append('no vrf context {0}'.format(h['name']))

    return commands
def validate_vrf(name, module):
    """Validate a VRF name and return it.

    Fails the module run when the reserved name 'default' is used or the
    name exceeds the NX-OS 32-character limit.
    """
    if name == 'default':
        module.fail_json(msg='cannot use default as name of a VRF')
        return
    if len(name) > 32:
        module.fail_json(msg='VRF name exceeded max length of 32', name=name)
        return
    return name
def map_params_to_obj(module):
    """Normalize module parameters into a list of desired-state VRF dicts.

    With `aggregate`, per-item keys that are None fall back to the
    top-level module parameters; otherwise a single dict is built from
    the top-level parameters.  Every VRF name is validated.
    """
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            d = item.copy()
            d['name'] = validate_vrf(d['name'], module)
            obj.append(d)
    else:
        obj.append({
            'name': validate_vrf(module.params['name'], module),
            'description': module.params['description'],
            'vni': module.params['vni'],
            'rd': module.params['rd'],
            'admin_state': module.params['admin_state'],
            'state': module.params['state'],
            'interfaces': module.params['interfaces']
        })
    return obj
def get_value(arg, config, module):
    """Return the text following ``arg`` on its line in ``config``.

    Returns '' when ``arg`` does not occur in the config text.  ``module``
    is unused but kept for interface compatibility with the other mappers.
    NOTE(review): the membership test is a plain substring check, so an
    arg embedded in a longer word would also trigger the regex search.
    """
    if arg not in config:
        return ''
    pattern = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M)
    return pattern.search(config).group('value')
def map_config_to_obj(want, element_spec, module):
    """Query the device for each wanted VRF and build the current state.

    Returns a list of dicts shaped like element_spec (minus delay/state).
    Returns an empty list as soon as any VRF lookup comes back empty or
    malformed.
    """
    objs = list()

    for w in want:
        obj = deepcopy(element_spec)
        del obj['delay']
        del obj['state']

        command = 'show vrf {0}'.format(w['name'])
        try:
            body = execute_show_command(command, module)[0]
            vrf_table = body['TABLE_vrf']['ROW_vrf']
        except (TypeError, IndexError):
            return list()

        name = vrf_table['vrf_name']
        obj['name'] = name
        obj['admin_state'] = vrf_table['vrf_state'].lower()

        # vni / rd / description are only visible in the running config.
        command = 'show run all | section vrf.context.{0}'.format(name)
        body = execute_show_command(command, module)[0]
        extra_params = ['vni', 'rd', 'description']
        for param in extra_params:
            obj[param] = get_value(param, body, module)

        obj['interfaces'] = []
        command = 'show vrf {0} interface'.format(name)
        try:
            body = execute_show_command(command, module)[0]
            vrf_int = body['TABLE_if']['ROW_if']
        except (TypeError, IndexError):
            vrf_int = None

        if vrf_int:
            # NX-OS returns a dict for one member interface, a list for
            # several.
            if isinstance(vrf_int, list):
                for i in vrf_int:
                    intf = i['if_name']
                    obj['interfaces'].append(intf)
            elif isinstance(vrf_int, dict):
                intf = vrf_int['if_name']
                obj['interfaces'].append(intf)

        objs.append(obj)

    return objs


def check_declarative_intent_params(want, element_spec, module):
    """Verify the declared interface membership after a change.

    Waits `delay` seconds, re-reads the device state, and fails the
    module if any wanted interface is missing from its VRF.
    """
    if module.params['interfaces']:
        time.sleep(module.params['delay'])
        have = map_config_to_obj(want, element_spec, module)

        for w in want:
            for i in w['interfaces']:
                obj_in_have = search_obj_in_list(w['name'], have)

                if obj_in_have:
                    interfaces = obj_in_have.get('interfaces')
                    if interfaces is not None and i not in interfaces:
                        module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def main():
    """ main entry point for module execution
    """
    # Per-VRF options; also valid inside each `aggregate` item.
    element_spec = dict(
        name=dict(aliases=['vrf']),
        description=dict(),
        vni=dict(type=str),
        rd=dict(type=str),
        admin_state=dict(default='up', choices=['up', 'down']),
        interfaces=dict(type='list'),
        delay=dict(default=10, type='int'),
        state=dict(default='present', choices=['present', 'absent'])
    )

    aggregate_spec = deepcopy(element_spec)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
        purge=dict(default=False, type='bool')
    )

    argument_spec.update(element_spec)
    argument_spec.update(nxos_argument_spec)

    # Exactly one of `name` / `aggregate` must be provided.
    required_one_of = [['name', 'aggregate']]
    mutually_exclusive = [['name', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    # Desired vs. current state, then the command diff between them.
    want = map_params_to_obj(module)
    have = map_config_to_obj(want, element_spec, module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    # Only push configuration outside check mode.
    if commands and not module.check_mode:
        load_config(module, commands)
        result['changed'] = True

    if result['changed']:
        # Verify the declarative `interfaces` intent after applying.
        check_declarative_intent_params(want, element_spec, module)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
PyBossa/pybossa | alembic/versions/4a571e217ab8_add_webhooks_table.py | 3 | 1030 | """Add webhooks table
Revision ID: 4a571e217ab8
Revises: 3a98a6674cb2
Create Date: 2015-08-17 16:52:28.279419
"""
# revision identifiers, used by Alembic.
revision = '4a571e217ab8'
down_revision = '3a98a6674cb2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON
import datetime
def make_timestamp():
    """Return the current UTC time as an ISO-8601 formatted string."""
    return datetime.datetime.utcnow().isoformat()
def upgrade():
op.create_table('webhook',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('created', sa.Text, default=make_timestamp),
sa.Column('updated', sa.Text, default=make_timestamp),
sa.Column('project_id', sa.Integer,
sa.ForeignKey('project.id')),
sa.Column('payload', JSON),
sa.Column('response', sa.Text),
sa.Column('response_status_code', sa.Integer)
)
def downgrade():
    # Revert the migration by dropping the webhook table created in upgrade().
    op.drop_table('webhook')
| agpl-3.0 |
redline-forensics/auto-dm | controllers/google_maps_ctrl.py | 1 | 1891 | import os
from controllers.google_maps_earth_ctrl import GoogleMapsEarthController
from utils.desktop_utils import open_path
class GoogleMapsController(GoogleMapsEarthController):
    # Controller for the Google Maps image-stitching workflow.  Extends the
    # shared GoogleMapsEarthController by wiring model signals to progress
    # dialogs and by saving the stitched image plus its .tfw world file.

    def __init__(self, google_maps_model):
        super(GoogleMapsController, self).__init__(google_maps_model)

        # Relay model signals to the progress/completion handlers below.
        self.google_maps_earth_model.begin_stitching.connect(self.on_begin_image_creation)
        self.google_maps_earth_model.update_progress.connect(self.on_image_creation_progress_update)
        self.google_maps_earth_model.done_stitching.connect(self.on_image_created)

    def init_ui(self, google_maps_view):
        super(GoogleMapsController, self).init_ui(google_maps_view)

    def create_image(self):
        # Start stitching over the rectangle currently selected in the view.
        point_a, point_b = self.google_maps_earth_view.map_rectangle
        self.google_maps_earth_model.create_image(point_a, point_b, self.google_maps_earth_view.interval)

    def cancel_image_create(self):
        self.google_maps_earth_model.cancel_image_create()

    def on_begin_image_creation(self, max):
        # max: total number of progress steps for the stitching job.
        self.google_maps_earth_view.show_stitching_progress_dialog(max)

    def on_image_creation_progress_update(self, progress):
        self.google_maps_earth_view.update_stitching_progress_dialog(progress)

    def on_image_created(self, image, tfw):
        # Stitching finished: prompt for a save location, write the image and
        # its matching .tfw world file, then open the saved image.
        self.google_maps_earth_view.close_stitching_progress_dialog()

        image_file = None
        while not image_file:
            # Re-prompt until the user picks a file or confirms discarding.
            image_file = self.google_maps_earth_view.show_image_save_dialog(self.google_maps_earth_model.assets_folder)
            if not image_file:
                discard = self.google_maps_earth_view.show_discard_confirmation_dialog()
                if discard:
                    return

        image.save(image_file)

        # World file shares the image's base name with a .tfw extension.
        tfw_file = os.path.splitext(image_file)[0] + ".tfw"
        with open(tfw_file, "w") as f:
            f.write(tfw)

        open_path(image_file)
| gpl-3.0 |
susansls/zulip | zerver/templatetags/app_filters.py | 9 | 2978 | from django.conf import settings
from django.template import Library
from django.utils.safestring import mark_safe
from django.utils.lru_cache import lru_cache
from zerver.lib.utils import force_text
from typing import List
import zerver.lib.bugdown.fenced_code
import markdown
import markdown.extensions.admonition
import markdown.extensions.codehilite
import markdown.extensions.toc
import markdown_include.include
register = Library()
def and_n_others(values, limit):
    # type: (List[str], int) -> str
    """Build the trailing "and N other(s)" fragment with proper pluralization."""
    hidden = len(values) - limit
    suffix = "" if hidden == 1 else "s"
    return " and %d other%s" % (hidden, suffix)
@register.filter(name='display_list', is_safe=True)
def display_list(values, display_limit):
    # type: (List[str], int) -> str
    """
    Given a list of values, return a string nicely formatting those values,
    summarizing when you have more than `display_limit`. Eg, for a
    `display_limit` of 3 we get the following possible cases:

    Jessica
    Jessica and Waseem
    Jessica, Waseem, and Tim
    Jessica, Waseem, Tim, and 1 other
    Jessica, Waseem, Tim, and 2 others
    """
    if not values:
        # Empty input: render nothing.  Without this guard the branch below
        # would raise an IndexError on values[-1].
        display_string = ""
    elif len(values) == 1:
        # One value, show it.
        display_string = "%s" % (values[0],)
    elif len(values) <= display_limit:
        # Fewer than `display_limit` values, show all of them.
        display_string = ", ".join(
            "%s" % (value,) for value in values[:-1])
        display_string += " and %s" % (values[-1],)
    else:
        # More than `display_limit` values, only mention a few.
        display_string = ", ".join(
            "%s" % (value,) for value in values[:display_limit])
        display_string += and_n_others(values, display_limit)

    return display_string
# Lazily-built, module-level list of markdown extensions shared across calls.
md_extensions = None

@lru_cache(512 if settings.PRODUCTION else 0)
@register.filter(name='render_markdown_path', is_safe=True)
def render_markdown_path(markdown_file_path):
    # type: (str) -> str
    """Given a path to a markdown file, return the rendered html.

    Note that this assumes that any HTML in the markdown file is
    trusted; it is intended to be used for documentation, not user
    data."""
    global md_extensions
    if md_extensions is None:
        md_extensions = [
            markdown.extensions.toc.makeExtension(),
            markdown.extensions.admonition.makeExtension(),
            markdown.extensions.codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
            zerver.lib.bugdown.fenced_code.makeExtension(),
            markdown_include.include.makeExtension(base_path='templates/zerver/help/include/'),
        ]
    md_engine = markdown.Markdown(extensions=md_extensions)
    md_engine.reset()

    # Read via a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(markdown_file_path) as markdown_file:
        markdown_string = force_text(markdown_file.read())
    html = md_engine.convert(markdown_string)
    return mark_safe(html)
| apache-2.0 |
grlee77/pywt | pywt/tests/test_multilevel.py | 3 | 39025 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
import pytest
from numpy.testing import (assert_almost_equal, assert_allclose, assert_,
assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_warns)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
# dtypes_in[i] inputs are expected to produce dtypes_out[i] outputs below.
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
             np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
              np.complex128]

# tolerances used in accuracy comparisons
tol_single = 1e-6
tol_double = 1e-13

# (dtype, tolerance) pairs: single-precision types get the looser tolerance.
dtypes_and_tolerances = [(np.float16, tol_single), (np.float32, tol_single),
                         (np.float64, tol_double), (np.int8, tol_double),
                         (np.complex64, tol_single),
                         (np.complex128, tol_double)]

# test complex256 as well if it is available
# (np.complex256 only exists on platforms with extended-precision support,
# so probe for it and fall back silently otherwise)
try:
    dtypes_in += [np.complex256, ]
    dtypes_out += [np.complex128, ]
    dtypes_and_tolerances += [(np.complex256, tol_double), ]
except AttributeError:
    pass

# determine which wavelets to test
wavelist = pywt.wavelist()
if 'dmey' in wavelist:
    # accuracy is very low for dmey, so omit it
    wavelist.remove('dmey')

# removing wavelets with dwt_possible == False
# (continuous-only wavelets cannot be used with the DWT-based tests here)
del_list = []
for wavelet in wavelist:
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FutureWarning)
        if not isinstance(pywt.DiscreteContinuousWavelet(wavelet),
                          pywt.Wavelet):
            del_list.append(wavelet)
for del_ind in del_list:
    wavelist.remove(del_ind)
def test_wavedec():
    # Multilevel 1D DWT of a small known signal checked against
    # hand-computed Haar (db1) coefficients at each level.
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    db1 = pywt.Wavelet('db1')
    cA3, cD3, cD2, cD1 = pywt.wavedec(x, db1)
    assert_almost_equal(cA3, [8.83883476])
    assert_almost_equal(cD3, [-0.35355339])
    assert_allclose(cD2, [4., -3.5])
    assert_allclose(cD1, [-2.82842712, 0, -4.94974747, -1.41421356])
    # len(x) == 8 with a length-2 filter supports exactly 3 levels
    assert_(pywt.dwt_max_level(len(x), db1) == 3)
def test_waverec_invalid_inputs():
# input must be list or tuple
assert_raises(ValueError, pywt.waverec, np.ones(8), 'haar')
# input list cannot be empty
assert_raises(ValueError, pywt.waverec, [], 'haar')
# 'array_to_coeffs must specify 'output_format' to perform waverec
x = [3, 7, 1, 1, -2, 5, 4, 6]
coeffs = pywt.wavedec(x, 'db1')
arr, coeff_slices = pywt.coeffs_to_array(coeffs)
coeffs_from_arr = pywt.array_to_coeffs(arr, coeff_slices)
message = "Unexpected detail coefficient type"
assert_raises_regex(ValueError, message, pywt.waverec, coeffs_from_arr,
'haar')
def test_waverec_accuracies():
rstate = np.random.RandomState(1234)
x0 = rstate.randn(8)
for dt, tol in dtypes_and_tolerances:
x = x0.astype(dt)
if np.iscomplexobj(x):
x += 1j*rstate.randn(8).astype(x.real.dtype)
coeffs = pywt.wavedec(x, 'db1')
assert_allclose(pywt.waverec(coeffs, 'db1'), x, atol=tol, rtol=tol)
def test_waverec_none():
    # Reconstruction must tolerate coefficient arrays set to None and still
    # produce an output of the original length.
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    coeffs = pywt.wavedec(x, 'db1')

    # set some coefficients to None
    coeffs[2] = None
    coeffs[0] = None
    # Use assert_equal: the previous assert_(a, b) form treated the second
    # argument as a failure *message*, so the check was vacuously true.
    assert_equal(pywt.waverec(coeffs, 'db1').size, len(x))
def test_waverec_odd_length():
x = [3, 7, 1, 1, -2, 5]
coeffs = pywt.wavedec(x, 'db1')
assert_allclose(pywt.waverec(coeffs, 'db1'), x, rtol=1e-12)
def test_waverec_complex():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 1j
coeffs = pywt.wavedec(x, 'db1')
assert_allclose(pywt.waverec(coeffs, 'db1'), x, rtol=1e-12)
def test_multilevel_dtypes_1d():
# only checks that the result is of the expected type
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
# wavedec, waverec
x = np.ones(8, dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
coeffs = pywt.wavedec(x, wavelet, level=2)
for c in coeffs:
assert_(c.dtype == dt_out, "wavedec: " + errmsg)
x_roundtrip = pywt.waverec(coeffs, wavelet)
assert_(x_roundtrip.dtype == dt_out, "waverec: " + errmsg)
def test_waverec_all_wavelets_modes():
# test 2D case using all wavelets and modes
rstate = np.random.RandomState(1234)
r = rstate.randn(80)
for wavelet in wavelist:
for mode in pywt.Modes.modes:
coeffs = pywt.wavedec(r, wavelet, mode=mode)
assert_allclose(pywt.waverec(coeffs, wavelet, mode=mode),
r, rtol=tol_single, atol=tol_single)
####
# 2d multilevel dwt function tests
####
def test_waverec2_accuracies():
rstate = np.random.RandomState(1234)
x0 = rstate.randn(4, 4)
for dt, tol in dtypes_and_tolerances:
x = x0.astype(dt)
if np.iscomplexobj(x):
x += 1j*rstate.randn(4, 4).astype(x.real.dtype)
coeffs = pywt.wavedec2(x, 'db1')
assert_(len(coeffs) == 3)
assert_allclose(pywt.waverec2(coeffs, 'db1'), x, atol=tol, rtol=tol)
def test_multilevel_dtypes_2d():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
# wavedec2, waverec2
x = np.ones((8, 8), dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
cA, coeffsD2, coeffsD1 = pywt.wavedec2(x, wavelet, level=2)
assert_(cA.dtype == dt_out, "wavedec2: " + errmsg)
for c in coeffsD1:
assert_(c.dtype == dt_out, "wavedec2: " + errmsg)
for c in coeffsD2:
assert_(c.dtype == dt_out, "wavedec2: " + errmsg)
x_roundtrip = pywt.waverec2([cA, coeffsD2, coeffsD1], wavelet)
assert_(x_roundtrip.dtype == dt_out, "waverec2: " + errmsg)
@pytest.mark.slow
def test_waverec2_all_wavelets_modes():
# test 2D case using all wavelets and modes
rstate = np.random.RandomState(1234)
r = rstate.randn(80, 96)
for wavelet in wavelist:
for mode in pywt.Modes.modes:
coeffs = pywt.wavedec2(r, wavelet, mode=mode)
assert_allclose(pywt.waverec2(coeffs, wavelet, mode=mode),
r, rtol=tol_single, atol=tol_single)
def test_wavedec2_complex():
data = np.ones((4, 4)) + 1j
coeffs = pywt.wavedec2(data, 'db1')
assert_(len(coeffs) == 3)
assert_allclose(pywt.waverec2(coeffs, 'db1'), data, rtol=1e-12)
def test_wavedec2_invalid_inputs():
# input array has too few dimensions
data = np.ones(4)
assert_raises(ValueError, pywt.wavedec2, data, 'haar')
def test_waverec2_invalid_inputs():
# input must be list or tuple
assert_raises(ValueError, pywt.waverec2, np.ones((8, 8)), 'haar')
# input list cannot be empty
assert_raises(ValueError, pywt.waverec2, [], 'haar')
# coefficients from a difference decomposition used as input
for dec_func in [pywt.wavedec, pywt.wavedecn]:
coeffs = dec_func(np.ones((8, 8)), 'haar')
message = "Unexpected detail coefficient type"
assert_raises_regex(ValueError, message, pywt.waverec2, coeffs,
'haar')
def test_waverec2_coeff_shape_mismatch():
x = np.ones((8, 8))
coeffs = pywt.wavedec2(x, 'db1')
# introduce a shape mismatch in the coefficients
coeffs = list(coeffs)
coeffs[1] = list(coeffs[1])
coeffs[1][1] = np.zeros((16, 1))
assert_raises(ValueError, pywt.waverec2, coeffs, 'db1')
def test_waverec2_odd_length():
x = np.ones((10, 6))
coeffs = pywt.wavedec2(x, 'db1')
assert_allclose(pywt.waverec2(coeffs, 'db1'), x, rtol=1e-12)
def test_waverec2_none_coeffs():
x = np.arange(24).reshape(6, 4)
coeffs = pywt.wavedec2(x, 'db1')
coeffs[1] = (None, None, None)
assert_(x.shape == pywt.waverec2(coeffs, 'db1').shape)
####
# nd multilevel dwt function tests
####
def test_waverecn():
rstate = np.random.RandomState(1234)
# test 1D through 4D cases
for nd in range(1, 5):
x = rstate.randn(*(4, )*nd)
coeffs = pywt.wavedecn(x, 'db1')
assert_(len(coeffs) == 3)
assert_allclose(pywt.waverecn(coeffs, 'db1'), x, rtol=tol_double)
def test_waverecn_empty_coeff():
    # Empty detail-coefficient dicts are allowed; each level still doubles
    # the reconstruction shape along every axis.
    coeffs = [np.ones((2, 2, 2)), {}, {}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (8, 8, 8))

    # A partially-populated detail dict at the final level must give the
    # same shape (previously this case was built but never asserted).
    coeffs = [np.ones((2, 2, 2)), {}, {'daa': np.ones((4, 4, 4))}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (8, 8, 8))

    coeffs = [np.ones((2, 2, 2)), {}, {}, {'daa': np.ones((8, 8, 8))}]
    assert_equal(pywt.waverecn(coeffs, 'db1').shape, (16, 16, 16))
def test_waverecn_invalid_coeffs():
# approximation coeffs as None and no valid detail oeffs
coeffs = [None, {}]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
# use of None for a coefficient value
coeffs = [np.ones((2, 2, 2)), {}, {'daa': None}, ]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
# invalid key names in coefficient list
coeffs = [np.ones((4, 4, 4)), {'daa': np.ones((4, 4, 4)),
'foo': np.ones((4, 4, 4))}]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
# mismatched key name lengths
coeffs = [np.ones((4, 4, 4)), {'daa': np.ones((4, 4, 4)),
'da': np.ones((4, 4, 4))}]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
# key name lengths don't match the array dimensions
coeffs = [[[[1.0]]], {'ad': [[[0.0]]], 'da': [[[0.0]]], 'dd': [[[0.0]]]}]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
# input list cannot be empty
assert_raises(ValueError, pywt.waverecn, [], 'haar')
def test_waverecn_invalid_inputs():
# coefficients from a difference decomposition used as input
for dec_func in [pywt.wavedec, pywt.wavedec2]:
coeffs = dec_func(np.ones((8, 8)), 'haar')
message = "Unexpected detail coefficient type"
assert_raises_regex(ValueError, message, pywt.waverecn, coeffs,
'haar')
def test_waverecn_lists():
# support coefficient arrays specified as lists instead of arrays
coeffs = [[[1.0]], {'ad': [[0.0]], 'da': [[0.0]], 'dd': [[0.0]]}]
assert_equal(pywt.waverecn(coeffs, 'db1').shape, (2, 2))
def test_waverecn_invalid_coeffs2():
# shape mismatch should raise an error
coeffs = [np.ones((4, 4, 4)), {'ada': np.ones((4, 4))}]
assert_raises(ValueError, pywt.waverecn, coeffs, 'db1')
def test_wavedecn_invalid_inputs():
# input array has too few dimensions
data = np.array(0)
assert_raises(ValueError, pywt.wavedecn, data, 'haar')
# invalid number of levels
data = np.ones(16)
assert_raises(ValueError, pywt.wavedecn, data, 'haar', level=-1)
def test_wavedecn_many_levels():
# perfect reconstruction even when level > pywt.dwt_max_level
data = np.arange(64).reshape(8, 8)
tol = 1e-12
dec_funcs = [pywt.wavedec, pywt.wavedec2, pywt.wavedecn]
rec_funcs = [pywt.waverec, pywt.waverec2, pywt.waverecn]
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
for dec_func, rec_func in zip(dec_funcs, rec_funcs):
for mode in ['periodization', 'symmetric']:
coeffs = dec_func(data, 'haar', mode=mode, level=20)
r = rec_func(coeffs, 'haar', mode=mode)
assert_allclose(data, r, atol=tol, rtol=tol)
def test_waverecn_accuracies():
# testing 3D only here
rstate = np.random.RandomState(1234)
x0 = rstate.randn(4, 4, 4)
for dt, tol in dtypes_and_tolerances:
x = x0.astype(dt)
if np.iscomplexobj(x):
x += 1j*rstate.randn(4, 4, 4).astype(x.real.dtype)
coeffs = pywt.wavedecn(x.astype(dt), 'db1')
assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
def test_multilevel_dtypes_nd():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
# wavedecn, waverecn
x = np.ones((8, 8), dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
cA, coeffsD2, coeffsD1 = pywt.wavedecn(x, wavelet, level=2)
assert_(cA.dtype == dt_out, "wavedecn: " + errmsg)
for key, c in coeffsD1.items():
assert_(c.dtype == dt_out, "wavedecn: " + errmsg)
for key, c in coeffsD2.items():
assert_(c.dtype == dt_out, "wavedecn: " + errmsg)
x_roundtrip = pywt.waverecn([cA, coeffsD2, coeffsD1], wavelet)
assert_(x_roundtrip.dtype == dt_out, "waverecn: " + errmsg)
def test_wavedecn_complex():
data = np.ones((4, 4, 4)) + 1j
coeffs = pywt.wavedecn(data, 'db1')
assert_allclose(pywt.waverecn(coeffs, 'db1'), data, rtol=1e-12)
def test_waverecn_dtypes():
x = np.ones((4, 4, 4))
for dt, tol in dtypes_and_tolerances:
coeffs = pywt.wavedecn(x.astype(dt), 'db1')
assert_allclose(pywt.waverecn(coeffs, 'db1'), x, atol=tol, rtol=tol)
@pytest.mark.slow
def test_waverecn_all_wavelets_modes():
# test 2D case using all wavelets and modes
rstate = np.random.RandomState(1234)
r = rstate.randn(80, 96)
for wavelet in wavelist:
for mode in pywt.Modes.modes:
coeffs = pywt.wavedecn(r, wavelet, mode=mode)
assert_allclose(pywt.waverecn(coeffs, wavelet, mode=mode),
r, rtol=tol_single, atol=tol_single)
def test_coeffs_to_array():
# single element list returns the first element
a_coeffs = [np.arange(8).reshape(2, 4), ]
arr, arr_slices = pywt.coeffs_to_array(a_coeffs)
assert_allclose(arr, a_coeffs[0])
assert_allclose(arr, arr[arr_slices[0]])
assert_raises(ValueError, pywt.coeffs_to_array, [])
# invalid second element: array as in wavedec, but not 1D
assert_raises(ValueError, pywt.coeffs_to_array, [a_coeffs[0], ] * 2)
# invalid second element: tuple as in wavedec2, but not a 3-tuple
assert_raises(ValueError, pywt.coeffs_to_array, [a_coeffs[0],
(a_coeffs[0], )])
# coefficients as None is not supported
assert_raises(ValueError, pywt.coeffs_to_array, [None, ])
assert_raises(ValueError, pywt.coeffs_to_array, [a_coeffs,
(None, None, None)])
# invalid type for second coefficient list element
assert_raises(ValueError, pywt.coeffs_to_array, [a_coeffs, None])
# use an invalid key name in the coef dictionary
coeffs = [np.array([0]), dict(d=np.array([0]), c=np.array([0]))]
assert_raises(ValueError, pywt.coeffs_to_array, coeffs)
def test_wavedecn_coeff_reshape_even():
# verify round trip is correct:
# wavedecn - >coeffs_to_array-> array_to_coeffs -> waverecn
# This is done for wavedec{1, 2, n}
rng = np.random.RandomState(1234)
params = {'wavedec': {'d': 1, 'dec': pywt.wavedec, 'rec': pywt.waverec},
'wavedec2': {'d': 2, 'dec': pywt.wavedec2, 'rec': pywt.waverec2},
'wavedecn': {'d': 3, 'dec': pywt.wavedecn, 'rec': pywt.waverecn}}
N = 28
for f in params:
x1 = rng.randn(*([N] * params[f]['d']))
for mode in pywt.Modes.modes:
for wave in wavelist:
w = pywt.Wavelet(wave)
maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
if maxlevel == 0:
continue
coeffs = params[f]['dec'](x1, w, mode=mode)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices,
output_format=f)
x1r = params[f]['rec'](coeffs2, w, mode=mode)
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_wavedecn_coeff_reshape_axes_subset():
# verify round trip is correct when only a subset of axes are transformed:
# wavedecn - >coeffs_to_array-> array_to_coeffs -> waverecn
# This is done for wavedec{1, 2, n}
rng = np.random.RandomState(1234)
mode = 'symmetric'
w = pywt.Wavelet('db2')
N = 16
ndim = 3
for axes in [(-1, ), (0, ), (1, ), (0, 1), (1, 2), (0, 2), None]:
x1 = rng.randn(*([N] * ndim))
coeffs = pywt.wavedecn(x1, w, mode=mode, axes=axes)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, axes=axes)
if axes is not None:
# if axes is not None, it must be provided to coeffs_to_array
assert_raises(ValueError, pywt.coeffs_to_array, coeffs)
# mismatched axes size
assert_raises(ValueError, pywt.coeffs_to_array, coeffs,
axes=(0, 1, 2, 3))
assert_raises(ValueError, pywt.coeffs_to_array, coeffs,
axes=())
coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices)
x1r = pywt.waverecn(coeffs2, w, mode=mode, axes=axes)
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_coeffs_to_array_padding():
rng = np.random.RandomState(1234)
x1 = rng.randn(32, 32)
mode = 'symmetric'
coeffs = pywt.wavedecn(x1, 'db2', mode=mode)
# padding=None raises a ValueError when tight packing is not possible
assert_raises(ValueError, pywt.coeffs_to_array, coeffs, padding=None)
# set padded values to nan
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, padding=np.nan)
npad = np.sum(np.isnan(coeff_arr))
assert_(npad > 0)
# pad with zeros
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs, padding=0)
assert_(np.sum(np.isnan(coeff_arr)) == 0)
assert_(np.sum(coeff_arr == 0) == npad)
# Haar case with N as a power of 2 can be tightly packed
coeffs_haar = pywt.wavedecn(x1, 'haar', mode=mode)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs_haar, padding=None)
# shape of coeff_arr will match in this case, but not in general
assert_equal(coeff_arr.shape, x1.shape)
def test_waverecn_coeff_reshape_odd():
# verify round trip is correct:
# wavedecn - >coeffs_to_array-> array_to_coeffs -> waverecn
rng = np.random.RandomState(1234)
x1 = rng.randn(35, 33)
for mode in pywt.Modes.modes:
for wave in ['haar', ]:
w = pywt.Wavelet(wave)
maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
if maxlevel == 0:
continue
coeffs = pywt.wavedecn(x1, w, mode=mode)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
coeffs2 = pywt.array_to_coeffs(coeff_arr, coeff_slices)
x1r = pywt.waverecn(coeffs2, w, mode=mode)
# truncate reconstructed values to original shape
x1r = x1r[tuple([slice(s) for s in x1.shape])]
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_array_to_coeffs_invalid_inputs():
coeffs = pywt.wavedecn(np.ones(2), 'haar')
arr, arr_slices = pywt.coeffs_to_array(coeffs)
# empty list of array slices
assert_raises(ValueError, pywt.array_to_coeffs, arr, [])
# invalid format name
assert_raises(ValueError, pywt.array_to_coeffs, arr, arr_slices, 'foo')
def test_wavedecn_coeff_ravel():
# verify round trip is correct:
# wavedecn - >ravel_coeffs-> unravel_coeffs -> waverecn
# This is done for wavedec{1, 2, n}
rng = np.random.RandomState(1234)
params = {'wavedec': {'d': 1, 'dec': pywt.wavedec, 'rec': pywt.waverec},
'wavedec2': {'d': 2, 'dec': pywt.wavedec2, 'rec': pywt.waverec2},
'wavedecn': {'d': 3, 'dec': pywt.wavedecn, 'rec': pywt.waverecn}}
N = 12
for f in params:
x1 = rng.randn(*([N] * params[f]['d']))
for mode in pywt.Modes.modes:
for wave in wavelist:
w = pywt.Wavelet(wave)
maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
if maxlevel == 0:
continue
coeffs = params[f]['dec'](x1, w, mode=mode)
coeff_arr, slices, shapes = pywt.ravel_coeffs(coeffs)
coeffs2 = pywt.unravel_coeffs(coeff_arr, slices, shapes,
output_format=f)
x1r = params[f]['rec'](coeffs2, w, mode=mode)
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_wavedecn_coeff_ravel_zero_level():
# verify round trip is correct:
# wavedecn - >ravel_coeffs-> unravel_coeffs -> waverecn
# This is done for wavedec{1, 2, n}
rng = np.random.RandomState(1234)
params = {'wavedec': {'d': 1, 'dec': pywt.wavedec, 'rec': pywt.waverec},
'wavedec2': {'d': 2, 'dec': pywt.wavedec2, 'rec': pywt.waverec2},
'wavedecn': {'d': 3, 'dec': pywt.wavedecn, 'rec': pywt.waverecn}}
N = 16
for f in params:
x1 = rng.randn(*([N] * params[f]['d']))
for mode in pywt.Modes.modes:
w = pywt.Wavelet('db2')
coeffs = params[f]['dec'](x1, w, mode=mode, level=0)
coeff_arr, slices, shapes = pywt.ravel_coeffs(coeffs)
coeffs2 = pywt.unravel_coeffs(coeff_arr, slices, shapes,
output_format=f)
x1r = params[f]['rec'](coeffs2, w, mode=mode)
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_waverecn_coeff_ravel_odd():
# verify round trip is correct:
# wavedecn - >ravel_coeffs-> unravel_coeffs -> waverecn
rng = np.random.RandomState(1234)
x1 = rng.randn(35, 33)
for mode in pywt.Modes.modes:
for wave in ['haar', ]:
w = pywt.Wavelet(wave)
maxlevel = pywt.dwt_max_level(np.min(x1.shape), w.dec_len)
if maxlevel == 0:
continue
coeffs = pywt.wavedecn(x1, w, mode=mode)
coeff_arr, slices, shapes = pywt.ravel_coeffs(coeffs)
coeffs2 = pywt.unravel_coeffs(coeff_arr, slices, shapes)
x1r = pywt.waverecn(coeffs2, w, mode=mode)
# truncate reconstructed values to original shape
x1r = x1r[tuple([slice(s) for s in x1.shape])]
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
def test_ravel_wavedec2_with_lists():
x1 = np.ones((8, 8))
wav = pywt.Wavelet('haar')
coeffs = pywt.wavedec2(x1, wav)
# list [cHn, cVn, cDn] instead of tuple is okay
coeffs[1:] = [list(c) for c in coeffs[1:]]
coeff_arr, slices, shapes = pywt.ravel_coeffs(coeffs)
coeffs2 = pywt.unravel_coeffs(coeff_arr, slices, shapes,
output_format='wavedec2')
x1r = pywt.waverec2(coeffs2, wav)
assert_allclose(x1, x1r, rtol=1e-4, atol=1e-4)
# wrong length list will cause a ValueError
coeffs[1:] = [list(c[:-1]) for c in coeffs[1:]] # truncate diag coeffs
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
def test_ravel_invalid_input():
# wavedec ravel does not support any coefficient arrays being set to None
coeffs = pywt.wavedec(np.ones(8), 'haar')
coeffs[1] = None
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
# wavedec2 ravel cannot have None or a tuple/list of None
coeffs = pywt.wavedec2(np.ones((8, 8)), 'haar')
coeffs[1] = (None, None, None)
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
coeffs[1] = [None, None, None]
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
coeffs[1] = None
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
# wavedecn ravel cannot have any dictionary elements as None
coeffs = pywt.wavedecn(np.ones((8, 8, 8)), 'haar')
coeffs[1]['ddd'] = None
assert_raises(ValueError, pywt.ravel_coeffs, coeffs)
def test_unravel_invalid_inputs():
coeffs = pywt.wavedecn(np.ones(2), 'haar')
arr, slices, shapes = pywt.ravel_coeffs(coeffs)
# empty list for slices or shapes
assert_raises(ValueError, pywt.unravel_coeffs, arr, slices, [])
assert_raises(ValueError, pywt.unravel_coeffs, arr, [], shapes)
# unequal length for slices/shapes
assert_raises(ValueError, pywt.unravel_coeffs, arr, slices[:-1], shapes)
# invalid format name
assert_raises(ValueError, pywt.unravel_coeffs, arr, slices, shapes, 'foo')
def test_wavedecn_shapes_and_size():
wav = pywt.Wavelet('db2')
for data_shape in [(33, ), (64, 32), (1, 15, 30)]:
for axes in [None, 0, -1]:
for mode in pywt.Modes.modes:
coeffs = pywt.wavedecn(np.ones(data_shape), wav,
mode=mode, axes=axes)
# verify that the shapes match the coefficient shapes
shapes = pywt.wavedecn_shapes(data_shape, wav,
mode=mode, axes=axes)
assert_equal(coeffs[0].shape, shapes[0])
expected_size = coeffs[0].size
for level in range(1, len(coeffs)):
for k, v in coeffs[level].items():
expected_size += v.size
assert_equal(shapes[level][k], v.shape)
# size can be determined from either the shapes or coeffs
size = pywt.wavedecn_size(shapes)
assert_equal(size, expected_size)
size = pywt.wavedecn_size(coeffs)
assert_equal(size, expected_size)
def test_dwtn_max_level():
# predicted and empirical dwtn_max_level match
for wav in [pywt.Wavelet('db2'), 'sym8']:
for data_shape in [(33, ), (64, 32), (1, 15, 30)]:
for axes in [None, 0, -1]:
for mode in pywt.Modes.modes:
coeffs = pywt.wavedecn(np.ones(data_shape), wav,
mode=mode, axes=axes)
max_lev = pywt.dwtn_max_level(data_shape, wav, axes)
assert_equal(len(coeffs[1:]), max_lev)
def test_waverec_axes_subsets():
rstate = np.random.RandomState(0)
data = rstate.standard_normal((8, 8, 8))
# test all combinations of 1 out of 3 axes transformed
for axis in [0, 1, 2]:
coefs = pywt.wavedec(data, 'haar', axis=axis)
rec = pywt.waverec(coefs, 'haar', axis=axis)
assert_allclose(rec, data, atol=1e-14)
def test_waverec_axis_db2():
# test for fix to issue gh-293
rstate = np.random.RandomState(0)
data = rstate.standard_normal((16, 16))
for axis in [0, 1]:
coefs = pywt.wavedec(data, 'db2', axis=axis)
rec = pywt.waverec(coefs, 'db2', axis=axis)
assert_allclose(rec, data, atol=1e-14)
def test_waverec2_axes_subsets():
rstate = np.random.RandomState(0)
data = rstate.standard_normal((8, 8, 8))
# test all combinations of 2 out of 3 axes transformed
for axes in combinations((0, 1, 2), 2):
coefs = pywt.wavedec2(data, 'haar', axes=axes)
rec = pywt.waverec2(coefs, 'haar', axes=axes)
assert_allclose(rec, data, atol=1e-14)
def test_waverecn_axes_subsets():
rstate = np.random.RandomState(0)
data = rstate.standard_normal((8, 8, 8, 8))
# test all combinations of 3 out of 4 axes transformed
for axes in combinations((0, 1, 2, 3), 3):
coefs = pywt.wavedecn(data, 'haar', axes=axes)
rec = pywt.waverecn(coefs, 'haar', axes=axes)
assert_allclose(rec, data, atol=1e-14)
def test_waverecn_int_axis():
# waverecn should also work for axes as an integer
rstate = np.random.RandomState(0)
data = rstate.standard_normal((8, 8))
for axis in [0, 1]:
coefs = pywt.wavedecn(data, 'haar', axes=axis)
rec = pywt.waverecn(coefs, 'haar', axes=axis)
assert_allclose(rec, data, atol=1e-14)
def test_wavedec_axis_error():
data = np.ones(4)
# out of range axis not allowed
assert_raises(ValueError, pywt.wavedec, data, 'haar', axis=1)
def test_waverec_axis_error():
c = pywt.wavedec(np.ones(4), 'haar')
# out of range axis not allowed
assert_raises(ValueError, pywt.waverec, c, 'haar', axis=1)
def test_waverec_shape_mismatch_error():
    # A truncated detail coefficient must raise ValueError on reconstruction.
    c = pywt.wavedec(np.ones(16), 'haar')
    # truncate a detail coefficient to an incorrect shape
    c[3] = c[3][:-1]
    # No axis argument here: passing an out-of-range axis (as
    # test_waverec_axis_error does) raises ValueError on its own and would
    # mask the shape-mismatch check this test is meant to exercise.
    assert_raises(ValueError, pywt.waverec, c, 'haar')
def test_wavedec2_axes_errors():
data = np.ones((4, 4))
# integer axes not allowed
assert_raises(TypeError, pywt.wavedec2, data, 'haar', axes=1)
# non-unique axes not allowed
assert_raises(ValueError, pywt.wavedec2, data, 'haar', axes=(0, 0))
# out of range axis not allowed
assert_raises(ValueError, pywt.wavedec2, data, 'haar', axes=(0, 2))
def test_waverec2_axes_errors():
data = np.ones((4, 4))
c = pywt.wavedec2(data, 'haar')
# integer axes not allowed
assert_raises(TypeError, pywt.waverec2, c, 'haar', axes=1)
# non-unique axes not allowed
assert_raises(ValueError, pywt.waverec2, c, 'haar', axes=(0, 0))
# out of range axis not allowed
assert_raises(ValueError, pywt.waverec2, c, 'haar', axes=(0, 2))
def test_wavedecn_axes_errors():
data = np.ones((8, 8, 8))
# repeated axes not allowed
assert_raises(ValueError, pywt.wavedecn, data, 'haar', axes=(1, 1))
# out of range axis not allowed
assert_raises(ValueError, pywt.wavedecn, data, 'haar', axes=(0, 1, 3))
def test_waverecn_axes_errors():
data = np.ones((8, 8, 8))
c = pywt.wavedecn(data, 'haar')
# repeated axes not allowed
assert_raises(ValueError, pywt.waverecn, c, 'haar', axes=(1, 1))
# out of range axis not allowed
assert_raises(ValueError, pywt.waverecn, c, 'haar', axes=(0, 1, 3))
def test_per_axis_wavelets_and_modes():
# tests seperate wavelet and edge mode for each axis.
rstate = np.random.RandomState(1234)
data = rstate.randn(24, 24, 16)
# wavelet can be a string or wavelet object
wavelets = (pywt.Wavelet('haar'), 'sym2', 'db2')
# The default number of levels should be the minimum over this list
max_levels = [pywt._dwt.dwt_max_level(nd, nf) for nd, nf in
zip(data.shape, wavelets)]
# mode can be a string or a Modes enum
modes = ('symmetric', 'periodization',
pywt._extensions._pywt.Modes.reflect)
coefs = pywt.wavedecn(data, wavelets, modes)
assert_allclose(pywt.waverecn(coefs, wavelets, modes), data, atol=1e-14)
assert_equal(min(max_levels), len(coefs[1:]))
coefs = pywt.wavedecn(data, wavelets[:1], modes)
assert_allclose(pywt.waverecn(coefs, wavelets[:1], modes), data,
atol=1e-14)
coefs = pywt.wavedecn(data, wavelets, modes[:1])
assert_allclose(pywt.waverecn(coefs, wavelets, modes[:1]), data,
atol=1e-14)
# length of wavelets or modes doesn't match the length of axes
assert_raises(ValueError, pywt.wavedecn, data, wavelets[:2])
assert_raises(ValueError, pywt.wavedecn, data, wavelets, mode=modes[:2])
assert_raises(ValueError, pywt.waverecn, coefs, wavelets[:2])
assert_raises(ValueError, pywt.waverecn, coefs, wavelets, mode=modes[:2])
# dwt2/idwt2 also support per-axis wavelets/modes
data2 = data[..., 0]
coefs2 = pywt.wavedec2(data2, wavelets[:2], modes[:2])
assert_allclose(pywt.waverec2(coefs2, wavelets[:2], modes[:2]), data2,
atol=1e-14)
assert_equal(min(max_levels[:2]), len(coefs2[1:]))
# Tests for fully separable multi-level transforms
def test_fswavedecn_fswaverecn_roundtrip():
# verify proper round trip result for 1D through 4D data
# same DWT as wavedecn/waverecn so don't need to test all modes/wavelets
rstate = np.random.RandomState(0)
for ndim in range(1, 5):
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
for levels in (1, None):
data = rstate.standard_normal((8, )*ndim)
data = data.astype(dt_in)
T = pywt.fswavedecn(data, 'haar', levels=levels)
rec = pywt.fswaverecn(T)
if data.real.dtype in [np.float32, np.float16]:
assert_allclose(rec, data, rtol=1e-6, atol=1e-6)
else:
assert_allclose(rec, data, rtol=1e-14, atol=1e-14)
assert_(T.coeffs.dtype == dt_out)
assert_(rec.dtype == dt_out)
def test_fswavedecn_fswaverecn_zero_levels():
    """A zero-level fswavedecn is the identity transform."""
    rng = np.random.RandomState(0)
    data = rng.standard_normal((8, 8))
    result = pywt.fswavedecn(data, 'haar', levels=0)
    # With no decomposition levels the coefficient array is the data itself.
    assert_array_equal(result.coeffs, data)
    # Reconstruction must likewise be a no-op.
    reconstructed = pywt.fswaverecn(result)
    assert_array_equal(result.coeffs, reconstructed)
def test_fswavedecn_fswaverecn_variable_levels():
    """Per-axis level counts round-trip; malformed level tuples raise/warn."""
    rng = np.random.RandomState(0)
    data = rng.standard_normal((16, 16, 16))
    # A different number of transform levels along each of the three axes.
    result = pywt.fswavedecn(data, 'haar', levels=(1, 2, 3))
    assert_allclose(pywt.fswaverecn(result), data, atol=1e-14)
    # The levels tuple must have exactly one entry per transformed axis.
    for bad_levels in [(1, 1), (1, 1, 1, 1)]:
        assert_raises(ValueError, pywt.fswavedecn, data, 'haar',
                      levels=bad_levels)
    # Requesting more levels than the data size supports only warns.
    too_many = int(np.log2(np.min(data.shape))) + 1
    assert_warns(UserWarning, pywt.fswavedecn, data, 'haar', levels=too_many)
def test_fswavedecn_fswaverecn_variable_wavelets_and_modes():
    """A distinct wavelet and boundary mode per axis round-trips correctly."""
    rng = np.random.RandomState(0)
    data = rng.standard_normal((16, 16, 16))
    wavelets = ('haar', 'db2', 'sym3')
    modes = ('periodic', 'symmetric', 'periodization')
    result = pywt.fswavedecn(data, wavelet=wavelets, mode=modes)
    for axis, wav in enumerate(wavelets):
        # Each axis carries one approximation plus dwt_max_level detail slices.
        expected_slices = pywt.dwt_max_level(data.shape[axis], wav) + 1
        assert_equal(len(result.coeff_slices[axis]), expected_slices)
    rec = pywt.fswaverecn(result)
    assert_allclose(rec, data, atol=1e-14)
    # The wavelet sequence must match the number of transformed axes...
    assert_raises(ValueError, pywt.fswavedecn, data, wavelets[:2])
    # ...and so must the mode sequence.
    assert_raises(ValueError, pywt.fswavedecn, data, wavelets[0],
                  mode=modes[:2])
def test_fswavedecn_fswaverecn_axes_subsets():
    """Fully separable DWT restricted to a subset of axes round-trips."""
    rng = np.random.RandomState(0)
    # Anisotropic shape so each axis admits a different number of levels.
    data = rng.standard_normal((4, 8, 16, 32))
    # Every choice of 3 transformed axes out of 4 must reconstruct exactly.
    for axes in combinations((0, 1, 2, 3), 3):
        result = pywt.fswavedecn(data, 'haar', axes=axes)
        assert_allclose(pywt.fswaverecn(result), data, atol=1e-14)
    # Out-of-range axis indices are rejected.
    assert_raises(ValueError, pywt.fswavedecn, data, 'haar', axes=(1, 5))
def test_fswavedecnresult():
    """Exercise the FswavedecnResult container: access, mutation, validation."""
    data = np.ones((32, 32))
    levels = (1, 2)
    result = pywt.fswavedecn(data, 'sym2', levels=levels)
    # can access the lowpass band via .approx or via __getitem__
    approx_key = (0, ) * data.ndim
    assert_array_equal(result[approx_key], result.approx)
    dkeys = result.detail_keys()
    # the approximation key shouldn't be present in the detail_keys
    assert_(approx_key not in dkeys)
    # can access all detail coefficients and they have matching ndim
    for k in dkeys:
        d = result[k]
        assert_equal(d.ndim, data.ndim)
        # can assign modified coefficients
        result[k] = np.zeros_like(d)
        # assigning a differently sized array raises a ValueError
        assert_raises(ValueError, result.__setitem__,
                      k, np.zeros(tuple([s + 1 for s in d.shape])))
        # warns on assigning with a non-matching dtype
        assert_warns(UserWarning, result.__setitem__,
                     k, np.zeros_like(d).astype(np.float32))
    # all coefficients are stacked into result.coeffs (same ndim)
    assert_equal(result.coeffs.ndim, data.ndim)
def test_error_on_continuous_wavelet():
    """wavedec*/waverec* reject continuous wavelets with a ValueError."""
    data = np.ones((16, 16))
    transforms = zip([pywt.wavedec, pywt.wavedec2, pywt.wavedecn],
                     [pywt.waverec, pywt.waverec2, pywt.waverecn])
    for dec_fun, rec_fun in transforms:
        # Both the string name and the wavelet object form must be refused.
        for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
            # Decomposition refuses the continuous wavelet outright...
            assert_raises(ValueError, dec_fun, data, wavelet=cwave)
            # ...and so does reconstruction of valid discrete coefficients.
            c = dec_fun(data, 'db1')
            assert_raises(ValueError, rec_fun, c, wavelet=cwave)
def test_default_level():
    """Omitting ``level`` decomposes to the maximum for the transformed axes."""
    # default level is the maximum permissible for the transformed axes
    data = np.ones((128, 32, 4))
    wavelet = ('db8', 'db1')
    for dec_func in [pywt.wavedec2, pywt.wavedecn]:
        for axes in [(0, 1), (2, 1), (0, 2)]:
            c = dec_func(data, wavelet, axes=axes)
            # The effective level is capped by the most restrictive
            # (axis length, wavelet filter length) pair.
            max_lev = np.min([pywt.dwt_max_level(data.shape[ax], wav)
                              for ax, wav in zip(axes, wavelet)])
            assert_equal(len(c[1:]), max_lev)
    for ax in [0, 1]:
        # Single-axis transforms use that axis' own maximum level.
        c = pywt.wavedecn(data, wavelet[ax], axes=(ax, ))
        assert_equal(len(c[1:]),
                     pywt.dwt_max_level(data.shape[ax], wavelet[ax]))
def test_waverec_mixed_precision():
    """Reconstruction promotes mixed-precision coefficients to double.

    The detail-coefficient container differs per dimensionality (array for
    1D, tuple for 2D, dict for nD), hence the ndim-specific branches below.
    """
    rstate = np.random.RandomState(0)
    for func, ifunc, shape in [(pywt.wavedec, pywt.waverec, (8, )),
                               (pywt.wavedec2, pywt.waverec2, (8, 8)),
                               (pywt.wavedecn, pywt.waverecn, (8, 8, 8))]:
        x = rstate.randn(*shape)
        coeffs_real = func(x, 'db1')
        # real: single precision approx, double precision details
        coeffs_real[0] = coeffs_real[0].astype(np.float32)
        r = ifunc(coeffs_real, 'db1')
        assert_allclose(r, x, rtol=1e-7, atol=1e-7)
        # Result must be promoted to the widest precision present.
        assert_equal(r.dtype, np.float64)
        x = x + 1j*x
        coeffs = func(x, 'db1')
        # complex: single precision approx, double precision details
        coeffs[0] = coeffs[0].astype(np.complex64)
        r = ifunc(coeffs, 'db1')
        assert_allclose(r, x, rtol=1e-7, atol=1e-7)
        assert_equal(r.dtype, np.complex128)
        # complex: double precision approx, single precision details
        if x.ndim == 1:
            coeffs[0] = coeffs[0].astype(np.complex128)
            coeffs[1] = coeffs[1].astype(np.complex64)
        if x.ndim == 2:
            coeffs[0] = coeffs[0].astype(np.complex128)
            coeffs[1] = tuple([v.astype(np.complex64) for v in coeffs[1]])
        if x.ndim == 3:
            coeffs[0] = coeffs[0].astype(np.complex128)
            coeffs[1] = {k: v.astype(np.complex64)
                         for k, v in coeffs[1].items()}
        r = ifunc(coeffs, 'db1')
        assert_allclose(r, x, rtol=1e-7, atol=1e-7)
        assert_equal(r.dtype, np.complex128)
| mit |
jonesgithub/zulip | analytics/management/commands/analyze_mit.py | 115 | 3527 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Recipient, Message
from zerver.lib.timestamp import timestamp_to_datetime
import datetime
import time
import logging
def compute_stats(log_level):
    """Report per-user and per-client Zulip usage for mit.edu over one week.

    NOTE(review): this is Python 2 code (see the ``print grand_total``
    statement near the end); it will not run under Python 3 as-is.
    """
    logger = logging.getLogger()
    logger.setLevel(log_level)
    # NOTE(review): the rest of the function logs through the ``logging``
    # module functions rather than ``logger``; this works only because the
    # level was set on the *root* logger above.
    one_week_ago = timestamp_to_datetime(time.time()) - datetime.timedelta(weeks=1)
    mit_query = Message.objects.filter(sender__realm__domain="mit.edu",
                                       recipient__type=Recipient.STREAM,
                                       pub_date__gt=one_week_ago)
    # Exclude well-known bot senders by prefix.
    for bot_sender_start in ["imap.", "rcmd.", "sys."]:
        mit_query = mit_query.exclude(sender__email__startswith=(bot_sender_start))
    # Filtering for "/" covers tabbott/extra@ and all the daemon/foo bots.
    mit_query = mit_query.exclude(sender__email__contains=("/"))
    mit_query = mit_query.exclude(sender__email__contains=("aim.com"))
    # Hand-maintained denylist of service accounts.
    mit_query = mit_query.exclude(
        sender__email__in=["rss@mit.edu", "bash@mit.edu", "apache@mit.edu",
                           "bitcoin@mit.edu", "lp@mit.edu", "clocks@mit.edu",
                           "root@mit.edu", "nagios@mit.edu",
                           "www-data|local-realm@mit.edu"])
    # user_counts: email -> {sending_client_name -> message count}
    user_counts = {}
    for m in mit_query.select_related("sending_client", "sender"):
        email = m.sender.email
        user_counts.setdefault(email, {})
        user_counts[email].setdefault(m.sending_client.name, 0)
        user_counts[email][m.sending_client.name] += 1
    # Roll up totals per client and per user.
    total_counts = {}
    total_user_counts = {}
    for email, counts in user_counts.items():
        total_user_counts.setdefault(email, 0)
        for client_name, count in counts.items():
            total_counts.setdefault(client_name, 0)
            total_counts[client_name] += count
            total_user_counts[email] += count
    logging.debug("%40s | %10s | %s" % ("User", "Messages", "Percentage Zulip"))
    # top_percents[N] accumulates the mean "percent via Zulip" over the N
    # most active users.
    top_percents = {}
    for size in [10, 25, 50, 100, 200, len(total_user_counts.keys())]:
        top_percents[size] = 0
    for i, email in enumerate(sorted(total_user_counts.keys(),
                                     key=lambda x: -total_user_counts[x])):
        # Anything not sent through the zephyr_mirror client counts as Zulip.
        percent_zulip = round(100 - (user_counts[email].get("zephyr_mirror", 0)) * 100. /
                              total_user_counts[email], 1)
        for size in top_percents.keys():
            # NOTE(review): setdefault on keys we are iterating is a no-op;
            # every key was already initialized to 0 above.
            top_percents.setdefault(size, 0)
            if i < size:
                top_percents[size] += (percent_zulip * 1.0 / size)
        logging.debug("%40s | %10s | %s%%" % (email, total_user_counts[email],
                                              percent_zulip))
    logging.info("")
    for size in sorted(top_percents.keys()):
        logging.info("Top %6s | %s%%" % (size, round(top_percents[size], 1)))
    grand_total = sum(total_counts.values())
    # NOTE(review): Python 2 print statement; bypasses the logging setup.
    print grand_total
    logging.info("%15s | %s" % ("Client", "Percentage"))
    for client in total_counts.keys():
        logging.info("%15s | %s%%" % (client, round(100. * total_counts[client] / grand_total, 1)))
class Command(BaseCommand):
    # Legacy (pre-Django 1.8) optparse-style option declaration; newer code
    # would override add_arguments() instead.
    option_list = BaseCommand.option_list + \
        (make_option('--verbose', default=False, action='store_true'),)
    help = "Compute statistics on MIT Zephyr usage."
    def handle(self, *args, **options):
        # --verbose switches the report from INFO to DEBUG verbosity.
        level = logging.INFO
        if options["verbose"]:
            level = logging.DEBUG
        compute_stats(level)
| apache-2.0 |
pdellaert/ansible | lib/ansible/modules/network/junos/junos_banner.py | 52 | 5256 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_banner
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage multiline banners on Juniper JUNOS devices
description:
- This will configure both login and motd banners on network devices.
It allows playbooks to add or remote
banner text from the active running configuration.
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device. Value C(login) indicates
system login message prior to authenticating, C(motd) is login
announcement after successful authentication.
required: true
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
type: bool
default: 'yes'
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure the login banner
junos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
junos_banner:
banner: motd
state: absent
- name: deactivate the motd banner
junos_banner:
banner: motd
state: present
active: False
- name: activate the motd banner
junos_banner:
banner: motd
state: present
active: True
- name: Configure banner from file
junos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit system login]
+ message \"this is my login banner\";
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
    """Run the module-level ``validate_<key>`` hook, if any, for each key.

    Keys in *obj* with no matching validator are silently accepted; a found
    validator receives the parameter's value and the module object.
    """
    for param in obj:
        # Look up an optional validator function by naming convention.
        handler = globals().get('validate_%s' % param)
        if callable(handler):
            handler(module.params.get(param), module)
def main():
    """ main entry point for module execution

    Builds the banner XML element, loads it onto the device inside a
    configuration lock, and commits unless running in check mode.
    """
    argument_spec = dict(
        banner=dict(required=True, choices=['login', 'motd']),
        text=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        active=dict(default=True, type='bool')
    )
    argument_spec.update(junos_argument_spec)
    # 'text' is mandatory only when creating/updating the banner.
    required_if = [('state', 'present', ('text',))]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    # NOTE(review): ``warnings`` is never appended to here, so the guard
    # below never adds the key; kept for parity with sibling junos modules.
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    top = 'system/login'
    param_to_xpath_map = collections.OrderedDict()
    # Junos stores the pre-auth banner as 'message' and the post-auth
    # banner as 'announcement'.
    param_to_xpath_map.update([
        ('text', {'xpath': 'message' if module.params['banner'] == 'login' else 'announcement', 'leaf_only': True})
    ])
    validate_param_values(module, param_to_xpath_map)
    want = map_params_to_obj(module, param_to_xpath_map)
    ele = map_obj_to_ele(module, want, top)
    with locked_config(module):
        diff = load_config(module, tostring(ele), warnings, action='merge')
        commit = not module.check_mode
        if diff:
            if commit:
                commit_configuration(module)
            else:
                # Check mode: report the would-be change, then roll back.
                discard_changes(module)
            result['changed'] = True
            if module._diff:
                result['diff'] = {'prepared': diff}
    module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
yazgoo/meshviewer | generate-rotation-images.py | 2 | 2872 | # Execution :
# $ /Applications/blender/blender.app/Contents/MacOS/blender -b -P ~/test_blender.py -- Scene_2012_01_11_15_10_03
# avec : Scene_2012_01_11_15_10_03 : le nom du fichier obj
#
# Réalise :
import bpy
import math
import mathutils
import sys
import os
class locationxyz(list):
    # Attribute bag for a 3-D point: callers assign .x/.y/.z directly.
    # NOTE(review): the ``list`` base class is never used as a sequence;
    # a plain object (or a namedtuple) would express the intent better.
    pass
# main variables
# NOTE(review): ``rayon`` (radius) and ``etapes`` (step count) are computed
# but never used later in the script -- presumably leftovers of the missing
# render loop; confirm against version history.
rayon = 80
#meshname = "Scene_2012_01_11_15_10_03"
# First script argument after blender's "--" separator (OBJ base name).
meshname = str(sys.argv[5])
# Output directory for rendered frames; shadows the builtin ``dir``.
dir = "render"
angle = math.radians(5)
etapes = int(math.radians(360) / angle)
# camera position variables
x = 0
y = 0
z = -100
# Point the camera will aim at (only .x/.y/.z attributes are used).
targetLoc = locationxyz()
targetLoc.x = 0
targetLoc.y = 0
targetLoc.z = 10
# rendered image resolution
rnd = bpy.data.scenes[0].render
rnd.resolution_x = 1280
rnd.resolution_y = 756
# Final render size is 50% of the configured resolution.
rnd.resolution_percentage = 50
def pointCameraToTarget(cam, targetLoc):
    """Orient *cam* (a Blender object) so that it looks at *targetLoc*.

    targetLoc carries .x/.y/.z of the point to aim at.  A Blender camera
    with rotation_euler == (0, 0, 0) points down the -z axis, so we compute
    the pitch (x) and yaw (z) rotations that bring -z onto the camera ->
    target direction; no roll (y) is applied.
    """
    # Vector from the camera to the target.
    dx = targetLoc.x - cam.location.x
    dy = targetLoc.y - cam.location.y
    dz = targetLoc.z - cam.location.z
    print("dx, dy, dz:", dx, dy, dz)
    # Pitch: rotate up from straight-down (-z) by 90 degrees plus the
    # elevation of the target relative to the horizontal plane.
    # (Bug fix: was the hard-coded constant 3.14159/2.; use math.pi.)
    xRad = (math.pi / 2.) + math.atan2(dz, math.sqrt(dy**2 + dx**2))
    print("xRad: %f, %f deg" % (xRad, xRad*180./math.pi))
    # Yaw around z; the -90 degree offset accounts for the camera's local
    # axes.  (Bug fix: the original used 3.14159256, a typo'd value of pi
    # with transposed digits -- 3.14159265... is correct.)
    zRad = math.atan2(dy, dx) - (math.pi / 2.)
    print("zRad: %f, %f deg" % (zRad, zRad*180./math.pi))
    cam.rotation_euler = mathutils.Euler((xRad, 0, zRad), 'XYZ')
# NOTE(review): ``i`` is assigned once and never incremented -- everything
# below reads like the interior of a ``for i in range(etapes)`` loop that was
# lost; as written the script renders a single frame and the x/y rotation
# update at the end is dead code.  Confirm against version history.
# Delete the cube that is present in the default scene.
bpy.ops.object.select_by_type(type='MESH')
bpy.ops.object.delete()
currentdir = os.getcwd()
# Load the OBJ file named on the command line.
bpy.ops.import_scene.obj(filepath= meshname + '.obj',axis_forward='X', axis_up='Z')
if not os.path.exists(currentdir + "/" + dir):
    os.makedirs(currentdir + "/" + dir)
#os.chdir(dir)
#print(currentdir)
#sys.exit()
lamp = bpy.data.objects['Lamp'] # bpy.types.Lamp (the original comment said Camera)
lamp.location = (0, -20, 10)
lamp.scale = (1, 1, 1)
bpy.data.lamps[0].energy = 100
# Cam rotation part
cam = bpy.data.objects['Camera'] # bpy.types.Camera
i = 1
cam.location = (x, y, z)
pointCameraToTarget(cam, targetLoc)
# Keep the light co-located with the camera so the mesh stays lit.
lamp.location = (x, y, z)
print("Rendering image " + str(i + 1) + "...")
bpy.ops.render.render()
# Zero-pad single-digit frame numbers so files sort lexicographically.
if i < 9 :
    filename = dir + "/image-0" + str(i + 1) + ".jpg"
else :
    filename = dir + "/image-" + str(i + 1) + ".jpg"
bpy.data.images[0].save_render(filename)
# Duplicate the first frame as #72 to close the 360-degree animation loop.
if i == 0 :
    bpy.data.images[0].save_render(dir + "/image-72.jpg")
# Rotate the camera position by ``angle`` around the z axis (dead code, see
# the loop note above).
x2 = x * math.cos(angle) + y * math.sin(angle)
y2 = y * math.cos(angle) - x * math.sin(angle)
x = x2
y = y2
#bpy.ops.wm.collada_export(filepath = meshname + '.dae')
| gpl-3.0 |
weleen/mxnet | example/notebooks/basic/data_iter.py | 1 | 1868 | import numpy as np
import mxnet as mx
class SimpleBatch(object):
    """Minimal stand-in for ``mx.io.DataBatch``.

    Attributes:
        data: list of input arrays for one batch.
        label: list of label arrays for one batch.
        pad: number of padding examples at the end of the batch, or None.
    """

    def __init__(self, data, label, pad=None):
        # Plain attribute storage; consumers read these fields directly.
        self.data, self.label, self.pad = data, label, pad
class SimpleIter:
    """Data iterator that samples batches from per-class Gaussians.

    mu and sigma are (num_classes, num_features) arrays of class means and
    standard deviations; each batch draws random labels, then samples the
    matching rows' distributions.  Implements the minimal mx.io.DataIter
    surface (reset / provide_data / provide_label / iteration protocol).
    """
    def __init__(self, mu, sigma, batch_size, num_batches):
        self.mu = mu
        self.sigma = sigma
        self.batch_size = batch_size
        self.num_batches = num_batches
        # Feature dimension comes from the class-mean matrix.
        self.data_shape = (batch_size, mu.shape[1])
        self.label_shape = (batch_size, )
        self.cur_batch = 0
    def __iter__(self):
        return self
    def reset(self):
        # Restart the epoch; the next call to next() yields batch 1 again.
        self.cur_batch = 0
    def __next__(self):
        # Python 3 protocol delegates to the Python 2-style next().
        return self.next()
    @property
    def provide_data(self):
        # (name, shape) pairs describing the data arrays of each batch.
        return [('data', self.data_shape)]
    @property
    def provide_label(self):
        # 'softmax_label' matches the default label name of mx.sym.SoftmaxOutput.
        return [('softmax_label', self.label_shape)]
    def next(self):
        if self.cur_batch < self.num_batches:
            self.cur_batch += 1
            num_classes = self.mu.shape[0]
            # Draw a random class id per example, then fill each example
            # from its class' Gaussian.
            label = np.random.randint(0, num_classes, self.label_shape)
            data = np.zeros(self.data_shape)
            for i in range(num_classes):
                data[label==i,:] = np.random.normal(
                    self.mu[i,:], self.sigma[i,:], (sum(label==i), self.data_shape[1]))
            return SimpleBatch(data=[mx.nd.array(data)], label=[mx.nd.array(label)], pad=0)
        else:
            # End of epoch; callers must reset() to iterate again.
            raise StopIteration
class SyntheticData:
    """Synthetic Gaussian-mixture data source.

    Draws one random mean vector per class and uses a constant 0.1 standard
    deviation for every feature.
    """

    def __init__(self, num_classes, num_features):
        self.num_classes = num_classes
        self.num_features = num_features
        # One row of class means per class; constant isotropic spread.
        self.mu = np.random.rand(num_classes, num_features)
        self.sigma = np.full((num_classes, num_features), 0.1)

    def get_iter(self, batch_size, num_batches=10):
        """Build a SimpleIter yielding ``num_batches`` batches per epoch."""
        return SimpleIter(self.mu, self.sigma, batch_size, num_batches)
tillahoffmann/tensorflow | tensorflow/python/tools/freeze_graph.py | 5 | 12154 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts checkpoint variables into Const ops in a standalone GraphDef file.
This script is designed to take a GraphDef proto, a SaverDef proto, and a set of
variable values stored in a checkpoint file, and output a GraphDef with all of
the variable ops converted into const ops containing the values of the
variables.
It's useful to do this when we need to load a single file in C++, especially in
environments like mobile or embedded where we may not have access to the
RestoreTensor ops and file loading calls that they rely on.
An example of command-line usage is:
bazel build tensorflow/python/tools:freeze_graph && \
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=some_graph_def.pb \
--input_checkpoint=model.ckpt-8361242 \
--output_graph=/tmp/frozen_graph.pb --output_node_names=softmax
You can also look at freeze_graph_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def freeze_graph_with_def_protos(input_graph_def,
                                 input_saver_def,
                                 input_checkpoint,
                                 output_node_names,
                                 restore_op_name,
                                 filename_tensor_name,
                                 output_graph,
                                 clear_devices,
                                 initializer_nodes,
                                 variable_names_blacklist="",
                                 input_meta_graph_def=None,
                                 input_saved_model_dir=None,
                                 saved_model_tags=None):
  """Converts all variables in a graph and checkpoint into constants.

  Exactly one graph source is used: input_meta_graph_def, input_saved_model_dir,
  or input_graph_def (checked in that order at restore time).  Returns the
  frozen GraphDef, or -1 on a missing checkpoint / empty output_node_names.
  """
  del restore_op_name, filename_tensor_name # Unused by updated loading code.
  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if (not input_saved_model_dir and
      not saver_lib.checkpoint_exists(input_checkpoint)):
    print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
    return -1
  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    if input_meta_graph_def:
      for node in input_meta_graph_def.graph_def.node:
        node.device = ""
    elif input_graph_def:
      for node in input_graph_def.node:
        node.device = ""
  if input_graph_def:
    # Import into the default graph so the session below can see it.
    _ = importer.import_graph_def(input_graph_def, name="")
  with session.Session() as sess:
    if input_saver_def:
      saver = saver_lib.Saver(saver_def=input_saver_def)
      saver.restore(sess, input_checkpoint)
    elif input_meta_graph_def:
      restorer = saver_lib.import_meta_graph(
          input_meta_graph_def, clear_devices=True)
      restorer.restore(sess, input_checkpoint)
      if initializer_nodes:
        sess.run(initializer_nodes.split(","))
    elif input_saved_model_dir:
      if saved_model_tags is None:
        saved_model_tags = []
      loader.load(sess, saved_model_tags, input_saved_model_dir)
    else:
      # No saver provided: rebuild one from the variables that exist in
      # both the checkpoint and the imported graph.
      var_list = {}
      reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
      var_to_shape_map = reader.get_variable_to_shape_map()
      for key in var_to_shape_map:
        try:
          tensor = sess.graph.get_tensor_by_name(key + ":0")
        except KeyError:
          # This tensor doesn't exist in the graph (for example it's
          # 'global_step' or a similar housekeeping element) so skip it.
          continue
        var_list[key] = tensor
      saver = saver_lib.Saver(var_list=var_list)
      saver.restore(sess, input_checkpoint)
      if initializer_nodes:
        sess.run(initializer_nodes.split(","))
    # Comma-separated CLI string -> list (or None when empty).
    variable_names_blacklist = (variable_names_blacklist.split(",")
                                if variable_names_blacklist else None)
    if input_meta_graph_def:
      output_graph_def = graph_util.convert_variables_to_constants(
          sess,
          input_meta_graph_def.graph_def,
          output_node_names.split(","),
          variable_names_blacklist=variable_names_blacklist)
    else:
      output_graph_def = graph_util.convert_variables_to_constants(
          sess,
          input_graph_def,
          output_node_names.split(","),
          variable_names_blacklist=variable_names_blacklist)
  # Write GraphDef to file if output path has been given.
  if output_graph:
    with gfile.GFile(output_graph, "wb") as f:
      f.write(output_graph_def.SerializeToString())
  return output_graph_def
def _parse_input_graph_proto(input_graph, input_binary):
  """Load a TensorFlow GraphDef proto from a binary or text file.

  Returns -1 (this tool's error convention) when the file does not exist.
  """
  if not gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1
  graph_def = graph_pb2.GraphDef()
  with gfile.FastGFile(input_graph, "rb" if input_binary else "r") as f:
    contents = f.read()
    if input_binary:
      graph_def.ParseFromString(contents)
    else:
      text_format.Merge(contents, graph_def)
  return graph_def
def _parse_input_meta_graph_proto(input_graph, input_binary):
  """Parser input tensorflow graph into MetaGraphDef proto.

  Args:
    input_graph: Path to the MetaGraphDef file (binary or text format).
    input_binary: True for a serialized binary proto, False for text format.

  Returns:
    The parsed MetaGraphDef, or -1 (this tool's error convention) when the
    file does not exist.
  """
  if not gfile.Exists(input_graph):
    print("Input meta graph file '" + input_graph + "' does not exist!")
    return -1
  input_meta_graph_def = MetaGraphDef()
  mode = "rb" if input_binary else "r"
  with gfile.FastGFile(input_graph, mode) as f:
    if input_binary:
      input_meta_graph_def.ParseFromString(f.read())
    else:
      text_format.Merge(f.read(), input_meta_graph_def)
  # Bug fix: the original message left the quote around the name unclosed.
  print("Loaded meta graph file '" + input_graph + "'")
  return input_meta_graph_def
def _parse_input_saver_proto(input_saver, input_binary):
  """Load a TensorFlow SaverDef proto from a binary or text file.

  Returns -1 (this tool's error convention) when the file does not exist.
  """
  if not gfile.Exists(input_saver):
    print("Input saver file '" + input_saver + "' does not exist!")
    return -1
  saver_def = saver_pb2.SaverDef()
  with gfile.FastGFile(input_saver, "rb" if input_binary else "r") as f:
    contents = f.read()
    if input_binary:
      saver_def.ParseFromString(contents)
    else:
      text_format.Merge(contents, saver_def)
  return saver_def
def freeze_graph(input_graph,
                 input_saver,
                 input_binary,
                 input_checkpoint,
                 output_node_names,
                 restore_op_name,
                 filename_tensor_name,
                 output_graph,
                 clear_devices,
                 initializer_nodes,
                 variable_names_blacklist="",
                 input_meta_graph=None,
                 input_saved_model_dir=None,
                 saved_model_tags=tag_constants.SERVING):
  """Converts all variables in a graph and checkpoint into constants.

  Thin file-path wrapper: parses whichever graph/saver inputs were supplied
  into protos, then delegates to freeze_graph_with_def_protos.
  NOTE(review): the delegate's return value (frozen GraphDef or -1) is
  discarded here; callers get output only via the written output_graph file.
  """
  input_graph_def = None
  # The SavedModel dir takes precedence over a plain GraphDef file.
  if input_saved_model_dir:
    input_graph_def = saved_model_utils.get_meta_graph_def(
        input_saved_model_dir, saved_model_tags).graph_def
  elif input_graph:
    input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
  input_meta_graph_def = None
  if input_meta_graph:
    input_meta_graph_def = _parse_input_meta_graph_proto(
        input_meta_graph, input_binary)
  input_saver_def = None
  if input_saver:
    input_saver_def = _parse_input_saver_proto(input_saver, input_binary)
  freeze_graph_with_def_protos(
      input_graph_def, input_saver_def, input_checkpoint, output_node_names,
      restore_op_name, filename_tensor_name, output_graph, clear_devices,
      initializer_nodes, variable_names_blacklist, input_meta_graph_def,
      input_saved_model_dir, saved_model_tags.split(","))
def main(unused_args):
  """Entry point for app.run: forwards every CLI flag to freeze_graph."""
  freeze_graph(FLAGS.input_graph, FLAGS.input_saver, FLAGS.input_binary,
               FLAGS.input_checkpoint, FLAGS.output_node_names,
               FLAGS.restore_op_name, FLAGS.filename_tensor_name,
               FLAGS.output_graph, FLAGS.clear_devices, FLAGS.initializer_nodes,
               FLAGS.variable_names_blacklist, FLAGS.input_meta_graph,
               FLAGS.input_saved_model_dir, FLAGS.saved_model_tags)
if __name__ == "__main__":
  # CLI wiring: each flag mirrors one parameter of freeze_graph(); unknown
  # arguments are forwarded to app.run() untouched.
  parser = argparse.ArgumentParser()
  # Register a "bool" type so flags accept --flag=true/false spellings.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--input_graph",
      type=str,
      default="",
      help="TensorFlow \'GraphDef\' file to load.")
  parser.add_argument(
      "--input_saver",
      type=str,
      default="",
      help="TensorFlow saver file to load.")
  parser.add_argument(
      "--input_checkpoint",
      type=str,
      default="",
      help="TensorFlow variables file to load.")
  parser.add_argument(
      "--output_graph",
      type=str,
      default="",
      help="Output \'GraphDef\' file name.")
  parser.add_argument(
      "--input_binary",
      nargs="?",
      const=True,
      type="bool",
      default=False,
      help="Whether the input files are in binary format.")
  parser.add_argument(
      "--output_node_names",
      type=str,
      default="",
      help="The name of the output nodes, comma separated.")
  parser.add_argument(
      "--restore_op_name",
      type=str,
      default="save/restore_all",
      help="The name of the master restore operator.")
  parser.add_argument(
      "--filename_tensor_name",
      type=str,
      default="save/Const:0",
      help="The name of the tensor holding the save path.")
  parser.add_argument(
      "--clear_devices",
      nargs="?",
      const=True,
      type="bool",
      default=True,
      help="Whether to remove device specifications.")
  parser.add_argument(
      "--initializer_nodes",
      type=str,
      default="",
      help="comma separated list of initializer nodes to run before freezing.")
  parser.add_argument(
      "--variable_names_blacklist",
      type=str,
      default="",
      help="""\
      comma separated list of variables to skip converting to constants\
      """)
  parser.add_argument(
      "--input_meta_graph",
      type=str,
      default="",
      help="TensorFlow \'MetaGraphDef\' file to load.")
  parser.add_argument(
      "--input_saved_model_dir",
      type=str,
      default="",
      help="Path to the dir with TensorFlow \'SavedModel\' file and variables.")
  parser.add_argument(
      "--saved_model_tags",
      type=str,
      default="serve",
      help="""\
      Group of tag(s) of the MetaGraphDef to load, in string format,\
      separated by \',\'. For tag-set contains multiple tags, all tags \
      must be passed in.\
      """)
  # parse_known_args keeps unrecognized flags for app.run to handle.
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ACM-CSUSB/Website | migrations/versions/1b44691709c_.py | 1 | 2657 | """empty message
Revision ID: 1b44691709c
Revises: None
Create Date: 2015-09-27 15:14:36.967547
"""
# revision identifiers, used by Alembic.
revision = '1b44691709c'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the initial schema: groups, members (+ sid index), content."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('groups',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('members',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('sid', sa.Integer(), nullable=False),
    sa.Column('group_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=127), nullable=True),
    sa.Column('password', sa.String(length=127), nullable=True),
    sa.Column('email', sa.String(length=254), nullable=True),
    sa.Column('phone', sa.String(length=17), nullable=True),
    sa.Column('text', sa.Boolean(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('majors', postgresql.ARRAY(sa.String()), nullable=True),
    sa.Column('minors', postgresql.ARRAY(sa.String()), nullable=True),
    sa.Column('graduation', sa.TIMESTAMP(), nullable=True),
    sa.Column('date', sa.Date(), nullable=True),
    sa.Column('gender', sa.String(), nullable=True),
    sa.Column('shirt', sa.String(), nullable=True),
    sa.Column('shirt_received', sa.Boolean(), nullable=True),
    sa.Column('level', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # Unique index: one member row per student id.
    op.create_index(op.f('ix_members_sid'), 'members', ['sid'], unique=True)
    op.create_table('content',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('member_id', sa.Integer(), nullable=False),
    sa.Column('date', sa.TIMESTAMP(), nullable=True),
    sa.Column('path', sa.String(), nullable=True),
    sa.Column('title', sa.String(length=127), nullable=True),
    # NOTE(review): a 'minors' column on *content* looks copy-pasted from
    # the members table -- possibly meant to be tags; verify intent.
    sa.Column('minors', postgresql.ARRAY(sa.String()), nullable=True),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.Column('data', sa.Text(), nullable=True),
    sa.ForeignKeyConstraint(['member_id'], ['members.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the schema in reverse dependency order (content -> members -> groups)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('content')
    op.drop_index(op.f('ix_members_sid'), table_name='members')
    op.drop_table('members')
    op.drop_table('groups')
    ### end Alembic commands ###
| bsd-2-clause |
tensorflow/tensorflow | tensorflow/lite/tools/signature/signature_def_utils.py | 14 | 3390 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to SignatureDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.lite.tools.signature import _pywrap_signature_def_util_wrapper as signature_def_util
def set_signature_defs(tflite_model, signature_def_map):
"""Sets SignatureDefs to the Metadata of a TfLite flatbuffer buffer.
Args:
tflite_model: Binary TFLite model (bytes or bytes-like object) to which to
add signature_def.
signature_def_map: dict containing SignatureDefs to store in metadata.
Returns:
buffer: A TFLite model binary identical to model buffer with
metadata field containing SignatureDef.
Raises:
ValueError:
tflite_model buffer does not contain a valid TFLite model.
signature_def_map is empty or does not contain a SignatureDef.
"""
model = tflite_model
if not isinstance(tflite_model, bytearray):
model = bytearray(tflite_model)
serialized_signature_def_map = {
k: v.SerializeToString() for k, v in signature_def_map.items()}
model_buffer = signature_def_util.SetSignatureDefMap(
model, serialized_signature_def_map)
return model_buffer
def get_signature_defs(tflite_model):
"""Get SignatureDef dict from the Metadata of a TfLite flatbuffer buffer.
Args:
tflite_model: TFLite model buffer to get the signature_def.
Returns:
dict containing serving names to SignatureDefs if exists, otherwise, empty
dict.
Raises:
ValueError:
tflite_model buffer does not contain a valid TFLite model.
DecodeError:
SignatureDef cannot be parsed from TfLite SignatureDef metadata.
"""
model = tflite_model
if not isinstance(tflite_model, bytearray):
model = bytearray(tflite_model)
serialized_signature_def_map = signature_def_util.GetSignatureDefMap(model)
def _deserialize(serialized):
signature_def = meta_graph_pb2.SignatureDef()
signature_def.ParseFromString(serialized)
return signature_def
return {k: _deserialize(v) for k, v in serialized_signature_def_map.items()}
def clear_signature_defs(tflite_model):
"""Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer.
Args:
tflite_model: TFLite model buffer to remove signature_defs.
Returns:
buffer: A TFLite model binary identical to model buffer with
no SignatureDef metadata.
Raises:
ValueError:
tflite_model buffer does not contain a valid TFLite model.
"""
model = tflite_model
if not isinstance(tflite_model, bytearray):
model = bytearray(tflite_model)
return signature_def_util.ClearSignatureDefs(model)
| apache-2.0 |
pquentin/django | django/utils/html_parser.py | 82 | 5221 | import re
import sys
from django.utils.six.moves import html_parser as _html_parser
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
try:
HTMLParseError = _html_parser.HTMLParseError
except AttributeError:
# create a dummy class for Python 3.5+ where it's been removed
class HTMLParseError(Exception):
pass
if not use_workaround:
if current_version >= (3, 4):
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
| bsd-3-clause |
frdb194/django | tests/forms_tests/widget_tests/test_select.py | 145 | 8962 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from django.forms import Select
from django.utils.safestring import mark_safe
from .base import WidgetTest
class SelectTest(WidgetTest):
widget = Select()
nested_widget = Select(choices=(
('outer1', 'Outer 1'),
('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))),
))
def test_render(self):
self.check_html(self.widget, 'beatle', 'J', choices=self.beatles, html=(
"""<select name="beatle">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
))
def test_render_none(self):
"""
If the value is None, none of the options are selected.
"""
self.check_html(self.widget, 'beatle', None, choices=self.beatles, html=(
"""<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
))
def test_render_label_value(self):
"""
If the value corresponds to a label (but not to an option value), none
of the options are selected.
"""
self.check_html(self.widget, 'beatle', 'John', choices=self.beatles, html=(
"""<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
))
def test_render_selected(self):
"""
Only one option can be selected (#8103).
"""
choices = [('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra')]
self.check_html(self.widget, 'choices', '0', choices=choices, html=(
"""<select name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0">extra</option>
</select>"""
))
def test_constructor_attrs(self):
"""
Select options shouldn't inherit the parent widget attrs.
"""
widget = Select(
attrs={'class': 'super', 'id': 'super'},
choices=[(1, 1), (2, 2), (3, 3)],
)
self.check_html(widget, 'num', 2, html=(
"""<select name="num" class="super" id="super">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>"""
))
def test_compare_to_str(self):
"""
The value is compared to its str().
"""
self.check_html(
self.widget, 'num', 2,
choices=[('1', '1'), ('2', '2'), ('3', '3')],
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>"""
),
)
self.check_html(
self.widget, 'num', '2',
choices=[(1, 1), (2, 2), (3, 3)],
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>"""
),
)
self.check_html(
self.widget, 'num', 2,
choices=[(1, 1), (2, 2), (3, 3)],
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>"""
),
)
def test_choices_constuctor(self):
widget = Select(choices=[(1, 1), (2, 2), (3, 3)])
self.check_html(widget, 'num', 2, html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>"""
))
def test_choices_constructor_generator(self):
"""
If choices is passed to the constructor and is a generator, it can be
iterated over multiple times without getting consumed.
"""
def get_choices():
for i in range(5):
yield (i, i)
widget = Select(choices=get_choices())
self.check_html(widget, 'num', 2, html=(
"""<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>"""
))
self.check_html(widget, 'num', 3, html=(
"""<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
<option value="4">4</option>
</select>"""
))
def test_choices_constuctor_and_render(self):
"""
If 'choices' is passed to both the constructor and render(), then
they'll both be in the output.
"""
widget = Select(choices=[(1, 1), (2, 2), (3, 3)])
self.check_html(widget, 'num', 2, choices=[(4, 4), (5, 5)], html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>"""
))
def test_choices_escaping(self):
choices = (('bad', 'you & me'), ('good', mark_safe('you > me')))
self.check_html(self.widget, 'escape', None, choices=choices, html=(
"""<select name="escape">
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>"""
))
def test_choices_unicode(self):
self.check_html(
self.widget, 'email', 'ŠĐĆŽćžšđ',
choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')],
html=(
"""<select name="email">
<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">
\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111
</option>
<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>
</select>"""
),
)
def test_choices_optgroup(self):
"""
Choices can be nested one level in order to create HTML optgroups.
"""
self.check_html(self.nested_widget, 'nestchoice', None, html=(
"""<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
))
def test_choices_select_outer(self):
self.check_html(self.nested_widget, 'nestchoice', 'outer1', html=(
"""<select name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
))
def test_choices_select_inner(self):
self.check_html(self.nested_widget, 'nestchoice', 'inner1', html=(
"""<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
))
def test_deepcopy(self):
"""
__deepcopy__() should copy all attributes properly (#25085).
"""
widget = Select()
obj = copy.deepcopy(widget)
self.assertIsNot(widget, obj)
self.assertEqual(widget.choices, obj.choices)
self.assertIsNot(widget.choices, obj.choices)
self.assertEqual(widget.attrs, obj.attrs)
self.assertIsNot(widget.attrs, obj.attrs)
| bsd-3-clause |
ovnicraft/openerp-restaurant | account/wizard/account_report_common_journal.py | 385 | 2942 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_journal_report(osv.osv_memory):
_name = 'account.common.journal.report'
_description = 'Account Common Journal Report'
_inherit = "account.common.report"
_columns = {
'amount_currency': fields.boolean("With Currency", help="Print Report with the currency column if the currency differs from the company currency."),
}
def _build_contexts(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = super(account_common_journal_report, self)._build_contexts(cr, uid, ids, data, context=context)
if data['form']['filter'] == 'filter_date':
cr.execute('SELECT period_id FROM account_move_line WHERE date >= %s AND date <= %s', (data['form']['date_from'], data['form']['date_to']))
result['periods'] = map(lambda x: x[0], cr.fetchall())
elif data['form']['filter'] == 'filter_period':
result['periods'] = self.pool.get('account.period').build_ctx_periods(cr, uid, data['form']['period_from'], data['form']['period_to'])
return result
def pre_print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data['form'].update(self.read(cr, uid, ids, ['amount_currency'], context=context)[0])
fy_ids = data['form']['fiscalyear_id'] and [data['form']['fiscalyear_id']] or self.pool.get('account.fiscalyear').search(cr, uid, [('state', '=', 'draft')], context=context)
period_list = data['form']['periods'] or self.pool.get('account.period').search(cr, uid, [('fiscalyear_id', 'in', fy_ids)], context=context)
data['form']['active_ids'] = self.pool.get('account.journal.period').search(cr, uid, [('journal_id', 'in', data['form']['journal_ids']), ('period_id', 'in', period_list)], context=context)
return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/_LWPCookieJar.py | 267 | 6553 | """Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and # port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of"Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl libary, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expired=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not re.search(self.magic_re, magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
| mit |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/distutils/tests/test_build.py | 141 | 1924 | """Tests for distutils.command.build."""
import unittest
import os
import sys
from test.test_support import run_unittest
from distutils.command.build import build
from distutils.tests import support
from sysconfig import get_platform
class BuildTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = build(dist)
cmd.finalize_options()
# if not specified, plat_name gets the current platform
self.assertEqual(cmd.plat_name, get_platform())
# build_purelib is build + lib
wanted = os.path.join(cmd.build_base, 'lib')
self.assertEqual(cmd.build_purelib, wanted)
# build_platlib is 'build/lib.platform-x.x[-pydebug]'
# examples:
# build/lib.macosx-10.3-i386-2.7
plat_spec = '.%s-%s' % (cmd.plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
self.assertTrue(cmd.build_platlib.endswith('-pydebug'))
plat_spec += '-pydebug'
wanted = os.path.join(cmd.build_base, 'lib' + plat_spec)
self.assertEqual(cmd.build_platlib, wanted)
# by default, build_lib = build_purelib
self.assertEqual(cmd.build_lib, cmd.build_purelib)
# build_temp is build/temp.<plat>
wanted = os.path.join(cmd.build_base, 'temp' + plat_spec)
self.assertEqual(cmd.build_temp, wanted)
# build_scripts is build/scripts-x.x
wanted = os.path.join(cmd.build_base, 'scripts-' + sys.version[0:3])
self.assertEqual(cmd.build_scripts, wanted)
# executable is os.path.normpath(sys.executable)
self.assertEqual(cmd.executable, os.path.normpath(sys.executable))
def test_suite():
return unittest.makeSuite(BuildTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
sorenk/ansible | lib/ansible/plugins/cache/memcached.py | 55 | 6858 | # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: memcached
short_description: Use memcached DB for cache
description:
- This cache uses JSON formatted, per host records saved in memcached.
version_added: "1.9"
requirements:
- memcache (python lib)
options:
_uri:
description:
- List of connection information for the memcached DBs
default: ['127.0.0.1:11211']
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
- section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import collections
import os
import time
from multiprocessing import Lock
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
import memcache
except ImportError:
raise AnsibleError("python-memcached is required for the memcached fact cache")
class ProxyClientPool(object):
"""
Memcached connection pooling for thread/fork safety. Inspired by py-redis
connection pool.
Available connections are maintained in a deque and released in a FIFO manner.
"""
def __init__(self, *args, **kwargs):
self.max_connections = kwargs.pop('max_connections', 1024)
self.connection_args = args
self.connection_kwargs = kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._num_connections = 0
self._available_connections = collections.deque(maxlen=self.max_connections)
self._locked_connections = set()
self._lock = Lock()
def _check_safe(self):
if self.pid != os.getpid():
with self._lock:
if self.pid == os.getpid():
# bail out - another thread already acquired the lock
return
self.disconnect_all()
self.reset()
def get_connection(self):
self._check_safe()
try:
connection = self._available_connections.popleft()
except IndexError:
connection = self.create_connection()
self._locked_connections.add(connection)
return connection
def create_connection(self):
if self._num_connections >= self.max_connections:
raise RuntimeError("Too many memcached connections")
self._num_connections += 1
return memcache.Client(*self.connection_args, **self.connection_kwargs)
def release_connection(self, connection):
self._check_safe()
self._locked_connections.remove(connection)
self._available_connections.append(connection)
def disconnect_all(self):
for conn in chain(self._available_connections, self._locked_connections):
conn.disconnect_all()
def __getattr__(self, name):
def wrapped(*args, **kwargs):
return self._proxy_client(name, *args, **kwargs)
return wrapped
def _proxy_client(self, name, *args, **kwargs):
conn = self.get_connection()
try:
return getattr(conn, name)(*args, **kwargs)
finally:
self.release_connection(conn)
class CacheModuleKeys(collections.MutableSet):
"""
A set subclass that keeps track of insertion time and persists
the set in memcached.
"""
PREFIX = 'ansible_cache_keys'
def __init__(self, cache, *args, **kwargs):
self._cache = cache
self._keyset = dict(*args, **kwargs)
def __contains__(self, key):
return key in self._keyset
def __iter__(self):
return iter(self._keyset)
def __len__(self):
return len(self._keyset)
def add(self, key):
self._keyset[key] = time.time()
self._cache.set(self.PREFIX, self._keyset)
def discard(self, key):
del self._keyset[key]
self._cache.set(self.PREFIX, self._keyset)
def remove_by_timerange(self, s_min, s_max):
for k in self._keyset.keys():
t = self._keyset[k]
if s_min < t < s_max:
del self._keyset[k]
self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
else:
connection = ['127.0.0.1:11211']
self._timeout = C.CACHE_PLUGIN_TIMEOUT
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = {}
self._db = ProxyClientPool(connection, debug=0)
self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
return "{0}{1}".format(self._prefix, key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._keys.remove_by_timerange(0, expiry_age)
def get(self, key):
if key not in self._cache:
value = self._db.get(self._make_key(key))
# guard against the key not being removed from the keyset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
self._cache[key] = value
return self._cache.get(key)
def set(self, key, value):
self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
self._cache[key] = value
self._keys.add(key)
def keys(self):
self._expire_keys()
return list(iter(self._keys))
def contains(self, key):
self._expire_keys()
return key in self._keys
def delete(self, key):
del self._cache[key]
self._db.delete(self._make_key(key))
self._keys.discard(key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
return self._keys.copy()
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
| gpl-3.0 |
HBehrens/feedsanitizer | django/contrib/messages/tests/session.py | 413 | 1230 | from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.storage.session import SessionStorage
def set_session_data(storage, messages):
"""
Sets the messages into the backend request's session and remove the
backend's loaded data cache.
"""
storage.request.session[storage.session_key] = messages
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_session_messages_count(storage):
data = storage.request.session.get(storage.session_key, [])
return len(data)
class SessionTest(BaseTest):
storage_class = SessionStorage
def get_request(self):
self.session = {}
request = super(SessionTest, self).get_request()
request.session = self.session
return request
def stored_messages_count(self, storage, response):
return stored_session_messages_count(storage)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_session_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
| mit |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/cognitive_mapping_and_planning/tfcode/cmp_summary.py | 14 | 8338 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for setting up summaries for CMP.
"""
import sys, os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from src import utils
import src.file_utils as fu
import tfcode.nav_utils as nu
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
# outputs is [gt_map, pred_map]:
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('jet')
fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
gt_map, pred_map = outputs[i]
for j in [0]:
for k in range(gt_map.shape[4]):
# Display something like the midpoint of the trajectory.
id = np.int(gt_map.shape[1]/2)
ax = axes.pop();
ax.imshow(gt_map[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('gt_map')
ax = axes.pop();
ax.imshow(pred_map[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('pred_map')
file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def _vis(outputs, global_step, output_dir, metric_summary, N):
# Plot the value map, goal for various maps to see what if the model is
# learning anything useful.
#
# outputs is [values, goals, maps, occupancy, conf].
#
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('jet')
fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*5), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
values, goals, maps, occupancy, conf = outputs[i]
for j in [0]:
for k in range(values.shape[4]):
# Display something like the midpoint of the trajectory.
id = np.int(values.shape[1]/2)
ax = axes.pop();
ax.imshow(goals[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('goal')
ax = axes.pop();
ax.imshow(occupancy[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('occupancy')
ax = axes.pop();
ax.imshow(conf[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('conf')
ax = axes.pop();
ax.imshow(values[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('value')
ax = axes.pop();
ax.imshow(maps[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('incr map')
file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def _summary_vis(m, batch_size, num_steps, arop_full_summary_iters):
  """Build additional-return-ops visualizing values/goals/maps/occupancy/conf.

  Args:
    m: model object exposing value_ops, occupancys, confs, ego_map_ops and
      input_tensors.
    batch_size: batch dimension used to reshape the stacked tensors.
    num_steps: number of unrolled time steps; only the midpoint step is kept.
    arop_full_summary_iters: how often the full visualization is produced.

  Returns:
    (additional_return_ops, arop_summary_iters, arop_eval_fns) where the
    fetched values are consumed by _vis.
  """
  arop = []
  arop_summary_iters = []
  arop_eval_fns = []
  vis_value_ops = []; vis_goal_ops = []; vis_map_ops = []
  vis_occupancy_ops = []; vis_conf_ops = []
  for i, val_op in enumerate(m.value_ops):
    # Collapse the channel dimension so each quantity renders as one image.
    vis_value_op = tf.reduce_mean(tf.abs(val_op), axis=3, keep_dims=True)
    vis_value_ops.append(vis_value_op)

    vis_occupancy_op = tf.reduce_mean(tf.abs(m.occupancys[i]), 3, True)
    vis_occupancy_ops.append(vis_occupancy_op)

    vis_conf_op = tf.reduce_max(tf.abs(m.confs[i]), axis=3, keep_dims=True)
    vis_conf_ops.append(vis_conf_op)

    ego_goal_imgs_i_op = m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)]
    vis_goal_op = tf.reduce_max(ego_goal_imgs_i_op, 4, True)
    vis_goal_ops.append(vis_goal_op)

    vis_map_op = tf.reduce_mean(tf.abs(m.ego_map_ops[i]), 4, True)
    vis_map_ops.append(vis_map_op)

  vis_goal_ops = tf.concat(vis_goal_ops, 4)
  vis_map_ops = tf.concat(vis_map_ops, 4)
  vis_value_ops = tf.concat(vis_value_ops, 3)
  vis_occupancy_ops = tf.concat(vis_occupancy_ops, 3)
  vis_conf_ops = tf.concat(vis_conf_ops, 3)

  # Fold the per-scale stacking back into a [batch, time, ...] layout.
  sh = tf.unstack(tf.shape(vis_value_ops))[1:]
  vis_value_ops = tf.reshape(vis_value_ops, shape=[batch_size, -1] + sh)

  sh = tf.unstack(tf.shape(vis_conf_ops))[1:]
  vis_conf_ops = tf.reshape(vis_conf_ops, shape=[batch_size, -1] + sh)

  sh = tf.unstack(tf.shape(vis_occupancy_ops))[1:]
  vis_occupancy_ops = tf.reshape(vis_occupancy_ops, shape=[batch_size, -1] + sh)

  # Save memory, only return time steps that need to be visualized, factor of
  # 32 CPU memory saving.
  # np.int was removed in NumPy 1.24; builtin int() is equivalent here.
  id = int(num_steps / 2)
  vis_goal_ops = tf.expand_dims(vis_goal_ops[:, id, :, :, :], axis=1)
  vis_map_ops = tf.expand_dims(vis_map_ops[:, id, :, :, :], axis=1)
  vis_value_ops = tf.expand_dims(vis_value_ops[:, id, :, :, :], axis=1)
  vis_conf_ops = tf.expand_dims(vis_conf_ops[:, id, :, :, :], axis=1)
  vis_occupancy_ops = tf.expand_dims(vis_occupancy_ops[:, id, :, :, :], axis=1)

  arop += [[vis_value_ops, vis_goal_ops, vis_map_ops, vis_occupancy_ops,
            vis_conf_ops]]
  arop_summary_iters += [arop_full_summary_iters]
  arop_eval_fns += [_vis]
  return arop, arop_summary_iters, arop_eval_fns
def _summary_readout_maps(m, num_steps, arop_full_summary_iters):
  """Build additional-return-ops comparing readout map GT vs. predictions.

  Only the final time step is returned (memory saving); the fetched values
  are consumed by _vis_readout_maps.
  """
  arop = []
  arop_summary_iters = []
  arop_eval_fns = []
  # np.int() was removed in NumPy 1.24; the expression is already an int.
  id = num_steps - 1
  vis_readout_maps_gt = m.readout_maps_gt
  vis_readout_maps_prob = tf.reshape(m.readout_maps_probs,
                                     shape=tf.shape(vis_readout_maps_gt))
  # Keep just the last step of the trajectory.
  vis_readout_maps_gt = tf.expand_dims(vis_readout_maps_gt[:, id, :, :, :], 1)
  vis_readout_maps_prob = tf.expand_dims(vis_readout_maps_prob[:, id, :, :, :], 1)
  arop += [[vis_readout_maps_gt, vis_readout_maps_prob]]
  arop_summary_iters += [arop_full_summary_iters]
  arop_eval_fns += [_vis_readout_maps]
  return arop, arop_summary_iters, arop_eval_fns
def _add_summaries(m, args, summary_mode, arop_full_summary_iters):
  """Attach default scalar summaries and, in 'val' mode, the visualization
  additional-return-ops to the model's summary ops.

  Returns the s_ops structure produced by nu.add_default_summaries, possibly
  extended with visualization ops.
  """
  def _absorb(triple):
    # Merge an (additional_return_ops, summary_iters, eval_fns) triple
    # into the already-created s_ops.
    extra_ops, extra_iters, extra_fns = triple
    s_ops.additional_return_ops += extra_ops
    s_ops.arop_summary_iters += extra_iters
    s_ops.arop_eval_fns += extra_fns

  task_params = args.navtask.task_params

  ops_to_log = [m.lr_op, m.global_step_op, m.sample_gt_prob_op] + \
      m.loss_ops + m.acc_ops
  op_names = ['lr', 'global_step', 'sample_gt_prob_op'] + \
      m.loss_ops_names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
  # Losses and accuracies are aggregated across batches; the first three are
  # point-in-time scalars.
  aggregate_flags = [0, 0, 0] + [1] * (len(m.loss_ops_names) + len(m.acc_ops))

  scope_name = 'summary'
  with tf.name_scope(scope_name):
    s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
                                     ops_to_log, op_names,
                                     aggregate_flags, m.action_prob_op,
                                     m.input_tensors, scope_name=scope_name)

  if summary_mode == 'val':
    _absorb(_summary_vis(m, task_params.batch_size, task_params.num_steps,
                         arop_full_summary_iters))
    if args.arch.readout_maps:
      _absorb(_summary_readout_maps(m, task_params.num_steps,
                                    arop_full_summary_iters))
  return s_ops
| bsd-2-clause |
crocs-muni/WSNProtectLayer | rssi_csv_to_tossim/rssi_csv_to_tossim.py | 1 | 1288 | #! /usr/bin/python
import sys
import csv
import os
# Here we start processing of a single csv file containing collected results
def processCsvFile(fToProcess):
    # Parse one semicolon-delimited CSV of RSSI measurements and append a
    # TOSSIM "gain <tx> <rx> <dB>" line to the matching module-level `fw`
    # output file (one file per tx power level).
    # NOTE: legacy Python 2 script (print statement, integer division).
    rssiReader = csv.DictReader(open(fToProcess, 'rb'), delimiter=';', quotechar='|')
    # The transmitting node id is embedded in the file name as "...Tx<ID>.<ext>".
    txID = fToProcess.split("Tx")[1].split(".")[0]
    print "Transmitting node: " + str(txID)
    rowNumber = 0
    for row in rssiReader:
        rowNumber += 1
        # Only rows with a 16-byte payload are used.
        if int(row['plen']) == 16:
            # (txpower / 4) corresponds to the index for the file of results for txpower
            # NOTE(review): the "- 45" offset presumably converts the measured
            # mean RSSI to a dB gain -- confirm against the collection setup.
            fw[int( row['txpower']) / 4].write("gain " + txID + " " + row['rxnode'] + " " + str(float(row['mean']) - 45) + "\n" )
# MAIN starts here:
sourcedir = os.getcwd() + "\\source\\"    # input CSVs (Windows-style paths)
resultsdir = os.getcwd() + "\\results\\"  # generated TOSSIM gain files
# Here the names of the output files can be changed.
# Index i maps to tx power 4*i + 3 (processCsvFile indexes fw by txpower / 4).
foutputs = ['tossim_tx3.txt', 'tossim_tx7.txt', 'tossim_tx11.txt', 'tossim_tx15.txt', 'tossim_tx19.txt', 'tossim_tx23.txt', 'tossim_tx27.txt', 'tossim_tx31.txt']
i = 0
fw = []
# Open every output file up front; processCsvFile() writes into `fw`.
# NOTE(review): the handles are never closed/flushed explicitly.
for file in foutputs:
    fw.append(open(resultsdir + file, "w"))
    i += 1
# We want to process all files in the sourcedir
for files in os.walk(sourcedir):
    nbFile = 0
    # files[2] is the list of file names in the walked directory.
    for fsource in files[2]:
        nbFile = nbFile + 1
        fToProcess = sourcedir + fsource
        print "Processing file " + str(nbFile) + ": " + fToProcess
processCsvFile(fToProcess) | bsd-2-clause |
mihailignatenko/erp | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Change.py | 293 | 4692 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from lib.rpc import *
from ServerParameter import *
database="test"
class Change( unohelper.Base, XJobExecutor ):
    """OpenOffice UNO job component showing a dialog to configure the
    connection (host, port, protocol) to an Odoo/OpenERP server.
    Legacy Python 2 code (``<>`` operator, ``iteritems``)."""
    def __init__(self, ctx):
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        desktop=getDesktop()
        log_detail(self)
        self.logobj=Logger()
        doc = desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        # UI protocol label -> URL scheme prefix.
        self.protocol = {
            'XML-RPC': 'http://',
            'XML-RPC secure': 'https://',
            'NET-RPC': 'socket://',
        }
        host=port=protocol=''
        if docinfo.getUserFieldValue(0):
            # User field 0 holds the previously used server URL; split it back
            # into scheme, host and port to pre-fill the dialog.
            m = re.match('^(http[s]?://|socket://)([\w.\-]+):(\d{1,5})$', docinfo.getUserFieldValue(0) or '')
            host = m.group(2)
            port = m.group(3)
            protocol = m.group(1)
        if protocol:
            # Map the scheme prefix back to its UI label.
            for (key, value) in self.protocol.iteritems():
                if value==protocol:
                    protocol=key
                    break
        else:
            protocol='XML-RPC'
        # Build the modal connection dialog.
        self.win=DBModalDialog(60, 50, 120, 90, "Connect to Odoo Server")
        self.win.addFixedText("lblVariable", 38, 12, 25, 15, "Server ")
        self.win.addEdit("txtHost",-2,9,60,15, host or 'localhost')
        self.win.addFixedText("lblReportName",45 , 31, 15, 15, "Port ")
        self.win.addEdit("txtPort",-2,28,60,15, port or "8069")
        self.win.addFixedText("lblLoginName", 2, 51, 60, 15, "Protocol Connection")
        self.win.addComboListBox("lstProtocol", -2, 48, 60, 15, True)
        self.lstProtocol = self.win.getControl( "lstProtocol" )
        self.win.addButton( 'btnNext', -2, -5, 30, 15, 'Next', actionListenerProc = self.btnNext_clicked )
        self.win.addButton( 'btnCancel', -2 - 30 - 5 ,-5, 30, 15, 'Cancel', actionListenerProc = self.btnCancel_clicked )
        for i in self.protocol.keys():
            self.lstProtocol.addItem(i,self.lstProtocol.getItemCount() )
        self.win.doModalDialog( "lstProtocol", protocol)

    def btnNext_clicked(self, oActionEvent):
        # Try to connect with the entered parameters; on success persist the
        # URL into the document and open the ServerParameter dialog.
        global url
        aVal=''
        try:
            url = self.protocol[self.win.getListBoxSelectedItem("lstProtocol")]+self.win.getEditText("txtHost")+":"+self.win.getEditText("txtPort")
            self.sock=RPCSession(url)
            desktop=getDesktop()
            doc = desktop.getCurrentComponent()
            docinfo=doc.getDocumentInfo()
            docinfo.setUserFieldValue(0,url)
            # listdb() is used as a connectivity probe.
            res=self.sock.listdb()
            self.win.endExecute()
            ServerParameter(aVal,url)
        except :
            # NOTE(review): bare except -- any failure (not just connection
            # errors) ends up in this generic error dialog.
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ServerParameter', LOG_ERROR, info)
            ErrorDialog("Connection to server is fail. Please check your Server Parameter.", "", "Error!")
            self.win.endExecute()

    def btnCancel_clicked(self,oActionEvent):
        # Close the dialog without saving anything.
        self.win.endExecute()
# Run the dialog directly when executed as a macro/script, or register the
# component with the UNO implementation helper when loaded as a package.
# (`<>` is the Python 2 inequality operator.)
if __name__<>"package" and __name__=="__main__":
    Change(None)
elif __name__=="package":
    g_ImplementationHelper.addImplementation( Change, "org.openoffice.openerp.report.change", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mrjaydee82/SinLessKernel-m8 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window that renders scheduler traces as rows of colored
    rectangles on a zoomable, scrollable canvas."""
    Y_OFFSET = 100             # pixels above the first rectangle row
    RECT_HEIGHT = 100          # height of one rectangle row
    RECT_SPACE = 50            # vertical spacing between rows
    EVENT_MARKING_WIDTH = 5    # height of the event-marker band atop a row

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5            # horizontal scale factor (px per ms)
        self.scroll_scale = 20     # one scroll unit == 20 px
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area; paint/key/mouse events are bound on both
        # the container and the inner panel so either can receive them.
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None    # summary StaticText, created lazily by update_summary

        self.Show(True)

    def us_to_px(self, val):
        # Convert a duration in microseconds to pixels at the current zoom.
        # NOTE(review): Python 2 integer division -- val / 10^3 truncates
        # before the zoom factor is applied.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Convert pixels back to microseconds at the current zoom.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin, converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Timestamp offset (us, relative to ts_start) of the left visible edge.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        # Draw one rectangle on row `nr` for the interval [start, end]
        # (absolute timestamps); an optional top_color band marks an event.
        # Relies on self.dc, which is only valid while inside on_paint.
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)
        # NOTE(review): width_px is measured from ts_start rather than from
        # `start` -- looks suspicious, but matches the upstream perf script.

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to (re)draw everything in the visible time window.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel coordinate to a row index; -1 if the click falls
        # outside any row (e.g. in the spacing between rows).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the summary text shown below the trace area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward a click to the tracer as (row, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Total canvas width in pixels for the whole traced interval.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-apply scrollbars after a zoom change, keeping timestamp x at
        # the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll one unit at a time.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()

        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
jonathan-beard/edx-platform | lms/djangoapps/instructor/enrollment.py | 37 | 16362 | """
Enrollment operations for use by instructor APIs.
Does not include any access control, be sure to check access before calling.
"""
import json
import logging
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.utils.translation import override as override_language
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
from edxmako.shortcuts import render_to_string
from lang_pref import LANGUAGE_KEY
from submissions import api as sub_api # installed from the edx-submissions repository
from student.models import anonymous_id_for_user
from openedx.core.djangoapps.user_api.models import UserPreference
from microsite_configuration import microsite
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger(__name__)
class EmailEnrollmentState(object):
    """Snapshot of the enrollment-related state of one email in a course.

    Records whether a user account exists for the email, whether that user
    is actively enrolled (and in which mode), and whether a
    CourseEnrollmentAllowed record (with its auto_enroll flag) exists.
    """
    def __init__(self, course_id, email):
        user_exists = User.objects.filter(email=email).exists()
        enrollment_mode = None
        actively_enrolled = False
        name = None
        if user_exists:
            user = User.objects.get(email=email)
            enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)
            # is_active is None when the user has no enrollment record at all.
            actively_enrolled = is_active is not None and is_active
            name = user.profile.name

        allowed_records = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=email).all()
        allowed_exists = allowed_records.exists()
        auto = allowed_exists and allowed_records[0].auto_enroll

        self.user = user_exists
        self.enrollment = actively_enrolled
        self.allowed = allowed_exists
        self.auto_enroll = bool(auto)
        self.full_name = name
        self.mode = enrollment_mode

    def __repr__(self):
        return "{}(user={}, enrollment={}, allowed={}, auto_enroll={})".format(
            self.__class__.__name__,
            self.user,
            self.enrollment,
            self.allowed,
            self.auto_enroll,
        )

    def to_dict(self):
        """
        example: {
            'user': False,
            'enrollment': False,
            'allowed': True,
            'auto_enroll': True,
        }
        """
        return dict(
            user=self.user,
            enrollment=self.enrollment,
            allowed=self.allowed,
            auto_enroll=self.auto_enroll,
        )
def get_user_email_language(user):
    """
    Return the language most appropriate for writing emails to user. Returns
    None if the preference has not been set, or if the user does not exist.
    """
    # Calling UserPreference directly instead of get_user_preference because the user requesting the
    # information is not "user" and also may not have is_staff access.
    return UserPreference.get_value(user, LANGUAGE_KEY)
def enroll_email(course_id, student_email, auto_enroll=False, email_students=False, email_params=None, language=None):
    """
    Enroll a student by email.

    `student_email` is student's emails e.g. "foo@bar.com"
    `auto_enroll` determines what is put in CourseEnrollmentAllowed.auto_enroll
        if auto_enroll is set, then when the email registers, they will be
        enrolled in the course automatically.
    `email_students` determines if student should be notified of action by email.
    `email_params` parameters used while parsing email templates (a `dict`).
        NOTE: mutated in place, and assumed to be a dict whenever
        `email_students` is True.
    `language` is the language used to render the email.

    returns two EmailEnrollmentState's
    representing state before and after the action, plus the CourseEnrollment
    object (None when no user account exists for the email yet).
    """
    previous_state = EmailEnrollmentState(course_id, student_email)
    enrollment_obj = None
    if previous_state.user:
        # if the student is currently unenrolled, don't enroll them in their
        # previous mode
        course_mode = u"honor"
        if previous_state.enrollment:
            course_mode = previous_state.mode
        enrollment_obj = CourseEnrollment.enroll_by_email(student_email, course_id, course_mode)
        if email_students:
            email_params['message'] = 'enrolled_enroll'
            email_params['email_address'] = student_email
            email_params['full_name'] = previous_state.full_name
            send_mail_to_student(student_email, email_params, language=language)
    else:
        # No account yet: record permission so the student is (auto-)enrolled
        # when they register with this email.
        cea, _ = CourseEnrollmentAllowed.objects.get_or_create(course_id=course_id, email=student_email)
        cea.auto_enroll = auto_enroll
        cea.save()
        if email_students:
            email_params['message'] = 'allowed_enroll'
            email_params['email_address'] = student_email
            send_mail_to_student(student_email, email_params, language=language)

    after_state = EmailEnrollmentState(course_id, student_email)

    return previous_state, after_state, enrollment_obj
def unenroll_email(course_id, student_email, email_students=False, email_params=None, language=None):
    """
    Unenroll a student by email.

    `student_email` is student's emails e.g. "foo@bar.com"
    `email_students` determines if student should be notified of action by email.
    `email_params` parameters used while parsing email templates (a `dict`);
        mutated in place when notifications are sent.
    `language` is the language used to render the email.

    returns two EmailEnrollmentState's
    representing state before and after the action.
    """
    before = EmailEnrollmentState(course_id, student_email)

    if before.enrollment:
        # Active enrollment: drop it, optionally notifying the student.
        CourseEnrollment.unenroll_by_email(student_email, course_id)
        if email_students:
            email_params.update(
                message='enrolled_unenroll',
                email_address=student_email,
                full_name=before.full_name,
            )
            send_mail_to_student(student_email, email_params, language=language)

    if before.allowed:
        # Pending "allowed" record: remove it as well.
        CourseEnrollmentAllowed.objects.get(course_id=course_id, email=student_email).delete()
        if email_students:
            email_params.update(
                message='allowed_unenroll',
                email_address=student_email,
            )
            # Since no User object exists for this student there is no "full_name" available.
            send_mail_to_student(student_email, email_params, language=language)

    return before, EmailEnrollmentState(course_id, student_email)
def send_beta_role_email(action, user, email_params):
    """
    Send an email to a user added or removed as a beta tester.

    `action` is one of 'add' or 'remove'
    `user` is the User affected
    `email_params` parameters used while parsing email templates (a `dict`);
        mutated in place.

    Raises ValueError for any other `action`.
    """
    # The two original branches differed only in the template name, so
    # validate first and share the common assignments.
    message_by_action = {'add': 'add_beta_tester', 'remove': 'remove_beta_tester'}
    if action not in message_by_action:
        raise ValueError("Unexpected action received '{}' - expected 'add' or 'remove'".format(action))

    email_params['message'] = message_by_action[action]
    email_params['email_address'] = user.email
    email_params['full_name'] = user.profile.name

    send_mail_to_student(user.email, email_params, language=get_user_email_language(user))
def reset_student_attempts(course_id, student, module_state_key, delete_module=False):
    """
    Reset student attempts for a problem. Optionally deletes all student state for the specified problem.

    In the previous instructor dashboard it was possible to modify/delete
    modules that were not problems. That has been disabled for safety.

    `student` is a User

    `problem_to_reset` is the name of a problem e.g. 'L2Node1'.
    To build the module_state_key 'problem/' and course information will be appended to `problem_to_reset`.

    Raises:
        ValueError: `problem_state` is invalid JSON.
        StudentModule.DoesNotExist: could not load the student module.
        submissions.SubmissionError: unexpected error occurred while resetting the score in the submissions API.
    """
    try:
        # A block may have children. Clear state on children first.
        # (Recursive: each child may itself have children.)
        block = modulestore().get_item(module_state_key)
        if block.has_children:
            for child in block.children:
                try:
                    reset_student_attempts(course_id, student, child, delete_module=delete_module)
                except StudentModule.DoesNotExist:
                    # If a particular child doesn't have any state, no big deal, as long as the parent does.
                    pass
    except ItemNotFoundError:
        # Block missing from the modulestore: log and still try to reset any
        # state stored under the given key.
        log.warning("Could not find %s in modulestore when attempting to reset attempts.", module_state_key)

    # Reset the student's score in the submissions API
    # Currently this is used only by open assessment (ORA 2)
    # We need to do this *before* retrieving the `StudentModule` model,
    # because it's possible for a score to exist even if no student module exists.
    if delete_module:
        sub_api.reset_score(
            anonymous_id_for_user(student, course_id),
            course_id.to_deprecated_string(),
            module_state_key.to_deprecated_string(),
        )

    module_to_reset = StudentModule.objects.get(
        student_id=student.id,
        course_id=course_id,
        module_state_key=module_state_key
    )

    if delete_module:
        module_to_reset.delete()
    else:
        _reset_module_attempts(module_to_reset)
def _reset_module_attempts(studentmodule):
    """
    Reset the number of attempts on a studentmodule.

    Throws ValueError if `problem_state` is invalid JSON.
    """
    # Decode the stored JSON state, zero the attempt counter, and persist.
    state = json.loads(studentmodule.state)
    state["attempts"] = 0
    studentmodule.state = json.dumps(state)
    studentmodule.save()
def get_email_params(course, auto_enroll, secure=True):
    """
    Generate parameters used when parsing email templates.

    `auto_enroll` is a flag for auto enrolling non-registered students: (a `boolean`)
    `secure` selects https vs http for the generated URLs.

    Returns a dict of parameters
    """
    scheme = 'https' if secure else 'http'
    site = microsite.get_value(
        'SITE_NAME',
        settings.SITE_NAME
    )

    def _absolute(path):
        # TODO: Use request.build_absolute_uri rather than '{proto}://{site}{path}'.format
        # and check with the Services team that this works well with microsites
        return u'{proto}://{site}{path}'.format(proto=scheme, site=site, path=path)

    registration_url = _absolute(reverse('register_user'))
    course_url = _absolute(
        reverse('course_root', kwargs={'course_id': course.id.to_deprecated_string()})
    )

    # We can't get the url to the course's About page if the marketing site is enabled.
    course_about_url = None
    if not settings.FEATURES.get('ENABLE_MKTG_SITE', False):
        course_about_url = _absolute(
            reverse('about_course', kwargs={'course_id': course.id.to_deprecated_string()})
        )

    # Composition of email
    return {
        'site_name': site,
        'registration_url': registration_url,
        'course': course,
        'auto_enroll': auto_enroll,
        'course_url': course_url,
        'course_about_url': course_about_url,
        'is_shib_course': uses_shib(course),
    }
def send_mail_to_student(student, param_dict, language=None):
    """
    Construct the email using templates and then send it.

    `student` is the student's email address (a `str`),

    `param_dict` is a `dict` with keys
    [
        `site_name`: name given to edX instance (a `str`)
        `registration_url`: url for registration (a `str`)
        `course_id`: id of course (a `str`)
        `auto_enroll`: user input option (a `str`)
        `course_url`: url of course (a `str`)
        `email_address`: email of student (a `str`)
        `full_name`: student full name (a `str`)
        `message`: type of email to send and template to use (a `str`)
        `is_shib_course`: (a `boolean`)
    ]

    `language` is the language used to render the email. If None the language
    of the currently-logged in user (that is, the user sending the email) will
    be used.

    Returns None.  If `message` is not a known template type, or rendering
    yields an empty subject/message, no email is sent.
    """
    # add some helpers and microconfig substitutions
    if 'course' in param_dict:
        param_dict['course_name'] = param_dict['course'].display_name_with_default

    param_dict['site_name'] = microsite.get_value(
        'SITE_NAME',
        param_dict['site_name']
    )

    subject = None
    message = None

    # see if we are running in a microsite and that there is an
    # activation email template definition available as configuration, if so, then render that
    message_type = param_dict['message']

    # Map message type -> (subject template, body template).
    email_template_dict = {
        'allowed_enroll': (
            'emails/enroll_email_allowedsubject.txt',
            'emails/enroll_email_allowedmessage.txt'
        ),
        'enrolled_enroll': (
            'emails/enroll_email_enrolledsubject.txt',
            'emails/enroll_email_enrolledmessage.txt'
        ),
        'allowed_unenroll': (
            'emails/unenroll_email_subject.txt',
            'emails/unenroll_email_allowedmessage.txt'
        ),
        'enrolled_unenroll': (
            'emails/unenroll_email_subject.txt',
            'emails/unenroll_email_enrolledmessage.txt'
        ),
        'add_beta_tester': (
            'emails/add_beta_tester_email_subject.txt',
            'emails/add_beta_tester_email_message.txt'
        ),
        'remove_beta_tester': (
            'emails/remove_beta_tester_email_subject.txt',
            'emails/remove_beta_tester_email_message.txt'
        ),
        'account_creation_and_enrollment': (
            'emails/enroll_email_enrolledsubject.txt',
            'emails/account_creation_and_enroll_emailMessage.txt'
        ),
    }

    subject_template, message_template = email_template_dict.get(message_type, (None, None))
    if subject_template is not None and message_template is not None:
        subject, message = render_message_to_string(
            subject_template, message_template, param_dict, language=language
        )

    if subject and message:
        # Remove leading and trailing whitespace from body
        message = message.strip()

        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )

        send_mail(subject, message, from_address, [student], fail_silently=False)
def render_message_to_string(subject_template, message_template, param_dict, language=None):
    """
    Render a mail subject and message templates using the parameters from
    param_dict and the given language. If language is None, the platform
    default language is used.

    Returns two strings that correspond to the rendered, translated email
    subject and message.
    """
    # override_language temporarily activates `language` for the duration of
    # the template rendering, then restores the previous active language.
    with override_language(language):
        return get_subject_and_message(subject_template, message_template, param_dict)
def get_subject_and_message(subject_template, message_template, param_dict):
    """
    Render both templates against param_dict and return the resulting
    (subject, message) pair of strings.
    """
    return (
        render_to_string(subject_template, param_dict),
        render_to_string(message_template, param_dict),
    )
def uses_shib(course):
    """
    Used to return whether course has Shibboleth as the enrollment domain

    Returns a boolean indicating if Shibboleth authentication is set for this course.
    """
    # The docstring promises a boolean, but the bare `and` expression used to
    # return the truthy domain string (or None); bool() makes it an actual
    # True/False without changing any boolean-context behavior.
    return bool(
        course.enrollment_domain and
        course.enrollment_domain.startswith(settings.SHIBBOLETH_DOMAIN_PREFIX)
    )
| agpl-3.0 |
MarishaYasko/interactive-stories-stands | InteractiveStands/Lib/encodings/cp865.py | 272 | 34618 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless CP865 codec.  encoding_map/decoding_table are the
    # gencodec.py-generated module-level tables defined further down.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Incremental charmap encoding; stateless, so `final` is ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Incremental charmap decoding; stateless, so `final` is ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: behavior comes entirely from Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: behavior comes entirely from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the 'cp865' codec."""
    # Codec is stateless, so a single shared instance can provide both
    # bound methods.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp865',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xa4' # 0x00af -> CURRENCY SIGN
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Derived mechanically from decoding_table (its exact inverse) instead of
# duplicating 256 hand-written entries: every byte value 0x00-0xff decodes
# to a distinct character in CP865, so inverting the table reproduces the
# original gencodec.py output entry-for-entry while guaranteeing that the
# encoding and decoding tables can never drift apart.
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
| mit |
qtproject/pyside-pyside | tests/QtWidgets/bug_1002.py | 1 | 1775 | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
import sys
from PySide2.QtWidgets import QWidget, QPushButton
from helper import UsesQApplication
class TestBug1002 (UsesQApplication):
    """Regression test for PySide bug 1002: QPushButton.window() must
    return the existing Python wrapper of the parent widget rather than
    creating a new wrapper object."""
    def testReturnWindow(self):
        widget = QWidget()
        button = QPushButton(widget)
        # sys.getrefcount reports one extra reference (its own argument),
        # so an object held by a single local name reports 2.
        self.assertEqual(sys.getrefcount(widget), 2)
        window = button.window()
        # window() should hand back the same wrapper as `widget`: both
        # locals now reference one object, so each handle reports 3.
        self.assertEqual(sys.getrefcount(widget), 3)
        self.assertEqual(sys.getrefcount(window), 3)
        del widget
if __name__ == '__main__':
    # Allow running this test file directly with the stock unittest runner.
    unittest.main()
| lgpl-2.1 |
mupi/tecsaladeaula | forum/serializers.py | 2 | 3403 | from forum.models import Question, Answer, QuestionVote, AnswerVote
from rest_framework import serializers
class QuestionSerializer(serializers.ModelSerializer):
    """Serializes a forum Question, augmenting the model fields with vote
    count, author information and per-viewer moderation flags."""
    # SerializerMethodField resolves the *named* method below at
    # serialization time (legacy pre-DRF-3 calling convention).
    votes = serializers.SerializerMethodField('count_votes')
    username = serializers.SerializerMethodField('get_username')
    user_obj = serializers.SerializerMethodField('get_user')
    timestamp = serializers.DateTimeField(read_only=True)
    hidden_to_user = serializers.SerializerMethodField('is_hidden')
    moderator = serializers.SerializerMethodField('is_moderator')
    class Meta:
        model = Question
        fields = ('id', 'title', 'course', 'answers', 'text', 'slug',
                  'votes', 'timestamp', 'username', 'user_obj', 'hidden',
                  'hidden_by', 'hidden_to_user', 'moderator', 'hidden_justification',)
    def count_votes(self, obj):
        # Defensive: fall back to 0 when no instance is bound.
        if obj:
            return obj.count_votes
        else:
            return 0
    def get_username(self, obj):
        if obj:
            return obj.user.username
        else:
            return u''
    def get_user(self, obj):
        if obj:
            # Local import — presumably avoids a circular dependency
            # between the accounts and forum apps; confirm before moving.
            from accounts.serializers import TimtecUserSerializer
            tus = TimtecUserSerializer(obj.user)
            return tus.data
        else:
            return u''
    def is_hidden(self, obj):
        # Prefer a per-viewer `hidden_to_user` attribute when the queryset
        # annotated one; otherwise fall back to the global `hidden` flag.
        if hasattr(obj, 'hidden_to_user'):
            return obj.hidden_to_user
        return obj.hidden
    def is_moderator(self, obj):
        # `moderator` is only present when annotated upstream; default to
        # False for plain model instances.
        if hasattr(obj, 'moderator'):
            return obj.moderator
        return False
class AnswerSerializer(serializers.ModelSerializer):
    """Serializes a forum Answer with vote count, author information and
    the requesting user's own vote on the answer."""
    # SerializerMethodField resolves the *named* method below at
    # serialization time (legacy pre-DRF-3 calling convention).
    votes = serializers.SerializerMethodField('count_votes')
    username = serializers.SerializerMethodField('get_username')
    user_obj = serializers.SerializerMethodField('get_user')
    timestamp = serializers.DateTimeField(read_only=True)
    current_user_vote = serializers.SerializerMethodField('get_current_user_vote')
    class Meta:
        model = Answer
        fields = ('id', 'question', 'text', 'votes', 'timestamp', 'username', 'user_obj', 'current_user_vote')
    def count_votes(self, obj):
        # Defensive: fall back to 0 when no instance is bound.
        if obj:
            return obj.count_votes
        else:
            return 0
    def get_username(self, obj):
        if obj:
            return obj.user.username
        else:
            return u''
    def get_user(self, obj):
        if obj:
            # Local import — presumably avoids a circular dependency
            # between the accounts and forum apps; confirm before moving.
            from accounts.serializers import TimtecUserSerializer
            tus = TimtecUserSerializer(obj.user)
            return tus.data
        else:
            return u''
    def get_current_user_vote(self, obj):
        # NOTE(review): get_or_create means merely *serializing* an answer
        # writes an AnswerVote row for the requesting user — a side effect
        # on read. Confirm this is intended; requires an authenticated
        # request in self.context.
        current_user_vote, _ = AnswerVote.objects.get_or_create(user=self.context.get('request').user, answer=obj)
        serializer = AnswerVoteSerializer(instance=current_user_vote, many=False, context=self.context)
        return serializer.data
class QuestionVoteSerializer(serializers.ModelSerializer):
    """Serializes a user's vote on a Question."""
    # `user` and `timestamp` are exposed read-only: the server assigns
    # them, clients may not set them on write.
    user = serializers.IntegerField(read_only=True)
    timestamp = serializers.DateTimeField(read_only=True)
    class Meta:
        model = QuestionVote
        fields = ('question', 'timestamp', 'user', 'value')
class AnswerVoteSerializer(serializers.ModelSerializer):
    # Same read-only policy as QuestionVoteSerializer: user/timestamp are
    # server-assigned; clients set only 'answer' and 'value'.
    user = serializers.IntegerField(read_only=True)
    timestamp = serializers.DateTimeField(read_only=True)

    class Meta:
        model = AnswerVote
        fields = ('id', 'answer', 'timestamp', 'user', 'value')
| agpl-3.0 |
Mazecreator/tensorflow | tensorflow/contrib/ndlstm/python/misc.py | 121 | 3612 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous functions useful for nD-LSTM models.
Some of these functions duplicate functionality in tfslim with
slightly different interfaces.
Tensors in this library generally have the shape (num_images, height, width,
depth).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def _shape(tensor):
"""Get the shape of a tensor as an int list."""
return tensor.get_shape().as_list()
def pixels_as_vector(images, scope=None):
    """Reduce images to vectors by combining all pixels.

    Args:
      images: (num_images, height, width, depth) tensor; the static shape
        must be fully defined (see _shape).
      scope: optional name scope.

    Returns:
      A (num_images, height * width * depth) tensor.
    """
    with ops.name_scope(scope, "PixelsAsVector", [images]):
        batch_size, height, width, depth = _shape(images)
        return array_ops.reshape(images, [batch_size, height * width * depth])
def pool_as_vector(images, scope=None):
    """Reduce images to vectors by averaging all pixels.

    Averages over the spatial axes (1, 2), producing a
    (num_images, depth) tensor.
    """
    with ops.name_scope(scope, "PoolAsVector", [images]):
        return math_ops.reduce_mean(images, [1, 2])
def one_hot_planes(labels, num_classes, scope=None):
    """Compute 1-hot encodings for planes.

    Given a label, this computes a label image that contains
    1 at all pixels in the plane corresponding to the target
    class and 0 in all other planes.

    Args:
      labels: (batch_size,) tensor
      num_classes: number of classes
      scope: optional scope name

    Returns:
      Tensor of shape (batch_size, 1, 1, num_classes) with a 1-hot encoding.
    """
    with ops.name_scope(scope, "OneHotPlanes", [labels]):
        batch_size, = _shape(labels)
        batched = layers.one_hot_encoding(labels, num_classes)
        # Singleton spatial dims let the result broadcast against
        # (batch, height, width, num_classes) feature maps.
        return array_ops.reshape(batched, [batch_size, 1, 1, num_classes])
def one_hot_mask(labels, num_classes, scope=None):
    """Compute 1-hot encodings for masks.

    Given a label image, this computes the one hot encoding at
    each pixel.

    Args:
      labels: (batch_size, width, height, 1) tensor containing labels.
      num_classes: number of classes
      scope: optional scope name

    Returns:
      Tensor of shape (batch_size, width, height, num_classes) with
      a 1-hot encoding.
    """
    # NOTE(review): _shape(labels) is unpacked into only three values
    # (height, width, depth), so the code actually requires a rank-3
    # input despite the 4-D shapes documented above -- confirm intent.
    with ops.name_scope(scope, "OneHotMask", [labels]):
        height, width, depth = _shape(labels)
        assert depth == 1
        sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
        sparse_size, _ = _shape(sparse_labels)
        # Pair each flattened pixel index with its label and scatter 1.0
        # into a dense (num_pixels, num_classes) matrix.
        indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
        concated = array_ops.concat([indices, sparse_labels], 1)
        dense_result = sparse_ops.sparse_to_dense(concated,
                                                  [sparse_size, num_classes], 1.0,
                                                  0.0)
        result = array_ops.reshape(dense_result, [height, width, num_classes])
        return result
| apache-2.0 |
crazy-cat/incubator-mxnet | tools/caffe_converter/convert_caffe_modelzoo.py | 24 | 5837 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Convert Caffe's modelzoo
"""
import os
import argparse
from convert_model import convert_model
from convert_mean import convert_mean
import mxnet as mx
_mx_caffe_model = 'http://data.mxnet.io/models/imagenet/test/caffe/'

"""Dictionary for model meta information

For each model, it requires three attributes:

  - prototxt: URL for the deploy prototxt file
  - caffemodel: URL for the binary caffemodel
  - mean : URL for the data mean or a tuple of float

Optionally it takes

  - top-1-acc : top 1 accuracy for testing
  - top-5-acc : top 5 accuracy for testing
"""
model_meta_info = {
    # pylint: disable=line-too-long
    'bvlc_alexnet' : {
        # BUGFIX: these two URLs previously pointed at the bvlc_googlenet
        # files by mistake; AlexNet must download its own deploy prototxt
        # and caffemodel from the model zoo.
        'prototxt' : 'https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt',
        'caffemodel' : 'http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel',
        'mean' : 'https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/caffe/imagenet_mean.binaryproto',
        'top-1-acc' : 0.571,
        'top-5-acc' : 0.802
    },
    'bvlc_googlenet' : {
        'prototxt' : 'https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt',
        'caffemodel' : 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
        'mean' : (123, 117, 104),
        'top-1-acc' : 0.687,
        'top-5-acc' : 0.889
    },
    'vgg-16' : {
        'prototxt' : 'https://gist.githubusercontent.com/ksimonyan/211839e770f7b538e2d8/raw/c3ba00e272d9f48594acef1f67e5fd12aff7a806/VGG_ILSVRC_16_layers_deploy.prototxt',
        # 'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel',
        'caffemodel' : 'http://data.mxnet.io/models/imagenet/test/caffe/VGG_ILSVRC_16_layers.caffemodel',
        'mean': (123.68, 116.779, 103.939),
        'top-1-acc' : 0.734,
        'top-5-acc' : 0.914
    },
    'vgg-19' : {
        'prototxt' : 'https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/bb2b4fe0a9bb0669211cf3d0bc949dfdda173e9e/VGG_ILSVRC_19_layers_deploy.prototxt',
        # 'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel',
        'caffemodel' : 'http://data.mxnet.io/models/imagenet/test/caffe/VGG_ILSVRC_19_layers.caffemodel',
        'mean' : (123.68, 116.779, 103.939),
        'top-1-acc' : 0.731,
        'top-5-acc' : 0.913
    },
    'resnet-50' : {
        'prototxt' : _mx_caffe_model+'ResNet-50-deploy.prototxt',
        'caffemodel' : _mx_caffe_model+'ResNet-50-model.caffemodel',
        'mean' : _mx_caffe_model+'ResNet_mean.binaryproto',
        'top-1-acc' : 0.753,
        'top-5-acc' : 0.922
    },
    'resnet-101' : {
        'prototxt' : _mx_caffe_model+'ResNet-101-deploy.prototxt',
        'caffemodel' : _mx_caffe_model+'ResNet-101-model.caffemodel',
        'mean' : _mx_caffe_model+'ResNet_mean.binaryproto',
        'top-1-acc' : 0.764,
        'top-5-acc' : 0.929
    },
    'resnet-152' : {
        'prototxt' : _mx_caffe_model+'ResNet-152-deploy.prototxt',
        'caffemodel' : _mx_caffe_model+'ResNet-152-model.caffemodel',
        'mean' : _mx_caffe_model+'ResNet_mean.binaryproto',
        'top-1-acc' : 0.77,
        'top-5-acc' : 0.933
    },
}
def get_model_meta_info(model_name):
    """Return a shallow copy of the meta-info dict for *model_name*.

    A copy is returned so callers can modify it without mutating the
    module-level registry.  (The previous form, dict(dict(model_meta_info)
    [model_name]), copied the entire registry just to index one entry.)
    """
    return dict(model_meta_info[model_name])
def download_caffe_model(model_name, meta_info, dst_dir='./model'):
    """Download a caffe model into ``dst_dir`` using the given meta info.

    Returns (prototxt_path, caffemodel_path, mean); ``mean`` is a local
    file path when the meta info supplied a URL string, otherwise the
    RGB tuple from the meta info is passed through unchanged.
    """
    if not os.path.isdir(dst_dir):
        os.mkdir(dst_dir)
    model_name = os.path.join(dst_dir, model_name)
    assert 'prototxt' in meta_info, "missing prototxt url"
    prototxt = mx.test_utils.download(meta_info['prototxt'], model_name+'_deploy.prototxt')
    # BUGFIX: assertion message previously read "mssing caffemodel url".
    assert 'caffemodel' in meta_info, "missing caffemodel url"
    caffemodel = mx.test_utils.download(meta_info['caffemodel'], model_name+'.caffemodel')
    assert 'mean' in meta_info, 'no mean info'
    mean = meta_info['mean']
    if isinstance(mean, str):
        mean = mx.test_utils.download(mean, model_name+'_mean.binaryproto')
    return (prototxt, caffemodel, mean)
def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
    """Download, convert and save a caffe model.

    Returns (mxnet model prefix, mean), where mean is either a converted
    .nd file path or the pass-through RGB tuple from the meta info.
    """
    (prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
    model_name = os.path.join(dst_dir, model_name)
    convert_model(prototxt, caffemodel, model_name)
    if isinstance(mean, str):
        # A downloaded binaryproto mean is converted into an mxnet
        # NDArray file, and that path is returned instead.
        mx_mean = model_name + '-mean.nd'
        convert_mean(mean, mx_mean)
        mean = mx_mean
    return (model_name, mean)
if __name__ == '__main__':
    # CLI: `python convert_caffe_modelzoo.py <model_name>` downloads and
    # converts a single model from the zoo into ./model.
    parser = argparse.ArgumentParser(description='Convert Caffe model zoo')
    parser.add_argument('model_name', help='can be '+', '.join(model_meta_info.keys()))
    args = parser.parse_args()
    assert args.model_name in model_meta_info, 'Unknown model ' + args.model_name
    fname, _ = convert_caffe_model(args.model_name, model_meta_info[args.model_name])
    print('Model is saved into ' + fname)
| apache-2.0 |
ivan-fedorov/intellij-community | python/lib/Lib/distutils/tests/test_install_scripts.py | 152 | 2555 | """Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
class InstallScriptsTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             unittest.TestCase):

    def test_default_settings(self):
        # finalize_options() must pull build_dir/install_dir and the
        # force/skip_build flags from the `build` and `install` commands.
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts="/foo/bar")
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts="/splat/funk",
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)
        # Before finalization: everything unset.
        self.assert_(not cmd.force)
        self.assert_(not cmd.skip_build)
        self.assert_(cmd.build_dir is None)
        self.assert_(cmd.install_dir is None)

        cmd.finalize_options()

        # After finalization: values copied from the parent commands.
        self.assert_(cmd.force)
        self.assert_(cmd.skip_build)
        self.assertEqual(cmd.build_dir, "/foo/bar")
        self.assertEqual(cmd.install_dir, "/splat/funk")

    def test_installation(self):
        # run() must copy every script from the build dir to the install
        # dir, regardless of sh-bang style (python env, python, shell).
        source = self.mkdtemp()
        expected = []

        def write_script(name, text):
            # Record the name so we can check it was installed later.
            expected.append(name)
            f = open(os.path.join(source, name), "w")
            f.write(text)
            f.close()

        write_script("script1.py", ("#! /usr/bin/env python2.3\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("script2.py", ("#!/usr/bin/python\n"
                                    "# bogus script w/ Python sh-bang\n"
                                    "pass\n"))
        write_script("shell.sh", ("#!/bin/sh\n"
                                  "# bogus shell script w/ sh-bang\n"
                                  "exit 0\n"))

        target = self.mkdtemp()
        dist = Distribution()
        dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
        dist.command_obj["install"] = support.DummyCommand(
            install_scripts=target,
            force=1,
            skip_build=1,
            )
        cmd = install_scripts(dist)
        cmd.finalize_options()
        cmd.run()

        installed = os.listdir(target)
        for name in expected:
            self.assert_(name in installed)
def test_suite():
    """Aggregate this module's tests for the distutils test runner."""
    # unittest.makeSuite is deprecated (and removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase is the supported equivalent.
    return unittest.TestLoader().loadTestsFromTestCase(InstallScriptsTestCase)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main(defaultTest="test_suite")
| apache-2.0 |
bodi000/odoo | addons/mail/tests/test_invite.py | 187 | 2616 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
class test_invite(TestMail):

    def test_00_basic_invite(self):
        """Inviting a partner must subscribe them to the group and send
        exactly one invitation email with the expected subject and body."""
        cr, uid = self.cr, self.uid
        mail_invite = self.registry('mail.wizard.invite')

        # Do: create a mail_wizard_invite, validate it
        self._init_mock_build_email()
        context = {'default_res_model': 'mail.group', 'default_res_id': self.group_pigs_id}
        mail_invite_id = mail_invite.create(cr, self.user_raoul_id, {'partner_ids': [(4, self.partner_bert_id)], 'send_mail': True}, context)
        mail_invite.add_followers(cr, self.user_raoul_id, [mail_invite_id], {'default_model': 'mail.group', 'default_res_id': 0})

        # Test: Pigs followers should contain Admin, Bert
        self.group_pigs.refresh()
        follower_ids = [follower.id for follower in self.group_pigs.message_follower_ids]
        self.assertEqual(set(follower_ids), set([self.partner_admin_id, self.partner_bert_id]), 'invite: Pigs followers after invite is incorrect')

        # Test: (pretend to) send email and check subject, body
        self.assertEqual(len(self._build_email_kwargs_list), 1, 'sent email number incorrect, should be only for Bert')
        for sent_email in self._build_email_kwargs_list:
            self.assertEqual(sent_email.get('subject'), 'Invitation to follow Discussion group: Pigs',
                             'invite: subject of invitation email is incorrect')
            self.assertIn('Raoul Grosbedon invited you to follow Discussion group document: Pigs', sent_email.get('body'),
                          'invite: body of invitation email is incorrect')
| agpl-3.0 |
expfactory/expfactory | expfactory/validator/experiments.py | 1 | 6892 | """
validators/experiments.py: python functions to validate experiments and library
experiment objects
Copyright (c) 2017-2021, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import re
import sys
import tempfile
import shutil
from expfactory.validator.utils import notvalid
from expfactory.logger import bot
from expfactory.utils import clone, read_json
from glob import glob
import json
class ExperimentValidator:
    """Validates an experiment (local folder, config.json path or
    repository URL) against the contract in get_validation_fields()."""

    def __init__(self, quiet=False):
        # Scratch directory used when a repository URL must be cloned.
        self.tmpdir = tempfile.mkdtemp()
        if quiet is True:
            bot.level = 0

    def __str__(self):
        return "expfactory.ExperimentValidator"

    def _validate_folder(self, folder=None):
        """validate folder takes a cloned github repo, ensures
        the existence of the config.json, and validates it.
        """
        from expfactory.experiment import load_experiment

        if folder is None:
            folder = os.path.abspath(os.getcwd())

        config = load_experiment(folder, return_path=True)
        if not config:
            return notvalid("%s is not an experiment." % (folder))

        return self._validate_config(folder)

    def validate(self, folder, cleanup=False, validate_folder=True):
        """validate is the entrypoint to all validation, for
        a folder, config, or url. If a URL is found, it is
        cloned and cleaned up.

        :param validate_folder: ensures the folder name (github repo)
                                matches.
        """
        # Obtain any repository URL provided
        if folder.startswith("http") or "github" in folder:
            folder = clone(folder, tmpdir=self.tmpdir)

        # Load config.json if provided directly
        elif os.path.basename(folder) == "config.json":
            config = os.path.dirname(folder)
            return self._validate_config(config, validate_folder)

        # Otherwise, validate folder and cleanup
        valid = self._validate_folder(folder)
        if cleanup is True:
            shutil.rmtree(folder)
        return valid

    def _validate_config(self, folder, validate_folder=True):
        """validate config is the primary validation function that checks
        for presence and format of required fields.

        Parameters
        ==========
        :folder: full path to folder with config.json
        :name: if provided, the folder name to check against exp_id
        """
        config = "%s/config.json" % folder
        name = os.path.basename(folder)
        if not os.path.exists(config):
            return notvalid("%s: config.json not found." % (folder))

        # Load the config: any read/parse failure means "not valid".
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt and SystemExit.
        try:
            config = read_json(config)
        except Exception:
            return notvalid("%s: cannot load json, invalid." % (name))

        # Config.json should be single dict
        if isinstance(config, list):
            return notvalid("%s: config.json is a list, not valid." % (name))

        # Check over required fields
        fields = self.get_validation_fields()
        for field, value, ftype in fields:

            bot.verbose("field: %s, required: %s" % (field, value))

            # Field must be in the keys if required
            if field not in config.keys():
                if value == 1:
                    return notvalid(
                        "%s: config.json is missing required field %s" % (name, field)
                    )

            # Field is present, check type
            else:
                if not isinstance(config[field], ftype):
                    return notvalid(
                        "%s:%s invalid type, must be %s." % (name, field, str(ftype))
                    )

            # Expid gets special treatment
            if field == "exp_id" and validate_folder is True:
                if config[field] != name:
                    return notvalid(
                        "%s: exp_id parameter %s does not match folder name."
                        % (name, config[field])
                    )

                # exp_id may only contain lowercase letters, digits, - and _
                if not re.match("^[a-z0-9_-]*$", config[field]):
                    # BUGFIX: the two message halves were previously joined
                    # with no separator, producing "charactersonly".
                    message = "%s: exp_id parameter %s has invalid characters; "
                    message += "only lowercase [a-z], [0-9], -, and _ allowed."
                    return notvalid(message % (name, config[field]))

        return True

    def get_validation_fields(self):
        """get_validation_fields returns a list of tuples (each a field)

        we only require the exp_id to coincide with the folder name, for the sake
        of reproducibility (given that all are served from sample image or Github
        organization). All other fields are optional.
        To specify runtime variables, add to "experiment_variables"

        0: not required, no warning
        1: required, not valid
        2: not required, warning
        type: indicates the variable type
        """
        return [
            ("name", 1, str),  # required
            ("time", 1, int),
            ("url", 1, str),
            ("description", 1, str),
            ("instructions", 1, str),
            ("exp_id", 1, str),
            ("install", 0, list),  # list of commands to install / build experiment
            ("contributors", 0, list),  # not required
            ("reference", 0, list),
            ("cognitive_atlas_task_id", 0, str),
            ("template", 0, str),
        ]
| bsd-3-clause |
jmesteve/openerpseda | openerp/addons_extra/point_of_sale/report/pos_lines.py | 61 | 2251 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class pos_lines(report_sxw.rml_parse):

    def __init__(self, cr, uid, name, context):
        super(pos_lines, self).__init__(cr, uid, name, context=context)
        # Running quantity total, filled in by __total_quantity__.
        self.total = 0.0
        # Helpers exposed to the RML template.
        self.localcontext.update({
            'time': time,
            'total_quantity': self.__total_quantity__,
            'taxes':self.__taxes__,
        })

    def __total_quantity__(self, obj):
        # Sum the quantities of all order lines; cached on self.total
        # for reuse within the same rendering pass.
        tot = 0
        for line in obj.lines:
            tot += line.qty
        self.total = tot
        return self.total

    def __taxes__(self, obj):
        # Fetch the tax name attached to the line's product.
        # NOTE(review): the WHERE clause compares pol.id against obj.id,
        # which looks like it expects an order-*line* id, and
        # fetchone()[0] raises when the joins yield no row -- confirm
        # against the template's usage before changing.
        self.cr.execute ( " Select acct.name from pos_order as po " \
                          " LEFT JOIN pos_order_line as pol ON po.id = pol.order_id " \
                          " LEFT JOIN product_taxes_rel as ptr ON pol.product_id = ptr.prod_id " \
                          " LEFT JOIN account_tax as acct ON acct.id = ptr.tax_id " \
                          " WHERE pol.id = %s", (obj.id,))
        res=self.cr.fetchone()[0]
        return res
# Register the parser with the report engine under 'report.pos.lines'.
report_sxw.report_sxw('report.pos.lines', 'pos.order', 'addons/point_of_sale/report/pos_lines.rml', parser=pos_lines,header='internal')

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
udoprog/ontv | ontv/format.py | 1 | 6624 | import textwrap
import datetime
from dateutil import relativedelta
from .utils import has_aired_filter
def short_episode(episode):
    """One-line summary of an episode, e.g. ``S01E02 'Pilot'``."""
    season = episode['season_number']
    number = episode['episode_number']
    name = episode['episode_name']
    return u"S{0:02}E{1:02} '{2}'".format(season, number, name)
def _build_readable_date(years, months, days):
show_days = True
show_weeks = True
show_months = True
if years > 1:
show_days = False
show_weeks = False
yield "{0} years".format(years)
if years >= 10:
show_months = False
if years == 1:
show_days = False
show_weeks = False
yield "1 year"
if show_months:
if months > 1:
show_days = False
yield "{0} months".format(months)
if months == 1:
yield "1 month"
if show_weeks:
if days > 14:
yield "{0} weeks".format(days / 7)
return
if days > 7:
yield "1 week"
days -= 7
if show_days:
if days > 1:
yield "{0} days".format(days)
return
yield "1 day"
def readable_timedelta(now, then, next_day, suffix="", prefix=""):
    # Human-readable distance between two datetimes.  `next_day` is the
    # word the caller wants for an exactly-one-day difference (the caller
    # chooses "yesterday"/"tomorrow" based on direction); `prefix`/`suffix`
    # wrap the composed phrase ("in ...", "... ago").
    delta = relativedelta.relativedelta(now, then)

    # abs() so the same wording works for past and future deltas.
    years = abs(delta.years)
    months = abs(delta.months)
    days = abs(delta.days)

    if years == 0 and months == 0 and days == 0:
        return "today"

    if years == 0 and months == 0 and days == 1:
        return next_day

    g = _build_readable_date(years, months, days)
    return prefix + ", ".join(g) + suffix
def floor_datetime(dt):
    """Truncate *dt* to midnight of the same calendar day."""
    return datetime.datetime(dt.year, dt.month, dt.day)
def format_airdate(aired, now=None):
    """Render an ISO ``YYYY-MM-DD`` air date relative to *now*.

    Returns "n/a" for an unknown date and a placeholder for an
    unparseable one.
    """
    if aired is None:
        return "n/a"

    try:
        then = datetime.datetime.strptime(aired, "%Y-%m-%d")
    except (TypeError, ValueError):
        # strptime raises ValueError for malformed dates and TypeError
        # for non-string input; the previous bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        return "<invalid %Y-%m-%d>"

    if now is None:
        now = datetime.datetime.now()

    now = floor_datetime(now)

    if then <= now:
        return readable_timedelta(
            now, then, "yesterday", suffix=" ago")

    return readable_timedelta(
        now, then, "tomorrow", prefix="in ")
def format_days(days):
    """Pluralize a day count: "1 day" or "<n> days"."""
    return "1 day" if days == 1 else "{0} days".format(days)
def format_wrapped(out, text, indent=u""):
    """Word-wrap *text* and emit each line through *out*, prefixing every
    line (first and subsequent) with *indent*."""
    wrapper = textwrap.TextWrapper(initial_indent=indent,
                                   subsequent_indent=indent)
    for wrapped_line in wrapper.wrap(text):
        out(wrapped_line)
def format_episode(
    out, term, series_dao, episode,
    short_version=True,
    indent=u"",
):
    # Print one episode through `out`: a single summary line when
    # short_version is set, otherwise a multi-line card with air date,
    # overview and guest stars.  Watched episodes render in bold blue.
    color = term.white

    if series_dao.is_episode_watched(episode):
        color = term.bold_blue

    if short_version:
        out(color(
            u"{0}{1:02} '{2}' {3} ({4})".format(
                indent, episode['episode_number'],
                episode['episode_name'],
                format_airdate(episode['first_aired']),
                episode['first_aired'])))
    else:
        out(color(
            u"{0}{1:02} {2}".format(
                indent, episode['episode_number'],
                episode['episode_name'])))

    if short_version:
        return

    # Long version only from here on.
    out(term.cyan(u"{0}Air date: {1} ({2})".format(
        indent + u" ",
        format_airdate(episode['first_aired']),
        episode['first_aired'])))

    if episode['overview']:
        format_wrapped(out, episode['overview'], indent=indent + u" ")

    if 'guest_stars' in episode:
        out(term.cyan(u"{0}Guest stars:".format(indent + u" ")))
        format_wrapped(out, format_compact_list(episode['guest_stars']),
                       indent=indent + u" ")
def format_season(
    out, term, series_dao,
    series,
    season_number,
    episodes=None,
    indent=u""
):
    # Print one season header with color-coded seen/aired/all counts,
    # optionally followed by its episodes in long form.
    now = datetime.datetime.now()
    has_aired = has_aired_filter(now)

    episodes_legend = format_episodes_count_legend(term)

    all_episodes = series_dao.get_season_episodes(series, season_number)

    episodes_count, stats = format_episodes_count(
        term, series_dao, has_aired, all_episodes)

    color = term.white

    seen, aired, all = stats

    # Green: caught up (all aired, or everything, watched);
    # yellow: partially watched; red: nothing seen yet.
    if seen == aired or seen == all:
        color = term.bold_green
    elif seen > 0:
        color = term.bold_yellow
    else:
        color = term.bold_red

    out(u"{0}{c}Season {1}{t.normal} ({2}): {3}".format(
        indent, season_number, episodes_legend, episodes_count,
        c=color, t=term))

    if not episodes:
        return

    for episode in episodes:
        format_episode(out, term, series_dao, episode,
                       short_version=False,
                       indent=indent + u" ")
def format_compact_list(items, item_format=u"{0}"):
    """Join *items* with ", ", formatting each via *item_format*;
    returns "(empty)" when items is None."""
    if items is None:
        return u"(empty)"
    return u", ".join(item_format.format(item) for item in items)
def format_series(
    out, term, series,
    seasons=None,
    ignored_seasons=set(),
    series_dao=None,
    indent=u"",
):
    # Print a series card: title, air date, overview, actors and seasons.
    # NOTE(review): the mutable default for `ignored_seasons` is shared
    # across calls -- safe only while callers never mutate it.
    out(term.bold_cyan(u"{0} (id: {1})".format(
        series['series_name'], series['id'])))

    if 'first_aired' in series:
        out(term.cyan(u"Air date: {0} ({1})".format(
            format_airdate(series['first_aired']),
            series['first_aired'])))

    if 'overview' in series:
        if series['overview']:
            format_wrapped(out, series['overview'], indent=u" ")

    if not seasons:
        return

    if 'actors' in series:
        out(term.cyan(u"{0}Actors:".format(indent)))
        format_wrapped(out, format_compact_list(series['actors']),
                       indent=indent + u" ")

    out(term.cyan(u"Seasons"))

    for season_number, season_episodes in sorted(seasons.items()):
        if season_number in ignored_seasons:
            continue

        # Episodes are only expanded when exactly one season is shown.
        if len(seasons) != 1:
            season_episodes = None

        format_season(
            out, term, series_dao, series, season_number,
            episodes=season_episodes,
            indent=u" ")
def format_episodes_count_legend(term):
    """Colored "seen/aired/all" legend matching format_episodes_count."""
    template = (
        u"{t.green}{0}{t.normal}/"
        u"{t.yellow}{1}{t.normal}/"
        u"{t.red}{2}{t.normal}"
    )
    return template.format("seen", "aired", "all", t=term)
def format_episodes_count(term, series_dao, has_aired, episodes):
    """Return (colored "seen/aired/all" string, (seen, aired, all) tuple).

    Counting with generator expressions instead of len(filter(...)):
    identical on Python 2, and it keeps working on Python 3 where
    filter() returns a lazy iterator with no len().
    """
    seen_episodes = sum(
        1 for e in episodes if series_dao.is_episode_watched(e))
    aired_episodes = sum(1 for e in episodes if has_aired(e))
    all_episodes = len(episodes)

    stats = (seen_episodes, aired_episodes, all_episodes)

    return (
        u"{t.green}{0}{t.normal}/"
        u"{t.yellow}{1}{t.normal}/"
        u"{t.red}{2}{t.normal}"
    ).format(*stats, t=term), stats
| gpl-3.0 |
thonkify/thonkify | src/lib/babel/localtime/_unix.py | 19 | 5117 | from __future__ import with_statement
import os
import re
import sys
import pytz
import subprocess
_systemconfig_tz = re.compile(r'^Time Zone: (.*)$(?m)')
def _tz_from_env(tzenv):
    """Resolve the TZ environment value to a pytz tzinfo.

    TZ may be ":<path>", a path to a tzfile, or a zoneinfo zone name
    such as "Europe/Paris".
    """
    if tzenv[0] == ':':
        tzenv = tzenv[1:]

    # TZ specifies a file
    if os.path.exists(tzenv):
        with open(tzenv, 'rb') as tzfile:
            return pytz.tzfile.build_tzinfo('local', tzfile)

    # TZ specifies a zoneinfo zone.
    try:
        tz = pytz.timezone(tzenv)
        # That worked, so we return this:
        return tz
    except pytz.UnknownTimeZoneError:
        # BUGFIX: the message previously contained a literal "%s" because
        # the format argument was never supplied; interpolate the value.
        raise pytz.UnknownTimeZoneError(
            "tzlocal() does not support non-zoneinfo timezones like %s. \n"
            "Please use a timezone in the form of Continent/City" % tzenv)
def _get_localzone(_root='/'):
    """Tries to find the local timezone configuration.

    This method prefers finding the timezone name and passing that to pytz,
    over passing in the localtime file, as in the later case the zoneinfo
    name is unknown.

    The parameter _root makes the function look for files like /etc/localtime
    beneath the _root directory. This is primarily used by the tests.
    In normal usage you call the function without parameters.
    """
    # 1. The TZ environment variable always wins.
    tzenv = os.environ.get('TZ')
    if tzenv:
        return _tz_from_env(tzenv)

    # 2. A /etc/localtime symlink into the zoneinfo database gives us the
    #    zone name directly.
    # This is actually a pretty reliable way to test for the local time
    # zone on operating systems like OS X. On OS X especially this is the
    # only one that actually works.
    try:
        link_dst = os.readlink('/etc/localtime')
    except OSError:
        pass
    else:
        pos = link_dst.find('/zoneinfo/')
        if pos >= 0:
            zone_name = link_dst[pos + 10:]
            try:
                return pytz.timezone(zone_name)
            except pytz.UnknownTimeZoneError:
                pass

    # If we are on OS X now we are pretty sure that the rest of the
    # code will fail and just fall through until it hits the reading
    # of /etc/localtime and using it without name. At this point we
    # can invoke systemconfig which internally invokes ICU. ICU itself
    # does the same thing we do (readlink + compare file contents) but
    # since it knows where the zone files are that should be a bit
    # better than reimplementing the logic here.
    if sys.platform == 'darwin':
        c = subprocess.Popen(['systemsetup', '-gettimezone'],
                             stdout=subprocess.PIPE)
        sys_result = c.communicate()[0]
        c.wait()
        # NOTE(review): on Python 3, communicate() returns bytes while
        # _systemconfig_tz is a str pattern -- this path assumes Python 2.
        tz_match = _systemconfig_tz.search(sys_result)
        if tz_match is not None:
            zone_name = tz_match.group(1)
            try:
                return pytz.timezone(zone_name)
            except pytz.UnknownTimeZoneError:
                pass

    # Now look for distribution specific configuration files
    # that contain the timezone name.
    tzpath = os.path.join(_root, 'etc/timezone')
    if os.path.exists(tzpath):
        with open(tzpath, 'rb') as tzfile:
            data = tzfile.read()

            # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file.
            # That's a misconfiguration, but we need to handle it gracefully:
            # NOTE(review): data is bytes (opened 'rb'); comparing against
            # the str 'TZif2' only behaves as intended on Python 2.
            if data[:5] != 'TZif2':
                etctz = data.strip().decode()
                # Get rid of host definitions and comments:
                if ' ' in etctz:
                    etctz, dummy = etctz.split(' ', 1)
                if '#' in etctz:
                    etctz, dummy = etctz.split('#', 1)
                return pytz.timezone(etctz.replace(' ', '_'))

    # CentOS has a ZONE setting in /etc/sysconfig/clock,
    # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
    # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
    # We look through these files for a timezone:
    zone_re = re.compile('\s*ZONE\s*=\s*\"')
    timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"')
    end_re = re.compile('\"')

    for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
        tzpath = os.path.join(_root, filename)
        if not os.path.exists(tzpath):
            continue
        with open(tzpath, 'rt') as tzfile:
            data = tzfile.readlines()

        for line in data:
            # Look for the ZONE= setting.
            match = zone_re.match(line)
            if match is None:
                # No ZONE= setting. Look for the TIMEZONE= setting.
                match = timezone_re.match(line)
            if match is not None:
                # Some setting existed
                line = line[match.end():]
                etctz = line[:end_re.search(line).start()]

                # We found a timezone
                return pytz.timezone(etctz.replace(' ', '_'))

    # No explicit setting existed. Use localtime
    for filename in ('etc/localtime', 'usr/local/etc/localtime'):
        tzpath = os.path.join(_root, filename)

        if not os.path.exists(tzpath):
            continue

        with open(tzpath, 'rb') as tzfile:
            return pytz.tzfile.build_tzinfo('local', tzfile)

    raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
| mit |
niboshi/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_evaluation.py | 5 | 3915 | import chainer
from chainer import functions as F
import numpy
import pytest
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import op_utils
# (prediction dtype, label dtype) pairs accepted by chainerx.accuracy.
_in_out_eval_dtypes = [
    (('float16', 'int16')),
    (('float32', 'int32')),
    (('float64', 'int64')),
    (('float32', 'int16')),
    (('float64', 'int16')),
    (('float64', 'int32')),
]

# (prediction shape, target shape) pairs; some prediction shapes carry
# trailing singleton axes.
_accuracy_params = [
    ((10, 1), (10,)),
    ((5, 1), (5,)),
    ((10, 3), (10,)),
    ((10, 3, 1), (10,)),
    ((10, 3, 1, 1), (10,)),
    ((10, 3, 5), (10, 5)),
    ((10, 3, 5, 4), (10, 5, 4)),
    ((10, 3, 5, 4, 1), (10, 5, 4)),
    ((10, 3, 5, 4, 1, 1), (10, 5, 4)),
]

# dtype pairs that must be rejected with a chainerx.DtypeError.
_invalid_accuracy_dtypes = [
    (('int16', 'float16')),
    (('int32', 'int32')),
    (('float32', 'float32')),
    (('float64', 'float64')),
    (('int64', 'float64')),
]

# shape pairs with mismatched leading (batch) dimensions.
_invalid_accuracy_shapes = [
    ((10, 1), (5,)),
    ((5, 3), (10, 3)),
]
class EvalBase(op_utils.ChainerOpTest):
    # Shared scaffolding for evaluation-op tests: generates a random
    # (prediction, target) pair and delegates the actual forward pass to
    # the subclass via forward_xp, where xp is either chainerx or
    # chainer.functions so both backends are exercised identically.

    def generate_inputs(self):
        y_dtype, t_dtype = self.in_dtypes
        y = numpy.random.uniform(-1, 1, self.y_shape).astype(y_dtype)
        # Labels are drawn from {0, 1, 2}.
        targ = numpy.random.randint(
            3, size=self.t_shape).astype(t_dtype)
        return y, targ

    def forward_chainerx(self, inputs):
        return self.forward_xp(inputs, chainerx)

    def forward_chainer(self, inputs):
        return self.forward_xp(inputs, F)

    def forward_xp(self, inputs, xp):
        raise NotImplementedError(
            'Op test implementation must override `forward_xp`.')
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'y_shape,t_shape', _accuracy_params),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes', _in_out_eval_dtypes),
        chainer.testing.from_pytest_parameterize(
            'ignore_label', [None, 0])
    ])
))
class TestAccuracy(EvalBase):
    # Forward-only comparison of chainerx.accuracy against the chainer
    # reference; accuracy is not differentiable, so both gradient checks
    # are disabled.
    skip_backward_test = True
    skip_double_backward_test = True
    def setup(self):
        super().setup()
        dtype1, dtype2 = self.in_dtypes
        if dtype1 == 'float16' or dtype2 == 'float16':
            # float16 loses precision; loosen the forward tolerances.
            self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
    def generate_inputs(self):
        y, t = super().generate_inputs()
        # TODO(aksub99): Improve tests for the case
        # where all labels are ignored.
        if y.shape == (10, 1) or y.shape == (5, 1):
            # For these shapes every label is forced to the ignored value,
            # exercising the all-ignored code path.
            self.ignore_label = 0
            t.fill(self.ignore_label)
        return y, t
    def forward_xp(self, inputs, xp):
        y, t = inputs
        out = xp.accuracy(y, t, self.ignore_label)
        # Op tests expect a tuple of outputs.
        return out,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('y_shape,t_shape', _accuracy_params)
@pytest.mark.parametrize('in_dtypes', _invalid_accuracy_dtypes)
@pytest.mark.parametrize('ignore_label', [None, 0])
def test_accuracy_invalid_dtype(device, y_shape,
                                t_shape, ignore_label, in_dtypes):
    """accuracy must reject score/label dtype pairs it does not support."""
    score_dtype, label_dtype = in_dtypes
    score = array_utils.create_dummy_ndarray(chainerx, y_shape, score_dtype)
    labels = array_utils.create_dummy_ndarray(chainerx, t_shape, label_dtype)
    # Every unsupported pairing has to surface as a DtypeError.
    with pytest.raises(chainerx.DtypeError):
        chainerx.accuracy(score, labels, ignore_label=ignore_label)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('y_shape,t_shape', _invalid_accuracy_shapes)
@pytest.mark.parametrize('in_dtypes', _in_out_eval_dtypes)
@pytest.mark.parametrize('ignore_label', [None, 0])
def test_accuracy_invalid_shape(device, y_shape,
                                t_shape, ignore_label, in_dtypes):
    """accuracy must reject score/label arrays whose shapes disagree."""
    score_dtype, label_dtype = in_dtypes
    score = array_utils.create_dummy_ndarray(chainerx, y_shape, score_dtype)
    labels = array_utils.create_dummy_ndarray(chainerx, t_shape, label_dtype)
    # A shape mismatch between y and t must raise DimensionError.
    with pytest.raises(chainerx.DimensionError):
        chainerx.accuracy(score, labels, ignore_label=ignore_label)
| mit |
louiskun/flaskGIT | venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/pg8000.py | 34 | 8375 | # postgresql/pg8000.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: \
postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pythonhosted.org/pg8000/
.. _pg8000_unicode:
Unicode
-------
pg8000 will encode / decode string values between it and the server using the
PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
The ``client_encoding`` can be overriden for a session by executing the SQL:
SET CLIENT_ENCODING TO 'utf8';
SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine(
"postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
.. _pg8000_isolation_level:
pg8000 Transaction Isolation Level
-------------------------------------
The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using
pg8000.
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`psycopg2_isolation_level`
"""
from ... import util, exc
import decimal
from ... import processors
from ... import types as sqltypes
from .base import (
PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext,
_DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES)
import re
from sqlalchemy.dialects.postgresql.json import JSON
class _PGNumeric(sqltypes.Numeric):
    # Result-side numeric handling for pg8000: the converter to use depends
    # on the wire-level PostgreSQL type OID reported for the column.
    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in _FLOAT_TYPES:
                # Native floats must be coerced to Decimal with the
                # configured scale.
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale)
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # pg8000 returns Decimal natively for 1700
                return None
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype)
        else:
            if coltype in _FLOAT_TYPES:
                # pg8000 returns float natively for 701
                return None
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # Decimal/integer results are downcast to float.
                return processors.to_float
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype)
class _PGNumericNoBind(_PGNumeric):
    # pg8000 accepts Decimal/float bind values directly, so no bind-side
    # conversion is needed.
    def bind_processor(self, dialect):
        return None
class _PGJSON(JSON):
    def result_processor(self, dialect, coltype):
        # pg8000 releases newer than 1.10.1 deserialize JSON themselves,
        # so no Python-side processing is required there.
        if dialect._dbapi_version > (1, 10, 1):
            return None # Has native JSON
        else:
            return super(_PGJSON, self).result_processor(dialect, coltype)
class PGExecutionContext_pg8000(PGExecutionContext):
    # No pg8000-specific execution-context behavior is needed; the subclass
    # exists as the dialect's designated extension point.
    pass
class PGCompiler_pg8000(PGCompiler):
    # pg8000 uses the 'format' paramstyle, so literal '%' characters in
    # emitted SQL must be doubled to survive parameter substitution.
    def visit_mod_binary(self, binary, operator, **kw):
        # Render SQL modulo as '%%' for the reason above.
        return self.process(binary.left, **kw) + " %% " + \
            self.process(binary.right, **kw)
    def post_process_text(self, text):
        if '%%' in text:
            # Text already containing '%%' was probably hand-escaped by the
            # caller; warn that escaping now happens automatically.
            util.warn("The SQLAlchemy postgresql dialect "
                      "now automatically escapes '%' in text() "
                      "expressions to '%%'.")
        return text.replace('%', '%%')
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    """Identifier preparer that additionally doubles ``%`` so quoted
    identifiers survive pg8000's format-style parameter substitution."""
    def _escape_identifier(self, value):
        # First apply the standard quote escaping, then protect percent
        # signs from the DBAPI's '%' interpolation.
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace('%', '%%')
class PGDialect_pg8000(PGDialect):
    # SQLAlchemy dialect implementation for the pure-Python pg8000 driver.
    driver = 'pg8000'
    supports_unicode_statements = True
    supports_unicode_binds = True
    # pg8000 uses '%s'-style ('format') bound parameters.
    default_paramstyle = 'format'
    supports_sane_multi_rowcount = True
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000
    description_encoding = 'use_encoding'
    # Swap in pg8000-aware result/bind processing for numerics and JSON.
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumericNoBind,
            sqltypes.Float: _PGNumeric,
            JSON: _PGJSON,
        }
    )
    def __init__(self, client_encoding=None, **kwargs):
        # client_encoding: if given, SET CLIENT_ENCODING is issued on every
        # new connection (see on_connect / set_client_encoding).
        PGDialect.__init__(self, **kwargs)
        self.client_encoding = client_encoding
    def initialize(self, connection):
        # Multi-row rowcount only works reliably from pg8000 1.9.14 on.
        self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
        super(PGDialect_pg8000, self).initialize(connection)
    @util.memoized_property
    def _dbapi_version(self):
        # Parse the driver version string into a comparable int tuple;
        # fall back to a very large sentinel when it cannot be determined.
        if self.dbapi and hasattr(self.dbapi, '__version__'):
            return tuple(
                [
                    int(x) for x in re.findall(
                        r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)])
        else:
            return (99, 99, 99)
    @classmethod
    def dbapi(cls):
        # Import lazily so the dialect module can load without the driver.
        return __import__('pg8000')
    def create_connect_args(self, url):
        # Translate the SQLAlchemy URL into pg8000 connect() kwargs.
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)
    def is_disconnect(self, e, connection, cursor):
        # pg8000 signals a dead connection via this message text.
        return "connection is closed" in str(e)
    def set_isolation_level(self, connection, level):
        # Accepts levels with underscores (e.g. 'READ_COMMITTED') as well
        # as the special 'AUTOCOMMIT' pseudo-level.
        level = level.replace('_', ' ')
        # adjust for ConnectionFairy possibly being present
        if hasattr(connection, 'connection'):
            connection = connection.connection
        if level == 'AUTOCOMMIT':
            connection.autocommit = True
        elif level in self._isolation_lookup:
            connection.autocommit = False
            cursor = connection.cursor()
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION "
                "ISOLATION LEVEL %s" % level)
            cursor.execute("COMMIT")
            cursor.close()
        else:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s or AUTOCOMMIT" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )
    def set_client_encoding(self, connection, client_encoding):
        # adjust for ConnectionFairy possibly being present
        if hasattr(connection, 'connection'):
            connection = connection.connection
        cursor = connection.cursor()
        cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'")
        cursor.execute("COMMIT")
        cursor.close()
    # Two-phase commit support: delegate to the driver's TPC API using a
    # (format_id, gtrid, bqual) xid triple.
    def do_begin_twophase(self, connection, xid):
        connection.connection.tpc_begin((0, xid, ''))
    def do_prepare_twophase(self, connection, xid):
        connection.connection.tpc_prepare()
    def do_rollback_twophase(
            self, connection, xid, is_prepared=True, recover=False):
        connection.connection.tpc_rollback((0, xid, ''))
    def do_commit_twophase(
            self, connection, xid, is_prepared=True, recover=False):
        connection.connection.tpc_commit((0, xid, ''))
    def do_recover_twophase(self, connection):
        # Return just the gtrid portion of each recovered xid.
        return [row[1] for row in connection.connection.tpc_recover()]
    def on_connect(self):
        # Build a single on-connect hook composed of the configured
        # per-connection setup steps, or None when there are none.
        fns = []
        if self.client_encoding is not None:
            def on_connect(conn):
                self.set_client_encoding(conn, self.client_encoding)
            fns.append(on_connect)
        if self.isolation_level is not None:
            def on_connect(conn):
                self.set_isolation_level(conn, self.isolation_level)
            fns.append(on_connect)
        if len(fns) > 0:
            def on_connect(conn):
                for fn in fns:
                    fn(conn)
            return on_connect
        else:
            return None
dialect = PGDialect_pg8000
| mit |
rdhyee/PyTables | tables/unimplemented.py | 3 | 5459 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: January 14, 2004
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""Here is defined the UnImplemented class."""
import warnings
from tables import hdf5extension
from tables.utils import SizeType
from tables.node import Node
from tables.leaf import Leaf
class UnImplemented(hdf5extension.UnImplemented, Leaf):
    """This class represents datasets not supported by PyTables in an HDF5
    file.
    When reading a generic HDF5 file (i.e. one that has not been created with
    PyTables, but with some other HDF5 library based tool), chances are that
    the specific combination of datatypes or dataspaces in some dataset might
    not be supported by PyTables yet. In such a case, this dataset will be
    mapped into an UnImplemented instance and the user will still be able to
    access the complete object tree of the generic HDF5 file. The user will
    also be able to *read and write the attributes* of the dataset, *access
    some of its metadata*, and perform *certain hierarchy manipulation
    operations* like deleting or moving (but not copying) the node. Of course,
    the user will not be able to read the actual data on it.
    This is an elegant way to allow users to work with generic HDF5 files
    despite the fact that some of its datasets are not supported by
    PyTables. However, if you are really interested in having full access to an
    unimplemented dataset, please get in contact with the developer team.
    This class does not have any public instance variables or methods, except
    those inherited from the Leaf class (see :ref:`LeafClassDescr`).
    """
    # Class identifier.
    _c_classid = 'UNIMPLEMENTED'
    def __init__(self, parentnode, name):
        """Create the `UnImplemented` instance."""
        # UnImplemented objects always come from opening an existing node
        # (they can not be created).
        self._v_new = False
        """Is this the first time the node has been created?"""
        self.nrows = SizeType(0)
        """The length of the first dimension of the data."""
        self.shape = (SizeType(0),)
        """The shape of the stored data."""
        self.byteorder = None
        """The endianness of data in memory ('big', 'little' or
        'irrelevant')."""
        super(UnImplemented, self).__init__(parentnode, name)
    def _g_open(self):
        # The C extension reports the dataset's shape, byteorder and its
        # HDF5 object id; nrows mirrors the leading dimension when present.
        (self.shape, self.byteorder, object_id) = self._open_unimplemented()
        try:
            self.nrows = SizeType(self.shape[0])
        except IndexError:
            # Scalar dataspace: there is no leading dimension.
            self.nrows = SizeType(0)
        return object_id
    def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
        """Do nothing.
        This method does nothing, but a ``UserWarning`` is issued.
        Please note that this method *does not return a new node*, but
        ``None``.
        """
        warnings.warn(
            "UnImplemented node %r does not know how to copy itself; skipping"
            % (self._v_pathname,))
        return None  # Can you see it?
    def _f_copy(self, newparent=None, newname=None,
                overwrite=False, recursive=False, createparents=False,
                **kwargs):
        """Do nothing.
        This method does nothing, since `UnImplemented` nodes can not
        be copied. However, a ``UserWarning`` is issued. Please note
        that this method *does not return a new node*, but ``None``.
        """
        # This also does nothing but warn.
        self._g_copy(newparent, newname, recursive, **kwargs)
        return None  # Can you see it?
    def __repr__(self):
        return """%s
  NOTE: <The UnImplemented object represents a PyTables unimplemented
        dataset present in the '%s' HDF5 file.  If you want to see this
        kind of HDF5 dataset implemented in PyTables, please contact the
        developers.>
""" % (str(self), self._v_file.filename)
# Classes reported as H5G_UNKNOWN by HDF5
class Unknown(Node):
    """This class represents nodes reported as *unknown* by the underlying
    HDF5 library.
    This class does not have any public instance variables or methods, except
    those inherited from the Node class.
    """
    # Class identifier
    _c_classid = 'UNKNOWN'
    def __init__(self, parentnode, name):
        """Create the `Unknown` instance."""
        # Unknown nodes only come from opening an existing file; they are
        # never created anew.
        self._v_new = False
        super(Unknown, self).__init__(parentnode, name)
    def _g_new(self, parentnode, name, init=False):
        # Nothing to allocate for an unknown node.
        pass
    def _g_open(self):
        # No usable HDF5 object id is available; return a dummy one.
        return 0
    def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
        # Silently avoid doing copies of unknown nodes
        return None
    def _g_delete(self, parent):
        # Nothing to release on the library side for an unknown node.
        pass
    def __str__(self):
        pathname = self._v_pathname
        classname = self.__class__.__name__
        return "%s (%s)" % (pathname, classname)
    def __repr__(self):
        return """%s
  NOTE: <The Unknown object represents a node which is reported as
        unknown by the underlying HDF5 library, but that might be
        supported in more recent HDF5 versions.>
""" % (str(self))
# These are listed here for backward compatibility with PyTables 0.9.x indexes
class OldIndexArray(UnImplemented):
    # Maps the legacy 'IndexArray' HDF5 class id from PyTables 0.9.x files
    # onto UnImplemented so those files can still be opened.
    _c_classid = 'IndexArray'
| bsd-3-clause |
Mchakravartula/rockstor-core | src/rockstor/cli/users_console.py | 6 | 2247 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from base_console import BaseConsole
from rest_util import api_call
class UsersConsole(BaseConsole):
    """Interactive sub-console for managing RockStor users via the REST
    ``/users/`` endpoint.  (Python 2 module: uses ``print`` statements.)"""
    def __init__(self, prompt):
        BaseConsole.__init__(self)
        self.greeting = 'Users'
        self.pprompt = prompt
        # Prompt shows the parent prompt followed by this console's name.
        self.prompt = ('%s %s>' % (self.pprompt, self.greeting))
        self.base_url = ('%s/users/' % BaseConsole.url)
    def do_list(self, args):
        # With no argument, list every user; with one, show only that user.
        url = self.base_url
        if (len(args) > 0):
            url = ('%s%s' % (self.base_url, args.split()[0]))
        user_info = api_call(url)
        print user_info
    def do_add(self, args):
        # Expects exactly: <username> <password> <utype>.
        # NOTE(review): a malformed argument count raises ValueError from
        # the unpacking below rather than showing help — confirm intended.
        if (len(args) > 0):
            username, pw, utype = args.split()
            data = {'username': username,
                    'password': pw,
                    'utype': utype,}
            user_info = api_call(self.base_url, data=data, calltype='post')
            print user_info
        else:
            self.do_help(args)
    def do_passwd(self, args):
        # Expects: <username> <new_password>; issues a PUT on the user.
        if (len(args) > 0):
            username, pw = args.split()
            data = {'password': pw}
            url = ('%s%s/' % (self.base_url, username))
            user_info = api_call(url, data=data, calltype='put')
            print user_info
        else:
            self.do_help(args)
    def do_delete(self, args):
        # Expects: <username>; issues a DELETE on the user.
        if (len(args) > 0):
            username = args.split()[0]
            url = ('%s%s' % (self.base_url, username))
            user_info = api_call(url, calltype='delete')
            print user_info
        else:
            self.do_help(args)
| gpl-3.0 |
WaveBlocks/WaveBlocks | src/WaveBlocks/MatrixPotential.py | 1 | 10430 | """The WaveBlocks Project
This file contains the abstract base class for representation of potentials
for an arbitrary number of components. It defines the interface every subclass
must support to represent a potential.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
class MatrixPotential:
    r"""
    This class represents a potential :math:`V\left(x\right)`. The potential is given as an analytic
    expression. Some calculations with the potential are supported. For example
    calculation of eigenvalues and exponentials and numerical evaluation.
    Further, there are methods for splitting the potential into a Taylor
    expansion and for basis transformations between canonical and eigenbasis.
    """
    # NOTE(review): this is a hand-rolled abstract base class — every method
    # below only raises NotImplementedError; it predates use of the `abc`
    # module and subclasses must override the full interface.
    def __init__(self):
        r"""
        Create a new ``MatrixPotential`` instance for a given potential matrix :math:`V\left(x\right)`.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("'MatrixPotential' is an abstract base class.")
    def __str__(self):
        r"""
        Put the number of components and the analytical expression (the matrix) into a printable string.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("'MatrixPotential' is an abstract base class.")
    def get_number_components(self):
        r"""
        :return: The number :math:`N` of components the potential supports.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("get_number_components(...)")
    def evaluate_at(self, nodes, component=None):
        r"""
        Evaluate the potential matrix elementwise at some given grid nodes :math:`\gamma`.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
        :param component: The component :math:`V_{i,j}` that gets evaluated or 'None' to evaluate all.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_at(...)")
    def calculate_eigenvalues(self):
        r"""
        Calculate the eigenvalues :math:`\lambda_i\left(x\right)` of the potential :math:`V\left(x\right)`.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_eigenvalues(...)")
    def evaluate_eigenvalues_at(self, nodes, diagonal_component=None):
        r"""
        Evaluate the eigenvalues :math:`\lambda_i\left(x\right)` at some grid nodes :math:`\gamma`.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvalues at.
        :param diagonal_component: The index :math:`i` of the eigenvalue :math:`\lambda_i` that gets evaluated or 'None' to evaluate all.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_eigenvalues_at(...)")
    def calculate_eigenvectors(self):
        r"""
        Calculate the eigenvectors :math:`\nu_i\left(x\right)` of the potential :math:`V\left(x\right)`.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_eigenvectors(...)")
    def evaluate_eigenvectors_at(self, nodes):
        r"""
        Evaluate the eigenvectors :math:`\nu_i\left(x\right)` at some grid nodes :math:`\gamma`.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvectors at.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_eigenvectors_at(...)")
    def project_to_eigen(self, nodes, values, basis=None):
        r"""
        Project a given vector from the canonical basis to the eigenbasis of the potential.
        :param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
        :param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basisvectors :math:`\nu_i`. Allows to use this function for external data, similar to a static function.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("project_to_eigen(...)")
    def project_to_canonical(self, nodes, values, basis=None):
        r"""
        Project a given vector from the potential's eigenbasis to the canonical basis.
        :param nodes: The grid nodes :math:`\gamma` for the pointwise transformation.
        :param values: The list of vectors :math:`\varphi_i` containing the values we want to transform.
        :param basis: A list of basis vectors :math:`\nu_i`. Allows to use this function for external data, similar to a static function.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("project_to_canonical(...)")
    def calculate_exponential(self, factor=1):
        r"""
        Calculate the matrix exponential :math:`E = \exp\left(\alpha M\right)`.
        :param factor: A prefactor :math:`\alpha` in the exponential.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_exponential(...)")
    def evaluate_exponential_at(self, nodes):
        r"""
        Evaluate the exponential of the potential matrix :math:`V` at some grid nodes :math:`\gamma`.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the exponential at.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_exponential_at(...)")
    def calculate_jacobian(self):
        r"""
        Calculate the jacobian matrix for each component :math:`V_{i,j}` of the potential.
        For potentials which depend only one variable :math:`x`, this equals the first derivative.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_jacobian(...)")
    def evaluate_jacobian_at(self, nodes, component=None):
        r"""
        Evaluate the jacobian at some grid nodes :math:`\gamma` for each component :math:`V_{i,j}` of the potential.
        :param nodes: The grid nodes :math:`\gamma` the jacobian gets evaluated at.
        :param component: The index tuple :math:`\left(i,j\right)` that specifies the potential's entry of which the jacobian is evaluated. (Defaults to 'None' to evaluate all)
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_jacobian_at(...)")
    def calculate_hessian(self):
        r"""
        Calculate the hessian matrix for each component :math:`V_{i,j}` of the potential.
        For potentials which depend only one variable :math:`x`, this equals the second derivative.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_hessian(...)")
    def evaluate_hessian_at(self, nodes, component=None):
        r"""
        Evaluate the hessian at some grid nodes :math:`\gamma` for each component :math:`V_{i,j}` of the potential.
        :param nodes: The grid nodes :math:`\gamma` the hessian gets evaluated at.
        :param component: The index tuple :math:`\left(i,j\right)` that specifies the potential's entry of which the hessian is evaluated. (Or 'None' to evaluate all)
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_hessian_at(...)")
    def calculate_local_quadratic(self, diagonal_component=None):
        r"""
        Calculate the local quadratic approximation matrix :math:`U` of the potential's
        eigenvalues in :math:`\Lambda`. This function is used for the homogeneous case and
        takes into account the leading component :math:`\chi`.
        :param diagonal_component: Specifies the index :math:`i` of the eigenvalue :math:`\lambda_i` that gets expanded into a Taylor series :math:`u_i`.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("calculate_local_quadratic(...)")
    def evaluate_local_quadratic_at(self, nodes):
        r"""
        Numerically evaluate the local quadratic approximation matrix :math:`U` of
        the potential's eigenvalues in :math:`\Lambda` at the given grid nodes :math:`\gamma`.
        This function is used for the homogeneous case and takes into account the leading component :math:`\chi`.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the quadratic approximation at.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_local_quadratic_at(...)")
    def calculate_local_remainder(self, diagonal_component=0):
        r"""
        Calculate the non-quadratic remainder matrix :math:`W` of the quadratic
        approximation matrix :math:`U` of the potential's eigenvalue matrix :math:`\Lambda`.
        This function is used for the homogeneous case and takes into account the leading component :math:`\chi`.
        :param diagonal_component: Specifies the index :math:`\chi` of the leading component :math:`\lambda_\chi`.
        :raise NotImplementedError: This is an abstract base class.
        """
        # NOTE(review): default is 0 here but None in calculate_local_quadratic
        # above — confirm whether this asymmetry is intentional in subclasses.
        raise NotImplementedError("calculate_local_remainder(...)")
    def evaluate_local_remainder_at(self, position, nodes, component=None):
        r"""
        Numerically evaluate the non-quadratic remainder matrix :math:`W` of the quadratic
        approximation matrix :math:`U` of the potential's eigenvalues in :math:`\Lambda` at the
        given nodes :math:`\gamma`. This function is used for the homogeneous and the
        inhomogeneous case and just evaluates the remainder matrix :math:`W`.
        :param position: The point :math:`q` where the Taylor series is computed.
        :param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at.
        :param component: The component :math:`\left(i,j\right)` of the remainder matrix :math:`W` that is evaluated.
        :raise NotImplementedError: This is an abstract base class.
        """
        raise NotImplementedError("evaluate_local_remainder_at(...)")
CG-F16-16-Rutgers/steersuite-rutgers | steerstats/tools/gameobjects/vector3.py | 8 | 16094 | from math import *
from util import format_number
class Vector3(object):
__slots__ = ('_v',)
_gameobjects_vector = 3
    def __init__(self, *args):
        """Creates a Vector3 from 3 numeric values or a list-like object
        containing at least 3 values. No arguments result in a null vector.
        """
        # NOTE(review): this module targets Python 2 — `map` must return a
        # list here; under Python 3 it would store an iterator and break
        # all component access. Confirm before porting.
        if len(args) == 3:
            self._v = map(float, args[:3])
            return
        if not args:
            self._v = [0., 0., 0.]
        elif len(args) == 1:
            self._v = map(float, args[0][:3])
        else:
            raise ValueError("Vector3.__init__ takes 0, 1 or 3 parameters")
@classmethod
def from_points(cls, p1, p2):
v = cls.__new__(cls, object)
ax, ay, az = p1
bx, by, bz = p2
v._v = [bx-ax, by-ay, bz-az]
return v
@classmethod
def from_floats(cls, x, y, z):
"""Creates a Vector3 from individual float values.
Warning: There is no checking (for efficiency) here: x, y, z _must_ be
floats.
"""
v = cls.__new__(cls, object)
v._v = [x, y, z]
return v
@classmethod
def from_iter(cls, iterable):
"""Creates a Vector3 from an iterable containing at least 3 values."""
next = iter(iterable).next
v = cls.__new__(cls, object)
v._v = [ float(next()), float(next()), float(next()) ]
return v
@classmethod
def _from_float_sequence(cls, sequence):
v = cls.__new__(cls, object)
v._v = list(sequence[:3])
return v
def copy(self):
"""Returns a copy of this vector."""
v = self.__new__(self.__class__, object)
v._v = self._v[:]
return v
#return self.from_floats(self._v[0], self._v[1], self._v[2])
__copy__ = copy
def _get_x(self):
return self._v[0]
def _set_x(self, x):
try:
self._v[0] = 1.0 * x
except:
raise TypeError, "Must be a number"
x = property(_get_x, _set_x, None, "x component.")
def _get_y(self):
return self._v[1]
def _set_y(self, y):
try:
self._v[1] = 1.0 * y
except:
raise TypeError, "Must be a number"
y = property(_get_y, _set_y, None, "y component.")
def _get_z(self):
return self._v[2]
def _set_z(self, z):
try:
self._v[2] = 1.0 * z
except:
raise TypeError, "Must be a number"
z = property(_get_z, _set_z, None, "z component.")
def _get_length(self):
x, y, z = self._v
return sqrt(x*x + y*y + z*z)
def length(self):
return self._get_length()
def lengthSquared(self):
x, y, z = self._v
return x*x + y*y + z*z
    def _set_length(self, length):
        # Scale the vector in place so its magnitude equals `length`.
        v = self._v
        try:
            x, y, z = v
            l = length / sqrt(x*x + y*y +z*z)
        except ZeroDivisionError:
            # Null vector: its direction is undefined, so leave it at the
            # origin rather than raising.
            v[0] = 0.
            v[1] = 0.
            v[2] = 0.
            # NOTE(review): returning self from a property setter is unusual;
            # the return value is discarded by property assignment.
            return self
        v[0] = x*l
        v[1] = y*l
        v[2] = z*l
def unit(self):
"""Returns a unit vector."""
x, y, z = self._v
l = sqrt(x*x + y*y + z*z)
return self.from_floats(x/l, y/l, z/l)
def set(self, x, y, z):
"""Sets the components of this vector.
x -- x component
y -- y component
z -- z component
"""
v = self._v
try:
v[0] = x * 1.0
v[1] = y * 1.0
v[2] = z * 1.0
except TypeError:
raise TypeError("Must be a number")
return self
def __str__(self):
x, y, z = self._v
return "(%s, %s, %s)" % (format_number(x),
format_number(y),
format_number(z))
def __repr__(self):
x, y, z = self._v
return "Vector3(%s, %s, %s)" % (x, y, z)
def __len__(self):
return 3
def __iter__(self):
"""Iterates the components in x, y, z order."""
return iter(self._v[:])
def __getitem__(self, index):
"""Retrieves a component, given its index.
index -- 0, 1 or 2 for x, y or z
"""
try:
return self._v[index]
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
def __setitem__(self, index, value):
"""Sets a component, given its index.
index -- 0, 1 or 2 for x, y or z
value -- New (float) value of component
"""
try:
self._v[index] = 1.0 * value
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
except TypeError:
raise TypeError, "Must be a number"
def __eq__(self, rhs):
"""Test for equality
rhs -- Vector or sequence of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x==xx and y==yy and z==zz
def __ne__(self, rhs):
"""Test of inequality
rhs -- Vector or sequenece of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x!=xx or y!=yy or z!=zz
def __hash__(self):
return hash(self._v)
def __add__(self, rhs):
"""Returns the result of adding a vector (or collection of 3 numbers)
from this vector.
rhs -- Vector or sequence of 2 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x+ox, y+oy, z+oz)
def __iadd__(self, rhs):
"""Adds another vector (or a collection of 3 numbers) to this vector.
rhs -- Vector or sequence of 2 values
"""
ox, oy, oz = rhs
v = self._v
v[0] += ox
v[1] += oy
v[2] += oz
return self
def __radd__(self, lhs):
"""Adds vector to this vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(x+ox, y+oy, z+oz)
def __sub__(self, rhs):
"""Returns the result of subtracting a vector (or collection of
3 numbers) from this vector.
rhs -- 3 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x-ox, y-oy, z-oz)
def _isub__(self, rhs):
"""Subtracts another vector (or a collection of 3 numbers) from this
vector.
rhs -- Vector or sequence of 3 values
"""
ox, oy, oz = rhs
v = self._v
v[0] -= ox
v[1] -= oy
v[2] -= oz
return self
def __rsub__(self, lhs):
"""Subtracts a vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(ox-x, oy-y, oz-z)
def scalar_mul(self, scalar):
v = self._v
v[0] *= scalar
v[1] *= scalar
v[2] *= scalar
def vector_mul(self, vector):
x, y, z = vector
v= self._v
v[0] *= x
v[1] *= y
v[2] *= z
def get_scalar_mul(self, scalar):
x, y, z = self._v
return self.from_floats(x*scalar, y*scalar, z*scalar)
def get_vector_mul(self, vector):
x, y, z = self._v
xx, yy, zz = vector
return self.from_floats(x * xx, y * yy, z * zz)
def __mul__(self, rhs):
"""Return the result of multiplying this vector by another vector, or
a scalar (single number).
rhs -- Vector, sequence or single value.
"""
x, y, z = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*rhs, y*rhs, z*rhs)
def __imul__(self, rhs):
"""Multiply this vector by another vector, or a scalar
(single number).
rhs -- Vector, sequence or single value.
"""
v = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
v[0] *= ox
v[1] *= oy
v[2] *= oz
else:
v[0] *= rhs
v[1] *= rhs
v[2] *= rhs
return self
def __rmul__(self, lhs):
x, y, z = self._v
if hasattr(lhs, "__getitem__"):
ox, oy, oz = lhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*lhs, y*lhs, z*lhs)
def __div__(self, rhs):
    """Python 2 division: divide by a vector (component-wise) or a scalar."""
    x, y, z = self._v
    if not hasattr(rhs, "__getitem__"):
        return self.from_floats(x / rhs, y / rhs, z / rhs)
    ox, oy, oz = rhs
    return self.from_floats(x / ox, y / oy, z / oz)
def __idiv__(self, rhs):
    """In-place divide by a vector or a single number (Python 2 /= operator).

    Bug fix: the sequence branch used ox/oy/oz without ever unpacking
    them from rhs, so dividing in place by a vector raised NameError.
    The unpacking is now performed, mirroring __imul__.

    rhs -- vector / 3-item sequence, or a scalar
    """
    v = self._v
    if hasattr(rhs, "__getitem__"):
        ox, oy, oz = rhs
        v[0] /= ox
        v[1] /= oy
        v[2] /= oz
    else:
        v[0] /= rhs
        v[1] /= rhs
        v[2] /= rhs
    return self
def __rdiv__(self, lhs):
    """Reflected Python 2 division: (scalar / Vector3) or (sequence / Vector3)."""
    x, y, z = self._v
    if not hasattr(lhs, "__getitem__"):
        return self.from_floats(lhs / x, lhs / y, lhs / z)
    ox, oy, oz = lhs
    return self.from_floats(ox / x, oy / y, oz / z)
def scalar_div(self, scalar):
    """Divide each component in place by a single number."""
    components = self._v
    for i in (0, 1, 2):
        components[i] /= scalar
def vector_div(self, vector):
    """Divide each component in place by the matching component of *vector*."""
    dx, dy, dz = vector
    comps = self._v
    comps[0] /= dx
    comps[1] /= dy
    comps[2] /= dz
def get_scalar_div(self, scalar):
    """Return a new vector: this vector divided by a single number.

    Bug fix: this method unpacked ``self.scalar`` -- an attribute that
    does not exist -- so every call raised AttributeError.  It now reads
    the component storage ``self._v`` like its sibling get_scalar_mul.
    """
    x, y, z = self._v
    return self.from_floats(x / scalar, y / scalar, z / scalar)
def get_vector_div(self, vector):
    """Return a new vector: component-wise quotient by *vector*."""
    ax, ay, az = self._v
    bx, by, bz = vector
    return self.from_floats(ax / bx, ay / by, az / bz)
def __neg__(self):
    """Return a vector pointing in the opposite direction.

    eg v1 = Vector(1, 2, 3); -v1 gives (-1, -2, -3)
    """
    cx, cy, cz = self._v
    return self.from_floats(-cx, -cy, -cz)
def __pos__(self):
    # Unary plus: return a copy rather than self, so callers never
    # accidentally alias this mutable vector.
    return self.copy()
def __nonzero__(self):
    """Truth value (Python 2): True unless this is the zero vector."""
    return any(self._v)
def __call__(self, keys):
    """Return a tuple of components selected by axis letters.

    keys -- iterable of axis names ('x', 'y' or 'z')
    eg v = Vector3(1.0, 2.0, 3.0); v('zyx') -> (3.0, 2.0, 1.0)
    """
    base = ord('x')
    comps = self._v
    # 'x'/'y'/'z' map to indices 0/1/2 via their character codes.
    return tuple(comps[ord(axis) - base] for axis in keys)
def as_tuple(self):
    """Return the (x, y, z) components as a plain tuple.

    Slightly quicker than tuple(vector), which goes through iteration.
    """
    return tuple(self._v)
def scale(self, scale):
    """Scale this vector in place by another vector or a scalar (same as *=).

    scale -- scalar or 3-item sequence to scale by
    Returns self so calls can be chained.
    """
    comps = self._v
    if hasattr(scale, "__getitem__"):
        sx, sy, sz = scale
        comps[0] *= sx
        comps[1] *= sy
        comps[2] *= sz
    else:
        for i in (0, 1, 2):
            comps[i] *= scale
    return self
def get_length(self):
    """Return the Euclidean length (magnitude) of the vector."""
    cx, cy, cz = self._v
    return sqrt(cx * cx + cy * cy + cz * cz)
# Alias: length and magnitude are the same quantity.
get_magnitude = get_length
def set_length(self, new_length):
    """Set the length of the vector (normalise, then scale) in place.

    A zero vector has no direction, so it is left as (0, 0, 0) --
    preserving the original ZeroDivisionError fallback.
    new_length -- the new length of the vector
    """
    v = self._v
    x, y, z = v
    current = sqrt(x * x + y * y + z * z)
    if current == 0:
        v[0] = 0.0
        v[1] = 0.0
        v[2] = 0.0
        return self
    ratio = new_length / current
    v[0] = x * ratio
    v[1] = y * ratio
    v[2] = z * ratio
    return self
def get_distance_to(self, p):
    """Return the Euclidean distance from this position to point *p*.

    p -- a position as a vector, or any 3-item sequence
    """
    ax, ay, az = self._v
    px, py, pz = p
    dx = ax - px
    dy = ay - py
    dz = az - pz
    return sqrt(dx * dx + dy * dy + dz * dz)
def get_distance_to_squared(self, p):
    """Return the squared distance to point *p* (no sqrt -- cheap compare).

    p -- a position as a vector, or any 3-item sequence
    """
    ax, ay, az = self._v
    px, py, pz = p
    dx = ax - px
    dy = ay - py
    dz = az - pz
    return dx * dx + dy * dy + dz * dz
def normalise(self):
    """Scale this vector in place to unit length; a zero vector stays zero."""
    v = self._v
    x, y, z = v
    magnitude = sqrt(x * x + y * y + z * z)
    if magnitude:
        v[0] = x / magnitude
        v[1] = y / magnitude
        v[2] = z / magnitude
    else:
        # Zero vector: no direction to normalise, keep it at the origin
        # (same outcome as the original ZeroDivisionError handler).
        v[0] = 0.0
        v[1] = 0.0
        v[2] = 0.0
    return self
# American spelling alias.
normalize = normalise
def get_normalised(self):
    """Return a new unit-length vector with this vector's direction.

    Note: raises ZeroDivisionError for the zero vector (unchanged).
    """
    x, y, z = self._v
    magnitude = sqrt(x * x + y * y + z * z)
    return self.from_floats(x / magnitude, y / magnitude, z / magnitude)
# American spelling alias.
get_normalized = get_normalised
def in_sphere(self, sphere):
    """Returns true if this vector (treated as a position) is contained in
    the given sphere.
    """
    # sphere must expose .position and .radius; distance3d is the
    # module-level Euclidean distance helper defined below.
    return distance3d(sphere.position, self) <= sphere.radius
def dot(self, other):
    """Return the dot product of this vector with another.

    other -- a vector or any 3-item sequence
    """
    ax, ay, az = self._v
    bx, by, bz = other
    return ax * bx + ay * by + az * bz
def cross(self, other):
    """Return the cross product of this vector with another, as a vector.

    other -- a vector or any 3-item sequence
    """
    ax, ay, az = self._v
    bx, by, bz = other
    return self.from_floats(ay * bz - by * az,
                            az * bx - bz * ax,
                            ax * by - bx * ay)
def cross_tuple(self, other):
    """Return the cross product as a plain tuple.

    Avoids constructing a Vector3 when the caller does not need one.
    other -- a vector or any 3-item sequence
    """
    ax, ay, az = self._v
    bx, by, bz = other
    return (ay * bz - by * az,
            az * bx - bz * ax,
            ax * by - bx * ay)
def distance3d_squared(p1, p2):
    """Squared Euclidean distance between two 3D points (no sqrt)."""
    ax, ay, az = p1
    bx, by, bz = p2
    dx = ax - bx
    dy = ay - by
    dz = az - bz
    return dx * dx + dy * dy + dz * dz
def distance3d(p1, p2):
    """Euclidean distance between two 3D points."""
    ax, ay, az = p1
    bx, by, bz = p2
    dx = ax - bx
    dy = ay - by
    dz = az - bz
    return sqrt(dx * dx + dy * dy + dz * dz)
def centre_point3d(points):
    """Return the centroid (arithmetic mean) of a sequence of points.

    Bug fix: ``sum`` starts from integer 0, and ``0 + Vector3`` goes
    through Vector3.__radd__, which cannot unpack an int -- so this
    always raised TypeError on non-empty input.  Supplying a zero
    Vector3 as the start value fixes it; the per-point conversion and
    the final scalar division are unchanged.
    """
    return sum((Vector3(p) for p in points), Vector3(0, 0, 0)) / len(points)
if __name__ == "__main__":
    # Ad-hoc smoke test / demo of the Vector3 API (Python 2 print
    # statements; exercises operators, indexing, iteration and file I/O).
    v1 = Vector3(2.2323, 3.43242, 1.)
    print 3*v1
    print (2, 4, 6)*v1
    print (1, 2, 3)+v1
    print v1('xxxyyyzzz')
    print v1[2]
    print v1.z
    v1[2]=5.
    print v1
    v2= Vector3(1.2, 5, 10)
    print v2
    v1 += v2
    print v1.get_length()
    print repr(v1)
    print v1[1]
    p1 = Vector3(1,2,3)
    print p1
    print repr(p1)
    for v in p1:
        print v
    #print p1[6]
    ptest = Vector3( [1,2,3] )
    print ptest
    z = Vector3()
    print z
    # Round-trip three vectors through a temp file via from_iter.
    file("test.txt", "w").write( "\n".join(str(float(n)) for n in range(20)) )
    f = file("test.txt")
    v1 = Vector3.from_iter( f )
    v2 = Vector3.from_iter( f )
    v3 = Vector3.from_iter( f )
    print v1, v2, v3
    print "--"
    print v1
    print v1 + (10,20,30)
    print v1('xz')
    print -v1
    #print tuple(ptest)
    #p1.set( (4, 5, 6) )
    #print p1
    print Vector3(10,10,30)+v1
    print Vector3((0,0,0,1))
    print Vector3(1, 2, 3).scale(3)
    print Vector3(1, 2, 3).scale((2, 4, 6))
    print bool(v1)
| gpl-3.0 |
linkinpark342/gummy_panzer | main.py | 1 | 2427 | #!/usr/bin/env python
import pygame
import gummy_panzer
import pkg_resources
import os
import sys
#new imports for this tutorial intro
from gummy_panzer import util, settings
from pygame.locals import *
# --- module-level game bootstrap (runs on import; Python 2 script) ---
settings.SCREEN_WIDTH = 800
settings.SCREEN_HEIGHT = 600
INTROWIDTH = 1600
NUMINTROS = 4
# Game-state constants for the (very small) state machine below.
INTRO = 0
MAINMENU = 1
PLAYMODE = 2
pygame.display.set_caption("GummyPanzer")
screen = pygame.display.set_mode((800, 600))
# NOTE(review): state starts at MAINMENU, so the intro loop (already
# disabled inside the string below) would be skipped even if re-enabled.
state = MAINMENU
introc = 0
intronum = 0
introsurf_cache = {0: util.load_image("intro0.png").convert(),
        1: util.load_image("intro1.png").convert(),
        2: util.load_image("intro2.png").convert(),
        3: util.load_image("intro3.png").convert(),
        }
introcount = 800
#font for the control box
music = pygame.mixer.Sound(pkg_resources.resource_stream("gummy_panzer",
    os.path.join("Sounds", "menu.ogg")))
# The triple-quoted block below is dead code (intro cutscene loop) kept
# disabled as a bare string expression.
"""
music.play(-1)
while 1 and state == INTRO:
    if introc == introcount:
        introc = 0
        intronum += 1
        if intronum == NUMINTROS:
            state = MAINMENU
            continue
    else:
        introc += 1
    if intronum in introsurf_cache:
        introsurf = introsurf_cache[intronum]
    screen.blit(introsurf, (0, 0), (introc, 0, 800, 600))
    #KEYPRESS EVENTS__+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+=-+_+_+_
    for e in pygame.event.get():
        if e.type == QUIT:
            pygame.quit()
        #Check if a key was pressed
        if e.type == KEYDOWN:
            #Quit if the Escape key is pressed
            if e.key == K_ESCAPE:
                state = MAINMENU
            else:
                introc = introcount
    pygame.display.flip()
music.stop()
"""
while 1 and state == MAINMENU:
    # NOTE(review): the title image is re-loaded from disk every frame;
    # hoisting load_image out of the loop would avoid redundant I/O.
    mainmenusurf = util.load_image("titlepage.png")
    for e in pygame.event.get():
        #QUIT is the big red X button on the window bar
        # NOTE(review): pygame.quit() does not exit this loop; subsequent
        # blit/flip calls will raise once the display is closed -- this
        # probably wants sys.exit() as well.
        if e.type == QUIT:
            pygame.quit()
        #Check if a key was pressed
        if e.type == KEYDOWN:
            #Quit if the Escape key is pressed
            if e.key == K_ESCAPE:
                pygame.quit()
            else:
                state = PLAYMODE
    screen.blit(mainmenusurf, (0, 0))
    pygame.display.flip()
music.stop()
# Main game: run normal ticks until tick() reports done, then boss ticks.
g = gummy_panzer.Game()
try:
    fxn = g.tick
    while True:
        if not fxn():
            fxn = g.boss_tick
except gummy_panzer.EndOfGameException:
    pygame.quit()
    sys.exit(0)
| gpl-3.0 |
miyyer/qb | qanta/buzzer/util.py | 2 | 7569 | import os
import pickle
import numpy as np
import chainer
from multiprocessing import Pool
from functools import partial
from chainer import Variable
from chainer.backends import cuda
from qanta.datasets.quiz_bowl import QuizBowlDataset
from qanta.guesser.abstract import AbstractGuesser
from qanta.util.constants import BUZZER_DEV_FOLD, BUZZER_TRAIN_FOLD
# constants
N_GUESSES = 10  # number of top guesses kept per character position
# NOTE(review): creating the output directory at import time is a side
# effect; merely importing this module creates output/buzzer.
os.makedirs("output/buzzer", exist_ok=True)
dataset_dir = "output/buzzer/{}_data.pkl"  # per-fold pickle cache path
def vector_converter_0(guesses_sequence):
    """vector converter / feature extractor with only prob

    Args:
        guesses_sequence: a sequence (length of question) of list of guesses
            (n_guesses), each entry is (guess, prob)
    Returns:
        a sequence of vectors
    """
    length = len(guesses_sequence)
    # Running state from the previous position: top-N probabilities and a
    # guess -> prob lookup, used for the "diff" and "is new" features.
    prev_prob_vec = [0.0 for _ in range(N_GUESSES)]
    prev_dict = dict()
    vecs = []
    for i in range(length):
        prob_vec = []
        prob_diff_vec = []
        isnew_vec = []
        guesses = guesses_sequence[i]
        for guess, prob in guesses:
            prob_vec.append(prob)
            if i > 0 and guess in prev_dict:
                # Guess seen at the previous position: feature is the change.
                prev_prob = prev_dict[guess]
                prob_diff_vec.append(prob - prev_prob)
                isnew_vec.append(0)
            else:
                prob_diff_vec.append(prob)
                isnew_vec.append(1)
        if len(guesses) < N_GUESSES:
            # Zero-pad so every position yields a fixed-width feature vector.
            for k in range(max(N_GUESSES - len(guesses), 0)):
                prob_vec.append(0)
                prob_diff_vec.append(0)
                isnew_vec.append(0)
        features = (
            prob_vec[:3]
            + isnew_vec[:3]
            + prob_diff_vec[:3]
            + [prob_vec[0] - prob_vec[1], prob_vec[1] - prob_vec[2]]
            + [prob_vec[0] - prev_prob_vec[0], prob_vec[1] - prev_prob_vec[1]]
            + [sum(isnew_vec[:5])]
            + [np.average(prob_vec), np.average(prev_prob_vec)]
            # NOTE(review): [:6] vs [:5] looks like an off-by-one, but do not
            # "fix" it -- trained buzzer models depend on this exact layout.
            + [np.average(prob_vec[:6]), np.average(prev_prob_vec[:5])]
            + [np.var(prob_vec), np.var(prev_prob_vec)]
            + [np.var(prob_vec[:5]), np.var(prev_prob_vec[:5])]
        )
        vecs.append(np.array(features, dtype=np.float32))
        prev_prob_vec = prob_vec
        prev_dict = {g: p for g, p in guesses}
    return vecs
def vector_converter_1(guesses_sequence):
    """vector converter / feature extractor with both logit and prob

    Args:
        guesses_sequence: a sequence (length of question) of list of guesses
            (n_guesses), each entry is (guess, logit, prob)
    Returns:
        a sequence of vectors
    """
    length = len(guesses_sequence)
    # Running state from the previous position (see vector_converter_0);
    # this variant tracks both logits and probabilities.
    prev_logit_vec = [0.0 for _ in range(N_GUESSES)]
    prev_prob_vec = [0.0 for _ in range(N_GUESSES)]
    prev_dict = dict()
    vecs = []
    for i in range(length):
        logit_vec = []
        prob_vec = []
        logit_diff_vec = []
        prob_diff_vec = []
        isnew_vec = []
        guesses = guesses_sequence[i]
        for guess, logit, prob in guesses:
            logit_vec.append(logit)
            prob_vec.append(prob)
            if i > 0 and guess in prev_dict:
                # Guess persisted from the previous position: use deltas.
                prev_logit, prev_prob = prev_dict[guess]
                logit_diff_vec.append(logit - prev_logit)
                prob_diff_vec.append(prob - prev_prob)
                isnew_vec.append(0)
            else:
                logit_diff_vec.append(logit)
                prob_diff_vec.append(prob)
                isnew_vec.append(1)
        if len(guesses) < N_GUESSES:
            # Zero-pad to a fixed feature width.
            for k in range(max(N_GUESSES - len(guesses), 0)):
                logit_vec.append(0)
                prob_vec.append(0)
                logit_diff_vec.append(0)
                prob_diff_vec.append(0)
                isnew_vec.append(0)
        features = (
            logit_vec[:3]
            + prob_vec[:3]
            + isnew_vec[:3]
            + logit_diff_vec[:3]
            + prob_diff_vec[:3]
            + [logit_vec[0] - logit_vec[1], logit_vec[1] - logit_vec[2]]
            + [prob_vec[0] - prob_vec[1], prob_vec[1] - prob_vec[2]]
            + [logit_vec[0] - prev_logit_vec[0], logit_vec[1] - prev_logit_vec[1]]
            + [prob_vec[0] - prev_prob_vec[0], prob_vec[1] - prev_prob_vec[1]]
            + [sum(isnew_vec[:5])]
            + [np.average(logit_vec), np.average(prev_logit_vec)]
            + [np.average(prob_vec), np.average(prev_prob_vec)]
            # NOTE(review): same [:6] vs [:5] asymmetry as vector_converter_0;
            # left as-is because trained models depend on this exact layout.
            + [np.average(logit_vec[:6]), np.average(prev_logit_vec[:5])]
            + [np.average(prob_vec[:6]), np.average(prev_prob_vec[:5])]
            + [np.var(logit_vec), np.var(prev_logit_vec)]
            + [np.var(prob_vec), np.var(prev_prob_vec)]
            + [np.var(logit_vec[:5]), np.var(prev_logit_vec[:5])]
            + [np.var(prob_vec[:5]), np.var(prev_prob_vec[:5])]
        )
        vecs.append(np.array(features, dtype=np.float32))
        prev_logit_vec = logit_vec
        prev_prob_vec = prob_vec
        prev_dict = {x: (y, z) for x, y, z in guesses}
    return vecs
def process_question(questions, vector_converter, item):
    """multiprocessing worker that converts the guesser output of a single
    question into format used by the buzzer

    Returns (qid, feature_vectors, labels, char_indices) where labels[i]
    is 1 iff the top guess at char_indices[i] matches the gold page.
    """
    qid, q_rows = item
    # NOTE(review): the group key unpacked above is immediately discarded
    # and re-read from the dataframe -- presumably equivalent; confirm.
    qid = q_rows.qanta_id.tolist()[0]
    answer = questions[qid].page
    q_rows = q_rows.groupby("char_index")
    char_indices = sorted(q_rows.groups.keys())
    guesses_sequence = []
    labels = []
    for idx in char_indices:
        # Highest-scoring guesses first; keep the top N_GUESSES.
        p = q_rows.get_group(idx).sort_values("score", ascending=False)
        guesses_sequence.append(list(zip(p.guess, p.score))[:N_GUESSES])
        labels.append(int(p.guess.tolist()[0] == answer))
    vectors = vector_converter(guesses_sequence)
    return qid, vectors, labels, char_indices
def read_data(
    fold,
    output_type="char",
    guesser_module="qanta.guesser.rnn",
    guesser_class="RnnGuesser",
    guesser_config_num=0,
    vector_converter=vector_converter_0,
):
    """Load (or build and cache) the buzzer dataset for one fold.

    Converts the guesser's per-question output dataframe into a list of
    (qid, feature_vectors, labels, char_indices) tuples via
    *vector_converter*, caching the result as a pickle.

    NOTE(review): the cache file is keyed only by *fold* -- changing the
    guesser arguments or the converter does NOT invalidate it; delete
    output/buzzer/<fold>_data.pkl manually after such changes.

    Bug fix: the multiprocessing Pool was created but never closed or
    joined, leaking worker processes on every call; it is now used as a
    context manager so workers are torn down deterministically.
    """
    if os.path.isfile(dataset_dir.format(fold)):
        with open(dataset_dir.format(fold), "rb") as f:
            return pickle.load(f)

    g_dir = AbstractGuesser.output_path(
        guesser_module, guesser_class, guesser_config_num, ""
    )
    g_path = AbstractGuesser.guess_path(g_dir, fold, output_type)
    with open(g_path, "rb") as f:
        df = pickle.load(f)

    df_groups = df.groupby("qanta_id")
    questions = QuizBowlDataset(buzzer_train=True).questions_by_fold()
    questions = {q.qanta_id: q for q in questions[fold]}

    worker = partial(process_question, questions, vector_converter)
    with Pool(8) as pool:
        dataset = pool.map(worker, df_groups)

    with open(dataset_dir.format(fold), "wb") as f:
        pickle.dump(dataset, f)
    return dataset
def convert_seq(batch, device=None):
    """Convert a batch of dataset tuples into chainer model inputs.

    device: None keeps host arrays, <0 moves each example individually,
    otherwise arrays are concatenated, moved in one transfer and re-split
    on the device (cheaper than per-example copies).
    """
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            # Split points to recover per-example arrays after the transfer.
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype=np.int32)
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    # qids and positions are unpacked but intentionally not returned;
    # the model consumes only feature sequences (xs) and labels (ys).
    qids, vectors, labels, positions = list(map(list, zip(*batch)))
    xs = [Variable(x) for x in to_device_batch(vectors)]
    ys = to_device_batch(labels)
    return {"xs": xs, "ys": ys}
if __name__ == "__main__":
    # Manual check: build (or load from cache) the train-fold dataset.
    data = read_data(BUZZER_TRAIN_FOLD)
    print(data)
| mit |
Magic07/online-judge-solutions | leetcode/1728-fancy-sequence.py | 2 | 1118 | class Fancy:
def __init__(self):
    """Three parallel lists: raw values plus per-append transform snapshots."""
    self.data = []
    self.mult = []
    self.add = []
def append(self, val: int) -> None:
    """Append *val*, snapshotting the current (mult, add) transform for it."""
    self.data.append(val)
    if not self.mult:
        # Very first element: seed the live transform with the identity.
        self.mult.append(1)
        self.add.append(0)
    # Copy the live transform (slot -1) as this element's snapshot.
    self.mult.append(self.mult[-1])
    self.add.append(self.add[-1])
def addAll(self, inc: int) -> None:
    """Add *inc* to every existing element in O(1): bump the live offset."""
    if not self.data:
        return
    self.add[-1] += inc
def multAll(self, m: int) -> None:
    """Multiply every existing element by *m* in O(1) on the live transform."""
    if not self.data:
        return
    # (x * mult + add) * m  ==  x * (mult * m) + (add * m)
    self.mult[-1] *= m
    self.add[-1] *= m
def getIndex(self, idx: int) -> int:
    """Value of element *idx* modulo 1e9+7, or -1 if out of range."""
    if idx >= len(self.data):
        return -1
    # Transform applied strictly after element idx was appended; the
    # division is exact because mult[-1] is a multiple of mult[idx].
    factor = self.mult[-1] // self.mult[idx]
    offset = self.add[-1] - self.add[idx] * factor
    return (self.data[idx] * factor + offset) % 1000000007
# Your Fancy object will be instantiated and called as such:
# obj = Fancy()
# obj.append(val)
# obj.addAll(inc)
# obj.multAll(m)
# param_4 = obj.getIndex(idx)
# Ref: https://leetcode.com/problems/fancy-sequence/discuss/898753/Python-Time-O(1)-for-each | mit |
40223123/w16b_test | static/Brython3.1.1-20150328-091302/Lib/unittest/result.py | 727 | 6397 | """Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Wrap a result-recording method so the run stops when failfast is set.

    If the result object has a truthy ``failfast`` attribute, ``stop()``
    is requested before the wrapped method records the failure/error.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        should_stop = getattr(self, 'failfast', False)
        if should_stop:
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        # stream/descriptions/verbosity are accepted for API compatibility
        # with the stdlib signature but are not stored here.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def printErrors(self):
        "Called by TestRunner after test run"
        #fixme brython
        pass

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()

    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers when buffering is on,
        # so per-test output can be attached to failures only.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = io.StringIO()
                self._stdout_buffer = io.StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False

    def _restoreStdout(self):
        # Flush captured output to the real streams (only when the test
        # failed/errored, i.e. _mirrorOutput is set), then reset buffers.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        #fix me brython
        #return '__unittest' in tb.tb_frame.f_globals
        # NOTE: this Brython stub currently treats EVERY frame as a runner
        # frame (always True), so _exc_info_to_string skips the whole
        # traceback and _count_relevant_tb_levels counts zero frames.
        return True

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
| gpl-3.0 |
Nyks45/Veno-M | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name, filled in from sched_switch events; pid 0 is the
# idle task by convention.
threads = {0: "idle"}

def thread_name(pid):
    """Return a human-readable 'comm:pid' label for a known task."""
    return "{0}:{1}".format(threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder event used when nothing notable happened on a runqueue."""
    @staticmethod
    def color():
        # No marker colour in the GUI timeline.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Runqueue event: a task left the runqueue to sleep."""
    @staticmethod
    def color():
        # (r, g, b) marker colour used by the GUI timeline.
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper  # pid of the task that went to sleep

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """Runqueue event: a sleeping task was woken and became runnable."""
    @staticmethod
    def color():
        # (r, g, b) marker colour used by the GUI timeline.
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee  # pid of the task that woke up

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """Runqueue event: a newly forked task was placed on the runqueue."""
    @staticmethod
    def color():
        # (r, g, b) marker colour used by the GUI timeline.
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child  # pid of the forked child task

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """Runqueue event: a task migrated onto this CPU's runqueue."""
    @staticmethod
    def color():
        # (r, g, b) marker colour used by the GUI timeline.
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new  # pid of the task that migrated in

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """Runqueue event: a task migrated away from this CPU's runqueue."""
    @staticmethod
    def color():
        # (r, g, b) marker colour used by the GUI timeline.
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old  # pid of the task that migrated out

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Snapshot of one CPU runqueue: the tuple of runnable pids plus the
    event that produced this state.  Mutators return new snapshots."""
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        # Mutable/shared default arguments are tolerable here only because
        # tasks is immediately frozen into a tuple and the default event
        # carries no state.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next, cpu):
        event = RunqueueEventUnknown()

        # taskState() comes from the perf Core helpers (star-imported);
        # "R" means the outgoing task is still runnable.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Task already present: just record the event on this snapshot.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # script, so calling repr() on a snapshot raises AttributeError.
        ret += self.origin_tostring()

        return ret
class TimeSlice:
    """One interval of the trace: per-CPU runqueue snapshots between two
    scheduler events, chained to the previous slice."""
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            # Inherit load and runqueue state from the preceding slice.
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            # Nothing changed on this runqueue; do not record a slice.
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and open the successor.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus the callbacks used by perf's
    wx-based SchedGui viewer (Python 2 script)."""
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        # Binary search for the slice containing timestamp ts; -1 if none.
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Python 2 integer division keeps i an int index.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        # GUI callback: show a textual summary of the runqueue under the cursor.
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        # NOTE(review): timestamps appear to be in ns, so dividing by 10**6
        # yields milliseconds although the label says "us" -- confirm intent.
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        # Paint one CPU's band for this slice: redder means lighter load.
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest CPU index seen in the final slice (bands to draw).
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw scheduler trace events and folds them into TimeSlices."""
    def __init__(self):
        # Per-CPU pid currently on-CPU; -1 means "not yet known".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Keep the global pid -> comm table up to date for pretty-printing.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        # Failed wakeups carry no runqueue change; ignore them.
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    # perf-script hook: called once before any events; set up the proxy.
    global parser
    parser = SchedEventProxy()
def trace_end():
    # perf-script hook: called after the last event; launch the wx GUI.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, orig_cpu,
    dest_cpu):
    # Forward task-migration events to the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    prev_comm, prev_pid, prev_prio, prev_state,
    next_comm, next_pid, next_prio):
    # Forward context-switch events to the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
         next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    # Wakeup of a freshly forked task (fork flag = 1).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    # Wakeup of an existing sleeping task (fork flag = 0).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
ray-project/ray | python/ray/_private/runtime_env.py | 1 | 23362 | import hashlib
import logging
import json
import yaml
from filelock import FileLock
from pathlib import Path
from zipfile import ZipFile
from ray._private.thirdparty.pathspec import PathSpec
from ray.job_config import JobConfig
from enum import Enum
import ray
from ray.experimental.internal_kv import (_internal_kv_put, _internal_kv_get,
_internal_kv_exists,
_internal_kv_initialized)
from typing import List, Tuple, Optional, Callable
from urllib.parse import urlparse
import os
import sys
# We need to setup this variable before
# using this module
PKG_DIR = None  # working-dir package staging directory, injected by the caller
logger = logging.getLogger(__name__)
# Threshold above which packaged files trigger a size warning.
FILE_SIZE_WARNING = 10 * 1024 * 1024  # 10MB
class RuntimeEnvDict:
    """Parses and validates the runtime env dictionary from the user.

    Attributes:
        working_dir (Path): Specifies the working directory of the worker.
            This can either be a local directory or zip file.
            Examples:
                "."  # cwd
                "local_project.zip"  # archive is unpacked into directory
        py_modules (List[Path]): Similar to working_dir, but specifies python
            modules to add to the `sys.path`.
            Examples:
                ["/path/to/other_module", "/other_path/local_project.zip"]
        pip (List[str] | str): Either a list of pip packages, or a string
            containing the path to a pip requirements.txt file.
        conda (dict | str): Either the conda YAML config, the name of a
            local conda env (e.g., "pytorch_p36"), or the path to a conda
            environment.yaml file.
            The Ray dependency will be automatically injected into the conda
            env to ensure compatibility with the cluster Ray. The conda name
            may be mangled automatically to avoid conflicts between runtime
            envs.
            This field cannot be specified at the same time as the 'pip' field.
            To use pip with conda, please specify your pip dependencies within
            the conda YAML config:
            https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-e
            nvironments.html#create-env-file-manually
            Examples:
                {"channels": ["defaults"], "dependencies": ["codecov"]}
                "pytorch_p36"   # Found on DLAMIs
        container (dict): Require a given (Docker) container image,
            The Ray worker process will run in a container with this image.
            The `worker_path` is the default_worker.py path.
            The `run_options` list spec is here:
            https://docs.docker.com/engine/reference/run/
            Examples:
                {"image": "anyscale/ray-ml:nightly-py38-cpu",
                 "worker_path": "/root/python/ray/workers/default_worker.py",
                 "run_options": ["--cap-drop SYS_ADMIN","--log-level=debug"]}
        env_vars (dict): Environment variables to set.
            Examples:
                {"OMP_NUM_THREADS": "32", "TF_WARNINGS": "none"}
    """

    def __init__(self, runtime_env_json: dict):
        # Simple dictionary with all options validated. This will always
        # contain all supported keys; values will be set to None if
        # unspecified. However, if all values are None this is set to {}.
        self._dict = dict()

        if "working_dir" in runtime_env_json:
            self._dict["working_dir"] = runtime_env_json["working_dir"]
            if not isinstance(self._dict["working_dir"], str):
                raise TypeError("`working_dir` must be a string. Type "
                                f"{type(self._dict['working_dir'])} received.")
            working_dir = Path(self._dict["working_dir"]).absolute()
        else:
            self._dict["working_dir"] = None
            working_dir = None

        self._dict["conda"] = None
        if "conda" in runtime_env_json:
            if sys.platform == "win32":
                raise NotImplementedError("The 'conda' field in runtime_env "
                                          "is not currently supported on "
                                          "Windows.")
            conda = runtime_env_json["conda"]
            if isinstance(conda, str):
                yaml_file = Path(conda)
                if yaml_file.suffix in (".yaml", ".yml"):
                    # Relative YAML paths are resolved against working_dir.
                    if working_dir and not yaml_file.is_absolute():
                        yaml_file = working_dir / yaml_file
                    if not yaml_file.is_file():
                        raise ValueError(
                            f"Can't find conda YAML file {yaml_file}")
                    try:
                        self._dict["conda"] = yaml.safe_load(
                            yaml_file.read_text())
                    except Exception as e:
                        raise ValueError(
                            f"Invalid conda file {yaml_file} with error {e}")
                else:
                    # A bare string names a preinstalled conda environment.
                    logger.info(
                        f"Using preinstalled conda environment: {conda}")
                    self._dict["conda"] = conda
            elif isinstance(conda, dict):
                self._dict["conda"] = conda
            elif conda is not None:
                raise TypeError("runtime_env['conda'] must be of type str or "
                                "dict")

        self._dict["pip"] = None
        if "pip" in runtime_env_json:
            if sys.platform == "win32":
                raise NotImplementedError("The 'pip' field in runtime_env "
                                          "is not currently supported on "
                                          "Windows.")
            if ("conda" in runtime_env_json
                    and runtime_env_json["conda"] is not None):
                raise ValueError(
                    "The 'pip' field and 'conda' field of "
                    "runtime_env cannot both be specified.\n"
                    f"specified pip field: {runtime_env_json['pip']}\n"
                    f"specified conda field: {runtime_env_json['conda']}\n"
                    "To use pip with conda, please only set the 'conda' "
                    "field, and specify your pip dependencies "
                    "within the conda YAML config dict: see "
                    "https://conda.io/projects/conda/en/latest/"
                    "user-guide/tasks/manage-environments.html"
                    "#create-env-file-manually")
            pip = runtime_env_json["pip"]
            if isinstance(pip, str):
                # We have been given a path to a requirements.txt file.
                pip_file = Path(pip)
                if working_dir and not pip_file.is_absolute():
                    pip_file = working_dir / pip_file
                if not pip_file.is_file():
                    raise ValueError(f"{pip_file} is not a valid file")
                self._dict["pip"] = pip_file.read_text()
            elif isinstance(pip, list) and all(
                    isinstance(dep, str) for dep in pip):
                # Construct valid pip requirements.txt from list of packages.
                self._dict["pip"] = "\n".join(pip) + "\n"
            else:
                raise TypeError("runtime_env['pip'] must be of type str or "
                                "List[str]")

        # "uris" and "container" are passed through without validation.
        if "uris" in runtime_env_json:
            self._dict["uris"] = runtime_env_json["uris"]
        if "container" in runtime_env_json:
            self._dict["container"] = runtime_env_json["container"]

        self._dict["env_vars"] = None
        if "env_vars" in runtime_env_json:
            env_vars = runtime_env_json["env_vars"]
            self._dict["env_vars"] = env_vars
            if not (isinstance(env_vars, dict) and all(
                    isinstance(k, str) and isinstance(v, str)
                    for (k, v) in env_vars.items())):
                # BUGFIX: the two implicitly-concatenated literals were
                # missing a separating space, yielding
                # "must be of typeDict[str, str]".
                raise TypeError("runtime_env['env_vars'] must be of type "
                                "Dict[str, str]")

        # Used by Ray's experimental package loading feature.
        # TODO(architkulkarni): This should be unified with existing fields
        if "_packaging_uri" in runtime_env_json:
            self._dict["_packaging_uri"] = runtime_env_json["_packaging_uri"]
            if self._dict["env_vars"] is None:
                self._dict["env_vars"] = {}
            # TODO(ekl): env vars is probably not the right long term impl.
            self._dict["env_vars"].update(
                RAY_PACKAGING_URI=self._dict["_packaging_uri"])

        if "_ray_release" in runtime_env_json:
            self._dict["_ray_release"] = runtime_env_json["_ray_release"]

        if "_ray_commit" in runtime_env_json:
            self._dict["_ray_commit"] = runtime_env_json["_ray_commit"]
        else:
            # Pin the cluster's Ray commit when an env will be built.
            if self._dict.get("pip") or self._dict.get("conda"):
                self._dict["_ray_commit"] = ray.__commit__

        # Used for testing wheels that have not yet been merged into master.
        # If this is set to True, then we do not inject Ray into the conda
        # or pip dependencies.
        if "_skip_inject_ray" in runtime_env_json:
            self._dict["_skip_inject_ray"] = runtime_env_json[
                "_skip_inject_ray"]

        # TODO(ekl) we should have better schema validation here.
        # TODO(ekl) support py_modules
        # TODO(architkulkarni) support docker

        # TODO(architkulkarni) This is to make it easy for the worker caching
        # code in C++ to check if the env is empty without deserializing and
        # parsing it. We should use a less confusing approach here.
        if all(val is None for val in self._dict.values()):
            self._dict = {}

    def get_parsed_dict(self) -> dict:
        """Return the validated runtime env as a plain dict."""
        return self._dict

    def serialize(self) -> str:
        """Serialize to a canonical JSON string.

        Use sort_keys=True because we will use the output as a key to cache
        workers by, so we need the serialization to be independent of the
        dict order.
        """
        return json.dumps(self._dict, sort_keys=True)

    def set_uris(self, uris):
        """Overwrite the 'uris' entry of the parsed dict."""
        self._dict["uris"] = uris
class Protocol(Enum):
    """Enum of the supported backend storage protocols.

    Each member is declared as a ``(value, docstring)`` pair; the custom
    ``__new__`` below attaches the second element as the member's __doc__.
    """

    def __new__(cls, value, doc=None):
        # Custom member creation: only the first tuple element is the value.
        member = object.__new__(cls)
        member._value_ = value
        if doc is not None:
            member.__doc__ = doc
        return member

    GCS = "gcs", "For packages created and managed by the system."
    PIN_GCS = "pingcs", "For packages created and managed by the users."
def _xor_bytes(left: bytes, right: bytes) -> bytes:
if left and right:
return bytes(a ^ b for (a, b) in zip(left, right))
return left or right
def _dir_travel(
        path: Path,
        excludes: List[Callable],
        handler: Callable,
):
    """Recursively walk `path`, calling `handler` on every non-excluded entry.

    `excludes` is a stack of matcher callables; a .gitignore found in the
    current directory is pushed for the duration of that subtree and popped
    on the way out. A path matched by any active matcher is skipped along
    with its entire subtree.
    """
    e = _get_gitignore(path)
    if e is not None:
        excludes.append(e)
    # NOTE(review): `e` is shadowed by the `except ... as e` below; after a
    # non-raising handler call it still holds the gitignore matcher, and on
    # an exception we re-raise immediately, so the final pop check is safe.
    skip = any(e(path) for e in excludes)
    if not skip:
        try:
            handler(path)
        except Exception as e:
            logger.error(f"Issue with path: {path}")
            raise e
        if path.is_dir():
            for sub_path in path.iterdir():
                _dir_travel(sub_path, excludes, handler)
    if e is not None:
        excludes.pop()
def _zip_module(root: Path, relative_path: Path, excludes: Optional[Callable],
                zip_handler: ZipFile) -> None:
    """Go through all files and zip them into a zip file.

    Archive entry names are taken relative to `relative_path`. `excludes`
    is a single matcher callable (or None); .gitignore files encountered
    during the walk are honored as well via _dir_travel.
    """

    def handler(path: Path):
        # Pack this path if it's an empty directory or it's a file.
        # Precedence is (is_dir and empty) or is_file.
        if path.is_dir() and next(path.iterdir(),
                                  None) is None or path.is_file():
            file_size = path.stat().st_size
            if file_size >= FILE_SIZE_WARNING:
                logger.warning(
                    f"File {path} is very large ({file_size} bytes). "
                    "Consider excluding this file from the working directory.")
            to_path = path.relative_to(relative_path)
            zip_handler.write(path, to_path)

    excludes = [] if excludes is None else [excludes]
    _dir_travel(root, excludes, handler)
def _hash_modules(
        root: Path,
        relative_path: Path,
        excludes: Optional[Callable],
) -> bytes:
    """Helper function to create hash of a directory.

    It'll go through all the files in the directory and xor
    hash(file_name, file_content) to create a hash value.

    XOR-combining makes the result independent of traversal order; file
    names are hashed relative to `relative_path` so the hash is stable
    across machines. Returns None for an empty/fully-excluded tree.
    """
    hash_val = None
    BUF_SIZE = 4096 * 1024  # read files in 4 MiB chunks

    def handler(path: Path):
        md5 = hashlib.md5()
        # Include the relative name so renames change the hash.
        md5.update(str(path.relative_to(relative_path)).encode())
        if not path.is_dir():
            with path.open("rb") as f:
                data = f.read(BUF_SIZE)
                while len(data) != 0:
                    md5.update(data)
                    data = f.read(BUF_SIZE)
        nonlocal hash_val
        hash_val = _xor_bytes(hash_val, md5.digest())

    excludes = [] if excludes is None else [excludes]
    _dir_travel(root, excludes, handler)
    return hash_val
def _get_local_path(pkg_uri: str) -> str:
    """Map a package URI to its file path under the local PKG_DIR."""
    assert PKG_DIR, "Please set PKG_DIR in the module first."
    (_, pkg_name) = _parse_uri(pkg_uri)
    return os.path.join(PKG_DIR, pkg_name)
def _parse_uri(pkg_uri: str) -> Tuple[Protocol, str]:
    """Split a package URI into (protocol, package name).

    E.g. "gcs://pkg.zip" -> (Protocol.GCS, "pkg.zip"). Raises ValueError
    if the scheme is not a known Protocol value.
    """
    parsed = urlparse(pkg_uri)
    return (Protocol(parsed.scheme), parsed.netloc)
def _get_excludes(path: Path, excludes: List[str]) -> Callable:
    """Build a matcher callable from gitwildmatch-style exclude patterns.

    The returned callable takes a Path and reports whether it matches any
    pattern, with paths interpreted relative to `path`.
    """
    path = path.absolute()
    pathspec = PathSpec.from_lines("gitwildmatch", excludes)

    def match(p: Path):
        path_str = str(p.absolute().relative_to(path))
        # NOTE(review): a trailing "/" is appended unconditionally here,
        # unlike _get_gitignore which appends it only for directories —
        # presumably intentional so patterns match files too; verify.
        path_str += "/"
        return pathspec.match_file(path_str)

    return match
def _get_gitignore(path: Path) -> Optional[Callable]:
    """Return a matcher for `path`/.gitignore, or None if no such file.

    The matcher takes a Path and reports whether the .gitignore patterns
    match it; directories are tested with a trailing "/" per git semantics.
    """
    path = path.absolute()
    ignore_file = path / ".gitignore"
    if ignore_file.is_file():
        with ignore_file.open("r") as f:
            pathspec = PathSpec.from_lines("gitwildmatch", f.readlines())

        def match(p: Path):
            path_str = str(p.absolute().relative_to(path))
            if p.is_dir():
                path_str += "/"
            return pathspec.match_file(path_str)

        return match
    else:
        return None
# TODO(yic): Fix this later to handle big directories in better way
def get_project_package_name(working_dir: str, py_modules: List[str],
                             excludes: List[str]) -> str:
    """Get the name of the package by working dir and modules.

    This function will generate the name of the package by the working
    directory and modules. It'll go through all the files in working_dir
    and modules and hash the contents of these files to get the hash value
    of this package. The final package name is: _ray_pkg_<HASH_VAL>.zip
    Right now, only the modules given will be included. The dependencies
    are not included automatically.

    Examples:

    .. code-block:: python

        >>> import any_module
        >>> get_project_package_name("/working_dir", [any_module])
        .... _ray_pkg_af2734982a741.zip

    e.g., _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip

    Args:
        working_dir (str): The working directory.
        py_modules (list[str]): The python module.
        excludes (list[str]): The dir or files that should be excluded.

    Returns:
        Package name as a string, or None if there was nothing to hash.
    """
    RAY_PKG_PREFIX = "_ray_pkg_"
    hash_val = None
    if working_dir:
        if not isinstance(working_dir, str):
            raise TypeError("`working_dir` must be a string.")
        working_dir = Path(working_dir).absolute()
        if not working_dir.exists() or not working_dir.is_dir():
            raise ValueError(f"working_dir {working_dir} must be an existing"
                             " directory")
        hash_val = _xor_bytes(
            hash_val,
            _hash_modules(working_dir, working_dir,
                          _get_excludes(working_dir, excludes)))
    for py_module in py_modules or []:
        if not isinstance(py_module, str):
            raise TypeError("`py_module` must be a string.")
        module_dir = Path(py_module).absolute()
        if not module_dir.exists() or not module_dir.is_dir():
            raise ValueError(f"py_module {py_module} must be an existing"
                             " directory")
        # Modules are hashed relative to their parent so the module name
        # itself is part of the hash; excludes do not apply to modules.
        hash_val = _xor_bytes(
            hash_val, _hash_modules(module_dir, module_dir.parent, None))
    return RAY_PKG_PREFIX + hash_val.hex() + ".zip" if hash_val else None
def create_project_package(working_dir: str, py_modules: List[str],
                           excludes: List[str], output_path: str) -> None:
    """Create a package that will be used by workers.

    This function is used to create a package file based on working directory
    and python local modules.

    Args:
        working_dir (str): The working directory.
        py_modules (list[str]): The list of path of python modules to be
            included.
        excludes (List(str)): The directories or file to be excluded.
            Note: applied to working_dir contents only, not to py_modules.
        output_path (str): The path of file to be created.
    """
    pkg_file = Path(output_path).absolute()
    with ZipFile(pkg_file, "w") as zip_handler:
        if working_dir:
            # put all files in /path/working_dir into zip
            working_path = Path(working_dir).absolute()
            _zip_module(working_path, working_path,
                        _get_excludes(working_path, excludes), zip_handler)
        for py_module in py_modules or []:
            # Archive modules relative to their parent so the zip contains
            # the module directory itself.
            module_path = Path(py_module).absolute()
            _zip_module(module_path, module_path.parent, None, zip_handler)
def fetch_package(pkg_uri: str) -> Path:
    """Fetch a package from a given uri if not exists locally.

    This function is used to fetch a package from the given uri and unpack it.

    Args:
        pkg_uri (str): The uri of the package to download.

    Returns:
        The directory (Path) containing this package.
    """
    pkg_file = Path(_get_local_path(pkg_uri))
    # The unpack directory is the package file path minus its extension.
    local_dir = pkg_file.with_suffix("")
    assert local_dir != pkg_file, "Invalid pkg_file!"
    if local_dir.exists():
        # Already fetched and unpacked on this node.
        assert local_dir.is_dir(), f"{local_dir} is not a directory"
        return local_dir
    logger.debug("Fetch packge")
    (protocol, pkg_name) = _parse_uri(pkg_uri)
    if protocol in (Protocol.GCS, Protocol.PIN_GCS):
        code = _internal_kv_get(pkg_uri)
        if code is None:
            raise IOError("Fetch uri failed")
        # NOTE(review): dead guard — `code` cannot be falsy here since None
        # was already rejected above and empty bytes would also raise? No:
        # b"" passes the None check, so this keeps b"" as-is.
        code = code or b""
        pkg_file.write_bytes(code)
    else:
        raise NotImplementedError(f"Protocol {protocol} is not supported")
    logger.debug(f"Unpack {pkg_file} to {local_dir}")
    with ZipFile(str(pkg_file), "r") as zip_ref:
        zip_ref.extractall(local_dir)
    # Remove the zip once unpacked; only the directory is kept.
    pkg_file.unlink()
    return local_dir
def _store_package_in_gcs(gcs_key: str, data: bytes) -> int:
    """Store raw package bytes in the GCS KV store; return the byte count."""
    _internal_kv_put(gcs_key, data)
    return len(data)
def push_package(pkg_uri: str, pkg_path: str) -> int:
    """Push a package to uri.

    This function is to push a local file to remote uri. Right now, only GCS
    is supported.

    Args:
        pkg_uri (str): The uri of the package to upload to.
        pkg_path (str): Path of the local file.

    Returns:
        The number of bytes uploaded.

    Raises:
        NotImplementedError: If the URI scheme is not a GCS protocol.
    """
    (protocol, pkg_name) = _parse_uri(pkg_uri)
    data = Path(pkg_path).read_bytes()
    if protocol in (Protocol.GCS, Protocol.PIN_GCS):
        return _store_package_in_gcs(pkg_uri, data)
    else:
        raise NotImplementedError(f"Protocol {protocol} is not supported")
def package_exists(pkg_uri: str) -> bool:
    """Check whether the package with given uri exists or not.

    Args:
        pkg_uri (str): The uri of the package.

    Return:
        True for package existing and False for not.

    Raises:
        NotImplementedError: If the URI scheme is not a GCS protocol.
    """
    assert _internal_kv_initialized()
    (protocol, pkg_name) = _parse_uri(pkg_uri)
    if protocol in (Protocol.GCS, Protocol.PIN_GCS):
        return _internal_kv_exists(pkg_uri)
    else:
        raise NotImplementedError(f"Protocol {protocol} is not supported")
def rewrite_runtime_env_uris(job_config: JobConfig) -> None:
    """Rewrite the uris field in job_config.

    This function is used to update the runtime field in job_config. The
    runtime field will be generated based on the hash of required files and
    modules.

    Args:
        job_config (JobConfig): The job config.
    """
    # For now, we only support local directory and packages.
    # If the user already supplied explicit uris, leave them untouched.
    uris = job_config.runtime_env.get("uris")
    if uris is not None:
        return
    working_dir = job_config.runtime_env.get("working_dir")
    py_modules = job_config.runtime_env.get("py_modules")
    excludes = job_config.runtime_env.get("excludes")
    if working_dir or py_modules:
        if excludes is None:
            excludes = []
        # Derive a content-addressed package name and record it as a GCS uri.
        pkg_name = get_project_package_name(working_dir, py_modules, excludes)
        job_config.set_runtime_env_uris(
            [Protocol.GCS.value + "://" + pkg_name])
def upload_runtime_env_package_if_needed(job_config: JobConfig) -> None:
    """Upload runtime env if it's not there.

    It'll check whether the runtime environment exists in the cluster or not.
    If it doesn't exist, a package will be created based on the working
    directory and modules defined in job config. The package will be
    uploaded to the cluster after this.

    Args:
        job_config (JobConfig): The job config of driver.
    """
    assert _internal_kv_initialized()
    pkg_uris = job_config.get_runtime_env_uris()
    for pkg_uri in pkg_uris:
        if not package_exists(pkg_uri):
            file_path = _get_local_path(pkg_uri)
            pkg_file = Path(file_path)
            working_dir = job_config.runtime_env.get("working_dir")
            py_modules = job_config.runtime_env.get("py_modules")
            excludes = job_config.runtime_env.get("excludes") or []
            logger.info(f"{pkg_uri} doesn't exist. Create new package with"
                        f" {working_dir} and {py_modules}")
            # Reuse a previously created local zip if one is already there.
            if not pkg_file.exists():
                create_project_package(working_dir, py_modules, excludes,
                                       file_path)
            # Push the data to remote storage
            pkg_size = push_package(pkg_uri, pkg_file)
            logger.info(f"{pkg_uri} has been pushed with {pkg_size} bytes")
def ensure_runtime_env_setup(pkg_uris: List[str]) -> Optional[str]:
    """Make sure all required packages are downloaded to local.

    Necessary packages required to run the job will be downloaded
    into local file system if it doesn't exist.

    Args:
        pkg_uri list(str): Package of the working dir for the runtime env.

    Return:
        Working directory is returned if the pkg_uris is not empty,
        otherwise, None is returned.
    """
    pkg_dir = None
    assert _internal_kv_initialized()
    for pkg_uri in pkg_uris:
        # For each node, the package will only be downloaded one time
        # Locking to avoid multiple process download concurrently
        pkg_file = Path(_get_local_path(pkg_uri))
        with FileLock(str(pkg_file) + ".lock"):
            pkg_dir = fetch_package(pkg_uri)
        # Make the unpacked package importable by the worker.
        sys.path.insert(0, str(pkg_dir))
    # Right now, multiple pkg_uris are not supported correctly.
    # We return the last one as working directory
    return str(pkg_dir) if pkg_dir else None
| apache-2.0 |
florian-dacosta/OCB | addons/product_extended/product_extended.py | 26 | 4955 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
class product_product(osv.osv):
    """Extension of product.product that computes a product's cost price
    from its Bill of Materials (components + routing work-center costs)."""
    _name = 'product.product'
    _inherit = 'product.product'

    def compute_price(self, cr, uid, ids, recursive=False, test=False, real_time_accounting = False, context=None):
        '''
        Compute the BoM-based cost price for each product in `ids`.

        When test is True, nothing is written; a dict {product_id: price}
        (testdict) is returned to inform the user about the changes to be
        made. When test is False, prices are written and True is returned.
        With recursive=True the prices of child BoMs are computed first.
        '''
        testdict = {}
        for prod_id in ids:
            bom_obj = self.pool.get('mrp.bom')
            bom_id = bom_obj._bom_find(cr, uid, product_id = prod_id, context=context)
            if bom_id:
                # In recursive mode, it will first compute the prices of child boms
                if recursive:
                    #Search the products that are components of this bom of prod_id
                    bom = bom_obj.browse(cr, uid, bom_id, context=context)
                    #Call compute_price on these subproducts
                    prod_set = set([x.product_id.id for x in bom.bom_line_ids])
                    res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context)
                    if test:
                        testdict.update(res)
                #Use calc price to calculate and put the price on the product of the BoM if necessary
                price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context)
                if test:
                    testdict.update({prod_id : price})
        if test:
            return testdict
        else:
            return True

    def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None):
        """Compute the cost of one BoM: component costs (UoM-converted)
        plus routing work-center cycle/hour costs. Writes the price to the
        product template unless test is True; returns the computed price."""
        if context is None:
            context={}
        price = 0
        uom_obj = self.pool.get("product.uom")
        tmpl_obj = self.pool.get('product.template')
        # Sum component costs, converting each component's standard price
        # into the BoM line's unit of measure.
        for sbom in bom.bom_line_ids:
            my_qty = sbom.product_qty
            price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty
        if bom.routing_id:
            # Add work-center costs: fixed per-cycle cost plus hourly cost
            # scaled by setup/teardown/cycle times and efficiency.
            for wline in bom.routing_id.workcenter_lines:
                wc = wline.workcenter_id
                cycle = wline.cycle_nbr
                hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
                price += wc.costs_cycle * cycle + wc.costs_hour * hour
        price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id)
        #Convert on product UoM quantities
        if price > 0:
            price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id)
        product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context)
        if not test:
            if (product.valuation != "real_time" or not real_time_accounting):
                tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context)
            else:
                #Call wizard function here
                # Real-time valuation requires the standard-price change
                # wizard so that accounting entries are generated.
                wizard_obj = self.pool.get("stock.change.standard.price")
                ctx = context.copy()
                ctx.update({'active_id': product.id, 'active_model': 'product.template'})
                wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx)
                wizard_obj.change_price(cr, uid, [wiz_id], context=ctx)
        return price

# Old-style OpenERP model registration: instantiating registers the model.
product_product()
class product_bom(osv.osv):
    """Extension of mrp.bom exposing the product's standard price as a
    related (non-stored) field on the BoM."""
    _inherit = 'mrp.bom'
    _columns = {
        'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False)
    }

# Old-style OpenERP model registration: instantiating registers the model.
product_bom()
| agpl-3.0 |
mythmon/kitsune | kitsune/users/tests/test_views.py | 5 | 22341 | import os
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.test.utils import override_settings
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from tidings.tests import watch
from kitsune.questions.tests import QuestionFactory, AnswerFactory
from kitsune.questions.models import Question, Answer
from kitsune.sumo.tests import (TestCase, LocalizingClient,
send_mail_raise_smtp)
from kitsune.sumo.urlresolvers import reverse
from kitsune.users import ERROR_SEND_EMAIL
from kitsune.users.models import (
CONTRIBUTOR_GROUP, Profile, RegistrationProfile, EmailChange, Setting,
email_utils, Deactivation)
from kitsune.users.tests import UserFactory, GroupFactory, add_permission
class RegisterTests(TestCase):
    """Tests for user registration, account activation, and the related
    confirmation/welcome emails. Site.objects.get_current is mocked so
    email bodies get a fixed domain."""

    def setUp(self):
        # Start every test logged out.
        self.client.logout()
        super(RegisterTests, self).setUp()

    @mock.patch.object(Site.objects, 'get_current')
    def test_new_user(self, get_current):
        """Registering creates an inactive user, sends a confirmation email
        with an activation link, and login works once activated."""
        get_current.return_value.domain = 'su.mo.com'
        response = self.client.post(reverse('users.register', locale='en-US'),
                                    {'username': 'newbie',
                                     'email': 'newbie@example.com',
                                     'password': 'foobar22',
                                     'password2': 'foobar22'}, follow=True)
        eq_(200, response.status_code)
        u = User.objects.get(username='newbie')
        # Password is stored hashed with the sha256 hasher.
        assert u.password.startswith('sha256')
        assert not u.is_active
        eq_(1, len(mail.outbox))
        assert mail.outbox[0].subject.find('Please confirm your') == 0
        key = RegistrationProfile.objects.all()[0].activation_key
        assert mail.outbox[0].body.find('activate/%s/%s' % (u.id, key)) > 0
        # By default, users aren't added to any groups
        eq_(0, len(u.groups.all()))
        # Now try to log in
        u.is_active = True
        u.save()
        response = self.client.post(reverse('users.login', locale='en-US'),
                                    {'username': 'newbie',
                                     'password': 'foobar22'}, follow=True)
        eq_(200, response.status_code)
        eq_('http://testserver/en-US/?fpa=1', response.redirect_chain[0][0])

    @mock.patch.object(email_utils, 'send_messages')
    @mock.patch.object(Site.objects, 'get_current')
    def test_new_user_smtp_error(self, get_current, send_messages):
        """If sending the confirmation email fails, the error is surfaced
        and no user is created."""
        get_current.return_value.domain = 'su.mo.com'
        send_messages.side_effect = send_mail_raise_smtp
        response = self.client.post(
            reverse('users.registercontributor', locale='en-US'),
            {'username': 'newbie',
             'email': 'newbie@example.com',
             'password': 'foobar22',
             'password2': 'foobar22'}, follow=True)
        self.assertContains(response, unicode(ERROR_SEND_EMAIL))
        assert not User.objects.filter(username='newbie').exists()

    @mock.patch.object(Site.objects, 'get_current')
    def test_unicode_password(self, get_current):
        """Passwords containing non-ASCII characters register and log in."""
        u_str = u'a1\xe5\xe5\xee\xe9\xf8\xe7\u6709\u52b9'
        get_current.return_value.domain = 'su.mo.com'
        response = self.client.post(reverse('users.register', locale='ja'),
                                    {'username': 'cjkuser',
                                     'email': 'cjkuser@example.com',
                                     'password': u_str,
                                     'password2': u_str}, follow=True)
        eq_(200, response.status_code)
        u = User.objects.get(username='cjkuser')
        u.is_active = True
        u.save()
        assert u.password.startswith('sha256')
        # make sure you can login now
        response = self.client.post(reverse('users.login', locale='ja'),
                                    {'username': 'cjkuser',
                                     'password': u_str}, follow=True)
        eq_(200, response.status_code)
        eq_('http://testserver/ja/?fpa=1', response.redirect_chain[0][0])

    @mock.patch.object(Site.objects, 'get_current')
    def test_new_user_activation(self, get_current):
        """Visiting the activation URL activates the account and removes
        the RegistrationProfile."""
        get_current.return_value.domain = 'su.mo.com'
        user_ = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        assert not user_.is_active
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.activate', args=[user_.id, key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        user_ = User.objects.get(pk=user_.pk)
        assert user_.is_active
        # Verify that the RegistrationProfile was nuked.
        eq_(0, RegistrationProfile.objects.filter(activation_key=key).count())

    @mock.patch.object(Site.objects, 'get_current')
    def test_question_created_time_on_user_activation(self, get_current):
        """Activation bumps the created timestamp of the user's questions."""
        get_current.return_value.domain = 'su.mo.com'
        user_ = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        assert not user_.is_active
        then = datetime.now() - timedelta(days=1)
        q = QuestionFactory(creator=user_, created=then)
        assert q.created == then
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.activate', args=[user_.id, key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        user_ = User.objects.get(pk=user_.pk)
        assert user_.is_active
        q = Question.objects.get(pk=q.pk)
        assert q.created > then

    @mock.patch.object(Site.objects, 'get_current')
    def test_new_user_claim_watches(self, get_current):
        """Claim user watches upon activation."""
        watch(email='sumouser@test.com', user=UserFactory(), save=True)
        get_current.return_value.domain = 'su.mo.com'
        user_ = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        key = RegistrationProfile.objects.all()[0].activation_key
        self.client.get(reverse('users.activate', args=[user_.id, key]), follow=True)
        # Watches are claimed.
        assert user_.watch_set.exists()

    @mock.patch.object(Site.objects, 'get_current')
    def test_new_user_with_questions(self, get_current):
        """The user's questions are mentioned on the confirmation page."""
        get_current.return_value.domain = 'su.mo.com'
        # TODO: remove this test once we drop unconfirmed questions.
        user_ = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        # Before we activate, let's create a question.
        q = Question.objects.create(title='test_question', creator=user_,
                                    content='test')
        # Activate account.
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.activate', args=[user_.id, key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        q = Question.objects.get(creator=user_)
        # Question is listed on the confirmation page.
        assert 'test_question' in response.content
        assert q.get_absolute_url().encode('utf8') in response.content

    def test_duplicate_username(self):
        """Registering an existing username shows a validation error."""
        u = UserFactory()
        response = self.client.post(
            reverse('users.registercontributor', locale='en-US'),
            {'username': u.username,
             'email': 'newbie@example.com',
             'password': 'foo',
             'password2': 'foo'}, follow=True)
        self.assertContains(response, 'already exists')

    def test_duplicate_email(self):
        """Registering an existing email shows a validation error."""
        u = UserFactory(email='noob@example.com')
        # NOTE(review): this second creation with the same email looks like a
        # redundant leftover — the factory above already made the duplicate.
        User.objects.create(username='noob', email='noob@example.com').save()
        response = self.client.post(
            reverse('users.registercontributor', locale='en-US'),
            {'username': 'newbie',
             'email': u.email,
             'password': 'foo',
             'password2': 'foo'}, follow=True)
        self.assertContains(response, 'already exists')

    @mock.patch.object(Site.objects, 'get_current')
    def test_active_user_activation(self, get_current):
        """If an already active user tries to activate with a valid key,
        we take them to login page and show message."""
        get_current.return_value.domain = 'su.mo.com'
        user = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        user.is_active = True
        user.save()
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.activate', args=[user.id, key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_('Your account is already activated, log in below.',
            doc('ul.user-messages').text())

    @mock.patch.object(Site.objects, 'get_current')
    def test_old_activation_url(self, get_current):
        """The legacy key-only activation URL still activates the account."""
        get_current.return_value.domain = 'su.mo.com'
        user = RegistrationProfile.objects.create_inactive_user(
            'sumouser1234', 'testpass', 'sumouser@test.com')
        assert not user.is_active
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.old_activate', args=[key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        user = User.objects.get(pk=user.pk)
        assert user.is_active

    @mock.patch.object(Site.objects, 'get_current')
    def test_new_contributor(self, get_current):
        """Verify that interested contributors are added to group."""
        get_current.return_value.domain = 'su.mo.com'
        group_name = 'Registered as contributor'
        GroupFactory(name=group_name)
        data = {
            'username': 'newbie',
            'email': 'newbie@example.com',
            'password': 'foobar22',
            'password2': 'foobar22',
            'interested': 'yes'}
        response = self.client.post(reverse('users.register', locale='en-US'),
                                    data, follow=True)
        eq_(200, response.status_code)
        u = User.objects.get(username='newbie')
        eq_(group_name, u.groups.all()[0].name)
        # Activate user and verify email is sent.
        key = RegistrationProfile.objects.all()[0].activation_key
        url = reverse('users.activate', args=[u.id, key])
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        eq_(2, len(mail.outbox))
        assert mail.outbox[1].subject.find('Welcome to') == 0
        assert u.username in mail.outbox[1].body
class ChangeEmailTestCase(TestCase):
    """Tests for the change-email flow: request, confirmation, validation."""
    client_class = LocalizingClient
    def setUp(self):
        self.user = UserFactory()
        self.client.login(username=self.user.username, password='testpass')
        super(ChangeEmailTestCase, self).setUp()
    def test_redirect(self):
        """Test our redirect from old url to new one."""
        response = self.client.get(reverse('users.old_change_email',
                                           locale='en-US'), follow=False)
        eq_(301, response.status_code)
        eq_('http://testserver/en-US/users/change_email', response['location'])
    @mock.patch.object(Site.objects, 'get_current')
    def test_user_change_email(self, get_current):
        """Send email to change user's email and then change it."""
        get_current.return_value.domain = 'su.mo.com'
        # Attempt to change email.
        response = self.client.post(reverse('users.change_email'),
                                    {'email': 'paulc@trololololololo.com'},
                                    follow=True)
        eq_(200, response.status_code)
        # Be notified to click a confirmation link.
        eq_(1, len(mail.outbox))
        assert mail.outbox[0].subject.find('Please confirm your') == 0
        ec = EmailChange.objects.all()[0]
        assert ec.activation_key in mail.outbox[0].body
        eq_('paulc@trololololololo.com', ec.email)
        # Visit confirmation link to change email.
        response = self.client.get(reverse('users.confirm_email',
                                           args=[ec.activation_key]))
        eq_(200, response.status_code)
        u = User.objects.get(username=self.user.username)
        eq_('paulc@trololololololo.com', u.email)
    def test_user_change_email_same(self):
        """Changing to same email shows validation error."""
        self.user.email = 'valid@email.com'
        self.user.save()
        response = self.client.post(reverse('users.change_email'),
                                    {'email': self.user.email})
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_('This is your current email.', doc('ul.errorlist').text())
    def test_user_change_email_duplicate(self):
        """Changing to an email already used by another user shows an error."""
        u = UserFactory(email='newvalid@email.com')
        response = self.client.post(reverse('users.change_email'),
                                    {'email': u.email})
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_('A user with that email address already exists.',
            doc('ul.errorlist').text())
    @mock.patch.object(Site.objects, 'get_current')
    def test_user_confirm_email_duplicate(self, get_current):
        """If we detect a duplicate email when confirming an email change,
        don't change it and notify the user."""
        get_current.return_value.domain = 'su.mo.com'
        old_email = self.user.email
        new_email = 'newvalid@email.com'
        response = self.client.post(reverse('users.change_email'),
                                    {'email': new_email})
        eq_(200, response.status_code)
        assert mail.outbox[0].subject.find('Please confirm your') == 0
        ec = EmailChange.objects.all()[0]
        # Before new email is confirmed, give the same email to a user
        u = UserFactory(email=new_email)
        # Visit confirmation link and verify email wasn't changed.
        response = self.client.get(reverse('users.confirm_email',
                                           args=[ec.activation_key]))
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(u'Unable to change email for user %s' % self.user.username,
            doc('article h1').text())
        u = User.objects.get(username=self.user.username)
        eq_(old_email, u.email)
class MakeContributorTests(TestCase):
    """Tests for the view that opts a user into the contributor group."""

    def setUp(self):
        self.user = UserFactory()
        self.client.login(username=self.user.username, password='testpass')
        GroupFactory(name=CONTRIBUTOR_GROUP)
        super(MakeContributorTests, self).setUp()

    def test_make_contributor(self):
        """POSTing to users.make_contributor adds the user to the group."""
        membership = self.user.groups.filter(name=CONTRIBUTOR_GROUP)
        eq_(0, membership.count())
        url = reverse('users.make_contributor', force_locale=True)
        res = self.client.post(url)
        eq_(302, res.status_code)
        # The lazy queryset re-evaluates here, after the POST.
        eq_(1, membership.count())
class AvatarTests(TestCase):
    """Tests for uploading and replacing a user's avatar image."""
    def setUp(self):
        self.user = UserFactory()
        self.profile = self.user.profile
        self.client.login(username=self.user.username, password='testpass')
        super(AvatarTests, self).setUp()
    def tearDown(self):
        # Remove the uploaded avatar file so tests don't leak files on disk.
        p = Profile.objects.get(user=self.user)
        if os.path.exists(p.avatar.path):
            os.unlink(p.avatar.path)
        super(AvatarTests, self).tearDown()
    def test_upload_avatar(self):
        """Uploading a JPEG stores a (converted) PNG avatar."""
        assert not self.profile.avatar, 'User has no avatar.'
        with open('kitsune/upload/tests/media/test.jpg') as f:
            url = reverse('users.edit_avatar', locale='en-US')
            data = {'avatar': f}
            r = self.client.post(url, data)
            eq_(302, r.status_code)
            p = Profile.objects.get(user=self.user)
            assert p.avatar, 'User has an avatar.'
            assert p.avatar.path.endswith('.png')
    def test_replace_missing_avatar(self):
        """If an avatar is missing, allow replacing it."""
        assert not self.profile.avatar, 'User has no avatar.'
        # Point the profile at a file that does not exist on disk.
        self.profile.avatar = 'path/does/not/exist.jpg'
        self.profile.save()
        assert self.profile.avatar, 'User has a bad avatar.'
        with open('kitsune/upload/tests/media/test.jpg') as f:
            url = reverse('users.edit_avatar', locale='en-US')
            data = {'avatar': f}
            r = self.client.post(url, data)
            eq_(302, r.status_code)
            p = Profile.objects.get(user=self.user)
            assert p.avatar, 'User has an avatar.'
            assert not p.avatar.path.endswith('exist.jpg')
            assert p.avatar.path.endswith('.png')
class SessionTests(TestCase):
    """Tests for the SESSION_EXISTS_COOKIE lifecycle on login/logout."""
    client_class = LocalizingClient
    def setUp(self):
        self.user = UserFactory()
        self.client.logout()
        super(SessionTests, self).setUp()
    def test_login_sets_extra_cookie(self):
        """On login, set the SESSION_EXISTS_COOKIE."""
        url = reverse('users.login')
        res = self.client.post(url, {'username': self.user.username,
                                     'password': 'testpass'})
        assert settings.SESSION_EXISTS_COOKIE in res.cookies
        c = res.cookies[settings.SESSION_EXISTS_COOKIE]
        # The cookie must be readable by JS over plain HTTP, so not secure.
        assert 'secure' not in c.output().lower()
    def test_logout_deletes_cookie(self):
        """On logout, delete the SESSION_EXISTS_COOKIE."""
        url = reverse('users.logout')
        res = self.client.post(url)
        assert settings.SESSION_EXISTS_COOKIE in res.cookies
        c = res.cookies[settings.SESSION_EXISTS_COOKIE]
        # Deletion is signalled by an expiry date in the past (epoch year).
        assert '1970' in c['expires']
    @override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True)
    def test_expire_at_browser_close(self):
        """If SESSION_EXPIRE_AT_BROWSER_CLOSE, do expire then."""
        url = reverse('users.login')
        res = self.client.post(url, {'username': self.user.username,
                                     'password': 'testpass'})
        c = res.cookies[settings.SESSION_EXISTS_COOKIE]
        # An empty max-age makes it a session cookie.
        eq_('', c['max-age'])
    @override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False,
                       SESSION_COOKIE_AGE=123)
    def test_expire_in_a_long_time(self):
        """If not SESSION_EXPIRE_AT_BROWSER_CLOSE, set an expiry date."""
        url = reverse('users.login')
        res = self.client.post(url, {'username': self.user.username,
                                     'password': 'testpass'})
        c = res.cookies[settings.SESSION_EXISTS_COOKIE]
        eq_(123, c['max-age'])
class UserSettingsTests(TestCase):
    """Tests for creating per-user settings through the edit-settings view."""

    def setUp(self):
        self.user = UserFactory()
        self.profile = self.user.profile
        self.client.login(username=self.user.username, password='testpass')
        super(UserSettingsTests, self).setUp()

    def test_create_setting(self):
        # The user starts out with no stored settings at all.
        eq_(Setting.objects.filter(user=self.user).count(), 0)
        url = reverse('users.edit_settings', locale='en-US')
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        response = self.client.post(
            url, {'forums_watch_new_thread': True}, follow=True)
        eq_(200, response.status_code)
        # Posting the form persists the setting for the user.
        assert Setting.get_for_user(self.user, 'forums_watch_new_thread')
class UserProfileTests(TestCase):
    """Tests for public profile pages and user deactivation."""
    def setUp(self):
        self.user = UserFactory()
        self.profile = self.user.profile
        self.userrl = reverse('users.profile', args=[self.user.username], locale='en-US')
        super(UserProfileTests, self).setUp()
    def test_ProfileFactory(self):
        res = self.client.get(self.userrl)
        self.assertContains(res, self.user.username)
    def test_profile_redirect(self):
        """Ensure that old profile URL's get redirected."""
        res = self.client.get(reverse('users.profile', args=[self.user.pk],
                                      locale='en-US'))
        eq_(302, res.status_code)
    def test_profile_inactive(self):
        """Inactive users don't have a public profile."""
        self.user.is_active = False
        self.user.save()
        res = self.client.get(self.userrl)
        eq_(404, res.status_code)
    def test_profile_post(self):
        # Profile pages are read-only: POST is not an allowed method.
        res = self.client.post(self.userrl)
        eq_(405, res.status_code)
    def test_profile_deactivate(self):
        """Test user deactivation"""
        p = UserFactory().profile
        self.client.login(username=self.user.username, password='testpass')
        # Without the deactivate_users permission the request is forbidden.
        res = self.client.post(reverse('users.deactivate', locale='en-US'), {'user_id': p.user.id})
        eq_(403, res.status_code)
        add_permission(self.user, Profile, 'deactivate_users')
        res = self.client.post(reverse('users.deactivate', locale='en-US'), {'user_id': p.user.id})
        eq_(302, res.status_code)
        # A Deactivation log entry records who performed the action.
        log = Deactivation.objects.get(user_id=p.user_id)
        eq_(log.moderator_id, self.user.id)
        p = Profile.objects.get(user_id=p.user_id)
        assert not p.user.is_active
    def test_deactivate_and_flag_spam(self):
        self.client.login(username=self.user.username, password='testpass')
        add_permission(self.user, Profile, 'deactivate_users')
        # Verify content is flagged as spam when requested.
        u = UserFactory()
        AnswerFactory(creator=u)
        QuestionFactory(creator=u)
        url = reverse('users.deactivate-spam', locale='en-US')
        res = self.client.post(url, {'user_id': u.id})
        eq_(302, res.status_code)
        eq_(1, Question.objects.filter(creator=u, is_spam=True).count())
        eq_(0, Question.objects.filter(creator=u, is_spam=False).count())
        eq_(1, Answer.objects.filter(creator=u, is_spam=True).count())
        eq_(0, Answer.objects.filter(creator=u, is_spam=False).count())
| bsd-3-clause |
akash1808/nova_test_latest | nova/tests/unit/scheduler/filters/test_disk_filters.py | 58 | 4487 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import disk_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestDiskFilter(test.NoDBTestCase):
    """Tests for the scheduler's DiskFilter and AggregateDiskFilter."""
    def setUp(self):
        super(TestDiskFilter, self).setUp()
    def test_disk_filter_passes(self):
        # Requested disk (1 + 1 GB + 512 MB swap) fits in 11 GB free.
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 1,
            'ephemeral_gb': 1, 'swap': 512}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_disk_filter_fails(self):
        # Requested disk (10 + 1 GB + 1 GB swap) exceeds 11 GB free.
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 10,
            'ephemeral_gb': 1, 'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_disk_filter_oversubscribe(self):
        # With a 10x allocation ratio, 12 GB of disk can host 120 GB.
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 18, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])
    def test_disk_filter_oversubscribe_fail(self):
        # One more ephemeral GB than the previous test: over the 119 GB cap.
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 19, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_value_error(self, agg_mock):
        # A non-numeric aggregate value must not crash the filter.
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 1,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_default_value(self, agg_mock):
        # An empty aggregate falls back to the global conf ratio.
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 2,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        # Uses global conf.
        agg_mock.return_value = set([])
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
        agg_mock.return_value = set(['2'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
| apache-2.0 |
yosriayed/yate5 | share/scripts/yaypm/setup.py | 10 | 1275 | '''
setup.py
========
setup.py is part of the YAYPM library
Copyright 2006 Maciek Kaminski.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
This is the distutils installation script for YAYPM.
'''
from distutils.core import setup
# Distutils package metadata for YAYPM; install with `python setup.py install`.
setup(
    name = 'yaypm',
    version = '0.2',
    description = 'Yet Another YATE Python Module',
    author = 'Maciek Kaminski',
    author_email = 'maciejka@tiger.com.pl',
    url = 'http://yate.null.ro/pmwiki/index.php?n=Main.YAYPM',
    packages = ['yaypm',
                'yaypm.utils',
                'yaypm.utils.resources',
                'yaypm.utils.tester',
                'yaypm.examples']
    )
| gpl-2.0 |
nils-wisiol/pypuf | pypuf/studies/why_attackers_lose/fig_04_a.py | 1 | 3434 | """
Figure 4 (a) of "Why attackers lose: design and security analysis of arbitrarily large
XOR arbiter PUFs", Accepted 26 Feb 2019 Journal of Cryptographic Engineering.
This study examines the minimum number of votes needed such
that for a uniformly random challenge c we have Pr[Stab(c) ≥ 95%] ≥
80% for different k, as determined by a simulation (Sect. 6.2). The
simulation uses arbiter chain length of n = 32; however, we showed
that the results are independent of n. This log–log graph confirms the
result that the number of votes required grows polynomially.
"""
from matplotlib import pyplot
from matplotlib.ticker import FixedLocator, ScalarFormatter
from seaborn import lineplot, scatterplot
from pypuf.experiments.experiment.majority_vote import ExperimentMajorityVoteFindVotes, Parameters
from pypuf.studies.base import Study
class NumberOfVotesRequiredStudy(Study):
    """
    Generates Figure 4 (a) of "Why attackers lose: design and security analysis of arbitrarily large XOR arbiter PUFs"
    """
    SHUFFLE = True
    COMPRESSION = True
    # Number of independent repetitions per k value.
    RESTARTS = 200
    # k is swept from K_RANGE to K_MAX in steps of K_RANGE.
    K_RANGE = 2
    K_MAX = 32
    # Arbiter chain length n (results were shown to be independent of n).
    LOWERCASE_N = 32
    # Number of challenges N evaluated per experiment.
    UPPERCASE_N = 2000
    S_RATIO = .033
    ITERATIONS = 10
    SEED_CHALLENGES = 0xf000
    # Target: Pr[Stab(c) >= STAB_C] >= STAB_ALL.
    STAB_C = .95
    STAB_ALL = .80
    def experiments(self):
        """Build one majority-vote experiment per (restart, k) pair."""
        e = []
        for i in range(self.RESTARTS):
            for k in range(self.K_RANGE, self.K_MAX + 1, self.K_RANGE):
                e.append(ExperimentMajorityVoteFindVotes(
                    progress_log_prefix=None,
                    parameters=Parameters(
                        n=self.LOWERCASE_N,
                        k=k,
                        challenge_count=self.UPPERCASE_N,
                        # Seeds are offset by the restart index so each
                        # repetition uses a distinct random instance.
                        seed_instance=0xC0DEBA5E + i,
                        seed_instance_noise=0xdeadbeef + i,
                        transformation='id',
                        combiner='xor',
                        mu=0,
                        sigma=1,
                        sigma_noise_ratio=self.S_RATIO,
                        seed_challenges=self.SEED_CHALLENGES + i,
                        desired_stability=self.STAB_C,
                        overall_desired_stability=self.STAB_ALL,
                        minimum_vote_count=1,
                        iterations=self.ITERATIONS,
                        bias=None
                    )
                ))
        return e
    def plot(self):
        """Plot mean vote count vs. k on a log-log scale and save as PDF."""
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.set(xscale='log', yscale='log')
        ax.xaxis.set_major_locator(FixedLocator([2, 4, 6, 8, 12, 16, 20, 24, 28, 32]))
        ax.xaxis.set_major_formatter(ScalarFormatter())
        ax.yaxis.set_major_locator(FixedLocator([1, 2, 5, 10, 20, 50]))
        ax.yaxis.set_major_formatter(ScalarFormatter())
        # Average the vote counts over all restarts for each k.
        r = self.experimenter.results[['k', 'vote_count']].groupby(['k']).mean().reset_index()
        lineplot(
            x='k', y='vote_count', data=r,
            ax=ax, estimator=None, ci=None
        )
        scatterplot(
            x='k', y='vote_count', data=r, ax=ax
        )
        fig = ax.get_figure()
        fig.set_size_inches(6, 2.5)
        ax.set_xlabel('number of arbiter chains in the MV XOR Arbiter PUF')
        ax.set_ylabel('number of votes')
        ax.set_title('Number of votes required for Pr[Stab(c)>95%] > 80%')
        fig.savefig('figures/{}.pdf'.format(self.name()), bbox_inches='tight', pad_inches=0)
| gpl-3.0 |
hep-gc/glint-horizon | openstack_dashboard/dashboards/admin/volumes/views.py | 7 | 3141 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes.
"""
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as project_tables
from openstack_dashboard.dashboards.project.volumes \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.volumes \
.volumes import views as volume_views
class IndexView(tables.MultiTableView, project_tabs.VolumeTableMixIn):
    """Admin index page listing all tenants' volumes and the volume types."""
    table_classes = (project_tables.VolumesTable,
                     project_tables.VolumeTypesTable)
    template_name = "admin/volumes/index.html"
    def get_volumes_data(self):
        """Return all volumes, annotated with attachment and tenant names."""
        volumes = self._get_volumes(search_opts={'all_tenants': True})
        instances = self._get_instances(search_opts={'all_tenants': True})
        self._set_attachments_string(volumes, instances)
        # Gather our tenants to correlate against IDs
        try:
            tenants, has_more = keystone.tenant_list(self.request)
        except Exception:
            # Degrade gracefully: show volumes without tenant names.
            tenants = []
            msg = _('Unable to retrieve volume project information.')
            exceptions.handle(self.request, msg)
        tenant_dict = SortedDict([(t.id, t) for t in tenants])
        for volume in volumes:
            tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
            tenant = tenant_dict.get(tenant_id, None)
            volume.tenant_name = getattr(tenant, "name", None)
        return volumes
    def get_volume_types_data(self):
        """Return the list of volume types, or [] if cinder is unreachable."""
        try:
            volume_types = cinder.volume_type_list(self.request)
        except Exception:
            volume_types = []
            exceptions.handle(self.request,
                              _("Unable to retrieve volume types"))
        return volume_types
class DetailView(volume_views.DetailView):
    # Reuse the project-level volume detail view, swapping in the admin template.
    template_name = "admin/volumes/detail.html"
class CreateVolumeTypeView(forms.ModalFormView):
    """Modal form for creating a new volume type."""
    form_class = project_forms.CreateVolumeType
    template_name = 'admin/volumes/create_volume_type.html'
    success_url = 'horizon:admin:volumes:index'
    def get_success_url(self):
        # success_url is a URL name; resolve it to a path on success.
        return reverse(self.success_url)
willemneal/Docky | lib/pygments/tests/test_java.py | 4 | 1062 | # -*- coding: utf-8 -*-
"""
Basic JavaLexer Test
~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.token import Text, Name, Operator, Keyword
from pygments.lexers import JavaLexer
class JavaTest(unittest.TestCase):
    """Smoke tests for the Java lexer."""

    def setUp(self):
        self.lexer = JavaLexer()
        self.maxDiff = None

    def testEnhancedFor(self):
        """A labeled enhanced (for-each) loop tokenizes as expected."""
        code = u'label:\nfor(String var2: var1) {}\n'
        expected = [
            (Name.Label, u'label:'),
            (Text, u'\n'),
            (Keyword, u'for'),
            (Operator, u'('),
            (Name, u'String'),
            (Text, u' '),
            (Name, u'var2'),
            (Operator, u':'),
            (Text, u' '),
            (Name, u'var1'),
            (Operator, u')'),
            (Text, u' '),
            (Operator, u'{'),
            (Operator, u'}'),
            (Text, u'\n'),
        ]
        self.assertEqual(expected, list(self.lexer.get_tokens(code)))
| mit |
cyril51/Sick-Beard | lib/requests/packages/chardet2/charsetprober.py | 77 | 1929 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for charset probers.

    Subclasses feed() byte buffers and report a detected charset name
    together with a confidence score; this base class provides the shared
    state handling and byte-filtering helpers.
    """

    def __init__(self):
        pass

    def reset(self):
        # Return to the initial "still detecting" state.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # The base prober never detects anything; subclasses override this.
        return None

    def feed(self, aBuf):
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        return 0.0

    def filter_high_bit_only(self, aBuf):
        # Collapse every run of ASCII bytes into a single space, keeping
        # only the high-bit bytes that carry charset information.
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        # Collapse every run of ASCII letters into a single space.
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
diox/olympia | src/olympia/ratings/tests/test_serializers.py | 4 | 13770 | # -*- coding: utf-8 -*-
from unittest.mock import Mock
from django.test import override_settings
from rest_framework.test import APIRequestFactory
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.ratings.models import Rating, RatingFlag
from olympia.ratings.serializers import RatingSerializer
class TestBaseRatingSerializer(TestCase):
def setUp(self):
self.request = APIRequestFactory().get('/')
self.view = Mock(spec=['get_addon_object', 'should_include_flags'])
self.view.get_addon_object.return_value = None
self.view.should_include_flags.return_value = False
self.user = user_factory()
def serialize(self, **extra_context):
context = {
'request': self.request,
'view': self.view,
}
context.update(extra_context)
serializer = RatingSerializer(context=context)
return serializer.to_representation(self.rating)
def test_basic(self):
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
result = self.serialize()
assert result['id'] == self.rating.pk
assert result['addon'] == {
'id': addon.pk,
'slug': addon.slug,
'name': {'en-US': addon.name},
'icon_url': absolutify(addon.get_icon_url(64)),
}
assert result['body'] == str(self.rating.body)
assert result['created'] == (
self.rating.created.replace(microsecond=0).isoformat() + 'Z'
)
assert result['is_deleted'] is False
assert result['previous_count'] == int(self.rating.previous_count)
assert result['is_developer_reply'] is False
assert result['is_latest'] == self.rating.is_latest
assert result['score'] == int(self.rating.rating)
assert result['reply'] is None
assert result['user'] == {
'id': self.user.pk,
'name': str(self.user.name),
'url': None,
'username': self.user.username,
}
assert result['version'] == {
'id': self.rating.version.id,
'version': self.rating.version.version,
}
self.rating.update(version=None)
result = self.serialize()
assert result['version'] is None
# Check the default, when DRF_API_GATES['ratings-title-shim'] isn't set
assert 'title' not in result
# Check the default, when `show_flags_for=...` isn't sent.
assert 'flags' not in result
def test_deleted_rating_but_view_allowing_it_to_be_shown(self):
# We don't need to change self.view.should_access_deleted_ratings
# because the serializer is not fetching the rating itself, it's just
# serializing whatever instance we passed.
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
self.rating.delete()
result = self.serialize()
assert result['id'] == self.rating.pk
assert result['addon'] == {
'id': addon.pk,
'slug': addon.slug,
'name': {'en-US': addon.name},
'icon_url': absolutify(addon.get_icon_url(64)),
}
assert result['body'] == str(self.rating.body)
assert result['created'] == (
self.rating.created.replace(microsecond=0).isoformat() + 'Z'
)
assert result['is_deleted'] is True
assert result['previous_count'] == int(self.rating.previous_count)
assert result['is_developer_reply'] is False
assert result['is_latest'] == self.rating.is_latest
assert result['score'] == int(self.rating.rating)
assert result['reply'] is None
assert result['user'] == {
'id': self.user.pk,
'name': str(self.user.name),
'url': None,
'username': self.user.username,
}
assert result['version'] == {
'id': self.rating.version.id,
'version': self.rating.version.version,
}
@override_settings(DRF_API_GATES={None: ('ratings-rating-shim',)})
def test_ratings_score_is_rating_with_gate(self):
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
result = self.serialize()
assert result['id'] == self.rating.pk
assert result['rating'] == int(self.rating.rating)
@override_settings(DRF_API_GATES={None: ('ratings-title-shim',)})
def test_ratings_title_is_returned_with_gate(self):
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
result = self.serialize()
assert result['id'] == self.rating.pk
assert 'title' in result
assert result['title'] is None
def test_url_for_yourself(self):
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
# should include the account profile for your own requests
self.request.user = self.user
result = self.serialize()
assert result['user']['url'] == absolutify(self.user.get_url_path())
def test_url_for_admins(self):
addon = addon_factory()
self.view.get_addon_object.return_value = addon
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
# should include account profile url for admins
admin = user_factory()
self.grant_permission(admin, 'Users:Edit')
self.request.user = admin
result = self.serialize()
assert result['user']['url'] == absolutify(self.user.get_url_path())
def test_addon_slug_even_if_view_doesnt_return_addon_object(self):
addon = addon_factory()
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
result = self.serialize()
assert result['id'] == self.rating.pk
assert result['addon'] == {
'id': addon.pk,
'slug': addon.slug,
'name': {'en-US': addon.name},
'icon_url': absolutify(addon.get_icon_url(64)),
}
def test_with_previous_count(self):
addon = addon_factory()
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
rating=4,
version=addon.current_version,
body='This is my rëview. Like ît?',
)
self.rating.update(is_latest=False, previous_count=42)
result = self.serialize()
assert result['id'] == self.rating.pk
assert result['previous_count'] == 42
assert result['is_latest'] is False
def test_with_reply(self):
def _test_reply(data):
assert data['id'] == reply.pk
assert data['body'] == str(reply.body)
assert data['created'] == (
reply.created.replace(microsecond=0).isoformat() + 'Z'
)
assert data['is_deleted'] is False
assert data['is_developer_reply'] is True
assert data['user'] == {
'id': reply_user.pk,
'name': str(reply_user.name),
# should be the profile for a developer
'url': absolutify(reply_user.get_url_path()),
'username': reply_user.username,
}
reply_user = user_factory()
addon = addon_factory(users=[reply_user])
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
version=addon.current_version,
body='This is my rëview. Like ît ?',
)
reply = Rating.objects.create(
addon=addon,
user=reply_user,
version=addon.current_version,
body='Thîs is a reply.',
reply_to=self.rating,
)
result = self.serialize()
assert result['reply']
assert 'score' not in result['reply']
assert 'reply' not in result['reply']
_test_reply(result['reply'])
# If we instantiate a standard RatingSerializer class with a reply, it
# should work like normal. `score` and `reply` should be there, blank.
self.rating = reply
result = self.serialize()
_test_reply(result)
assert result['score'] is None
assert result['reply'] is None
def test_reply_profile_url_for_yourself(self):
addon = addon_factory()
reply_user = user_factory()
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
version=addon.current_version,
body='This is my rëview. Like ît ?',
)
Rating.objects.create(
addon=addon,
user=reply_user,
version=addon.current_version,
body='Thîs is a reply.',
reply_to=self.rating,
)
# should be the profile for your own requests
self.request.user = reply_user
result = self.serialize()
assert result['reply']['user']['url'] == absolutify(reply_user.get_url_path())
def test_with_deleted_reply(self):
addon = addon_factory()
reply_user = user_factory()
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
version=addon.current_version,
body='This is my rëview. Like ît ?',
)
reply = Rating.objects.create(
addon=addon,
user=reply_user,
version=addon.current_version,
body='Thîs is a reply.',
reply_to=self.rating,
)
reply.delete()
result = self.serialize()
assert result['reply'] is None
def test_with_deleted_reply_but_view_allowing_it_to_be_shown(self):
reply_user = user_factory()
addon = addon_factory(users=[reply_user])
self.view.get_addon_object.return_value = addon
self.view.should_access_deleted_ratings = True
self.rating = Rating.objects.create(
addon=addon,
user=self.user,
version=addon.current_version,
body='This is my rëview. Like ît ?',
)
reply = Rating.objects.create(
addon=addon,
user=reply_user,
version=addon.current_version,
body='Thîs is a reply.',
reply_to=self.rating,
)
reply.delete()
result = self.serialize()
assert result['reply']
assert 'rating' not in result['reply']
assert 'reply' not in result['reply']
assert result['reply']['id'] == reply.pk
assert result['reply']['body'] == str(reply.body)
assert result['reply']['created'] == (
reply.created.replace(microsecond=0).isoformat() + 'Z'
)
assert result['reply']['is_deleted'] is True
assert result['reply']['user'] == {
'id': reply_user.pk,
'name': str(reply_user.name),
'url': absolutify(reply_user.get_url_path()),
'username': reply_user.username,
}
def test_readonly_fields(self):
serializer = RatingSerializer(context={'request': self.request})
assert serializer.fields['created'].read_only is True
assert serializer.fields['id'].read_only is True
assert serializer.fields['reply'].read_only is True
assert serializer.fields['user'].read_only is True
    def test_include_flags(self):
        """When the view opts in, the serializer exposes the caller's flags.

        'flags' is an empty list when the requesting user has not flagged
        the rating, and lists their flag/note pairs once they have.
        """
        addon = addon_factory()
        self.request.user = user_factory()
        self.view.get_addon_object.return_value = addon
        self.view.should_include_flags.return_value = True
        self.rating = Rating.objects.create(
            addon=addon,
            user=self.user,
            rating=4,
            version=addon.current_version,
            body='This is my rëview. Like ît?',
        )
        # First pass: no flags exist yet, so the list is empty.
        result = self.serialize()
        assert 'flags' in result
        assert result['flags'] == []
        # Second pass: the requesting user flags the rating.
        RatingFlag.objects.create(
            rating=self.rating,
            user=self.request.user,
            flag=RatingFlag.OTHER,
            note='foo',
        )
        result = self.serialize()
        assert 'flags' in result
        assert result['flags'] == [{'flag': RatingFlag.OTHER, 'note': 'foo'}]
| bsd-3-clause |
liuzhaoguo/FreeROI-1 | froi/gui/base/labelconfig.py | 2 | 4992 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import collections
from PyQt4.QtGui import *
class LabelConfig(object):
    """Configuration for a set of labels.

    Maps each label name to an integer index and a display colour, backed
    by a plain-text file whose non-empty lines have the form::

        <index> <name> <red> <green> <blue>
    """
    # NOTE: attribute name is a typo ("defalut") but is kept as-is for
    # backward compatibility with any external references; the canonical
    # default colour comes from the default_color() method below.
    defalut_color = QColor(255, 0, 0)

    def __init__(self, filepath, is_global=True):
        """Load the label data stored at `filepath`.

        Parameters
        ----------
        filepath : str
            Path of the label-configuration file to load.
        is_global : bool
            Whether this configuration is shared globally (default True).
        """
        self.filepath = filepath
        # Config name is the file's basename without its extension.
        self.name = os.path.basename(filepath).split('.')[0]
        # OrderedDict keeps labels in file order: label name -> int index.
        self.label_index = collections.OrderedDict()
        # label name -> QColor.
        self.label_color = {}
        self.label_list = []
        self.load(filepath)
        self._is_global = is_global

    def load(self, filepath):
        """Parse ``index name r g b`` records from `filepath`."""
        # Open read-only: load() never writes (the previous 'r+' mode
        # needlessly required write permission on the file).
        with open(filepath, 'r') as f:
            for line in f:
                line = line.split()
                if line:
                    self.label_index[line[1]] = int(line[0])
                    self.label_color[line[1]] = QColor(int(line[2]), int(line[3]), int(line[4]))

    def dump(self):
        """Write the label config back to its file on disk."""
        if hasattr(self, 'filepath'):
            with open(self.filepath, 'w') as f:
                for label in self.label_index:
                    color = self.label_color[label]
                    f.write('%3d\t%-25s\t%3d %3d %3d\n' % (self.label_index[label], label, color.red(), color.green(), color.blue()))

    def get_filepath(self):
        """Return the path of the backing label file."""
        return self.filepath

    def new_index(self):
        """Return the next unused label index (max existing + 1, or 1)."""
        if self.label_index:
            return max(self.label_index.values()) + 1
        else:
            return 1

    def default_color(self):
        """Return the default label color (opaque red)."""
        return QColor(255, 0, 0)

    def add_label(self, label, index=None, color=None):
        """Add a new label, allocating index/color defaults if omitted.

        Raises
        ------
        ValueError
            If `index` is already used by another label.
        """
        if index is None:
            index = self.new_index()
        if color is None:
            color = self.default_color()
        if self.has_index(index):
            # Call-form raise is valid in both Python 2 and 3; the original
            # comma-form statement was Python-2-only syntax.
            raise ValueError('Index already exists, choose another one')
        self.label_index[label] = index
        self.label_color[label] = color

    def remove_label(self, label):
        """Remove `label` if present; silently ignore unknown labels."""
        if self.has_label(label):
            del self.label_index[label]
            del self.label_color[label]

    def edit_label(self, old_label, label, color):
        """Rename `old_label` to `label` (keeping its index) and recolor it."""
        if self.has_label(old_label):
            self.label_index[label] = self.get_label_index(old_label)
            self.label_color[label] = color
            if old_label != label:
                del self.label_index[old_label]

    def has_label(self, label):
        """Return True if `label` exists in this config."""
        return label in self.label_index

    def has_index(self, index):
        """Return True if `index` is already assigned to some label."""
        return index in self.label_index.values()

    def get_label_list(self):
        """Return all label names in this config."""
        return self.label_index.keys()

    def get_index_list(self):
        """Return all indexes in this config, sorted ascending."""
        return sorted(self.label_index.values())

    def get_label_index(self, label):
        """Return the index of `label`, or None if falsy/unknown."""
        if label:
            if self.has_label(label):
                return self.label_index[label]

    def get_index_label(self, index):
        """Return the label mapped to `index`, or '' if none matches."""
        for label, ind in self.label_index.iteritems():
            if ind == index:
                return label
        return ''

    def get_label_color(self, label):
        """Return the QColor of `label`, or None if falsy/unknown."""
        if label:
            if self.has_label(label):
                return self.label_color[label]

    def update_label_color(self, label, color):
        """Set the color of an existing `label` to `color`."""
        if self.has_label(label):
            self.label_color[label] = color

    def save(self):
        """Persist the label config to disk (alias for dump())."""
        self.dump()

    def get_colormap(self):
        """Return a mapping of index -> [r, g, b] for every label."""
        rgb = lambda color: [color.red(), color.green(), color.blue()]
        return dict([(self.label_index[label], rgb(self.label_color[label])) for
                     label in self.label_index.keys()])

    def __str__(self):
        """Show the list of label names for debugging."""
        return str(self.label_index.keys())

    def get_name(self):
        """Return the name this label config belongs to."""
        return self.name

    @property
    def is_global(self):
        """Whether the current label config is global."""
        return self._is_global

    def get_label_index_pair(self):
        """Return all (label, index) pairs."""
        return list(self.label_index.iteritems())
| bsd-3-clause |
radiosilence/pip | tests/test_compat.py | 12 | 2103 | """
Tests for compatibility workarounds.
"""
import os
from tests.test_pip import (here, reset_env, run_pip, pyversion,
assert_all_changes)
def test_debian_egg_name_workaround():
    """
    We can uninstall packages installed with the pyversion removed from the
    egg-info metadata directory name.

    Refs:
    http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
    https://bugs.launchpad.net/ubuntu/+source/distribute/+bug/725178
    https://bitbucket.org/ianb/pip/issue/104/pip-uninstall-on-ubuntu-linux
    """
    env = reset_env()
    result = run_pip('install', 'INITools==0.2', expect_error=True)
    # Normal install creates INITools-0.2-py<X.Y>.egg-info.
    egg_info = os.path.join(
        env.site_packages, "INITools-0.2-py%s.egg-info" % pyversion)
    # Debian only removes pyversion for global installs, not inside a venv
    # so even if this test runs on a Debian/Ubuntu system with broken setuptools,
    # since our test runs inside a venv we'll still have the normal .egg-info
    assert egg_info in result.files_created, "Couldn't find %s" % egg_info
    # The Debian no-pyversion version of the .egg-info
    mangled = os.path.join(env.site_packages, "INITools-0.2.egg-info")
    assert mangled not in result.files_created, "Found unexpected %s" % mangled
    # Simulate a Debian install by copying the .egg-info to their name for it
    full_egg_info = os.path.join(env.root_path, egg_info)
    assert os.path.isdir(full_egg_info)
    full_mangled = os.path.join(env.root_path, mangled)
    # os.renames moves the metadata dir to the Debian-style name in place.
    os.renames(full_egg_info, full_mangled)
    assert os.path.isdir(full_mangled)
    # Try the uninstall and verify that everything is removed.
    result2 = run_pip("uninstall", "INITools", "-y")
    assert_all_changes(result, result2, [env.venv/'build', 'cache'])
def test_setup_py_with_dos_line_endings():
    """
    It doesn't choke on a setup.py file that uses DOS line endings (\\r\\n).

    Refs https://github.com/pypa/pip/issues/237
    """
    reset_env()
    package_dir = os.path.join(here, 'packages', 'LineEndings')
    run_pip('install', os.path.abspath(package_dir), expect_error=False)
| mit |
techdragon/django | django/utils/formats.py | 72 | 9188 | import datetime
import decimal
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, datetime_safe, numberformat, six
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import (
check_for_language, get_language, to_locale,
)
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
'DATETIME_INPUT_FORMATS': [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
],
}
FORMAT_SETTINGS = frozenset([
'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR',
'NUMBER_GROUPING',
'FIRST_DAY_OF_WEEK',
'MONTH_DAY_FORMAT',
'TIME_FORMAT',
'DATE_FORMAT',
'DATETIME_FORMAT',
'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT',
'YEAR_MONTH_FORMAT',
'DATE_INPUT_FORMATS',
'TIME_INPUT_FORMATS',
'DATETIME_INPUT_FORMATS',
])
def reset_format_cache():
    """Clear any cached formats.

    This method is provided primarily for testing purposes,
    so that the effects of cached formats can be removed.
    """
    global _format_cache, _format_modules_cache
    # Rebind (rather than .clear()) so code holding a reference to the old
    # dicts is unaffected; new lookups start from empty caches.
    _format_cache = {}
    _format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
    """
    Does the heavy lifting of finding format modules.

    Yields imported ``formats`` modules for `lang`, trying any project
    paths in `format_module_path` before Django's bundled locales, and the
    full locale (e.g. ``pt_BR``) before its base language (``pt``).
    Yields nothing for an unsupported language.
    """
    if not check_for_language(lang):
        return
    if format_module_path is None:
        format_module_path = settings.FORMAT_MODULE_PATH
    format_locations = []
    if format_module_path:
        # Allow both a single dotted path string and a list of them.
        if isinstance(format_module_path, six.string_types):
            format_module_path = [format_module_path]
        for path in format_module_path:
            format_locations.append(path + '.%s')
    # Django's own locale formats are always the final fallback.
    format_locations.append('django.conf.locale.%s')
    locale = to_locale(lang)
    locales = [locale]
    if '_' in locale:
        # Also try the base language of a composed locale (pt_BR -> pt).
        locales.append(locale.split('_')[0])
    for location in format_locations:
        for loc in locales:
            try:
                yield import_module('%s.formats' % (location % loc))
            except ImportError:
                # Missing format module for this location/locale: keep going.
                pass
def get_format_modules(lang=None, reverse=False):
    """Return the list of format modules found for `lang`.

    Results are memoized per language in ``_format_modules_cache``. With
    ``reverse=True`` a reversed copy of the cached list is returned.
    """
    if lang is None:
        lang = get_language()
    if lang not in _format_modules_cache:
        _format_modules_cache[lang] = list(
            iter_format_modules(lang, settings.FORMAT_MODULE_PATH))
    modules = _format_modules_cache[lang]
    return list(reversed(modules)) if reverse else modules
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_type = force_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
        cache_key = (format_type, lang)
        try:
            cached = _format_cache[cache_key]
            if cached is not None:
                return cached
            # A cached None means "no localized format for this key":
            # fall through to the settings-based default below.
        except KeyError:
            # Cache miss: scan the format modules for this language.
            for module in get_format_modules(lang):
                try:
                    val = getattr(module, format_type)
                    # Make sure the ISO formats are always accepted as
                    # input for the *_INPUT_FORMATS settings.
                    for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
                        if iso_input not in val:
                            if isinstance(val, tuple):
                                val = list(val)
                            val.append(iso_input)
                    _format_cache[cache_key] = val
                    return val
                except AttributeError:
                    # This module doesn't define format_type; try the next.
                    pass
            # No module had it: remember the miss to skip the scan next time.
            _format_cache[cache_key] = None
    if format_type not in FORMAT_SETTINGS:
        return format_type
    # Return the general setting by default
    return getattr(settings, format_type)
# Lazily-evaluated variant of get_format() for use at import time (e.g. in
# form-field definitions), resolved when first coerced to text/list/tuple.
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.date or datetime.datetime object using a
    localizable format

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_string = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, format_string)
def time_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.time object using a localizable format

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_string = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, format_string)
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """
    Formats a numeric value using localization settings

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        lang = get_language()
    else:
        # lang=None makes get_format() fall back to the settings values.
        lang = None
    # Argument order matches numberformat.format's positional signature:
    # (number, decimal_sep, decimal_pos, grouping, thousand_sep, ...).
    return numberformat.format(
        value,
        get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
        decimal_pos,
        get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
        get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
        force_grouping=force_grouping
    )
def localize(value, use_l10n=None):
    """
    Checks if value is a localizable type (date, number...) and returns it
    formatted as a string using current locale format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.

    The isinstance checks are order-sensitive: bool must be tested before
    the numeric types (bool subclasses int), and datetime before date
    (datetime subclasses date).
    """
    if isinstance(value, six.string_types):  # Handle strings first for performance reasons.
        return value
    elif isinstance(value, bool):  # Make sure booleans don't get treated as numbers
        return mark_safe(six.text_type(value))
    elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value, use_l10n=use_l10n)
    elif isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    elif isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    elif isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    # Anything non-localizable is passed through unchanged.
    return value
def localize_input(value, default=None):
    """
    Checks if an input value is a localizable type and returns it
    formatted with the appropriate formatting string of the current locale.

    `default` overrides the locale's first *_INPUT_FORMATS entry for
    date/time values. Like localize(), the isinstance chain is
    order-sensitive (bool before numbers, datetime before date).
    """
    if isinstance(value, six.string_types):  # Handle strings first for performance reasons.
        return value
    elif isinstance(value, bool):  # Don't treat booleans as numbers.
        return six.text_type(value)
    elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value)
    elif isinstance(value, datetime.datetime):
        # datetime_safe handles pre-1900 dates that strftime rejects.
        value = datetime_safe.new_datetime(value)
        format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.date):
        value = datetime_safe.new_date(value)
        format = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.time):
        format = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    return value
def sanitize_separators(value):
    """
    Sanitizes a value according to the current decimal and
    thousand separator setting. Used with form field input.

    Converts a locale-formatted numeric string (e.g. "1.234,56") into a
    machine-readable one ("1234.56"). Non-strings, or any value when
    USE_L10N is off, pass through unchanged.
    """
    if settings.USE_L10N and isinstance(value, six.string_types):
        parts = []
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        if decimal_separator in value:
            # Split off the decimal part; it is re-attached with '.' below.
            value, decimals = value.split(decimal_separator, 1)
            parts.append(decimals)
        if settings.USE_THOUSAND_SEPARATOR:
            thousand_sep = get_format('THOUSAND_SEPARATOR')
            if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3:
                # Special case where we suspect a dot meant decimal separator (see #22171)
                pass
            else:
                # Strip the separator in both its raw and NFKD-normalized
                # forms (e.g. non-breaking vs. plain space).
                for replacement in {
                        thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
                    value = value.replace(replacement, '')
        parts.append(value)
        # parts is [decimals?, integer]; reversed join yields "int.dec".
        value = '.'.join(reversed(parts))
    return value
| bsd-3-clause |
patrickrolanddg/jaikuengine | bin/collect_profiling_data.py | 34 | 1860 | #!/usr/local/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import urllib
import urllib2
# Command-line interface: what to profile, where to write the result, and
# optional POST data to include in the profiled request.
parser = optparse.OptionParser()
parser.add_option('--db', action='store_const', const='db',
                  dest='profile_type',
                  help='profile the db calls')
parser.add_option('-o', '--out', action='store',
                  dest='output_file',
                  help='directory to put profiling data')
parser.add_option('-d', '--data', action='store', dest='data',
                  help='post data to include in the request')
# Default to DB profiling when no profiling flag is given.
parser.set_defaults(profile_type='db',
                    )
def fetch_profile(url, profile_type='db', data=None):
    """Request `url` with the X-Profile header set and return the body.

    Passing `data` makes the request a POST; the server is expected to
    return profiling output for the handled request.
    """
    request = urllib2.Request(url, data, {'X-Profile': profile_type})
    response = urllib2.urlopen(request)
    return response.read()
def main(options, args):
    """Fetch profiling data for the URL in args[0] and write or print it.

    `options` comes from the optparse parser defined above; profiling
    output goes to options.output_file when given, otherwise to stdout.
    """
    if not args:
        raise Exception('need to specify a url')
    url = args[0]
    profile_type = getattr(options, 'profile_type', 'db')
    data = getattr(options, 'data', None)
    output_file = getattr(options, 'output_file', None)
    rv = fetch_profile(url, profile_type, data)
    if output_file:
        # Persist the raw profiling payload to the requested file.
        f = open(output_file, 'w')
        f.write(rv)
        f.close()
    else:
        print rv
if __name__ == '__main__':
    # Parse CLI flags and run; keeps the module importable side-effect-free.
    (options, args) = parser.parse_args()
    main(options, args)
| apache-2.0 |
tinkerthaler/odoo | addons/project_timesheet/report/task_report.py | 336 | 4030 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields,osv
from openerp import tools
class report_timesheet_task_user(osv.osv):
    """Read-only reporting model backed by a SQL view.

    Aggregates, per user and per month, the hours booked on project task
    work, and exposes the matching timesheet attendance hours through a
    computed field.
    """
    _name = "report.timesheet.task.user"
    _auto = False  # table is a SQL view created in init(), not managed by ORM
    _order = "name"

    def get_hrs_timesheet(self, cr, uid, ids, name, args, context):
        """Compute total attendance hours for each record's month.

        Each record's ``name`` is the first day of a month; sums the
        ``total_attendance`` of the user's timesheet days within that month.
        """
        result = {}
        for record in self.browse(cr, uid, ids, context):
            # Last calendar day of the record's month.
            last_date = datetime.strptime(record.name, '%Y-%m-%d') + relativedelta(months=1) - relativedelta(days=1)
            obj = self.pool.get('hr_timesheet_sheet.sheet.day')
            sheet_ids = obj.search(cr, uid, [('sheet_id.user_id','=',record.user_id.id),('name','>=',record.name),('name','<=',last_date.strftime('%Y-%m-%d'))])
            data_days = obj.read(cr, uid, sheet_ids, ['name','sheet_id.user_id','total_attendance'])
            total = 0.0
            for day_attendance in data_days:
                total += day_attendance['total_attendance']
            result[record.id] = total
        return result

    _columns = {
        'name': fields.char('Date'),  # first day of the month, 'YYYY-MM-01'
        'year': fields.char('Year', size=4, required=False, readonly=True),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                   ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'timesheet_hrs': fields.function(get_hrs_timesheet, string="Timesheet Hours"),
        'task_hrs': fields.float('Task Hours'),
    }

    def init(self, cr):
        """(Re)create the backing SQL view.

        The view crosses every user with every month that has either task
        work or timesheet days, and sums the task-work hours per month;
        ids are synthesized as user_id*12 + YYYYMM to stay unique.
        """
        tools.drop_view_if_exists(cr, 'report_timesheet_task_user')
        cr.execute(""" create or replace view report_timesheet_task_user as (
            select
                ((r.id*12)+to_number(months.m_id,'999999'))::integer as id,
                months.name as name,
                r.id as user_id,
                to_char(to_date(months.name, 'YYYY/MM/DD'),'YYYY') as year,
                to_char(to_date(months.name, 'YYYY/MM/DD'),'MM') as month,
                (select sum(hours) from project_task_work where user_id = r.id and date between to_date(months.name, 'YYYY/MM/DD') and (to_date(months.name, 'YYYY/MM/DD') + interval '1 month' -
                    interval '1 day') ) as task_hrs
            from res_users r,
                (select to_char(p.date,'YYYY-MM-01') as name,
                    to_char(p.date,'YYYYMM') as m_id
                from project_task_work p
                union
                select to_char(h.name,'YYYY-MM-01') as name,
                    to_char(h.name,'YYYYMM') as m_id
                from hr_timesheet_sheet_sheet_day h) as months
            group by
                r.id,months.m_id,months.name,
                to_char(to_date(months.name, 'YYYY/MM/DD'),'YYYY') ,
                to_char(to_date(months.name, 'YYYY/MM/DD'),'MM')
            ) """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
scality/manila | manila/tests/api/v1/test_share_types_extra_specs.py | 2 | 17163 | # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import strutils
import webob
from manila.api.v1 import share_types_extra_specs
from manila.common import constants
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes
from manila.tests import fake_notifier
import manila.wsgi
DRIVER_HANDLES_SHARE_SERVERS = (
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS)
def return_create_share_type_extra_specs(context, share_type_id, extra_specs):
    """DB-layer stub for ``share_type_extra_specs_update_or_create``."""
    return stub_share_type_extra_specs()
def return_share_type_extra_specs(context, share_type_id):
    """DB-layer stub for ``share_type_extra_specs_get`` (canned specs)."""
    return stub_share_type_extra_specs()
def return_empty_share_type_extra_specs(context, share_type_id):
    """DB-layer stub returning a share type with no extra specs."""
    return {}
def delete_share_type_extra_specs(context, share_type_id, key):
    """No-op stub for a successful extra-spec delete."""
    pass
def delete_share_type_extra_specs_not_found(context, share_type_id, key):
    """Stub that simulates deleting a nonexistent extra spec."""
    raise exception.ShareTypeExtraSpecsNotFound("Not Found")
def stub_share_type_extra_specs():
    """Return the canned five-entry extra-specs mapping used by the stubs."""
    return {'key%d' % i: 'value%d' % i for i in range(1, 6)}
def share_type_get(context, id, inactive=False, expected_fields=None):
    """No-op stub for ``manila.db.share_type_get``: any id "exists"."""
    pass
def get_large_string():
    """Return a 256-character string (one past the API's 255-char limit)."""
    return ''.join(['s'] * 256)
def get_extra_specs_dict(extra_specs, include_required=True):
    """Wrap `extra_specs` in the request-body shape the API expects.

    When `include_required` is True, the mandatory
    driver_handles_share_servers spec is added (as False).

    Works on a shallow copy so the caller's dict is never mutated (the
    previous version inserted the required key into the argument itself).
    """
    extra_specs = dict(extra_specs) if extra_specs else {}
    if include_required:
        extra_specs[DRIVER_HANDLES_SHARE_SERVERS] = False
    return {'extra_specs': extra_specs}
@ddt.ddt
class ShareTypesExtraSpecsTest(test.TestCase):
def setUp(self):
super(ShareTypesExtraSpecsTest, self).setUp()
self.flags(host='fake')
self.mock_object(manila.db, 'share_type_get', share_type_get)
self.api_path = '/v2/fake/os-share-types/1/extra_specs'
self.controller = (
share_types_extra_specs.ShareTypeExtraSpecsController())
self.resource_name = self.controller.resource_name
self.mock_policy_check = self.mock_object(policy, 'check_policy')
"""to reset notifier drivers left over from other api/contrib tests"""
self.addCleanup(fake_notifier.reset)
def test_index(self):
self.mock_object(manila.db, 'share_type_extra_specs_get',
return_share_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'index')
def test_index_no_data(self):
self.mock_object(manila.db, 'share_type_extra_specs_get',
return_empty_share_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'index')
def test_show(self):
self.mock_object(manila.db, 'share_type_extra_specs_get',
return_share_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
req_context = req.environ['manila.context']
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'show')
def test_show_spec_not_found(self):
self.mock_object(manila.db, 'share_type_extra_specs_get',
return_empty_share_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'show')
def test_delete(self):
self.mock_object(manila.db, 'share_type_extra_specs_delete',
delete_share_type_extra_specs)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
req_context = req.environ['manila.context']
self.controller.delete(req, 1, 'key5')
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'delete')
def test_delete_not_found(self):
self.mock_object(manila.db, 'share_type_extra_specs_delete',
delete_share_type_extra_specs_not_found)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'delete')
def test_delete_forbidden(self):
req = fakes.HTTPRequest.blank(
self.api_path + '/' + DRIVER_HANDLES_SHARE_SERVERS)
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
req, 1, DRIVER_HANDLES_SHARE_SERVERS)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'delete')
@ddt.data(
get_extra_specs_dict({}),
{'foo': 'bar'},
{DRIVER_HANDLES_SHARE_SERVERS + 'foo': True},
{'foo' + DRIVER_HANDLES_SHARE_SERVERS: False},
*[{DRIVER_HANDLES_SHARE_SERVERS: v}
for v in strutils.TRUE_STRINGS + strutils.FALSE_STRINGS]
)
def test_create(self, data):
body = {'extra_specs': data}
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
for k, v in data.items():
self.assertIn(k, res_dict['extra_specs'])
self.assertEqual(v, res_dict['extra_specs'][k])
manila.db.share_type_extra_specs_update_or_create.\
assert_called_once_with(
req.environ['manila.context'], 1, body['extra_specs'])
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
def test_create_with_too_small_key(self):
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
too_small_key = ""
body = {"extra_specs": {too_small_key: "value"}}
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, 1, body)
self.assertFalse(
manila.db.share_type_extra_specs_update_or_create.called)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
def test_create_with_too_big_key(self):
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
too_big_key = "k" * 256
body = {"extra_specs": {too_big_key: "value"}}
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, 1, body)
self.assertFalse(
manila.db.share_type_extra_specs_update_or_create.called)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
def test_create_with_too_small_value(self):
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
too_small_value = ""
body = {"extra_specs": {"key": too_small_value}}
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, 1, body)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
self.assertFalse(
manila.db.share_type_extra_specs_update_or_create.called)
def test_create_with_too_big_value(self):
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
too_big_value = "v" * 256
body = {"extra_specs": {"key": too_big_value}}
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, 1, body)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
self.assertFalse(
manila.db.share_type_extra_specs_update_or_create.called)
def test_create_key_allowed_chars(self):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=mock_return_value))
body = get_extra_specs_dict({"other_alphanum.-_:": "value1"})
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
self.assertEqual(mock_return_value['key1'],
res_dict['extra_specs']['other_alphanum.-_:'])
manila.db.share_type_extra_specs_update_or_create.\
assert_called_once_with(
req.environ['manila.context'], 1, body['extra_specs'])
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
def test_create_too_many_keys_allowed_chars(self):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=mock_return_value))
body = get_extra_specs_dict({
"other_alphanum.-_:": "value1",
"other2_alphanum.-_:": "value2",
"other3_alphanum.-_:": "value3"
})
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(self.api_path)
req_context = req.environ['manila.context']
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
self.assertEqual(mock_return_value['key1'],
res_dict['extra_specs']['other_alphanum.-_:'])
self.assertEqual(mock_return_value['key2'],
res_dict['extra_specs']['other2_alphanum.-_:'])
self.assertEqual(mock_return_value['key3'],
res_dict['extra_specs']['other3_alphanum.-_:'])
manila.db.share_type_extra_specs_update_or_create.\
assert_called_once_with(req_context, 1, body['extra_specs'])
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'create')
def test_update_item(self):
self.mock_object(
manila.db, 'share_type_extra_specs_update_or_create',
mock.Mock(return_value=return_create_share_type_extra_specs))
body = {DRIVER_HANDLES_SHARE_SERVERS: True}
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
req = fakes.HTTPRequest.blank(
self.api_path + '/' + DRIVER_HANDLES_SHARE_SERVERS)
req_context = req.environ['manila.context']
res_dict = self.controller.update(
req, 1, DRIVER_HANDLES_SHARE_SERVERS, body)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
self.assertTrue(res_dict[DRIVER_HANDLES_SHARE_SERVERS])
manila.db.share_type_extra_specs_update_or_create.\
assert_called_once_with(req_context, 1, body)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'update')
def test_update_item_too_many_keys(self):
self.mock_object(manila.db, 'share_type_extra_specs_update_or_create')
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
req_context = req.environ['manila.context']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body)
self.assertFalse(
manila.db.share_type_extra_specs_update_or_create.called)
self.mock_policy_check.assert_called_once_with(
req_context, self.resource_name, 'update')
    def test_update_item_body_uri_mismatch(self):
        """A body key that differs from the URI key is rejected with 400."""
        self.mock_object(manila.db, 'share_type_extra_specs_update_or_create')
        body = {"key1": "value1"}
        req = fakes.HTTPRequest.blank(self.api_path + '/bad')
        req_context = req.environ['manila.context']

        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'bad', body)

        # The DB layer must not be reached when validation fails.
        self.assertFalse(
            manila.db.share_type_extra_specs_update_or_create.called)
        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')
    @ddt.data(None, {}, {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: ""}})
    def test_update_invalid_body(self, body):
        """Any malformed update body must yield HTTPBadRequest.

        Driven by ddt: ``body`` is None, empty, or has an empty spec value.
        """
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req_context = req.environ['manila.context']
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '1', body)

        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')
    @ddt.data(
        # Missing/empty payloads and wrong top-level types.
        None, {}, {'foo': {'a': 'b'}}, {'extra_specs': 'string'},
        # Keys containing illegal characters ('/').
        {"extra_specs": {"ke/y1": "value1"}},
        {"key1": "value1", "ke/y2": "value2", "key3": "value3"},
        # Invalid values for the boolean DHSS spec.
        {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: ""}},
        {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: "111"}},
        # Empty keys/values and over-length keys/values.
        {"extra_specs": {"": "value"}},
        {"extra_specs": {"t": get_large_string()}},
        {"extra_specs": {get_large_string(): get_large_string()}},
        {"extra_specs": {get_large_string(): "v"}},
        {"extra_specs": {"k": ""}})
    def test_create_invalid_body(self, body):
        """Creation with any malformed body must yield HTTPBadRequest."""
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req_context = req.environ['manila.context']
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, '1', body)

        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'create')
| apache-2.0 |
ultrabox/Ultra.Stream | servers/zinwa.py | 43 | 1641 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para zinwa
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct media URL for a zinwa page.

    Returns a list of ``[label, url]`` pairs, as expected by the caller.
    The premium/user/password arguments are part of the common connector
    interface and are unused for this host.
    """
    logger.info("[zinwa.py] get_video_url(page_url='%s')" % page_url)
    page_html = scrapertools.cache_page(page_url)
    # The player embeds the stream as: file: "<url>"
    media_url = scrapertools.get_match(page_html,'file\: "([^"]+)"')
    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    return [[extension + " [zinwa]", media_url]]
# Finds this server's videos in the given text
def find_videos(data):
    """Scan *data* for zinwa links and return [title, url, server] triples.

    Duplicate URLs are logged and reported only once.
    """
    # e.g. http://zinwa.com/frap5b3uhesl
    patronvideos = '(zinwa.com/[a-z0-9]+)'
    logger.info("[zinwa.py] find_videos #"+patronvideos+"#")

    encontrados = set()
    devuelve = []
    for match in re.compile(patronvideos,re.DOTALL).findall(data):
        url = "http://"+match
        if url in encontrados:
            logger.info("  url duplicada="+url)
            continue
        logger.info("  url="+url)
        devuelve.append( [ "[zinwa]" , url , 'zinwa' ] )
        encontrados.add(url)

    return devuelve
def test():
    # Smoke test: resolving a known zinwa URL must yield at least one
    # playable stream entry.
    streams = get_video_url("http://zinwa.com/frap5b3uhesl")
    return len(streams) > 0
Eaglemania/ASS | pyglet/graphics/vertexattribute.py | 42 | 17435 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Access byte arrays as arrays of vertex attributes.
Use `create_attribute` to create an attribute accessor given a simple format
string. Alternatively, the classes may be constructed directly.
Attribute format strings
========================
An attribute format string specifies the format of a vertex attribute. Format
strings are accepted by the `create_attribute` function as well as most
methods in the `pyglet.graphics` module.
Format strings have the following (BNF) syntax::
attribute ::= ( name | index 'g' 'n'? | texture 't' ) count type
``name`` describes the vertex attribute, and is one of the following
constants for the predefined attributes:
``c``
Vertex color
``e``
Edge flag
``f``
Fog coordinate
``n``
Normal vector
``s``
Secondary color
``t``
Texture coordinate
``v``
Vertex coordinate
You can alternatively create a generic indexed vertex attribute by
specifying its index in decimal followed by the constant ``g``. For
example, ``0g`` specifies the generic vertex attribute with index 0.
If the optional constant ``n`` is present after the ``g``, the
attribute is normalised to the range ``[0, 1]`` or ``[-1, 1]`` within
the range of the data type.
Texture coordinates for multiple texture units can be specified with the
texture number before the constant 't'. For example, ``1t`` gives the
texture coordinate attribute for texture unit 1.
``count`` gives the number of data components in the attribute. For
example, a 3D vertex position has a count of 3. Some attributes
constrain the possible counts that can be used; for example, a normal
vector must have a count of 3.
``type`` gives the data type of each component of the attribute. The
following types can be used:
``b``
``GLbyte``
``B``
``GLubyte``
``s``
``GLshort``
``S``
``GLushort``
``i``
``GLint``
``I``
``GLuint``
``f``
``GLfloat``
``d``
``GLdouble``
Some attributes constrain the possible data types; for example,
normal vectors must use one of the signed data types. The use of
some data types, while not illegal, may have severe performance
concerns. For example, the use of ``GLdouble`` is discouraged,
and colours should be specified with ``GLubyte``.
Whitespace is prohibited within the format string.
Some examples follow:
``v3f``
3-float vertex position
``c4b``
4-byte colour
``1eb``
Edge flag
``0g3f``
3-float generic vertex attribute 0
``1gn1i``
Integer generic vertex attribute 1, normalized to [-1, 1]
``2gn4B``
4-byte generic vertex attribute 2, normalized to [0, 1] (because
the type is unsigned)
``3t2f``
2-float texture coordinate for texture unit 3.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import re
from pyglet.gl import *
from pyglet.graphics import vertexbuffer
_c_types = {
GL_BYTE: ctypes.c_byte,
GL_UNSIGNED_BYTE: ctypes.c_ubyte,
GL_SHORT: ctypes.c_short,
GL_UNSIGNED_SHORT: ctypes.c_ushort,
GL_INT: ctypes.c_int,
GL_UNSIGNED_INT: ctypes.c_uint,
GL_FLOAT: ctypes.c_float,
GL_DOUBLE: ctypes.c_double,
}
_gl_types = {
'b': GL_BYTE,
'B': GL_UNSIGNED_BYTE,
's': GL_SHORT,
'S': GL_UNSIGNED_SHORT,
'i': GL_INT,
'I': GL_UNSIGNED_INT,
'f': GL_FLOAT,
'd': GL_DOUBLE,
}
_attribute_format_re = re.compile(r'''
(?P<name>
[cefnstv] |
(?P<generic_index>[0-9]+) g (?P<generic_normalized>n?) |
(?P<texcoord_texture>[0-9]+) t)
(?P<count>[1234])
(?P<type>[bBsSiIfd])
''', re.VERBOSE)
_attribute_cache = {}
def _align(v, align):
return ((v - 1) & ~(align - 1)) + align
def interleave_attributes(attributes):
    '''Interleave attribute offsets.

    Adjusts the offsets and strides of the given attributes so that
    they are interleaved.  Alignment constraints are respected.

    :Parameters:
        `attributes` : sequence of `AbstractAttribute`
            Attributes to interleave in-place.

    '''
    offset = 0
    widest = 0
    # First pass: pack each attribute at the next suitably aligned offset.
    for attr in attributes:
        offset = _align(offset, attr.align)
        attr.offset = offset
        offset += attr.size
        widest = max(widest, attr.size)
    # Pad the per-vertex stride so the widest attribute stays aligned from
    # one vertex to the next, then apply it to every attribute.
    vertex_stride = _align(offset, widest)
    for attr in attributes:
        attr.stride = vertex_stride
def serialize_attributes(count, attributes):
    '''Serialize attribute offsets.

    Adjust the offsets of the given attributes so that they are
    packed serially against each other for `count` vertices.

    :Parameters:
        `count` : int
            Number of vertices.
        `attributes` : sequence of `AbstractAttribute`
            Attributes to serialize in-place.

    '''
    cursor = 0
    for attr in attributes:
        # Each attribute's array starts at the next aligned position and
        # occupies `count` vertices' worth of data.
        cursor = _align(cursor, attr.align)
        attr.offset = cursor
        cursor += count * attr.stride
def create_attribute(format):
    '''Create a vertex attribute description from a format string.

    The initial stride and offset of the attribute will be 0.

    :Parameters:
        `format` : str
            Attribute format string.  See the module summary for details.

    :rtype: `AbstractAttribute`
    '''
    try:
        # Parsed formats are memoised; re-instantiate from the cached
        # (class, args) pair instead of re-running the regex.
        cls, args = _attribute_cache[format]
        return cls(*args)
    except KeyError:
        pass

    match = _attribute_format_re.match(format)
    assert match, 'Invalid attribute format %r' % format
    count = int(match.group('count'))
    gl_type = _gl_types[match.group('type')]
    generic_index = match.group('generic_index')
    texcoord_texture = match.group('texcoord_texture')
    if generic_index:
        # e.g. '0gn3f': generic (shader) attribute, optional 'n' flag.
        normalized = match.group('generic_normalized')
        attr_class = GenericAttribute
        args = int(generic_index), normalized, count, gl_type
    elif texcoord_texture:
        # e.g. '1t2f': texture coordinates for a specific texture unit.
        attr_class = MultiTexCoordAttribute
        args = int(texcoord_texture), count, gl_type
    else:
        # Predefined attribute selected by its single-letter name.
        name = match.group('name')
        attr_class = _attribute_classes[name]
        if attr_class._fixed_count:
            # Some attributes (normals, edge flags, ...) mandate a count.
            assert count == attr_class._fixed_count, \
                'Attributes named "%s" must have count of %d' % (
                    name, attr_class._fixed_count)
            args = (gl_type,)
        else:
            args = (count, gl_type)

    _attribute_cache[format] = attr_class, args
    return attr_class(*args)
class AbstractAttribute(object):
    '''Abstract accessor for an attribute in a mapped buffer.
    '''

    # Subclasses set this to an int when the GL API fixes the component
    # count (e.g. normals are always 3); None means the count is variable.
    _fixed_count = None

    def __init__(self, count, gl_type):
        '''Create the attribute accessor.

        :Parameters:
            `count` : int
                Number of components in the attribute.
            `gl_type` : int
                OpenGL type enumerant; for example, ``GL_FLOAT``

        '''
        assert count in (1, 2, 3, 4), 'Component count out of range'
        self.gl_type = gl_type
        self.c_type = _c_types[gl_type]
        self.count = count
        # One component's size in bytes doubles as the alignment requirement.
        self.align = ctypes.sizeof(self.c_type)
        self.size = count * self.align
        # Default layout is non-interleaved: stride == size, offset == 0.
        # interleave_attributes/serialize_attributes overwrite these.
        self.stride = self.size
        self.offset = 0

    def enable(self):
        '''Enable the attribute using ``glEnableClientState``.'''
        raise NotImplementedError('abstract')

    def set_pointer(self, offset):
        '''Setup this attribute to point to the currently bound buffer at
        the given offset.

        ``offset`` should be based on the currently bound buffer's ``ptr``
        member.

        :Parameters:
            `offset` : int
                Pointer offset to the currently bound buffer for this
                attribute.

        '''
        raise NotImplementedError('abstract')

    def get_region(self, buffer, start, count):
        '''Map a buffer region using this attribute as an accessor.

        The returned region can be modified as if the buffer was a contiguous
        array of this attribute (though it may actually be interleaved or
        otherwise non-contiguous).

        The returned region consists of a contiguous array of component
        data elements.  For example, if this attribute uses 3 floats per
        vertex, and the `count` parameter is 4, the number of floats mapped
        will be ``3 * 4 = 12``.

        :Parameters:
            `buffer` : `AbstractMappable`
                The buffer to map.
            `start` : int
                Offset of the first vertex to map.
            `count` : int
                Number of vertices to map

        :rtype: `AbstractBufferRegion`
        '''
        byte_start = self.stride * start
        byte_size = self.stride * count
        array_count = self.count * count
        if self.stride == self.size or not array_count:
            # non-interleaved
            ptr_type = ctypes.POINTER(self.c_type * array_count)
            return buffer.get_region(byte_start, byte_size, ptr_type)
        else:
            # interleaved: map the raw span and expose only this
            # attribute's components through an IndirectArrayRegion.
            byte_start += self.offset
            byte_size -= self.offset
            # Stride/offset expressed in components rather than bytes.
            elem_stride = self.stride // ctypes.sizeof(self.c_type)
            elem_offset = self.offset // ctypes.sizeof(self.c_type)
            ptr_type = ctypes.POINTER(
                self.c_type * (count * elem_stride - elem_offset))
            region = buffer.get_region(byte_start, byte_size, ptr_type)
            return vertexbuffer.IndirectArrayRegion(
                region, array_count, self.count, elem_stride)

    def set_region(self, buffer, start, count, data):
        '''Set the data over a region of the buffer.

        :Parameters:
            `buffer` : AbstractMappable`
                The buffer to modify.
            `start` : int
                Offset of the first vertex to set.
            `count` : int
                Number of vertices to set.
            `data` : sequence
                Sequence of data components.

        '''
        if self.stride == self.size:
            # non-interleaved: write the packed array directly.
            byte_start = self.stride * start
            byte_size = self.stride * count
            array_count = self.count * count
            data = (self.c_type * array_count)(*data)
            buffer.set_data_region(data, byte_start, byte_size)
        else:
            # interleaved: go through the mapped region accessor.
            region = self.get_region(buffer, start, count)
            region[:] = data
class ColorAttribute(AbstractAttribute):
    '''Color vertex attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'colors'

    def __init__(self, count, gl_type):
        # glColorPointer accepts only RGB or RGBA.
        assert count in (3, 4), 'Color attributes must have count of 3 or 4'
        super(ColorAttribute, self).__init__(count, gl_type)

    def enable(self):
        glEnableClientState(GL_COLOR_ARRAY)

    def set_pointer(self, pointer):
        glColorPointer(self.count, self.gl_type, self.stride,
                       self.offset + pointer)
class EdgeFlagAttribute(AbstractAttribute):
    '''Edge flag attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'edge_flags'
    # Edge flags are always a single boolean per vertex.
    _fixed_count = 1

    def __init__(self, gl_type):
        assert gl_type in (GL_BYTE, GL_UNSIGNED_BYTE, GL_BOOL), \
            'Edge flag attribute must have boolean type'
        super(EdgeFlagAttribute, self).__init__(1, gl_type)

    def enable(self):
        glEnableClientState(GL_EDGE_FLAG_ARRAY)

    def set_pointer(self, pointer):
        glEdgeFlagPointer(self.stride, self.offset + pointer)
class FogCoordAttribute(AbstractAttribute):
    '''Fog coordinate attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'fog_coords'

    def __init__(self, count, gl_type):
        super(FogCoordAttribute, self).__init__(count, gl_type)

    def enable(self):
        glEnableClientState(GL_FOG_COORD_ARRAY)

    def set_pointer(self, pointer):
        # BUG FIX: glFogCoordPointer takes (type, stride, pointer) only --
        # fog coordinates have no size/count parameter in the GL API.  The
        # previous code passed self.count first, shifting every argument.
        glFogCoordPointer(self.gl_type, self.stride, self.offset + pointer)
class NormalAttribute(AbstractAttribute):
    '''Normal vector attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'normals'
    # Normals are always 3-component vectors in the GL API.
    _fixed_count = 3

    def __init__(self, gl_type):
        assert gl_type in (GL_BYTE, GL_SHORT, GL_INT, GL_FLOAT, GL_DOUBLE), \
            'Normal attribute must have signed type'
        super(NormalAttribute, self).__init__(3, gl_type)

    def enable(self):
        glEnableClientState(GL_NORMAL_ARRAY)

    def set_pointer(self, pointer):
        glNormalPointer(self.gl_type, self.stride, self.offset + pointer)
class SecondaryColorAttribute(AbstractAttribute):
    '''Secondary color attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'secondary_colors'
    # Secondary color is always RGB (no alpha) in the GL API.
    _fixed_count = 3

    def __init__(self, gl_type):
        super(SecondaryColorAttribute, self).__init__(3, gl_type)

    def enable(self):
        glEnableClientState(GL_SECONDARY_COLOR_ARRAY)

    def set_pointer(self, pointer):
        glSecondaryColorPointer(3, self.gl_type, self.stride,
                                self.offset + pointer)
class TexCoordAttribute(AbstractAttribute):
    '''Texture coordinate attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'tex_coords'

    def __init__(self, count, gl_type):
        # Byte types are not legal for glTexCoordPointer.  (The original
        # tuple listed GL_INT twice -- a typo; the set is unchanged.)
        assert gl_type in (GL_SHORT, GL_INT, GL_FLOAT, GL_DOUBLE), \
            'Texture coord attribute must have non-byte signed type'
        super(TexCoordAttribute, self).__init__(count, gl_type)

    def enable(self):
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)

    def set_pointer(self, pointer):
        glTexCoordPointer(self.count, self.gl_type, self.stride,
                          self.offset + pointer)

    def convert_to_multi_tex_coord_attribute(self):
        '''Changes the class of the attribute to `MultiTexCoordAttribute`.
        '''
        # Mutating __class__ in place is deliberate: existing vertex lists
        # keep their accessor objects and pick up the multi-texture
        # behaviour.  Texture unit 0 preserves the old semantics.
        self.__class__ = MultiTexCoordAttribute
        self.texture = 0
class MultiTexCoordAttribute(AbstractAttribute):
    '''Texture coordinate attribute for a specific texture unit.'''

    def __init__(self, texture, count, gl_type):
        # Byte types are not legal for glTexCoordPointer.  (The original
        # tuple listed GL_INT twice -- a typo; the set is unchanged.)
        assert gl_type in (GL_SHORT, GL_INT, GL_FLOAT, GL_DOUBLE), \
            'Texture coord attribute must have non-byte signed type'
        # Zero-based texture unit index; added to GL_TEXTURE0 when enabled.
        self.texture = texture
        super(MultiTexCoordAttribute, self).__init__(count, gl_type)

    def enable(self):
        # Select the client texture unit before enabling the array so the
        # pointer set in set_pointer applies to the right unit.
        glClientActiveTexture(GL_TEXTURE0 + self.texture)
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)

    def set_pointer(self, pointer):
        glTexCoordPointer(self.count, self.gl_type, self.stride,
                          self.offset + pointer)
class VertexAttribute(AbstractAttribute):
    '''Vertex coordinate attribute.'''

    # Attribute name used for keyword arguments in pyglet.graphics.
    plural = 'vertices'

    def __init__(self, count, gl_type):
        # glVertexPointer rejects a size of 1; the base class caps at 4.
        assert count > 1, \
            'Vertex attribute must have count of 2, 3 or 4'
        # Byte types are not legal for glVertexPointer.  (The original
        # tuple listed GL_INT twice -- a typo; the set is unchanged.)
        assert gl_type in (GL_SHORT, GL_INT, GL_FLOAT, GL_DOUBLE), \
            'Vertex attribute must have signed type larger than byte'
        super(VertexAttribute, self).__init__(count, gl_type)

    def enable(self):
        glEnableClientState(GL_VERTEX_ARRAY)

    def set_pointer(self, pointer):
        glVertexPointer(self.count, self.gl_type, self.stride,
                        self.offset + pointer)
class GenericAttribute(AbstractAttribute):
    '''Generic vertex attribute, used by shader programs.'''

    def __init__(self, index, normalized, count, gl_type):
        # Truthy `normalized` maps integer data into [0, 1] / [-1, 1]
        # when the attribute is fetched by the shader.
        self.normalized = bool(normalized)
        # Generic attribute location as used by glVertexAttribPointer.
        self.index = index
        super(GenericAttribute, self).__init__(count, gl_type)

    def enable(self):
        glEnableVertexAttribArray(self.index)

    def set_pointer(self, pointer):
        glVertexAttribPointer(self.index, self.count, self.gl_type,
                              self.normalized, self.stride,
                              self.offset + pointer)
# Predefined attribute classes keyed by their single-letter format name
# (see the module docstring for the format grammar).
_attribute_classes = {
    'c': ColorAttribute,
    'e': EdgeFlagAttribute,
    'f': FogCoordAttribute,
    'n': NormalAttribute,
    's': SecondaryColorAttribute,
    't': TexCoordAttribute,
    'v': VertexAttribute,
}
| gpl-2.0 |
prakxys/flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/werkzeug/wsgi.py | 146 | 37745 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Marks a function as responder.  Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapped(*args):
        # The trailing two positional arguments are always
        # (environ, start_response); invoke the application that `f`
        # returned with exactly those.
        return f(*args)(*args[-2:])
    return update_wrapper(wrapped, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """A handy helper function that recreates the full URL as IRI for the
    current request or parts of it.  Here an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    Optionally, this verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    Note that the string returned might contain unicode characters as the
    representation is an IRI not an URI.  If you need an ASCII only
    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
    function:

    >>> from werkzeug.urls import iri_to_uri
    >>> iri_to_uri(get_current_url(env))
    'http://localhost/script/?param=foo'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
    cat = tmp.append
    if host_only:
        return uri_to_iri(''.join(tmp) + '/')
    # SCRIPT_NAME never ends with a slash; PATH_INFO always begins with one.
    cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    cat('/')
    if not root_only:
        cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                cat('?' + qs)
    return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
    """Checks if a host is trusted against a list.  This also takes care
    of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False

    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _strip_port_and_encode(name):
        # Comparison happens on the IDNA-encoded host without a port.
        if ':' in name:
            name = name.rsplit(':', 1)[0]
        return _encode_idna(name)

    hostname = _strip_port_and_encode(hostname)
    for candidate in trusted_list:
        if candidate.startswith('.'):
            # A leading dot matches the domain itself and all subdomains.
            candidate = candidate[1:]
            match_subdomains = True
        else:
            match_subdomains = False
        candidate = _strip_port_and_encode(candidate)
        if candidate == hostname:
            return True
        if match_subdomains and hostname.endswith('.' + candidate):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.  This takes care
    of the `X-Forwarded-Host` header.  Optionally it verifies that the host
    is in a list of trusted hosts.  If the host is not in there it will raise
    a :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    # Prefer the proxy-supplied header, then the client's Host header,
    # then fall back to the server's own name and port.
    if 'HTTP_X_FORWARDED_HOST' in environ:
        rv = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        rv = environ['HTTP_HOST']
    else:
        rv = environ['SERVER_NAME']
        # Append the port only when it is not the scheme's default.
        if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
           in (('https', '443'), ('http', '80')):
            rv += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None:
        if not host_is_trusted(rv, trusted_hosts):
            from werkzeug.exceptions import SecurityError
            raise SecurityError('Host "%s" is not trusted' % rv)
    return rv
def get_content_length(environ):
    """Returns the content length from the WSGI environment as
    integer.  If it's not available `None` is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw_value = environ.get('CONTENT_LENGTH')
    if raw_value is None:
        return None
    try:
        length = int(raw_value)
    except (ValueError, TypeError):
        # Malformed header: treat as unknown length.
        return None
    # Negative values are clamped to zero.
    return max(0, length)
def get_input_stream(environ, safe_fallback=True):
    """Returns the input stream from the WSGI environment and wraps it
    in the most sensible way possible.  The stream returned is not the
    raw WSGI stream in most cases but one that is safe to read from
    without taking into account the content length.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether the function should use an empty
                          stream as safe fallback or just return the original
                          WSGI input stream if it can't wrap it safely.  The
                          default is to return an empty string in those cases.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A wsgi extension that tells us if the input is terminated.  In
    # that case we return the stream unchanged as we know we can safely
    # read it until the end.
    if environ.get('wsgi.input_terminated'):
        return stream

    # If we don't have a content length we fall back to an empty stream
    # in case of a safe fallback, otherwise we return the stream unchanged.
    # The non-safe fallback is not recommended but might be useful in
    # some situations.
    if content_length is None:
        return safe_fallback and _empty_stream or stream

    # Otherwise limit the stream to the content length
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Returns the `QUERY_STRING` from the WSGI environment.  This also takes
    care about the WSGI decoding dance on Python 3 environments as a
    native string.  The string returned will be restricted to ASCII
    characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # QUERY_STRING really should be ascii safe but some browsers
    # will send us some unicode stuff (I am looking at you IE).
    # In that case we want to urllib quote it badly.
    return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance
    on Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance
    on Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.

    If the `charset` is set to `None` a bytestring is returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # shift multiple leading slashes over to SCRIPT_NAME so the popped
    # segment never starts empty.
    old_path = path
    path = path.lstrip('/')
    if path != old_path:
        script_name += '/' * (len(old_path) - len(path))

    if '/' not in path:
        # Last segment: PATH_INFO is exhausted after this pop.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        rv = wsgi_get_bytes(path)
    else:
        segment, path = path.split('/', 1)
        environ['PATH_INFO'] = '/' + path
        environ['SCRIPT_NAME'] = script_name + segment
        rv = wsgi_get_bytes(segment)

    return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    # Only the first segment is needed, so limit the split to one cut.
    segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if segments:
        return to_unicode(wsgi_get_bytes(segments[0]),
                          charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path.  The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment.  The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Strip credentials, then drop the port when it is the scheme's
        # default so 'example.com' and 'example.com:80' compare equal.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules (a single
                     pattern string is accepted as well).
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                # ('package', 'path') tuple -> serve package data.
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # Accept either a single fnmatch pattern or a list of patterns.
            # Previously a list -- although documented as supported -- was
            # passed directly as the pattern to fnmatch() and raised a
            # TypeError at request time.
            if isinstance(disallow, string_types):
                disallow = [disallow]
            self.is_allowed = lambda x: not any(
                fnmatch(x, pattern) for pattern in disallow)
        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files. However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # Zero-argument callable producing (open file, mtime, size) on demand.
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )

    def get_file_loader(self, filename):
        # Loader for a single exported file; the requested sub-path is ignored.
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)

        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            # Non-filesystem providers (e.g. zip imports): serve the raw
            # stream with middleware load time as mtime and a size of 0.
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader

    def get_directory_loader(self, directory):
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        # Etag derived from mtime, size and an adler32 checksum of the name.
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(sys.getfilesystemencoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Drop empty and '..' segments to prevent directory traversal.
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        if file_loader is None or not self.is_allowed(real_filename):
            # Not one of our exports (or explicitly blocked): forward the
            # request to the wrapped application.
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Combine multiple WSGI applications by dispatching on URL prefixes::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })

    Requests whose path begins with one of the mount prefixes are forwarded
    to the mounted application; everything else goes to the wrapped `app`.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        prefix = environ.get('PATH_INFO', '')
        remainder = ''
        # Peel one path segment at a time off the right end until the
        # remaining prefix matches a mount point.
        while '/' in prefix:
            if prefix in self.mounts:
                target = self.mounts[prefix]
                break
            prefix, _, segment = prefix.rpartition('/')
            remainder = '/%s%s' % (segment, remainder)
        else:
            # No '/' left: fall back to the wrapped application unless the
            # bare remainder itself is mounted.
            target = self.mounts.get(prefix, self.app)
        # Shift the matched prefix from PATH_INFO into SCRIPT_NAME so the
        # target application sees paths relative to its mount point.
        environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + prefix
        environ['PATH_INFO'] = remainder
        return target(environ, start_response)
@implements_iterator
class ClosingIterator(object):
    """Wrap a WSGI response iterable so additional cleanup callbacks run
    when it is closed.

    The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator. This class makes it easy
    to attach extra close actions to a returned iterator::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.

    A closing iterator is not needed if the application uses response
    objects and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        source = iter(iterable)
        self._next = partial(next, source)
        # Normalize `callbacks`: None -> [], single callable -> [callable],
        # anything else is treated as an iterable of callables.
        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)
        # The wrapped iterator's own close() is queued first so it always
        # runs before the extra callbacks.
        source_close = getattr(source, 'close', None)
        if source_close:
            cleanups.insert(0, source_close)
        self._callbacks = cleanups

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        # Invoke every registered cleanup callback in order.
        for cleanup in self._callbacks:
            cleanup()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object in the WSGI server's file wrapper if one is
    advertised in the environ, falling back to the generic
    :class:`FileWrapper` otherwise.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged. If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.

    More information about file wrappers are available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Turn a :class:`file`-like object into an iterable that yields
    `buffer_size` sized blocks until the file is exhausted.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse` you
    have to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # Close the underlying file object if it supports closing.
        fileobj = self.file
        if hasattr(fileobj, 'close'):
            fileobj.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions.

    Yields non-empty chunks from either an iterable of chunks or a stream
    with a ``read`` method; real streams are capped at `limit` bytes.
    """
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if hasattr(stream, 'read'):
        # Real stream: wrap it in a LimitedStream unless it already is one
        # or no limit was given.
        if limit is not None and not isinstance(stream, LimitedStream):
            stream = LimitedStream(stream, limit)
        read = stream.read
        while True:
            chunk = read(buffer_size)
            if not chunk:
                return
            yield chunk
    else:
        # Plain iterable of chunks: pass items through, skipping empty ones.
        for chunk in stream:
            if chunk:
                yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream. If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification. The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.
    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.
    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.
    .. versionadded:: 0.9
       added support for iterators as input stream.
    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    # Peek at the first chunk: it tells us whether we operate on text or
    # bytes and whether there is any data at all.
    first_item = next(_iter, '')
    if not first_item:
        return
    # Build str/bytes literals matching the type of the incoming chunks.
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')
    # Re-attach the peeked chunk in front of the remaining chunks.
    _iter = chain((first_item,), _iter)
    def _iter_basic_lines():
        # Yields newline-terminated lines; a trailing partial line is kept
        # in `buffer` until more data arrives (or yielded at the very end).
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)
    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            # The lone '\n' belongs to the previous line's '\r'.
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks. If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.
    .. versionadded:: 0.8
    .. versionadded:: 0.9
       added support for iterators as input stream.
    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    # Peek at the first chunk to determine text vs. bytes mode; the
    # separator is coerced to the matching type below.
    first_item = next(_iter, '')
    if not first_item:
        return
    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # The capture group keeps separator occurrences in the split result
        # so they can be recognized in the loop below.
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join
    # Non-separator fragments accumulate in `buffer` across raw chunks and
    # are joined and yielded whenever a separator is seen.
    # NOTE(review): each raw chunk is split independently, so a separator
    # that straddles a chunk boundary is not detected -- presumably callers
    # rely on buffer_size being much larger than the separator; confirm.
    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes. If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string. The return value of that function is forwarded
    to the reader function. So if it returns an empty string
    :meth:`read` will return an empty string as well.
    The limit however must never be higher than what the stream can
    output. Otherwise :meth:`readlines` will try to read past the
    limit.
    .. admonition:: Note on WSGI compliance
       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods. Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream. As a result
       of that the use of :meth:`readline` is discouraged.
       For the same reason iterating over the :class:`LimitedStream`
       is not portable. It internally calls :meth:`readline`.
       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.
    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """
    def __init__(self, stream, limit):
        # Only the bound read/readline callables are kept; the stream
        # object itself is not referenced again.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes consumed so far through this wrapper.
        self._pos = 0
        self.limit = limit
    def __iter__(self):
        return self
    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit
    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)
    def on_disconnect(self):
        """What should happen if a disconnect is detected? The return
        value of this function is returned from read functions in case
        the client went away. By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()
    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream. This consumes all the data left until the
        limit is reached.
        :param chunk_size: the size for a chunk. It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            # read() updates self._pos; the returned data is discarded.
            self.read(chunk)
            to_read -= chunk
    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.
        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Never read past the configured limit.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        if to_read and len(read) != to_read:
            # A short read means the client went away mid-request.
            return self.on_disconnect()
        self._pos += len(read)
        return read
    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        if size and not line:
            # Expected data but got nothing: client disconnected.
            return self.on_disconnect()
        self._pos += len(line)
        return line
    def readlines(self, size=None):
        """Reads a file into a list of strings. It calls :meth:`readline`
        until the file is read to the end. It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # NOTE(review): `last_pos` is refreshed to `self._pos` right
                # after every readline() below, so this delta is always 0 and
                # `size` is never actually decremented -- confirm intent.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result
    def tell(self):
        """Returns the position of the stream.
        .. versionadded:: 0.9
        """
        return self._pos
    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
| apache-2.0 |
alexthered/kienhoc-platform | common/djangoapps/student/migrations/0026_auto__remove_index_student_testcenterregistration_accommodation_request.py | 183 | 14980 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.db.utils import DatabaseError
class Migration(SchemaMigration):
    """
    Remove an unwanted index from environments that have it.
    This is a one-way migration in that backwards is a no-op and will not undo the removal.
    This migration is only relevant to dev environments that existed before a migration rewrite
    which removed the creation of this index.
    """
    def forwards(self, orm):
        # `orm` is South's frozen ORM (see `models` below); it is unused
        # here because the index is dropped with a raw schema operation.
        try:
            # Removing index on 'TestCenterRegistration', fields ['accommodation_request']
            db.delete_index('student_testcenterregistration', ['accommodation_request'])
        except DatabaseError:
            # Databases created after the migration rewrite never had the
            # index; treat its absence as success.
            print "-- skipping delete_index of student_testcenterregistration.accommodation_request (index does not exist)"
    def backwards(self, orm):
        # Intentionally a no-op: the index removal is not undone (see the
        # class docstring).
        pass
    # South's frozen model definitions captured when this migration was
    # generated; used to build the `orm` passed to forwards/backwards.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.courseenrollment': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollmentallowed': {
            'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
            'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.testcenterregistration': {
            'Meta': {'object_name': 'TestCenterRegistration'},
            'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.testcenteruser': {
            'Meta': {'object_name': 'TestCenterUser'},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
            'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }
    complete_apps = ['student']
| agpl-3.0 |
westinedu/newertrends | django/contrib/admin/__init__.py | 246 | 1608 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
from django.contrib.admin.options import StackedInline, TabularInline
from django.contrib.admin.sites import AdminSite, site
def autodiscover():
    """
    Import the ``admin`` submodule of every app in ``INSTALLED_APPS`` so any
    admin bits they register get picked up; apps without an admin module are
    skipped silently.
    """
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        try:
            # Snapshot the registry so a failed import can be rolled back.
            saved_registry = copy.copy(site._registry)
            import_module(app + '.admin')
        except:
            # Restore the registry to its pre-import state: the import will
            # reoccur on the next request and could otherwise raise
            # NotRegistered and AlreadyRegistered exceptions (see #8245).
            site._registry = saved_registry
            # Swallow the error only when the app simply has no admin
            # module; a genuine import failure must bubble up.
            if module_has_submodule(mod, 'admin'):
                raise
| bsd-3-clause |
vponomaryov/manila | manila_tempest_tests/tests/api/test_replication.py | 1 | 18604 | # Copyright 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
# Tempest configuration handle; share options live under CONF.share.
CONF = config.CONF
# These tests are skipped (via skip_if_microversion_lt) below this
# manila API microversion.
_MIN_SUPPORTED_MICROVERSION = '2.11'
# Keys expected in a replica dict from the summary listing API.
SUMMARY_KEYS = ['share_id', 'id', 'replica_state', 'status']
# Detailed listings add these keys on top of the summary keys.
DETAIL_KEYS = SUMMARY_KEYS + ['availability_zone', 'updated_at',
                              'share_network_id', 'created_at']
@testtools.skipUnless(CONF.share.run_replication_tests,
                      'Replication tests are disabled.')
@base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION)
class ReplicationTest(base.BaseSharesMixedTest):
    """Lifecycle tests for share replicas: creation, deletion, access
    rules, and promotion between two availability zones.
    """
    @classmethod
    def resource_setup(cls):
        # Builds class-wide fixtures: a replication-capable share type and
        # two shares created from it in the first availability zone.
        super(ReplicationTest, cls).resource_setup()
        # Create share_type
        name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX)
        cls.admin_client = cls.admin_shares_v2_client
        cls.replication_type = CONF.share.backend_replication_type
        if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES:
            raise share_exceptions.ShareReplicationTypeException(
                replication_type=cls.replication_type
            )
        # Shares are created in the first AZ, replicas in the last one.
        cls.zones = cls.get_availability_zones(client=cls.admin_client)
        cls.share_zone = cls.zones[0]
        cls.replica_zone = cls.zones[-1]
        cls.extra_specs = cls.add_extra_specs_to_dict(
            {"replication_type": cls.replication_type})
        share_type = cls.create_share_type(
            name,
            extra_specs=cls.extra_specs,
            client=cls.admin_client)
        cls.share_type = share_type["share_type"]
        # Create share with above share_type
        cls.creation_data = {'kwargs': {
            'share_type_id': cls.share_type['id'],
            'availability_zone': cls.share_zone,
        }}
        # Data for creating shares in parallel
        data = [cls.creation_data, cls.creation_data]
        cls.shares = cls.create_shares(data)
        cls.shares = [cls.shares_v2_client.get_share(s['id']) for s in
                      cls.shares]
        cls.instance_id1 = cls._get_instance(cls.shares[0])
        cls.instance_id2 = cls._get_instance(cls.shares[1])
    @classmethod
    def _get_instance(cls, share):
        # Return the id of the first instance of the given share (each share
        # has a single instance at setup time).
        share_instances = cls.admin_client.get_instances_of_share(share["id"])
        return share_instances[0]["id"]
    def _verify_create_replica(self):
        # Create a replica of the first share in the replica AZ and assert
        # it shows up in the replica listing; returns the replica dict.
        # Create the replica
        share_replica = self.create_share_replica(self.shares[0]["id"],
                                                  self.replica_zone,
                                                  cleanup_in_class=False)
        share_replicas = self.shares_v2_client.list_share_replicas(
            share_id=self.shares[0]["id"])
        # Ensure replica is created successfully.
        replica_ids = [replica["id"] for replica in share_replicas]
        self.assertIn(share_replica["id"], replica_ids)
        return share_replica
    def _verify_active_replica_count(self, share_id):
        # Assert exactly one replica of the share is in 'active' state.
        # List replicas
        replica_list = self.shares_v2_client.list_share_replicas(
            share_id=share_id)
        # Check if there is only 1 'active' replica before promotion.
        active_replicas = self._filter_replica_list(
            replica_list, constants.REPLICATION_STATE_ACTIVE)
        self.assertEqual(1, len(active_replicas))
    def _filter_replica_list(self, replica_list, r_state):
        # Iterate through replica list to filter based on replica_state
        return [replica for replica in replica_list
                if replica['replica_state'] == r_state]
    def _verify_in_sync_replica_promotion(self, share, original_replica):
        # Create an extra replica, wait until it is 'in_sync', promote it to
        # 'active' and verify the promotion took effect.
        # Verify that 'in-sync' replica has been promoted successfully
        # NOTE(Yogi1): Cleanup needs to be disabled for replica that is
        # being promoted since it will become the 'primary'/'active' replica.
        replica = self.create_share_replica(share["id"], self.replica_zone,
                                            cleanup=False)
        # Wait for replica state to update after creation
        self.shares_v2_client.wait_for_share_replica_status(
            replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')
        # Promote the first in_sync replica to active state
        promoted_replica = self.promote_share_replica(replica['id'])
        # Delete the demoted replica so promoted replica can be cleaned
        # during the cleanup of the share.
        self.addCleanup(self.delete_share_replica, original_replica['id'])
        self._verify_active_replica_count(share["id"])
        # Verify the replica_state for promoted replica
        promoted_replica = self.shares_v2_client.get_share_replica(
            promoted_replica["id"])
        self.assertEqual(constants.REPLICATION_STATE_ACTIVE,
                         promoted_replica["replica_state"])
    def _check_skip_promotion_tests(self):
        # Check if the replication type is right for replica promotion tests
        if (self.replication_type
                not in constants.REPLICATION_PROMOTION_CHOICES):
            msg = "Option backend_replication_type should be one of (%s)!"
            raise self.skipException(
                msg % ','.join(constants.REPLICATION_PROMOTION_CHOICES))
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_add_delete_share_replica(self):
        # Create the replica
        share_replica = self._verify_create_replica()
        # Delete the replica
        self.delete_share_replica(share_replica["id"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_add_access_rule_create_replica_delete_rule(self):
        # Rule added before replica creation must become 'active' on the
        # share once the replica exists.
        # Add access rule to the share
        access_type, access_to = self._get_access_rule_data_from_config()
        rule = self.shares_v2_client.create_access_rule(
            self.shares[0]["id"], access_type, access_to, 'ro')
        self.shares_v2_client.wait_for_access_rule_status(
            self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE)
        # Create the replica
        self._verify_create_replica()
        # Verify access_rules_status transitions to 'active' state.
        self.shares_v2_client.wait_for_share_status(
            self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
            status_attr='access_rules_status')
        # Delete rule and wait for deletion
        self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
                                                 rule["id"])
        self.shares_v2_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.shares[0]['id'])
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_create_replica_add_access_rule_delete_replica(self):
        # Rule added after replica creation; replica must still delete.
        access_type, access_to = self._get_access_rule_data_from_config()
        # Create the replica
        share_replica = self._verify_create_replica()
        # Add access rule
        self.shares_v2_client.create_access_rule(
            self.shares[0]["id"], access_type, access_to, 'ro')
        self.shares_v2_client.wait_for_share_status(
            self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
            status_attr='access_rules_status')
        # Delete the replica
        self.delete_share_replica(share_replica["id"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    @testtools.skipUnless(CONF.share.run_multiple_share_replicas_tests,
                          'Multiple share replicas tests are disabled.')
    def test_add_multiple_share_replicas(self):
        # Needs at least 3 pools so each replica lands on its own pool.
        rep_domain, pools = self.get_pools_for_replication_domain()
        if len(pools) < 3:
            msg = ("Replication domain %(domain)s has only %(count)s pools. "
                   "Need at least 3 pools to run this test." %
                   {"domain": rep_domain, "count": len(pools)})
            raise self.skipException(msg)
        # Add the replicas
        share_replica1 = self.create_share_replica(self.shares[0]["id"],
                                                   self.replica_zone,
                                                   cleanup_in_class=False)
        share_replica2 = self.create_share_replica(self.shares[0]["id"],
                                                   self.replica_zone,
                                                   cleanup_in_class=False)
        self.shares_v2_client.get_share_replica(share_replica2['id'])
        share_replicas = self.admin_client.list_share_replicas(
            share_id=self.shares[0]["id"])
        replica_host_set = {r['host'] for r in share_replicas}
        # Assert that replicas are created on different pools.
        msg = "More than one replica is created on the same pool."
        self.assertEqual(3, len(replica_host_set), msg)
        # Verify replicas are in the replica list
        replica_ids = [replica["id"] for replica in share_replicas]
        self.assertIn(share_replica1["id"], replica_ids)
        self.assertIn(share_replica2["id"], replica_ids)
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_promote_in_sync_share_replica(self):
        # Test promote 'in_sync' share_replica to 'active' state
        self._check_skip_promotion_tests()
        # Use a fresh share so promotion does not affect other tests.
        share = self.create_shares([self.creation_data])[0]
        original_replica = self.shares_v2_client.list_share_replicas(
            share["id"])[0]
        self._verify_in_sync_replica_promotion(share, original_replica)
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_add_rule_promote_share_replica_verify_rule(self):
        # Verify the access rule stays intact after share replica promotion
        self._check_skip_promotion_tests()
        share = self.create_shares([self.creation_data])[0]
        # Add access rule
        access_type, access_to = self._get_access_rule_data_from_config()
        rule = self.shares_v2_client.create_access_rule(
            share["id"], access_type, access_to, 'ro')
        self.shares_v2_client.wait_for_access_rule_status(
            share["id"], rule["id"], constants.RULE_STATE_ACTIVE)
        original_replica = self.shares_v2_client.list_share_replicas(
            share["id"])[0]
        self._verify_in_sync_replica_promotion(share, original_replica)
        # verify rule's values
        rules_list = self.shares_v2_client.list_access_rules(share["id"])
        self.assertEqual(1, len(rules_list))
        self.assertEqual(access_type, rules_list[0]["access_type"])
        self.assertEqual(access_to, rules_list[0]["access_to"])
        self.assertEqual('ro', rules_list[0]["access_level"])
    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_promote_and_promote_back(self):
        # Test promote back and forth between 2 share replicas
        self._check_skip_promotion_tests()
        # Create a new share
        share = self.create_shares([self.creation_data])[0]
        # Discover the original replica
        initial_replicas = self.shares_v2_client.list_share_replicas(
            share_id=share['id'])
        self.assertEqual(1, len(initial_replicas),
                         '%s replicas initially created for share %s' %
                         (len(initial_replicas), share['id']))
        original_replica = initial_replicas[0]
        # Create a new replica
        new_replica = self.create_share_replica(share["id"],
                                                self.replica_zone,
                                                cleanup_in_class=False)
        self.shares_v2_client.wait_for_share_replica_status(
            new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')
        # Promote the new replica to active and verify the replica states
        self.promote_share_replica(new_replica['id'])
        self._verify_active_replica_count(share["id"])
        self.shares_v2_client.wait_for_share_replica_status(
            original_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')
        # Promote the original replica back to active
        self.promote_share_replica(original_replica['id'])
        self._verify_active_replica_count(share["id"])
        self.shares_v2_client.wait_for_share_replica_status(
            new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_active_replication_state(self):
        # Verify the replica_state of first instance is set to active.
        replica = self.shares_v2_client.get_share_replica(self.instance_id1)
        self.assertEqual(
            constants.REPLICATION_STATE_ACTIVE, replica['replica_state'])
@testtools.skipUnless(CONF.share.run_replication_tests,
                      'Replication tests are disabled.')
@base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION)
class ReplicationActionsTest(base.BaseSharesMixedTest):
    """Read-only show/list tests against replicas pre-created in setup."""
    @classmethod
    def resource_setup(cls):
        # NOTE(review): this duplicates ReplicationTest.resource_setup almost
        # verbatim and then adds two replicas; a shared mixin would remove
        # the duplication.
        super(ReplicationActionsTest, cls).resource_setup()
        # Create share_type
        name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX)
        cls.admin_client = cls.admin_shares_v2_client
        cls.replication_type = CONF.share.backend_replication_type
        if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES:
            raise share_exceptions.ShareReplicationTypeException(
                replication_type=cls.replication_type
            )
        cls.zones = cls.get_availability_zones(client=cls.admin_client)
        cls.share_zone = cls.zones[0]
        cls.replica_zone = cls.zones[-1]
        cls.extra_specs = cls.add_extra_specs_to_dict(
            {"replication_type": cls.replication_type})
        share_type = cls.create_share_type(
            name,
            extra_specs=cls.extra_specs,
            client=cls.admin_client)
        cls.share_type = share_type["share_type"]
        # Create share with above share_type
        cls.creation_data = {'kwargs': {
            'share_type_id': cls.share_type['id'],
            'availability_zone': cls.share_zone,
        }}
        # Data for creating shares in parallel
        data = [cls.creation_data, cls.creation_data]
        cls.shares = cls.create_shares(data)
        cls.shares = [cls.shares_v2_client.get_share(s['id']) for s in
                      cls.shares]
        cls.instance_id1 = cls._get_instance(cls.shares[0])
        cls.instance_id2 = cls._get_instance(cls.shares[1])
        # Create replicas to 2 shares
        cls.replica1 = cls.create_share_replica(cls.shares[0]["id"],
                                                cls.replica_zone,
                                                cleanup_in_class=True)
        cls.replica2 = cls.create_share_replica(cls.shares[1]["id"],
                                                cls.replica_zone,
                                                cleanup_in_class=True)
    @classmethod
    def _get_instance(cls, share):
        # Return the id of the first instance of the given share.
        share_instances = cls.admin_client.get_instances_of_share(share["id"])
        return share_instances[0]["id"]
    def _validate_replica_list(self, replica_list, detail=True):
        # Assert each replica dict has exactly the expected keys and that no
        # replica id appears more than once in the listing.
        # Verify keys
        if detail:
            keys = DETAIL_KEYS
        else:
            keys = SUMMARY_KEYS
        for replica in replica_list:
            self.assertEqual(sorted(keys), sorted(replica.keys()))
            # Check for duplicates
            replica_id_list = [sr["id"] for sr in replica_list
                               if sr["id"] == replica["id"]]
            msg = "Replica %s appears %s times in replica list." % (
                replica['id'], len(replica_id_list))
            self.assertEqual(1, len(replica_id_list), msg)
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_show_share_replica(self):
        replica = self.shares_v2_client.get_share_replica(self.replica1["id"])
        actual_keys = sorted(list(replica.keys()))
        detail_keys = sorted(DETAIL_KEYS)
        self.assertEqual(detail_keys, actual_keys,
                         'Share Replica %s has incorrect keys; '
                         'expected %s, got %s.' % (replica["id"],
                                                   detail_keys, actual_keys))
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_detail_list_share_replicas_for_share(self):
        # List replicas for share
        replica_list = self.shares_v2_client.list_share_replicas(
            share_id=self.shares[0]["id"])
        replica_ids_list = [rep['id'] for rep in replica_list]
        self.assertIn(self.replica1['id'], replica_ids_list,
                      'Replica %s was not returned in the list of replicas: %s'
                      % (self.replica1['id'], replica_list))
        # Verify keys
        self._validate_replica_list(replica_list)
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_detail_list_share_replicas_for_all_shares(self):
        # List replicas for all available shares
        replica_list = self.shares_v2_client.list_share_replicas()
        replica_ids_list = [rep['id'] for rep in replica_list]
        for replica in [self.replica1, self.replica2]:
            self.assertIn(replica['id'], replica_ids_list,
                          'Replica %s was not returned in the list of '
                          'replicas: %s' % (replica['id'], replica_list))
        # Verify keys
        self._validate_replica_list(replica_list)
    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    def test_summary_list_share_replicas_for_all_shares(self):
        # List replicas
        replica_list = self.shares_v2_client.list_share_replicas_summary()
        # Verify keys
        self._validate_replica_list(replica_list, detail=False)
| apache-2.0 |
dribnet/keras | keras/layers/embeddings.py | 67 | 4853 | from __future__ import absolute_import
import theano
import theano.tensor as T
from .. import activations, initializations, regularizers, constraints
from ..layers.core import Layer, MaskedLayer
from ..utils.theano_utils import sharedX
from ..constraints import unitnorm
class Embedding(Layer):
    '''
    Turn positive integers (indexes) into dense vectors of fixed size.
    eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
    @input_dim: size of vocabulary (highest input integer + 1)
    @output_dim: size of dense representation
    '''
    def __init__(self, input_dim, output_dim, init='uniform',
                 W_regularizer=None, activity_regularizer=None, W_constraint=None,
                 mask_zero=False, weights=None):
        super(Embedding, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Symbolic 2-D integer matrix input.
        self.input = T.imatrix()
        # Embedding matrix of shape (input_dim, output_dim); the layer's
        # only trainable parameter.
        self.W = self.init((self.input_dim, self.output_dim))
        # When mask_zero is True, index 0 is treated as padding and masked
        # out by get_output_mask().
        self.mask_zero = mask_zero
        self.params = [self.W]
        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]
        self.regularizers = []
        self.W_regularizer = regularizers.get(W_regularizer)
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if weights is not None:
            self.set_weights(weights)
    def get_output_mask(self, train=None):
        """Return a mask tensor (1 where input != 0, 0 where input == 0),
        or None when mask_zero is disabled."""
        X = self.get_input(train)
        if not self.mask_zero:
            return None
        else:
            return T.ones_like(X) * (1 - T.eq(X, 0))
    def get_output(self, train=False):
        """Look up the embedding vectors for the integer indexes in X."""
        X = self.get_input(train)
        # Advanced indexing: one row of W per input index.
        out = self.W[X]
        return out
    def get_config(self):
        """Return the layer configuration dict used for serialization."""
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "output_dim": self.output_dim,
                "init": self.init.__name__,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None}
class WordContextProduct(Layer):
    '''
    This layer turns a pair of words (a pivot word + a context word,
    ie. a word from the same context, or a random, out-of-context word),
    indentified by their index in a vocabulary, into two dense reprensentations
    (word representation and context representation).
    Then it returns activation(dot(pivot_embedding, context_embedding)),
    which can be trained to encode the probability
    of finding the context word in the context of the pivot word
    (or reciprocally depending on your training procedure).
    The layer ingests integer tensors of shape:
    (nb_samples, 2)
    and outputs a float tensor of shape
    (nb_samples, 1)
    The 2nd dimension encodes (pivot, context).
    input_dim is the size of the vocabulary.
    For more context, see Mikolov et al.:
    Efficient Estimation of Word reprensentations in Vector Space
    http://arxiv.org/pdf/1301.3781v3.pdf
    '''
    def __init__(self, input_dim, proj_dim=128,
                 init='uniform', activation='sigmoid', weights=None):
        super(WordContextProduct, self).__init__()
        self.input_dim = input_dim
        self.proj_dim = proj_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        # Symbolic 2-D integer matrix input; column 0 is the pivot word,
        # column 1 the context word.
        self.input = T.imatrix()
        # two different embeddings for pivot word and its context
        # because p(w|c) != p(c|w)
        self.W_w = self.init((input_dim, proj_dim))
        self.W_c = self.init((input_dim, proj_dim))
        self.params = [self.W_w, self.W_c]
        if weights is not None:
            self.set_weights(weights)
    def get_output(self, train=False):
        """Return activation(dot(pivot_embedding, context_embedding)),
        shaped (nb_samples, 1)."""
        X = self.get_input(train)
        w = self.W_w[X[:, 0]]  # nb_samples, proj_dim
        c = self.W_c[X[:, 1]]  # nb_samples, proj_dim
        # Row-wise dot product of the two embeddings.
        dot = T.sum(w * c, axis=1)
        # Reshape the 1-D result to (nb_samples, 1).
        dot = theano.tensor.reshape(dot, (X.shape[0], 1))
        return self.activation(dot)
    def get_config(self):
        """Return the layer configuration dict used for serialization."""
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "proj_dim": self.proj_dim,
                "init": self.init.__name__,
                "activation": self.activation.__name__}
| mit |
conejoninja/pelisalacarta | python/main-classic/channels/torrentestrenos.py | 6 | 22735 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para pordede
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
# Channel metadata read by the pelisalacarta core.
__category__ = "A"
__type__ = "generic"
__title__ = "Torrentestrenos"
__channel__ = "torrentestrenos"
__language__ = "ES"
# Base URL of the scraped site.
host = "http://www.torrentestrenos.com"
# Debug flag from the add-on settings.
DEBUG = config.get_setting("debug")
def isGeneric():
    """Channel API flag: always True for this channel (plugin convention)."""
    return True
def login():
    """Log in to torrentestrenos.com with the credentials from the settings.

    The POST is issued only for its session side effect (cookies handled by
    scrapertools); the response body is intentionally discarded.
    """
    url = "http://www.torrentestrenos.com/index.php"
    # urlencode escapes special characters ('&', '=', spaces, ...) in the
    # credentials, which the previous raw string concatenation did not.
    # A list of tuples keeps the original parameter order.
    post = urllib.urlencode([
        ("login", config.get_setting("torrentestrenosuser")),
        ("password", config.get_setting("torrentestrenospassword")),
        ("Submit", "ENTRAR"),
    ])
    scrapertools.cache_page(url, post=post)
def mainlist(item):
    """Build the channel root menu.

    If the account is not enabled in the add-on settings, show a single entry
    that opens the configuration; otherwise log in and list the site's
    categories plus a search entry.
    """
    logger.info("pelisalacarta.torrentestrenos mainlist")
    itemlist = []
    if config.get_setting("torrentestrenosaccount")!="true":
        # Account disabled: only offer the settings shortcut.
        itemlist.append( Item( channel=__channel__ , title="Habilita tu cuenta en la configuracin..." , action="openconfig" , url="" , folder=False ) )
    else:
        # Authenticate once so the category pages are accessible.
        login()
        itemlist.append( Item(channel=__channel__, title="Exterenos Cartelera" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_1-id_en_estrenos_de_cartelera.html", thumbnail="http://s6.postimg.org/7fpx8gnrl/tecartelerath.jpg", fanart="http://s6.postimg.org/c84bxn7td/tecartelera.jpg"))
        itemlist.append( Item(channel=__channel__, title="PelisMicroHD" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_41-id_en_peliculas_microhd.html", thumbnail="http://s6.postimg.org/copjk2vkh/temhdthu.jpg", fanart="http://s6.postimg.org/e8zgw7tch/temhdfan.jpg"))
        itemlist.append( Item(channel=__channel__, title="PelisBluray-rip" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_33-id_en_peliculas_bluray--rip.html", thumbnail="http://s6.postimg.org/wptvn19ap/teripthub.jpg", fanart="http://s6.postimg.org/j3t5uhro1/teripfan6.jpg"))
        itemlist.append( Item(channel=__channel__, title="PelisDvd-Rip" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_9-id_en_peliculas_dvd--rip.html", thumbnail="http://s6.postimg.org/gcaeasmkx/tethubdvdrip.jpg", fanart="http://s6.postimg.org/m6pak4h8x/tefandvdrip.jpg"))
        itemlist.append( Item(channel=__channel__, title="Series" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_10-id_en_series.html", thumbnail="http://s6.postimg.org/gl7gtt5xt/teserthub2.jpg", fanart="http://s6.postimg.org/oa4tcfzv5/teserfan.jpg"))
        itemlist.append( Item(channel=__channel__, title="SeriesHD" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_38-id_en_series_hd.html", thumbnail="http://s6.postimg.org/bk02sfyhd/tesehdth.jpg", fanart="http://s6.postimg.org/ralnszb69/teserhdfan.jpg" ))
        itemlist.append( Item(channel=__channel__, title="Documentales" , action="peliculas" , url="http://www.torrentestrenos.com/ver_torrents_23-id_en_documentales.html", thumbnail="http://s6.postimg.org/5t5b0z13l/tedocuthub.jpg", fanart="http://s6.postimg.org/aqivm332p/tedocufan.jpg" ))
        itemlist.append( Item(channel=__channel__, action="search", title="Buscar...", url="", thumbnail="http://s6.postimg.org/42qvd88y9/tesearchfan.jpg", fanart="http://s6.postimg.org/x7p12vyvl/tesearchfan2.jpg"))
    return itemlist
def openconfig(item):
    """Open the add-on settings dialog on XBMC/Boxee platforms.

    Returns an empty list so the current channel view stays unchanged.
    """
    platform = config.get_platform()
    if any(name in platform for name in ("xbmc", "boxee")):
        config.open_settings( )
    return []
def search(item,texto):
    """Run a site search for *texto* and return the resulting item list.

    Spaces in the query are encoded as '+'. Any failure is logged and an
    empty list returned so one broken channel does not abort the global
    search. (The old version ended with an unreachable ``return itemlist``
    that referenced an undefined name; it has been removed.)
    """
    logger.info("pelisalacarta.torrentestrenos search")
    texto = texto.replace(" ","+")
    item.url = "http://www.torrentestrenos.com/main.php?q=%s" % (texto)
    try:
        return buscador(item)
    # The exception is swallowed so the global search is not interrupted
    # when a single channel fails; 'except Exception' (instead of a bare
    # 'except') still lets SystemExit/KeyboardInterrupt propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
def buscador(item):
    """Parse the search-results page at item.url into a list of Items.

    Shows a "no results" placeholder entry when the page contains no
    torrent containers.
    """
    logger.info("pelisalacarta.torrentstrenos buscador")
    itemlist = []
    # Download the page and strip whitespace noise so the regex below can
    # match across what were originally multiple lines.
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ","",data)
    #<div class="torrent-container-2 clearfix"><img class="torrent-image" src="uploads/torrents/images/thumbnails2/4441_step--up--all--in----blurayrip.jpg" alt="Imagen de Presentación" /><div class="torrent-info"><h4><a href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs</a> </h4><p>19-12-2014</p><p>Subido por: <strong>TorrentEstrenos</strong> en <a href="/ver_torrents_41-id_en_peliculas_microhd.html" title="Peliculas MICROHD">Peliculas MICROHD</a><br />Descargas <strong><a href="#" style="cursor:default">46</a></strong></p><a class="btn-download" href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Descargar</a></div></div>
    # Captures: thumbnail src, detail-page href, title, upload date.
    patron = '<div class="torrent-container-2 clearfix">.*?'
    patron += 'src="([^"]+)".*? '
    patron += 'href ="([^"]+)".*?'
    patron += '>([^<]+)</a>.*?'
    patron += '<p>([^<]+)</p>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)==0 :
        # Placeholder entry when the search returned nothing.
        itemlist.append( Item(channel=__channel__, title="[COLOR gold][B]No se encontraron coincidencias...[/B][/COLOR]", thumbnail ="http://s6.postimg.org/w7nc1wh8x/torrnoisethumb.png", fanart ="http://s6.postimg.org/jez81z5n5/torrnoisefan.jpg",folder=False) )
    for scrapedthumbnail, scrapedurl, scrapedtitulo, scrapedcreatedate in matches:
        # Scraped paths are site-relative; prefix the host.
        scrapedtitulo = scrapedtitulo + "(Torrent:" + scrapedcreatedate + ")"
        scrapedthumbnail = "http://www.torrentestrenos.com/" + scrapedthumbnail
        scrapedurl = "http://www.torrentestrenos.com" + scrapedurl
        itemlist.append( Item(channel=__channel__, title=scrapedtitulo, url=scrapedurl, action="findvideos", thumbnail=scrapedthumbnail, fanart="http://s6.postimg.org/44tc7dtg1/tefanartgeneral.jpg", fulltitle=scrapedtitulo, folder=True) )
    return itemlist
def peliculas(item):
    """Parse a category listing page into Items, plus a next-page entry.

    Movie categories route to fanart_pelis, everything else (series,
    documentaries) to fanart_series.
    """
    logger.info("pelisalacarta.torrentstrenos peliculas")
    itemlist = []
    # Download the page and strip whitespace noise for the regex below.
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ","",data)
    #<div class="torrent-container-2 clearfix"><img class="torrent-image" src="uploads/torrents/images/thumbnails2/4441_step--up--all--in----blurayrip.jpg" alt="Imagen de Presentación" /><div class="torrent-info"><h4><a href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs</a> </h4><p>19-12-2014</p><p>Subido por: <strong>TorrentEstrenos</strong> en <a href="/ver_torrents_41-id_en_peliculas_microhd.html" title="Peliculas MICROHD">Peliculas MICROHD</a><br />Descargas <strong><a href="#" style="cursor:default">46</a></strong></p><a class="btn-download" href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Descargar</a></div></div>
    # Captures: thumbnail src, detail-page href, title, upload date.
    patron = '<div class="torrent-container-2 clearfix">.*?'
    patron += 'src="([^"]+)".*? '
    patron += 'href ="([^"]+)".*?'
    patron += '>([^<]+)</a>.*?'
    patron += '<p>([^<]+)</p>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedthumbnail, scrapedurl, scrapedtitulo, scrapedcreatedate in matches:
        # Wrap the pieces in Kodi color markup before composing the title.
        scrapedtitulo= scrapedtitulo.replace(scrapedtitulo,"[COLOR khaki]"+scrapedtitulo+"[/COLOR]")
        scrapedcreatedate= scrapedcreatedate.replace(scrapedcreatedate,"[COLOR white]"+scrapedcreatedate+"[/COLOR]")
        torrent_tag="[COLOR green]Torrent:[/COLOR]"
        scrapedtitulo = scrapedtitulo + "(" +torrent_tag + scrapedcreatedate + ")"
        scrapedthumbnail = "http://www.torrentestrenos.com/" + scrapedthumbnail
        scrapedurl = "http://www.torrentestrenos.com" + scrapedurl
        # Movie categories get movie fanart; everything else series fanart.
        if "peliculas" in item.url or "cartelera" in item.url:
            action = "fanart_pelis"
        else:
            action = "fanart_series"
        itemlist.append( Item(channel=__channel__, title=scrapedtitulo, url=scrapedurl, action=action, thumbnail=scrapedthumbnail, fulltitle=scrapedtitulo, fanart="http://s6.postimg.org/44tc7dtg1/tefanartgeneral.jpg", folder=True) )
    ## Extract the paginator ##
    # a class="paginator-items" href="/ver_torrents_41-id_en_peliculas_microhd_pag_1.html" title="Pagina de torrent 1">1</a>
    if "_pag_" in item.url:
        current_page_number = int(scrapertools.get_match(item.url,'_pag_(\d+)'))
        item.url = re.sub(r"_pag_\d+","_pag_%s",item.url)
    else:
        # First page: turn the URL into a template for page numbers.
        current_page_number = 1
        item.url = item.url.replace(".html","_pag_%s.html")
    next_page_number = current_page_number + 1
    next_page = item.url % (next_page_number)
    title= "[COLOR green]Pagina siguiente>>[/COLOR]"
    # Only add the entry when the page actually links to the next page.
    if next_page.replace("http://www.torrentestrenos.com","") in data:
        itemlist.append( Item(channel=__channel__, title=title, url=next_page, action="peliculas", thumbnail="http://s6.postimg.org/4hpbrb13l/texflecha2.png", fanart="http://s6.postimg.org/44tc7dtg1/tefanartgeneral.jpg", folder=True) )
    return itemlist
# Release/quality tokens stripped from scraped titles before querying TMDb.
# Order matters: composite tokens ('5.1Castellano', '5.1Thailandes') must be
# removed before their substrings ('5.1', 'Castellano').
TITLE_NOISE_TOKENS = (
    'MicroHD', '1080p', '720p', 'Bluray', 'BluRay', 'Line', 'LINE',
    'CamRip', 'Camrip', 'Web', 'Screener', 'AC3', 'Ac3',
    '5.1Castellano', '5.1Thailandes', '5.1', '2.0',
    'Castellano', 'Frances', 'Ingles', 'Latino', 'Part 1', 'Subs',
)
def clean_fanart_title(title):
    """Strip release noise from a scraped title and encode spaces as %20.

    Replaces the long chain of str.replace() calls that used to live inline
    in fanart_pelis; the token order is preserved exactly.
    """
    # The old pattern r"3D|SBS|-|" ended with an empty alternative that
    # matched the empty string at every position; dropping it is safe here
    # (the replacement is '') and removes the latent bug.
    title = re.sub(r"3D|SBS|-", "", title)
    title = title.replace('Espaol', 'Espanol')
    for token in TITLE_NOISE_TOKENS:
        title = title.replace(token, '')
    return title.replace(' ', '%20')
def fanart_pelis(item):
    """List the movie with a TMDb backdrop as fanart, plus an 'Info' entry.

    Scrapes the torrent detail page for the title, cleans it and queries
    TMDb for a backdrop image; falls back to the item thumbnail when no
    backdrop is found.
    """
    logger.info("pelisalacarta.torrentestrenos fanart_pelis")
    itemlist = []
    url = item.url
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|DTS|TS|DVD|\(.*?\)|HDTV|HDRip|Rip|RIP|\(.*?\)|Duologia|Espa.*?ol|2015|2014| ","",data)
    title = scrapertools.get_match(data,'<h4>([^<]+)</h4>')
    title = clean_fanart_title(title)
    url = "http://api.themoviedb.org/3/search/movie?api_key=57983e31fb435df4df77afb854740ea9&query=" + title + "&language=es&include_adult=false"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}| ","",data)
    # First backdrop_path of the first TMDb result page.
    patron = '"page":1.*?"backdrop_path":"(.*?)".*?,"id"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)==0:
        # No TMDb hit: reuse the thumbnail as fanart.
        item.extra = item.thumbnail
        itemlist.append( Item(channel=__channel__, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True) )
    else:
        for fan in matches:
            fanart = "https://image.tmdb.org/t/p/original" + fan
            item.extra = fanart
            itemlist.append( Item(channel=__channel__, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True) )
    # The old code built this label via title.replace(title, ...), which is
    # just a roundabout assignment.
    title = "[COLOR skyblue][B]Info[/B][/COLOR]"
    itemlist.append( Item(channel=__channel__, action="info", title=title, url=item.url, thumbnail=item.thumbnail, fanart=item.extra, folder=False ))
    return itemlist
# Release-name noise stripped from scraped titles before querying TheTVDB.
# Order matters: composed tokens ('5.1Castellano') must be removed before
# their prefixes ('5.1', 'Castellano').
_TITLE_NOISE = [
    ('Espaol', 'Espanol'), ('MicroHD', ''), ('1080p', ''), ('720p', ''),
    ('Bluray', ''), ('BluRay', ''), ('Line', ''), ('LINE', ''),
    ('CamRip', ''), ('Camrip', ''), ('Web', ''), ('Screener', ''),
    ('AC3', ''), ('Ac3', ''), ('5.1Castellano', ''), ('5.1Thailandes', ''),
    ('5.1', ''), ('2.0', ''), ('Castellano', ''), ('Frances', ''),
    ('Ingles', ''), ('Latino', ''), ('Part 1', ''), ('Subs', ''),
]


def _clean_title(title):
    """Strip release-name noise from *title* and URL-encode spaces.

    Consolidates the 25-step replace chain that used to be duplicated in
    both branches of fanart_series().  The trailing empty alternative of
    the old pattern (r"3D|SBS|-|") was a no-op and has been dropped.
    """
    title = re.sub(r"3D|SBS|-", "", title)
    for noise, replacement in _TITLE_NOISE:
        title = title.replace(noise, replacement)
    return title.replace(' ', '%20')


def fanart_series(item):
    """Attach thetvdb.com fanart to a series item.

    Scrapes the series title from the page at item.url, queries TheTVDB
    for the series id and its banners, and emits one Item per banner (or
    a single fallback Item when nothing is found), followed by a final
    "Info" entry.  Behaviour is identical to the previous two-branch
    implementation; only the duplicated title clean-up was factored out.
    """
    logger.info("pelisalacarta.torrentestrenos fanart_series")
    itemlist = []
    data = scrapertools.cachePage(item.url)

    # Season pages ("_s" urls) carry the season suffix after the title, so
    # the extraction pattern and the strip pattern differ slightly.
    if "_s" in item.url:
        data = re.sub(r"\n|\r|\t|\s{2}|DTS|TS|\(.*?\)|DVD|HDTV|HDRip|Rip|RIP|\(.*?\)|Duologia|Espa.*?ol|2015|2014| ", "", data)
        title = scrapertools.get_match(data, '<h4>(.*?) S')
    else:
        data = re.sub(r"\n|\r|\t|\s{2}|DTS|TS|\(.*?\)|DVD|HDTV|HDRip|Rip|RIP|\(.*?\)|Episodio [0-9]|Espa.*?ol|2015|2014| ", "", data)
        title = scrapertools.get_match(data, '<h4>(.*?)</h4>')
    title = _clean_title(title)

    url = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es"
    if "Erase%20Una%20Vez" in title:
        # Disambiguate "Erase una vez": TheTVDB needs the year suffix.
        url = "http://thetvdb.com/api/GetSeries.php?seriesname=Erase%20una%20vez%20(2011)&language=es"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    series_ids = re.compile('<Data><Series><seriesid>([^<]+)</seriesid>', re.DOTALL).findall(data)

    if len(series_ids) == 0:
        # No series found: keep whatever artwork the item already carries.
        itemlist.append(Item(channel=__channel__, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True))
    else:
        for id_serie in series_ids:
            url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id_serie + "/banners.xml"
            if "Castle" in title:
                # The generic search resolves to the wrong "Castle" series;
                # use the known TheTVDB id instead.
                url = "http://thetvdb.com/api/1D62F2F90030C444/series/83462/banners.xml"
            data = scrapertools.cachePage(url)
            data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
            banners = re.compile('<Banners><Banner>.*?<VignettePath>(.*?)</VignettePath>', re.DOTALL).findall(data)
            if len(banners) == 0:
                itemlist.append(Item(channel=__channel__, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True))
            for fan in banners:
                item.extra = "http://thetvdb.com/banners/" + fan
                itemlist.append(Item(channel=__channel__, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True))

    info_title = "[COLOR skyblue][B]Info[/B][/COLOR]"
    itemlist.append(Item(channel=__channel__, action="info", title=info_title, url=item.url, thumbnail=item.thumbnail, fanart=item.extra, folder=False))
    return itemlist
# HTML accent entities replaced by their ASCII fallback in scraped plots.
# NOTE(review): the original mapped &ntilde;/&Ntilde; to '' (dropping the
# letter); kept as-is to preserve behaviour, but 'n'/'N' look intended.
_PLOT_ENTITIES = [
    ('&aacute;', 'a'), ('&iacute;', 'i'), ('&eacute;', 'e'),
    ('&oacute;', 'o'), ('&uacute;', 'u'), ('&ntilde;', ''),
    ('&Aacute;', 'A'), ('&Iacute;', 'I'), ('&Eacute;', 'E'),
    ('&Oacute;', 'O'), ('&Uacute;', 'U'), ('&Ntilde;', ''),
]


def _clean_plot(plot):
    """Replace Latin-1 HTML accent entities with plain ASCII characters."""
    for entity, plain in _PLOT_ENTITIES:
        plot = plot.replace(entity, plain)
    return plot


def findvideos(item):
    """List the playable torrent links scraped from the detail page.

    Returns one playable Item per torrent link found, colouring the title
    and resolving the thumbnail against the site root.  The 12 chained
    .replace() calls for accent entities were consolidated into
    _clean_plot(); output is unchanged.
    """
    logger.info("pelisalacarta.torrentestrenos findvideos")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    patron = '<img class="torrent-image.*? '
    patron += 'src="([^"]+)".*?'
    patron += '<h4>([^<]+)</h4>.*?'
    patron += '</p><p>([^<]+)</p><p>.*?'
    patron += 'href =".*?l=([^"]+)"'
    for scrapedthumbnail, scrapedtitulo, scrapedplot, scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        scrapedtitulo = "[COLOR green]Ver--[/COLOR]" + "[COLOR white]" + scrapedtitulo + "[/COLOR]"
        scrapedthumbnail = "http://www.torrentestrenos.com/" + scrapedthumbnail
        scrapedplot = _clean_plot(scrapedplot)
        itemlist.append(Item(channel=__channel__, title=scrapedtitulo, url=scrapedurl, action="play", server="torrent", thumbnail=scrapedthumbnail, fanart=item.fanart, plot=scrapedplot, folder=False))
    return itemlist
# HTML accent entities replaced by their ASCII fallback in scraped plots.
# NOTE(review): &ntilde;/&Ntilde; map to '' (dropping the letter) exactly
# as the original code did; 'n'/'N' look intended but were never used.
_PLOT_ENTITIES = [
    ('&aacute;', 'a'), ('&iacute;', 'i'), ('&eacute;', 'e'),
    ('&oacute;', 'o'), ('&uacute;', 'u'), ('&ntilde;', ''),
    ('&Aacute;', 'A'), ('&Iacute;', 'I'), ('&Eacute;', 'E'),
    ('&Oacute;', 'O'), ('&Uacute;', 'U'), ('&Ntilde;', ''),
]


def _clean_plot(plot):
    """Replace Latin-1 HTML accent entities with plain ASCII characters."""
    for entity, plain in _PLOT_ENTITIES:
        plot = plot.replace(entity, plain)
    return plot


def info(item):
    """Show the title and synopsis of the current item in a TextBox dialog.

    Scrapes title and plot from item.url, applies Kodi colour markup and
    displays them modally.  The duplicated accent-entity replace chain was
    consolidated into _clean_plot(); displayed text is unchanged.
    """
    logger.info("pelisalacarta.torrentestrenos info")
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    title = scrapertools.get_match(data, '<h4>(.*?)</h4>')
    title = "[COLOR aqua][B]" + title + "[/B][/COLOR]"

    scrapedplot = scrapertools.get_match(data, '</p><p>([^<]+)</p><p>')
    scrapedplot = "[COLOR green][B]Sinopsis[/B][/COLOR]" + "[CR]" + "[COLOR white]" + scrapedplot + "[/COLOR]"
    scrapedplot = _clean_plot(scrapedplot)

    fanart = "http://s11.postimg.org/qu66qpjz7/zentorrentsfanart.jpg"
    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, scrapedplot, fanart)
    del tbd
    return
# GUI helper: only usable inside Kodi/XBMC.  Outside Kodi the import fails
# and the class is silently not defined (callers live in Kodi-only paths).
try:
    import xbmc, xbmcgui

    class TextBox( xbmcgui.WindowXMLDialog ):
        """ Create a skinned textbox window """

        def __init__( self, *args, **kwargs):
            pass

        def onInit( self ):
            # Controls 5 and 1 are presumably the text area and heading of
            # DialogTextViewer.xml -- TODO confirm against the skin; skins
            # lacking them are tolerated via the blanket except.
            try:
                self.getControl( 5 ).setText( self.text )
                self.getControl( 1 ).setLabel( self.title )
            except: pass

        def onClick( self, controlId ):
            pass

        def onFocus( self, controlId ):
            pass

        def onAction( self, action ):
            # Any remote/keyboard action dismisses the dialog.
            self.close()

        def ask(self, title, text, image ):
            # Stash the strings read back by onInit(), then block until close.
            self.title = title
            self.text = text
            self.doModal()
except:
    pass
| gpl-3.0 |
lunyang/pylearn2 | pylearn2/datasets/filetensor.py | 45 | 8836 | """
Read and write the matrix file format described at
http://www.cs.nyu.edu/~ylclab/data/norb-v1.0/index.html
The format is for dense tensors:
- magic number indicating type and endianness - 4bytes
- rank of tensor - int32
- dimensions - int32, int32, int32, ...
- <data>
The number of dimensions and rank is slightly tricky:
- for scalar: rank=0, dimensions = [1, 1, 1]
- for vector: rank=1, dimensions = [?, 1, 1]
- for matrix: rank=2, dimensions = [?, ?, 1]
For rank >= 3, the number of dimensions matches the rank exactly.
"""
import bz2
import gzip
import logging
import numpy
from pylearn2.utils.exc import reraise_as
logger = logging.getLogger(__name__)
def _prod(lst):
"""
.. todo::
WRITEME
"""
p = 1
for l in lst:
p *= l
return p
# Map from the 4-byte magic number at the start of a filetensor file to the
# (numpy dtype name, element size in bytes) it encodes.
_magic_dtype = {
    0x1E3D4C51: ('float32', 4),
    # 0x1E3D4C52 : ('packed matrix', 0), #what is a packed matrix?
    0x1E3D4C53: ('float64', 8),
    0x1E3D4C54: ('int32', 4),
    0x1E3D4C55: ('uint8', 1),
    0x1E3D4C56: ('int16', 2),
}

# Inverse mapping: numpy dtype name -> magic number written into headers.
_dtype_magic = {
    'float32': 0x1E3D4C51,
    # 'packed matrix': 0x1E3D4C52,
    'float64': 0x1E3D4C53,
    'int32': 0x1E3D4C54,
    'uint8': 0x1E3D4C55,
    'int16': 0x1E3D4C56
}
def _read_int32(f):
"""unpack a 4-byte integer from the current position in file f"""
s = f.read(4)
s_array = numpy.fromstring(s, dtype='int32')
return s_array.item()
def read_header(f, debug=False, fromgzip=None):
    """Parse a filetensor header from the current position of *f*.

    Parameters
    ----------
    f : file or gzip.GzipFile
        An open file handle.
    fromgzip : bool or None
        If None, the handle type is detected automatically.

    Returns
    -------
    tuple
        (dtype name, element size in bytes, rank, shape, total element count)
    """
    if fromgzip is None:
        fromgzip = isinstance(f, (gzip.GzipFile, bz2.BZ2File))

    # The first int32 encodes the data type (and implicitly endianness).
    magic = _read_int32(f)
    magic_t, elsize = _magic_dtype[magic]
    if debug:
        logger.debug('header magic {0} {1} {2}'.format(magic, magic_t, elsize))
    if magic_t == 'packed matrix':
        raise NotImplementedError('packed matrix not supported')

    # Rank of the tensor; the file always stores at least 3 dimension slots.
    ndim = _read_int32(f)
    if debug:
        logger.debug('header ndim {0}'.format(ndim))
    n_slots = max(ndim, 3)
    if fromgzip:
        # Compressed streams do not support numpy.fromfile: read bytes first.
        raw = f.read(n_slots * 4)
        dim = numpy.fromstring(raw, dtype='int32')[:ndim]
    else:
        dim = numpy.fromfile(f, dtype='int32', count=n_slots)[:ndim]
    dim_size = _prod(dim)
    if debug:
        logger.debug('header dim {0} {1}'.format(dim, dim_size))
    return magic_t, elsize, ndim, dim, dim_size
class arraylike(object):
    """
    Provide an array-like interface to the filetensor in f.

    The rank parameter to __init__ controls how this object interprets the
    underlying tensor.

    Its behaviour should be clear from the following example.
    Suppose the underlying tensor is MxNxK.

    - If rank is 0, self[i] will be a scalar and len(self) == M*N*K.
    - If rank is 1, self[i] is a vector of length K, and len(self) == M*N.
    - If rank is 3, self[i] is a tensor of shape (M, N, K), and len(self) == 1.
    - If rank is 5, self[i] is a tensor of shape (1, 1, M, N, K), and
      len(self) == 1.

    Note: Objects of this class generally require exclusive use of the
    underlying file handle, because they call seek() every time you
    access an element.
    """

    # File-like object the tensor is read from.
    f = None
    # numpy data type of the array.
    magic_t = None
    # Number of bytes per scalar element.
    elsize = None
    # Rank of the underlying tensor.
    ndim = None
    # Tuple of array dimensions (aka shape).
    dim = None
    # Number of scalars in the tensor (prod of dim).
    dim_size = None
    # File position of the first element of the tensor.
    f_start = None
    # Tuple of array dimensions of the block that we read.
    readshape = None
    # Number of elements we must read for each block.
    readsize = None

    def __init__(self, f, rank=0, debug=False):
        """Parse the header of *f* and precompute the read geometry for
        blocks of the given *rank*."""
        self.f = f
        (self.magic_t,
         self.elsize,
         self.ndim,
         self.dim,
         self.dim_size) = read_header(f, debug)
        self.f_start = f.tell()

        if rank <= self.ndim:
            self.readshape = tuple(self.dim[self.ndim - rank:])
        else:
            self.readshape = tuple(self.dim)
        if rank <= self.ndim:
            padding = tuple()
        else:
            # Requested rank exceeds the stored rank: pad with leading 1s.
            padding = (1,) * (rank - self.ndim)
        self.returnshape = padding + self.readshape
        self.readsize = _prod(self.readshape)
        if debug:
            # Bug fix: the format string contained only {0} {1} for three
            # arguments, so readsize was silently never logged.
            logger.debug('READ PARAM {0} {1} {2}'.format(self.readshape,
                                                         self.returnshape,
                                                         self.readsize))

    def __len__(self):
        """Number of rank-sized blocks stored in the tensor."""
        return _prod(self.dim[:self.ndim - len(self.readshape)])

    def __getitem__(self, idx):
        """Seek to block *idx* and return it as an ndarray with shape
        ``self.returnshape``."""
        if idx >= len(self):
            raise IndexError(idx)
        self.f.seek(self.f_start + idx * self.elsize * self.readsize)
        return numpy.fromfile(self.f,
                              dtype=self.magic_t,
                              count=self.readsize).reshape(self.returnshape)
#
# TODO: implement item selection:
# e.g. load('some mat', subtensor=(:6, 2:5))
#
# This function should be memory efficient by:
# - allocating an output matrix at the beginning
# - seeking through the file, reading subtensors from multiple places
def read(f, subtensor=None, debug=False):
    """
    Load all or part of file tensorfile 'f' into a numpy ndarray.

    Parameters
    ----------
    f : file, gzip.GzipFile or bz2.BZ2File like object
        Open file descriptor to read data from.
    subtensor : None or a slice
        If not None, only the selected leading-dimension rows are read, so
        ``read(f, subtensor) <===> read(f)[subtensor]`` while reading less
        data from disk.  Only step-1 slices are supported.

    Returns
    -------
    y : ndarray
        Data read from disk.
    """
    magic_t, elsize, ndim, dim, dim_size = read_header(f, debug)
    f_start = f.tell()

    rval = None
    if isinstance(f, (gzip.GzipFile, bz2.BZ2File)):
        assert subtensor is None, \
            "Haven't implemented the subtensor case for gzip file"
        d = f.read(_prod(dim) * elsize)
        rval = numpy.fromstring(d, dtype=magic_t).reshape(dim)
        del d
    elif subtensor is None:
        rval = numpy.fromfile(f, dtype=magic_t, count=_prod(dim)).reshape(dim)
    elif isinstance(subtensor, slice):
        if subtensor.step not in (None, 1):
            raise NotImplementedError('slice with step', subtensor.step)
        # Bug fix: open-ended slices such as slice(None, n) or slice(m, None)
        # previously raised TypeError (arithmetic on None); normalize the
        # endpoints before using them.
        start = 0 if subtensor.start is None else subtensor.start
        stop = dim[0] if subtensor.stop is None else subtensor.stop
        if start != 0:
            # Skip the unwanted leading rows directly on disk.
            bytes_per_row = _prod(dim[1:]) * elsize
            f.seek(f_start + start * bytes_per_row)
        dim[0] = min(dim[0], stop) - start
        rval = numpy.fromfile(f, dtype=magic_t, count=_prod(dim)).reshape(dim)
    else:
        raise NotImplementedError('subtensor access not written yet: ',
                                  subtensor)

    return rval
def write(f, mat):
    """ Write a ndarray to tensorfile.

    Parameters
    ----------
    f : file
        Open file to write into
    mat : ndarray
        Array to save
    """
    def _write_int32(out, value):
        # Header fields are stored as native-endian int32 values.
        numpy.asarray(value, dtype='int32').tofile(out)

    try:
        _write_int32(f, _dtype_magic[str(mat.dtype)])
    except KeyError:
        reraise_as(TypeError('Invalid ndarray dtype for filetensor format',
                             mat.dtype))

    _write_int32(f, len(mat.shape))
    # The on-disk format always stores at least three dimension slots,
    # padding the missing ones with 1.
    shape = mat.shape
    if len(shape) < 3:
        shape = list(shape) + [1] * (3 - len(shape))
    for extent in shape:
        _write_int32(f, extent)
    mat.tofile(f)
| bsd-3-clause |
EdLogan18/logan-repository | plugin.video.SportsDevil/lib/utils/datetimeUtils.py | 4 | 7096 | # -*- coding: utf-8 -*-
import time, datetime
import re
import sys, traceback
#######################################
# Time and Date Helpers
#######################################
def convDateUtil(timestring, newfrmt='default', in_zone='UTC'):
    """Convert *timestring* (interpreted in *in_zone*) to the Kodi user's
    configured timezone and format it.

    newfrmt 'default' uses the Kodi regional time format with the seconds
    field stripped.  Returns '' when *timestring* cannot be parsed.
    """
    import xbmc
    try: import json
    except ImportError: import simplejson as json
    from dateutil.parser import parse
    from dateutil.tz import gettz

    try:
        locale_timezone = json.loads(xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Settings.GetSettingValue", "params": {"setting": "locale.timezone"}, "id": 1}'))
        local_tzinfo = gettz(locale_timezone['result']['value'])
        if local_tzinfo is None:
            # Unknown timezone name: fall back below.  (Was '== None'.)
            raise ValueError()
    except (ValueError, KeyError):
        # Bug fix: a malformed JSON-RPC reply raised an uncaught KeyError
        # instead of falling back to the system local timezone.
        from dateutil.tz import tzlocal
        local_tzinfo = tzlocal()

    if newfrmt == 'default':
        newfrmt = xbmc.getRegion('time').replace(':%S', '')

    try:
        in_time = parse(timestring)
        in_time_with_timezone = in_time.replace(tzinfo=gettz(in_zone))
        local_time = in_time_with_timezone.astimezone(local_tzinfo)
        return local_time.strftime(newfrmt)
    except ValueError:
        traceback.print_exc(file=sys.stdout)
        return ''
def timediff(mytime, unit='seconds'):
    """Return the time elapsed since *mytime* as a string.

    *mytime* is 'YYYY/MM/DD HH:MM' and is compared against the current
    UTC time.  unit selects 'seconds' or 'minutes'; 'sapo' is a legacy
    special case returning the current unix time as a float (kept as-is
    for its caller); any other unit yields '0'.
    """
    now = datetime.datetime.utcnow()
    date_part = mytime.split(' ')[0]
    time_part = mytime.split(' ')[1]
    year, month, day = (int(v) for v in date_part.split('/')[:3])
    hour, minute = (int(v) for v in time_part.split(':')[:2])
    then = datetime.datetime(year, month, day, hour, minute)
    # Bug fix: timedelta.seconds only holds the sub-day remainder, so any
    # difference over 24h was reported wrong; total_seconds() includes the
    # .days component.
    elapsed = int((now - then).total_seconds())
    if unit == 'seconds':
        return str(elapsed)
    elif unit == 'minutes':
        return str(elapsed // 60)
    elif unit == 'sapo':
        # Historic behaviour preserved: raw float timestamp, not a string.
        return time.time()
    else:
        return '0'
def convTimestamp(timestamp, newfrmt='', offsetStr=''):
    """Format a unix *timestamp* as a date string in local time.

    An optional *offsetStr* ('[+-]HH:MM', the page's timezone) shifts the
    result via datetimeoffset().  With an empty *newfrmt* a short
    'y/m/d' form is used, or 'm/d' when the year is the 1900 placeholder.
    """
    stamp = datetime.datetime.fromtimestamp(int(timestamp))
    if offsetStr:
        stamp = datetimeoffset(stamp, offsetStr)
    if newfrmt == '':
        newfrmt = "%y/%m/%d" if stamp.year != 1900 else "%m/%d"
    return stamp.strftime(newfrmt)
def convDate(language, datestr, frmt, newfrmt='', offsetStr=''):
    """Parse *datestr* according to the strftime-like pattern *frmt*,
    optionally shift it with *offsetStr* (via datetimeoffset) and reformat.

    Month names are recognised in English and German.  On any parse
    failure the original *datestr* is returned unchanged.  *language* is
    accepted for API compatibility but not used.
    """
    # Only Python 2 needs the bytes round-trip; on Python 3 encoding here
    # produced bytes and made every later str operation fail.
    if sys.version_info[0] == 2:
        try:
            datestr = datestr.encode('utf-8')
        except:
            pass

    monthsEN = {
        'January': 1,
        'February': 2,
        'March': 3,
        'April': 4,
        'May': 5,
        'June': 6,
        'July': 7,
        'August': 8,
        'September': 9,
        'October': 10,
        'November': 11,
        'December': 12
    }
    monthsDE = {
        'Januar': 1,
        'Februar': 2,
        u'M\xe4rz': 3,
        'Maerz': 3,
        'April': 4,
        'Mai': 5,
        'Juni': 6,
        'Juli': 7,
        'August': 8,
        'September': 9,
        'Oktober': 10,
        'November': 11,
        'Dezember': 12
    }
    # Regex fragment for each supported strftime symbol.
    datesyms = {
        # DAY
        '%d': r'\d{1,2}',
        '%a': r'\w{3}',
        '%A': r'[A-Za-z]{3,}',
        # MONTH
        '%m': r'\d{2}',
        '%b': r'\w{3}',
        '%B': r'\w{3,}',
        # YEAR
        '%y': r'\d{2}',
        '%Y': r'\d{4}',
        # HOUR
        '%H': r'\d{2}',
        '%I': r'\d{1,2}',
        # AM/PM
        '%p': r'\w{2}',
        '%P': r'\w{2}',
        # MINUTE/SECOND
        '%M': r'\d{2}',
        '%S': r'\d{2}'
    }

    idxFrmt = re.findall(r'(%\w)', frmt, re.DOTALL + re.IGNORECASE)
    try:
        # Turn the strftime pattern into a regex with one group per symbol.
        for sym in idxFrmt:
            if sym in datesyms:  # dict.has_key() removed: Python-2-only
                frmt = frmt.replace(sym, '(' + datesyms[sym] + ')')
        p = re.compile(frmt, re.DOTALL + re.IGNORECASE)

        # Normalise the German umlaut before matching word-based months.
        try:
            datestr = datestr.replace('\xe4', 'ae')
        except:
            datestr = datestr.replace(u'\xe4', 'ae')
        m = p.match(datestr)
        if not m:
            return datestr

        second = 0
        minute = 0
        hour = 0
        dayhalf = ''
        day = 1
        month = 1
        year = 1900
        # Bug fix: the original recovered the symbol for each group with
        # list(m.groups(0)).index(item), which finds the FIRST group with
        # that text and therefore mis-assigns fields whenever two fields
        # capture the same string (e.g. '12:12').  Pair positionally.
        for sym, item in zip(idxFrmt, m.groups(0)):
            if sym == '%B':
                if item.capitalize() in monthsDE:
                    month = monthsDE[item.capitalize()]
                    continue
                if item.capitalize() in monthsEN:
                    month = monthsEN[item.capitalize()]
                    continue
            elif sym == '%m':
                month = int(item)
            elif sym == '%d':
                day = int(item)
            elif sym in ('%y', '%Y'):
                year = int(item)
            elif sym in ('%H', '%I'):
                hour = int(item)
            elif sym == '%M':
                minute = int(item)
            elif sym == '%S':
                second = int(item)
            elif sym in ('%p', '%P'):
                # Bug fix: '%p' was matched by the regex but never assigned,
                # so lowercase am/pm markers were silently ignored.
                dayhalf = str(item)

        if dayhalf != '' and dayhalf.lower() == 'pm' and hour < 12:
            hour = hour + 12
        if dayhalf != '' and dayhalf.lower() == 'am' and hour == 12:
            hour = 0

        date = datetime.datetime(year, month, day, hour, minute, second)
        if offsetStr:
            date = datetimeoffset(date, offsetStr)
        if newfrmt == '':
            newfrmt = "%y/%m/%d" if date.year != 1900 else "%m/%d"
        return date.strftime(newfrmt)
    except:
        # Contract: never raise -- hand back the input on any failure.
        traceback.print_exc(file=sys.stdout)
        return datestr
def datetimeoffset(date, offsetStr):
fak = 1
if offsetStr[0] == '-':
fak = -1
offsetStr = offsetStr[1:]
offsethours = int(offsetStr.split(':')[0])
offsetminutes = int(offsetStr.split(':')[1])
pageOffSeconds = fak*(offsethours * 3600 + offsetminutes *60)
localOffSeconds = -1 * time.timezone
offSeconds = localOffSeconds - pageOffSeconds
offset=date + datetime.timedelta(seconds=offSeconds)
return offset
def getUnixTimestamp():
    """Current unix time, truncated to whole seconds, as a string."""
    return "%d" % int(time.time())
def utcToGmt(date):
    """Shift a naive UTC datetime into local time (the name is historic)."""
    local_shift = datetime.timedelta(seconds=time.timezone)
    return date - local_shift
def strToDatetime(dateStr, dformat):
    """strptime wrapper: falls back to time.strptime on the TypeError that
    datetime.strptime is known to raise in some embedded interpreters."""
    try:
        return datetime.datetime.strptime(dateStr, dformat)
    except TypeError:
        return datetime.datetime(*(time.strptime(dateStr, dformat)[0:6]))
| gpl-2.0 |
EmreAtes/spack | lib/spack/external/py/_path/common.py | 24 | 14399 | """
"""
import os, sys, posixpath
import fnmatch
import py
# Moved from local.py.
# True when running on Windows (including implementations exposing
# os._name == 'nt', e.g. Jython).
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')

try:
    from os import fspath
except ImportError:
    def fspath(path):
        """
        Return the string representation of the path.
        If str or bytes is passed in, it is returned unchanged.

        This code comes from PEP 519, modified to support earlier versions of
        python.

        This is required for python < 3.6.
        """
        if isinstance(path, (py.builtin.text, py.builtin.bytes)):
            return path

        # Work from the object's type to match method resolution of other magic
        # methods.
        path_type = type(path)
        try:
            return path_type.__fspath__(path)
        except AttributeError:
            # Only swallow the error when __fspath__ is genuinely absent;
            # re-raise AttributeErrors coming from inside a defined __fspath__.
            if hasattr(path_type, '__fspath__'):
                raise

            try:
                import pathlib
            except ImportError:
                pass
            else:
                if isinstance(path, pathlib.PurePath):
                    return py.builtin.text(path)

            raise TypeError("expected str, bytes or os.PathLike object, not "
                            + path_type.__name__)
class Checkers:
    """Evaluates keyword-style path checks, e.g. ``path.check(file=1)``.

    Each public method implements one checker; ``_evaluate`` dispatches a
    dict of checker names (optionally prefixed with 'not') to them.
    """

    _depend_on_existence = 'exists', 'link', 'dir', 'file'

    def __init__(self, path):
        self.path = path

    def dir(self):
        raise NotImplementedError

    def file(self):
        raise NotImplementedError

    def dotfile(self):
        return self.path.basename.startswith('.')

    def ext(self, arg):
        if not arg.startswith('.'):
            arg = '.' + arg
        return self.path.ext == arg

    def exists(self):
        raise NotImplementedError

    def basename(self, arg):
        return self.path.basename == arg

    def basestarts(self, arg):
        return self.path.basename.startswith(arg)

    def relto(self, arg):
        return self.path.relto(arg)

    def fnmatch(self, arg):
        return self.path.fnmatch(arg)

    def endswith(self, arg):
        return str(self.path).endswith(arg)

    def _evaluate(self, kw):
        """Return True when the path satisfies every requested checker."""
        for name, value in kw.items():
            negate = False
            checker = getattr(self, name, None)
            if checker is None and name[:3] == 'not':
                # 'notfile=1' means the inverse of the 'file' checker.
                negate = True
                checker = getattr(self, name[3:], None)
            if checker is None:
                raise TypeError(
                    "no %r checker available for %r" % (name, self.path))
            try:
                if py.code.getrawcode(checker).co_argcount > 1:
                    # Checker takes an argument (ext, basename, ...).
                    if (not checker(value)) ^ negate:
                        return False
                elif bool(value) ^ bool(checker()) ^ negate:
                    return False
            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
                # EBUSY feels not entirely correct, but it is kind of
                # necessary since ENOMEDIUM is not accessible in python.
                for dep in self._depend_on_existence:
                    if dep in kw and kw.get(dep):
                        return False
                    notdep = 'not' + dep
                    if notdep in kw and not kw.get(notdep):
                        return False
        return True
class NeverRaised(Exception):
    """Sentinel exception type that is never raised anywhere.

    Used as a default ``ignore`` argument so that, by default, no
    exception is suppressed by an ``except ignore:`` clause.
    """
class PathBase(object):
    """ shared implementation for filesystem path objects."""
    Checkers = Checkers

    def __div__(self, other):
        # path / "segment" joins; accepts anything fspath() can convert.
        return self.join(fspath(other))
    __truediv__ = __div__ # py3k

    def basename(self):
        """ basename part of path. """
        return self._getbyspec('basename')[0]
    basename = property(basename, None, None, basename.__doc__)

    def dirname(self):
        """ dirname part of path. """
        return self._getbyspec('dirname')[0]
    dirname = property(dirname, None, None, dirname.__doc__)

    def purebasename(self):
        """ pure base name of the path."""
        return self._getbyspec('purebasename')[0]
    purebasename = property(purebasename, None, None, purebasename.__doc__)

    def ext(self):
        """ extension of the path (including the '.')."""
        return self._getbyspec('ext')[0]
    ext = property(ext, None, None, ext.__doc__)

    def dirpath(self, *args, **kwargs):
        """ return the directory path joined with any given path arguments. """
        return self.new(basename='').join(*args, **kwargs)

    def read_binary(self):
        """ read and return a bytestring from reading the path. """
        with self.open('rb') as f:
            return f.read()

    def read_text(self, encoding):
        """ read and return a Unicode string from reading the path. """
        with self.open("r", encoding=encoding) as f:
            return f.read()

    def read(self, mode='r'):
        """ read and return a bytestring from reading the path. """
        with self.open(mode) as f:
            return f.read()

    def readlines(self, cr=1):
        """ read and return a list of lines from the path. if cr is False, the
            newline will be removed from the end of each line. """
        if sys.version_info < (3, ):
            mode = 'rU'
        else:  # python 3 deprecates mode "U" in favor of "newline" option
            mode = 'r'

        if not cr:
            content = self.read(mode)
            return content.split('\n')
        else:
            f = self.open(mode)
            try:
                return f.readlines()
            finally:
                f.close()

    def load(self):
        """ (deprecated) return object unpickled from self.read() """
        f = self.open('rb')
        try:
            # py.std is the py library's lazy stdlib accessor.
            return py.error.checked_call(py.std.pickle.load, f)
        finally:
            f.close()

    def move(self, target):
        """ move this path to target. """
        if target.relto(self):
            raise py.error.EINVAL(target,
                "cannot move path into a subdirectory of itself")
        try:
            self.rename(target)
        except py.error.EXDEV:  # invalid cross-device link
            # rename() cannot cross filesystems: fall back to copy+remove.
            self.copy(target)
            self.remove()

    def __repr__(self):
        """ return a string representation of this path. """
        return repr(str(self))

    def check(self, **kw):
        """ check a path for existence and properties.

            Without arguments, return True if the path exists, otherwise False.

            valid checkers::

                file=1    # is a file
                file=0    # is not a file (may not even exist)
                dir=1     # is a dir
                link=1    # is a link
                exists=1  # exists

            You can specify multiple checker definitions, for example::

                path.check(file=1, link=1)  # a link pointing to a file
        """
        if not kw:
            kw = {'exists' : 1}
        return self.Checkers(self)._evaluate(kw)

    def fnmatch(self, pattern):
        """return true if the basename/fullname matches the glob-'pattern'.

        valid pattern characters::

            *       matches everything
            ?       matches any single character
            [seq]   matches any character in seq
            [!seq]  matches any char not in seq

        If the pattern contains a path-separator then the full path
        is used for pattern matching and a '*' is prepended to the
        pattern.

        if the pattern doesn't contain a path-separator the pattern
        is only matched against the basename.
        """
        return FNMatcher(pattern)(self)

    def relto(self, relpath):
        """ return a string which is the relative part of the path
        to the given 'relpath'.
        """
        if not isinstance(relpath, (str, PathBase)):
            raise TypeError("%r: not a string or path object" %(relpath,))
        strrelpath = str(relpath)
        if strrelpath and strrelpath[-1] != self.sep:
            strrelpath += self.sep
        #assert strrelpath[-1] == self.sep
        #assert strrelpath[-2] != self.sep
        strself = self.strpath
        if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
            # Windows filesystems are case-insensitive: compare normcased.
            if os.path.normcase(strself).startswith(
               os.path.normcase(strrelpath)):
                return strself[len(strrelpath):]
        elif strself.startswith(strrelpath):
            return strself[len(strrelpath):]
        return ""

    def ensure_dir(self, *args):
        """ ensure the path joined with args is a directory. """
        return self.ensure(*args, **{"dir": True})

    def bestrelpath(self, dest):
        """ return a string which is a relative path from self
        (assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest and if not such
        path can be determined return dest.
        """
        try:
            if self == dest:
                return os.curdir
            base = self.common(dest)
            if not base:  # can be the case on windows
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            if self2base:
                # Number of '..' segments needed to climb from self to base.
                n = self2base.count(self.sep) + 1
            else:
                n = 0
            l = [os.pardir] * n
            if reldest:
                l.append(reldest)
            target = dest.sep.join(l)
            return target
        except AttributeError:
            return str(dest)

    def exists(self):
        return self.check()

    def isdir(self):
        return self.check(dir=1)

    def isfile(self):
        return self.check(file=1)

    def parts(self, reverse=False):
        """ return a root-first list of all ancestor directories
            plus the path itself.
        """
        current = self
        l = [self]
        while 1:
            last = current
            current = current.dirpath()
            if last == current:
                # dirpath() of the root returns the root itself: stop.
                break
            l.append(current)
        if not reverse:
            l.reverse()
        return l

    def common(self, other):
        """ return the common part shared with the other path
        or None if there is no common part.
        """
        last = None
        for x, y in zip(self.parts(), other.parts()):
            if x != y:
                return last
            last = x
        return last

    def __add__(self, other):
        """ return new path object with 'other' added to the basename"""
        return self.new(basename=self.basename+str(other))

    def __cmp__(self, other):
        """ return sort value (-1, 0, +1). """
        # __cmp__ and the cmp builtin only exist on Python 2; Python 3
        # uses __lt__ below.
        try:
            return cmp(self.strpath, other.strpath)
        except AttributeError:
            return cmp(str(self), str(other)) # self.path, other.path)

    def __lt__(self, other):
        try:
            return self.strpath < other.strpath
        except AttributeError:
            return str(self) < str(other)

    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
        """ yields all paths below the current one

            fil is a filter (glob pattern or callable), if not matching the
            path will not be yielded, defaulting to None (everything is
            returned)

            rec is a filter (glob pattern or callable) that controls whether
            a node is descended, defaulting to None

            ignore is an Exception class that is ignored when calling
            listdir() on any of the paths (by default, all exceptions are
            reported)

            bf if True will cause a breadthfirst search instead of the
            default depthfirst. Default: False

            sort if True will sort entries within each directory level.
        """
        for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
            yield x

    def _sortlist(self, res, sort):
        if sort:
            if hasattr(sort, '__call__'):
                # NOTE(review): passing a callable relies on the Python 2
                # list.sort(cmp) signature -- confirm before using on py3.
                res.sort(sort)
            else:
                res.sort()

    def samefile(self, other):
        """ return True if other refers to the same stat object as self. """
        return self.strpath == str(other)

    def __fspath__(self):
        return self.strpath
class Visitor:
    """Implements the directory traversal behind PathBase.visit()."""

    def __init__(self, fil, rec, ignore, bf, sort):
        # Glob-pattern strings become FNMatcher callables.
        if isinstance(fil, py.builtin._basestring):
            fil = FNMatcher(fil)
        if isinstance(rec, py.builtin._basestring):
            self.rec = FNMatcher(rec)
        elif not hasattr(rec, '__call__') and rec:
            # Any other truthy, non-callable value means "recurse everywhere".
            self.rec = lambda path: True
        else:
            self.rec = rec
        self.fil = fil
        self.ignore = ignore
        self.breadthfirst = bf
        self.optsort = sorted if sort else (lambda x: x)

    def gen(self, path):
        try:
            entries = path.listdir()
        except self.ignore:
            return
        rec = self.rec
        subdirs = self.optsort([entry for entry in entries
                                if entry.check(dir=1)
                                and (rec is None or rec(entry))])
        if not self.breadthfirst:
            # Depth-first: descend before yielding this level's entries.
            for subdir in subdirs:
                for found in self.gen(subdir):
                    yield found
        for entry in self.optsort(entries):
            if self.fil is None or self.fil(entry):
                yield entry
        if self.breadthfirst:
            for subdir in subdirs:
                for found in self.gen(subdir):
                    yield found
class FNMatcher:
    """Callable matching a path object against a glob pattern.

    Patterns without a path separator are matched against the basename
    only; patterns containing one are matched against the full path,
    with '*' prepended unless the pattern is already absolute.
    """

    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, path):
        pattern = self.pattern
        has_sep = pattern.find(path.sep) != -1
        if not has_sep and iswin32 and pattern.find(posixpath.sep) != -1:
            # Running on Windows with a posix-style pattern: translate the
            # posix separators so such patterns keep working.
            pattern = pattern.replace(posixpath.sep, path.sep)
            has_sep = pattern.find(path.sep) != -1
        if not has_sep:
            candidate = path.basename
        else:
            candidate = str(path)  # path.strpath # XXX svn?
            if not os.path.isabs(pattern):
                pattern = '*' + path.sep + pattern
        return fnmatch.fnmatch(candidate, pattern)
| lgpl-2.1 |
doebi/winkekatze | winkekatze_mqtt.py | 2 | 5308 | #!/usr/bin/python3
###
# Copyright 2015, Aurel Wildfellner.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import time
import serial
import argparse
import mosquitto
import topicconfig
from topictypes import TopicTypes
class WinkekatzeConnection:
    """Serial link to the Winkekatze (waving cat) Arduino.

    Commands are plain ASCII strings terminated by ';' (e.g. "WINK;").
    ``isConnected`` tracks whether the serial port is believed usable;
    it is cleared whenever a write fails so the caller can reconnect.
    """

    def __init__(self):
        self._serial = None        # serial.Serial instance once connected
        self.isConnected = False

    def connect(self, device, baudrate):
        """Open *device* at *baudrate*.

        Returns True on success (or if already connected), False when
        the port cannot be opened.
        """
        if self.isConnected:
            return True
        try:
            self._serial = serial.Serial(device, baudrate)
        except Exception:
            # Port missing or busy -- caller may retry with another device.
            return False
        # update open and locked info
        self.isConnected = self._serial.isOpen()
        return self.isConnected

    def wink(self):
        """Make the cat wink once."""
        print("Wink")
        self._sendCommand("WINK;")

    def wink3(self):
        """Make the cat wink three times."""
        print("Wink 3")
        self._sendCommand("WINK3;")

    def reset(self):
        """Return the cat to its idle position."""
        print("Reset")
        # BUG FIX: original called self._send_Command (nonexistent method),
        # raising AttributeError whenever a RESET was requested.
        self._sendCommand("RESET;")

    def _sendCommand(self, cmd):
        """Send a command string to the arduino.

        Marks the connection as dead (isConnected=False) if the port is
        closed or the write fails.
        """
        if self._serial and self._serial.isOpen():
            try:
                self._serial.write(bytearray(cmd, "ASCII"))
                print("printed to serial:", cmd)
            except Exception:
                self.isConnected = False
        else:
            self.isConnected = False
class MQTT_TOPICS:
    """MQTT topic names used by this bridge.

    NOTE: the original class assigned ``winkekatze`` twice in a row
    ("pusteblume/winkekatze" then "devlol/winkekatze"); the first
    assignment was dead code and has been removed.  The effective value
    is unchanged.
    """
    winkekatze = "devlol/winkekatze"
    hi5 = "devlol/h19/mainroom/craftui/button/buttonHi5"
def on_message(client, winkekatze, msg):
    # MQTT message callback.  *winkekatze* is the user-data object (a
    # WinkekatzeConnection) registered via client.user_data_set() in main().
    payload = msg.payload.decode("utf-8")
    print(payload)
    """ Callback for mqtt message."""
    # Dispatch according to the configured topic type.
    for ctopic in topicconfig.cat_topics:
        if ctopic['topic'] == msg.topic:
            if ctopic['type'] == TopicTypes.WINKEKATZE:
                # Direct cat control: fixed keywords or a raw command.
                if payload == "WINK":
                    winkekatze.wink()
                elif payload == "WINK3":
                    winkekatze.wink3()
                elif payload == "RESET":
                    winkekatze.reset()
                elif "CMD:" in payload:  # send a raw command to the cat
                    # NOTE(review): the check only tests containment but the
                    # slice assumes the payload *starts* with "CMD:" -- confirm.
                    cmd = payload[4:].strip() + ";"
                    print(cmd)
                    winkekatze._sendCommand(cmd)
            elif ctopic['type'] == TopicTypes.TRIGGER_ON_PAYLOAD:
                # Fire the configured command only when the payload matches.
                if ctopic['payload'] == payload:
                    winkekatze._sendCommand(ctopic['command'])
            elif ctopic['type'] == TopicTypes.TRIGGER_ON_POST:
                # Fire on any message posted to this topic.
                winkekatze._sendCommand(ctopic['command'])
def on_disconnect(client, userdata, foo):
    # Broker-disconnect callback: retry once per second until reconnected,
    # then re-subscribe (subscriptions are lost across reconnects).
    connected = False
    while not connected:
        try:
            client.reconnect()
            connected = True
            # resubscribe to the topics
            for ctopic in topicconfig.cat_topics:
                client.subscribe(ctopic['topic'])
        # NOTE(review): bare except also swallows KeyboardInterrupt,
        # making the process hard to stop while the broker is down.
        except:
            print("Failed to reconnect...")
            time.sleep(1)
def tryToConnectArduino(winkekatze, devname="ttyACM", num=10):
    """Probe /dev/<devname>0 .. /dev/<devname><num-1> at 9600 baud.

    Stops at the first device on which *winkekatze* reports a successful
    connection; otherwise tries every candidate.
    """
    for index in range(num):
        candidate = "/dev/%s%d" % (devname, index)
        winkekatze.connect(candidate, 9600)
        if winkekatze.isConnected:
            break
def main():
    """Run the MQTT -> Winkekatze bridge.

    Connects to the broker (``--host``), subscribes to the configured
    topics, attaches the serial cat connection as MQTT user data, and
    loops forever, reconnecting the Arduino whenever the link drops.
    """
    ## Command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="192.168.7.2")
    parser.add_argument("-d")  # explicit serial device path (optional)
    args = parser.parse_args()
    devicePath = args.d
    gotDevicePath = (devicePath != None)
    brokerHost = args.host

    ## setup MQTT client
    client = mosquitto.Mosquitto()
    client.on_message = on_message
    client.on_disconnect = on_disconnect
    try:
        client.connect(brokerHost)
    except:
        # First connection failed: reuse the reconnect loop.
        print("failed to connect")
        on_disconnect(client, None, None)

    ## subscribe to topics
    for ctopic in topicconfig.cat_topics:
        client.subscribe(ctopic['topic'])

    ## winkekatze
    winkeKatze = WinkekatzeConnection()
    print("Connecting to Arduino.... ", end="")
    if (gotDevicePath):
        winkeKatze.connect(devicePath, 9600)
    else:
        tryToConnectArduino(winkeKatze)
    if winkeKatze.isConnected:
        print("done.")
    else:
        print("ERROR!")
    # Make the cat available to on_message via the userdata argument.
    client.user_data_set(winkeKatze)

    # Main loop: service MQTT and keep the serial link alive.
    while True:
        if not winkeKatze.isConnected:
            print("Arduino not connected.")
            if (gotDevicePath):
                winkeKatze.connect(devicePath, 9600)
            else:
                tryToConnectArduino(winkeKatze, "ttyACM", 5)
            if winkeKatze.isConnected:
                print("Reconnected!")
            else:
                time.sleep(1)
        client.loop()
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
geraldoandradee/mysql-5.6 | xtrabackup/test/python/testtools/tests/test_testresult.py | 42 | 49985 | # Copyright (c) 2008 testtools developers. See LICENSE for details.
"""Test TestResults and related things."""
__metaclass__ = type
import codecs
import datetime
import doctest
import os
import shutil
import sys
import tempfile
import threading
import warnings
from testtools import (
ExtendedToOriginalDecorator,
MultiTestResult,
TestCase,
TestResult,
TextTestResult,
ThreadsafeForwardingResult,
testresult,
)
from testtools.compat import (
_b,
_get_exception_encoding,
_r,
_u,
str_is_unicode,
StringIO,
)
from testtools.content import Content
from testtools.content_type import ContentType, UTF8_TEXT
from testtools.matchers import (
DocTestMatches,
MatchesException,
Raises,
)
from testtools.tests.helpers import (
LoggingResult,
Python26TestResult,
Python27TestResult,
ExtendedTestResult,
an_exc_info
)
from testtools.testresult.real import utc
class Python26Contract(object):
    """Result-object contract as offered by Python 2.6's unittest.

    Mixed into TestCase subclasses below; ``makeResult`` is supplied by
    the concrete subclass.
    """

    def test_fresh_result_is_successful(self):
        # A result is considered successful before any tests are run.
        result = self.makeResult()
        self.assertTrue(result.wasSuccessful())

    def test_addError_is_failure(self):
        # addError fails the test run.
        result = self.makeResult()
        result.startTest(self)
        result.addError(self, an_exc_info)
        result.stopTest(self)
        self.assertFalse(result.wasSuccessful())

    def test_addFailure_is_failure(self):
        # addFailure fails the test run.
        result = self.makeResult()
        result.startTest(self)
        result.addFailure(self, an_exc_info)
        result.stopTest(self)
        self.assertFalse(result.wasSuccessful())

    def test_addSuccess_is_success(self):
        # addSuccess does not fail the test run.
        result = self.makeResult()
        result.startTest(self)
        result.addSuccess(self)
        result.stopTest(self)
        self.assertTrue(result.wasSuccessful())
class Python27Contract(Python26Contract):
    """Extends the 2.6 contract with skips, expected failures,
    unexpected successes and start/stopTestRun."""

    def test_addExpectedFailure(self):
        # Calling addExpectedFailure(test, exc_info) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addExpectedFailure(self, an_exc_info)

    def test_addExpectedFailure_is_success(self):
        # addExpectedFailure does not fail the test run.
        result = self.makeResult()
        result.startTest(self)
        result.addExpectedFailure(self, an_exc_info)
        result.stopTest(self)
        self.assertTrue(result.wasSuccessful())

    def test_addSkipped(self):
        # Calling addSkip(test, reason) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addSkip(self, _u("Skipped for some reason"))

    def test_addSkip_is_success(self):
        # addSkip does not fail the test run.
        result = self.makeResult()
        result.startTest(self)
        result.addSkip(self, _u("Skipped for some reason"))
        result.stopTest(self)
        self.assertTrue(result.wasSuccessful())

    def test_addUnexpectedSuccess(self):
        # Calling addUnexpectedSuccess(test) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addUnexpectedSuccess(self)

    def test_addUnexpectedSuccess_was_successful(self):
        # addUnexpectedSuccess does not fail the test run in Python 2.7.
        result = self.makeResult()
        result.startTest(self)
        result.addUnexpectedSuccess(self)
        result.stopTest(self)
        self.assertTrue(result.wasSuccessful())

    def test_startStopTestRun(self):
        # Calling startTestRun completes ok.
        result = self.makeResult()
        result.startTestRun()
        result.stopTestRun()
class DetailsContract(Python27Contract):
    """Tests for the contract of TestResults.

    Adds the testtools extension: every outcome accepts a
    ``details=`` dict instead of exc_info/reason arguments.
    """

    def test_addExpectedFailure_details(self):
        # Calling addExpectedFailure(test, details=xxx) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addExpectedFailure(self, details={})

    def test_addError_details(self):
        # Calling addError(test, details=xxx) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addError(self, details={})

    def test_addFailure_details(self):
        # Calling addFailure(test, details=xxx) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addFailure(self, details={})

    def test_addSkipped_details(self):
        # Calling addSkip(test, reason) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addSkip(self, details={})

    def test_addUnexpectedSuccess_details(self):
        # Calling addUnexpectedSuccess(test) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addUnexpectedSuccess(self, details={})

    def test_addSuccess_details(self):
        # Calling addSuccess(test) completes ok.
        result = self.makeResult()
        result.startTest(self)
        result.addSuccess(self, details={})
class FallbackContract(DetailsContract):
    """When we fallback we take our policy choice to map calls.

    For instance, we map unexpectedSuccess to an error code, not to success.
    """

    def test_addUnexpectedSuccess_was_successful(self):
        # addUnexpectedSuccess fails test run in testtools.
        result = self.makeResult()
        result.startTest(self)
        result.addUnexpectedSuccess(self)
        result.stopTest(self)
        self.assertFalse(result.wasSuccessful())
class StartTestRunContract(FallbackContract):
    """Defines the contract for testtools policy choices.

    That is things which are not simply extensions to unittest but choices we
    have made differently.
    """

    def test_startTestRun_resets_unexpected_success(self):
        # startTestRun discards state from a previous run.
        result = self.makeResult()
        result.startTest(self)
        result.addUnexpectedSuccess(self)
        result.stopTest(self)
        result.startTestRun()
        self.assertTrue(result.wasSuccessful())

    def test_startTestRun_resets_failure(self):
        result = self.makeResult()
        result.startTest(self)
        result.addFailure(self, an_exc_info)
        result.stopTest(self)
        result.startTestRun()
        self.assertTrue(result.wasSuccessful())

    def test_startTestRun_resets_errors(self):
        result = self.makeResult()
        result.startTest(self)
        result.addError(self, an_exc_info)
        result.stopTest(self)
        result.startTestRun()
        self.assertTrue(result.wasSuccessful())
class TestTestResultContract(TestCase, StartTestRunContract):
    """Run the full contract against testtools' TestResult."""

    def makeResult(self):
        return TestResult()
class TestMultiTestResultContract(TestCase, StartTestRunContract):
    """Run the full contract against MultiTestResult."""

    def makeResult(self):
        return MultiTestResult(TestResult(), TestResult())
class TestTextTestResultContract(TestCase, StartTestRunContract):
    """Run the full contract against TextTestResult."""

    def makeResult(self):
        return TextTestResult(StringIO())
class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
    """Run the full contract against ThreadsafeForwardingResult."""

    def makeResult(self):
        result_semaphore = threading.Semaphore(1)
        target = TestResult()
        return ThreadsafeForwardingResult(target, result_semaphore)
class TestExtendedTestResultContract(TestCase, StartTestRunContract):
    """Run the full contract against the ExtendedTestResult helper."""

    def makeResult(self):
        return ExtendedTestResult()
class TestPython26TestResultContract(TestCase, Python26Contract):
    """A plain 2.6-style result only satisfies the 2.6 contract."""

    def makeResult(self):
        return Python26TestResult()
class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
    """Wrapping a 2.6 result in the decorator upgrades it to details."""

    def makeResult(self):
        return ExtendedToOriginalDecorator(Python26TestResult())
class TestPython27TestResultContract(TestCase, Python27Contract):
    """A plain 2.7-style result satisfies the 2.7 contract."""

    def makeResult(self):
        return Python27TestResult()
class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
    """Wrapping a 2.7 result in the decorator supports the details API."""

    def makeResult(self):
        return ExtendedToOriginalDecorator(Python27TestResult())
class TestTestResult(TestCase):
    """Tests for 'TestResult'."""

    def makeResult(self):
        """Make an arbitrary result for testing."""
        return TestResult()

    def test_addSkipped(self):
        # Calling addSkip on a TestResult records the test that was skipped in
        # its skip_reasons dict.
        result = self.makeResult()
        result.addSkip(self, _u("Skipped for some reason"))
        self.assertEqual({_u("Skipped for some reason"): [self]},
            result.skip_reasons)
        result.addSkip(self, _u("Skipped for some reason"))
        self.assertEqual({_u("Skipped for some reason"): [self, self]},
            result.skip_reasons)
        result.addSkip(self, _u("Skipped for another reason"))
        self.assertEqual({_u("Skipped for some reason"): [self, self],
            _u("Skipped for another reason"): [self]},
            result.skip_reasons)

    def test_now_datetime_now(self):
        # _now() uses datetime.now(utc) until an explicit time is set.
        result = self.makeResult()
        olddatetime = testresult.real.datetime
        def restore():
            testresult.real.datetime = olddatetime
        self.addCleanup(restore)
        class Module:
            pass
        now = datetime.datetime.now(utc)
        # Stub out testresult.real's datetime module so now() is fixed.
        stubdatetime = Module()
        stubdatetime.datetime = Module()
        stubdatetime.datetime.now = lambda tz: now
        testresult.real.datetime = stubdatetime
        # Calling _now() looks up the time.
        self.assertEqual(now, result._now())
        then = now + datetime.timedelta(0, 1)
        # Set an explicit datetime, which gets returned from then on.
        result.time(then)
        self.assertNotEqual(now, result._now())
        self.assertEqual(then, result._now())
        # go back to looking it up.
        result.time(None)
        self.assertEqual(now, result._now())

    def test_now_datetime_time(self):
        # An explicitly set time is returned verbatim by _now().
        result = self.makeResult()
        now = datetime.datetime.now(utc)
        result.time(now)
        self.assertEqual(now, result._now())
class TestWithFakeExceptions(TestCase):
    """TestCase base that can manufacture real exc_info tuples."""

    def makeExceptionInfo(self, exceptionFactory, *args, **kwargs):
        # Raise and immediately catch so a genuine traceback is attached;
        # the bare except is deliberate -- we return whatever was raised.
        try:
            raise exceptionFactory(*args, **kwargs)
        except:
            return sys.exc_info()
class TestMultiTestResult(TestWithFakeExceptions):
    """Tests for 'MultiTestResult'."""

    def setUp(self):
        TestWithFakeExceptions.setUp(self)
        self.result1 = LoggingResult([])
        self.result2 = LoggingResult([])
        self.multiResult = MultiTestResult(self.result1, self.result2)

    def assertResultLogsEqual(self, expectedEvents):
        """Assert that our test results have received the expected events."""
        self.assertEqual(expectedEvents, self.result1._events)
        self.assertEqual(expectedEvents, self.result2._events)

    def test_empty(self):
        # Initializing a `MultiTestResult` doesn't do anything to its
        # `TestResult`s.
        self.assertResultLogsEqual([])

    def test_startTest(self):
        # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
        # its `TestResult`s.
        self.multiResult.startTest(self)
        self.assertResultLogsEqual([('startTest', self)])

    def test_stopTest(self):
        # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
        # its `TestResult`s.
        self.multiResult.stopTest(self)
        self.assertResultLogsEqual([('stopTest', self)])

    def test_addSkipped(self):
        # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
        # results.
        reason = _u("Skipped for some reason")
        self.multiResult.addSkip(self, reason)
        self.assertResultLogsEqual([('addSkip', self, reason)])

    def test_addSuccess(self):
        # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
        # all its `TestResult`s.
        self.multiResult.addSuccess(self)
        self.assertResultLogsEqual([('addSuccess', self)])

    def test_done(self):
        # Calling `done` on a `MultiTestResult` calls `done` on all its
        # `TestResult`s.
        # NOTE: ('done') is just the string 'done' -- LoggingResult logs
        # argument-less events as plain strings, so this is correct, if
        # misleadingly parenthesized.
        self.multiResult.done()
        self.assertResultLogsEqual([('done')])

    def test_addFailure(self):
        # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
        # all its `TestResult`s.
        exc_info = self.makeExceptionInfo(AssertionError, 'failure')
        self.multiResult.addFailure(self, exc_info)
        self.assertResultLogsEqual([('addFailure', self, exc_info)])

    def test_addError(self):
        # Calling `addError` on a `MultiTestResult` calls `addError` on all
        # its `TestResult`s.
        exc_info = self.makeExceptionInfo(RuntimeError, 'error')
        self.multiResult.addError(self, exc_info)
        self.assertResultLogsEqual([('addError', self, exc_info)])

    def test_startTestRun(self):
        # Calling `startTestRun` on a `MultiTestResult` forwards to all its
        # `TestResult`s.
        self.multiResult.startTestRun()
        self.assertResultLogsEqual([('startTestRun')])

    def test_stopTestRun(self):
        # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
        # `TestResult`s.
        self.multiResult.stopTestRun()
        self.assertResultLogsEqual([('stopTestRun')])

    def test_stopTestRun_returns_results(self):
        # `MultiTestResult.stopTestRun` returns a tuple of all of the return
        # values the `stopTestRun`s that it forwards to.
        class Result(LoggingResult):
            def stopTestRun(self):
                super(Result, self).stopTestRun()
                return 'foo'
        multi_result = MultiTestResult(Result([]), Result([]))
        result = multi_result.stopTestRun()
        self.assertEqual(('foo', 'foo'), result)

    def test_time(self):
        # the time call is dispatched, not eaten by the base class
        self.multiResult.time('foo')
        self.assertResultLogsEqual([('time', 'foo')])
class TestTextTestResult(TestCase):
    """Tests for 'TextTestResult'."""

    def setUp(self):
        super(TestTextTestResult, self).setUp()
        self.result = TextTestResult(StringIO())

    # Factories producing one-method TestCases with known outcomes.
    def make_erroring_test(self):
        class Test(TestCase):
            def error(self):
                1/0
        return Test("error")

    def make_failing_test(self):
        class Test(TestCase):
            def failed(self):
                self.fail("yo!")
        return Test("failed")

    def make_unexpectedly_successful_test(self):
        class Test(TestCase):
            def succeeded(self):
                self.expectFailure("yo!", lambda: None)
        return Test("succeeded")

    def make_test(self):
        class Test(TestCase):
            def test(self):
                pass
        return Test("test")

    def getvalue(self):
        # Everything written to the result's stream so far.
        return self.result.stream.getvalue()

    def test__init_sets_stream(self):
        result = TextTestResult("fp")
        self.assertEqual("fp", result.stream)

    def reset_output(self):
        # Discard output accumulated so far.
        self.result.stream = StringIO()

    def test_startTestRun(self):
        self.result.startTestRun()
        self.assertEqual("Tests running...\n", self.getvalue())

    def test_stopTestRun_count_many(self):
        test = self.make_test()
        self.result.startTestRun()
        self.result.startTest(test)
        self.result.stopTest(test)
        self.result.startTest(test)
        self.result.stopTest(test)
        self.result.stream = StringIO()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("Ran 2 tests in ...s\n...", doctest.ELLIPSIS))

    def test_stopTestRun_count_single(self):
        test = self.make_test()
        self.result.startTestRun()
        self.result.startTest(test)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("Ran 1 test in ...s\n\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_count_zero(self):
        self.result.startTestRun()
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("Ran 0 tests in ...s\n\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_current_time(self):
        # Elapsed time is computed from the result's injected clock.
        test = self.make_test()
        now = datetime.datetime.now(utc)
        self.result.time(now)
        self.result.startTestRun()
        self.result.startTest(test)
        now = now + datetime.timedelta(0, 0, 0, 1)
        self.result.time(now)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))

    def test_stopTestRun_successful(self):
        self.result.startTestRun()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\n\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_failure(self):
        test = self.make_failing_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_error(self):
        # NOTE(review): errors are also summarized as "failures=1" by this
        # TextTestResult, per the assertion below.
        test = self.make_erroring_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_unexpected_success(self):
        test = self.make_unexpectedly_successful_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_shows_details(self):
        self.result.startTestRun()
        self.make_erroring_test().run(self.result)
        self.make_unexpectedly_successful_test().run(self.result)
        self.make_failing_test().run(self.result)
        self.reset_output()
        self.result.stopTestRun()
        # NOTE(review): the internal indentation of the traceback lines in
        # this doctest template was reconstructed -- verify against the
        # upstream testtools source.
        self.assertThat(self.getvalue(),
            DocTestMatches("""...======================================================================
ERROR: testtools.tests.test_testresult.Test.error
----------------------------------------------------------------------
Text attachment: traceback
------------
Traceback (most recent call last):
  File "...testtools...runtest.py", line ..., in _run_user...
    return fn(*args, **kwargs)
  File "...testtools...testcase.py", line ..., in _run_test_method
    return self._get_test_method()()
  File "...testtools...tests...test_testresult.py", line ..., in error
    1/0
ZeroDivisionError:... divi... by zero...
------------
======================================================================
FAIL: testtools.tests.test_testresult.Test.failed
----------------------------------------------------------------------
Text attachment: traceback
------------
Traceback (most recent call last):
  File "...testtools...runtest.py", line ..., in _run_user...
    return fn(*args, **kwargs)
  File "...testtools...testcase.py", line ..., in _run_test_method
    return self._get_test_method()()
  File "...testtools...tests...test_testresult.py", line ..., in failed
    self.fail("yo!")
AssertionError: yo!
------------
======================================================================
UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
----------------------------------------------------------------------
...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
class TestThreadSafeForwardingResult(TestWithFakeExceptions):
    """Tests for `TestThreadSafeForwardingResult`."""

    def setUp(self):
        TestWithFakeExceptions.setUp(self)
        self.result_semaphore = threading.Semaphore(1)
        self.target = LoggingResult([])
        self.result1 = ThreadsafeForwardingResult(self.target,
            self.result_semaphore)

    def test_nonforwarding_methods(self):
        # startTest and stopTest are not forwarded because they need to be
        # batched.
        self.result1.startTest(self)
        self.result1.stopTest(self)
        self.assertEqual([], self.target._events)

    def test_startTestRun(self):
        self.result1.startTestRun()
        self.result2 = ThreadsafeForwardingResult(self.target,
            self.result_semaphore)
        self.result2.startTestRun()
        self.assertEqual(["startTestRun", "startTestRun"], self.target._events)

    def test_stopTestRun(self):
        self.result1.stopTestRun()
        self.result2 = ThreadsafeForwardingResult(self.target,
            self.result_semaphore)
        self.result2.stopTestRun()
        self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events)

    def test_forwarding_methods(self):
        # error, failure, skip and success are forwarded in batches:
        # each outcome is flushed to the target as a complete
        # time/startTest/time/outcome/stopTest sequence.
        exc_info1 = self.makeExceptionInfo(RuntimeError, 'error')
        starttime1 = datetime.datetime.utcfromtimestamp(1.489)
        endtime1 = datetime.datetime.utcfromtimestamp(51.476)
        self.result1.time(starttime1)
        self.result1.startTest(self)
        self.result1.time(endtime1)
        self.result1.addError(self, exc_info1)
        exc_info2 = self.makeExceptionInfo(AssertionError, 'failure')
        starttime2 = datetime.datetime.utcfromtimestamp(2.489)
        endtime2 = datetime.datetime.utcfromtimestamp(3.476)
        self.result1.time(starttime2)
        self.result1.startTest(self)
        self.result1.time(endtime2)
        self.result1.addFailure(self, exc_info2)
        reason = _u("Skipped for some reason")
        starttime3 = datetime.datetime.utcfromtimestamp(4.489)
        endtime3 = datetime.datetime.utcfromtimestamp(5.476)
        self.result1.time(starttime3)
        self.result1.startTest(self)
        self.result1.time(endtime3)
        self.result1.addSkip(self, reason)
        starttime4 = datetime.datetime.utcfromtimestamp(6.489)
        endtime4 = datetime.datetime.utcfromtimestamp(7.476)
        self.result1.time(starttime4)
        self.result1.startTest(self)
        self.result1.time(endtime4)
        self.result1.addSuccess(self)
        self.assertEqual([
            ('time', starttime1),
            ('startTest', self),
            ('time', endtime1),
            ('addError', self, exc_info1),
            ('stopTest', self),
            ('time', starttime2),
            ('startTest', self),
            ('time', endtime2),
            ('addFailure', self, exc_info2),
            ('stopTest', self),
            ('time', starttime3),
            ('startTest', self),
            ('time', endtime3),
            ('addSkip', self, reason),
            ('stopTest', self),
            ('time', starttime4),
            ('startTest', self),
            ('time', endtime4),
            ('addSuccess', self),
            ('stopTest', self),
            ], self.target._events)
class TestExtendedToOriginalResultDecoratorBase(TestCase):
    """Shared helpers for testing ExtendedToOriginalDecorator.

    The make_* methods choose the wrapped result flavour; the check_*
    methods assert how a given outcome call is translated for it.
    """

    def make_26_result(self):
        self.result = Python26TestResult()
        self.make_converter()

    def make_27_result(self):
        self.result = Python27TestResult()
        self.make_converter()

    def make_converter(self):
        self.converter = ExtendedToOriginalDecorator(self.result)

    def make_extended_result(self):
        self.result = ExtendedTestResult()
        self.make_converter()

    def check_outcome_details(self, outcome):
        """Call an outcome with a details dict to be passed through."""
        # This dict is /not/ convertible - thats deliberate, as it should
        # not hit the conversion code path.
        details = {'foo': 'bar'}
        getattr(self.converter, outcome)(self, details=details)
        self.assertEqual([(outcome, self, details)], self.result._events)

    def get_details_and_string(self):
        """Get a details dict and expected string."""
        text1 = lambda: [_b("1\n2\n")]
        text2 = lambda: [_b("3\n4\n")]
        bin1 = lambda: [_b("5\n")]
        details = {'text 1': Content(ContentType('text', 'plain'), text1),
            'text 2': Content(ContentType('text', 'strange'), text2),
            'bin 1': Content(ContentType('application', 'binary'), bin1)}
        return (details, "Binary content: bin 1\n"
            "Text attachment: text 1\n------------\n1\n2\n"
            "------------\nText attachment: text 2\n------------\n"
            "3\n4\n------------\n")

    def check_outcome_details_to_exec_info(self, outcome, expected=None):
        """Call an outcome with a details dict to be made into exc_info."""
        # The conversion is a done using RemoteError and the string contents
        # of the text types in the details dict.
        if not expected:
            expected = outcome
        details, err_str = self.get_details_and_string()
        getattr(self.converter, outcome)(self, details=details)
        err = self.converter._details_to_exc_info(details)
        self.assertEqual([(expected, self, err)], self.result._events)

    def check_outcome_details_to_nothing(self, outcome, expected=None):
        """Call an outcome with a details dict to be swallowed."""
        if not expected:
            expected = outcome
        details = {'foo': 'bar'}
        getattr(self.converter, outcome)(self, details=details)
        self.assertEqual([(expected, self)], self.result._events)

    def check_outcome_details_to_string(self, outcome):
        """Call an outcome with a details dict to be stringified."""
        details, err_str = self.get_details_and_string()
        getattr(self.converter, outcome)(self, details=details)
        self.assertEqual([(outcome, self, err_str)], self.result._events)

    def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
        """Call an outcome with a details dict to have an arg extracted."""
        details, _ = self.get_details_and_string()
        if extra_detail:
            details.update(extra_detail)
        getattr(self.converter, outcome)(self, details=details)
        self.assertEqual([(outcome, self, arg)], self.result._events)

    def check_outcome_exc_info(self, outcome, expected=None):
        """Check that calling a legacy outcome still works."""
        # calling some outcome with the legacy exc_info style api (no keyword
        # parameters) gets passed through.
        if not expected:
            expected = outcome
        err = sys.exc_info()
        getattr(self.converter, outcome)(self, err)
        self.assertEqual([(expected, self, err)], self.result._events)

    def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
        """Check that calling a legacy outcome on a fallback works."""
        # calling some outcome with the legacy exc_info style api (no keyword
        # parameters) gets passed through.
        if not expected:
            expected = outcome
        err = sys.exc_info()
        getattr(self.converter, outcome)(self, err)
        self.assertEqual([(expected, self)], self.result._events)

    def check_outcome_nothing(self, outcome, expected=None):
        """Check that calling a legacy outcome still works."""
        if not expected:
            expected = outcome
        getattr(self.converter, outcome)(self)
        self.assertEqual([(expected, self)], self.result._events)

    def check_outcome_string_nothing(self, outcome, expected):
        """Check that calling outcome with a string calls expected."""
        getattr(self.converter, outcome)(self, "foo")
        self.assertEqual([(expected, self)], self.result._events)

    def check_outcome_string(self, outcome):
        """Check that calling outcome with a string works."""
        getattr(self.converter, outcome)(self, "foo")
        self.assertEqual([(outcome, self, "foo")], self.result._events)
class TestExtendedToOriginalResultDecorator(
    TestExtendedToOriginalResultDecoratorBase):
    """Pass-through behaviour of the decorator's non-outcome methods
    against 2.6, 2.7 and extended wrapped results."""

    def test_progress_py26(self):
        # progress() is silently dropped for results that lack it.
        self.make_26_result()
        self.converter.progress(1, 2)

    def test_progress_py27(self):
        self.make_27_result()
        self.converter.progress(1, 2)

    def test_progress_pyextended(self):
        self.make_extended_result()
        self.converter.progress(1, 2)
        self.assertEqual([('progress', 1, 2)], self.result._events)

    def test_shouldStop(self):
        self.make_26_result()
        self.assertEqual(False, self.converter.shouldStop)
        self.converter.decorated.stop()
        self.assertEqual(True, self.converter.shouldStop)

    def test_startTest_py26(self):
        self.make_26_result()
        self.converter.startTest(self)
        self.assertEqual([('startTest', self)], self.result._events)

    def test_startTest_py27(self):
        self.make_27_result()
        self.converter.startTest(self)
        self.assertEqual([('startTest', self)], self.result._events)

    def test_startTest_pyextended(self):
        self.make_extended_result()
        self.converter.startTest(self)
        self.assertEqual([('startTest', self)], self.result._events)

    def test_startTestRun_py26(self):
        # 2.6 results have no startTestRun; nothing is forwarded.
        self.make_26_result()
        self.converter.startTestRun()
        self.assertEqual([], self.result._events)

    def test_startTestRun_py27(self):
        self.make_27_result()
        self.converter.startTestRun()
        self.assertEqual([('startTestRun',)], self.result._events)

    def test_startTestRun_pyextended(self):
        self.make_extended_result()
        self.converter.startTestRun()
        self.assertEqual([('startTestRun',)], self.result._events)

    def test_stopTest_py26(self):
        self.make_26_result()
        self.converter.stopTest(self)
        self.assertEqual([('stopTest', self)], self.result._events)

    def test_stopTest_py27(self):
        self.make_27_result()
        self.converter.stopTest(self)
        self.assertEqual([('stopTest', self)], self.result._events)

    def test_stopTest_pyextended(self):
        self.make_extended_result()
        self.converter.stopTest(self)
        self.assertEqual([('stopTest', self)], self.result._events)

    def test_stopTestRun_py26(self):
        self.make_26_result()
        self.converter.stopTestRun()
        self.assertEqual([], self.result._events)

    def test_stopTestRun_py27(self):
        self.make_27_result()
        self.converter.stopTestRun()
        self.assertEqual([('stopTestRun',)], self.result._events)

    def test_stopTestRun_pyextended(self):
        self.make_extended_result()
        self.converter.stopTestRun()
        self.assertEqual([('stopTestRun',)], self.result._events)

    def test_tags_py26(self):
        self.make_26_result()
        self.converter.tags(1, 2)

    def test_tags_py27(self):
        self.make_27_result()
        self.converter.tags(1, 2)

    def test_tags_pyextended(self):
        self.make_extended_result()
        self.converter.tags(1, 2)
        self.assertEqual([('tags', 1, 2)], self.result._events)

    def test_time_py26(self):
        self.make_26_result()
        self.converter.time(1)

    def test_time_py27(self):
        self.make_27_result()
        self.converter.time(1)

    def test_time_pyextended(self):
        self.make_extended_result()
        self.converter.time(1)
        self.assertEqual([('time', 1)], self.result._events)
class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
    """Outcome-translation matrix for addError; subclasses reuse it for
    other outcomes by overriding ``outcome``."""

    outcome = 'addError'

    def test_outcome_Original_py26(self):
        self.make_26_result()
        self.check_outcome_exc_info(self.outcome)

    def test_outcome_Original_py27(self):
        self.make_27_result()
        self.check_outcome_exc_info(self.outcome)

    def test_outcome_Original_pyextended(self):
        self.make_extended_result()
        self.check_outcome_exc_info(self.outcome)

    def test_outcome_Extended_py26(self):
        self.make_26_result()
        self.check_outcome_details_to_exec_info(self.outcome)

    def test_outcome_Extended_py27(self):
        self.make_27_result()
        self.check_outcome_details_to_exec_info(self.outcome)

    def test_outcome_Extended_pyextended(self):
        self.make_extended_result()
        self.check_outcome_details(self.outcome)

    def test_outcome__no_details(self):
        # Calling with neither exc_info nor details is an error.
        self.make_extended_result()
        self.assertThat(
            lambda: getattr(self.converter, self.outcome)(self),
            Raises(MatchesException(ValueError)))
class TestExtendedToOriginalAddFailure(
TestExtendedToOriginalAddError):
outcome = 'addFailure'
class TestExtendedToOriginalAddExpectedFailure(
TestExtendedToOriginalAddError):
outcome = 'addExpectedFailure'
def test_outcome_Original_py26(self):
self.make_26_result()
self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
def test_outcome_Extended_py26(self):
self.make_26_result()
self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
class TestExtendedToOriginalAddSkip(
TestExtendedToOriginalResultDecoratorBase):
outcome = 'addSkip'
def test_outcome_Original_py26(self):
self.make_26_result()
self.check_outcome_string_nothing(self.outcome, 'addSuccess')
def test_outcome_Original_py27(self):
self.make_27_result()
self.check_outcome_string(self.outcome)
def test_outcome_Original_pyextended(self):
self.make_extended_result()
self.check_outcome_string(self.outcome)
def test_outcome_Extended_py26(self):
self.make_26_result()
self.check_outcome_string_nothing(self.outcome, 'addSuccess')
def test_outcome_Extended_py27_no_reason(self):
self.make_27_result()
self.check_outcome_details_to_string(self.outcome)
def test_outcome_Extended_py27_reason(self):
self.make_27_result()
self.check_outcome_details_to_arg(self.outcome, 'foo',
{'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
def test_outcome_Extended_pyextended(self):
self.make_extended_result()
self.check_outcome_details(self.outcome)
def test_outcome__no_details(self):
self.make_extended_result()
self.assertThat(
lambda: getattr(self.converter, self.outcome)(self),
Raises(MatchesException(ValueError)))
class TestExtendedToOriginalAddSuccess(
TestExtendedToOriginalResultDecoratorBase):
outcome = 'addSuccess'
expected = 'addSuccess'
def test_outcome_Original_py26(self):
self.make_26_result()
self.check_outcome_nothing(self.outcome, self.expected)
def test_outcome_Original_py27(self):
self.make_27_result()
self.check_outcome_nothing(self.outcome)
def test_outcome_Original_pyextended(self):
self.make_extended_result()
self.check_outcome_nothing(self.outcome)
def test_outcome_Extended_py26(self):
self.make_26_result()
self.check_outcome_details_to_nothing(self.outcome, self.expected)
def test_outcome_Extended_py27(self):
self.make_27_result()
self.check_outcome_details_to_nothing(self.outcome)
def test_outcome_Extended_pyextended(self):
self.make_extended_result()
self.check_outcome_details(self.outcome)
class TestExtendedToOriginalAddUnexpectedSuccess(
TestExtendedToOriginalResultDecoratorBase):
outcome = 'addUnexpectedSuccess'
expected = 'addFailure'
def test_outcome_Original_py26(self):
self.make_26_result()
getattr(self.converter, self.outcome)(self)
[event] = self.result._events
self.assertEqual((self.expected, self), event[:2])
def test_outcome_Original_py27(self):
self.make_27_result()
self.check_outcome_nothing(self.outcome)
def test_outcome_Original_pyextended(self):
self.make_extended_result()
self.check_outcome_nothing(self.outcome)
def test_outcome_Extended_py26(self):
self.make_26_result()
getattr(self.converter, self.outcome)(self)
[event] = self.result._events
self.assertEqual((self.expected, self), event[:2])
def test_outcome_Extended_py27(self):
self.make_27_result()
self.check_outcome_details_to_nothing(self.outcome)
def test_outcome_Extended_pyextended(self):
self.make_extended_result()
self.check_outcome_details(self.outcome)
class TestExtendedToOriginalResultOtherAttributes(
TestExtendedToOriginalResultDecoratorBase):
def test_other_attribute(self):
class OtherExtendedResult:
def foo(self):
return 2
bar = 1
self.result = OtherExtendedResult()
self.make_converter()
self.assertEqual(1, self.converter.bar)
self.assertEqual(2, self.converter.foo())
class TestNonAsciiResults(TestCase):
"""Test all kinds of tracebacks are cleanly interpreted as unicode
Currently only uses weak "contains" assertions, would be good to be much
stricter about the expected output. This would add a few failures for the
current release of IronPython for instance, which gets some traceback
lines muddled.
"""
_sample_texts = (
_u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
_u("\u5357\u7121"), # In ISO 2022 encodings
_u("\xa7\xa7\xa7"), # In ISO 8859 encodings
)
# Everything but Jython shows syntax errors on the current character
_error_on_character = os.name != "java"
def _run(self, stream, test):
"""Run the test, the same as in testtools.run but not to stdout"""
result = TextTestResult(stream)
result.startTestRun()
try:
return test.run(result)
finally:
result.stopTestRun()
def _write_module(self, name, encoding, contents):
"""Create Python module on disk with contents in given encoding"""
try:
# Need to pre-check that the coding is valid or codecs.open drops
# the file without closing it which breaks non-refcounted pythons
codecs.lookup(encoding)
except LookupError:
self.skip("Encoding unsupported by implementation: %r" % encoding)
f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
try:
f.write(contents)
finally:
f.close()
def _test_external_case(self, testline, coding="ascii", modulelevel="",
suffix=""):
"""Create and run a test case in a seperate module"""
self._setup_external_case(testline, coding, modulelevel, suffix)
return self._run_external_case()
def _setup_external_case(self, testline, coding="ascii", modulelevel="",
suffix=""):
"""Create a test case in a seperate module"""
_, prefix, self.modname = self.id().rsplit(".", 2)
self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
self.addCleanup(shutil.rmtree, self.dir)
self._write_module(self.modname, coding,
# Older Python 2 versions don't see a coding declaration in a
# docstring so it has to be in a comment, but then we can't
# workaround bug: <http://ironpython.codeplex.com/workitem/26940>
"# coding: %s\n"
"import testtools\n"
"%s\n"
"class Test(testtools.TestCase):\n"
" def runTest(self):\n"
" %s\n" % (coding, modulelevel, testline))
def _run_external_case(self):
"""Run the prepared test case in a seperate module"""
sys.path.insert(0, self.dir)
self.addCleanup(sys.path.remove, self.dir)
module = __import__(self.modname)
self.addCleanup(sys.modules.pop, self.modname)
stream = StringIO()
self._run(stream, module.Test())
return stream.getvalue()
def _silence_deprecation_warnings(self):
"""Shut up DeprecationWarning for this test only"""
warnings.simplefilter("ignore", DeprecationWarning)
self.addCleanup(warnings.filters.remove, warnings.filters[0])
def _get_sample_text(self, encoding="unicode_internal"):
if encoding is None and str_is_unicode:
encoding = "unicode_internal"
for u in self._sample_texts:
try:
b = u.encode(encoding)
if u == b.decode(encoding):
if str_is_unicode:
return u, u
return u, b
except (LookupError, UnicodeError):
pass
self.skip("Could not find a sample text for encoding: %r" % encoding)
def _as_output(self, text):
return text
def test_non_ascii_failure_string(self):
"""Assertion contents can be non-ascii and should get decoded"""
text, raw = self._get_sample_text(_get_exception_encoding())
textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
self.assertIn(self._as_output(text), textoutput)
def test_non_ascii_failure_string_via_exec(self):
"""Assertion via exec can be non-ascii and still gets decoded"""
text, raw = self._get_sample_text(_get_exception_encoding())
textoutput = self._test_external_case(
testline='exec ("self.fail(%s)")' % _r(raw))
self.assertIn(self._as_output(text), textoutput)
def test_control_characters_in_failure_string(self):
"""Control characters in assertions should be escaped"""
textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
self.expectFailure("Defense against the beeping horror unimplemented",
self.assertNotIn, self._as_output("\a\a\a"), textoutput)
self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
def test_os_error(self):
"""Locale error messages from the OS shouldn't break anything"""
textoutput = self._test_external_case(
modulelevel="import os",
testline="os.mkdir('/')")
if os.name != "nt" or sys.version_info < (2, 5):
self.assertIn(self._as_output("OSError: "), textoutput)
else:
self.assertIn(self._as_output("WindowsError: "), textoutput)
def test_assertion_text_shift_jis(self):
"""A terminal raw backslash in an encoded string is weird but fine"""
example_text = _u("\u5341")
textoutput = self._test_external_case(
coding="shift_jis",
testline="self.fail('%s')" % example_text)
if str_is_unicode:
output_text = example_text
else:
output_text = example_text.encode("shift_jis").decode(
_get_exception_encoding(), "replace")
self.assertIn(self._as_output("AssertionError: %s" % output_text),
textoutput)
def test_file_comment_iso2022_jp(self):
"""Control character escapes must be preserved if valid encoding"""
example_text, _ = self._get_sample_text("iso2022_jp")
textoutput = self._test_external_case(
coding="iso2022_jp",
testline="self.fail('Simple') # %s" % example_text)
self.assertIn(self._as_output(example_text), textoutput)
def test_unicode_exception(self):
"""Exceptions that can be formated losslessly as unicode should be"""
example_text, _ = self._get_sample_text()
exception_class = (
"class FancyError(Exception):\n"
# A __unicode__ method does nothing on py3k but the default works
" def __unicode__(self):\n"
" return self.args[0]\n")
textoutput = self._test_external_case(
modulelevel=exception_class,
testline="raise FancyError(%s)" % _r(example_text))
self.assertIn(self._as_output(example_text), textoutput)
def test_unprintable_exception(self):
"""A totally useless exception instance still prints something"""
exception_class = (
"class UnprintableError(Exception):\n"
" def __str__(self):\n"
" raise RuntimeError\n"
" def __unicode__(self):\n"
" raise RuntimeError\n"
" def __repr__(self):\n"
" raise RuntimeError\n")
textoutput = self._test_external_case(
modulelevel=exception_class,
testline="raise UnprintableError")
self.assertIn(self._as_output(
"UnprintableError: <unprintable UnprintableError object>\n"),
textoutput)
def test_string_exception(self):
"""Raise a string rather than an exception instance if supported"""
if sys.version_info > (2, 6):
self.skip("No string exceptions in Python 2.6 or later")
elif sys.version_info > (2, 5):
self._silence_deprecation_warnings()
textoutput = self._test_external_case(testline="raise 'plain str'")
self.assertIn(self._as_output("\nplain str\n"), textoutput)
def test_non_ascii_dirname(self):
"""Script paths in the traceback can be non-ascii"""
text, raw = self._get_sample_text(sys.getfilesystemencoding())
textoutput = self._test_external_case(
# Avoid bug in Python 3 by giving a unicode source encoding rather
# than just ascii which raises a SyntaxError with no other details
coding="utf-8",
testline="self.fail('Simple')",
suffix=raw)
self.assertIn(self._as_output(text), textoutput)
def test_syntax_error(self):
"""Syntax errors should still have fancy special-case formatting"""
textoutput = self._test_external_case("exec ('f(a, b c)')")
self.assertIn(self._as_output(
' File "<string>", line 1\n'
' f(a, b c)\n'
+ ' ' * self._error_on_character +
' ^\n'
'SyntaxError: '
), textoutput)
def test_syntax_error_malformed(self):
"""Syntax errors with bogus parameters should break anything"""
textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
def test_syntax_error_import_binary(self):
"""Importing a binary file shouldn't break SyntaxError formatting"""
if sys.version_info < (2, 5):
# Python 2.4 assumes the file is latin-1 and tells you off
self._silence_deprecation_warnings()
self._setup_external_case("import bad")
f = open(os.path.join(self.dir, "bad.py"), "wb")
try:
f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
finally:
f.close()
textoutput = self._run_external_case()
self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
def test_syntax_error_line_iso_8859_1(self):
"""Syntax error on a latin-1 line shows the line decoded"""
text, raw = self._get_sample_text("iso-8859-1")
textoutput = self._setup_external_case("import bad")
self._write_module("bad", "iso-8859-1",
"# coding: iso-8859-1\n! = 0 # %s\n" % text)
textoutput = self._run_external_case()
self.assertIn(self._as_output(_u(
#'bad.py", line 2\n'
' ! = 0 # %s\n'
' ^\n'
'SyntaxError: ') %
(text,)), textoutput)
def test_syntax_error_line_iso_8859_5(self):
"""Syntax error on a iso-8859-5 line shows the line decoded"""
text, raw = self._get_sample_text("iso-8859-5")
textoutput = self._setup_external_case("import bad")
self._write_module("bad", "iso-8859-5",
"# coding: iso-8859-5\n%% = 0 # %s\n" % text)
textoutput = self._run_external_case()
self.assertIn(self._as_output(_u(
#'bad.py", line 2\n'
' %% = 0 # %s\n'
+ ' ' * self._error_on_character +
' ^\n'
'SyntaxError: ') %
(text,)), textoutput)
def test_syntax_error_line_euc_jp(self):
"""Syntax error on a euc_jp line shows the line decoded"""
text, raw = self._get_sample_text("euc_jp")
textoutput = self._setup_external_case("import bad")
self._write_module("bad", "euc_jp",
"# coding: euc_jp\n$ = 0 # %s\n" % text)
textoutput = self._run_external_case()
self.assertIn(self._as_output(_u(
#'bad.py", line 2\n'
' $ = 0 # %s\n'
+ ' ' * self._error_on_character +
' ^\n'
'SyntaxError: ') %
(text,)), textoutput)
def test_syntax_error_line_utf_8(self):
"""Syntax error on a utf-8 line shows the line decoded"""
text, raw = self._get_sample_text("utf-8")
textoutput = self._setup_external_case("import bad")
self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
textoutput = self._run_external_case()
self.assertIn(self._as_output(_u(
'bad.py", line 1\n'
' ^ = 0 # %s\n'
+ ' ' * self._error_on_character +
' ^\n'
'SyntaxError: ') %
text), textoutput)
class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
"""Test that running under unittest produces clean ascii strings"""
def _run(self, stream, test):
from unittest import TextTestRunner as _Runner
return _Runner(stream).run(test)
def _as_output(self, text):
if str_is_unicode:
return text
return text.encode("utf-8")
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| gpl-2.0 |
adelton/django | tests/auth_tests/models/invalid_models.py | 251 | 1340 | from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import models
class CustomUserNonUniqueUsername(AbstractBaseUser):
"""
A user with a non-unique username.
This model is not invalid if it is used with a custom authentication
backend which supports non-unique usernames.
"""
username = models.CharField(max_length=30)
email = models.EmailField(blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a USERNAME_FIELD that appears in REQUIRED_FIELDS (invalid)"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
| bsd-3-clause |
Whisper-Cao/802.15.4-revision | python/qa_phr_prefixer.py | 4 | 2571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import numpy as np
import time
import ieee802_15_4_swig as ieee802_15_4
class qa_phr_prefixer (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
phr = np.random.randint(0,2,size=(12,))
data = np.array(np.random.randint(0,256, size=(6*3,)))
data_bin = np.unpackbits(np.array(data,dtype=np.uint8))
self.src = blocks.vector_source_b(data, False, 1, [])
self.s2ts = blocks.stream_to_tagged_stream(gr.sizeof_char, 1, 6, "packet_len")
self.ts2pdu = blocks.tagged_stream_to_pdu(blocks.byte_t, "packet_len")
self.pref = ieee802_15_4.phr_prefixer(phr)
self.pdu2ts = blocks.pdu_to_tagged_stream(blocks.byte_t, "packet_len")
self.snk = blocks.vector_sink_b(1)
self.tb.connect(self.src, self.s2ts, self.ts2pdu)
self.tb.msg_connect(self.ts2pdu, "pdus", self.pref, "in")
self.tb.msg_connect(self.pref, "out", self.pdu2ts, "pdus")
self.tb.connect(self.pdu2ts, self.snk)
self.tb.start()
time.sleep(1)
self.tb.stop()
# check data
data_out = self.snk.data()
# print "input:"
# for i in data:
# print i
# print "output:"
# for i in data_out:
# print data_out
expected_output = np.concatenate((phr,data_bin[0:6*8], phr, data_bin[6*8:12*8], phr, data_bin[12*8:18*8]))
self.assertFloatTuplesAlmostEqual(data_out, expected_output)
if __name__ == '__main__':
gr_unittest.run(qa_phr_prefixer)
| gpl-3.0 |
whuwxl/docker-registry | depends/docker-registry-core/docker_registry/core/lru.py | 35 | 3504 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docker_registry.core.lru
~~~~~~~~~~~~~~~~~~~~~~~~~~
Redis based LRU.
Can be activated or de-activated globally.
Drivers are largely encouraged to use it.
By default, doesn't run, until one calls init().
"""
import functools
import logging
import redis
logger = logging.getLogger(__name__)
redis_conn = None
cache_prefix = None
def init(enable=True,
host='localhost', port=6379, db=0, password=None, path='/'):
global redis_conn, cache_prefix
if not enable:
redis_conn = None
return
logging.info('Enabling storage cache on Redis')
logging.info('Redis config: {0}'.format({
'host': host,
'port': port,
'db': db,
'password': password,
'path': path
}))
redis_conn = redis.StrictRedis(host=host,
port=int(port),
db=int(db),
password=password)
cache_prefix = 'cache_path:{0}'.format(path)
def cache_key(key):
return cache_prefix + key
def set(f):
@functools.wraps(f)
def wrapper(*args):
content = args[-1]
key = args[-2]
key = cache_key(key)
try:
cached_content = get_by_key(key)
if cached_content and cached_content == content:
# If cached content is the same as what we are about to
# write, we don't need to write again.
return args[-2]
redis_conn.set(key, content)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return f(*args)
if redis_conn is None:
return f
return wrapper
def get(f):
@functools.wraps(f)
def wrapper(*args):
key = args[-1]
key = cache_key(key)
content = get_by_key(key)
if content is not None:
return content
# Refresh cache
content = f(*args)
if content is not None:
try:
redis_conn.set(key, content)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return content
if redis_conn is None:
return f
return wrapper
def get_by_key(key):
try:
content = redis_conn.get(key)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return None
return content
def remove(f):
@functools.wraps(f)
def wrapper(*args):
key = args[-1]
key = cache_key(key)
try:
redis_conn.delete(key)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return f(*args)
if redis_conn is None:
return f
return wrapper
| apache-2.0 |
Sprytile/Sprytile | rx/linq/observable/zip.py | 2 | 3667 | from rx.core import Observable, AnonymousObservable
from rx.disposables import CompositeDisposable, SingleAssignmentDisposable
from rx.internal import extensionmethod, extensionclassmethod
@extensionmethod(Observable, instancemethod=True)
def zip(self, *args):
"""Merges the specified observable sequences into one observable
sequence by using the selector function whenever all of the observable
sequences or an array have produced an element at a corresponding index.
The last element in the arguments must be a function to invoke for each
series of elements at corresponding indexes in the sources.
1 - res = obs1.zip(obs2, fn)
2 - res = x1.zip([1,2,3], fn)
Returns an observable sequence containing the result of combining
elements of the sources using the specified result selector function.
"""
parent = self
sources = list(args)
result_selector = sources.pop()
sources.insert(0, parent)
if args and isinstance(args[0], list):
return _zip_list(self, *args)
def subscribe(observer):
n = len(sources)
queues = [[] for _ in range(n)]
is_done = [False] * n
def next(i):
if all([len(q) for q in queues]):
try:
queued_values = [x.pop(0) for x in queues]
res = result_selector(*queued_values)
except Exception as ex:
observer.on_error(ex)
return
observer.on_next(res)
elif all([x for j, x in enumerate(is_done) if j != i]):
observer.on_completed()
def done(i):
is_done[i] = True
if all(is_done):
observer.on_completed()
subscriptions = [None]*n
def func(i):
source = sources[i]
sad = SingleAssignmentDisposable()
source = Observable.from_future(source)
def on_next(x):
queues[i].append(x)
next(i)
sad.disposable = source.subscribe(on_next, observer.on_error, lambda: done(i))
subscriptions[i] = sad
for idx in range(n):
func(idx)
return CompositeDisposable(subscriptions)
return AnonymousObservable(subscribe)
@extensionclassmethod(Observable)
def zip(cls, *args):
"""Merges the specified observable sequences into one observable
sequence by using the selector function whenever all of the observable
sequences have produced an element at a corresponding index.
The last element in the arguments must be a function to invoke for each
series of elements at corresponding indexes in the sources.
Arguments:
args -- Observable sources.
Returns an observable {Observable} sequence containing the result of
combining elements of the sources using the specified result selector
function.
"""
first = args[0]
return first.zip(*args[1:])
def _zip_list(source, second, result_selector):
first = source
def subscribe(observer):
length = len(second)
index = [0]
def on_next(left):
if index[0] < length:
right = second[index[0]]
index[0] += 1
try:
result = result_selector(left, right)
except Exception as ex:
observer.on_error(ex)
return
observer.on_next(result)
else:
observer.on_completed()
return first.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe)
| mit |
NeuroDataDesign/pan-synapse | pipeline_1/code/tests/clusterTests.py | 1 | 2720 | import sys
sys.path.insert(0, '../functions/')
from epsilonDifference import epsilonDifference as floatEq
from cluster import Cluster
import epsilonDifference as epDiff
import matplotlib.pyplot as plt
import connectLib as cLib
import pickle
testData1 = [[3,3,3], [3,3,2], [3,3,4], [3,2,3], [3,4,3], [2,3,3], [4,3,3]]
testData2 = [[3,3,3], [3,3,4], [3,4,4], [3,4,5], [3,5,5], [4,5,5], [4,5,6]]
testData3 = [[0, 0, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
testData4 = pickle.load(open('synthDat/exponDecayIndexList.synth', 'r'))
testData5 = pickle.load(open('synthDat/smallTwoGaussian.synth', 'r'))
print 'Cluster in cluster.py'
testCluster1 = Cluster(testData1)
testCluster2 = Cluster(testData2)
testCluster3 = Cluster(testData3)
testCluster4 = Cluster(testData4)
#test the centroid method
print '\tTest 1: ', testCluster1.getCentroid() == [3., 3., 3.],'\n\t\tExpected: [3, 3, 3]\tResult: ', testCluster1.getCentroid()
print '\tTest 2: ', epDiff.epsilonDifference(3.28571429, testCluster2.getCentroid()[0], .001) and epDiff.epsilonDifference(4.14285714, testCluster2.getCentroid()[1], .001) and epDiff.epsilonDifference(4.57142857, testCluster2.getCentroid()[2], .001),'\n\t\tExpected: [3.28571429, 4.14285714, 4.57142857]\tResult: ', testCluster2.getCentroid()
print '\tTest 3: ', testCluster3.getCentroid() == [0.5, 0.5, 0.5],'\n\t\tExpected: [0.5, 0.5, 0.5]\tResult: ', testCluster3.getCentroid()
#test the std distance method
print '\tTest 4: ', testCluster3.getStdDeviation() == 0,'\n\t\tExpected: 0\tResult: ', testCluster3.getStdDeviation()
print '\tTest 5: ', epDiff.epsilonDifference(testCluster1.getStdDeviation(), 0.3499271),'\n\t\tExpected: 0.3499271\tResult: ', testCluster1.getStdDeviation()
#test the getVolume method
print '\tTest 6: ', testCluster1.getVolume() == 7,'\n\t\tExpected: 7\tResult: ', testCluster1.getVolume()
print '\tTest 7: ', testCluster2.getVolume() == 7,'\n\t\tExpected: 7\tResult: ', testCluster2.getVolume()
print '\tTest 8: ', testCluster3.getVolume() == 8,'\n\t\tExpected: 8\tResult: ', testCluster3.getVolume()
#test the densityOfSlice method
#NOTE:slicing from 1 to remove background cluster
clusterList = cLib.connectedComponents(cLib.otsuVox(testData5))[1:]
test10 = cLib.densityOfSlice(clusterList, 0, 5, 0, 5, 0, 5)
print '\tTest 10: ', epDiff.epsilonDifference(test10, 2.22222222),'\n\t\tExpected: 2.22222\tResult: ', test10
test11 = cLib.densityOfSlice(clusterList, 0, 2, 0, 2, 0, 2)
print '\tTest 11: ', epDiff.epsilonDifference(test11, 17.3611),'\n\t\tExpected: 17.31111\tResult: ', test11
test12 = cLib.densityOfSlice(clusterList, 2, 3, 2, 3, 2, 3)
print '\tTest 12: ', test12 == 0.,'\n\t\tExpected: 0.\tResult: ', test12
| apache-2.0 |
ttiurani/gsutil | gslib/commands/notification.py | 28 | 9348 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
import getopt
import uuid
from gslib.cloud_api import AccessDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url...
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = _WATCHBUCKET_SYNOPSIS + _STOPCHANNEL_SYNOPSIS.lstrip('\n')
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See
`Notification Authorization <https://developers.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
"""
_DESCRIPTION = """
The notification command can be used to configure notifications.
For more information on the Object Change Notification feature, please see:
https://developers.google.com/storage/docs/object-change-notification
The notification command has two sub-commands:
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file.
"""
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see:
https://developers.google.com/storage/docs/object-change-notification#_Authorization
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
class NotificationCommand(Command):
  """Implementation of gsutil notification command.

  Supports two subcommands:
    watchbucket: creates an object change notification channel on a bucket.
    stopchannel: tears down an existing notification channel.
  """

  # Command specification. See base class for documentation.
  command_spec = Command.CreateCommandSpec(
      'notification',
      command_name_aliases=[
          'notify', 'notifyconfig', 'notifications', 'notif'],
      usage_synopsis=_SYNOPSIS,
      min_args=3,
      max_args=NO_MAX,
      supported_sub_args='i:t:',
      file_url_ok=False,
      provider_url_ok=False,
      urls_start_arg=1,
      gs_api_support=[ApiSelector.JSON],
      gs_default_api=ApiSelector.JSON,
      argparse_arguments={
          'watchbucket': [
              CommandArgument.MakeFreeTextArgument(),
              CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
          ],
          'stopchannel': []
      }
  )
  # Help specification. See help_provider.py for documentation.
  help_spec = Command.HelpSpec(
      help_name='notification',
      help_name_aliases=['watchbucket', 'stopchannel', 'notifyconfig'],
      help_type='command_help',
      help_one_line_summary='Configure object change notification',
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={'watchbucket': _watchbucket_help_text,
                            'stopchannel': _stopchannel_help_text},
  )

  def _WatchBucket(self):
    """Creates a watch on a bucket given in self.args.

    Expects self.args to hold [app_url, ..., bucket_url] and self.sub_opts
    to optionally carry -i (channel identifier) and -t (client token).

    Returns:
      0 on success.

    Raises:
      CommandException: If the application URL is not https:// or the
          target URL is not a gs:// bucket URL.
      AccessDeniedException: Re-raised (after logging authorization
          guidance) if the endpoint is not authorized for the project.
    """
    self.CheckArguments()
    identifier = None
    client_token = None
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-i':
          identifier = a
        if o == '-t':
          client_token = a
    # If the user did not supply -i, generate a random channel identifier.
    identifier = identifier or str(uuid.uuid4())

    watch_url = self.args[0]
    bucket_arg = self.args[-1]

    if not watch_url.lower().startswith('https://'):
      raise CommandException('The application URL must be an https:// URL.')

    bucket_url = StorageUrlFromString(bucket_arg)
    # This check also guarantees the URL names a bucket, so no separate
    # IsBucket() check is needed afterwards.
    if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
      raise CommandException(
          'The %s command can only be used with gs:// bucket URLs.' %
          self.command_name)

    self.logger.info('Watching bucket %s with application URL %s ...',
                     bucket_url, watch_url)

    try:
      channel = self.gsutil_api.WatchBucket(
          bucket_url.bucket_name, watch_url, identifier, token=client_token,
          provider=bucket_url.scheme)
    except AccessDeniedException as e:
      # Most watchbucket failures are authorization problems; log targeted
      # guidance before re-raising for standard error handling.
      self.logger.warn(NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(
          watch_error=str(e), watch_url=watch_url))
      raise

    channel_id = channel.id
    resource_id = channel.resourceId
    client_token = channel.token

    self.logger.info('Successfully created watch notification channel.')
    self.logger.info('Watch channel identifier: %s', channel_id)
    self.logger.info('Canonicalized resource identifier: %s', resource_id)
    self.logger.info('Client state token: %s', client_token)

    return 0

  def _StopChannel(self):
    """Stops the channel named by self.args ([channel_id, resource_id]).

    Returns:
      0 on success.
    """
    channel_id = self.args[0]
    resource_id = self.args[1]

    self.logger.info('Removing channel %s with resource identifier %s ...',
                     channel_id, resource_id)
    self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
    # Fixed typo in user-visible log message ("Succesfully").
    self.logger.info('Successfully removed channel.')

    return 0

  def _RunSubCommand(self, func):
    """Parses sub-options from self.args, then invokes func.

    Args:
      func: Bound subcommand method to invoke after option parsing.

    Returns:
      The return value of func().
    """
    try:
      (self.sub_opts, self.args) = getopt.getopt(
          self.args, self.command_spec.supported_sub_args)
      return func()
    except getopt.GetoptError:
      self.RaiseInvalidArgumentException()

  def RunCommand(self):
    """Command entry point for the notification command."""
    subcommand = self.args.pop(0)
    if subcommand == 'watchbucket':
      return self._RunSubCommand(self._WatchBucket)
    elif subcommand == 'stopchannel':
      return self._RunSubCommand(self._StopChannel)
    else:
      raise CommandException('Invalid subcommand "%s" for the %s command.' %
                             (subcommand, self.command_name))
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.