repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
blckshrk/Weboob | contrib/windows-install/ez_setup.py | 1 | 11838 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.1.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _check_call_py24(cmd, *args, **kwargs):
res = subprocess.call(cmd, *args, **kwargs)
class CalledProcessError(Exception):
pass
if not res == 0:
msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
    """Unpack *tarball* into a temporary directory and run
    ``setup.py install`` from it.

    Returns 2 when the install step fails (used as the process exit code);
    the temporary directory is removed in all cases.
    """
    workdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', workdir)
    previous_cwd = os.getcwd()
    try:
        os.chdir(workdir)
        archive = tarfile.open(tarball)
        _extractall(archive)
        archive.close()

        # The tarball unpacks to a single top-level directory; enter it.
        extracted = os.path.join(workdir, os.listdir(workdir)[0])
        os.chdir(extracted)
        log.warn('Now working in %s', extracted)

        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(previous_cwd)
        shutil.rmtree(workdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build a setuptools egg into *to_dir*.

    Raises IOError if the expected *egg* file was not produced.
    """
    workdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', workdir)
    previous_cwd = os.getcwd()
    try:
        os.chdir(workdir)
        archive = tarfile.open(tarball)
        _extractall(archive)
        archive.close()

        # The tarball unpacks to a single top-level directory; enter it.
        extracted = os.path.join(workdir, os.listdir(workdir)[0])
        os.chdir(extracted)
        log.warn('Now working in %s', extracted)

        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(previous_cwd)
        shutil.rmtree(workdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure an egg for setuptools *version* exists in *to_dir*, then
    import setuptools from it and mark it as the bootstrap source.
    """
    egg_name = 'setuptools-%s-py%d.%d.egg' % (
        version, sys.version_info[0], sys.version_info[1])
    egg = os.path.join(to_dir, egg_name)
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']

    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, downloading it if needed.

    If pkg_resources is missing entirely, or an older setuptools is found
    but not yet imported, the requested version is downloaded and built as
    an egg in *to_dir*. If an older setuptools is already imported, the
    process exits with status 2 since it cannot be replaced in-place.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Record *before* the import below whether setuptools machinery was
    # already loaded — that determines whether an in-place upgrade is safe.
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: bootstrap from scratch.
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        # Requirement already satisfied; nothing to do.
        return
    except pkg_resources.VersionConflict:
        # sys.exc_info()[1] instead of `except ... as e` keeps this file
        # parseable by very old Pythons.
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            # Drop the stale module so the downloaded egg is picked up.
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    ps_script = (
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
        % vars()
    )
    subprocess.check_call(['powershell', '-Command', ps_script])
def has_powershell():
    """Return True when running on Windows with a working ``powershell``
    executable on the PATH.
    """
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        # Fixed: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. A missing/broken binary raises an OSError or
        # CalledProcessError, both covered by Exception.
        except Exception:
            return False
    finally:
        devnull.close()
    return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Fetch *url* into *target* using the ``curl`` command-line tool."""
    subprocess.check_call(['curl', url, '--silent', '--output', target])
def has_curl():
    """Probe for a working ``curl`` executable; True when available."""
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(['curl', '--version'],
                                  stdout=devnull, stderr=devnull)
        except:
            # Any failure (missing binary, non-zero exit) means unavailable.
            return False
    finally:
        devnull.close()
    return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Fetch *url* into *target* using the ``wget`` command-line tool."""
    subprocess.check_call(['wget', url, '--quiet', '--output-document', target])
def has_wget():
    """Probe for a working ``wget`` executable; True when available."""
    cmd = ['wget', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed; failures raise OSError/CalledProcessError.
        except Exception:
            return False
    finally:
        devnull.close()
    # Fixed: this previously returned False after a *successful* probe,
    # so the wget downloader could never be selected by
    # get_best_downloader() (compare has_curl / has_powershell).
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    # Python 3 / Python 2 urlopen locations.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = None
    dst = None
    try:
        src = urlopen(url)
        # Buffer the complete payload before opening the target so an
        # interrupted download never leaves a truncated file behind.
        payload = src.read()
        dst = open(target, "wb")
        dst.write(payload)
    finally:
        for handle in (src, dst):
            if handle:
                handle.close()
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable downloader, in order of preference.

    Preference runs from most to least trustworthy: powershell (validates
    TLS), curl, wget, then the non-validating pure-Python fallback.
    Returns None if nothing is viable (the insecure fallback always is).
    """
    preference = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for candidate in preference:
        if candidate.viable():
            return candidate
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15,
                        downloader_factory=get_best_downloader):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    archive_name = "setuptools-%s.tar.gz" % version
    saveto = os.path.join(to_dir, archive_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        url = download_base + archive_name
        log.warn("Downloading %s", url)
        downloader_factory()(url, saveto)
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    NOTE(review): this is a backport of TarFile.extractall for very old
    Pythons; *self* is expected to be a tarfile.TarFile instance (it uses
    extract/chown/utime/chmod/errorlevel/_dbg).
    """
    # Local imports keep the module importable on interpreters where
    # these are not needed until extraction time.
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            # Defer the real directory metadata until after all files
            # inside them have been written.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    # Deepest-first order so chmod/utime on a parent does not disturb
    # children processed afterwards.
    if sys.version_info < (2, 4):
        # Python < 2.4: list.sort has no `key`/`reverse`; use a cmp
        # function plus an explicit reverse. (Dead code on Python 3.)
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options

    Returns the optparse options object; positional arguments are ignored.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    options, _positional = parser.parse_args()
    # positional arguments are ignored
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    options = _parse_args()
    tarball = download_setuptools(
        download_base=options.download_base,
        downloader_factory=options.downloader_factory)
    return _install(tarball, _build_install_args(options))


if __name__ == '__main__':
    sys.exit(main())
| agpl-3.0 |
Juniper/contrail-dev-neutron | neutron/db/l3_gwmode_db.py | 13 | 2919 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sqlalchemy as sa
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)

# Convenience alias for the extension's gateway-info attribute name.
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO

# Modify the Router Data Model adding the enable_snat attribute
# NOTE: this monkey-patches the shared l3_db.Router model at import time,
# so importing this module is enough to add the column to the schema.
setattr(l3_db.Router, 'enable_snat',
        sa.Column(sa.Boolean, default=True, nullable=False))
class L3_NAT_db_mixin(l3_db.L3_NAT_db_mixin):
    """Mixin class to add configurable gateway modes."""

    # Register dict extend functions for ports and networks
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        l3.ROUTERS, ['_extend_router_dict_gw_mode'])

    def _extend_router_dict_gw_mode(self, router_res, router_db):
        """Copy gateway info (network id + enable_snat) from the DB model
        into the API response dict when the router has a gateway port.
        """
        if router_db.gw_port_id:
            nw_id = router_db.gw_port['network_id']
            router_res[EXTERNAL_GW_INFO] = {
                'network_id': nw_id,
                'enable_snat': router_db.enable_snat}

    def _update_router_gw_info(self, context, router_id, info, router=None):
        """Persist enable_snat alongside the base-class gateway update.

        ``info`` is the external_gateway_info dict from the request (may be
        falsy to clear the gateway). Returns the router DB object.
        """
        # Load the router only if necessary
        if not router:
            router = self._get_router(context, router_id)
        # if enable_snat is not specified use the value
        # stored in the database (default:True)
        enable_snat = not info or info.get('enable_snat', router.enable_snat)
        with context.session.begin(subtransactions=True):
            router.enable_snat = enable_snat
            # Calls superclass, pass router db object for avoiding re-loading
            super(L3_NAT_db_mixin, self)._update_router_gw_info(
                context, router_id, info, router=router)
        # Returning the router might come back useful if this
        # method is overriden in child classes
        return router

    def _build_routers_list(self, routers, gw_ports):
        """Attach each router's gateway port dict and enable_snat flag.

        ``routers`` are API-style router dicts (already holding
        EXTERNAL_GW_INFO); ``gw_ports`` are the corresponding port dicts.
        """
        # Index gateway ports by id for O(1) lookup per router.
        gw_port_id_gw_port_dict = {}
        for gw_port in gw_ports:
            gw_port_id_gw_port_dict[gw_port['id']] = gw_port
        for rtr in routers:
            gw_port_id = rtr['gw_port_id']
            if gw_port_id:
                rtr['gw_port'] = gw_port_id_gw_port_dict[gw_port_id]
                # Add enable_snat key
                rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat']
        return routers
| apache-2.0 |
HAYASAKA-Ryosuke/faker | faker/providers/person/en_US/__init__.py | 2 | 45665 | from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = {
'{{first_name_female}} {{last_name}}': 0.97,
'{{prefix_female}} {{first_name_female}} {{last_name}}': 0.015,
'{{first_name_female}} {{last_name}} {{suffix_female}}': 0.02,
'{{prefix_female}} {{first_name_female}} {{last_name}} '
'{{suffix_female}}': 0.005
}
formats_male = {
'{{first_name_male}} {{last_name}}': 0.97,
'{{prefix_male}} {{first_name_male}} {{last_name}}': 0.015,
'{{first_name_male}} {{last_name}} {{suffix_male}}': 0.02,
'{{prefix_male}} {{first_name_male}} {{last_name}} '
'{{suffix_male}}': 0.005
}
# Using random_element's dictionary weighting means that the
# formats = formats_male + formats_female
# has to be replaced with something dict and python 2.x compatible
formats = formats_male.copy()
formats.update(formats_female)
# Top 200 names of the decade from the 60's-90's from:
# https://www.ssa.gov/OACT/babynames/decades/names1960s.html
# Weightings derived from total number on each name
first_names_female = {
'April': 0.004529083, 'Abigail': 0.002043839, 'Adriana': 0.000488767,
'Adrienne': 0.000622931, 'Aimee': 0.000424727,
'Alejandra': 0.000415754, 'Alexa': 0.000663005,
'Alexandra': 0.002835711, 'Alexandria': 0.000964993,
'Alexis': 0.003446735, 'Alice': 0.000589904, 'Alicia': 0.003766845,
'Alisha': 0.000475942, 'Alison': 0.001506047, 'Allison': 0.003740866,
'Alyssa': 0.00324341, 'Amanda': 0.015360768, 'Amber': 0.006928794,
'Amy': 0.012860314, 'Ana': 0.000853679, 'Andrea': 0.006747028,
'Angel': 0.001161117, 'Angela': 0.011954085, 'Angelica': 0.001102746,
'Angie': 0.00030166, 'Anita': 0.001383767, 'Ann': 0.002627483,
'Anna': 0.004691502, 'Anne': 0.002089582, 'Annette': 0.001487399,
'Ariana': 0.000412668, 'Ariel': 0.000615774, 'Ashlee': 0.000696534,
'Ashley': 0.014773009, 'Audrey': 0.001139165, 'Autumn': 0.000918594,
'Bailey': 0.000691916, 'Barbara': 0.004839169, 'Becky': 0.000960944,
'Belinda': 0.000502227, 'Beth': 0.002246113, 'Bethany': 0.001249385,
'Betty': 0.000840241, 'Beverly': 0.000990272, 'Bianca': 0.000624835,
'Bonnie': 0.001351901, 'Brandi': 0.002077216, 'Brandy': 0.002177499,
'Breanna': 0.000876003, 'Brenda': 0.005737124, 'Briana': 0.00093665,
'Brianna': 0.002543549, 'Bridget': 0.000787232,
'Brittany': 0.007258404, 'Brittney': 0.001566147,
'Brooke': 0.002410152, 'Caitlin': 0.001808319, 'Caitlyn': 0.000481194,
'Candace': 0.000550662, 'Candice': 0.000653199, 'Carla': 0.00195185,
'Carly': 0.000498725, 'Carmen': 0.000891783, 'Carol': 0.002972719,
'Caroline': 0.001198127, 'Carolyn': 0.002647225,
'Carrie': 0.002934659, 'Casey': 0.001177707, 'Cassandra': 0.002501243,
'Cassidy': 0.000452129, 'Cassie': 0.000344886,
'Catherine': 0.004460622, 'Cathy': 0.001413248,
'Charlene': 0.000538865, 'Charlotte': 0.000530417,
'Chelsea': 0.00280043, 'Chelsey': 0.000368501, 'Cheryl': 0.004166447,
'Cheyenne': 0.000696907, 'Chloe': 0.000565807,
'Christie': 0.000397873, 'Christina': 0.008735669,
'Christine': 0.007488758, 'Christy': 0.00141861, 'Cindy': 0.003360109,
'Claire': 0.000553835, 'Claudia': 0.00096055, 'Colleen': 0.001836203,
'Connie': 0.001821845, 'Courtney': 0.00484939,
'Cristina': 0.000328734, 'Crystal': 0.006365045,
'Cynthia': 0.007655379, 'Daisy': 0.000437443, 'Dana': 0.003395805,
'Danielle': 0.006671783, 'Darlene': 0.000952737, 'Dawn': 0.005014983,
'Deanna': 0.002049026, 'Debbie': 0.001842922, 'Deborah': 0.005386088,
'Debra': 0.004123572, 'Denise': 0.004592291, 'Desiree': 0.000991497,
'Destiny': 0.001055515, 'Diamond': 0.000331732, 'Diana': 0.003699348,
'Diane': 0.003058996, 'Dominique': 0.000847857, 'Donna': 0.00570819,
'Doris': 0.000398026, 'Dorothy': 0.000722426, 'Ebony': 0.000399624,
'Eileen': 0.000544271, 'Elaine': 0.000601175,
'Elizabeth': 0.014954075, 'Ellen': 0.000747267, 'Emily': 0.009100581,
'Emma': 0.001272059, 'Erica': 0.004344471, 'Erika': 0.002105537,
'Erin': 0.005450719, 'Evelyn': 0.000825095, 'Faith': 0.000427113,
'Felicia': 0.001717294, 'Frances': 0.000546897,
'Gabriela': 0.000526937, 'Gabriella': 0.00044123,
'Gabrielle': 0.001090096, 'Gail': 0.00071934, 'Gina': 0.002841095,
'Glenda': 0.000384982, 'Gloria': 0.001155623, 'Grace': 0.00087202,
'Gwendolyn': 0.000407831, 'Hailey': 0.000662917, 'Haley': 0.001557939,
'Hannah': 0.004189822, 'Hayley': 0.000478305, 'Heather': 0.010945254,
'Heidi': 0.002239941, 'Helen': 0.000636675, 'Holly': 0.003487028,
'Isabel': 0.000352305, 'Isabella': 0.000410282, 'Jackie': 0.000566748,
'Jaclyn': 0.00047708, 'Jacqueline': 0.004811242, 'Jade': 0.000446264,
'Jaime': 0.000853175, 'Jamie': 0.005067663, 'Jane': 0.0009486,
'Janet': 0.002489993, 'Janice': 0.001593308, 'Jasmin': 0.000333374,
'Jasmine': 0.003025422, 'Jean': 0.000815969, 'Jeanette': 0.000767293,
'Jeanne': 0.000515381, 'Jenna': 0.001804052, 'Jennifer': 0.029218839,
'Jenny': 0.000932667, 'Jessica': 0.020047608, 'Jill': 0.003253018,
'Jillian': 0.000988587, 'Jo': 0.000442083, 'Joan': 0.000802793,
'Joann': 0.000544336, 'Joanna': 0.001176284, 'Joanne': 0.000729824,
'Jocelyn': 0.000456878, 'Jodi': 0.001252405, 'Jody': 0.000741861,
'Jordan': 0.001653057, 'Joy': 0.000916515, 'Joyce': 0.001009488,
'Judith': 0.000870706, 'Judy': 0.001101586, 'Julia': 0.003301891,
'Julie': 0.008211731, 'Kaitlin': 0.000674473, 'Kaitlyn': 0.001478623,
'Kara': 0.001549119, 'Karen': 0.009643845, 'Kari': 0.000794323,
'Karina': 0.000494764, 'Karla': 0.000387696, 'Katelyn': 0.001476128,
'Katherine': 0.006581479, 'Kathleen': 0.00503549,
'Kathryn': 0.004177806, 'Kathy': 0.002710214, 'Katie': 0.003056216,
'Katrina': 0.001565446, 'Kayla': 0.004621465, 'Kaylee': 0.000551734,
'Kelli': 0.000932163, 'Kellie': 0.000299187, 'Kelly': 0.009342929,
'Kelsey': 0.002470383, 'Kendra': 0.001401079, 'Kerri': 0.000316215,
'Kerry': 0.000352984, 'Kiara': 0.000390037, 'Kim': 0.002518642,
'Kimberly': 0.015594077, 'Kirsten': 0.000369486,
'Krista': 0.001266872, 'Kristen': 0.004345587, 'Kristi': 0.001022926,
'Kristie': 0.000380189, 'Kristin': 0.003613728,
'Kristina': 0.002316281, 'Kristine': 0.000977709,
'Kristy': 0.001097734, 'Krystal': 0.001238113, 'Kylie': 0.00049739,
'Lacey': 0.00045469, 'Latasha': 0.00032904, 'Latoya': 0.000646371,
'Laura': 0.010815096, 'Lauren': 0.007015421, 'Laurie': 0.002200786,
'Leah': 0.001997571, 'Leslie': 0.003606134, 'Linda': 0.006437751,
'Lindsay': 0.002185466, 'Lindsey': 0.002646153, 'Lisa': 0.01872729,
'Loretta': 0.000482945, 'Lori': 0.006040316, 'Lorraine': 0.000486753,
'Lydia': 0.000370274, 'Lynn': 0.001522308, 'Mackenzie': 0.000761056,
'Madeline': 0.000808921, 'Madison': 0.002011184,
'Makayla': 0.000439391, 'Mallory': 0.000688633, 'Mandy': 0.000355566,
'Marcia': 0.000403213, 'Margaret': 0.003839968, 'Maria': 0.006593123,
'Mariah': 0.00097598, 'Marie': 0.001520229, 'Marilyn': 0.000590889,
'Marisa': 0.000339983, 'Marissa': 0.001582627, 'Martha': 0.001290028,
'Mary': 0.014288466, 'Maureen': 0.000753855, 'Mckenzie': 0.000334512,
'Meagan': 0.000729999, 'Megan': 0.007686786, 'Meghan': 0.001481578,
'Melanie': 0.003400117, 'Melinda': 0.002078113,
'Melissa': 0.014890692, 'Melody': 0.000404264,
'Mercedes': 0.000334643, 'Meredith': 0.000766987, 'Mia': 0.000319935,
'Michaela': 0.000506998, 'Michele': 0.003519551,
'Michelle': 0.01527423, 'Mikayla': 0.000410195, 'Mindy': 0.000306891,
'Miranda': 0.001421193, 'Misty': 0.001564614, 'Molly': 0.001710641,
'Monica': 0.004324095, 'Monique': 0.001272125, 'Morgan': 0.002527025,
'Nancy': 0.005023343, 'Natalie': 0.003658398, 'Natasha': 0.001739815,
'Nichole': 0.001001237, 'Nicole': 0.011156655, 'Nina': 0.000298115,
'Norma': 0.000470754, 'Olivia': 0.001967609, 'Paige': 0.001106313,
'Pam': 0.000374454, 'Pamela': 0.005816222, 'Patricia': 0.008349353,
'Patty': 0.000383493, 'Paula': 0.002478284, 'Peggy': 0.000810606,
'Penny': 0.000836564, 'Phyllis': 0.000562437,
'Priscilla': 0.000350226, 'Rachael': 0.001098128,
'Rachel': 0.00876108, 'Raven': 0.000404855, 'Rebecca': 0.010563161,
'Rebekah': 0.000858581, 'Regina': 0.001941739, 'Renee': 0.00257883,
'Rhonda': 0.002879221, 'Rita': 0.000719187, 'Roberta': 0.000461715,
'Robin': 0.00409199, 'Robyn': 0.00032138, 'Rose': 0.000697125,
'Ruth': 0.001041946, 'Sabrina': 0.001920969, 'Sally': 0.000532912,
'Samantha': 0.008186124, 'Sandra': 0.006473426, 'Sandy': 0.000497106,
'Sara': 0.005619879, 'Sarah': 0.014434273, 'Savannah': 0.000978344,
'Selena': 0.000329106, 'Shannon': 0.005952552, 'Shari': 0.000449043,
'Sharon': 0.004796469, 'Shawna': 0.000354209, 'Sheena': 0.000355763,
'Sheila': 0.00220129, 'Shelby': 0.001575601, 'Shelia': 0.000403673,
'Shelley': 0.000922227, 'Shelly': 0.001339469, 'Sheri': 0.000913166,
'Sherri': 0.001285038, 'Sherry': 0.002445235, 'Sheryl': 0.00057025,
'Shirley': 0.000833259, 'Sierra': 0.000954816, 'Sonia': 0.000332739,
'Sonya': 0.000914085, 'Sophia': 0.000535976, 'Stacey': 0.002836761,
'Stacie': 0.0003903, 'Stacy': 0.00311717, 'Stefanie': 0.00034644,
'Stephanie': 0.013595762, 'Sue': 0.000472877, 'Summer': 0.000411508,
'Susan': 0.0088973, 'Suzanne': 0.001943577, 'Sydney': 0.001220101,
'Sylvia': 0.000625798, 'Tabitha': 0.000428404, 'Tamara': 0.00212948,
'Tami': 0.000403651, 'Tammie': 0.00042337, 'Tammy': 0.006493584,
'Tanya': 0.002039024, 'Tara': 0.00316834, 'Tasha': 0.000355807,
'Taylor': 0.003996871, 'Teresa': 0.005060003, 'Terri': 0.001823903,
'Terry': 0.00060494, 'Theresa': 0.003492762, 'Tiffany': 0.006594283,
'Tina': 0.005186419, 'Toni': 0.000891695, 'Tonya': 0.002404133,
'Tracey': 0.001511146, 'Traci': 0.00086193, 'Tracie': 0.000301901,
'Tracy': 0.00498572, 'Tricia': 0.000449196, 'Valerie': 0.003218022,
'Vanessa': 0.003779189, 'Veronica': 0.003017805, 'Vicki': 0.00088653,
'Vickie': 0.000695199, 'Victoria': 0.005237677,
'Virginia': 0.001496482, 'Wanda': 0.001336186, 'Wendy': 0.004058263,
'Whitney': 0.001690768, 'Yesenia': 0.000331951,
'Yolanda': 0.001213819, 'Yvette': 0.000483427, 'Yvonne': 0.001005483,
'Zoe': 0.000367407,
}
first_names_male = {
'Aaron': 0.006741589, 'Adam': 0.007124922, 'Adrian': 0.001521889,
'Alan': 0.002344657, 'Albert': 0.001316595, 'Alec': 0.000442958,
'Alejandro': 0.000862489, 'Alex': 0.002111833,
'Alexander': 0.005215733, 'Alexis': 0.000277915,
'Alfred': 0.000318919, 'Allen': 0.001679613, 'Alvin': 0.00024794,
'Andre': 0.001400621, 'Andres': 0.000335574, 'Andrew': 0.013475074,
'Angel': 0.000902262, 'Anthony': 0.013783357, 'Antonio': 0.002392535,
'Arthur': 0.001342637, 'Austin': 0.003785615, 'Barry': 0.001102751,
'Benjamin': 0.006535474, 'Bernard': 0.000298691, 'Bill': 0.000430013,
'Billy': 0.001749806, 'Blake': 0.001218155, 'Bob': 0.000235731,
'Bobby': 0.001666977, 'Brad': 0.000984544, 'Bradley': 0.003845018,
'Brady': 0.000277522, 'Brandon': 0.009518346, 'Brendan': 0.000736758,
'Brent': 0.001889131, 'Brett': 0.002248371, 'Brian': 0.01597677,
'Bruce': 0.001883335, 'Bryan': 0.00456454, 'Bryce': 0.000457406,
'Caleb': 0.001485861, 'Calvin': 0.001168738, 'Cameron': 0.00180755,
'Carl': 0.002011802, 'Carlos': 0.00266638, 'Casey': 0.001440035,
'Cesar': 0.000304898, 'Chad': 0.003858817, 'Charles': 0.010889881,
'Chase': 0.000971942, 'Chris': 0.001389507, 'Christian': 0.003097779,
'Christopher': 0.02783596, 'Clarence': 0.000299289,
'Clayton': 0.000662222, 'Clifford': 0.00053078,
'Clinton': 0.000579307, 'Cody': 0.00353482, 'Cole': 0.000578811,
'Colin': 0.00078508, 'Collin': 0.000406057, 'Colton': 0.000520845,
'Connor': 0.000981073, 'Corey': 0.002476612, 'Cory': 0.001813005,
'Craig': 0.00338161, 'Cristian': 0.000333847, 'Curtis': 0.002140235,
'Dakota': 0.000797614, 'Dale': 0.001171354, 'Dalton': 0.000615113,
'Damon': 0.00034308, 'Dan': 0.000388496, 'Daniel': 0.018881874,
'Danny': 0.001873879, 'Darin': 0.000234962, 'Darius': 0.000336189,
'Darrell': 0.001218582, 'Darren': 0.001253738, 'Darryl': 0.00067019,
'Daryl': 0.000260918, 'Dave': 0.000269673, 'David': 0.031073833,
'Dean': 0.000965375, 'Dennis': 0.003318992, 'Derek': 0.003095299,
'Derrick': 0.001955921, 'Devin': 0.001312474, 'Devon': 0.000485877,
'Dillon': 0.000558361, 'Dominic': 0.000438221, 'Don': 0.000378322,
'Donald': 0.005689572, 'Douglas': 0.004513687, 'Drew': 0.000596868,
'Duane': 0.00061855, 'Dustin': 0.003088938, 'Dwayne': 0.000711382,
'Dylan': 0.002329096, 'Earl': 0.000348347, 'Eddie': 0.0007944,
'Edgar': 0.000379536, 'Eduardo': 0.000465358, 'Edward': 0.005702242,
'Edwin': 0.001117833, 'Elijah': 0.000592183, 'Eric': 0.012024659,
'Erik': 0.001997096, 'Ernest': 0.000746556, 'Ethan': 0.001143978,
'Eugene': 0.000784243, 'Evan': 0.001570691, 'Fernando': 0.000557608,
'Francis': 0.000330837, 'Francisco': 0.001084335,
'Frank': 0.003276449, 'Franklin': 0.000237561, 'Fred': 0.000396618,
'Frederick': 0.001104188, 'Gabriel': 0.001906504,
'Garrett': 0.001124861, 'Gary': 0.005023109, 'Gavin': 0.000295373,
'Gene': 0.00023426, 'Geoffrey': 0.000425978, 'George': 0.004423984,
'Gerald': 0.00165841, 'Gilbert': 0.000246726, 'Glen': 0.000374338,
'Glenn': 0.001111421, 'Gordon': 0.00027075, 'Grant': 0.00068322,
'Greg': 0.000623492, 'Gregg': 0.000235885, 'Gregory': 0.007676443,
'Guy': 0.000262645, 'Harold': 0.000929467, 'Harry': 0.000586934,
'Hayden': 0.000279454, 'Hector': 0.000798691, 'Henry': 0.001856232,
'Herbert': 0.000234226, 'Howard': 0.000712921, 'Hunter': 0.001034679,
'Ian': 0.001863192, 'Isaac': 0.001001951, 'Isaiah': 0.000625441,
'Ivan': 0.000350433, 'Jack': 0.001839748, 'Jackson': 0.000403253,
'Jacob': 0.007845384, 'Jaime': 0.000421378, 'Jake': 0.000565782,
'James': 0.029601617, 'Jamie': 0.00093552, 'Jared': 0.002538802,
'Jason': 0.01520513, 'Javier': 0.000625202, 'Jay': 0.001411462,
'Jeff': 0.001271436, 'Jeffery': 0.002627873, 'Jeffrey': 0.01225709,
'Jeremiah': 0.001209605, 'Jeremy': 0.006336079,
'Jermaine': 0.000450156, 'Jerome': 0.000634299, 'Jerry': 0.003150273,
'Jesse': 0.003884552, 'Jesus': 0.001628965, 'Jim': 0.000567714,
'Jimmy': 0.001607489, 'Joe': 0.001621544, 'Joel': 0.002537742,
'John': 0.028683008, 'Johnathan': 0.000840448, 'Johnny': 0.002117065,
'Jon': 0.001561184, 'Jonathan': 0.009963971, 'Jonathon': 0.000701157,
'Jordan': 0.003451546, 'Jorge': 0.001180553, 'Jose': 0.005368207,
'Joseph': 0.018604763, 'Joshua': 0.014808101, 'Juan': 0.003233598,
'Julian': 0.000693736, 'Justin': 0.010197889, 'Karl': 0.000362437,
'Keith': 0.004622866, 'Kelly': 0.000775283, 'Kenneth': 0.008318145,
'Kent': 0.000329418, 'Kerry': 0.000261448, 'Kevin': 0.014324157,
'Kirk': 0.0003801, 'Kristopher': 0.000580692, 'Kurt': 0.000716375,
'Kyle': 0.006350049, 'Lance': 0.001048495, 'Larry': 0.003658807,
'Lawrence': 0.001670294, 'Lee': 0.001223883, 'Leon': 0.000236347,
'Leonard': 0.000756713, 'Leroy': 0.000260234, 'Leslie': 0.000234637,
'Levi': 0.000347184, 'Logan': 0.001325812, 'Lonnie': 0.000258576,
'Louis': 0.001212255, 'Lucas': 0.001098237, 'Luis': 0.002427777,
'Luke': 0.001221455, 'Malik': 0.000306813, 'Manuel': 0.001331369,
'Marc': 0.001431947, 'Marco': 0.000290586, 'Marcus': 0.002604122,
'Mario': 0.001229337, 'Mark': 0.014382277, 'Martin': 0.002085226,
'Marvin': 0.000732962, 'Mason': 0.000562037, 'Mathew': 0.000605555,
'Matthew': 0.020425018, 'Maurice': 0.000777078, 'Max': 0.000311276,
'Maxwell': 0.000357478, 'Melvin': 0.00061932, 'Michael': 0.045602241,
'Micheal': 0.001273847, 'Miguel': 0.001416267, 'Mike': 0.001221797,
'Mitchell': 0.001747788, 'Nathan': 0.005039405,
'Nathaniel': 0.001887558, 'Neil': 0.000240331,
'Nicholas': 0.010021219, 'Nicolas': 0.000362522, 'Noah': 0.000960947,
'Norman': 0.000389043, 'Omar': 0.000639052, 'Oscar': 0.000946583,
'Parker': 0.000277522, 'Patrick': 0.007153255, 'Paul': 0.009272953,
'Pedro': 0.000275726, 'Perry': 0.000258644, 'Peter': 0.004340385,
'Philip': 0.002262956, 'Phillip': 0.00280273, 'Preston': 0.000292022,
'Ralph': 0.000836891, 'Randall': 0.001614722, 'Randy': 0.003021926,
'Ray': 0.000379451, 'Raymond': 0.003493952, 'Reginald': 0.00095108,
'Ricardo': 0.001197276, 'Richard': 0.014131961, 'Rick': 0.000440016,
'Rickey': 0.00023833, 'Ricky': 0.001856882, 'Riley': 0.000322031,
'Robert': 0.026938092, 'Roberto': 0.000906024, 'Rodney': 0.002180555,
'Roger': 0.002038032, 'Ronald': 0.00576775, 'Ronnie': 0.000905938,
'Ross': 0.00026863, 'Roy': 0.001311346, 'Ruben': 0.000774821,
'Russell': 0.002096221, 'Ryan': 0.01128178, 'Samuel': 0.00498019,
'Scott': 0.010580999, 'Sean': 0.005593456, 'Sergio': 0.000568518,
'Seth': 0.001537416, 'Shane': 0.002530218, 'Shannon': 0.000421583,
'Shaun': 0.000748761, 'Shawn': 0.004474546, 'Spencer': 0.000912094,
'Stanley': 0.000739032, 'Stephen': 0.007675365, 'Steve': 0.001407564,
'Steven': 0.013292898, 'Stuart': 0.000238826, 'Tanner': 0.000639292,
'Taylor': 0.00133036, 'Terrance': 0.000203311,
'Terrence': 0.000203704, 'Terry': 0.002873624,
'Theodore': 0.000596561, 'Thomas': 0.0143364, 'Tim': 0.000711126,
'Timothy': 0.012632608, 'Todd': 0.00414612, 'Tom': 0.000499283,
'Tommy': 0.000778737, 'Tony': 0.002511563, 'Tracy': 0.000728259,
'Travis': 0.004022458, 'Trevor': 0.001692523, 'Tristan': 0.000408759,
'Troy': 0.002695415, 'Tyler': 0.005962323, 'Tyrone': 0.000587207,
'Vernon': 0.000246401, 'Victor': 0.002340621, 'Vincent': 0.002494515,
'Walter': 0.001525891, 'Warren': 0.000317414, 'Wayne': 0.00160966,
'Wesley': 0.001733835, 'William': 0.020025989, 'Willie': 0.001379247,
'Wyatt': 0.000306591, 'Xavier': 0.000415222, 'Zachary': 0.005918634,
}
first_names = first_names_male.copy()
first_names.update(first_names_female)
# Top 1000 US surnames from US Census data
# Weighted by number of occurrences
# By way of http://names.mongabay.com/data/1000.html on 2/10/2016
# Maps surname -> relative frequency weight.  Values appear to be population
# fractions (e.g. Smith ~2.17%) -- TODO confirm against the source ranking.
# The table covers only the most common names, so the weights do not sum to 1.
last_names = {
'Smith': 0.021712045, 'Johnson': 0.01696938, 'Williams': 0.014016962,
'Brown': 0.012610763, 'Jones': 0.012451866, 'Miller': 0.010305045,
'Davis': 0.009798219, 'Garcia': 0.007842422, 'Rodriguez': 0.007348561,
'Wilson': 0.007154951, 'Martinez': 0.007082045,
'Anderson': 0.006966203, 'Taylor': 0.006582218, 'Thomas': 0.006493824,
'Hernandez': 0.006454314, 'Moore': 0.006383948, 'Martin': 0.006146745,
'Jackson': 0.006086567, 'Thompson': 0.005887767, 'White': 0.005843424,
'Lopez': 0.005679145, 'Lee': 0.005535909, 'Gonzalez': 0.005461513,
'Harris': 0.005423356, 'Clark': 0.005010598, 'Lewis': 0.00465937,
'Robinson': 0.004596305, 'Walker': 0.004580579, 'Perez': 0.00446375,
'Hall': 0.004327121, 'Young': 0.004257495, 'Allen': 0.00423392,
'Sanchez': 0.004031749, 'Wright': 0.004023754, 'King': 0.004011135,
'Scott': 0.003838487, 'Green': 0.003778053, 'Baker': 0.003776901,
'Adams': 0.00377448, 'Nelson': 0.003766713, 'Hill': 0.003762455,
'Ramirez': 0.003554281, 'Campbell': 0.003398636,
'Mitchell': 0.003357336, 'Roberts': 0.003346207, 'Carter': 0.0033127,
'Phillips': 0.003214932, 'Evans': 0.003127113, 'Turner': 0.003067045,
'Torres': 0.002971158, 'Parker': 0.002962725, 'Collins': 0.002904264,
'Edwards': 0.002897155, 'Stewart': 0.002859044, 'Flores': 0.002856449,
'Morris': 0.002848582, 'Nguyen': 0.002833697, 'Murphy': 0.00274576,
'Rivera': 0.002736275, 'Cook': 0.002693623, 'Rogers': 0.002690041,
'Morgan': 0.002525543, 'Peterson': 0.002513125, 'Cooper': 0.00246795,
'Reed': 0.0024437, 'Bailey': 0.002429747, 'Bell': 0.002419112,
'Gomez': 0.002408494, 'Kelly': 0.002379209, 'Howard': 0.002327986,
'Ward': 0.002321973, 'Cox': 0.002318775, 'Diaz': 0.00230051,
'Richardson': 0.002280051, 'Wood': 0.002259639, 'Watson': 0.002215168,
'Brooks': 0.002199808, 'Bennett': 0.002184311, 'Gray': 0.002162912,
'James': 0.002131032, 'Reyes': 0.002124517, 'Cruz': 0.002111304,
'Hughes': 0.002095999, 'Price': 0.002090206, 'Myers': 0.002054278,
'Long': 0.002042126, 'Foster': 0.002019703, 'Sanders': 0.002018442,
'Ross': 0.002009844, 'Morales': 0.001988655, 'Powell': 0.001978704,
'Sullivan': 0.001970362, 'Russell': 0.001968461, 'Ortiz': 0.001961617,
'Jenkins': 0.001952974, 'Gutierrez': 0.001945371,
'Perry': 0.001942986, 'Butler': 0.001926859, 'Barnes': 0.00192272,
'Fisher': 0.001921377, 'Henderson': 0.001919686,
'Coleman': 0.001906255, 'Simmons': 0.001842531,
'Patterson': 0.00181427, 'Jordan': 0.00180198,
'Reynolds': 0.001787233, 'Hamilton': 0.001775656,
'Graham': 0.001773307, 'Kim': 0.001773243, 'Gonzales': 0.001772028,
'Alexander': 0.001767542, 'Ramos': 0.001764371,
'Wallace': 0.001743026, 'Griffin': 0.001741893, 'West': 0.001722047,
'Cole': 0.001715916, 'Hayes': 0.001712992, 'Chavez': 0.001698299,
'Gibson': 0.001685096, 'Bryant': 0.001679075, 'Ellis': 0.001662381,
'Stevens': 0.001657657, 'Murray': 0.001630218, 'Ford': 0.001630062,
'Marshall': 0.001619244, 'Owens': 0.001611212,
'Mcdonald': 0.001609019, 'Harrison': 0.001604295, 'Ruiz': 0.001602943,
'Kennedy': 0.001568285, 'Wells': 0.001559139, 'Alvarez': 0.001542527,
'Woods': 0.0015425, 'Mendoza': 0.001540243, 'Castillo': 0.001511972,
'Olson': 0.001493963, 'Webb': 0.001493771, 'Washington': 0.001489705,
'Tucker': 0.001488763, 'Freeman': 0.001486507, 'Burns': 0.001481636,
'Henry': 0.001474683, 'Vasquez': 0.001461863, 'Snyder': 0.001456143,
'Simpson': 0.001445891, 'Crawford': 0.001444795,
'Jimenez': 0.001438892, 'Porter': 0.001433163, 'Mason': 0.0014207,
'Shaw': 0.001417849, 'Gordon': 0.001415674, 'Wagner': 0.001411855,
'Hunter': 0.001410886, 'Romero': 0.001405057, 'Hicks': 0.00140365,
'Dixon': 0.001389003, 'Hunt': 0.001388738, 'Palmer': 0.00137431,
'Robertson': 0.001373323, 'Black': 0.001372291, 'Holmes': 0.001372108,
'Stone': 0.001368782, 'Meyer': 0.001367521, 'Boyd': 0.001365803,
'Mills': 0.001351485, 'Warren': 0.001351458, 'Fox': 0.001346441,
'Rose': 0.001342485, 'Rice': 0.001338062, 'Moreno': 0.001334846,
'Schmidt': 0.001330067, 'Patel': 0.001325508, 'Ferguson': 0.001299832,
'Nichols': 0.001296908, 'Herrera': 0.0012864, 'Medina': 0.001273307,
'Ryan': 0.001273142, 'Fernandez': 0.001272841, 'Weaver': 0.001268354,
'Daniels': 0.001268034, 'Stephens': 0.001267724,
'Gardner': 0.001266974, 'Payne': 0.0012612, 'Kelley': 0.001256878,
'Dunn': 0.001251395, 'Pierce': 0.001247393, 'Arnold': 0.001245547,
'Tran': 0.001243537, 'Spencer': 0.001228443, 'Peters': 0.001226505,
'Hawkins': 0.001224998, 'Grant': 0.001224705, 'Hansen': 0.001219589,
'Castro': 0.001217578, 'Hoffman': 0.001212014, 'Hart': 0.001210378,
'Elliott': 0.001210296, 'Cunningham': 0.00120517,
'Knight': 0.001204841, 'Bradley': 0.001199624, 'Carroll': 0.001197166,
'Hudson': 0.001195091, 'Duncan': 0.001191674,
'Armstrong': 0.001187681, 'Berry': 0.001182409,
'Andrews': 0.001181632, 'Johnston': 0.001178114, 'Ray': 0.001176826,
'Lane': 0.001176214, 'Riley': 0.001169206, 'Carpenter': 0.001161101,
'Perkins': 0.001159986, 'Aguilar': 0.001154942, 'Silva': 0.001152795,
'Richards': 0.001148126, 'Willis': 0.001147888,
'Matthews': 0.001140688, 'Chapman': 0.001138632,
'Lawrence': 0.001135955, 'Garza': 0.00113421, 'Vargas': 0.001132583,
'Watkins': 0.001118832, 'Wheeler': 0.00111186, 'Larson': 0.001106195,
'Carlson': 0.001097606, 'Harper': 0.001095267, 'George': 0.001094444,
'Greene': 0.001092855, 'Burke': 0.001088935, 'Guzman': 0.001081762,
'Morrison': 0.001077641, 'Munoz': 0.001076133, 'Jacobs': 0.001055721,
'Obrien': 0.001054304, 'Lawson': 0.001052486, 'Franklin': 0.001049498,
'Lynch': 0.001045743, 'Bishop': 0.00104196, 'Carr': 0.001040662,
'Salazar': 0.001036788, 'Austin': 0.001033974, 'Mendez': 0.0010301,
'Gilbert': 0.001027084, 'Jensen': 0.001026408,
'Williamson': 0.001025348, 'Montgomery': 0.00102469,
'Harvey': 0.001024617, 'Oliver': 0.001020094, 'Howell': 0.001001756,
'Dean': 0.000998064, 'Hanson': 0.000996685, 'Weber': 0.000985601,
'Garrett': 0.000984788, 'Sims': 0.000979918, 'Burton': 0.000979132,
'Fuller': 0.000974783, 'Soto': 0.000974317, 'Mccoy': 0.000972946,
'Welch': 0.00096676, 'Chen': 0.000964384, 'Schultz': 0.000959067,
'Walters': 0.000952844, 'Reid': 0.00095034, 'Fields': 0.00094335,
'Walsh': 0.000943113, 'Little': 0.000938563, 'Fowler': 0.000937667,
'Bowman': 0.000934186, 'Davidson': 0.000932404, 'May': 0.000929498,
'Day': 0.000929041, 'Schneider': 0.00091878, 'Newman': 0.000918214,
'Brewer': 0.000917976, 'Lucas': 0.000917538, 'Holland': 0.000912677,
'Wong': 0.000908172, 'Banks': 0.000907276, 'Santos': 0.000904526,
'Curtis': 0.000904206, 'Pearson': 0.000902105, 'Delgado': 0.000901621,
'Valdez': 0.000901027, 'Pena': 0.000898605, 'Rios': 0.000882377,
'Douglas': 0.000881062, 'Sandoval': 0.000879947,
'Barrett': 0.000876228, 'Hopkins': 0.000864414, 'Keller': 0.000861645,
'Guerrero': 0.000860293, 'Stanley': 0.000857232, 'Bates': 0.000856555,
'Alvarado': 0.000856373, 'Beck': 0.000851238, 'Ortega': 0.000850963,
'Wade': 0.00084825, 'Estrada': 0.000848222, 'Contreras': 0.00084666,
'Barnett': 0.000843252, 'Caldwell': 0.00083458,
'Santiago': 0.00083119, 'Lambert': 0.000828001, 'Powers': 0.000826019,
'Chambers': 0.000825324, 'Nunez': 0.000824255, 'Craig': 0.000818618,
'Leonard': 0.000815027, 'Lowe': 0.000814844, 'Rhodes': 0.000812459,
'Byrd': 0.00081149, 'Gregory': 0.000811481, 'Shelton': 0.000807059,
'Frazier': 0.00080705, 'Becker': 0.000805122,
'Maldonado': 0.000804226, 'Fleming': 0.000803614, 'Vega': 0.000801595,
'Sutton': 0.000798351, 'Cohen': 0.000797008, 'Jennings': 0.00079529,
'Parks': 0.000788967, 'Mcdaniel': 0.000788702, 'Watts': 0.000787889,
'Barker': 0.000778688, 'Norris': 0.000778605, 'Vaughn': 0.000777006,
'Vazquez': 0.000775992, 'Holt': 0.000774018, 'Schwartz': 0.000773918,
'Steele': 0.000770756, 'Benson': 0.00076966, 'Neal': 0.000766151,
'Dominguez': 0.000765073, 'Horton': 0.000763173, 'Terry': 0.000762387,
'Wolfe': 0.000759417, 'Hale': 0.000757983, 'Lyons': 0.000751614,
'Graves': 0.000750892, 'Haynes': 0.000749595, 'Miles': 0.000748644,
'Park': 0.000748251, 'Warner': 0.000747648, 'Padilla': 0.000747475,
'Bush': 0.000744907, 'Thornton': 0.000741864, 'Mccarthy': 0.000740439,
'Mann': 0.00074032, 'Zimmerman': 0.000739608, 'Erickson': 0.000739534,
'Fletcher': 0.000739498, 'Mckinney': 0.00073661, 'Page': 0.000735487,
'Dawson': 0.000732718, 'Joseph': 0.000731256, 'Marquez': 0.000730534,
'Reeves': 0.00072931, 'Klein': 0.000728104, 'Espinoza': 0.000724787,
'Baldwin': 0.000723224, 'Moran': 0.000717696, 'Love': 0.000715659,
'Robbins': 0.000713996, 'Higgins': 0.000713685, 'Ball': 0.000708696,
'Cortez': 0.000708066, 'Le': 0.000707709, 'Griffith': 0.00070749,
'Bowen': 0.000704283, 'Sharp': 0.000702364, 'Cummings': 0.000700893,
'Ramsey': 0.000700144, 'Hardy': 0.000699988, 'Swanson': 0.000699358,
'Barber': 0.000699038, 'Acosta': 0.000698791, 'Luna': 0.000695593,
'Chandler': 0.000695474, 'Daniel': 0.000686529, 'Blair': 0.000686529,
'Cross': 0.00068652, 'Simon': 0.000683824, 'Dennis': 0.000683322,
'Oconnor': 0.000683066, 'Quinn': 0.00068101, 'Gross': 0.000678762,
'Navarro': 0.000675884, 'Moss': 0.000673874,
'Fitzgerald': 0.000671791, 'Doyle': 0.000671754,
'Mclaughlin': 0.000668191, 'Rojas': 0.00066767,
'Rodgers': 0.000667213, 'Stevenson': 0.000666034, 'Singh': 0.00066375,
'Yang': 0.000663613, 'Figueroa': 0.000662754, 'Harmon': 0.000661667,
'Newton': 0.000660881, 'Paul': 0.00066015, 'Manning': 0.000658514,
'Garner': 0.000658359, 'Mcgee': 0.000657198, 'Reese': 0.000655636,
'Francis': 0.000655353, 'Burgess': 0.000654265, 'Adkins': 0.000653571,
'Goodman': 0.000653151, 'Curry': 0.00065189, 'Brady': 0.000650345,
'Christensen': 0.000650062, 'Potter': 0.000649688,
'Walton': 0.000648719, 'Goodwin': 0.000642652, 'Mullins': 0.000642222,
'Molina': 0.000641537, 'Webster': 0.000640733, 'Fischer': 0.000640477,
'Campos': 0.000639152, 'Avila': 0.000638175, 'Sherman': 0.000638147,
'Todd': 0.000637873, 'Chang': 0.00063738, 'Blake': 0.000633021,
'Malone': 0.00063282, 'Wolf': 0.000629604, 'Hodges': 0.000629266,
'Juarez': 0.000628507, 'Gill': 0.000627722, 'Farmer': 0.000624158,
'Hines': 0.00062266, 'Gallagher': 0.00062202, 'Duran': 0.000621755,
'Hubbard': 0.000621527, 'Cannon': 0.000620631, 'Miranda': 0.0006181,
'Wang': 0.000617406, 'Saunders': 0.000614116, 'Tate': 0.000614098,
'Mack': 0.000613604, 'Hammond': 0.000612773, 'Carrillo': 0.000612691,
'Townsend': 0.000610854, 'Wise': 0.000609803, 'Ingram': 0.000609136,
'Barton': 0.000608743, 'Mejia': 0.000607939, 'Ayala': 0.000607766,
'Schroeder': 0.000606825, 'Hampton': 0.000606514, 'Rowe': 0.000604933,
'Parsons': 0.000604915, 'Frank': 0.000602311, 'Waters': 0.000601388,
'Strickland': 0.000601361, 'Osborne': 0.000601251,
'Maxwell': 0.000601041, 'Chan': 0.000600493, 'Deleon': 0.000599387,
'Norman': 0.000596381, 'Harrington': 0.00059512, 'Casey': 0.000592232,
'Patton': 0.00059184, 'Logan': 0.000590049, 'Bowers': 0.000589318,
'Mueller': 0.000587572, 'Glover': 0.00058643, 'Floyd': 0.000586074,
'Hartman': 0.000583205, 'Buchanan': 0.000583187, 'Cobb': 0.000582401,
'French': 0.00057701, 'Kramer': 0.000575858, 'Mccormick': 0.000572569,
'Clarke': 0.0005715, 'Tyler': 0.00057139, 'Gibbs': 0.000571208,
'Moody': 0.000569654, 'Conner': 0.000569572, 'Sparks': 0.000568649,
'Mcguire': 0.000567571, 'Leon': 0.000566822, 'Bauer': 0.000566319,
'Norton': 0.000564729, 'Pope': 0.000564227, 'Flynn': 0.000564199,
'Hogan': 0.000563322, 'Robles': 0.00056303, 'Salinas': 0.000562692,
'Yates': 0.000561029, 'Lindsey': 0.000559192, 'Lloyd': 0.000558781,
'Marsh': 0.000557365, 'Mcbride': 0.000556222, 'Owen': 0.000552449,
'Solis': 0.000548648, 'Pham': 0.00054777, 'Lang': 0.000546802,
'Pratt': 0.000546418, 'Lara': 0.000545779, 'Brock': 0.000545331,
'Ballard': 0.00054513, 'Trujillo': 0.000544664, 'Shaffer': 0.000541173,
'Drake': 0.000539602, 'Roman': 0.000539282, 'Aguirre': 0.00053835,
'Morton': 0.000537162, 'Stokes': 0.000536239, 'Lamb': 0.000535033,
'Pacheco': 0.000534841, 'Patrick': 0.00053231, 'Cochran': 0.000532091,
'Shepherd': 0.000529368, 'Cain': 0.000528801, 'Burnett': 0.000528674,
'Hess': 0.000528335, 'Li': 0.000528007, 'Cervantes': 0.000527084,
'Olsen': 0.000524087, 'Briggs': 0.000523538, 'Ochoa': 0.000522743,
'Cabrera': 0.000522387, 'Velasquez': 0.000522314,
'Montoya': 0.00052151, 'Roth': 0.000521099, 'Meyers': 0.000518485,
'Cardenas': 0.000517334, 'Fuentes': 0.000515717, 'Weiss': 0.000513085,
'Wilkins': 0.000512309, 'Hoover': 0.000512309,
'Nicholson': 0.000511559, 'Underwood': 0.000511441,
'Short': 0.000510801, 'Carson': 0.000510052, 'Morrow': 0.000508617,
'Colon': 0.000507228, 'Holloway': 0.000506808, 'Summers': 0.000506123,
'Bryan': 0.000505008, 'Petersen': 0.00050424, 'Mckenzie': 0.000503318,
'Serrano': 0.000503071, 'Wilcox': 0.000502431, 'Carey': 0.000501856,
'Clayton': 0.000501408, 'Poole': 0.000499864, 'Calderon': 0.000499727,
'Gallegos': 0.000499553, 'Greer': 0.000498996, 'Rivas': 0.000498786,
'Guerra': 0.000498667, 'Decker': 0.000497525, 'Collier': 0.000497196,
'Wall': 0.000497077, 'Whitaker': 0.000496547, 'Bass': 0.000496117,
'Flowers': 0.000495944, 'Davenport': 0.000495295,
'Conley': 0.000495185, 'Houston': 0.00049365, 'Huff': 0.000492426,
'Copeland': 0.00049132, 'Hood': 0.00049101, 'Monroe': 0.000488616,
'Massey': 0.00048847, 'Roberson': 0.000486085, 'Combs': 0.00048592,
'Franco': 0.000485747, 'Larsen': 0.000483937, 'Pittman': 0.000481434,
'Randall': 0.000479661, 'Skinner': 0.000479616,
'Wilkinson': 0.000479552, 'Kirby': 0.00047946, 'Cameron': 0.00047915,
'Bridges': 0.000477514, 'Anthony': 0.000476472,
'Richard': 0.000476399, 'Kirk': 0.00047565, 'Bruce': 0.000475175,
'Singleton': 0.000473283, 'Mathis': 0.000473274,
'Bradford': 0.000472635, 'Boone': 0.000472205, 'Abbott': 0.000471666,
'Charles': 0.000470734, 'Allison': 0.000470606, 'Sweeney': 0.00047057,
'Atkinson': 0.000470469, 'Horn': 0.000469473, 'Jefferson': 0.0004693,
'Rosales': 0.000469071, 'York': 0.000469053, 'Christian': 0.000467618,
'Phelps': 0.000467408, 'Farrell': 0.000466869,
'Castaneda': 0.000466814, 'Nash': 0.000466193,
'Dickerson': 0.000466156, 'Bond': 0.000465818, 'Wyatt': 0.00046485,
'Foley': 0.000464649, 'Chase': 0.000463963, 'Gates': 0.000463698,
'Vincent': 0.000462602, 'Mathews': 0.000462419, 'Hodge': 0.000462136,
'Garrison': 0.000461268, 'Trevino': 0.000461012,
'Villarreal': 0.000460071, 'Heath': 0.000459669, 'Dalton': 0.00045838,
'Valencia': 0.000457101, 'Callahan': 0.000456178,
'Hensley': 0.000455566, 'Atkins': 0.000454616, 'Huffman': 0.000454461,
'Roy': 0.000454351, 'Boyer': 0.000453218, 'Shields': 0.000452807,
'Lin': 0.000451016, 'Hancock': 0.000450742, 'Grimes': 0.000449965,
'Glenn': 0.000449929, 'Cline': 0.000449252, 'Delacruz': 0.00044917,
'Camacho': 0.000447726, 'Dillon': 0.0004462, 'Parrish': 0.000446109,
'Oneill': 0.000444583, 'Melton': 0.000444017, 'Booth': 0.000443889,
'Kane': 0.000443404, 'Berg': 0.000442975, 'Harrell': 0.000442893,
'Pitts': 0.000442811, 'Savage': 0.000441943, 'Wiggins': 0.000441833,
'Brennan': 0.000441294, 'Salas': 0.000441166, 'Marks': 0.000441157,
'Russo': 0.00043974, 'Sawyer': 0.000438397, 'Baxter': 0.000437283,
'Golden': 0.000437118, 'Hutchinson': 0.000436844, 'Liu': 0.000435528,
'Walter': 0.000435071, 'Mcdowell': 0.000434258, 'Wiley': 0.000434048,
'Rich': 0.00043381, 'Humphrey': 0.000433746, 'Johns': 0.000432093,
'Koch': 0.000432065, 'Suarez': 0.000431599, 'Hobbs': 0.000431462,
'Beard': 0.000430621, 'Gilmore': 0.000429909, 'Ibarra': 0.000428492,
'Keith': 0.00042714, 'Macias': 0.000427067, 'Khan': 0.000426829,
'Andrade': 0.000426729, 'Ware': 0.000426546, 'Stephenson': 0.000426363,
'Henson': 0.000425879, 'Wilkerson': 0.000425843, 'Dyer': 0.000425559,
'Mcclure': 0.000424929, 'Blackwell': 0.000424838,
'Mercado': 0.000424308, 'Tanner': 0.000424079, 'Eaton': 0.000423997,
'Clay': 0.000422727, 'Barron': 0.000422106, 'Beasley': 0.00042195,
'Oneal': 0.000421786, 'Small': 0.000418944, 'Preston': 0.000418944,
'Wu': 0.000418624, 'Zamora': 0.000418542, 'Macdonald': 0.000418323,
'Vance': 0.000418149, 'Snow': 0.000417473, 'Mcclain': 0.000416294,
'Stafford': 0.000414366, 'Orozco': 0.000413818, 'Barry': 0.000411579,
'English': 0.00041147, 'Shannon': 0.000410282, 'Kline': 0.000410264,
'Jacobson': 0.000410026, 'Woodard': 0.000409624, 'Huang': 0.000408573,
'Kemp': 0.000408445, 'Mosley': 0.000408418, 'Prince': 0.000407888,
'Merritt': 0.00040776, 'Hurst': 0.000407404,
'Villanueva': 0.000407248, 'Roach': 0.000406188, 'Nolan': 0.000405887,
'Lam': 0.000405558, 'Yoder': 0.000404279, 'Mccullough': 0.000403164,
'Lester': 0.0004013, 'Santana': 0.000400898,
'Valenzuela': 0.000399938, 'Winters': 0.000399865,
'Barrera': 0.000399482, 'Orr': 0.000398988, 'Leach': 0.000398988,
'Berger': 0.000397983, 'Mckee': 0.000397974, 'Strong': 0.000396832,
'Conway': 0.000396512, 'Stein': 0.000395927, 'Whitehead': 0.000395735,
'Bullock': 0.000393095, 'Escobar': 0.000392492, 'Knox': 0.000392327,
'Meadows': 0.000391843, 'Solomon': 0.000391432, 'Velez': 0.000391258,
'Odonnell': 0.000391094, 'Kerr': 0.000390692, 'Stout': 0.000389878,
'Blankenship': 0.000389824, 'Browning': 0.000389632,
'Kent': 0.00038922, 'Lozano': 0.000388946, 'Bartlett': 0.000388444,
'Pruitt': 0.000387996, 'Buck': 0.000387795, 'Barr': 0.000387713,
'Gaines': 0.000387137, 'Durham': 0.000387101, 'Gentry': 0.000387028,
'Mcintyre': 0.000386826, 'Sloan': 0.000386333, 'Rocha': 0.000385036,
'Melendez': 0.000385036, 'Herman': 0.000384597, 'Sexton': 0.000384496,
'Moon': 0.000384332, 'Hendricks': 0.00038266, 'Rangel': 0.000382559,
'Stark': 0.000382514, 'Lowery': 0.00038075, 'Hardin': 0.000380695,
'Hull': 0.000380622, 'Sellers': 0.000379754, 'Ellison': 0.000378822,
'Calhoun': 0.000378758, 'Gillespie': 0.000378219, 'Mora': 0.000377808,
'Knapp': 0.000377068, 'Mccall': 0.000376739, 'Morse': 0.000375652,
'Dorsey': 0.000375579, 'Weeks': 0.000375113, 'Nielsen': 0.000374692,
'Livingston': 0.000374299, 'Leblanc': 0.000373925,
'Mclean': 0.00037345, 'Bradshaw': 0.000372746, 'Glass': 0.000372106,
'Middleton': 0.00037196, 'Buckley': 0.000371942,
'Schaefer': 0.000371549, 'Frost': 0.000370809, 'Howe': 0.000370562,
'House': 0.000369849, 'Mcintosh': 0.00036963, 'Ho': 0.000369265,
'Pennington': 0.000368588, 'Reilly': 0.000368324,
'Hebert': 0.000368077, 'Mcfarland': 0.00036772,
'Hickman': 0.000367538, 'Noble': 0.000367474, 'Spears': 0.000367346,
'Conrad': 0.000366423, 'Arias': 0.000366277, 'Galvan': 0.000365911,
'Velazquez': 0.000365765, 'Huynh': 0.000365591,
'Frederick': 0.000364659, 'Randolph': 0.000363134,
'Cantu': 0.000361845, 'Fitzpatrick': 0.000360931,
'Mahoney': 0.000360374, 'Peck': 0.000360301, 'Villa': 0.000360027,
'Michael': 0.000359725, 'Donovan': 0.000358821,
'Mcconnell': 0.000358209, 'Walls': 0.00035787, 'Boyle': 0.000357642,
'Mayer': 0.000357368, 'Zuniga': 0.000356875, 'Giles': 0.000356372,
'Pineda': 0.000356345, 'Pace': 0.000356125, 'Hurley': 0.000356089,
'Mays': 0.000355568, 'Mcmillan': 0.000355403, 'Crosby': 0.000354928,
'Ayers': 0.000354855, 'Case': 0.000354152, 'Bentley': 0.00035374,
'Shepard': 0.000353658, 'Everett': 0.000353631, 'Pugh': 0.00035353,
'David': 0.000353238, 'Mcmahon': 0.000352306, 'Dunlap': 0.000351931,
'Bender': 0.000351456, 'Hahn': 0.000350451, 'Harding': 0.000350323,
'Acevedo': 0.000349336, 'Raymond': 0.00034866,
'Blackburn': 0.000348468, 'Duffy': 0.000346869, 'Landry': 0.00034686,
'Dougherty': 0.00034633, 'Bautista': 0.000345818, 'Shah': 0.00034569,
'Potts': 0.000344356, 'Arroyo': 0.000344274, 'Valentine': 0.000344192,
'Meza': 0.000344128, 'Gould': 0.00034411, 'Vaughan': 0.000343479,
'Fry': 0.000343032, 'Rush': 0.000342374, 'Avery': 0.0003421,
'Herring': 0.000341305, 'Dodson': 0.000340802,
'Clements': 0.000340245, 'Sampson': 0.000340217, 'Tapia': 0.000339916,
'Bean': 0.000339404, 'Lynn': 0.000339221, 'Crane': 0.000339203,
'Farley': 0.000339139, 'Cisneros': 0.000338536, 'Benton': 0.000338372,
'Ashley': 0.000338271, 'Mckay': 0.000337604, 'Finley': 0.000336928,
'Best': 0.000336818, 'Blevins': 0.000336626, 'Friedman': 0.000336553,
'Moses': 0.00033638, 'Sosa': 0.00033637, 'Blanchard': 0.000335923,
'Huber': 0.000335603, 'Frye': 0.000335484, 'Krueger': 0.000335283,
'Bernard': 0.000333931, 'Rosario': 0.000333867, 'Rubio': 0.000333794,
'Mullen': 0.000332981, 'Benjamin': 0.000332953, 'Haley': 0.000332898,
'Chung': 0.000332798, 'Moyer': 0.000332789, 'Choi': 0.000332505,
'Horne': 0.000331573, 'Yu': 0.000331546, 'Woodward': 0.000331153,
'Ali': 0.000329664, 'Nixon': 0.00032928, 'Hayden': 0.000329161,
'Rivers': 0.000328759, 'Estes': 0.000327471, 'Mccarty': 0.000326365,
'Richmond': 0.000326338, 'Stuart': 0.00032621, 'Maynard': 0.000325726,
'Brandt': 0.000325433, 'Oconnell': 0.000325378, 'Hanna': 0.000325278,
'Sanford': 0.000324967, 'Sheppard': 0.000324867, 'Church': 0.00032473,
'Burch': 0.000324565, 'Levy': 0.000324044, 'Rasmussen': 0.000323944,
'Coffey': 0.000323843, 'Ponce': 0.000323459, 'Faulkner': 0.000323359,
'Donaldson': 0.000323341, 'Schmitt': 0.000322783,
'Novak': 0.000322381, 'Costa': 0.000321879, 'Montes': 0.000321595,
'Booker': 0.000320727, 'Cordova': 0.000320481, 'Waller': 0.000319814,
'Arellano': 0.000319795, 'Maddox': 0.00031953, 'Mata': 0.000318781,
'Bonilla': 0.000318196, 'Stanton': 0.000318087,
'Compton': 0.000317867, 'Kaufman': 0.000317849, 'Dudley': 0.000317703,
'Mcpherson': 0.000317639, 'Beltran': 0.000317392,
'Dickson': 0.000317045, 'Mccann': 0.00031699, 'Villegas': 0.000316917,
'Proctor': 0.000316899, 'Hester': 0.000316835,
'Cantrell': 0.000316826, 'Daugherty': 0.000316607,
'Cherry': 0.000316287, 'Bray': 0.000315921, 'Davila': 0.000315611,
'Rowland': 0.000315218, 'Madden': 0.00031498, 'Levine': 0.00031498,
'Spence': 0.000314642, 'Good': 0.000314596, 'Irwin': 0.000314085,
'Werner': 0.000313884, 'Krause': 0.00031382, 'Petty': 0.000313207,
'Whitney': 0.000312961, 'Baird': 0.000312796, 'Hooper': 0.000311435,
'Pollard': 0.000311389, 'Zavala': 0.000311289, 'Jarvis': 0.000311124,
'Holden': 0.000311042, 'Hendrix': 0.00031096, 'Haas': 0.00031096,
'Mcgrath': 0.000310951, 'Bird': 0.00031032, 'Lucero': 0.000309955,
'Terrell': 0.000309882, 'Riggs': 0.000309461, 'Joyce': 0.000309233,
'Rollins': 0.000308812, 'Mercer': 0.000308812,
'Galloway': 0.000308593, 'Duke': 0.000308337, 'Odom': 0.000308081,
'Andersen': 0.000306172, 'Downs': 0.000306044, 'Hatfield': 0.00030577,
'Benitez': 0.00030556, 'Archer': 0.000305285, 'Huerta': 0.00030471,
'Travis': 0.000304628, 'Mcneil': 0.000303714, 'Hinton': 0.00030344,
'Zhang': 0.000303376, 'Hays': 0.000303303, 'Mayo': 0.000302681,
'Fritz': 0.000302151, 'Branch': 0.000301896, 'Mooney': 0.000301101,
'Ewing': 0.000300845, 'Ritter': 0.000300287, 'Esparza': 0.000299447,
'Frey': 0.000299109, 'Braun': 0.00029857, 'Gay': 0.000298533,
'Riddle': 0.000298369, 'Haney': 0.000298277, 'Kaiser': 0.000297574,
'Holder': 0.000296651, 'Chaney': 0.000296349, 'Mcknight': 0.00029592,
'Gamble': 0.000295838, 'Vang': 0.000295435, 'Cooley': 0.000295015,
'Carney': 0.000294969, 'Cowan': 0.000294604, 'Forbes': 0.000294476,
'Ferrell': 0.000293983, 'Davies': 0.0002939, 'Barajas': 0.000293736,
'Shea': 0.000293023, 'Osborn': 0.000292795, 'Bright': 0.000292777,
'Cuevas': 0.00029253, 'Bolton': 0.000292347, 'Murillo': 0.000292064,
'Lutz': 0.000291845, 'Duarte': 0.000291442, 'Kidd': 0.000291351,
'Key': 0.000291315, 'Cooke': 0.000291114,
}
# Honorific prefixes and post-nominal suffixes, keyed by the string to emit.
# Values are relative selection weights; they are not guaranteed to sum to 1.
prefixes_female = {'Mrs.': 0.5, 'Ms.': 0.1, 'Miss': 0.1, 'Dr.': 0.3}
prefixes_male = {'Mr.': 0.7, 'Dr.': 0.3}
suffixes_female = {'MD': 0.5, 'DDS': 0.3, 'PhD': 0.1, 'DVM': 0.2}
# Removed Sr and I as they'd almost never be part of legal names.
suffixes_male = {
    'Jr.': 0.2, 'II': 0.05, 'III': 0.03, 'IV': 0.015, 'V': 0.005,
    'MD': 0.3, 'DDS': 0.2, 'PhD': 0.1, 'DVM': 0.1
}
| mit |
CodingCat/mxnet | python/mxnet/__init__.py | 15 | 2302 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""MXNet: a concise, fast and flexible framework for deep learning."""
from __future__ import absolute_import
from .context import Context, current_context, cpu, gpu
from . import engine
from .base import MXNetError
from . import base
from . import contrib
from . import ndarray
from . import ndarray as nd
from . import name
# use mx.sym as short for symbol
from . import symbol as sym
from . import symbol
from . import symbol_doc
from . import io
from . import recordio
from . import operator
# use mx.rnd as short for mx.random
from . import random as rnd
from . import random
from . import optimizer
from . import model
from . import notebook
from . import initializer
# use mx.init as short for mx.initializer
from . import initializer as init
from . import visualization
# use viz as short for mx.visualization
from . import visualization as viz
from . import callback
# from . import misc
from . import lr_scheduler
# use mx.kv as short for kvstore
from . import kvstore as kv
from . import kvstore_server
# Runtime compile module
from . import rtc
# Attribute scope to add attributes to symbolic graphs
from .attribute import AttrScope
from . import monitor
from . import monitor as mon
from . import torch
from . import torch as th
from . import profiler
from . import log
from . import module
from . import module as mod
from . import image
from . import image as img
from . import test_utils
from . import rnn
from . import gluon
__version__ = base.__version__
| apache-2.0 |
bblacey/FreeCAD-MacOS-CI | src/Mod/PartDesign/Scripts/Spring.py | 38 | 1813 | #! python
# -*- coding: utf-8 -*-
# (c) 2011 Adrian Przekwas LGPL
from __future__ import division # allows floating point division from integers
import FreeCAD, Part
from FreeCAD import Base
class MySpring:
    """Parametric FreeCAD feature: a coil spring built by sweeping a circular
    section along a helix.  Attach to a Part::FeaturePython object."""

    def __init__(self, obj):
        """Add the driving properties (Pitch, Diameter, Height, BarDiameter)
        to *obj* and register this instance as its proxy."""
        obj.addProperty("App::PropertyLength", "Pitch", "MySpring", "Pitch of the helix").Pitch = 5.0
        obj.addProperty("App::PropertyLength", "Diameter", "MySpring", "Diameter of the helix").Diameter = 6.0
        obj.addProperty("App::PropertyLength", "Height", "MySpring", "Height of the helix").Height = 30.0
        obj.addProperty("App::PropertyLength", "BarDiameter", "MySpring", "Diameter of the bar").BarDiameter = 3.0
        obj.Proxy = self

    def onChanged(self, fp, prop):
        """Rebuild the shape whenever any driving dimension changes."""
        # Membership test replaces the original chain of == comparisons.
        if prop in ("Pitch", "Diameter", "Height", "BarDiameter"):
            self.execute(fp)

    def execute(self, fp):
        """Recompute the spring solid from the current properties and store
        it in fp.Shape."""
        pitch = fp.Pitch
        radius = fp.Diameter / 2
        height = fp.Height
        barradius = fp.BarDiameter / 2
        myhelix = Part.makeHelix(pitch, height, radius)
        g = myhelix.Edges[0].Curve
        # Circular cross-section, centered on the helix start point and
        # oriented along +Y so the sweep stays perpendicular to the path.
        c = Part.Circle()
        c.Center = g.value(0)  # start point of the helix
        c.Axis = (0, 1, 0)
        c.Radius = barradius
        p = c.toShape()
        section = Part.Wire([p])
        makeSolid = 1  # 1 -> makePipeShell returns a solid, 0 -> a shell
        isFrenet = 1   # use Frenet frame so the section follows the helix twist
        myspring = Part.Wire(myhelix).makePipeShell([section], makeSolid, isFrenet)
        fp.Shape = myspring
def makeMySpring():
    """Create a My_Spring parametric feature in the active FreeCAD document,
    creating a new document first if none is open."""
    doc = FreeCAD.activeDocument()
    if doc is None:  # "is None", not "== None": identity test for the singleton
        doc = FreeCAD.newDocument()
    spring = doc.addObject("Part::FeaturePython", "My_Spring")
    spring.Label = "My Spring"
    MySpring(spring)
    spring.ViewObject.Proxy = 0
    doc.recompute()


if __name__ == "__main__":
    makeMySpring()
| lgpl-2.1 |
ArcherSys/ArcherSys | Lib/site-packages/nbconvert/exporters/notebook.py | 3 | 1078 | """NotebookExporter class"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from .exporter import Exporter
import nbformat
from traitlets import Enum
class NotebookExporter(Exporter):
    """Exports to an IPython notebook, optionally rewriting it in an older
    nbformat version."""

    nbformat_version = Enum(list(nbformat.versions),
        default_value=nbformat.current_nbformat,
        config=True,
        help="""The nbformat version to write.
        Use this to downgrade notebooks.
        """
    )

    output_mimetype = 'application/json'

    def _file_extension_default(self):
        return '.ipynb'

    def from_notebook_node(self, nb, resources=None, **kw):
        """Serialize *nb* to JSON, tagging resources['output_suffix'] to show
        whether a version downgrade happened."""
        parent = super(NotebookExporter, self)
        nb_copy, resources = parent.from_notebook_node(nb, resources, **kw)
        if self.nbformat_version == nb_copy.nbformat:
            resources['output_suffix'] = '.nbconvert'
        else:
            resources['output_suffix'] = '.v%i' % self.nbformat_version
        output = nbformat.writes(nb_copy, version=self.nbformat_version)
        return output, resources
| mit |
eugene1g/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/queues.py | 116 | 21137 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import logging
import os
import sys
import time
import traceback
from datetime import datetime
from optparse import make_option
from StringIO import StringIO
from webkitpy.common.config.committervalidator import CommitterValidator
from webkitpy.common.config.ports import DeprecatedPort
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.net.statusserver import StatusServer
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.bot.commitqueuetask import CommitQueueTask, CommitQueueTaskDelegate
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.bot.feeders import CommitQueueFeeder, EWSFeeder
from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
from webkitpy.tool.bot.patchanalysistask import UnableToApplyPatch
from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate
from webkitpy.tool.bot.stylequeuetask import StyleQueueTask, StyleQueueTaskDelegate
from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
from webkitpy.tool.multicommandtool import Command, TryAgain
_log = logging.getLogger(__name__)
class AbstractQueue(Command, QueueEngineDelegate):
    # Bugzilla accounts to CC on processed bugs; subclasses override this.
    watchers = [
    ]

    # Canonical status strings reported to the status server.
    _pass_status = "Pass"
    _fail_status = "Fail"
    _retry_status = "Retry"
    _error_status = "Error"
def __init__(self, options=None):
    # Default values should never be collections (like []) as default values
    # are shared between invocations.
    shared_queue_options = [
        make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
        make_option("--exit-after-iteration", action="store", type="int", dest="iterations", default=None, help="Stop running the queue after iterating this number of times."),
    ]
    self.help_text = "Run the %s" % self.name
    Command.__init__(self, options=(options or []) + shared_queue_options)
    self._iteration_count = 0
def _cc_watchers(self, bug_id):
    """Best-effort CC of this queue's watchers on the given bug.

    Failures are logged (with traceback) but never propagated, so a
    Bugzilla hiccup cannot take the whole queue down.
    """
    try:
        self._tool.bugs.add_cc_to_bug(bug_id, self.watchers)
    except Exception:
        # "except Exception, e" was Python-2-only syntax and the bound
        # name was never used; the bare form works on Python 2 and 3.
        traceback.print_exc()
        _log.error("Failed to CC watchers.")
def run_webkit_patch(self, args):
    """Run webkit-patch as a subprocess with the given arguments, forwarding
    this queue's global options (status host, bot id, port).

    Returns the command's output.  On failure the full output is logged
    and the ScriptError is re-raised.
    """
    webkit_patch_args = [self._tool.path()]
    # FIXME: This is a hack, we should have a more general way to pass global options.
    # FIXME: We must always pass global options and their value in one argument
    # because our global option code looks for the first argument which does
    # not begin with "-" and assumes that is the command name.
    webkit_patch_args += ["--status-host=%s" % self._tool.status_server.host]
    if self._tool.status_server.bot_id:
        webkit_patch_args += ["--bot-id=%s" % self._tool.status_server.bot_id]
    if self._options.port:
        webkit_patch_args += ["--port=%s" % self._options.port]
    webkit_patch_args.extend(args)
    try:
        args_for_printing = list(webkit_patch_args)
        args_for_printing[0] = 'webkit-patch'  # Printing our path for each log is redundant.
        _log.info("Running: %s" % self._tool.executive.command_for_printing(args_for_printing))
        command_output = self._tool.executive.run_command(webkit_patch_args, cwd=self._tool.scm().checkout_root)
    except ScriptError as e:
        # "except X as e" works on Python 2.6+ and 3; the old comma form
        # ("except ScriptError, e") is Python-2-only.
        # Make sure the whole output gets printed if the command failed.
        _log.error(e.message_with_output(output_limit=None))
        raise
    return command_output
def _log_directory(self):
    # Logs live in a sibling directory named "<queue-name>-logs".
    directory_name = "%s-logs" % self.name
    return os.path.join("..", directory_name)
# QueueEngineDelegate methods
def queue_log_path(self):
    # The queue's own log file, inside the queue's log directory.
    log_name = "%s.log" % self.name
    return os.path.join(self._log_directory(), log_name)
def work_item_log_path(self, work_item):
    """Return the log path for a single work item; subclasses must override."""
    # Parenthesized raise is valid on Python 2 and 3; the original
    # "raise NotImplementedError, ..." statement form is Python-2-only.
    raise NotImplementedError("subclasses must implement")
def begin_work_queue(self):
    """Warn about local-change loss, optionally confirm, and mark the queue started."""
    checkout_root = self._tool.scm().checkout_root
    _log.info("CAUTION: %s will discard all local changes in \"%s\"" % (self.name, checkout_root))
    if self._options.confirm:
        answer = self._tool.user.prompt("Are you sure? Type \"yes\" to continue: ")
        if answer != "yes":
            _log.error("User declined.")
            sys.exit(1)
    _log.info("Running WebKit %s." % self.name)
    self._tool.status_server.update_status(self.name, "Starting Queue")
def stop_work_queue(self, reason):
    """Record on the status server why this queue is shutting down."""
    message = "Stopping Queue, reason: %s" % reason
    self._tool.status_server.update_status(self.name, message)
def should_continue_work_queue(self):
    """Return True until the optional --exit-after-iteration limit is reached."""
    self._iteration_count += 1
    limit = self._options.iterations
    if not limit:
        return True  # No limit configured: run forever.
    return self._iteration_count <= limit
def next_work_item(self):
    """Return the next item to process, or None when idle; queue-specific."""
    # Python-2-only 'raise E, "msg"' replaced with the portable call form.
    raise NotImplementedError("subclasses must implement")
def process_work_item(self, work_item):
    """Process one work item; return True on success. Queue-specific."""
    # Python-2-only 'raise E, "msg"' replaced with the portable call form.
    raise NotImplementedError("subclasses must implement")
def handle_unexpected_error(self, work_item, message):
    """React to an unexpected failure while processing; queue-specific."""
    # Python-2-only 'raise E, "msg"' replaced with the portable call form.
    raise NotImplementedError("subclasses must implement")
# Command methods
def execute(self, options, args, tool, engine=QueueEngine):
    """Command entry point: stash options/tool and hand control to the engine.

    The `engine` parameter exists so tests can substitute a fake QueueEngine.
    """
    self._options = options  # FIXME: This code is wrong. Command.options is a list, this assumes an Options element!
    self._tool = tool  # FIXME: This code is wrong too! Command.bind_to_tool handles this!
    queue_engine = engine(self.name, self, self._tool.wakeup_event, self._options.seconds_to_sleep)
    return queue_engine.run()
@classmethod
def _log_from_script_error_for_upload(cls, script_error, output_limit=None):
    """Package a ScriptError's output as a file-like object for upload.

    We have seen request timeouts with app engine due to large log
    uploads, so by default only the last 512k of output is included.
    """
    effective_limit = output_limit or 512 * 1024  # 512k
    output = script_error.message_with_output(output_limit=effective_limit)
    # Pre-encode the string to a byte array before handing it to
    # status_server: ClientForm (part of mechanize) wants a file-like
    # object wrapping already-encoded data.
    return StringIO(output.encode("utf-8"))
@classmethod
def _update_status_for_script_error(cls, tool, state, script_error, is_error=False):
    """Report a ScriptError to the status server, attaching its log for upload."""
    base_message = str(script_error)
    message = "Error: %s" % base_message if is_error else base_message
    log_for_upload = cls._log_from_script_error_for_upload(script_error)
    return tool.status_server.update_status(cls.name, message, state["patch"], log_for_upload)
class FeederQueue(AbstractQueue):
    """Meta-queue that periodically feeds work items to the other queues."""
    name = "feeder-queue"
    _sleep_duration = 30  # seconds

    # AbstractQueue methods

    def begin_work_queue(self):
        AbstractQueue.begin_work_queue(self)
        self.feeders = [CommitQueueFeeder(self._tool), EWSFeeder(self._tool)]

    def next_work_item(self):
        # This really should inherit from some more basic class that doesn't
        # understand work items, but the base class in the hierarchy currently
        # understands work items.
        return "synthetic-work-item"

    def process_work_item(self, work_item):
        # One "work item" is a full pass over every feeder, then a nap.
        for work_feeder in self.feeders:
            work_feeder.feed()
        time.sleep(self._sleep_duration)
        return True

    def work_item_log_path(self, work_item):
        # Synthetic work items have no per-item log.
        return None

    def handle_unexpected_error(self, work_item, message):
        _log.error(message)
class AbstractPatchQueue(AbstractQueue):
    """Queue base-class whose work items are patches fetched via the status server."""

    def _update_status(self, message, patch=None, results_file=None):
        return self._tool.status_server.update_status(self.name, message, patch, results_file)

    def _next_patch(self):
        """Return the next patch assigned to this queue, or None when idle."""
        # FIXME: Bugzilla accessibility should be checked here; if it's unaccessible,
        # it should return None.
        while True:
            patch_id = self._tool.status_server.next_work_item(self.name)
            if not patch_id:
                return None
            patch = self._tool.bugs.fetch_attachment(patch_id)
            if patch:
                return patch
            # FIXME: Using a fake patch because release_work_item has the wrong API.
            # We also don't really need to release the lock (although that's fine),
            # mostly we just need to remove this bogus patch from our queue.
            # If for some reason bugzilla is just down, then it will be re-fed later.
            self._release_work_item(Attachment({'id': patch_id}, None))

    def _release_work_item(self, patch):
        self._tool.status_server.release_work_item(self.name, patch)

    def _finish_with_status(self, status_message, patch):
        # Shared tail of the _did_* handlers: record the outcome, then
        # release the status-server lock on the patch.
        self._update_status(status_message, patch)
        self._release_work_item(patch)

    def _did_pass(self, patch):
        self._finish_with_status(self._pass_status, patch)

    def _did_fail(self, patch):
        self._finish_with_status(self._fail_status, patch)

    def _did_retry(self, patch):
        self._finish_with_status(self._retry_status, patch)

    def _did_error(self, patch, reason):
        self._finish_with_status("%s: %s" % (self._error_status, reason), patch)

    def work_item_log_path(self, patch):
        return os.path.join(self._log_directory(), "%s.log" % patch.bug_id())
# Used to share code between the EWS and commit-queue.
class PatchProcessingQueue(AbstractPatchQueue):
    """Shared base for queues that need a port object (the EWS queues and commit-queue)."""
    # Subclasses must override.
    port_name = None
    def __init__(self, options=None):
        self._port = None # We can't instantiate port here because tool isn't available.
        AbstractPatchQueue.__init__(self, options)
    # FIXME: This is a hack to map between the old port names and the new port names.
    def _new_port_name_from_old(self, port_name, platform):
        # NOTE(review): the `platform` parameter shadows the stdlib module of
        # the same name inside this method; here it is the tool's platform
        # object (provides os_version), not the module.
        # ApplePort.determine_full_port_name asserts if the name doesn't include version.
        if port_name == 'mac':
            return 'mac-' + platform.os_version
        if port_name == 'win':
            return 'win-future'
        return port_name
    def begin_work_queue(self):
        AbstractPatchQueue.begin_work_queue(self)
        if not self.port_name:
            return
        # FIXME: This is only used for self._deprecated_port.flag()
        self._deprecated_port = DeprecatedPort.port(self.port_name)
        # FIXME: This violates abstraction
        self._tool._deprecated_port = self._deprecated_port
        self._port = self._tool.port_factory.get(self._new_port_name_from_old(self.port_name, self._tool.platform))
    def _upload_results_archive_for_patch(self, patch, results_archive_zip):
        """Attach a zip of layout-test results to the patch's bug, with a summary comment."""
        if not self._port:
            # Lazily create the port if begin_work_queue hasn't run yet.
            self._port = self._tool.port_factory.get(self._new_port_name_from_old(self.port_name, self._tool.platform))
        bot_id = self._tool.status_server.bot_id or "bot"
        description = "Archive of layout-test-results from %s for %s" % (bot_id, self._port.name())
        # results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading.
        results_archive_file = results_archive_zip.fp
        # Rewind the file object to start (since Mechanize won't do that automatically)
        # See https://bugs.webkit.org/show_bug.cgi?id=54593
        results_archive_file.seek(0)
        # FIXME: This is a small lie to always say run-webkit-tests since Chromium uses new-run-webkit-tests.
        # We could make this code look up the test script name off the port.
        comment_text = "The attached test failures were seen while running run-webkit-tests on the %s.\n" % (self.name)
        # FIXME: We could easily list the test failures from the archive here,
        # currently callers do that separately.
        comment_text += BotInfo(self._tool, self._port.name()).summary_text()
        self._tool.bugs.add_attachment_to_bug(patch.bug_id(), results_archive_file, description, filename="layout-test-results.zip", comment_text=comment_text)
class CommitQueue(PatchProcessingQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate):
    """Queue that builds, tests, and lands patches marked commit-queue+."""
    name = "commit-queue"
    port_name = "mac-mountainlion"
    # AbstractPatchQueue methods
    def begin_work_queue(self):
        PatchProcessingQueue.begin_work_queue(self)
        self.committer_validator = CommitterValidator(self._tool)
        self._expected_failures = ExpectedFailures()
        self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._port.results_directory(), self._log_directory())
    def next_work_item(self):
        return self._next_patch()
    def process_work_item(self, patch):
        """Run the full commit-queue pipeline on one patch.

        Returns True when the patch landed; a falsy return means the
        patch was retried or rejected.
        """
        self._cc_watchers(patch.bug_id())
        task = CommitQueueTask(self, patch)
        try:
            if task.run():
                self._did_pass(patch)
                return True
            self._did_retry(patch)
        except ScriptError as e:  # "except X, e" is Python-2-only; "as" works on 2.6+ and 3.
            validator = CommitterValidator(self._tool)
            validator.reject_patch_from_commit_queue(patch.id(), self._error_message_for_bug(task, patch, e))
            results_archive = task.results_archive_from_patch_test_run(patch)
            if results_archive:
                self._upload_results_archive_for_patch(patch, results_archive)
            self._did_fail(patch)
    def _failing_tests_message(self, task, patch):
        """Return a "New failing tests" message, or None if all failures were expected."""
        results = task.results_from_patch_test_run(patch)
        unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
        if not unexpected_failures:
            return None
        return "New failing tests:\n%s" % "\n".join(unexpected_failures)
    def _error_message_for_bug(self, task, patch, script_error):
        """Compose the rejection comment posted to the bug, with a link to full output."""
        message = self._failing_tests_message(task, patch)
        if not message:
            message = script_error.message_with_output()
        results_link = self._tool.status_server.results_url_for_status(task.failure_status_id)
        return "%s\nFull output: %s" % (message, results_link)
    def handle_unexpected_error(self, patch, message):
        self.committer_validator.reject_patch_from_commit_queue(patch.id(), message)
    # CommitQueueTaskDelegate methods
    def run_command(self, command):
        self.run_webkit_patch(command + [self._deprecated_port.flag()])
    def command_passed(self, message, patch):
        self._update_status(message, patch=patch)
    def command_failed(self, message, script_error, patch):
        failure_log = self._log_from_script_error_for_upload(script_error)
        return self._update_status(message, patch=patch, results_file=failure_log)
    def expected_failures(self):
        return self._expected_failures
    def test_results(self):
        return self._layout_test_results_reader.results()
    def archive_last_test_results(self, patch):
        return self._layout_test_results_reader.archive(patch)
    def build_style(self):
        return "release"
    def refetch_patch(self, patch):
        return self._tool.bugs.fetch_attachment(patch.id())
    def report_flaky_tests(self, patch, flaky_test_results, results_archive=None):
        reporter = FlakyTestReporter(self._tool, self.name)
        reporter.report_flaky_tests(patch, flaky_test_results, results_archive)
    def did_pass_testing_ews(self, patch):
        # Only Mac and Mac WK2 run tests
        # FIXME: We shouldn't have to hard-code it here.
        patch_status = self._tool.status_server.patch_status
        return patch_status("mac-ews", patch.id()) == self._pass_status or patch_status("mac-wk2-ews", patch.id()) == self._pass_status
    # StepSequenceErrorHandler methods
    @classmethod
    def handle_script_error(cls, tool, state, script_error):
        # Hitting this error handler should be pretty rare. It does occur,
        # however, when a patch no longer applies to top-of-tree in the final
        # land step.
        _log.error(script_error.message_with_output())
    @classmethod
    def handle_checkout_needs_update(cls, tool, state, options, error):
        message = "Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests."
        tool.status_server.update_status(cls.name, message, state["patch"])
        # The only time when we find out that our checkout needs update is
        # when we were ready to actually pull the trigger and land the patch.
        # Rather than spinning in the master process, we retry without
        # building or testing, which is much faster.
        options.build = False
        options.test = False
        options.update = True
        raise TryAgain()
class AbstractReviewQueue(PatchProcessingQueue, StepSequenceErrorHandler):
    """This is the base-class for the EWS queues and the style-queue."""
    def __init__(self, options=None):
        PatchProcessingQueue.__init__(self, options)
    def review_patch(self, patch):
        """Process one patch and return True on success; subclass hook."""
        raise NotImplementedError("subclasses must implement")
    # AbstractPatchQueue methods
    def begin_work_queue(self):
        PatchProcessingQueue.begin_work_queue(self)
    def next_work_item(self):
        return self._next_patch()
    def process_work_item(self, patch):
        try:
            if not self.review_patch(patch):
                return False
            self._did_pass(patch)
            return True
        except ScriptError as e:  # "except X, e" is Python-2-only syntax.
            if e.exit_code != QueueEngine.handled_error_code:
                self._did_fail(patch)
            else:
                # The subprocess handled the error, but won't have released the patch, so we do.
                # FIXME: We need to simplify the rules by which _release_work_item is called.
                self._release_work_item(patch)
            # Bare "raise" re-raises the active exception; unlike "raise e"
            # it preserves the original traceback on Python 2 as well.
            raise
    def handle_unexpected_error(self, patch, message):
        _log.error(message)
    # StepSequenceErrorHandler methods
    @classmethod
    def handle_script_error(cls, tool, state, script_error):
        _log.error(script_error.output)
class StyleQueue(AbstractReviewQueue, StyleQueueTaskDelegate):
    """Queue that runs check-webkit-style over incoming patches."""
    name = "style-queue"
    def __init__(self):
        AbstractReviewQueue.__init__(self)
    def review_patch(self, patch):
        """Style-check one patch; post failures to the bug and return False on any error."""
        task = StyleQueueTask(self, patch)
        if not task.validate():
            self._did_error(patch, "%s did not process patch." % self.name)
            return False
        try:
            return task.run()
        except UnableToApplyPatch:  # Exception object was unused; no need to bind it.
            self._did_error(patch, "%s unable to apply patch." % self.name)
            return False
        except ScriptError as e:  # "except X, e" is Python-2-only syntax.
            message = "Attachment %s did not pass %s:\n\n%s\n\nIf any of these errors are false positives, please file a bug against check-webkit-style." % (patch.id(), self.name, e.output)
            self._tool.bugs.post_comment_to_bug(patch.bug_id(), message, cc=self.watchers)
            self._did_fail(patch)
            return False
        # NOTE: the original trailing "return True" was unreachable (every
        # path above returns) and has been removed.
    # StyleQueueTaskDelegate methods
    def run_command(self, command):
        self.run_webkit_patch(command)
    def command_passed(self, message, patch):
        self._update_status(message, patch=patch)
    def command_failed(self, message, script_error, patch):
        failure_log = self._log_from_script_error_for_upload(script_error)
        return self._update_status(message, patch=patch, results_file=failure_log)
    def expected_failures(self):
        return None
    def refetch_patch(self, patch):
        return self._tool.bugs.fetch_attachment(patch.id())
| bsd-3-clause |
KingxBanana/zulip | zproject/settings.py | 1 | 41223 | from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports prod_settings.py, and any site-specific configuration
# belongs there. The template for prod_settings.py is prod_settings_template.py
#
# See http://zulip.readthedocs.io/en/latest/settings.html for more information
#
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
import six
########################################################################
# INITIAL SETTINGS
########################################################################
# Root of the Zulip checkout/deployment (the directory above zproject/).
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
# Machine-level configuration; the file is absent in development checkouts
# (RawConfigParser.read silently ignores missing files).
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
# The presence of [machine] deploy_type in /etc/zulip/zulip.conf marks a
# production install.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
# Secrets (API keys, passwords, ...) live outside the repository in
# production; development reads the in-tree zproject/dev-secrets.conf.
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
    secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
    secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
def get_secret(key, default=None):
    """Return the secret named `key` from the [secrets] section.

    Falls back to `default` (None unless given) when the secret is not
    configured; the new keyword argument is backward-compatible with all
    existing `get_secret(key)` callers.
    """
    if secrets_file.has_option('secrets', key):
        return secrets_file.get('secrets', key)
    return default
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
# Only default DEBUG if it hasn't already been injected into this module's
# namespace (e.g. by a wrapping settings module).
if 'DEBUG' not in globals():
    # Uncomment end of next line to test JS/CSS minification.
    DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
if DEBUG:
    INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
# Matches invocations of the form: ./manage.py process_queue <args...>
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
    IS_WORKER = True
else:
    IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# This is overridden in test_settings.py for the test suites
CASPER_TESTS = False
# Import variables like secrets from the prod_settings file
# Import prod_settings after determining the deployment/machine type
if PRODUCTION:
from .prod_settings import *
else:
from .dev_settings import *
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in prod_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'EMAIL_GATEWAY_EXTRA_PATTERN_HACK': None,
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'MAX_FILE_UPLOAD_SIZE': 25,
'ERROR_REPORTING': True,
'STAGING_ERROR_NOTIFICATIONS': False,
'EVENT_LOGS_ENABLED': False,
'SAVE_FRONTEND_STACKTRACES': False,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
'RABBITMQ_HOST': 'localhost',
'RABBITMQ_USERNAME': 'zulip',
'MEMCACHED_LOCATION': '127.0.0.1:11211',
'RATE_LIMITING': True,
'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'APNS_KEY_FILE': None,
'APNS_SANDBOX': True,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': 'feedback@zulip.com',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'ADMINS': '',
'SHARE_THE_LOVE': False,
'INLINE_IMAGE_PREVIEW': True,
'INLINE_URL_EMBED_PREVIEW': False,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'SEND_MISSED_MESSAGE_EMAILS_AS_USER': False,
'SERVER_EMAIL': None,
'FEEDBACK_EMAIL': None,
'WELCOME_EMAIL_SENDER': None,
'EMAIL_DELIVERER_DISABLED': False,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'ZULIP_COM': False,
'SHOW_OSS_ANNOUNCEMENT': False,
'REGISTER_LINK_DISABLED': False,
'LOGIN_LINK_DISABLED': False,
'ABOUT_LINK_DISABLED': False,
'CUSTOM_LOGO_URL': None,
'VERBOSE_SUPPORT_OFFERS': False,
'STATSD_HOST': '',
'OPEN_REALM_CREATION': False,
'REALMS_HAVE_SUBDOMAINS': False,
'SUBDOMAINS_HOMEPAGE': False,
'ROOT_SUBDOMAIN_ALIASES': ["www"],
'REMOTE_POSTGRES_HOST': '',
'REMOTE_POSTGRES_SSLMODE': '',
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
'GOOGLE_CLIENT_ID': '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com',
'SOCIAL_AUTH_GITHUB_KEY': None,
'SOCIAL_AUTH_GITHUB_ORG_NAME': None,
'SOCIAL_AUTH_GITHUB_TEAM_ID': None,
'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['subdomain'],
'DBX_APNS_CERT_FILE': None,
'DBX_APNS_KEY_FILE': None,
'PERSONAL_ZMIRROR_SERVER': None,
'EXTRA_INSTALLED_APPS': [],
'DEFAULT_NEW_REALM_STREAMS': {
"social": {"description": "For socializing", "invite_only": False},
"general": {"description": "For general stuff", "invite_only": False},
"zulip": {"description": "For zulip stuff", "invite_only": False}
},
'REALM_CREATION_LINK_VALIDITY_DAYS': 7,
'TERMS_OF_SERVICE': None,
'TOS_VERSION': None,
'SYSTEM_ONLY_REALMS': {"zulip.com"},
'FIRST_TIME_TOS_TEMPLATE': None,
'USING_PGROONGA': False,
'POST_MIGRATION_CACHE_FLUSHING': False,
'ENABLE_FILE_LINKS': False,
'USE_WEBSOCKETS': True,
}
# Fill in a default for every setting that prod_settings/dev_settings did
# not define. At module top level, vars() is this module's global
# namespace, so assigning into it defines the setting name directly.
for setting_name, setting_val in six.iteritems(DEFAULT_SETTINGS):
    if setting_name not in vars():
        vars()[setting_name] = setting_val
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado).
ALLOWED_HOSTS += ['127.0.0.1', 'localhost']
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
("NOREPLY_EMAIL_ADDRESS", "noreply@example.com"),
("DEFAULT_FROM_EMAIL", "Zulip <zulip@example.com>"),
("ALLOWED_HOSTS", ["*", '127.0.0.1', 'localhost']),
]
if ADMINS == "":
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS
# Voyager is a production zulip server that is not zulip.com or
# staging.zulip.com VOYAGER is the standalone all-on-one-server
# production deployment model for based on the original Zulip
# ENTERPRISE implementation. We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
# this directory will be used to store logs for development environment
DEVELOPMENT_LOG_DIRECTORY = os.path.join(DEPLOY_ROOT, 'var', 'log')
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# List of callables that know how to import templates from various sources.
LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
if PRODUCTION:
# Template caching is a significant performance win in production.
LOADERS = [('django.template.loaders.cached.Loader', LOADERS)]
TEMPLATES = [
{
'BACKEND': 'zproject.jinja2.backends.Jinja2',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'environment': 'zproject.jinja2.environment',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'pipeline.jinja2.PipelineExtension',
],
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
'django.template.context_processors.i18n',
],
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'django_templates'),
],
'APP_DIRS': False,
'OPTIONS': {
'debug': DEBUG,
'loaders': LOADERS,
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
],
},
},
]
MIDDLEWARE_CLASSES = (
# Our logging middleware should be the first middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'django.middleware.common.CommonMiddleware',
'zerver.middleware.SessionHostDomainMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
# A site can include additional installed apps via the
# EXTRA_INSTALLED_APPS setting
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'confirmation',
'guardian',
'pipeline',
'zerver',
'social.apps.django_app.default',
]
if USING_PGROONGA:
INSTALLED_APPS += ['pgroonga']
INSTALLED_APPS += EXTRA_INSTALLED_APPS
ZILENCER_ENABLED = 'zilencer' in INSTALLED_APPS
# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://127.0.0.1:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################
DATABASES = {"default": {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'zulip',
'USER': 'zulip',
'PASSWORD': '', # Authentication done via certificates
'HOST': '', # Host = '' => connect through a local socket
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection
},
},
}
if DEVELOPMENT:
    # In development, connect to a local postgres over TCP using a
    # per-checkout password instead of certificate auth.
    LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
    DATABASES["default"].update({
        'PASSWORD': LOCAL_DATABASE_PASSWORD,
        'HOST': 'localhost'
    })
elif REMOTE_POSTGRES_HOST != '':
    # Production with a remote database server.
    DATABASES['default'].update({
        'HOST': REMOTE_POSTGRES_HOST,
    })
    if get_secret("postgres_password") is not None:
        DATABASES['default'].update({
            'PASSWORD': get_secret("postgres_password"),
        })
    if REMOTE_POSTGRES_SSLMODE != '':
        DATABASES['default']['OPTIONS']['sslmode'] = REMOTE_POSTGRES_SSLMODE
    else:
        # Default to the strictest libpq SSL mode when none was configured.
        DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
if USING_PGROONGA:
    # We need to have "pgroonga" schema before "pg_catalog" schema in
    # the PostgreSQL search path, because "pgroonga" schema overrides
    # the "@@" operator from "pg_catalog" schema, and "pg_catalog"
    # schema is searched first if not specified in the search path.
    # See also: http://www.postgresql.org/docs/current/static/runtime-config-client.html
    pg_options = '-c search_path=%(SCHEMA)s,zulip,public,pgroonga,pg_catalog' % \
        DATABASES['default']
    DATABASES['default']['OPTIONS']['options'] = pg_options
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': MEMCACHED_LOCATION,
'TIMEOUT': 3600,
'OPTIONS': {
'verify_keys': True,
'tcp_nodelay': True,
'retry_timeout': 1,
}
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# Basically never timeout. Setting to 0 isn't guaranteed
# to work, see https://code.djangoproject.com/ticket/9595
'TIMEOUT': 2000000000,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
}
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING_RULES = [
(60, 100), # 100 requests max every minute
]
DEBUG_RATE_LIMITING = DEBUG
REDIS_PASSWORD = get_secret('redis_password')
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
try:
# For get_updates hostname sharding.
domain = config_file.get('django', 'cookie_domain')
SESSION_COOKIE_DOMAIN = '.' + domain
CSRF_COOKIE_DOMAIN = '.' + domain
except six.moves.configparser.Error:
# Failing here is OK
pass
# Prevent Javascript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher'
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
########################################################################
# API/BOT SETTINGS
########################################################################
if "EXTERNAL_API_PATH" not in vars():
EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH
SERVER_URI = EXTERNAL_URI_SCHEME + EXTERNAL_HOST
if "NAGIOS_BOT_HOST" not in vars():
NAGIOS_BOT_HOST = EXTERNAL_HOST
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")
# Twitter API credentials
# Secrecy not required because its only used for R/O requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [{'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot'},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway'},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot'},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot'},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot'}]
if PRODUCTION:
INTERNAL_BOTS += [
{'var_name': 'NAGIOS_STAGING_SEND_BOT',
'email_template': 'nagios-staging-send-bot@%s',
'name': 'Nagios Staging Send Bot'},
{'var_name': 'NAGIOS_STAGING_RECEIVE_BOT',
'email_template': 'nagios-staging-receive-bot@%s',
'name': 'Nagios Staging Receive Bot'},
]
INTERNAL_BOT_DOMAIN = "zulip.com"
# Set the realm-specific bot names
# At module top level, vars() is this module's global namespace, so this
# defines each bot setting (e.g. NOTIFICATION_BOT) from its email template
# unless a settings file already assigned it.
for bot in INTERNAL_BOTS:
    if vars().get(bot['var_name']) is None:
        bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
        vars()[bot['var_name']] = bot_email
# Derive the example address shown in docs from the configured pattern.
if EMAIL_GATEWAY_PATTERN != "":
    EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)
DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")
if PRODUCTION:
FEEDBACK_TARGET = "https://zulip.com/api"
else:
FEEDBACK_TARGET = "http://localhost:9991/api"
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
    INSTALLED_APPS += ['django_statsd']
    STATSD_PORT = 8125
    STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
    # This needs to be synced with the Camo installation
    CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
STATIC_URL = '/static/'
# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers.  So we only use
# ZulipStorage when not DEBUG.
# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
PIPELINE_ENABLED = not DEBUG
# Storage/finder/root choices differ between dev (DEBUG) and prod:
# dev uses plain PipelineStorage, prod uses the hashing ZulipStorage.
if DEBUG:
    STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'pipeline.finders.PipelineFinder',
    )
    if PIPELINE_ENABLED:
        STATIC_ROOT = os.path.abspath('prod-static/serve')
    else:
        STATIC_ROOT = os.path.abspath('static/')
else:
    STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'pipeline.finders.PipelineFinder',
    )
    if PRODUCTION:
        STATIC_ROOT = '/home/zulip/prod-static'
    else:
        STATIC_ROOT = os.path.abspath('prod-static/serve')
# Translations are served out of the static tree.
LOCALE_PATHS = (os.path.join(STATIC_ROOT, 'locale'),)
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'
# To use minified files in dev, set PIPELINE_ENABLED = True.  For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
# django-pipeline configuration: CSS bundles only; JS bundling is handled
# separately via JS_SPECS below, so 'JAVASCRIPT' stays empty here.
PIPELINE = {
    'PIPELINE_ENABLED': PIPELINE_ENABLED,
    'CSS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
    'YUI_BINARY': '/usr/bin/env yui-compressor',
    'STYLESHEETS': {
        # If you add a style here, please update stylesheets()
        # in frontend_tests/zjsunit/output.js as needed.
        'activity': {
            'source_filenames': ('styles/activity.css',),
            'output_filename': 'min/activity.css'
        },
        'portico': {
            'source_filenames': (
                'third/zocial/zocial.css',
                'styles/portico.css',
                'styles/pygments.css',
                'styles/thirdparty-fonts.css',
                'styles/fonts.css',
            ),
            'output_filename': 'min/portico.css'
        },
        # Two versions of the app CSS exist because of QTBUG-3467
        'app-fontcompat': {
            'source_filenames': (
                'third/bootstrap-notify/css/bootstrap-notify.css',
                'third/spectrum/spectrum.css',
                'styles/components.css',
                'styles/zulip.css',
                'styles/settings.css',
                'styles/subscriptions.css',
                'styles/compose.css',
                'styles/left-sidebar.css',
                'styles/overlay.css',
                'styles/pygments.css',
                'styles/thirdparty-fonts.css',
                'styles/media.css',
                # We don't want fonts.css on QtWebKit, so it's omitted here
            ),
            'output_filename': 'min/app-fontcompat.css'
        },
        'app': {
            'source_filenames': (
                'third/bootstrap-notify/css/bootstrap-notify.css',
                'third/spectrum/spectrum.css',
                'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
                'styles/components.css',
                'styles/zulip.css',
                'styles/settings.css',
                'styles/subscriptions.css',
                'styles/compose.css',
                'styles/left-sidebar.css',
                'styles/overlay.css',
                'styles/pygments.css',
                'styles/thirdparty-fonts.css',
                'styles/fonts.css',
                'styles/media.css',
            ),
            'output_filename': 'min/app.css'
        },
        'common': {
            'source_filenames': (
                'third/bootstrap/css/bootstrap.css',
                'third/bootstrap/css/bootstrap-btn.css',
                'third/bootstrap/css/bootstrap-responsive.css',
            ),
            'output_filename': 'min/common.css'
        },
    },
    'JAVASCRIPT': {},
}
# JavaScript bundle definitions, consumed by the static-asset build
# (update-prod-static) instead of PIPELINE['JAVASCRIPT'] above.
# Order within each 'source_filenames' list is load order.
JS_SPECS = {
    'common': {
        'source_filenames': (
            'node_modules/jquery/dist/jquery.js',
            'third/underscore/underscore.js',
            'js/blueslip.js',
            'third/bootstrap/js/bootstrap.js',
            'js/common.js',
        ),
        'output_filename': 'min/common.js'
    },
    'signup': {
        'source_filenames': (
            'js/signup.js',
            'node_modules/jquery-validation/dist/jquery.validate.js',
        ),
        'output_filename': 'min/signup.js'
    },
    'api': {
        'source_filenames': ('js/api.js',),
        'output_filename': 'min/api.js'
    },
    'app_debug': {
        'source_filenames': ('js/debug.js',),
        'output_filename': 'min/app_debug.js'
    },
    # The main webapp bundle; a list (not a tuple) because js/bundle.js is
    # appended below when PIPELINE_ENABLED.
    'app': {
        'source_filenames': [
            'third/bootstrap-notify/js/bootstrap-notify.js',
            'third/html5-formdata/formdata.js',
            'node_modules/jquery-validation/dist/jquery.validate.js',
            'node_modules/sockjs-client/sockjs.js',
            'third/jquery-form/jquery.form.js',
            'third/jquery-filedrop/jquery.filedrop.js',
            'third/jquery-caret/jquery.caret.1.5.2.js',
            'third/xdate/xdate.dev.js',
            'third/spin/spin.js',
            'third/jquery-mousewheel/jquery.mousewheel.js',
            'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
            'third/jquery-idle/jquery.idle.js',
            'third/jquery-autosize/jquery.autosize.js',
            'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
            'third/lazyload/lazyload.js',
            'third/spectrum/spectrum.js',
            'third/string-prototype-codepointat/codepointat.js',
            'third/winchan/winchan.js',
            'third/handlebars/handlebars.runtime.js',
            'third/marked/lib/marked.js',
            'templates/compiled.js',
            'js/feature_flags.js',
            'js/loading.js',
            'js/util.js',
            'js/dict.js',
            'js/components.js',
            'js/localstorage.js',
            'js/channel.js',
            'js/setup.js',
            'js/unread_ui.js',
            'js/muting.js',
            'js/muting_ui.js',
            'js/viewport.js',
            'js/rows.js',
            'js/people.js',
            'js/unread.js',
            'js/topic_list.js',
            'js/pm_list.js',
            'js/stream_list.js',
            'js/filter.js',
            'js/message_list_view.js',
            'js/message_list.js',
            'js/narrow.js',
            'js/reload.js',
            'js/compose_fade.js',
            'js/fenced_code.js',
            'js/echo.js',
            'js/socket.js',
            'js/compose.js',
            'js/stream_color.js',
            'js/admin.js',
            'js/stream_data.js',
            'js/subs.js',
            'js/message_edit.js',
            'js/condense.js',
            'js/resize.js',
            'js/floating_recipient_bar.js',
            'js/ui.js',
            'js/pointer.js',
            'js/click_handlers.js',
            'js/scroll_bar.js',
            'js/gear_menu.js',
            'js/copy_and_paste.js',
            'js/popovers.js',
            'js/typeahead_helper.js',
            'js/search_suggestion.js',
            'js/search.js',
            'js/composebox_typeahead.js',
            'js/navigate.js',
            'js/hotkey.js',
            'js/favicon.js',
            'js/notifications.js',
            'js/hashchange.js',
            'js/invite.js',
            'js/message_flags.js',
            'js/alert_words.js',
            'js/alert_words_ui.js',
            'js/message_store.js',
            'js/server_events.js',
            'js/zulip.js',
            'js/activity.js',
            'js/colorspace.js',
            'js/timerender.js',
            'js/tutorial.js',
            'js/templates.js',
            'js/avatar.js',
            'js/settings.js',
            'js/tab_bar.js',
            'js/emoji.js',
            'js/referral.js',
            'js/custom_markdown.js',
            'js/bot_data.js',
            # JS bundled by webpack is also included here if PIPELINE_ENABLED setting is true
        ],
        'output_filename': 'min/app.js'
    },
    'activity': {
        'source_filenames': (
            'third/sorttable/sorttable.js',
        ),
        'output_filename': 'min/activity.js'
    },
    # We also want to minify sockjs separately for the sockjs iframe transport
    'sockjs': {
        'source_filenames': ('node_modules/sockjs-client/sockjs.js',),
        'output_filename': 'min/sockjs.min.js'
    },
}
if PIPELINE_ENABLED:
    # This is also done in test_settings.py, see comment there..
    JS_SPECS['app']['source_filenames'].append('js/bundle.js')
app_srcs = JS_SPECS['app']['source_filenames']
########################################################################
# LOGGING SETTINGS
########################################################################
# (settings-name, default-path) pairs; each name is bound as a module-level
# setting by the loop below, with paths relocated in development.
ZULIP_PATHS = [
    ("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
    ("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
    ("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
    ("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
    ("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
    ("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
    ("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email_mirror.log"),
    ("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
    ("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
    ("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
    ("STATS_DIR", "/home/zulip/stats"),
    ("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
    ("ANALYTICS_LOG_PATH", "/var/log/zulip/analytics.log"),
]
# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if EVENT_LOGS_ENABLED:
    ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))
else:
    EVENT_LOG_DIR = None
for (var, path) in ZULIP_PATHS:
    if DEVELOPMENT:
        # if DEVELOPMENT, store these files in the Zulip checkout
        path = os.path.join(DEVELOPMENT_LOG_DIRECTORY, os.path.basename(path))
        # only `JSON_PERSISTENT_QUEUE_FILENAME` will be stored in `var`
        if var == 'JSON_PERSISTENT_QUEUE_FILENAME':
            path = os.path.join(os.path.join(DEPLOY_ROOT, 'var'), os.path.basename(path))
    # bind the setting name dynamically at module level
    vars()[var] = path
ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'
if IS_WORKER:
    FILE_LOG_PATH = WORKER_LOG_PATH
else:
    FILE_LOG_PATH = SERVER_LOG_PATH
# Django LOGGING configuration.  Handlers write to the console, the main
# log file (FILE_LOG_PATH) and a warnings-and-above errors file; errors are
# additionally mailed to the server admins via Zulip when ERROR_REPORTING
# is enabled.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'default': {
            'format': '%(asctime)s %(levelname)-8s %(message)s'
        }
    },
    'filters': {
        'ZulipLimiter': {
            '()': 'zerver.lib.logging_util.ZulipLimiter',
        },
        'EmailLimiter': {
            '()': 'zerver.lib.logging_util.EmailLimiter',
        },
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'nop': {
            '()': 'zerver.lib.logging_util.ReturnTrue',
        },
        'require_really_deployed': {
            '()': 'zerver.lib.logging_util.RequireReallyDeployed',
        },
    },
    'handlers': {
        'zulip_admins': {
            'level': 'ERROR',
            'class': 'zerver.logging_handlers.AdminZulipHandler',
            # For testing the handler delete the next line
            'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
            'formatter': 'default'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.WatchedFileHandler',
            'formatter': 'default',
            'filename': FILE_LOG_PATH,
        },
        'errors_file': {
            'level': 'WARNING',
            'class': 'logging.handlers.WatchedFileHandler',
            'formatter': 'default',
            'filename': ERROR_FILE_LOG_PATH,
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'django': {
            # BUG FIX: the original expression
            #     ['zulip_admins'] if ERROR_REPORTING else [] + [...]
            # parsed as "A if C else (B + D)" because '+' binds tighter
            # than the conditional, so with ERROR_REPORTING enabled the
            # django logger lost its console/file/errors_file handlers.
            # The parenthesized form always keeps all three, optionally
            # prepending zulip_admins.
            'handlers': ((['zulip_admins'] if ERROR_REPORTING else []) +
                         ['console', 'file', 'errors_file']),
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.requests': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.queue': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'WARNING',
            'propagate': False,
        },
        'zulip.management': {
            'handlers': ['file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'requests': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'WARNING',
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'handlers': ['file'],
            'propagate': False,
        },
        ## Uncomment the following to get all database queries logged to the console
        # 'django.db': {
        #     'handlers': ['console'],
        #     'level': 'DEBUG',
        #     'propagate': False,
        # },
    }
}
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety.  This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000
# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
# ONLY_SSO is true exactly when the remote-user backend is the single
# configured backend; it changes where not-logged-in users are sent.
if len(AUTHENTICATION_BACKENDS) == 1 and (AUTHENTICATION_BACKENDS[0] ==
                                          "zproject.backends.ZulipRemoteUserBackend"):
    HOME_NOT_LOGGED_IN = "/accounts/login/sso"
    ONLY_SSO = True
else:
    HOME_NOT_LOGGED_IN = '/login'
    ONLY_SSO = False
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
# LDAP-based profile population is implied by configuring an LDAP server;
# if the LDAP auth backend itself is not enabled, add the populator-only
# backend so profile data still syncs.
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
if POPULATE_PROFILE_VIA_LDAP and \
   'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
    AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
    POPULATE_PROFILE_VIA_LDAP = 'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or POPULATE_PROFILE_VIA_LDAP
########################################################################
# GITHUB AUTHENTICATION SETTINGS
########################################################################
# SOCIAL_AUTH_GITHUB_KEY is set in /etc/zulip/settings.py
SOCIAL_AUTH_GITHUB_SECRET = get_secret('social_auth_github_secret')
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login/'
SOCIAL_AUTH_GITHUB_SCOPE = ['email']
# The org/team-restricted GitHub backends reuse the same OAuth app.
SOCIAL_AUTH_GITHUB_ORG_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_ORG_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GITHUB_TEAM_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_TEAM_SECRET = SOCIAL_AUTH_GITHUB_SECRET
########################################################################
# EMAIL SETTINGS
########################################################################
# If an email host is not specified, fail silently and gracefully
if not EMAIL_HOST and PRODUCTION:
    EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
    # In the dev environment, emails are printed to the run-dev.py console.
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
if EMAIL_GATEWAY_PASSWORD is None:
    EMAIL_GATEWAY_PASSWORD = get_secret('email_gateway_password')
if vars().get("AUTH_LDAP_BIND_PASSWORD") is None:
    AUTH_LDAP_BIND_PASSWORD = get_secret('auth_ldap_bind_password')
# Set the sender email address for Django traceback error reporting
if SERVER_EMAIL is None:
    SERVER_EMAIL = DEFAULT_FROM_EMAIL
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
    # Filter out user data
    DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
# Bots whose messages may cross realm boundaries.
CROSS_REALM_BOT_EMAILS = set(('feedback@zulip.com', 'notification-bot@zulip.com'))
| apache-2.0 |
dbs/schemaorg | lib/rdflib/plugins/stores/sparqlstore.py | 7 | 30545 | # -*- coding: utf-8 -*-
#
"""
This is an RDFLib store around Ivan Herman et al.'s SPARQL service wrapper.
This was first done in layer-cake, and then ported to RDFLib
"""
# SPARQL keywords; SPARQLStore.triples() looks these names up as attributes
# on the calling Graph object to refine the generated SELECT query.
LIMIT = 'LIMIT'
OFFSET = 'OFFSET'
ORDERBY = 'ORDER BY'
import re
import collections
import urllib2
# import warnings
try:
from SPARQLWrapper import SPARQLWrapper, XML, POST, GET, URLENCODED, POSTDIRECTLY
except ImportError:
raise Exception(
"SPARQLWrapper not found! SPARQL Store will not work." +
"Install with 'easy_install SPARQLWrapper'")
import sys
if getattr(sys, 'pypy_version_info', None) is not None \
or sys.platform.startswith('java') \
or sys.version_info[:2] < (2, 6):
# import elementtree as etree
from elementtree import ElementTree
assert ElementTree
else:
try:
from xml.etree import ElementTree
assert ElementTree
except ImportError:
from elementtree import ElementTree
from rdflib.plugins.stores.regexmatching import NATIVE_REGEX
from rdflib.store import Store
from rdflib.query import Result
from rdflib import Variable, Namespace, BNode, URIRef, Literal
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
import httplib
import urlparse
class NSSPARQLWrapper(SPARQLWrapper):
    """SPARQLWrapper subclass that remembers namespace bindings and
    prepends the matching PREFIX declarations to every query."""

    nsBindings = {}

    def setNamespaceBindings(self, bindings):
        """
        A shortcut for setting namespace bindings that will be added
        to the prolog of the query

        @param bindings: A dictionary of prefixs to URIs
        """
        self.nsBindings.update(bindings)

    def setQuery(self, query):
        """
        Set the SPARQL query text. Note: no check is done on the
        validity of the query (syntax or otherwise) by this module,
        except for testing the query type (SELECT, ASK, etc).

        Syntax and validity checking is done by the SPARQL service itself.

        @param query: query text
        @type query: string
        @bug: #2320024
        """
        self.queryType = self._parseQueryType(query)
        self.queryString = self.injectPrefixes(query)

    def injectPrefixes(self, query):
        # Build one PREFIX line per stored binding, then put them in a
        # prolog block ahead of the query text.
        declarations = []
        for prefix, uri in self.nsBindings.items():
            declarations.append('PREFIX %s: <%s>' % (prefix, uri))
        return '\n'.join(['\n'.join(declarations), query])
# Pattern matching a blank-node label such as "_:b0".
BNODE_IDENT_PATTERN = re.compile('(?P<label>_\:[^\s]+)')
# Namespace of the SPARQL XML results format, registered with elementtree
# so serialized output uses the "sparql" prefix.
SPARQL_NS = Namespace('http://www.w3.org/2005/sparql-results#')
sparqlNsBindings = {u'sparql': SPARQL_NS}
ElementTree._namespace_map["sparql"] = SPARQL_NS
def TraverseSPARQLResultDOM(doc, asDictionary=False):
    """
    Generate ``(bindings, vars)`` pairs from a parsed SPARQL XML
    result document.

    ``vars`` is the list of projected Variables; ``bindings`` is a
    dict of Variable -> term when *asDictionary* is true, otherwise
    a single term (one projected variable) or a tuple of terms.
    """
    # namespace handling in elementtree xpath sub-set is not pretty :(
    ns = '{http://www.w3.org/2005/sparql-results#}'
    projected = [Variable(el.attrib["name"])
                 for el in doc.findall('./%shead/%svariable' % (ns, ns))]
    for row in doc.findall('./%sresults/%sresult' % (ns, ns)):
        bound = {}
        terms = []
        for binding in row.findall('%sbinding' % ns):
            term = CastToTerm(binding.findall('*')[0])
            terms.append(term)
            bound[Variable(binding.attrib["name"])] = term
        if asDictionary:
            yield bound, projected
        else:
            yield (terms[0] if len(terms) == 1 else tuple(terms)), projected
def localName(qname):
    """Return the local part of *qname*, i.e. everything after the
    closing '}' of its '{namespace}' prefix."""
    # elementtree offers no helper for this, so strip the prefix by hand
    closing_brace = qname.index("}")
    return qname[closing_brace + 1:]
def CastToTerm(node):
    """
    Map a node from a SPARQL XML result document to the matching
    rdflib term: BNode, URIRef, or (typed / language-tagged) Literal.
    """
    tag = node.tag
    if tag == '{%s}bnode' % SPARQL_NS:
        return BNode(node.text)
    if tag == '{%s}uri' % SPARQL_NS:
        return URIRef(node.text)
    if tag == '{%s}literal' % SPARQL_NS:
        # elementtree reports an empty element's text as None
        value = node.text if node.text is not None else ''
        if 'datatype' in node.attrib:
            return Literal(value, datatype=URIRef(node.attrib['datatype']))
        if '{http://www.w3.org/XML/1998/namespace}lang' in node.attrib:
            return Literal(value, lang=node.attrib[
                "{http://www.w3.org/XML/1998/namespace}lang"])
        return Literal(value)
    raise Exception('Unknown answer type')
class SPARQLStore(NSSPARQLWrapper, Store):
    """
    An RDFLib store around a SPARQL endpoint

    This is in theory context-aware and should work as expected
    when a context is specified.

    For ConjunctiveGraphs, reading is done from the "default graph". Exactly
    what this means depends on your endpoint, because SPARQL does not offer a
    simple way to query the union of all graphs as it would be expected for a
    ConjuntiveGraph. This is why we recommend using Dataset instead, which is
    motivated by the SPARQL 1.1.

    Fuseki/TDB has a flag for specifying that the default graph
    is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).

    .. warning:: The SPARQL Store does not support blank-nodes!

                 As blank-nodes act as variables in SPARQL queries
                 there is no way to query for a particular blank node.

                 See http://www.w3.org/TR/sparql11-query/#BGPsparqlBNodes
    """
    # rdflib Store capability flags: this store is read-only (all mutating
    # methods below raise TypeError) but graph/context aware.
    formula_aware = False
    transaction_aware = False
    graph_aware = True
    regex_matching = NATIVE_REGEX

    def __init__(self,
                 endpoint=None, bNodeAsURI=False,
                 sparql11=True, context_aware=True,
                 **sparqlwrapper_kwargs):
        """
        """
        super(SPARQLStore, self).__init__(endpoint, returnFormat=XML, **sparqlwrapper_kwargs)
        self.setUseKeepAlive()
        self.bNodeAsURI = bNodeAsURI
        self.nsBindings = {}
        self.sparql11 = sparql11
        self.context_aware = context_aware
        # graph awareness follows context awareness
        self.graph_aware = context_aware

    # Database Management Methods
    def create(self, configuration):
        raise TypeError('The SPARQL store is read only')

    def open(self, configuration, create=False):
        """
        sets the endpoint URL for this SPARQLStore
        if create==True an exception is thrown.
        """
        if create:
            raise Exception("Cannot create a SPARQL Endpoint")
        self.query_endpoint = configuration

    def __set_query_endpoint(self, queryEndpoint):
        # re-running SPARQLWrapper's __init__ resets wrapper state for the
        # new endpoint
        super(SPARQLStore, self).__init__(queryEndpoint, returnFormat=XML)
        self.endpoint = queryEndpoint

    def __get_query_endpoint(self):
        return self.endpoint

    # alias for the underlying SPARQLWrapper endpoint attribute
    query_endpoint = property(__get_query_endpoint, __set_query_endpoint)

    def destroy(self, configuration):
        raise TypeError('The SPARQL store is read only')

    # Transactional interfaces: all writes are rejected.
    def commit(self):
        raise TypeError('The SPARQL store is read only')

    def rollback(self):
        raise TypeError('The SPARQL store is read only')

    def add(self, (subject, predicate, obj), context=None, quoted=False):
        raise TypeError('The SPARQL store is read only')

    def addN(self, quads):
        raise TypeError('The SPARQL store is read only')

    def remove(self, (subject, predicate, obj), context):
        raise TypeError('The SPARQL store is read only')

    def query(self, query,
              initNs={},
              initBindings={},
              queryGraph=None,
              DEBUG=False):
        """Run a SPARQL query against the endpoint and parse the result.

        initNs becomes PREFIX declarations; initBindings is injected as a
        VALUES block (SPARQL 1.1 only).
        """
        self.debug = DEBUG
        assert isinstance(query, basestring)
        self.setNamespaceBindings(initNs)
        if initBindings:
            if not self.sparql11:
                raise Exception(
                    "initBindings not supported for SPARQL 1.0 Endpoints.")
            v = list(initBindings)
            # VALUES was added to SPARQL 1.1 on 2012/07/24
            query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
                % (" ".join("?" + str(x) for x in v),
                   " ".join(initBindings[x].n3() for x in v))
        self.resetQuery()
        if self._is_contextual(queryGraph):
            self.addDefaultGraph(queryGraph)
        self.setQuery(query)
        return Result.parse(SPARQLWrapper.query(self).response)

    def triples(self, (s, p, o), context=None):
        """
        - tuple **(s, o, p)**
            the triple used as filter for the SPARQL select.
            (None, None, None) means anything.
        - context **context**
            the graph effectively calling this method.

        Returns a tuple of triples executing essentially a SPARQL like
        SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj }

        **context** may include three parameter
        to refine the underlying query:
        * LIMIT: an integer to limit the number of results
        * OFFSET: an integer to enable paging of results
        * ORDERBY: an instance of Variable('s'), Variable('o') or Variable('p')
          or, by default, the first 'None' from the given triple

        .. warning::
            - Using LIMIT or OFFSET automatically include ORDERBY otherwise this is
              because the results are retrieved in a not deterministic way (depends on
              the walking path on the graph)
            - Using OFFSET without defining LIMIT will discard the first OFFSET - 1
              results

        ``
        a_graph.LIMIT = limit
        a_graph.OFFSET = offset
        triple_generator = a_graph.triples(mytriple):
        #do something
        #Removes LIMIT and OFFSET if not required for the next triple() calls
        del a_graph.LIMIT
        del a_graph.OFFSET
        ``
        """
        if ( isinstance(s, BNode) or
             isinstance(p, BNode) or
             isinstance(o, BNode) ):
            raise Exception("SPARQLStore does not support Bnodes! "
                            "See http://www.w3.org/TR/sparql11-query/#BGPsparqlBNodes")
        # Replace each unbound position by a variable and remember which
        # positions were variables, so results can be merged back below.
        vars = []
        if not s:
            s = Variable('s')
            vars.append(s)
        if not p:
            p = Variable('p')
            vars.append(p)
        if not o:
            o = Variable('o')
            vars.append(o)
        if vars:
            v = ' '.join([term.n3() for term in vars])
        else:
            v = '*'
        query = "SELECT %s WHERE { %s %s %s }" % \
            (v, s.n3(), p.n3(), o.n3())
        # The ORDER BY is necessary
        if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
                or hasattr(context, ORDERBY):
            var = None
            if isinstance(s, Variable):
                var = s
            elif isinstance(p, Variable):
                var = p
            elif isinstance(o, Variable):
                var = o
            elif hasattr(context, ORDERBY) \
                    and isinstance(getattr(context, ORDERBY), Variable):
                var = getattr(context, ORDERBY)
            query = query + ' %s %s' % (ORDERBY, var.n3())
        # LIMIT/OFFSET are optional attributes on the calling graph; any
        # missing/invalid value is simply ignored.
        try:
            query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
        except (ValueError, TypeError, AttributeError):
            pass
        try:
            query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
        except (ValueError, TypeError, AttributeError):
            pass
        self.resetQuery()
        if self._is_contextual(context):
            self.addDefaultGraph(context.identifier)
        self.setQuery(query)
        doc = ElementTree.parse(SPARQLWrapper.query(self).response)
        # ElementTree.dump(doc)
        # For each result row, substitute bound values for the variable
        # positions; fixed positions pass through unchanged.  The second
        # element of the yielded pair (the context) is always None here.
        for rt, vars in TraverseSPARQLResultDOM(doc, asDictionary=True):
            yield (rt.get(s, s),
                   rt.get(p, p),
                   rt.get(o, o)), None

    def triples_choices(self, (subject, predicate, object_), context=None):
        """
        A variant of triples that can take a list of terms instead of a
        single term in any slot.  Stores can implement this to optimize
        the response time from the import default 'fallback' implementation,
        which will iterate over each term in the list and dispatch to
        triples.
        """
        raise NotImplementedError('Triples choices currently not supported')

    def __len__(self, context=None):
        # Implemented with SELECT (count(*) ...), which needs SPARQL 1.1.
        if not self.sparql11:
            raise NotImplementedError(
                "For performance reasons, this is not" +
                "supported for sparql1.0 endpoints")
        else:
            self.resetQuery()
            q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
            if self._is_contextual(context):
                self.addDefaultGraph(context.identifier)
            self.setQuery(q)
            doc = ElementTree.parse(SPARQLWrapper.query(self).response)
            rt, vars = iter(
                TraverseSPARQLResultDOM(doc, asDictionary=True)).next()
            return int(rt.get(Variable("c")))

    def contexts(self, triple=None):
        """
        Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
        or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

        Returns instances of this store with the SPARQL wrapper
        object updated via addNamedGraph(?NAME).

        This causes a named-graph-uri key / value pair to be sent over
        the protocol.

        Please note that some SPARQL endpoints are not able to find empty named
        graphs.
        """
        self.resetQuery()
        if triple:
            s, p, o = triple
            params = ((s if s else Variable('s')).n3(),
                      (p if p else Variable('p')).n3(),
                      (o if o else Variable('o')).n3())
            self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
        else:
            self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')
        doc = ElementTree.parse(SPARQLWrapper.query(self).response)
        return (rt.get(Variable("name"))
                for rt, vars in TraverseSPARQLResultDOM(doc, asDictionary=True))

    # Namespace persistence interface implementation
    def bind(self, prefix, namespace):
        self.nsBindings[prefix] = namespace

    def prefix(self, namespace):
        """ """
        # reverse lookup: namespace URI -> prefix
        return dict(
            [(v, k) for k, v in self.nsBindings.items()]
        ).get(namespace)

    def namespace(self, prefix):
        return self.nsBindings.get(prefix)

    def namespaces(self):
        for prefix, ns in self.nsBindings.items():
            yield prefix, ns

    def add_graph(self, graph):
        raise TypeError('The SPARQL store is read only')

    def remove_graph(self, graph):
        raise TypeError('The SPARQL store is read only')

    def _is_contextual(self, graph):
        """ Returns `True` if the "GRAPH" keyword must appear
        in the final SPARQL query sent to the endpoint.
        """
        if (not self.context_aware) or (graph is None):
            return False
        if isinstance(graph, basestring):
            return graph != '__UNION__'
        else:
            return graph.identifier != DATASET_DEFAULT_GRAPH_ID
class SPARQLUpdateStore(SPARQLStore):
"""A store using SPARQL queries for reading and SPARQL Update for changes.
This can be context-aware, if so, any changes will be to the given named
graph only.
In favor of the SPARQL 1.1 motivated Dataset, we advise against using this
with ConjunctiveGraphs, as it reads and writes from and to the
"default graph". Exactly what this means depends on the endpoint and can
result in confusion.
For Graph objects, everything works as expected.
.. warning:: The SPARQL Update Store does not support blank-nodes!
As blank-nodes acts as variables in SPARQL queries
there is no way to query for a particular blank node.
See http://www.w3.org/TR/sparql11-query/#BGPsparqlBNodes
"""
    # Matches the opening of a WHERE clause; used when injecting GRAPH
    # blocks into update operations.
    where_pattern = re.compile(r"""(?P<where>WHERE\s*{)""", re.IGNORECASE)

    ##################################################################
    ### Regex for injecting GRAPH blocks into updates on a context ###
    ##################################################################

    # Observations on the SPARQL grammar (http://www.w3.org/TR/2013/REC-sparql11-query-20130321/):
    # 1. Only the terminals STRING_LITERAL1, STRING_LITERAL2,
    #    STRING_LITERAL_LONG1, STRING_LITERAL_LONG2, and comments can contain
    #    curly braces.
    # 2. The non-terminals introduce curly braces in pairs only.
    # 3. Unescaped " can occur only in strings and comments.
    # 3. Unescaped ' can occur only in strings, comments, and IRIRefs.
    # 4. \ always escapes the following character, especially \", \', and
    #    \\ denote literal ", ', and \ respectively.
    # 5. # always starts a comment outside of string and IRI
    # 6. A comment ends at the next newline
    # 7. IRIREFs need to be detected, as they may contain # without starting a comment
    # 8. PrefixedNames do not contain a #
    # As a consequence, it should be rather easy to detect strings and comments
    # in order to avoid unbalanced curly braces.

    # From the SPARQL grammar
    STRING_LITERAL1 = ur"'([^'\\]|\\.)*'"
    STRING_LITERAL2 = ur'"([^"\\]|\\.)*"'
    STRING_LITERAL_LONG1 = ur"'''(('|'')?([^'\\]|\\.))*'''"
    STRING_LITERAL_LONG2 = ur'"""(("|"")?([^"\\]|\\.))*"""'
    String = u'(%s)|(%s)|(%s)|(%s)' % (STRING_LITERAL1, STRING_LITERAL2, STRING_LITERAL_LONG1, STRING_LITERAL_LONG2)
    IRIREF = ur'<([^<>"{}|^`\]\\\[\x00-\x20])*>'
    COMMENT = ur'#[^\x0D\x0A]*([\x0D\x0A]|\Z)'

    # Simplified grammar to find { at beginning and } at end of blocks
    BLOCK_START = u'{'
    BLOCK_END = u'}'
    ESCAPED = ur'\\.'

    # Match anything that doesn't start or end a block:
    BlockContent = u'(%s)|(%s)|(%s)|(%s)' % (String, IRIREF, COMMENT, ESCAPED)
    BlockFinding = u'(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)' % (BLOCK_START, BLOCK_END, BlockContent)
    BLOCK_FINDING_PATTERN = re.compile(BlockFinding)

    # Note that BLOCK_FINDING_PATTERN.finditer() will not cover the whole
    # string with matches. Everything that is not matched will have to be
    # part of the modified query as is.

    ##################################################################
def __init__(self,
queryEndpoint=None, update_endpoint=None,
bNodeAsURI=False, sparql11=True,
context_aware=True,
postAsEncoded=True, autocommit=True):
SPARQLStore.__init__(self,
queryEndpoint, bNodeAsURI, sparql11, context_aware, updateEndpoint=update_endpoint)
self.postAsEncoded = postAsEncoded
self.autocommit = autocommit
self._edits = None
def query(self,*args, **kwargs):
if not self.autocommit:
self.commit()
return SPARQLStore.query(self,*args, **kwargs)
def triples(self,*args, **kwargs):
if not self.autocommit:
self.commit()
return SPARQLStore.triples(self,*args, **kwargs)
def contexts(self,*args, **kwargs):
if not self.autocommit:
self.commit()
return SPARQLStore.contexts(self,*args, **kwargs)
def __len__(self,*args, **kwargs):
if not self.autocommit:
self.commit()
return SPARQLStore.__len__(self,*args, **kwargs)
def open(self, configuration, create=False):
"""
sets the endpoint URLs for this SPARQLStore
:param configuration: either a tuple of (queryEndpoint, update_endpoint),
or a string with the query endpoint
:param create: if True an exception is thrown.
"""
if create:
raise Exception("Cannot create a SPARQL Endpoint")
if isinstance(configuration, tuple):
self.endpoint = configuration[0]
if len(configuration) > 1:
self.updateEndpoint = configuration[1]
else:
self.endpoint = configuration
if not self.updateEndpoint:
self.updateEndpoint = self.endpoint
def _transaction(self):
if self._edits == None:
self._edits = []
return self._edits
def __set_update_endpoint(self, update_endpoint):
self.updateEndpoint = update_endpoint
def __get_update_endpoint(self):
return self.updateEndpoint
update_endpoint = property(
__get_update_endpoint,
__set_update_endpoint,
doc='the HTTP URL for the Update endpoint, typically' +
'something like http://server/dataset/update')
# Transactional interfaces
def commit(self):
""" add(), addN(), and remove() are transactional to reduce overhead of many small edits.
Read and update() calls will automatically commit any outstanding edits.
This should behave as expected most of the time, except that alternating writes
and reads can degenerate to the original call-per-triple situation that originally existed.
"""
if self._edits and len(self._edits) > 0:
r = self._do_update('\n;\n'.join(self._edits))
self._edits = None
return r
    def rollback(self):
        """Discard any buffered (uncommitted) edits."""
        self._edits = None
def add(self, spo, context=None, quoted=False):
""" Add a triple to the store of triples. """
if not self.endpoint:
raise Exception("UpdateEndpoint is not set - call 'open'")
assert not quoted
(subject, predicate, obj) = spo
if ( isinstance(subject, BNode) or
isinstance(predicate, BNode) or
isinstance(obj, BNode) ):
raise Exception("SPARQLStore does not support Bnodes! "
"See http://www.w3.org/TR/sparql11-query/#BGPsparqlBNodes")
triple = "%s %s %s ." % (subject.n3(), predicate.n3(), obj.n3())
if self._is_contextual(context):
q = "INSERT DATA { GRAPH %s { %s } }" % (
context.identifier.n3(), triple)
else:
q = "INSERT DATA { %s }" % triple
self._transaction().append(q)
if self.autocommit:
self.commit()
def addN(self, quads):
""" Add a list of quads to the store. """
if not self.endpoint:
raise Exception("UpdateEndpoint is not set - call 'open'")
contexts = collections.defaultdict(list)
for subject, predicate, obj, context in quads:
contexts[context].append((subject,predicate,obj))
data = []
for context in contexts:
triples = ["%s %s %s ." % (x[0].n3(), x[1].n3(), x[2].n3()) for x in contexts[context]]
data.append("INSERT DATA { GRAPH <%s> { %s } }\n" % (context.identifier, '\n'.join(triples)))
self._transaction().extend(data)
if self.autocommit:
self.commit()
def remove(self, spo, context):
""" Remove a triple from the store """
if not self.endpoint:
raise Exception("UpdateEndpoint is not set - call 'open'")
(subject, predicate, obj) = spo
if not subject:
subject = Variable("S")
if not predicate:
predicate = Variable("P")
if not obj:
obj = Variable("O")
triple = "%s %s %s ." % (subject.n3(), predicate.n3(), obj.n3())
if self._is_contextual(context):
q = "DELETE { GRAPH %s { %s } } WHERE { GRAPH %s { %s } }" % (
context.identifier.n3(), triple,
context.identifier.n3(), triple)
else:
q = "DELETE { %s } WHERE { %s } " % (triple, triple)
self._transaction().append(q)
if self.autocommit:
self.commit()
    def _do_update(self, update):
        # Send one (possibly batched) SPARQL Update string to the endpoint.
        # resetQuery() first: SPARQLWrapper otherwise keeps the previous
        # request's parameters around.
        self.resetQuery()
        self.setQuery(update)
        self.setMethod(POST)
        # Either url-encode the update into the body or POST it verbatim,
        # depending on how this store was configured.
        self.setRequestMethod(URLENCODED if self.postAsEncoded else POSTDIRECTLY)
        result = SPARQLWrapper.query(self)
        return result
def update(self, query,
initNs={},
initBindings={},
queryGraph=None,
DEBUG=False):
"""
Perform a SPARQL Update Query against the endpoint,
INSERT, LOAD, DELETE etc.
Setting initNs adds PREFIX declarations to the beginning of
the update. Setting initBindings adds inline VALUEs to the
beginning of every WHERE clause. By the SPARQL grammar, all
operations that support variables (namely INSERT and DELETE)
require a WHERE clause.
Important: initBindings fails if the update contains the
substring 'WHERE {' which does not denote a WHERE clause, e.g.
if it is part of a literal.
.. admonition:: Context-aware query rewriting
- **When:** If context-awareness is enabled and the graph is not the default graph of the store.
- **Why:** To ensure consistency with the :class:`~rdflib.plugins.memory.IOMemory` store.
The graph must except "local" SPARQL requests (requests with no GRAPH keyword)
like if it was the default graph.
- **What is done:** These "local" queries are rewritten by this store.
The content of each block of a SPARQL Update operation is wrapped in a GRAPH block
except if the block is empty.
This basically causes INSERT, INSERT DATA, DELETE, DELETE DATA and WHERE to operate
only on the context.
- **Example:** `"INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"` is converted into
`"INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> } }"`.
- **Warning:** Queries are presumed to be "local" but this assumption is **not checked**.
For instance, if the query already contains GRAPH blocks, the latter will be wrapped in new GRAPH blocks.
- **Warning:** A simplified grammar is used that should tolerate
extensions of the SPARQL grammar. Still, the process may fail in
uncommon situations and produce invalid output.
"""
if not self.endpoint:
raise Exception("UpdateEndpoint is not set - call 'open'")
self.debug = DEBUG
assert isinstance(query, basestring)
self.setNamespaceBindings(initNs)
query = self.injectPrefixes(query)
if self._is_contextual(queryGraph):
query = self._insert_named_graph(query, queryGraph)
if initBindings:
# For INSERT and DELETE the WHERE clause is obligatory
# (http://www.w3.org/TR/2013/REC-sparql11-query-20130321/#rModify)
# Other query types do not allow variables and don't
# have a WHERE clause. This also works for updates with
# more than one INSERT/DELETE.
v = list(initBindings)
values = "\nVALUES ( %s )\n{ ( %s ) }\n"\
% (" ".join("?" + str(x) for x in v),
" ".join(initBindings[x].n3() for x in v))
query = self.where_pattern.sub("WHERE { " + values, query)
self._transaction().append(query)
if self.autocommit:
self.commit()
    def _insert_named_graph(self, query, query_graph):
        """
        Inserts GRAPH <query_graph> {} into blocks of SPARQL Update operations

        For instance, "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
        is converted into
        "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> } }"

        :param query: the SPARQL Update string to rewrite.
        :param query_graph: the graph IRI to wrap each top-level block in.
        :return: the rewritten update string.
        """
        graph_block_open = " GRAPH <%s> {" % query_graph
        graph_block_close = "} "

        # SPARQL Update supports the following operations:
        # LOAD, CLEAR, DROP, ADD, MOVE, COPY, CREATE, INSERT DATA, DELETE DATA, DELETE/INSERT, DELETE WHERE
        # LOAD, CLEAR, DROP, ADD, MOVE, COPY, CREATE do not make much sense in a context.
        # INSERT DATA, DELETE DATA, and DELETE WHERE require the contents of their block to be wrapped in a GRAPH <?> { }.
        # DELETE/INSERT supports the WITH keyword, which sets the graph to be
        # used for all following DELETE/INSERT instruction including the
        # non-optional WHERE block. Equivalently, a GRAPH block can be added to
        # all blocks.
        #
        # Strategy employed here: Wrap the contents of every top-level block into a `GRAPH <?> { }`.

        # ``level`` tracks brace-nesting depth; only level-1 blocks are wrapped.
        level = 0
        modified_query = []
        pos = 0
        for match in self.BLOCK_FINDING_PATTERN.finditer(query):
            if match.group('block_start') is not None:
                level += 1
                if level == 1:
                    modified_query.append(query[pos:match.end()])
                    modified_query.append(graph_block_open)
                    pos = match.end()
            elif match.group('block_end') is not None:
                if level == 1:
                    since_previous_pos = query[pos:match.start()]
                    if modified_query[-1] is graph_block_open and (since_previous_pos == "" or since_previous_pos.isspace()):
                        # In this case, adding graph_block_start and
                        # graph_block_end results in an empty GRAPH block. Some
                        # endpoints (e.g. TDB) can not handle this. Therefore
                        # remove the previously added block_start.
                        modified_query.pop()
                        modified_query.append(since_previous_pos)
                    else:
                        modified_query.append(since_previous_pos)
                        modified_query.append(graph_block_close)
                    pos = match.start()
                level -= 1
        # Append whatever remains after the last match (unmatched text is
        # carried over into the rewritten query verbatim).
        modified_query.append(query[pos:])
        return "".join(modified_query)
def add_graph(self, graph):
if not self.graph_aware:
Store.add_graph(self, graph)
elif graph.identifier != DATASET_DEFAULT_GRAPH_ID:
self.update("CREATE GRAPH <%s>" % graph.identifier)
def remove_graph(self, graph):
if not self.graph_aware:
Store.remove_graph(self, graph)
elif graph.identifier == DATASET_DEFAULT_GRAPH_ID:
self.update("DROP DEFAULT")
else:
self.update("DROP GRAPH <%s>" % graph.identifier)
| apache-2.0 |
paddyvishnubhatt/cryptocurrency | lib/flask/blueprints.py | 169 | 16872 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
    """Temporary holder object for registering a blueprint with the
    application.  An instance of this class is created by the
    :meth:`~flask.Blueprint.make_setup_state` method and later passed
    to all register callback functions.
    """

    def __init__(self, blueprint, app, options, first_registration):
        #: a reference to the current application
        self.app = app

        #: a reference to the blueprint that created this setup state.
        self.blueprint = blueprint

        #: a dictionary with all options that were passed to the
        #: :meth:`~flask.Flask.register_blueprint` method.
        self.options = options

        #: as blueprints can be registered multiple times with the
        #: application and not everything wants to be registered
        #: multiple times on it, this attribute can be used to figure
        #: out if the blueprint was registered in the past already.
        self.first_registration = first_registration

        #: The subdomain that the blueprint should be active for, ``None``
        #: otherwise.  An explicit registration option wins over the
        #: blueprint's own default.
        self.subdomain = options.get('subdomain')
        if self.subdomain is None:
            self.subdomain = blueprint.subdomain

        #: The prefix that should be used for all URLs defined on the
        #: blueprint.
        self.url_prefix = options.get('url_prefix')
        if self.url_prefix is None:
            self.url_prefix = blueprint.url_prefix

        #: A dictionary with URL defaults that is added to each and every
        #: URL that was defined with the blueprint.
        self.url_defaults = dict(blueprint.url_values_defaults)
        self.url_defaults.update(options.get('url_defaults', ()))

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """A helper method to register a rule (and optionally a view function)
        to the application.  The endpoint is automatically prefixed with the
        blueprint's name.
        """
        if self.url_prefix:
            rule = self.url_prefix + rule
        options.setdefault('subdomain', self.subdomain)
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func)
        defaults = self.url_defaults
        if 'defaults' in options:
            defaults = dict(defaults, **options.pop('defaults'))
        full_endpoint = '%s.%s' % (self.blueprint.name, endpoint)
        self.app.add_url_rule(rule, full_endpoint, view_func,
                              defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
    """Represents a blueprint.  A blueprint is an object that records
    functions that will be called with the
    :class:`~flask.blueprints.BlueprintSetupState` later to register functions
    or other things on the main application.  See :ref:`blueprints` for more
    information.

    .. versionadded:: 0.7
    """

    #: If ``True``, :meth:`record` emits a warning when the blueprint is
    #: modified after it has already been registered on an application.
    warn_on_modifications = False

    # Internal flag flipped by :meth:`register`; consulted by :meth:`record`.
    _got_registered_once = False

    def __init__(self, name, import_name, static_folder=None,
                 static_url_path=None, template_folder=None,
                 url_prefix=None, subdomain=None, url_defaults=None,
                 root_path=None):
        _PackageBoundObject.__init__(self, import_name, template_folder,
                                     root_path=root_path)
        self.name = name
        self.url_prefix = url_prefix
        self.subdomain = subdomain
        self.static_folder = static_folder
        self.static_url_path = static_url_path
        # Deferred callables, each taking a BlueprintSetupState; they are
        # invoked when the blueprint is registered on an application.
        self.deferred_functions = []
        if url_defaults is None:
            url_defaults = {}
        self.url_values_defaults = url_defaults

    def record(self, func):
        """Registers a function that is called when the blueprint is
        registered on the application.  This function is called with the
        state as argument as returned by the :meth:`make_setup_state`
        method.
        """
        if self._got_registered_once and self.warn_on_modifications:
            from warnings import warn
            warn(Warning('The blueprint was already registered once '
                         'but is getting modified now.  These changes '
                         'will not show up.'))
        self.deferred_functions.append(func)

    def record_once(self, func):
        """Works like :meth:`record` but wraps the function in another
        function that will ensure the function is only called once.  If the
        blueprint is registered a second time on the application, the
        function passed is not called.
        """
        def wrapper(state):
            # Only fire on the blueprint's first registration.
            if state.first_registration:
                func(state)
        return self.record(update_wrapper(wrapper, func))

    def make_setup_state(self, app, options, first_registration=False):
        """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
        object that is later passed to the register callback functions.
        Subclasses can override this to return a subclass of the setup state.
        """
        return BlueprintSetupState(self, app, options, first_registration)

    def register(self, app, options, first_registration=False):
        """Called by :meth:`Flask.register_blueprint` to register a blueprint
        on the application.  This can be overridden to customize the register
        behavior.  Keyword arguments from
        :func:`~flask.Flask.register_blueprint` are directly forwarded to this
        method in the `options` dictionary.
        """
        self._got_registered_once = True
        state = self.make_setup_state(app, options, first_registration)
        if self.has_static_folder:
            # Expose the blueprint's static folder under its URL prefix.
            state.add_url_rule(self.static_url_path + '/<path:filename>',
                               view_func=self.send_static_file,
                               endpoint='static')
        for deferred in self.deferred_functions:
            deferred(state)

    def route(self, rule, **options):
        """Like :meth:`Flask.route` but for a blueprint.  The endpoint for the
        :func:`url_for` function is prefixed with the name of the blueprint.
        """
        def decorator(f):
            endpoint = options.pop("endpoint", f.__name__)
            self.add_url_rule(rule, endpoint, f, **options)
            return f
        return decorator

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Like :meth:`Flask.add_url_rule` but for a blueprint.  The endpoint for
        the :func:`url_for` function is prefixed with the name of the blueprint.
        """
        if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
        self.record(lambda s:
            s.add_url_rule(rule, endpoint, view_func, **options))

    def endpoint(self, endpoint):
        """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
        prefix the endpoint with the blueprint name, this has to be done
        explicitly by the user of this method.  If the endpoint is prefixed
        with a `.` it will be registered to the current blueprint, otherwise
        it's an application independent endpoint.
        """
        def decorator(f):
            def register_endpoint(state):
                state.app.view_functions[endpoint] = f
            self.record_once(register_endpoint)
            return f
        return decorator

    def app_template_filter(self, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.template_filter` but for a blueprint.

        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_filter(f, name=name)
            return f
        return decorator

    def add_app_template_filter(self, f, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.add_template_filter` but for a blueprint.  Works exactly
        like the :meth:`app_template_filter` decorator.

        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.filters[name or f.__name__] = f
        self.record_once(register_template)

    def app_template_test(self, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.template_test` but for a blueprint.

        .. versionadded:: 0.10

        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_test(f, name=name)
            return f
        return decorator

    def add_app_template_test(self, f, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.add_template_test` but for a blueprint.  Works exactly
        like the :meth:`app_template_test` decorator.

        .. versionadded:: 0.10

        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.tests[name or f.__name__] = f
        self.record_once(register_template)

    def app_template_global(self, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.template_global` but for a blueprint.

        .. versionadded:: 0.10

        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_global(f, name=name)
            return f
        return decorator

    def add_app_template_global(self, f, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.add_template_global` but for a blueprint.  Works exactly
        like the :meth:`app_template_global` decorator.

        .. versionadded:: 0.10

        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.globals[name or f.__name__] = f
        self.record_once(register_template)

    def before_request(self, f):
        """Like :meth:`Flask.before_request` but for a blueprint.  This function
        is only executed before each request that is handled by a function of
        that blueprint.
        """
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def before_app_request(self, f):
        """Like :meth:`Flask.before_request`.  Such a function is executed
        before each request, even if outside of a blueprint.
        """
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(None, []).append(f))
        return f

    def before_app_first_request(self, f):
        """Like :meth:`Flask.before_first_request`.  Such a function is
        executed before the first request to the application.
        """
        self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
        return f

    def after_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  This function
        is only executed after each request that is handled by a function of
        that blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def after_app_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  Such a function
        is executed after each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(None, []).append(f))
        return f

    def teardown_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  This
        function is only executed when tearing down requests handled by a
        function of that blueprint.  Teardown request functions are executed
        when the request context is popped, even when no actual request was
        performed.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(self.name, []).append(f))
        return f

    def teardown_app_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  Such a
        function is executed when tearing down each request, even if outside of
        the blueprint.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(None, []).append(f))
        return f

    def context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  This
        function is only executed for requests handled by a blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(self.name, []).append(f))
        return f

    def app_context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  Such a
        function is executed each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(None, []).append(f))
        return f

    def app_errorhandler(self, code):
        """Like :meth:`Flask.errorhandler` but for a blueprint.  This
        handler is used for all requests, even if outside of the blueprint.
        """
        def decorator(f):
            self.record_once(lambda s: s.app.errorhandler(code)(f))
            return f
        return decorator

    def url_value_preprocessor(self, f):
        """Registers a function as URL value preprocessor for this
        blueprint.  It's called before the view functions are called and
        can modify the url values provided.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(self.name, []).append(f))
        return f

    def url_defaults(self, f):
        """Callback function for URL defaults for this blueprint.  It's called
        with the endpoint and values and should update the values passed
        in place.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(self.name, []).append(f))
        return f

    def app_url_value_preprocessor(self, f):
        """Same as :meth:`url_value_preprocessor` but application wide.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(None, []).append(f))
        return f

    def app_url_defaults(self, f):
        """Same as :meth:`url_defaults` but application wide.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(None, []).append(f))
        return f

    def errorhandler(self, code_or_exception):
        """Registers an error handler that becomes active for this blueprint
        only.  Please be aware that routing does not happen local to a
        blueprint so an error handler for 404 usually is not handled by
        a blueprint unless it is caused inside a view function.  Another
        special case is the 500 internal server error which is always looked
        up from the application.

        Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
        of the :class:`~flask.Flask` object.
        """
        def decorator(f):
            self.record_once(lambda s: s.app._register_error_handler(
                self.name, code_or_exception, f))
            return f
        return decorator

    def register_error_handler(self, code_or_exception, f):
        """Non-decorator version of the :meth:`errorhandler` error attach
        function, akin to the :meth:`~flask.Flask.register_error_handler`
        application-wide function of the :class:`~flask.Flask` object but
        for error handlers limited to this blueprint.

        .. versionadded:: 0.11
        """
        self.record_once(lambda s: s.app._register_error_handler(
            self.name, code_or_exception, f))
| apache-2.0 |
yangchandle/FlaskTaskr | env/lib/python3.5/site-packages/werkzeug/script.py | 30 | 11241 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you write such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from os.path import basename
from werkzeug._compat import iteritems
#: Maps the type of an argument's default value to the human-readable
#: type name that is shown in the generated ``--help`` output.
argument_types = {
    bool: 'boolean',
    str: 'string',
    int: 'integer',
    float: 'float'
}

#: Maps those type names to converter callables that parse the raw
#: command-line string into the corresponding Python value.
converters = {
    'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
    'string': str,
    'integer': int,
    'float': float
}
def run(namespace=None, action_prefix='action_', args=None):
    """Run the script.  Participating actions are looked up in the caller's
    namespace if no namespace is given, otherwise in the dict provided.
    Only items that start with action_prefix are processed as actions.  If
    you want to use all items in the namespace provided as actions set
    action_prefix to an empty string.

    :param namespace: An optional dict where the functions are looked up in.
                      By default the local namespace of the caller is used.
    :param action_prefix: The prefix for the functions.  Everything else
                          is ignored.
    :param args: the arguments for the function.  If not specified
                 :data:`sys.argv` without the first argument is used.
    """
    if namespace is None:
        namespace = sys._getframe(1).f_locals
    actions = find_actions(namespace, action_prefix)
    if args is None:
        args = sys.argv[1:]
    if not args or args[0] in ('-h', '--help'):
        return print_usage(actions)
    elif args[0] not in actions:
        fail('Unknown action \'%s\'' % args[0])
    arguments = {}
    types = {}
    key_to_arg = {}
    long_options = []
    formatstring = ''
    func, doc, arg_def = actions[args.pop(0)]
    # Build the getopt format string and the lookup tables mapping
    # '-x' / '--long' / positional index -> real argument name.
    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
        real_arg = arg.replace('-', '_')
        if shortcut:
            formatstring += shortcut
            if not isinstance(default, bool):
                formatstring += ':'
            key_to_arg['-' + shortcut] = real_arg
        long_options.append(isinstance(default, bool) and arg or arg + '=')
        key_to_arg['--' + arg] = real_arg
        key_to_arg[idx] = real_arg
        types[real_arg] = option_type
        arguments[real_arg] = default
    try:
        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
        fail(str(e))
    specified_arguments = set()
    for key, value in enumerate(posargs):
        try:
            arg = key_to_arg[key]
        except KeyError:
            # ``key_to_arg`` is a dict, so a surplus positional argument
            # raises KeyError — the previous ``except IndexError`` made this
            # branch unreachable and surplus arguments crashed instead.
            fail('Too many parameters')
        specified_arguments.add(arg)
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
    for key, value in optlist:
        arg = key_to_arg[key]
        if arg in specified_arguments:
            fail('Argument \'%s\' is specified twice' % arg)
        if types[arg] == 'boolean':
            # Boolean flags carry no value on the command line; the presence
            # of a '--no-x' style flag means "no", otherwise "yes".
            if arg.startswith('no_'):
                value = 'no'
            else:
                value = 'yes'
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for \'%s\': %s' % (key, value))
    newargs = {}
    # Strip the 'no_' prefix from inverted boolean flags before calling.
    for k, v in iteritems(arguments):
        newargs[k.startswith('no_') and k[3:] or k] = v
    arguments = newargs
    return func(**arguments)
def fail(message, code=-1):
    """Write an error *message* to stderr and exit with status *code*."""
    sys.stderr.write('Error: %s\n' % message)
    sys.exit(code)
def find_actions(namespace, action_prefix):
    """Collect every action in *namespace* whose name starts with
    *action_prefix*, keyed by the name with the prefix stripped."""
    prefix_len = len(action_prefix)
    return dict(
        (key[prefix_len:], analyse_action(value))
        for key, value in iteritems(namespace)
        if key.startswith(action_prefix)
    )
def print_usage(actions):
    """Print the usage information (the ``--help`` screen)."""
    prog = basename(sys.argv[0])
    print('usage: %s <action> [<options>]' % prog)
    print('       %s --help' % prog)
    print()
    print('actions:')
    for name, (func, doc, arguments) in sorted(iteritems(actions)):
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            option = (shortcut and '-%s, ' % shortcut or '') + '--' + arg
            if isinstance(default, bool):
                # Boolean flags take no value, so no type/default column.
                print('    %s' % option)
            else:
                print('    %-30s%-10s%s' % (option, argtype, default))
        print()
def analyse_action(func):
    """Analyse a function and return a ``(func, description, arguments)``
    triple as consumed by :func:`run` and :func:`print_usage`.

    Every parameter must have a default value; the type of that default
    decides how command-line input is converted.
    """
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    # inspect.getargspec() was deprecated in 3.0 and removed in Python 3.11;
    # prefer getfullargspec() when available, fall back on old interpreters.
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(func)
        args, varargs, kwargs, defaults = \
            spec.args, spec.varargs, spec.varkw, spec.defaults
    else:
        args, varargs, kwargs, defaults = inspect.getargspec(func)
    if varargs or kwargs:
        raise TypeError('variable length arguments for action not allowed.')
    if len(args) != len(defaults or ()):
        raise TypeError('not all arguments have proper definitions')
    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        # A tuple default is (shortcut, default_value); anything else is
        # just the default value without a short option.
        if not isinstance(definition, tuple):
            shortcut = None
            default = definition
        else:
            shortcut, default = definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            # True booleans are exposed as their '--no-<arg>' inverse.
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
    """Returns an action callback that spawns a new interactive
    python shell.

    :param init_func: an optional initialization function that is
                      called before the shell is started.  The return
                      value of this function is the initial namespace.
    :param banner: the banner that is displayed before the shell.  If
                   not specified a generic banner is used instead.
    :param use_ipython: if set to `True` ipython is used if available.
    """
    if banner is None:
        banner = 'Interactive Werkzeug Shell'
    if init_func is None:
        init_func = dict

    def _ipython_shell():
        # Try the modern embed API first, then the legacy one; either
        # failing raises ImportError for the caller to handle.
        try:
            from IPython.frontend.terminal.embed import InteractiveShellEmbed
            return InteractiveShellEmbed(banner1=banner)
        except ImportError:
            from IPython.Shell import IPShellEmbed
            return IPShellEmbed(banner=banner)

    def action(ipython=use_ipython):
        """Start a new interactive python session."""
        namespace = init_func()
        if ipython:
            try:
                sh = _ipython_shell()
            except ImportError:
                pass
            else:
                sh(global_ns={}, local_ns=namespace)
                return
        from code import interact
        interact(banner, local=namespace)
    return action
def make_runserver(app_factory, hostname='localhost', port=5000,
                   use_reloader=False, use_debugger=False, use_evalex=True,
                   threaded=False, processes=1, static_files=None,
                   extra_files=None, ssl_context=None):
    """Returns an action callback that spawns a new development server.

    :param app_factory: a function that returns a new WSGI application.
    :param hostname: the default hostname the server should listen on.
    :param port: the default port of the server.
    :param use_reloader: the default setting for the reloader.
    :param use_debugger: the default setting for the debugger.
    :param use_evalex: the default setting for the evalex flag of the debugger.
    :param threaded: the default threading setting.
    :param processes: the default number of processes to start.
    :param static_files: optional dict of static files.
    :param extra_files: optional list of extra files to track for reloading.
    :param ssl_context: optional SSL context for running server in HTTPS mode.

    .. versionadded:: 0.5
       `static_files` and `extra_files` was added.

    .. versionadded:: 0.6.1
       `ssl_context` was added.
    """
    # The tuple defaults below expose '-h' / '-p' shortcuts to run()'s
    # argument analysis, so the inner signature must stay exactly as-is.
    def action(hostname=('h', hostname), port=('p', port),
               reloader=use_reloader, debugger=use_debugger,
               evalex=use_evalex, threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        application = app_factory()
        run_simple(hostname, port, application,
                   reloader, debugger, evalex, extra_files, 1,
                   threaded, processes, static_files=static_files,
                   ssl_context=ssl_context)
    return action
| mit |
Superjom/models-1 | dssm/reader.py | 4 | 4091 | from utils import UNK, ModelType, TaskType, load_dic, \
sent2ids, logger, ModelType
class Dataset(object):
    """Reader for DSSM train/test/infer data sets.

    Depending on ``model_type`` (classification, regression or rank) a
    matching record parser is selected in ``__init__`` and used by
    :meth:`train`, :meth:`test` and :meth:`infer`.
    """

    def __init__(self, train_path, test_path, source_dic_path, target_dic_path,
                 model_type):
        # Paths to the data files and the word dictionaries.
        self.train_path = train_path
        self.test_path = test_path
        self.source_dic_path = source_dic_path
        self.target_dic_path = target_dic_path
        # NOTE(review): the assert below checks the *original* argument, so
        # callers must already pass a ModelType instance; the re-wrap here is
        # effectively a copy-construction.
        self.model_type = ModelType(model_type)

        self.source_dic = load_dic(self.source_dic_path)
        self.target_dic = load_dic(self.target_dic_path)

        # Dispatch table from model mode to the matching record parser.
        _record_reader = {
            ModelType.CLASSIFICATION_MODE: self._read_classification_record,
            ModelType.REGRESSION_MODE: self._read_regression_record,
            ModelType.RANK_MODE: self._read_rank_record,
        }

        assert isinstance(model_type, ModelType)
        self.record_reader = _record_reader[model_type.mode]
        # When True the record readers omit the label field.
        self.is_infer = False

    def train(self):
        '''
        Load trainset.
        '''
        logger.info("[reader] load trainset from %s" % self.train_path)
        with open(self.train_path) as f:
            for line_id, line in enumerate(f):
                yield self.record_reader(line)

    def test(self):
        '''
        Load testset.
        '''
        with open(self.test_path) as f:
            for line_id, line in enumerate(f):
                yield self.record_reader(line)

    def infer(self):
        # NOTE(review): inference reads ``train_path`` rather than a
        # dedicated infer file — confirm this is intended.
        self.is_infer = True
        with open(self.train_path) as f:
            for line in f:
                yield self.record_reader(line)

    def _read_classification_record(self, line):
        '''
        data format:
            <source words> [TAB] <target words> [TAB] <label>

        @line: str
            a string line which represent a record.
        '''
        fs = line.strip().split('\t')
        assert len(fs) == 3, "wrong format for classification\n" + \
            "the format shoud be " +\
            "<source words> [TAB] <target words> [TAB] <label>'"
        source = sent2ids(fs[0], self.source_dic)
        target = sent2ids(fs[1], self.target_dic)
        if not self.is_infer:
            label = int(fs[2])
            return (
                source,
                target,
                label, )
        return source, target

    def _read_regression_record(self, line):
        '''
        data format:
            <source words> [TAB] <target words> [TAB] <label>

        @line: str
            a string line which represent a record.
        '''
        fs = line.strip().split('\t')
        assert len(fs) == 3, "wrong format for regression\n" + \
            "the format shoud be " +\
            "<source words> [TAB] <target words> [TAB] <label>'"
        source = sent2ids(fs[0], self.source_dic)
        target = sent2ids(fs[1], self.target_dic)
        if not self.is_infer:
            # Regression labels are floats and wrapped in a list.
            label = float(fs[2])
            return (
                source,
                target,
                [label], )
        return source, target

    def _read_rank_record(self, line):
        '''
        data format:
        <source words> [TAB] <left_target words> [TAB] <right_target words> [TAB] <label>
        '''
        fs = line.strip().split('\t')
        assert len(fs) == 4, "wrong format for rank\n" + \
            "the format should be " +\
            "<source words> [TAB] <left_target words> [TAB] <right_target words> [TAB] <label>"

        source = sent2ids(fs[0], self.source_dic)
        left_target = sent2ids(fs[1], self.target_dic)
        right_target = sent2ids(fs[2], self.target_dic)
        if not self.is_infer:
            label = int(fs[3])
            return (source, left_target, right_target, label)
        return source, left_target, right_target
if __name__ == '__main__':
    # Manual smoke test: stream the classification trainset and dump records.
    path = './data/classification/train.txt'
    test_path = './data/classification/test.txt'
    source_dic = './data/vocab.txt'
    dataset = Dataset(path, test_path, source_dic, source_dic,
                      ModelType.CLASSIFICATION)
    for rcd in dataset.train():
        # BUG FIX: 'print rcd' is Python-2-only statement syntax and makes
        # the module unparseable under Python 3; the call form prints the
        # same thing on both interpreters.
        print(rcd)
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/iaas_vm_recovery_point.py | 2 | 4613 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .recovery_point import RecoveryPoint
class IaasVMRecoveryPoint(RecoveryPoint):
    """A backup copy (recovery point) of an IaaS virtual machine workload.

    :param object_type: Polymorphic Discriminator
    :type object_type: str
    :param recovery_point_type: Type of the backup copy.
    :type recovery_point_type: str
    :param recovery_point_time: Time at which this backup copy was created.
    :type recovery_point_time: datetime
    :param recovery_point_additional_info: Additional information associated
     with this backup copy.
    :type recovery_point_additional_info: str
    :param source_vm_storage_type: Storage type of the VM whose backup copy
     is created.
    :type source_vm_storage_type: str
    :param is_source_vm_encrypted: Identifies whether the VM was encrypted
     when the backup copy is created.
    :type is_source_vm_encrypted: bool
    :param key_and_secret: Required details for recovering an encrypted VM.
     Applicable only when IsSourceVMEncrypted is true.
    :type key_and_secret: :class:`KeyAndSecretDetails
     <azure.mgmt.recoveryservicesbackup.models.KeyAndSecretDetails>`
    :param is_instant_ilr_session_active: Is the session to recover items
     from this backup copy still active.
    :type is_instant_ilr_session_active: bool
    :param recovery_point_tier_details: Recovery point tier information.
    :type recovery_point_tier_details: list of
     :class:`RecoveryPointTierInformation
     <azure.mgmt.recoveryservicesbackup.models.RecoveryPointTierInformation>`
    :param is_managed_virtual_machine: Whether VM is with Managed Disks
    :type is_managed_virtual_machine: bool
    :param virtual_machine_size: Virtual Machine Size
    :type virtual_machine_size: str
    :param original_storage_account_option: Original SA Option
    :type original_storage_account_option: bool
    """

    # Serialization metadata consumed by the msrest (de)serializer; the
    # discriminator field is the only mandatory one.
    _validation = {
        'object_type': {'required': True},
    }

    _attribute_map = {
        'object_type': {'key': 'objectType', 'type': 'str'},
        'recovery_point_type': {'key': 'recoveryPointType', 'type': 'str'},
        'recovery_point_time': {'key': 'recoveryPointTime', 'type': 'iso-8601'},
        'recovery_point_additional_info': {'key': 'recoveryPointAdditionalInfo', 'type': 'str'},
        'source_vm_storage_type': {'key': 'sourceVMStorageType', 'type': 'str'},
        'is_source_vm_encrypted': {'key': 'isSourceVMEncrypted', 'type': 'bool'},
        'key_and_secret': {'key': 'keyAndSecret', 'type': 'KeyAndSecretDetails'},
        'is_instant_ilr_session_active': {'key': 'isInstantILRSessionActive', 'type': 'bool'},
        'recovery_point_tier_details': {'key': 'recoveryPointTierDetails', 'type': '[RecoveryPointTierInformation]'},
        'is_managed_virtual_machine': {'key': 'isManagedVirtualMachine', 'type': 'bool'},
        'virtual_machine_size': {'key': 'virtualMachineSize', 'type': 'str'},
        'original_storage_account_option': {'key': 'originalStorageAccountOption', 'type': 'bool'},
    }

    def __init__(self, recovery_point_type=None, recovery_point_time=None, recovery_point_additional_info=None, source_vm_storage_type=None, is_source_vm_encrypted=None, key_and_secret=None, is_instant_ilr_session_active=None, recovery_point_tier_details=None, is_managed_virtual_machine=None, virtual_machine_size=None, original_storage_account_option=None):
        super(IaasVMRecoveryPoint, self).__init__()
        # Polymorphic discriminator identifying this subtype.
        self.object_type = 'IaasVMRecoveryPoint'
        # What/when this backup copy is.
        self.recovery_point_type = recovery_point_type
        self.recovery_point_time = recovery_point_time
        self.recovery_point_additional_info = recovery_point_additional_info
        # Source VM properties captured at backup time.
        self.source_vm_storage_type = source_vm_storage_type
        self.is_source_vm_encrypted = is_source_vm_encrypted
        self.key_and_secret = key_and_secret
        self.is_managed_virtual_machine = is_managed_virtual_machine
        self.virtual_machine_size = virtual_machine_size
        # Restore-related details.
        self.is_instant_ilr_session_active = is_instant_ilr_session_active
        self.recovery_point_tier_details = recovery_point_tier_details
        self.original_storage_account_option = original_storage_account_option
| mit |
odyaka341/pyglet | tools/genmpkg/bdist_mpkg_pyglet/tools.py | 26 | 4101 | import os
import sys
from itertools import chain
from distutils.util import spawn
from distutils.version import StrictVersion, LooseVersion
from distutils.dir_util import mkpath
import distutils.core
# Python < 2.4 compatibility: the builtin 'set' does not exist there, so fall
# back to sets.Set under the same name.  On modern interpreters the bare name
# lookup succeeds and nothing is imported.
try:
    set
except NameError:
    from sets import Set as set
def Version(s):
    """Parse *s* as a version object.

    Prefers the strict PEP-386-style parser; anything StrictVersion rejects
    is parsed permissively with LooseVersion instead.
    """
    try:
        parsed = StrictVersion(s)
    except ValueError:
        parsed = LooseVersion(s)
    return parsed
def run_setup(*args, **kwargs):
    """
    Re-entrant version of distutils.core.run_setup()
    """
    # distutils.core.run_setup mutates these module-level globals; snapshot
    # whichever of them currently exist and restore them afterwards so that
    # nested invocations do not clobber an outer run's state.
    saved = {}
    for name in ('_setup_stop_after', '_setup_distribution'):
        try:
            saved[name] = getattr(distutils.core, name)
        except AttributeError:
            pass
    try:
        return distutils.core.run_setup(*args, **kwargs)
    finally:
        for name, value in saved.items():
            setattr(distutils.core, name, value)
def adminperms(src, verbose=0, dry_run=0):
    """Recursively give *src* root ownership, admin group and rwX perms.

    Returns True on success; raises RuntimeError if any of the chown /
    chgrp / chmod invocations fails (typically because we are not root).
    """
    try:
        # Awful unavoidable quirk: package must be built as root.
        spawn(['/usr/sbin/chown', '-R', 'root', src])
        spawn(['/usr/bin/chgrp', '-R', 'admin', src])
        spawn(['/bin/chmod', '-R', 'u=rwX,g=rwX,o=rX', src])
    # BUG FIX: this was a bare 'except:', which also swallowed SystemExit
    # and KeyboardInterrupt; only genuine errors should be translated.
    except Exception:
        raise RuntimeError('Cannot chown/chgrp/chmod. Are you running sudo?')
    return True
def mkbom(src, pkgdir, verbose=0, dry_run=0, TOOL='/usr/bin/mkbom'):
    """
    Create a bill-of-materials (BOM) for the given src directory and store it
    to the given pkg directory
    """
    bom = os.path.join(pkgdir, 'Contents', 'Archive.bom')
    # Make sure the Contents/ directory exists before invoking the tool.
    mkpath(os.path.dirname(bom), verbose=verbose, dry_run=dry_run)
    spawn([TOOL, src, bom], verbose=verbose, dry_run=dry_run)
def pax(src, pkgdir, verbose=0, dry_run=0, TOOL='/bin/pax'):
    """
    Create a pax gzipped cpio archive of the given src directory and store it
    to the given pkg directory

    returns size of archive
    """
    dest = os.path.realpath(os.path.join(pkgdir, 'Contents', 'Archive.pax.gz'))
    mkpath(os.path.dirname(dest), verbose=verbose, dry_run=dry_run)
    # pax archives relative to the current directory, so chdir into src and
    # always restore the previous directory even if pax fails.
    pwd = os.path.realpath(os.getcwd())
    os.chdir(src)
    try:
        # BUG FIX: verbose/dry_run were not forwarded to spawn() (unlike the
        # mkbom/mkpath calls), so dry-run builds still executed pax.
        spawn([TOOL, '-w', '-f', dest, '-x', 'cpio', '-z', '.'],
              verbose=verbose, dry_run=dry_run)
    finally:
        os.chdir(pwd)
    if dry_run:
        # Nothing was written, so there is no archive to stat.
        return 0
    return os.stat(dest).st_size
def unicode_path(path, encoding=sys.getfilesystemencoding()):
    """Return *path* as a Python 2 ``unicode`` string.

    Byte strings are decoded with *encoding*; note the default encoding is
    captured once at function-definition time (original behaviour).
    """
    if not isinstance(path, unicode):
        path = unicode(path, encoding)
    return path
def walk_files(path):
    """Yield the full path of every regular file beneath *path*."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            yield os.path.join(dirpath, name)
def get_gid(name, _cache={}):
    """Return the numeric gid for group *name* via ``nidump`` output.

    The mutable default argument is an intentional process-wide cache: the
    group database is read only on the first call.
    """
    if not _cache:
        for entry in os.popen('/usr/bin/nidump group .'):
            parts = entry.split(':')
            if len(parts) >= 3:
                # Apparently name:passwd:gid:... records; keep name -> gid.
                _cache[parts[0]] = int(parts[2])
    if name in _cache:
        return _cache[name]
    raise ValueError('group %s not found' % (name,))
def find_root(path, base='/'):
    """
    Return the list of files, the archive directory, and the destination path
    """
    files = list(walk_files(path))
    # Character-wise common prefix of all file paths, truncated to a directory.
    common = os.path.dirname(os.path.commonprefix(files))
    # Re-root the common directory (relative to path) under base.
    relative = common[len(os.path.join(path, '')):]
    prefix = os.path.realpath(os.path.join(base, relative))
    return files, common, prefix
def admin_writable(path):
    """Return whether *path* is writable by the admin group (or anyone).

    If *path* does not exist, the check is applied to its nearest existing
    ancestor directory.  The return value is truthy/falsy (a masked mode
    int or a boolean expression), matching the original contract.
    """
    import stat  # local import keeps the module's import header untouched

    gid = get_gid('admin')
    # Walk up to the nearest component that actually exists.
    while not os.path.exists(path):
        path = os.path.dirname(path)
    s = os.stat(path)
    mode = s.st_mode
    # BUG FIX: the masks were written as the literals 00002/00020, which is
    # invalid syntax on Python 3 (PEP 3127); the stat constants carry the
    # same values (S_IWOTH == 0o002, S_IWGRP == 0o020) and are clearer.
    return (mode & stat.S_IWOTH) or (s.st_gid == gid and mode & stat.S_IWGRP)
def reduce_size(files):
    """Return the combined size in bytes of the given *files*."""
    return sum(os.stat(fn).st_size for fn in files)
def sw_vers(_cache=[]):
    """Return the host's Mac OS X ProductVersion as a Version object.

    The mutable default argument is an intentional one-shot cache: the
    ``sw_vers`` tool is only invoked on the first call.
    """
    if _cache:
        return _cache[0]
    output = os.popen('/usr/bin/sw_vers').read().splitlines()
    for line in output:
        key, value = line.split(None, 1)
        if key != 'ProductVersion:':
            continue
        _cache.append(Version(value.strip()))
        return _cache[0]
    raise ValueError("sw_vers not behaving correctly")
def is_framework_python():
    """Return True when running from a Mac OS X ``.framework`` Python build."""
    grandparent = os.path.dirname(os.path.dirname(sys.prefix))
    return grandparent.endswith('.framework')
| bsd-3-clause |
FlaPer87/django-nonrel | django/contrib/localflavor/pl/pl_administrativeunits.py | 433 | 13194 | # -*- coding: utf-8 -*-
"""
Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski
"""
# Choices are (ascii-slug, localized-name) pairs, grouped by voivodeship:
# first the voivodeship's cities with county rights, then its counties
# ("powiaty").  Slugs repeat across voivodeships (e.g. 'brzeski', 'opolski',
# 'tomaszowski'), so the slug alone is NOT a unique key.
# NOTE(review): a few values below look garbled relative to the official
# county names; the suspect entries are flagged inline -- confirm against
# the official TERYT register before changing any of them.
ADMINISTRATIVE_UNIT_CHOICES = (
    ('wroclaw', u'Wrocław'),
    ('jeleniagora', u'Jelenia Góra'),
    ('legnica', u'Legnica'),
    ('boleslawiecki', u'bolesławiecki'),
    ('dzierzoniowski', u'dzierżoniowski'),
    ('glogowski', u'głogowski'),
    ('gorowski', u'górowski'),
    ('jaworski', u'jaworski'),
    ('jeleniogorski', u'jeleniogórski'),
    ('kamiennogorski', u'kamiennogórski'),
    ('klodzki', u'kłodzki'),
    ('legnicki', u'legnicki'),
    ('lubanski', u'lubański'),
    ('lubinski', u'lubiński'),
    ('lwowecki', u'lwówecki'),
    ('milicki', u'milicki'),
    ('olesnicki', u'oleśnicki'),
    ('olawski', u'oławski'),
    ('polkowicki', u'polkowicki'),
    ('strzelinski', u'strzeliński'),
    ('sredzki', u'średzki'),
    ('swidnicki', u'świdnicki'),
    ('trzebnicki', u'trzebnicki'),
    ('walbrzyski', u'wałbrzyski'),
    ('wolowski', u'wołowski'),
    ('wroclawski', u'wrocławski'),
    ('zabkowicki', u'ząbkowicki'),
    ('zgorzelecki', u'zgorzelecki'),
    ('zlotoryjski', u'złotoryjski'),
    ('bydgoszcz', u'Bydgoszcz'),
    ('torun', u'Toruń'),
    ('wloclawek', u'Włocławek'),
    ('grudziadz', u'Grudziądz'),
    ('aleksandrowski', u'aleksandrowski'),
    ('brodnicki', u'brodnicki'),
    ('bydgoski', u'bydgoski'),
    ('chelminski', u'chełmiński'),
    ('golubsko-dobrzynski', u'golubsko-dobrzyński'),
    ('grudziadzki', u'grudziądzki'),
    ('inowroclawski', u'inowrocławski'),
    ('lipnowski', u'lipnowski'),
    ('mogilenski', u'mogileński'),
    ('nakielski', u'nakielski'),
    ('radziejowski', u'radziejowski'),
    ('rypinski', u'rypiński'),
    ('sepolenski', u'sępoleński'),
    ('swiecki', u'świecki'),
    ('torunski', u'toruński'),
    ('tucholski', u'tucholski'),
    ('wabrzeski', u'wąbrzeski'),
    # NOTE(review): value looks copy/pasted from 'wroclawski'; expected
    # u'włocławski' (Włocławek county) -- confirm.
    ('wloclawski', u'wrocławski'),
    # NOTE(review): expected u'żniński' (Żnin county), not u'źniński' -- confirm.
    ('zninski', u'źniński'),
    ('lublin', u'Lublin'),
    ('biala-podlaska', u'Biała Podlaska'),
    ('chelm', u'Chełm'),
    ('zamosc', u'Zamość'),
    ('bialski', u'bialski'),
    ('bilgorajski', u'biłgorajski'),
    ('chelmski', u'chełmski'),
    ('hrubieszowski', u'hrubieszowski'),
    ('janowski', u'janowski'),
    ('krasnostawski', u'krasnostawski'),
    ('krasnicki', u'kraśnicki'),
    ('lubartowski', u'lubartowski'),
    ('lubelski', u'lubelski'),
    ('leczynski', u'łęczyński'),
    ('lukowski', u'łukowski'),
    ('opolski', u'opolski'),
    ('parczewski', u'parczewski'),
    ('pulawski', u'puławski'),
    ('radzynski', u'radzyński'),
    ('rycki', u'rycki'),
    ('swidnicki', u'świdnicki'),
    ('tomaszowski', u'tomaszowski'),
    ('wlodawski', u'włodawski'),
    ('zamojski', u'zamojski'),
    ('gorzow-wielkopolski', u'Gorzów Wielkopolski'),
    ('zielona-gora', u'Zielona Góra'),
    ('gorzowski', u'gorzowski'),
    ('krosnienski', u'krośnieński'),
    ('miedzyrzecki', u'międzyrzecki'),
    ('nowosolski', u'nowosolski'),
    ('slubicki', u'słubicki'),
    ('strzelecko-drezdenecki', u'strzelecko-drezdenecki'),
    # NOTE(review): expected u'sulęciński' (Sulęcin county) -- confirm.
    ('sulecinski', u'suleńciński'),
    ('swiebodzinski', u'świebodziński'),
    ('wschowski', u'wschowski'),
    ('zielonogorski', u'zielonogórski'),
    ('zaganski', u'żagański'),
    ('zarski', u'żarski'),
    ('lodz', u'Łódź'),
    ('piotrkow-trybunalski', u'Piotrków Trybunalski'),
    ('skierniewice', u'Skierniewice'),
    ('belchatowski', u'bełchatowski'),
    ('brzezinski', u'brzeziński'),
    ('kutnowski', u'kutnowski'),
    ('laski', u'łaski'),
    ('leczycki', u'łęczycki'),
    ('lowicki', u'łowicki'),
    # NOTE(review): the slug contains a space, unlike every other entry -- confirm.
    ('lodzki wschodni', u'łódzki wschodni'),
    ('opoczynski', u'opoczyński'),
    ('pabianicki', u'pabianicki'),
    ('pajeczanski', u'pajęczański'),
    ('piotrkowski', u'piotrkowski'),
    ('poddebicki', u'poddębicki'),
    ('radomszczanski', u'radomszczański'),
    ('rawski', u'rawski'),
    ('sieradzki', u'sieradzki'),
    ('skierniewicki', u'skierniewicki'),
    ('tomaszowski', u'tomaszowski'),
    ('wielunski', u'wieluński'),
    ('wieruszowski', u'wieruszowski'),
    ('zdunskowolski', u'zduńskowolski'),
    ('zgierski', u'zgierski'),
    ('krakow', u'Kraków'),
    ('tarnow', u'Tarnów'),
    ('nowy-sacz', u'Nowy Sącz'),
    ('bochenski', u'bocheński'),
    ('brzeski', u'brzeski'),
    ('chrzanowski', u'chrzanowski'),
    ('dabrowski', u'dąbrowski'),
    ('gorlicki', u'gorlicki'),
    ('krakowski', u'krakowski'),
    ('limanowski', u'limanowski'),
    ('miechowski', u'miechowski'),
    ('myslenicki', u'myślenicki'),
    ('nowosadecki', u'nowosądecki'),
    ('nowotarski', u'nowotarski'),
    ('olkuski', u'olkuski'),
    ('oswiecimski', u'oświęcimski'),
    ('proszowicki', u'proszowicki'),
    ('suski', u'suski'),
    ('tarnowski', u'tarnowski'),
    ('tatrzanski', u'tatrzański'),
    ('wadowicki', u'wadowicki'),
    ('wielicki', u'wielicki'),
    ('warszawa', u'Warszawa'),
    ('ostroleka', u'Ostrołęka'),
    ('plock', u'Płock'),
    ('radom', u'Radom'),
    ('siedlce', u'Siedlce'),
    ('bialobrzeski', u'białobrzeski'),
    ('ciechanowski', u'ciechanowski'),
    ('garwolinski', u'garwoliński'),
    ('gostyninski', u'gostyniński'),
    ('grodziski', u'grodziski'),
    ('grojecki', u'grójecki'),
    # NOTE(review): expected u'kozienicki' (Kozienice county) -- confirm.
    ('kozienicki', u'kozenicki'),
    ('legionowski', u'legionowski'),
    ('lipski', u'lipski'),
    ('losicki', u'łosicki'),
    ('makowski', u'makowski'),
    ('minski', u'miński'),
    ('mlawski', u'mławski'),
    ('nowodworski', u'nowodworski'),
    ('ostrolecki', u'ostrołęcki'),
    ('ostrowski', u'ostrowski'),
    ('otwocki', u'otwocki'),
    ('piaseczynski', u'piaseczyński'),
    ('plocki', u'płocki'),
    ('plonski', u'płoński'),
    ('pruszkowski', u'pruszkowski'),
    ('przasnyski', u'przasnyski'),
    ('przysuski', u'przysuski'),
    ('pultuski', u'pułtuski'),
    ('radomski', u'radomski'),
    ('siedlecki', u'siedlecki'),
    ('sierpecki', u'sierpecki'),
    ('sochaczewski', u'sochaczewski'),
    ('sokolowski', u'sokołowski'),
    ('szydlowiecki', u'szydłowiecki'),
    ('warszawski-zachodni', u'warszawski zachodni'),
    ('wegrowski', u'węgrowski'),
    ('wolominski', u'wołomiński'),
    ('wyszkowski', u'wyszkowski'),
    ('zwolenski', u'zwoleński'),
    ('zurominski', u'żuromiński'),
    ('zyrardowski', u'żyrardowski'),
    ('opole', u'Opole'),
    ('brzeski', u'brzeski'),
    ('glubczycki', u'głubczyski'),
    ('kedzierzynsko-kozielski', u'kędzierzyński-kozielski'),
    ('kluczborski', u'kluczborski'),
    ('krapkowicki', u'krapkowicki'),
    ('namyslowski', u'namysłowski'),
    ('nyski', u'nyski'),
    ('oleski', u'oleski'),
    ('opolski', u'opolski'),
    ('prudnicki', u'prudnicki'),
    ('strzelecki', u'strzelecki'),
    ('rzeszow', u'Rzeszów'),
    ('krosno', u'Krosno'),
    ('przemysl', u'Przemyśl'),
    ('tarnobrzeg', u'Tarnobrzeg'),
    ('bieszczadzki', u'bieszczadzki'),
    ('brzozowski', u'brzozowski'),
    ('debicki', u'dębicki'),
    ('jaroslawski', u'jarosławski'),
    ('jasielski', u'jasielski'),
    ('kolbuszowski', u'kolbuszowski'),
    ('krosnienski', u'krośnieński'),
    ('leski', u'leski'),
    ('lezajski', u'leżajski'),
    ('lubaczowski', u'lubaczowski'),
    ('lancucki', u'łańcucki'),
    ('mielecki', u'mielecki'),
    ('nizanski', u'niżański'),
    ('przemyski', u'przemyski'),
    ('przeworski', u'przeworski'),
    ('ropczycko-sedziszowski', u'ropczycko-sędziszowski'),
    ('rzeszowski', u'rzeszowski'),
    ('sanocki', u'sanocki'),
    ('stalowowolski', u'stalowowolski'),
    ('strzyzowski', u'strzyżowski'),
    ('tarnobrzeski', u'tarnobrzeski'),
    ('bialystok', u'Białystok'),
    ('lomza', u'Łomża'),
    ('suwalki', u'Suwałki'),
    ('augustowski', u'augustowski'),
    ('bialostocki', u'białostocki'),
    ('bielski', u'bielski'),
    ('grajewski', u'grajewski'),
    ('hajnowski', u'hajnowski'),
    ('kolnenski', u'kolneński'),
    # NOTE(review): the slug contains a non-ASCII 'ł', unlike all other
    # slugs; expected 'lomzynski' -- confirm.
    ('łomzynski', u'łomżyński'),
    ('moniecki', u'moniecki'),
    ('sejnenski', u'sejneński'),
    # NOTE(review): expected u'siemiatycki' -- confirm.
    ('siemiatycki', u'siematycki'),
    ('sokolski', u'sokólski'),
    ('suwalski', u'suwalski'),
    ('wysokomazowiecki', u'wysokomazowiecki'),
    ('zambrowski', u'zambrowski'),
    ('gdansk', u'Gdańsk'),
    ('gdynia', u'Gdynia'),
    ('slupsk', u'Słupsk'),
    ('sopot', u'Sopot'),
    ('bytowski', u'bytowski'),
    ('chojnicki', u'chojnicki'),
    ('czluchowski', u'człuchowski'),
    ('kartuski', u'kartuski'),
    ('koscierski', u'kościerski'),
    ('kwidzynski', u'kwidzyński'),
    ('leborski', u'lęborski'),
    ('malborski', u'malborski'),
    ('nowodworski', u'nowodworski'),
    ('gdanski', u'gdański'),
    ('pucki', u'pucki'),
    ('slupski', u'słupski'),
    ('starogardzki', u'starogardzki'),
    ('sztumski', u'sztumski'),
    ('tczewski', u'tczewski'),
    # NOTE(review): expected u'wejherowski' (Wejherowo county) -- confirm.
    ('wejherowski', u'wejcherowski'),
    ('katowice', u'Katowice'),
    ('bielsko-biala', u'Bielsko-Biała'),
    ('bytom', u'Bytom'),
    ('chorzow', u'Chorzów'),
    ('czestochowa', u'Częstochowa'),
    ('dabrowa-gornicza', u'Dąbrowa Górnicza'),
    ('gliwice', u'Gliwice'),
    ('jastrzebie-zdroj', u'Jastrzębie Zdrój'),
    ('jaworzno', u'Jaworzno'),
    ('myslowice', u'Mysłowice'),
    ('piekary-slaskie', u'Piekary Śląskie'),
    ('ruda-slaska', u'Ruda Śląska'),
    ('rybnik', u'Rybnik'),
    ('siemianowice-slaskie', u'Siemianowice Śląskie'),
    ('sosnowiec', u'Sosnowiec'),
    ('swietochlowice', u'Świętochłowice'),
    ('tychy', u'Tychy'),
    ('zabrze', u'Zabrze'),
    ('zory', u'Żory'),
    ('bedzinski', u'będziński'),
    ('bielski', u'bielski'),
    ('bierunsko-ledzinski', u'bieruńsko-lędziński'),
    ('cieszynski', u'cieszyński'),
    ('czestochowski', u'częstochowski'),
    ('gliwicki', u'gliwicki'),
    ('klobucki', u'kłobucki'),
    ('lubliniecki', u'lubliniecki'),
    ('mikolowski', u'mikołowski'),
    ('myszkowski', u'myszkowski'),
    ('pszczynski', u'pszczyński'),
    ('raciborski', u'raciborski'),
    ('rybnicki', u'rybnicki'),
    ('tarnogorski', u'tarnogórski'),
    ('wodzislawski', u'wodzisławski'),
    ('zawiercianski', u'zawierciański'),
    ('zywiecki', u'żywiecki'),
    ('kielce', u'Kielce'),
    ('buski', u'buski'),
    ('jedrzejowski', u'jędrzejowski'),
    ('kazimierski', u'kazimierski'),
    ('kielecki', u'kielecki'),
    ('konecki', u'konecki'),
    ('opatowski', u'opatowski'),
    ('ostrowiecki', u'ostrowiecki'),
    ('pinczowski', u'pińczowski'),
    ('sandomierski', u'sandomierski'),
    ('skarzyski', u'skarżyski'),
    ('starachowicki', u'starachowicki'),
    ('staszowski', u'staszowski'),
    ('wloszczowski', u'włoszczowski'),
    ('olsztyn', u'Olsztyn'),
    ('elblag', u'Elbląg'),
    ('bartoszycki', u'bartoszycki'),
    ('braniewski', u'braniewski'),
    ('dzialdowski', u'działdowski'),
    ('elblaski', u'elbląski'),
    ('elcki', u'ełcki'),
    ('gizycki', u'giżycki'),
    ('goldapski', u'gołdapski'),
    ('ilawski', u'iławski'),
    ('ketrzynski', u'kętrzyński'),
    ('lidzbarski', u'lidzbarski'),
    ('mragowski', u'mrągowski'),
    ('nidzicki', u'nidzicki'),
    ('nowomiejski', u'nowomiejski'),
    ('olecki', u'olecki'),
    ('olsztynski', u'olsztyński'),
    ('ostrodzki', u'ostródzki'),
    ('piski', u'piski'),
    ('szczycienski', u'szczycieński'),
    ('wegorzewski', u'węgorzewski'),
    ('poznan', u'Poznań'),
    ('kalisz', u'Kalisz'),
    ('konin', u'Konin'),
    ('leszno', u'Leszno'),
    # NOTE(review): expected u'chodzieski' (Chodzież county) -- confirm.
    ('chodzieski', u'chodziejski'),
    ('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'),
    ('gnieznienski', u'gnieźnieński'),
    ('gostynski', u'gostyński'),
    ('grodziski', u'grodziski'),
    ('jarocinski', u'jarociński'),
    ('kaliski', u'kaliski'),
    ('kepinski', u'kępiński'),
    ('kolski', u'kolski'),
    ('koninski', u'koniński'),
    ('koscianski', u'kościański'),
    ('krotoszynski', u'krotoszyński'),
    ('leszczynski', u'leszczyński'),
    ('miedzychodzki', u'międzychodzki'),
    ('nowotomyski', u'nowotomyski'),
    ('obornicki', u'obornicki'),
    ('ostrowski', u'ostrowski'),
    ('ostrzeszowski', u'ostrzeszowski'),
    ('pilski', u'pilski'),
    ('pleszewski', u'pleszewski'),
    ('poznanski', u'poznański'),
    ('rawicki', u'rawicki'),
    ('slupecki', u'słupecki'),
    ('szamotulski', u'szamotulski'),
    ('sredzki', u'średzki'),
    ('sremski', u'śremski'),
    ('turecki', u'turecki'),
    ('wagrowiecki', u'wągrowiecki'),
    ('wolsztynski', u'wolsztyński'),
    ('wrzesinski', u'wrzesiński'),
    ('zlotowski', u'złotowski'),
    ('bialogardzki', u'białogardzki'),
    ('choszczenski', u'choszczeński'),
    ('drawski', u'drawski'),
    ('goleniowski', u'goleniowski'),
    ('gryficki', u'gryficki'),
    ('gryfinski', u'gryfiński'),
    ('kamienski', u'kamieński'),
    ('kolobrzeski', u'kołobrzeski'),
    ('koszalinski', u'koszaliński'),
    ('lobeski', u'łobeski'),
    ('mysliborski', u'myśliborski'),
    ('policki', u'policki'),
    ('pyrzycki', u'pyrzycki'),
    ('slawienski', u'sławieński'),
    ('stargardzki', u'stargardzki'),
    ('szczecinecki', u'szczecinecki'),
    ('swidwinski', u'świdwiński'),
    ('walecki', u'wałecki'),
)
| bsd-3-clause |
hachard/Cra-Magnet | flask/lib/python3.5/site-packages/pkg_resources/_vendor/packaging/requirements.py | 454 | 4355 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
    """Raised when a requirement string is malformed; see PEP 508."""
# ---------------------------------------------------------------------------
# PEP 508 requirement grammar, built from pyparsing combinators.
# ``.suppress()`` drops a token from the parse results; a trailing ("name")
# call labels the matched text so Requirement.__init__ can read it back.
# ---------------------------------------------------------------------------
ALPHANUM = Word(string.ascii_letters + string.digits)

# Delimiter tokens (suppressed: they carry no value of their own).
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
# Identifiers are alphanumeric runs optionally joined by -, _ or ., and
# never end in punctuation.
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

# Direct references: "name @ <uri>"; the URI is any space-free run.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

# One version clause may be a PEP 440 specifier or a legacy specifier.
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

# Environment markers: capture the exact source slice and parse it into a
# Marker object.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON  # (sic) historical misspelling, kept as-is
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        """Parse *requirement_string* (PEP 508 syntax).

        :raises InvalidRequirement: if the string cannot be parsed, or its
            URL part is malformed.
        """
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            # BUG FIX: the old check demanded both a scheme AND a netloc,
            # which wrongly rejected valid file: URLs (their netloc is
            # legitimately empty).  Accept file: URLs that round-trip
            # unchanged; everything else still needs scheme + netloc.
            if parsed_url.scheme == "file":
                if urlparse.urlunparse(parsed_url) != req.url:
                    raise InvalidRequirement("Invalid URL given")
            elif not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        """Re-assemble the canonical requirement string."""
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
| gpl-3.0 |
rdblue/Impala | tests/metadata/test_recover_partitions.py | 13 | 15294 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala tests for ALTER TABLE RECOVER PARTITIONS statement
import pytest
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3
from tests.util.filesystem_utils import WAREHOUSE, IS_DEFAULT_FS
# Validates ALTER TABLE RECOVER PARTITIONS statement
class TestRecoverPartitions(ImpalaTestSuite):
# Database/table names used by every test in this suite.
TEST_DB = "recover_parts_db"
TEST_TBL = "alter_recover_partitions"
TEST_TBL2 = "alter_recover_partitions_all_types"
# HDFS paths backing the two test tables.
BASE_DIR = 'test-warehouse/%s.db/%s/' % (TEST_DB, TEST_TBL)
BASE_DIR2 = 'test-warehouse/%s.db/%s/' % (TEST_DB, TEST_TBL2)
# Directory-name marker Hive uses for NULL partition-key values.
DEF_NULL_PART_KEY = "__HIVE_DEFAULT_PARTITION__"
@classmethod
def get_workload(self):
    """Name of the workload whose config drives this test suite."""
    return 'functional-query'
@classmethod
def add_test_dimensions(cls):
    """Constrain the test matrix: single cluster size, text/uncompressed only."""
    super(TestRecoverPartitions, cls).add_test_dimensions()
    # Only run with sync_ddl on exhaustive since it increases test runtime.
    if cls.exploration_strategy() == 'exhaustive':
        sync_ddl_opts = [0, 1]
    else:
        sync_ddl_opts = [0]
    cls.TestMatrix.add_dimension(create_exec_option_dimension(
        cluster_sizes=ALL_NODES_ONLY,
        disable_codegen_options=[False],
        batch_sizes=[0],
        sync_ddl=sync_ddl_opts))
    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
def setup_method(self, method):
    """Create a fresh test database and switch to it before each test."""
    db = self.TEST_DB
    self.cleanup_db(db)
    self.client.execute(
        "create database {0} location '{1}/{0}.db'".format(db, WAREHOUSE))
    self.client.execute("use %s" % db)
def teardown_method(self, method):
    """Drop the test database (and its tables) after each test."""
    self.cleanup_db(self.TEST_DB)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_recover_partitions(self, vector):
    """Test that RECOVER PARTITIONS correctly discovers new partitions added externally
    by the hdfs client.
    """
    part_name = "p2"
    leaf_dir = "i=0001/p=%s/" % part_name
    malformed_dir = "i=fish/p=%s/" % part_name
    file_path = "test"
    inserted_value = "2"
    null_dir = "i=1/p=%s/" % self.DEF_NULL_PART_KEY
    null_inserted_value = "4"

    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))

    # Create a path for a new partition using hdfs client and add a file with
    # some values.  Test that the partition can be recovered and that the
    # inserted data are accessible.
    self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir)
    self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert not self.has_value(part_name, result.data)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    # BUG FIX: these checks used to be written as 'assert (expr, "msg")'.
    # A non-empty tuple is always truthy, so they could never fail; the
    # message must be the second operand of the assert statement itself.
    assert self.has_value(part_name, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed." % (self.TEST_TBL)
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert self.has_value(inserted_value, result.data), \
        "Failed to load tables after ALTER TABLE %s RECOVER PARTITIONS." \
        % (self.TEST_TBL)

    # Test that invalid partition values are ignored during partition recovery.
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    old_length = len(result.data)
    self.hdfs_client.make_dir(self.BASE_DIR + malformed_dir)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert len(result.data) == old_length, \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle invalid partition values." \
        % (self.TEST_TBL)

    # Create a directory whose subdirectory names contain
    # __HIVE_DEFAULT_PARTITION__ and check that is recovered as a NULL
    # partition.
    self.hdfs_client.make_dir(self.BASE_DIR + null_dir)
    self.hdfs_client.create_file(self.BASE_DIR + null_dir + file_path,
                                 null_inserted_value)
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert not self.has_value(self.DEF_NULL_PART_KEY, result.data)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert self.has_value("NULL", result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle null partition values." \
        % (self.TEST_TBL)
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert self.has_value(null_inserted_value, result.data)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_nondefault_location_partitions(self, vector):
    """If the location of data files in one partition is changed, test that data files
    in the default location will not be loaded after partition recovery."""
    file_path = "test"
    leaf_dir = "i=1/p=p3/"
    inserted_value = "4"

    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s ADD PARTITION(i=1, p='p3')" % (self.TEST_TBL))
    # Point the partition away from its default directory, then recreate the
    # default directory with a data file in it.
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s PARTITION (i=1, p='p3') SET LOCATION '%s/%s.db/tmp' "
        % (self.TEST_TBL, WAREHOUSE, self.TEST_DB))
    self.hdfs_client.delete_file_dir(self.BASE_DIR + leaf_dir, recursive=True)
    self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir)
    self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    # Ensure that no duplicate partitions are recovered.
    # BUG FIX: this check used to be 'assert (expr, "msg")', a non-empty
    # tuple that always evaluates truthy; the message belongs after a comma.
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert not self.has_value(inserted_value, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle non-default partition location." \
        % (self.TEST_TBL)
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=1, p='p3') VALUES(4)" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert self.has_value(inserted_value, result.data)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_duplicate_partitions(self, vector):
    """Test that RECOVER PARTITIONS does not recover equivalent partitions. Two partitions
    are considered equivalent if they correspond to distinct paths but can be converted
    to the same partition key values (e.g. "i=0005/p=p2" and "i=05/p=p2")."""
    same_value_dir1 = "i=0004/p=p2/"
    same_value_dir2 = "i=000004/p=p2/"
    file_path = "test"
    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))

    # Create a partition with path "/i=1/p=p4".
    # Create a path "/i=0001/p=p4" using hdfs client, and add a file with some values.
    # Test that no new partition will be recovered and the inserted data are not accessible.
    leaf_dir = "i=0001/p=p4/"
    inserted_value = "5"
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s ADD PARTITION(i=1, p='p4')" % (self.TEST_TBL))
    self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir)
    self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    # BUG FIX: "assert (expr, msg)" asserts a 2-tuple, which is always truthy,
    # so this check could never fail. The correct form is "assert expr, msg".
    assert not self.has_value(inserted_value, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle duplicate partition key values." \
        % (self.TEST_TBL)

    # Create two paths '/i=0004/p=p2/' and "i=000004/p=p2/" using hdfs client.
    # Test that only one partition will be added.
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    old_length = len(result.data)
    self.hdfs_client.make_dir(self.BASE_DIR + same_value_dir1)
    self.hdfs_client.make_dir(self.BASE_DIR + same_value_dir2)
    # Only one partition will be added.
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL))
    # BUG FIX: same tuple-assert defect as above.
    assert (old_length + 1) == len(result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle duplicate partition key values." \
        % (self.TEST_TBL)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_post_invalidate(self, vector):
    """Test that RECOVER PARTITIONS works correctly after invalidate."""
    leaf_dir = "i=002/p=p2/"
    file_path = "test"
    inserted_value = "2"
    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))

    # Test that the recovered partitions are properly stored in Hive MetaStore.
    # Invalidate the table metadata and then check if the recovered partitions
    # are accessible.
    self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir)
    self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert self.has_value(inserted_value, result.data)
    self.client.execute("INVALIDATE METADATA %s" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    # BUG FIX: "assert (expr, msg)" asserts a 2-tuple, which is always truthy,
    # so this check could never fail. The correct form is "assert expr, msg".
    assert self.has_value(inserted_value, result.data), \
        "INVALIDATE can't work on partitions recovered by ALTER TABLE %s RECOVER PARTITIONS." \
        % (self.TEST_TBL)
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(i=002, p='p2') VALUES(4)" % (self.TEST_TBL))
    result = self.execute_query_expect_success(self.client,
        "select c from %s" % self.TEST_TBL)
    assert self.has_value('4', result.data)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_support_all_types(self, vector):
    """Test that RECOVER PARTITIONS works correctly on all supported data types."""
    normal_values = ["a=1", "b=128", "c=32768", "d=2147483648", "e=11.11",
                     "f=22.22", "g=33.33", "j=tchar", "k=tvchar", "s=recover"]
    malformed_values = ["a=a", "b=b", "c=c", "d=d", "e=e", "f=f", "g=g"]
    overflow_values = ["a=128", "b=-32769", "c=-2147483649", "d=9223372036854775808",
                       "e=11.11111111111111111111111111111111111111111111111111111",
                       "f=3.40282346638528860e+39", "g=1.79769313486231570e+309"]
    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (i INT) PARTITIONED BY (a TINYINT, b SMALLINT, c INT, d BIGINT,"
        " e DECIMAL(4,2), f FLOAT, g DOUBLE, j CHAR(5), k VARCHAR(6), s STRING)"
        % (self.TEST_TBL2))
    self.execute_query_expect_success(self.client,
        "INSERT INTO TABLE %s PARTITION(a=1, b=2, c=3, d=4, e=55.55, f=6.6, g=7.7, "
        "j=cast('j' as CHAR(5)), k=cast('k' as VARCHAR(6)), s='s') VALUES(1)"
        % (self.TEST_TBL2))

    # Test valid partition values.
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL2))
    old_length = len(result.data)
    normal_dir = '/'.join(normal_values)
    self.hdfs_client.make_dir(self.BASE_DIR2 + normal_dir)
    # One partition will be added.
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL2))
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL2))
    # BUG FIX: the tuple-assert could never fail, and the message interpolated
    # the wrong table name (TEST_TBL instead of TEST_TBL2, the table under test).
    assert len(result.data) == (old_length + 1), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle some data types." \
        % (self.TEST_TBL2)

    # Test malformed partition values.
    self.check_invalid_partition_values(normal_values, malformed_values)
    # Test overflow partition values.
    self.check_invalid_partition_values(normal_values, overflow_values)
def check_invalid_partition_values(self, normal_values, invalid_values):
    """Check that RECOVER PARTITIONS ignores partitions with invalid partition values."""
    result = self.execute_query_expect_success(self.client,
        "SHOW PARTITIONS %s" % (self.TEST_TBL2))
    old_length = len(result.data)

    for i in range(len(invalid_values)):
        # Build a partition path that is valid in every component except the
        # i-th one, which is replaced by the invalid value.
        invalid_dir = ""
        for j in range(len(normal_values)):
            if i != j:
                invalid_dir += (normal_values[j] + "/")
            else:
                invalid_dir += (invalid_values[j] + "/")
        self.hdfs_client.make_dir(self.BASE_DIR2 + invalid_dir)
        # No partition will be added.
        self.execute_query_expect_success(self.client,
            "ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL2))
        result = self.execute_query_expect_success(self.client,
            "SHOW PARTITIONS %s" % (self.TEST_TBL2))
        # BUG FIX: "assert (expr, msg)" asserts an always-truthy tuple and can
        # never fail; also the message named TEST_TBL instead of TEST_TBL2.
        assert len(result.data) == old_length, \
            "ALTER TABLE %s RECOVER PARTITIONS failed to handle invalid partition key values." \
            % (self.TEST_TBL2)
def has_value(self, value, lines):
    """Return True if *value* occurs as a substring in any of *lines*."""
    return any(value in line for line in lines)
| apache-2.0 |
privateip/ansible | lib/ansible/modules/system/ping.py | 50 | 2098 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: reply 'pong' (or echo back the supplied data)."""
    module = AnsibleModule(
        argument_spec=dict(
            data=dict(required=False, default=None),
        ),
        supports_check_mode=True
    )

    data = module.params['data']
    if data == 'crash':
        # Deliberate failure hook used to exercise error handling.
        raise Exception("boom")

    result = dict(ping=data if data else 'pong')
    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
michaelgallacher/intellij-community | plugins/hg4idea/testData/bin/mercurial/sshserver.py | 93 | 4270 | # sshserver.py - ssh protocol server support for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, hook, wireproto, changegroup
import os, sys
class sshserver(object):
    """Serve the Mercurial wire protocol to one client over stdio.

    Commands are read from ``ui.fin`` and replies written to ``ui.fout``
    using a simple length-prefixed framing.
    """

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo
        self.lock = None
        self.fin = ui.fin
        self.fout = ui.fout

        hook.redirect(True)
        # Reserve the real stdout for protocol traffic; everything the ui
        # (including the repo's ui) would print goes to stderr instead.
        ui.fout = repo.ui.fout = ui.ferr

        # Prevent insertion/deletion of CRs
        util.setbinary(self.fin)
        util.setbinary(self.fout)

    def getargs(self, args):
        """Read the named arguments from the client and return their values.

        ``args`` is a space-separated list of expected names.  Each argument
        arrives as a "<name> <length>" line followed by ``length`` bytes of
        value; the special name '*' carries a nested dict of extra arguments.
        Raises util.Abort on an unexpected argument name.
        """
        data = {}
        keys = args.split()
        for n in xrange(len(keys)):
            argline = self.fin.readline()[:-1]
            arg, l = argline.split()
            if arg not in keys:
                raise util.Abort("unexpected parameter %r" % arg)
            if arg == '*':
                star = {}
                for k in xrange(int(l)):
                    argline = self.fin.readline()[:-1]
                    arg, l = argline.split()
                    val = self.fin.read(int(l))
                    star[arg] = val
                data['*'] = star
            else:
                val = self.fin.read(int(l))
                data[arg] = val
        return [data[k] for k in keys]

    def getarg(self, name):
        """Read a single named argument (see getargs)."""
        return self.getargs(name)[0]

    def getfile(self, fpout):
        """Receive a chunked upload from the client and write it to fpout.

        Chunks are a decimal byte count on its own line followed by that many
        bytes; a zero count terminates the transfer.
        """
        self.sendresponse('')
        count = int(self.fin.readline())
        while count:
            fpout.write(self.fin.read(count))
            count = int(self.fin.readline())

    def redirect(self):
        # No-op: output was already redirected to stderr in __init__.
        pass

    def groupchunks(self, changegroup):
        """Yield the changegroup's data in 4 KiB chunks until exhausted."""
        while True:
            d = changegroup.read(4096)
            if not d:
                break
            yield d

    def sendresponse(self, v):
        """Send one length-prefixed string frame and flush."""
        self.fout.write("%d\n" % len(v))
        self.fout.write(v)
        self.fout.flush()

    def sendstream(self, source):
        """Stream raw chunks from source.gen to the client, then flush."""
        write = self.fout.write
        for chunk in source.gen:
            write(chunk)
        self.fout.flush()

    def sendpushresponse(self, rsp):
        """Send a push result: an empty frame followed by the result value."""
        self.sendresponse('')
        self.sendresponse(str(rsp.res))

    def sendpusherror(self, rsp):
        """Send a push error message as a plain response frame."""
        self.sendresponse(rsp.res)

    def sendooberror(self, rsp):
        """Send an out-of-band error on stderr plus an empty line on stdout."""
        self.ui.ferr.write('%s\n-\n' % rsp.message)
        self.ui.ferr.flush()
        self.fout.write('\n')
        self.fout.flush()

    def serve_forever(self):
        """Process commands until end of stream, then exit the process.

        Any repo lock still held (see do_lock) is released on the way out.
        """
        try:
            while self.serve_one():
                pass
        finally:
            if self.lock is not None:
                self.lock.release()
        sys.exit(0)

    # Dispatch table: wireproto response type -> sender method.
    handlers = {
        str: sendresponse,
        wireproto.streamres: sendstream,
        wireproto.pushres: sendpushresponse,
        wireproto.pusherr: sendpusherror,
        wireproto.ooberror: sendooberror,
    }

    def serve_one(self):
        """Read and execute a single command.

        Commands known to wireproto are dispatched through it and answered
        via the handlers table; otherwise a legacy ``do_<cmd>`` method is
        tried.  Returns False once the input stream is exhausted.
        """
        cmd = self.fin.readline()[:-1]
        if cmd and cmd in wireproto.commands:
            rsp = wireproto.dispatch(self.repo, self, cmd)
            self.handlers[rsp.__class__](self, rsp)
        elif cmd:
            impl = getattr(self, 'do_' + cmd, None)
            if impl:
                r = impl()
                if r is not None:
                    self.sendresponse(r)
            else: self.sendresponse("")
        return cmd != ''

    def do_lock(self):
        '''DEPRECATED - allowing remote client to lock repo is not safe'''
        self.lock = self.repo.lock()
        return ""

    def do_unlock(self):
        '''DEPRECATED'''
        if self.lock:
            self.lock.release()
        self.lock = None
        return ""

    def do_addchangegroup(self):
        '''DEPRECATED'''
        # Requires a prior do_lock; refuses to apply a changegroup unlocked.
        if not self.lock:
            self.sendresponse("not locked")
            return

        self.sendresponse("")
        cg = changegroup.unbundle10(self.fin, "UN")
        r = self.repo.addchangegroup(cg, 'serve', self._client())
        self.lock.release()
        return str(r)

    def _client(self):
        """Return a 'remote:ssh:<addr>' identifier built from $SSH_CLIENT."""
        client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
        return 'remote:ssh:' + client
| apache-2.0 |
ulope/django | django/contrib/messages/tests/test_session.py | 226 | 1898 | from django.contrib.messages import constants
from django.contrib.messages.tests.base import BaseTests
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.session import SessionStorage
from django.utils.safestring import SafeData, mark_safe
from django.test import TestCase
def set_session_data(storage, messages):
    """
    Store *messages* (serialized) in the backend request's session and drop
    the backend's cached loaded data so the next read hits the session.
    """
    serialized = storage.serialize_messages(messages)
    storage.request.session[storage.session_key] = serialized
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages are currently persisted in the session."""
    raw = storage.request.session.get(storage.session_key, [])
    return len(storage.deserialize_messages(raw))
class SessionTest(BaseTests, TestCase):
    """Run the shared message-storage test suite against SessionStorage."""

    # Backend under test; consumed by the BaseTests helpers.
    storage_class = SessionStorage

    def get_request(self):
        """Return a request whose session is a plain dict, kept on self.session
        so tests can inspect it directly."""
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request

    def stored_messages_count(self, storage, response):
        # The session backend ignores the response; count straight from the
        # session contents.
        return stored_session_messages_count(storage)

    def test_get(self):
        storage = self.storage_class(self.get_request())

        # Set initial data.
        example_messages = ['test', 'me']
        set_session_data(storage, example_messages)

        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)

    def test_safedata(self):
        """
        Tests that a message containing SafeData is keeping its safe status when
        retrieved from the message storage.
        """
        storage = self.get_storage()

        message = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
        set_session_data(storage, [message])
        self.assertIsInstance(list(storage)[0].message, SafeData)
| bsd-3-clause |
wmbutler/courtlistener | alert/lib/magic.py | 5 | 5797 | """
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import os.path
import ctypes
import ctypes.util
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
    """
    Magic is a wrapper around the libmagic C library.

    Each instance owns a libmagic "cookie" (a native handle) created in
    __init__ and released in __del__.
    """

    def __init__(self, mime=False, magic_file=None, mime_encoding=False):
        """
        Create a new libmagic wrapper.

        mime - if True, mimetypes are returned instead of textual descriptions
        mime_encoding - if True, codec is returned
        magic_file - use a mime database other than the system default

        Note: mime takes precedence; mime_encoding is only honored when
        mime is False.
        """
        flags = MAGIC_NONE
        if mime:
            flags |= MAGIC_MIME
        elif mime_encoding:
            flags |= MAGIC_MIME_ENCODING

        self.cookie = magic_open(flags)
        magic_load(self.cookie, magic_file)

    def from_buffer(self, buf):
        """
        Identify the contents of `buf`
        """
        return magic_buffer(self.cookie, buf)

    def from_file(self, filename):
        """
        Identify the contents of file `filename`
        raises IOError if the file does not exist
        """
        # Fail fast with a clear IOError rather than passing a missing path
        # to the C library.
        if not os.path.exists(filename):
            raise IOError("File does not exist: " + filename)

        return magic_file(self.cookie, filename)

    def __del__(self):
        # Guard so the native cookie is released at most once.
        if self.cookie:
            magic_close(self.cookie)
            self.cookie = None
# Lazily-created module-level singletons shared by the convenience helpers.
_magic_mime = None
_magic = None

def _get_magic_mime():
    """Return (creating on first use) the shared mime-type Magic instance."""
    global _magic_mime
    if not _magic_mime:
        _magic_mime = Magic(mime=True)
    return _magic_mime

def _get_magic():
    """Return (creating on first use) the shared description Magic instance."""
    global _magic
    if not _magic:
        _magic = Magic()
    return _magic
def _get_magic_type(mime):
    """Select the shared Magic singleton matching the *mime* flag."""
    return _get_magic_mime() if mime else _get_magic()
def from_file(filename, mime=False):
    """Identify the file at *filename*; return a mimetype when mime=True."""
    return _get_magic_type(mime).from_file(filename)
def from_buffer(buffer, mime=False):
    """Identify the byte string *buffer*; return a mimetype when mime=True."""
    return _get_magic_type(mime).from_buffer(buffer)
libmagic = None
# Let's try to find magic or magic1
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1')

# This is necessary because find_library returns None if it doesn't find the library
if dll:
    libmagic = ctypes.CDLL(dll)

if not libmagic or not libmagic._name:
    import sys
    # Fallback locations for platforms where find_library comes up empty:
    # MacPorts/Homebrew paths on OS X, the bare DLL name on Windows.
    platform_to_lib = {'darwin': ['/opt/local/lib/libmagic.dylib',
                                  '/usr/local/lib/libmagic.dylib'],
                       'win32': ['magic1.dll']}
    for dll in platform_to_lib.get(sys.platform, []):
        try:
            libmagic = ctypes.CDLL(dll)
        except OSError:
            pass

if not libmagic or not libmagic._name:
    # It is better to raise an ImportError since we are importing magic module
    raise ImportError('failed to find libmagic. Check your installation')
# Opaque handle ("cookie") type returned by magic_open().
magic_t = ctypes.c_void_p

def errorcheck(result, func, args):
    """ctypes errcheck hook: raise MagicException if the call left an error
    on the cookie (args[0]); otherwise pass the result through unchanged."""
    err = magic_error(args[0])
    if err is not None:
        raise MagicException(err)
    else:
        return result

# Declare restype/argtypes for each libmagic entry point so ctypes converts
# arguments and return values correctly.
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]

magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]

magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]

magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]

magic_file = libmagic.magic_file
magic_file.restype = c_char_p
magic_file.argtypes = [magic_t, c_char_p]
magic_file.errcheck = errorcheck

# The raw C function takes an explicit length; kept private behind the
# Python-friendly magic_buffer() wrapper below.
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck

def magic_buffer(cookie, buf):
    """Call the C magic_buffer(), supplying buf's length automatically."""
    return _magic_buffer(cookie, buf, len(buf))

magic_load = libmagic.magic_load
magic_load.restype = c_int
magic_load.argtypes = [magic_t, c_char_p]
magic_load.errcheck = errorcheck

magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]

magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]

magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
# Flag bits accepted by magic_open() / magic_setflags() (mirroring <magic.h>).
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors

MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
BoltzmannBrain/nupic | src/nupic/research/temporal_memory.py | 5 | 29929 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory implementation in Python.
"""
from collections import defaultdict, namedtuple
from operator import mul
from nupic.bindings.math import Random
from nupic.research.connections import Connections
EPSILON = 0.000001
class TemporalMemory(object):
"""
Class implementing the Temporal Memory algorithm.
"""
def __init__(self,
             columnDimensions=(2048,),
             cellsPerColumn=32,
             activationThreshold=13,
             initialPermanence=0.21,
             connectedPermanence=0.50,
             minThreshold=10,
             maxNewSynapseCount=20,
             permanenceIncrement=0.10,
             permanenceDecrement=0.10,
             predictedSegmentDecrement=0.0,
             maxSegmentsPerCell=255,
             maxSynapsesPerSegment=255,
             seed=42,
             **kwargs):
    """
    @param columnDimensions          (list)  Dimensions of the column space
    @param cellsPerColumn            (int)   Number of cells per column
    @param activationThreshold       (int)   If the number of active connected synapses on a segment is at least this threshold, the segment is said to be active.
    @param initialPermanence         (float) Initial permanence of a new synapse.
    @param connectedPermanence       (float) If the permanence value for a synapse is greater than this value, it is said to be connected.
    @param minThreshold              (int)   If the number of synapses active on a segment is at least this threshold, it is selected as the best matching cell in a bursting column.
    @param maxNewSynapseCount        (int)   The maximum number of synapses added to a segment during learning.
    @param permanenceIncrement       (float) Amount by which permanences of synapses are incremented during learning.
    @param permanenceDecrement       (float) Amount by which permanences of synapses are decremented during learning.
    @param predictedSegmentDecrement (float) Amount by which active permanences of synapses of previously predicted but inactive segments are decremented.
    @param maxSegmentsPerCell        (int)   Maximum number of segments a single cell may grow (passed to Connections).
    @param maxSynapsesPerSegment     (int)   Maximum number of synapses a single segment may hold (passed to Connections).
    @param seed                      (int)   Seed for the random number generator.

    Extra keyword arguments are accepted (and ignored) so callers may pass
    a superset of parameters.

    Notes:
    predictedSegmentDecrement: A good value is just a bit larger than
    (the column-level sparsity * permanenceIncrement). So, if column-level
    sparsity is 2% and permanenceIncrement is 0.01, this parameter should be
    something like 4% * 0.01 = 0.0004).
    """
    # Error checking
    if not len(columnDimensions):
        raise ValueError("Number of column dimensions must be greater than 0")

    if not cellsPerColumn > 0:
        raise ValueError("Number of cells per column must be greater than 0")

    # TODO: Validate all parameters (and add validation tests)

    # Save member variables
    self.columnDimensions = columnDimensions
    self.cellsPerColumn = cellsPerColumn
    self.activationThreshold = activationThreshold
    self.initialPermanence = initialPermanence
    self.connectedPermanence = connectedPermanence
    self.minThreshold = minThreshold
    self.maxNewSynapseCount = maxNewSynapseCount
    self.permanenceIncrement = permanenceIncrement
    self.permanenceDecrement = permanenceDecrement
    self.predictedSegmentDecrement = predictedSegmentDecrement

    # Initialize member variables
    self.connections = Connections(self.numberOfCells(),
                                   maxSegmentsPerCell=maxSegmentsPerCell,
                                   maxSynapsesPerSegment=maxSynapsesPerSegment)
    self._random = Random(seed)

    # Per-timestep sequence state, rebuilt on every call to compute().
    self.activeCells = set()
    self.predictiveCells = set()
    self.activeSegments = set()
    self.winnerCells = set()
    self.matchingSegments = set()
    self.matchingCells = set()
# ==============================
# Main functions
# ==============================
def compute(self, activeColumns, learn=True):
    """
    Feeds input record through TM, performing inference and learning.

    @param activeColumns (set)  Indices of active columns
    @param learn         (bool) Whether or not learning is enabled

    Updates member variables:
      - `activeCells`     (set)
      - `winnerCells`     (set)
      - `activeSegments`  (set)
      - `predictiveCells` (set)
      - `matchingSegments`(set)
      - `matchingCells`   (set)
    """
    # Snapshot the state produced at t-1; the phases below consume the
    # previous timestep's state while building the new one from scratch.
    prevPredictiveCells = self.predictiveCells
    prevActiveSegments = self.activeSegments
    prevActiveCells = self.activeCells
    prevWinnerCells = self.winnerCells
    prevMatchingSegments = self.matchingSegments
    prevMatchingCells = self.matchingCells

    activeCells = set()
    winnerCells = set()

    # Phase 1: activate cells that were correctly predicted at t-1.
    (_activeCells,
     _winnerCells,
     predictedActiveColumns,
     predictedInactiveCells) = self.activateCorrectlyPredictiveCells(
       prevPredictiveCells,
       prevMatchingCells,
       activeColumns)

    activeCells.update(_activeCells)
    winnerCells.update(_winnerCells)

    # Phase 2: burst every active column that was not predicted.
    (_activeCells,
     _winnerCells,
     learningSegments) = self.burstColumns(activeColumns,
                                           predictedActiveColumns,
                                           prevActiveCells,
                                           prevWinnerCells,
                                           self.connections)

    activeCells.update(_activeCells)
    winnerCells.update(_winnerCells)

    # Phase 3: adapt segment permanences (skipped during pure inference).
    if learn:
        self.learnOnSegments(prevActiveSegments,
                             learningSegments,
                             prevActiveCells,
                             winnerCells,
                             prevWinnerCells,
                             self.connections,
                             predictedInactiveCells,
                             prevMatchingSegments)

    # Phase 4: compute predictions for t+1 from the new active cells.
    (activeSegments,
     predictiveCells,
     matchingSegments,
     matchingCells) = self.computePredictiveCells(activeCells, self.connections)

    # Commit the new state only after all phases have finished.
    self.activeCells = activeCells
    self.winnerCells = winnerCells
    self.activeSegments = activeSegments
    self.predictiveCells = predictiveCells
    self.matchingSegments = matchingSegments
    self.matchingCells = matchingCells
def reset(self):
"""
Indicates the start of a new sequence. Resets sequence state of the TM.
"""
self.activeCells = set()
self.predictiveCells = set()
self.activeSegments = set()
self.winnerCells = set()
# ==============================
# Phases
# ==============================
def activateCorrectlyPredictiveCells(self,
                                     prevPredictiveCells,
                                     prevMatchingCells,
                                     activeColumns):
    """
    Phase 1: Activate the correctly predictive cells.

    A cell that was predictive at t-1 and whose column is now active becomes
    both active and a winner cell, and its column is marked
    predicted => active. When predictedSegmentDecrement is in use, cells
    that were matching at t-1 but whose column did NOT become active are
    collected as predicted-but-inactive (candidates for punishment).

    @param prevPredictiveCells (set) Indices of predictive cells in `t-1`
    @param prevMatchingCells   (set) Indices of matching cells in `t-1`
    @param activeColumns       (set) Indices of active columns in `t`

    @return (tuple) Contains:
                    `activeCells`            (set),
                    `winnerCells`            (set),
                    `predictedActiveColumns` (set),
                    `predictedInactiveCells` (set)
    """
    activeCells = set()
    winnerCells = set()
    predictedActiveColumns = set()
    predictedInactiveCells = set()

    for cell in prevPredictiveCells:
        col = self.columnForCell(cell)
        if col not in activeColumns:
            continue
        activeCells.add(cell)
        winnerCells.add(cell)
        predictedActiveColumns.add(col)

    if self.predictedSegmentDecrement > 0:
        predictedInactiveCells.update(
            cell for cell in prevMatchingCells
            if self.columnForCell(cell) not in activeColumns)

    return (activeCells,
            winnerCells,
            predictedActiveColumns,
            predictedInactiveCells)
def burstColumns(self,
                 activeColumns,
                 predictedActiveColumns,
                 prevActiveCells,
                 prevWinnerCells,
                 connections):
    """
    Phase 2: Burst unpredicted columns.

    Pseudocode:
    - for each unpredicted active column
      - mark all cells as active
      - mark the best matching cell as winner cell
        - (learning)
          - if it has no matching segment
            - (optimization) if there are prev winner cells
              - add a segment to it
          - mark the segment as learning

    @param activeColumns          (set)         Indices of active columns in `t`
    @param predictedActiveColumns (set)         Indices of predicted => active columns in `t`
    @param prevActiveCells        (set)         Indices of active cells in `t-1`
    @param prevWinnerCells        (set)         Indices of winner cells in `t-1`
    @param connections            (Connections) Connectivity of layer

    @return (tuple) Contains:
                    `activeCells`      (set),
                    `winnerCells`      (set),
                    `learningSegments` (set)
    """
    activeCells = set()
    winnerCells = set()
    learningSegments = set()

    unpredictedActiveColumns = activeColumns - predictedActiveColumns

    # Sort unpredictedActiveColumns before iterating for compatibility with C++
    for column in sorted(unpredictedActiveColumns):
        # Bursting: every cell in an unpredicted active column turns on.
        cells = self.cellsForColumn(column)
        activeCells.update(cells)

        (bestCell,
         bestSegment) = self.bestMatchingCell(cells,
                                              prevActiveCells,
                                              connections)
        winnerCells.add(bestCell)

        # Only grow a brand-new segment when there are prev winner cells to
        # connect it to (otherwise it would start with no synapses).
        if bestSegment is None and len(prevWinnerCells):
            bestSegment = connections.createSegment(bestCell)

        if bestSegment is not None:
            learningSegments.add(bestSegment)

    return activeCells, winnerCells, learningSegments
def learnOnSegments(self,
                    prevActiveSegments,
                    learningSegments,
                    prevActiveCells,
                    winnerCells,
                    prevWinnerCells,
                    connections,
                    predictedInactiveCells,
                    prevMatchingSegments):
    """
    Phase 3: Perform learning by adapting segments.

    Pseudocode:
    - (learning) for each prev active or learning segment
      - if learning segment or from winner cell
        - strengthen active synapses
        - weaken inactive synapses
      - if learning segment
        - add some synapses to the segment
          - subsample from prev winner cells
    - if predictedSegmentDecrement > 0
      - for each previously matching segment
        - if cell is a predicted inactive cell
          - weaken active synapses but don't touch inactive synapses

    @param prevActiveSegments     (set)         Indices of active segments in `t-1`
    @param learningSegments       (set)         Indices of learning segments in `t`
    @param prevActiveCells        (set)         Indices of active cells in `t-1`
    @param winnerCells            (set)         Indices of winner cells in `t`
    @param prevWinnerCells        (set)         Indices of winner cells in `t-1`
    @param connections            (Connections) Connectivity of layer
    @param predictedInactiveCells (set)         Indices of predicted inactive cells
    @param prevMatchingSegments   (set)         Indices of segments with matching
                                                activity in `t-1`
    """
    segments = prevActiveSegments | learningSegments

    # Sort segments before iterating for compatibility with C++
    # Sort with primary key = cell idx, secondary key = segment idx
    segments = sorted(
        segments,
        key=lambda segment: (connections.cellForSegment(segment), segment))

    for segment in segments:
        isLearningSegment = segment in learningSegments
        isFromWinnerCell = connections.cellForSegment(segment) in winnerCells

        activeSynapses = self.activeSynapsesForSegment(
            segment, prevActiveCells, connections)

        if isLearningSegment or isFromWinnerCell:
            # Reinforce synapses to previously-active cells; weaken the rest.
            self.adaptSegment(segment, activeSynapses, connections,
                              self.permanenceIncrement,
                              self.permanenceDecrement)

        if isLearningSegment:
            # Grow up to maxNewSynapseCount synapses, subsampling presynaptic
            # cells from the previous winner cells.
            n = self.maxNewSynapseCount - len(activeSynapses)

            for presynapticCell in self.pickCellsToLearnOn(n,
                                                           segment,
                                                           prevWinnerCells,
                                                           connections):
                connections.createSynapse(segment,
                                          presynapticCell,
                                          self.initialPermanence)

    if self.predictedSegmentDecrement > 0:
        for segment in prevMatchingSegments:
            isPredictedInactiveCell = connections.cellForSegment(segment) in predictedInactiveCells
            activeSynapses = self.activeSynapsesForSegment(
                segment, prevActiveCells, connections)

            if isPredictedInactiveCell:
                # Punish only the active synapses of segments that predicted
                # a column which failed to become active.
                self.adaptSegment(segment, activeSynapses, connections,
                                  -self.predictedSegmentDecrement,
                                  0.0)
def computePredictiveCells(self, activeCells, connections):
    """
    Phase 4: Compute predictive cells due to lateral input
    on distal dendrites.

    Pseudocode:
    - for each distal dendrite segment with activity >= activationThreshold
      - mark the segment as active
      - mark the cell as predictive
    - if predictedSegmentDecrement > 0
      - for each distal dendrite segment with unconnected
        activity >= minThreshold
        - mark the segment as matching
        - mark the cell as matching

    Forward propagates activity from active cells to the synapses that touch
    them, to determine which synapses are active.

    @param activeCells (set)         Indices of active cells in `t`
    @param connections (Connections) Connectivity of layer

    @return (tuple) Contains:
                    `activeSegments`   (set),
                    `predictiveCells`  (set),
                    `matchingSegments` (set),
                    `matchingCells`    (set)
    """
    # Per-segment tallies of active synapses; connected counts feed the
    # activation threshold, raw positive-permanence counts feed "matching".
    numActiveConnectedSynapsesForSegment = defaultdict(int)
    numActiveSynapsesForSegment = defaultdict(int)

    activeSegments = set()
    predictiveCells = set()

    matchingSegments = set()
    matchingCells = set()

    for cell in activeCells:
        for synapseData in connections.synapsesForPresynapticCell(cell).values():
            segment = synapseData.segment
            permanence = synapseData.permanence

            if permanence >= self.connectedPermanence:
                numActiveConnectedSynapsesForSegment[segment] += 1

                if (numActiveConnectedSynapsesForSegment[segment] >=
                    self.activationThreshold):
                    activeSegments.add(segment)
                    predictiveCells.add(connections.cellForSegment(segment))

            # Matching counts every positive-permanence synapse (connected or
            # not); only maintained when punishment is enabled.
            if permanence > 0 and self.predictedSegmentDecrement > 0:
                numActiveSynapsesForSegment[segment] += 1

                if numActiveSynapsesForSegment[segment] >= self.minThreshold:
                    matchingSegments.add(segment)
                    matchingCells.add(connections.cellForSegment(segment))

    return activeSegments, predictiveCells, matchingSegments, matchingCells
# ==============================
# Helper functions
# ==============================
def bestMatchingCell(self, cells, activeCells, connections):
    """
    Gets the cell with the best matching segment
    (see `TM.bestMatchingSegment`) that has the largest number of active
    synapses of all best matching segments.

    If none were found, pick the least used cell (see `TM.leastUsedCell`).

    @param cells       (set)         Indices of cells
    @param activeCells (set)         Indices of active cells
    @param connections (Connections) Connectivity of layer
    @return (tuple) Contains:
                    `cell`        (int),
                    `bestSegment` (int)
    """
    bestCell = None
    bestSegment = None
    bestScore = 0

    for candidate in cells:
        segment, score = self.bestMatchingSegment(
            candidate, activeCells, connections)
        # Strictly-greater comparison: first best wins ties.
        if segment is None or score <= bestScore:
            continue
        bestCell, bestSegment, bestScore = candidate, segment, score

    if bestCell is None:
        bestCell = self.leastUsedCell(cells, connections)

    return bestCell, bestSegment
def bestMatchingSegment(self, cell, activeCells, connections):
    """
    Gets the segment on a cell with the largest number of active synapses,
    including all synapses with non-zero permanences.

    Only segments reaching `minThreshold` active synapses qualify; on equal
    counts the later segment wins (>= comparison).

    @param cell        (int)         Cell index
    @param activeCells (set)         Indices of active cells
    @param connections (Connections) Connectivity of layer
    @return (tuple) Contains:
                    `segment`        (int),
                    `numActiveSynapses` (int)
    """
    bestSegment = None
    bestCount = None
    threshold = self.minThreshold

    for segment in connections.segmentsForCell(cell):
        count = 0
        for synapse in connections.synapsesForSegment(segment):
            data = connections.dataForSynapse(synapse)
            if data.permanence > 0 and data.presynapticCell in activeCells:
                count += 1

        if count >= threshold:
            threshold = count
            bestSegment = segment
            bestCount = count

    return bestSegment, bestCount
def leastUsedCell(self, cells, connections):
    """
    Gets the cell with the smallest number of segments.
    Break ties randomly.

    @param cells       (set)         Indices of cells
    @param connections (Connections) Connectivity of layer
    @return (int) Cell index
    """
    fewest = float("inf")
    candidates = set()

    for cell in cells:
        count = len(connections.segmentsForCell(cell))
        if count < fewest:
            # New minimum: restart the candidate pool with this cell.
            fewest = count
            candidates = {cell}
        elif count == fewest:
            candidates.add(cell)

    # Deterministic ordering before the random draw keeps results
    # reproducible for a seeded RNG.
    choice = self._random.getUInt32(len(candidates))
    return sorted(candidates)[choice]
@staticmethod
def activeSynapsesForSegment(segment, activeCells, connections):
    """
    Returns the synapses on a segment that are active due to lateral input
    from active cells.

    @param segment     (int)         Segment index
    @param activeCells (set)         Indices of active cells
    @param connections (Connections) Connectivity of layer
    @return (set) Indices of active synapses on segment
    """
    return set(
        synapse
        for synapse in connections.synapsesForSegment(segment)
        if connections.dataForSynapse(synapse).presynapticCell in activeCells
    )
def adaptSegment(self, segment, activeSynapses, connections,
                 permanenceIncrement, permanenceDecrement):
    """
    Updates synapses on segment.

    Strengthens active synapses; weakens inactive synapses.  Synapses whose
    permanence falls (near) to zero are destroyed outright.

    @param segment              (int)   Segment index
    @param activeSynapses       (set)   Indices of active synapses
    @param connections          (Connections) Connectivity of layer
    @param permanenceIncrement  (float) Amount to increment active synapses
    @param permanenceDecrement  (float) Amount to decrement inactive synapses
    """
    # Need to copy synapses for segment set below because it will be modified
    # during iteration by `destroySynapse`
    for synapse in set(connections.synapsesForSegment(segment)):
        synapseData = connections.dataForSynapse(synapse)
        permanence = synapseData.permanence

        if synapse in activeSynapses:
            permanence += permanenceIncrement
        else:
            permanence -= permanenceDecrement

        # Keep permanence within min/max bounds
        permanence = max(0.0, min(1.0, permanence))

        # EPSILON is a module-level float-comparison tolerance: permanences
        # that collapse to (near) zero mean the synapse is effectively gone.
        if (permanence < EPSILON):
            connections.destroySynapse(synapse)
        else:
            connections.updateSynapsePermanence(synapse, permanence)
def pickCellsToLearnOn(self, n, segment, winnerCells, connections):
    """
    Pick cells to form distal connections to.

    TODO: Respect topology and learningRadius

    @param n           (int)         Number of cells to pick
    @param segment     (int)         Segment index
    @param winnerCells (set)         Indices of winner cells in `t`
    @param connections (Connections) Connectivity of layer
    @return (set) Indices of cells picked
    """
    eligible = set(winnerCells)

    # Remove cells that are already synapsed on by this segment.
    for synapse in connections.synapsesForSegment(segment):
        presynaptic = connections.dataForSynapse(synapse).presynapticCell
        eligible.discard(presynaptic)

    # Sort before the random draws so a seeded RNG is reproducible.
    pool = sorted(eligible)
    numToPick = min(n, len(pool))

    picked = set()
    for _ in range(numToPick):
        index = self._random.getUInt32(len(pool))
        picked.add(pool.pop(index))

    return picked
def columnForCell(self, cell):
    """
    Returns the index of the column that a cell belongs to.

    @param cell (int) Cell index
    @return (int) Column index
    @raises IndexError if `cell` is out of range (via _validateCell)
    """
    self._validateCell(cell)

    # Floor division instead of `int(cell / cellsPerColumn)`: true division
    # goes through float and can lose precision for very large indices.
    return cell // self.cellsPerColumn
def cellsForColumn(self, column):
    """
    Returns the indices of cells that belong to a column.

    @param column (int) Column index
    @return (set) Cell indices
    @raises IndexError if `column` is out of range (via _validateColumn)
    """
    self._validateColumn(column)

    # Cells of one column occupy a contiguous index range of length
    # cellsPerColumn.  NOTE: `xrange` marks this file as Python 2.
    start = self.cellsPerColumn * self.getCellIndex(column)
    end = start + self.cellsPerColumn
    return set(xrange(start, end))
def numberOfColumns(self):
    """
    Returns the number of columns in this layer.

    The column topology may be multi-dimensional; the count is the product
    of all dimensions (1 for an empty dimension list).

    @return (int) Number of columns
    """
    total = 1
    for dimension in self.columnDimensions:
        total *= dimension
    return total
def numberOfCells(self):
    """
    Returns the number of cells in this layer.

    @return (int) Number of cells
    """
    cellsPerColumn = self.cellsPerColumn
    return cellsPerColumn * self.numberOfColumns()
def getActiveCells(self):
    """
    Returns the indices of the active cells.

    @return (list) Indices of active cells.
    """
    cells = self.activeCells
    return self.getCellIndices(cells)

def getPredictiveCells(self):
    """
    Returns the indices of the predictive cells.

    @return (list) Indices of predictive cells.
    """
    cells = self.predictiveCells
    return self.getCellIndices(cells)

def getWinnerCells(self):
    """
    Returns the indices of the winner cells.

    @return (list) Indices of winner cells.
    """
    cells = self.winnerCells
    return self.getCellIndices(cells)

def getMatchingCells(self):
    """
    Returns the indices of the matching cells.

    @return (list) Indices of matching cells.
    """
    cells = self.matchingCells
    return self.getCellIndices(cells)

def getCellsPerColumn(self):
    """
    Returns the number of cells per column.

    @return (int) The number of cells per column.
    """
    return self.cellsPerColumn
def mapCellsToColumns(self, cells):
    """
    Maps cells to the columns they belong to.

    @param cells (set) Cells
    @return (dict) Mapping from columns to their cells in `cells`
    """
    byColumn = defaultdict(set)
    for cell in cells:
        byColumn[self.columnForCell(cell)].add(cell)
    return byColumn
def write(self, proto):
    """
    Writes serialized data to proto object.

    @param proto (DynamicStructBuilder) Proto object
    """
    # Scalar configuration parameters.
    proto.columnDimensions = self.columnDimensions
    proto.cellsPerColumn = self.cellsPerColumn
    proto.activationThreshold = self.activationThreshold
    proto.initialPermanence = self.initialPermanence
    proto.connectedPermanence = self.connectedPermanence
    proto.minThreshold = self.minThreshold
    proto.maxNewSynapseCount = self.maxNewSynapseCount
    proto.permanenceIncrement = self.permanenceIncrement
    proto.permanenceDecrement = self.permanenceDecrement
    proto.predictedSegmentDecrement = self.predictedSegmentDecrement

    # Nested structures delegate to their own serializers.
    self.connections.write(proto.connections)
    self._random.write(proto.random)

    # Volatile state: sets become lists for the proto schema (order is
    # unspecified; `read` restores them into sets).
    proto.activeCells = list(self.activeCells)
    proto.predictiveCells = list(self.predictiveCells)
    proto.activeSegments = list(self.activeSegments)
    proto.winnerCells = list(self.winnerCells)
    proto.matchingSegments = list(self.matchingSegments)
    proto.matchingCells = list(self.matchingCells)
@classmethod
def read(cls, proto):
    """
    Reads deserialized data from proto object.

    @param proto (DynamicStructBuilder) Proto object
    @return (TemporalMemory) TemporalMemory instance
    """
    # Bypass __init__: every attribute is restored explicitly below.
    tm = object.__new__(cls)

    tm.columnDimensions = list(proto.columnDimensions)
    tm.cellsPerColumn = int(proto.cellsPerColumn)
    tm.activationThreshold = int(proto.activationThreshold)
    tm.initialPermanence = proto.initialPermanence
    tm.connectedPermanence = proto.connectedPermanence
    tm.minThreshold = int(proto.minThreshold)
    tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)
    tm.permanenceIncrement = proto.permanenceIncrement
    tm.permanenceDecrement = proto.permanenceDecrement
    tm.predictedSegmentDecrement = proto.predictedSegmentDecrement

    # Nested structures are rebuilt by their own deserializers.
    tm.connections = Connections.read(proto.connections)
    tm._random = Random()
    tm._random.read(proto.random)

    # Proto list fields are normalised back to sets of plain ints.
    tm.activeCells = set([int(x) for x in proto.activeCells])
    tm.predictiveCells = set([int(x) for x in proto.predictiveCells])
    tm.activeSegments = set([int(x) for x in proto.activeSegments])
    tm.winnerCells = set([int(x) for x in proto.winnerCells])
    tm.matchingSegments = set([int(x) for x in proto.matchingSegments])
    tm.matchingCells = set([int(x) for x in proto.matchingCells])

    return tm
def __eq__(self, other):
    """
    Equality operator for TemporalMemory instances.

    Checks if two instances are functionally identical
    (might have different internal state).

    @param other (TemporalMemory) TemporalMemory instance to compare to
    """
    # Integer/structural parameters: exact comparison.
    if self.columnDimensions != other.columnDimensions: return False
    if self.cellsPerColumn != other.cellsPerColumn: return False
    if self.activationThreshold != other.activationThreshold: return False

    # Float parameters: compared with the module-level EPSILON tolerance.
    if abs(self.initialPermanence - other.initialPermanence) > EPSILON:
        return False
    if abs(self.connectedPermanence - other.connectedPermanence) > EPSILON:
        return False

    if self.minThreshold != other.minThreshold: return False
    if self.maxNewSynapseCount != other.maxNewSynapseCount: return False

    if abs(self.permanenceIncrement - other.permanenceIncrement) > EPSILON:
        return False
    if abs(self.permanenceDecrement - other.permanenceDecrement) > EPSILON:
        return False
    if abs(self.predictedSegmentDecrement - other.predictedSegmentDecrement) > EPSILON:
        return False

    if self.connections != other.connections: return False

    # Volatile state.
    # NOTE(review): `matchingSegments` is compared but `activeSegments` is
    # not -- possibly deliberate ("might have different internal state"),
    # worth confirming against upstream.
    if self.activeCells != other.activeCells: return False
    if self.predictiveCells != other.predictiveCells: return False
    if self.winnerCells != other.winnerCells: return False
    if self.matchingSegments != other.matchingSegments: return False
    if self.matchingCells != other.matchingCells: return False

    return True
def __ne__(self, other):
    """
    Non-equality operator for TemporalMemory instances.

    Checks if two instances are not functionally identical
    (might have different internal state).

    @param other (TemporalMemory) TemporalMemory instance to compare to
    """
    # Delegates to __eq__ so both operators always agree.
    return not self.__eq__(other)
def _validateColumn(self, column):
    """
    Raises an error if column index is invalid.

    @param column (int) Column index
    @raises IndexError when column is outside [0, numberOfColumns())
    """
    if column >= self.numberOfColumns() or column < 0:
        raise IndexError("Invalid column")

def _validateCell(self, cell):
    """
    Raises an error if cell index is invalid.

    @param cell (int) Cell index
    @raises IndexError when cell is outside [0, numberOfCells())
    """
    if cell >= self.numberOfCells() or cell < 0:
        raise IndexError("Invalid cell")
@classmethod
def getCellIndices(cls, cells):
    """Return the index of every cell in `cells`, preserving iteration order.

    @param cells (iterable) Cell identifiers
    @return (list) Cell indices
    """
    return [cls.getCellIndex(c) for c in cells]

@staticmethod
def getCellIndex(cell):
    """Identity mapping: in this implementation a cell IS its index.

    Subclasses with richer cell objects can override this hook.
    """
    return cell
| agpl-3.0 |
borosnborea/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py | 352 | 2776 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request."""

    def __init__(self, *args, **kwargs):
        """
        Initialize RequestException with `request` and `response` objects.

        Both are accepted as keyword arguments and stripped before the
        remaining args reach IOError.
        """
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)

        # Fall back to the request recorded on the response, if any.
        if self.response is not None and not self.request:
            self.request = getattr(self.response, 'request', None)

        super(RequestException, self).__init__(*args, **kwargs)
# Concrete exception hierarchy. All inherit from RequestException so that
# `except RequestException` catches every error raised by this library.

class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


# The schema/URL errors double as ValueError so callers validating input
# can catch them either way.

class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""


class InvalidURL(RequestException, ValueError):
    """ The URL provided was somehow invalid. """


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""


class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""


class RetryError(RequestException):
    """Custom retries logic failed"""
# Warnings
#
# Non-fatal conditions surfaced through the `warnings` machinery rather
# than raised as exceptions.

class RequestsWarning(Warning):
    """Base warning for Requests."""
    pass


class FileModeWarning(RequestsWarning, DeprecationWarning):
    """
    A file was opened in text mode, but Requests determined its binary length.
    """
    pass
| gpl-3.0 |
mrjacobagilbert/gnuradio | gr-digital/python/digital/qa_lfsr.py | 5 | 1405 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import math
import numpy as np
from gnuradio import gr, gr_unittest, digital
from gnuradio.digital.utils import lfsr_args
class test_lfsr(gr_unittest.TestCase):
    """Unit tests for digital.lfsr (linear-feedback shift register)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_lfsr_001(self):
        # A degree-8 register seeded with 1 and mask 1 emits a single 1
        # followed by `reglen` zeros, repeating with period reglen + 1.
        reglen = 8
        l = digital.lfsr(1, 1, reglen)

        result_data = []
        for i in range(4 * (reglen + 1)):
            result_data.append(l.next_bit())

        expected_result = 4 * ([1, ] + reglen * [0, ])
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)

    def test_lfsr_002(self):
        # x^5 + x^3 + 1 is a maximal-length polynomial: period 2**5 - 1.
        l = digital.lfsr(*lfsr_args(0b1,5,3,0))
        result_data = [l.next_bit() for _ in range(2*(2**5-1))]

        expected_result = [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0,
                           0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0]*2
        self.assertEqual(expected_result, result_data)

        # The stream must repeat with exactly its full period ...
        seq1 = [l.next_bit() for _ in range(2**5-1)]
        seq2 = [l.next_bit() for _ in range(2**5-1)]
        self.assertEqual(seq1,seq2)

        # ... and convolving with the generator polynomial (mod 2) must
        # annihilate the interior of a maximal-length sequence.
        res = (np.convolve(seq1,[1,0,1,0,0,1])%2)
        self.assertTrue(sum(res[5:-5])==0,"LRS not generated properly")


if __name__ == '__main__':
    gr_unittest.run(test_lfsr)
| gpl-3.0 |
alexlovelltroy/namebench | libnamebench/better_webbrowser.py | 175 | 4191 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os.path
import subprocess
import sys
import traceback
import webbrowser
import util
def output(string):
    """Print *string* to stdout.

    Tiny indirection used throughout this module so all user-visible
    messages go through one place.

    @param string (str) message to emit
    """
    # `print(string)` is valid Python 2 (parenthesised single expression)
    # AND Python 3, unlike the original `print string` statement which is a
    # syntax error under Python 3.
    print(string)
def create_win32_http_cmd(url):
    """Create a command-line tuple to launch a web browser for a given URL.

    Args:
      url: string

    Returns:
      tuple of: (executable, arg1, arg2, ...), or False when no usable
      handler could be determined.

    At the moment, this ignores all default arguments to the browser.
    TODO(tstromberg): Properly parse the command-line arguments.
    """
    browser_type = None
    # Prefer the per-user handler, falling back to the machine-wide one.
    # NOTE(review): the backslash sequences in these literals happen not to
    # be escape codes, but raw strings would be safer.
    try:
        key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                              'Software\Classes\http\shell\open\command')
        browser_type = 'user'
    except WindowsError:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                              'Software\Classes\http\shell\open\command')
        browser_type = 'machine'
    except:
        # NOTE(review): bare except maps any other failure to False and
        # hides the cause.
        return False

    cmd = _winreg.EnumValue(key, 0)[1]
    # "C:\blah blah\iexplore.exe" -nohome
    # "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
    if '"' in cmd:
        executable = cmd.split('"')[1]
    else:
        executable = cmd.split(' ')[0]

    if not os.path.exists(executable):
        output('$ Default HTTP browser does not exist: %s' % executable)
        return False
    else:
        output('$ %s HTTP handler: %s' % (browser_type, executable))
    return (executable, url)
def open(url):
    """Opens a URL, overriding the normal webbrowser.open methods for sanity.

    Tries webbrowser.open first; on failure falls back to /usr/bin/open
    (OS X LaunchServices) or the stock Windows controller.

    NOTE: this function shadows the `open` builtin within this module.

    @param url (str) URL to open in a browser
    """
    try:
        webbrowser.open(url, new=1, autoraise=True)
    # If the user is missing the osascript binary - see
    # http://code.google.com/p/namebench/issues/detail?id=88
    except:
        output('Failed to open: [%s]: %s' % (url, util.GetLastExceptionString()))

        # OS X fallback: hand the URL to the `open` command.
        if os.path.exists('/usr/bin/open'):
            try:
                output('trying open: %s' % url)
                p = subprocess.Popen(('open', url))
                p.wait()
            except:
                output('open did not seem to work: %s' % util.GetLastExceptionString())
        # Windows fallback: the built-in "windows-default" controller.
        elif sys.platform[:3] == 'win':
            try:
                output('trying default Windows controller: %s' % url)
                controller = webbrowser.get('windows-default')
                controller.open_new(url)
            except:
                output('WindowsController did not work: %s' % util.GetLastExceptionString())
# *NOTE*: EVIL IMPORT SIDE EFFECTS AHEAD!
#
# If we are running on Windows, register the WindowsHttpDefault class.
if sys.platform[:3] == 'win':
    import _winreg

    # We don't want to load this class by default, because Python 2.4 doesn't have BaseBrowser.
    class WindowsHttpDefault(webbrowser.BaseBrowser):
        """Provide an alternate open class for Windows user, using the http handler."""

        def open(self, url, new=0, autoraise=1):
            """Launch the registry-configured HTTP handler on `url`.

            @return (bool) True when the handler process was spawned.
            """
            command_args = create_win32_http_cmd(url)
            if not command_args:
                output('$ Could not find HTTP handler')
                return False

            output('command_args:')
            output(command_args)

            # Avoid some unicode path issues by moving our current directory
            old_pwd = os.getcwd()
            os.chdir('C:\\')
            try:
                _unused = subprocess.Popen(command_args)
                os.chdir(old_pwd)
                return True
            except:
                traceback.print_exc()
                output('$ Failed to run HTTP handler, trying next browser.')
                os.chdir(old_pwd)
                return False

    # update_tryorder=-1 gives this controller top priority.
    webbrowser.register('windows-http', WindowsHttpDefault, update_tryorder=-1)
| apache-2.0 |
TarasRudnyk/scrapy | scrapy/core/downloader/webclient.py | 115 | 5048 | from time import time
from six.moves.urllib.parse import urlparse, urlunparse, urldefrag
from twisted.web.client import HTTPClientFactory
from twisted.web.http import HTTPClient
from twisted.internet import defer
from scrapy.http import Headers
from scrapy.utils.httpobj import urlparse_cached
from scrapy.responsetypes import responsetypes
def _parsed_url_args(parsed):
    """Split a urlparse result into (scheme, netloc, host, port, path).

    `path` here re-includes params and the query string; the port defaults
    to 443 for https and 80 otherwise.
    """
    # Rebuild just the path + params + query portion; empty path becomes "/".
    path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
    scheme = parsed.scheme
    port = parsed.port
    if port is None:
        port = 443 if scheme == 'https' else 80
    return scheme, parsed.netloc, parsed.hostname, port, path
def _parse(url):
    """Strip and parse `url`, returning (scheme, netloc, host, port, path).

    Thin wrapper over _parsed_url_args for callers holding a raw string.
    """
    url = url.strip()
    parsed = urlparse(url)
    return _parsed_url_args(parsed)
class ScrapyHTTPPageGetter(HTTPClient):
    """Twisted HTTPClient that relays status, headers and body events to
    its ScrapyHTTPClientFactory."""

    delimiter = '\n'

    def connectionMade(self):
        """Send the buffered request (method, headers, body) on connect."""
        self.headers = Headers() # bucket for response headers

        # Method command
        self.sendCommand(self.factory.method, self.factory.path)
        # Headers
        for key, values in self.factory.headers.items():
            for value in values:
                self.sendHeader(key, value)
        self.endHeaders()
        # Body
        if self.factory.body is not None:
            self.transport.write(self.factory.body)

    def lineReceived(self, line):
        # Strip the trailing CR so both \r\n and bare \n header lines parse.
        return HTTPClient.lineReceived(self, line.rstrip())

    def handleHeader(self, key, value):
        self.headers.appendlist(key, value)

    def handleStatus(self, version, status, message):
        self.factory.gotStatus(version, status, message)

    def handleEndHeaders(self):
        self.factory.gotHeaders(self.headers)

    def connectionLost(self, reason):
        # Remember why the connection dropped; handleResponse uses it to
        # report truncated downloads.
        self._connection_lost_reason = reason
        HTTPClient.connectionLost(self, reason)
        self.factory.noPage(reason)

    def handleResponse(self, response):
        if self.factory.method.upper() == 'HEAD':
            # HEAD responses never carry a body.
            self.factory.page('')
        elif self.length is not None and self.length > 0:
            # Connection closed before Content-Length bytes arrived.
            self.factory.noPage(self._connection_lost_reason)
        else:
            self.factory.page(response)
        self.transport.loseConnection()

    def timeout(self):
        self.transport.loseConnection()
        self.factory.noPage(\
            defer.TimeoutError("Getting %s took longer than %s seconds." % \
                (self.factory.url, self.factory.timeout)))
class ScrapyHTTPClientFactory(HTTPClientFactory):
    """Scrapy implementation of the HTTPClientFactory overwriting the
    serUrl method to make use of our Url object that cache the parse
    result.
    """

    protocol = ScrapyHTTPPageGetter

    waiting = 1
    noisy = False
    followRedirect = False
    afterFoundGet = False

    def __init__(self, request, timeout=180):
        """Capture everything the protocol needs from `request`.

        @param request: the scrapy Request being downloaded
        @param timeout: fallback timeout (seconds) when request.meta has no
                        `download_timeout`
        """
        self.url = urldefrag(request.url)[0]
        self.method = request.method
        self.body = request.body or None
        self.headers = Headers(request.headers)
        self.response_headers = None
        self.timeout = request.meta.get('download_timeout') or timeout
        self.start_time = time()
        self.deferred = defer.Deferred().addCallback(self._build_response, request)

        # Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected
        # to have _disconnectedDeferred. See Twisted r32329.
        # As Scrapy implements it's own logic to handle redirects is not
        # needed to add the callback _waitForDisconnect.
        # Specifically this avoids the AttributeError exception when
        # clientConnectionFailed method is called.
        self._disconnectedDeferred = defer.Deferred()

        self._set_connection_attributes(request)

        # set Host header based on url
        self.headers.setdefault('Host', self.netloc)

        # set Content-Length based len of body
        if self.body is not None:
            self.headers['Content-Length'] = len(self.body)
            # just in case a broken http/1.1 decides to keep connection alive
            self.headers.setdefault("Connection", "close")
        # Content-Length must be specified in POST method even with no body
        elif self.method == 'POST':
            self.headers['Content-Length'] = 0

    def _build_response(self, body, request):
        """Assemble the scrapy Response once the body has arrived."""
        request.meta['download_latency'] = self.headers_time-self.start_time
        status = int(self.status)
        headers = Headers(self.response_headers)
        respcls = responsetypes.from_args(headers=headers, url=self.url)
        return respcls(url=self.url, status=status, headers=headers, body=body)

    def _set_connection_attributes(self, request):
        """Derive scheme/netloc/host/port/path, honouring `proxy` in meta."""
        parsed = urlparse_cached(request)
        self.scheme, self.netloc, self.host, self.port, self.path = _parsed_url_args(parsed)
        proxy = request.meta.get('proxy')
        if proxy:
            # When proxied: connect to the proxy, request the absolute URL.
            self.scheme, _, self.host, self.port, _ = _parse(proxy)
            self.path = self.url

    def gotHeaders(self, headers):
        # Timestamp header arrival; _build_response uses it for latency.
        self.headers_time = time()
        self.response_headers = headers
| bsd-3-clause |
garthylou/pootle | pootle/core/utils/templates.py | 7 | 1320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import find_template_loader
def get_template_source(name, dirs=None):
    """Retrieves the template's source contents.

    :param name: Template's filename, as passed to the template loader.
    :param dirs: list of directories to optionally override the defaults.
    :return: tuple including file contents and file path.
    :raises TemplateDoesNotExist: when no configured loader can resolve
        `name`.
    """
    loaders = []
    for loader_name in settings.TEMPLATE_LOADERS:
        loader = find_template_loader(loader_name)
        if loader is not None:
            # The cached loader includes the actual loaders underneath
            if hasattr(loader, 'loaders'):
                loaders.extend(loader.loaders)
            else:
                loaders.append(loader)

    # First loader that can resolve the template wins.
    for loader in loaders:
        try:
            return loader.load_template_source(name, template_dirs=dirs)
        except TemplateDoesNotExist:
            pass
    raise TemplateDoesNotExist(name)
| gpl-3.0 |
ecell/libmoleculizer | python-src/language_parser/moleculizer/moleculizerrules.py | 1 | 21379 | ###############################################################################
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
# Original Author:
# Nathan Addy, Scientific Programmer Email: addy@molsci.org
# The Molecular Sciences Institute
#
###############################################################################
import pdb
import re
import util
from xmlobject import XmlObject
import StringIO
from sectionparameter import SymbolicExpressionEvaluator
from sectionmodifications import ModificationsSection
from sectionmols import MolsSection
from sectionallostery import AllostericPlexesSection, AllostericOmnisSection
from sectionreactionrules import ReactionRulesSection
from sectionspeciesstreams import SpeciesStreamsSection
from sectionexplicitspeciesblock import ExplicitSpeciesSection
from moleculizer_xcpt import *
class MoleculizerRulesFile:
"""
This object acts as an parsing thing that outputs moleculizer files xml,
suitable for processing internally by a mzr::moleculizer instance."""
def BlockPassesSanityCheck(linearray):
    """
    Return True iff every non-blank line is a single semicolon-terminated
    statement containing no internal whitespace.

    An empty (or all-blank) block is trivially sane.

    @param linearray (list of str) candidate statement lines
    @return (bool)
    """
    lines = [line for line in linearray if line.strip() != ""]
    if not lines:
        return True

    # BUG FIX: the original contained the fused token `1for` (missing space
    # in the list comprehension), which is invalid syntax; the explicit
    # loop below also drops the reduce(util.And, ...) folding.
    for line in lines:
        # Exactly one ';', and it must be the final character.
        if line[-1] != ";" or line.count(";") != 1:
            return False
        # No embedded whitespace of any kind.
        if ("\n" in line) or (" " in line) or ("\t" in line):
            return False
    return True

BlockPassesSanityCheck = staticmethod(BlockPassesSanityCheck)
def addWholeRulesString( self, rulesString):
    """
    Parse an entire rules document supplied as a string and install every
    parsed section into this object.

    @param rulesString (str) complete text of a moleculizer rules file
    """
    # NOTE(review): message says "file" but interpolates the whole rules
    # text, which may be very large.
    print "Reading file '%s' " % rulesString

    lines = rulesString.split("\n")

    # parseBlockTypesFromRulesFile (defined elsewhere in this module)
    # splits raw lines into one statement list per section.
    parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis,\
        reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
        explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( lines )

    self.addParameterBlock( parameterBlock )
    self.addModicationsBlock( modificationsBlock )
    self.addMolsBlock( molsBlock )
    self.addAllostericPlexesBlock( allostericPlexes )
    self.addAllostericOmnisBlock( allostericOmnis )
    self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
                                omniGenBlock, [] )
    self.addExplicitSpeciesBlock( explicitSpeciesBlock )
    self.addSpeciesStreamsBlock( speciesStreamBlock )

    return
def addWholeRulesFile(self, rulesFile):
    """
    Parse a rules file from disk and install every parsed section.

    Mirrors addWholeRulesString; both run the same section-installing
    sequence.

    @param rulesFile (str) path to a moleculizer rules file
    """
    # NOTE(review): the file handle is never explicitly closed; CPython's
    # refcounting closes it promptly, but a `with` block would be safer.
    parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis, \
        reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
        explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( open(rulesFile).readlines() )

    self.addParameterBlock( parameterBlock )
    self.addModicationsBlock( modificationsBlock )
    self.addMolsBlock( molsBlock )
    self.addAllostericPlexesBlock( allostericPlexes )
    self.addAllostericOmnisBlock( allostericOmnis )
    self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
                                omniGenBlock, [] )
    self.addExplicitSpeciesBlock( explicitSpeciesBlock )
    self.addSpeciesStreamsBlock( speciesStreamBlock )

    return
def addParameterStatement(self, paramStatement):
    """Append one parameter statement and rebuild the expression evaluator.

    @param paramStatement (str) raw statement text
    """
    paramStatement = self.PreProcessStatement( paramStatement )

    print "Adding param line: '%s'" % paramStatement
    self.parameterBlock.append( paramStatement)

    # Rebuild so the new parameter is usable immediately.
    self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )

    return

def addModificationStatement(self, modLine):
    """Append one modification statement and rebuild its section.

    @param modLine (str) raw statement text
    """
    modLine = self.PreProcessStatement( modLine )

    print "Adding mod line: '%s'" % modLine
    self.modificationsBlock.append( modLine)
    self.modificationsSection = ModificationsSection( self.modificationsBlock )

    return
def addMolsStatement(self, molsLine):
    """
    Append one mols statement and rebuild the mols section.

    @param molsLine (str) raw statement text
    """
    molsLine = self.PreProcessStatement(molsLine)

    self.molsBlock.append(molsLine)

    # BUG FIX: the original constructed MolsSection(molsBlock), referencing
    # an undefined bare name (NameError at call time).  The accumulated
    # statements live on self.molsBlock.
    self.molsSection = MolsSection(self.molsBlock)

    return
def addAllostericPlexStatement(self, alloPlexLine):
    """Append one allosteric-plex statement and rebuild its section.

    @param alloPlexLine (str) raw statement text
    """
    alloPlexLine = self.PreProcessStatement( alloPlexLine )

    self.allostericPlexes.append( alloPlexLine )
    self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )

    return

def addAllostericOmniStatement(self, alloOmniLine):
    """Append one allosteric-omni statement and rebuild its section.

    @param alloOmniLine (str) raw statement text
    """
    alloOmniLine = self.PreProcessStatement( alloOmniLine )

    self.allostericOmnis.append( alloOmniLine )
    self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )

    return

def addDimerizationGenStatement(self, dimerGenLine):
    """Append one dimerization-gen statement and rebuild the reaction gens.

    @param dimerGenLine (str) raw statement text
    """
    dimerGenLine = self.PreProcessStatement( dimerGenLine )

    self.dimerizationGenBlock.append(dimerGenLine)

    # All four gen blocks feed a single ReactionRulesSection.
    self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
                                                    self.dimerizationGenBlock,
                                                    self.omniGenBlock,
                                                    self.uniMolGenBlock)

    return
def addOmniGenStatement(self, omniGenLine):
    """
    Append one omni-gen statement and rebuild the reaction-gens section.

    @param omniGenLine (str) raw statement text
    """
    omniGenLine = self.PreProcessStatement(omniGenLine)

    # BUG FIX: the original appended to `self.omniGenLine` (shadowing the
    # parameter name), an attribute never initialised in __init__, so every
    # call raised AttributeError.  The statement belongs on
    # `self.omniGenBlock`, matching the sibling add*GenStatement methods.
    self.omniGenBlock.append(omniGenLine)

    self.reactionGensSection = ReactionRulesSection(self.reactionRulesBlock,
                                                    self.dimerizationGenBlock,
                                                    self.omniGenBlock,
                                                    self.uniMolGenBlock)

    return
def addUniMolGenStatement(self, uniMolGenLine):
    """Append one uni-mol-gen statement and rebuild the reaction gens.

    @param uniMolGenLine (str) raw statement text
    """
    uniMolGenLine = self.PreProcessStatement( uniMolGenLine )

    self.uniMolGenBlock.append( uniMolGenLine )

    self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
                                                     self.dimerizationGenBlock,
                                                     self.omniGenBlock,
                                                     self.uniMolGenBlock)

    return

def addExplicitSpeciesStatement(self, explicitSpeciesStatement):
    """Append one explicit-species statement and rebuild its section.

    @param explicitSpeciesStatement (str) raw statement text
    """
    explicitSpeciesStatement = self.PreProcessStatement( explicitSpeciesStatement )

    self.explicitSpeciesBlock.append( explicitSpeciesStatement )
    self.explicitSpeciesSection = ExplicitSpeciesSection( self.explicitSpeciesBlock )

    return

def addSpeciesStreamStatement(self, speciesStreamLine):
    """Append one species-stream statement and rebuild its section.

    @param speciesStreamLine (str) raw statement text
    """
    speciesStreamLine = self.PreProcessStatement( speciesStreamLine )

    self.speciesStreamBlock.append( speciesStreamLine )
    self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )

    return
def __init__(self):
    """Create an empty rules object; content arrives via the add* methods."""
    # These are the lines of input, in one statement per line form, with no whitespace
    self.parameterBlock = []
    self.modificationsBlock = []
    self.molsBlock = []
    self.allostericPlexes = []
    self.allostericOmnis = []
    self.reactionRulesBlock = []
    self.dimerizationGenBlock = []
    self.omniGenBlock = []
    self.uniMolGenBlock = []
    self.explicitSpeciesBlock = []
    self.speciesStreamBlock = []

    # These are the objects that will be used to process the parsed
    # data.

    # A section is an intermediate between a rules file (they have lines, for example,
    # and can answer questions about what has been parsed ) and an xml section (it can
    # write out an xml section -

    # Parameters doesn't write anything out currently, but easily could
    # NOTE(review): sections are initialised to the integer 0 rather than
    # None; both are falsy, but None would be clearer.
    self.parameterSection = 0
    self.modificationsSection = 0
    self.molsSection = 0
    self.allostericPlexesSection = 0
    self.allostericOmnisSection = 0
    self.reactionGensSection = 0
    self.explicitSpeciesSection = 0
    self.speciesStreamSection = 0
def getOutputFileName(self):
    """Return the target xml filename.

    NOTE(review): `self.outputFileName` is never assigned in the visible
    __init__ -- presumably set elsewhere; calling this before that raises
    AttributeError.  Confirm against the rest of the class.
    """
    return self.outputFileName

def write(self):
    """Open the output file and serialise the xml document into it.

    The handle is kept on self.openXmlFile; call close() afterwards.
    """
    self.openXmlFile = open(self.outputFileName, 'w')
    self.__writeOutput(self.openXmlFile)
    return

def writeToString(self):
    """Serialise the xml document and return it as a string."""
    myString = StringIO.StringIO()
    self.__writeOutput( myString )
    return myString.getvalue()

def close(self):
    """Close the file previously opened by write()."""
    self.openXmlFile.close()
def addParameterBlock(self, parameterBlock, overwrite = False):
    """Install the parameter block and build the expression evaluator.

    @param parameterBlock (list) sanity-checked statement lines
    @param overwrite (bool) allow replacing an existing block
    @raises MoleculizerException when a block exists and overwrite is False
    @raises InsaneBlockOnTheLooseException when sanity checks fail
    """
    if self.parameterBlock and not overwrite:
        raise MzrExceptions.MoleculizerException("Error: Cannot add a parameter block twice.")

    if not self.BlockPassesSanityCheck( parameterBlock ):
        raise InsaneBlockOnTheLooseException(parameterBlock, "parameter block")

    # Defensive copy: later mutation of the caller's list has no effect.
    self.parameterBlock = parameterBlock[:]
    self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )

def addModicationsBlock(self, modificationsBlock, overwrite = False):
    """Install the modifications block and build its section.

    NOTE(review): the method name is misspelled ("Modications") but is kept
    as-is because external callers use it.

    @param modificationsBlock (list) sanity-checked statement lines
    @param overwrite (bool) allow replacing an existing block
    @raises MoleculizerException when a block exists and overwrite is False
    @raises InsaneBlockOnTheLooseException when sanity checks fail
    """
    if self.modificationsBlock and not overwrite:
        raise MzrExceptions.MoleculizerException("Error: Cannot add a modifications block twice.")

    if not self.BlockPassesSanityCheck( modificationsBlock ):
        raise InsaneBlockOnTheLooseException(modificationsBlock, "modifications block")

    self.modificationsBlock = modificationsBlock[:]
    self.modificationsSection = ModificationsSection( self.modificationsBlock )

    return
def addMolsBlock(self, molsBlock):
if self.molsBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a mols block twice.")
if not self.BlockPassesSanityCheck( molsBlock ):
raise InsaneBlockOnTheLooseException(molsBlock, "mols block")
self.molsBlock = molsBlock[:]
self.molsSection = MolsSection( molsBlock )
def addAllostericPlexesBlock(self, apBlock, overwrite = False):
if self.allostericPlexes and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric plexes block twice.")
if not self.BlockPassesSanityCheck( apBlock ):
raise InsaneBlockOnTheLooseException(apBlock, "allosteric plexes block")
self.allostericPlexes = apBlock[:]
self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )
def addAllostericOmnisBlock(self, aoBlock, overwrite = False):
if self.allostericOmnis and not overwrite: raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric omnis block twice.")
if not self.BlockPassesSanityCheck( aoBlock ):
raise InsaneBlockOnTheLooseException( aoBlock, "allosteric omnis block")
self.allostericOmnis = aoBlock[:]
self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )
def addReactionRulesBlock( self, rrBlock, dimerGenBlock, omniGenBlock, uniMolGenBlock, overwrite = False):
if self.reactionRulesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a reaction rules block twice.")
if not self.BlockPassesSanityCheck( rrBlock ):
raise InsaneBlockOnTheLooseException(rrBlock, "reaction rules")
if not self.BlockPassesSanityCheck( dimerGenBlock ):
raise InsaneBlockOnTheLooseException(dimerGenBlock, "dimerization gen block")
if not self.BlockPassesSanityCheck( omniGenBlock ):
raise InsaneBlockOnTheLooseException(omniGenBlock, "omni-gen block")
if not self.BlockPassesSanityCheck( uniMolGenBlock ):
raise InsaneBlockOnTheLooseException(uniMolGenBlock, "uni-mol-gen block")
self.reactionRulesBlock.extend( rrBlock )
self.dimerizationGenBlock.extend( dimerGenBlock )
self.omniGenBlock.extend( omniGenBlock )
self.uniMolGenBlock.extend( uniMolGenBlock )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
def addExplicitSpeciesBlock( self, esBlock, overwrite = False):
if self.explicitSpeciesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an explicit species block twice.")
if not self.BlockPassesSanityCheck( esBlock ):
raise InsaneBlockOnTheLooseException(esBlock, "explicit-species")
self.explicitSpeciesBlock = esBlock[:]
self.explicitSpeciesSection = ExplicitSpeciesSection( esBlock )
def addSpeciesStreamsBlock(self, ssBlock, overwrite = False):
if self.speciesStreamBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a species stream block twice.")
if not self.BlockPassesSanityCheck( ssBlock ):
raise InsaneBlockOnTheLooseException(ssBlock, "")
self.speciesStreamBlock = ssBlock[:]
self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )
    def __processAllostericRulesBlocks( self, allostericPlexBlock, allostericOmniBlock):
        # Stub -- allosteric blocks are handled in the add*Block methods;
        # this hook always reports success (0).
        return 0
    def __processReactionRulesBlocks( self, rxnRulesBlock, dimerBlock, omniGenBlock, uniGenBlock):
        # Stub -- reaction-rule blocks are handled in addReactionRulesBlock;
        # always reports success (0).
        return 0
    def __processExplicitSpeciesBlock( self, explicitSpeciesBlock):
        # Stub -- explicit-species blocks are handled in addExplicitSpeciesBlock;
        # always reports success (0).
        return 0
    def __processSpeciesStreamBlock( self, ssBlock):
        # Stub -- species-stream blocks are handled in addSpeciesStreamsBlock;
        # always reports success (0).
        return 0
def __writeOutput(self, openXMLFile):
xmlobject = self.__constructXMLRepresentation()
xmlobject.writeall(openXMLFile)
def __constructXMLRepresentation(self):
rootNode = XmlObject("moleculizer-input")
modelElmt = XmlObject("model")
modelElmt.attachToParent(rootNode)
streamsElmt = XmlObject("streams", rootNode)
self.__addModifications( modelElmt )
self.__addMols( modelElmt )
self.__addAllostericPlexes( modelElmt )
self.__addAllostericOmnis( modelElmt )
self.__addReactionGens( modelElmt )
self.__addExplicitSpecies( modelElmt )
self.__addExplicitReactions( modelElmt )
self.__addSpeciesStreams( streamsElmt )
return rootNode
def __addModifications(self, parentObject):
# Write me!!!
modificationsSection = XmlObject("modifications", parentObject)
if self.modificationsSection:
self.modificationsSection.writeModificationsSections( modificationsSection )
return
def __addMols(self, parentObject):
molsSection = XmlObject("mols", parentObject)
if self.molsSection:
self.molsSection.writeMolsSection( molsSection)
return
def __addAllostericPlexes(self, parentObject):
allostericPlexes = XmlObject("allosteric-plexes", parentObject)
if self.allostericPlexesSection:
self.allostericPlexesSection.writeAllostericPlexesSection(allostericPlexes)
return
def __addAllostericOmnis(self, parentObject):
allostericOmnis = XmlObject("allosteric-omnis", parentObject)
if self.allostericOmnisSection:
self.allostericOmnisSection.writeAllostericOmnisSection( allostericOmnis )
return
def __addReactionGens(self, parentObject):
reactionGenElmt = XmlObject("reaction-gens", parentObject)
if self.reactionGensSection:
self.reactionGensSection.writeReactionGensSection( reactionGenElmt )
return
def __addSpeciesStreams( self, parentObject):
speciesStreamsElement = XmlObject("species-streams", parentObject)
if self.speciesStreamSection:
self.speciesStreamSection.writeSpeciesStreamSection( speciesStreamsElement )
def __addExplicitSpecies(self, parentObject):
explicitSpeciesElmt = XmlObject("explicit-species", parentObject)
if self.explicitSpeciesSection:
self.explicitSpeciesSection.writeExplicitSpeciesSection( explicitSpeciesElmt )
return
def __addExplicitReactions( self, modelElmt ):
explicitReactionsElmt = XmlObject("explicit-reactions", modelElmt)
return
def parseBlockTypesFromRulesFile(textRulesFile):
    """Split a rules file (a list of text lines) into per-section statement lists.

    Lines are stripped of '#' comments and ALL whitespace, then routed into
    the list named by the most recent '=Section=' header.  Returns a 10-tuple
    of formatted blocks in this order: parameters, modifications, mols,
    allosteric plexes, allosteric omnis, reaction rules, dimerization gens,
    omni gens, explicit species, species streams.

    Raises Exception if the first non-empty line is not a '=...' header or a
    header names an unknown section (see returnNewIndex).

    NOTE(review): uniMolGenBlock is created but never mapped to a section
    header and never returned -- confirm whether uni-mol rules were meant to
    be parsed here.
    """
    textRulesFile = [re.sub("#.*$", "", x) for x in textRulesFile] # Delete all comments
    # textRulesFile = [re.sub("//.*$", "", x) for x in textRulesFile] # Delete all comments
    textRulesFile = [re.sub(r"\s*", "", x) for x in textRulesFile] # Delete all whitespace
    textRulesFile = [x.strip() for x in textRulesFile] # Strip it for good measure
    textRulesFile = [x for x in textRulesFile if x != ""] # This must be last, because line.strip() results in some empty lines.
    parameterBlock = []
    modificationsBlock = []
    molsBlock = []
    allostericPlexes = []
    allostericOmnis = []
    reactionRulesBlock = []
    dimerizationGenBlock = []
    omniGenBlock = []
    uniMolGenBlock = []
    explicitSpeciesBlock = []
    speciesStreamBlock = []
    # textRulesFile = '\n'.join(textRulesFile)
    # textRulesFile = re.sub(r"\\\s*\n\s*", " ", textRulesFile)
    # textRulesFile = textRulesFile.split("\n")
    blockCodes = ["Parameters", "Modifications", "Molecules", "Explicit-Allostery", "Allosteric-Classes",
                  "Reaction-Rules", "Association-Reactions", "Transformation-Reactions",
                  "Explicit-Species", "Species-Classes" ]
    blockObjNdx = -1
    # Pairs each section title with the list that collects its lines.
    blockDataObj = [ (blockCodes[0], parameterBlock), \
                     (blockCodes[1], modificationsBlock), \
                     (blockCodes[2], molsBlock), \
                     (blockCodes[3], allostericPlexes),
                     (blockCodes[4], allostericOmnis),
                     (blockCodes[5], reactionRulesBlock), \
                     (blockCodes[6], dimerizationGenBlock), \
                     (blockCodes[7], omniGenBlock), \
                     (blockCodes[8], explicitSpeciesBlock),\
                     (blockCodes[9], speciesStreamBlock) ]
    currentDmp = []
    # The very first line must be a '=Section=' header, or nothing below
    # would have a destination list.
    try:
        assert( textRulesFile[0].startswith("="))
    except:
        raise Exception("Line '%s' should start with a '=', but does not." % textRulesFile[0])
    blockObjNdx = -1
    # Route each line into the list selected by the most recent header.
    for line in textRulesFile:
        if line.startswith("="):
            blockObjNdx = returnNewIndex(line, blockDataObj)
            currentDmp = blockDataObj[blockObjNdx][1]
        else:
            currentDmp.append(line)
    return getFormattedArray(parameterBlock), getFormattedArray(modificationsBlock), getFormattedArray(molsBlock), getFormattedArray(allostericPlexes), getFormattedArray(allostericOmnis), \
           getFormattedArray(reactionRulesBlock), getFormattedArray(dimerizationGenBlock), getFormattedArray(omniGenBlock), \
           getFormattedArray(explicitSpeciesBlock), getFormattedArray(speciesStreamBlock)
def returnNewIndex(lineOfText, blockObjData):
    """Return the index of the section whose title matches a '=Title=' header.

    Parameters:
        lineOfText   -- a header line such as '=Parameters=' (surrounding
                        whitespace and '=' characters are stripped).
        blockObjData -- sequence of (sectionName, lines) pairs.

    Raises:
        Exception if the title names no known section.

    Fix: the original had an unreachable 'return -1' after the raise; it has
    been removed, and the index loop now uses enumerate.
    """
    key = lineOfText.strip().strip("=").strip()
    for ndx, (sectionName, _) in enumerate(blockObjData):
        if key == sectionName:
            return ndx
    raise Exception("Section title '%s' cannot be found" % key)
def barf(msg):
    """Write *msg* plus a crash notice to stderr, then exit with status 1."""
    sys.stderr.write(msg + '\n' + "Crashing....\n")
    sys.exit(1)
def printerror(msg):
    """Report *msg* on stderr without terminating the process; returns None."""
    sys.stderr.write(msg + '\n')
def getFormattedArray(arrayToFormat):
    """Re-cut a block of lines into one ';'-terminated statement per entry.

    Lines are first merged (getBalancedArray) until every entry is
    paren/bracket/brace balanced, then the whole text is re-split on ';' so
    each returned string is exactly one statement ending in ';'.
    """
    joined = "".join(getBalancedArray(arrayToFormat))
    if joined == "":
        return []
    if joined[-1] != ";":
        raise Exception("Error parsing block '%s'. Line does not end in ';'." % repr(arrayToFormat))
    statements = joined.split(";")
    statements.pop()  # split() leaves a trailing empty string after the final ';'
    return [stmt + ";" for stmt in statements]
def getBalancedArray(arrayToBalance):
    """Merge adjacent entries until every entry is bracket-balanced."""
    if EachEntryIsParenBalanced(arrayToBalance):
        return arrayToBalance
    # Join the first unbalanced entry with its successor, then retry.
    return getBalancedArray(GetIncrementallyBetterArray(arrayToBalance))
def GetIncrementallyBetterArray(anArray):
    """Return a copy of anArray with the first unbalanced entry joined to the next one."""
    balanced = [StringIsParenBalenced(entry) for entry in anArray]
    # This is correct: this function should only be used if the array does
    # not pass EachEntryIsParenBalanced.
    assert False in balanced
    badNdx = balanced.index(False)
    # NOTE(review): if the unbalanced entry is the very last one this raises
    # IndexError -- confirm that inputs always balance eventually.
    merged = anArray[badNdx] + anArray[badNdx + 1]
    return anArray[:badNdx] + [merged] + anArray[badNdx + 2:]
def EachEntryIsParenBalanced(array):
    """Return True when every entry in *array* is bracket-balanced.

    Idiom fix: replaces the manual accumulate-with-and loop with the
    built-in all(); behaviour is identical, including True for an empty
    sequence.
    """
    return all(StringIsParenBalenced(entry) for entry in array)
def StringIsParenBalenced(line):
    """True when the counts of (, [ and { in *line* equal those of ), ] and }.

    This is a count check only -- ordering such as ")(" still passes.
    (The misspelled name is kept because the other helpers call it.)
    """
    for opener, closer in (("(", ")"), ("[", "]"), ("{", "}")):
        if line.count(opener) != line.count(closer):
            return False
    return True
| gpl-2.0 |
mdunker/usergrid | utils/usergrid-util-python/usergrid_tools/queue/dlq-iterator-checker.py | 2 | 4809 | # */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
from multiprocessing.pool import Pool
import argparse
import json
import datetime
import os
import time
import sys
import boto
from boto import sqs
import requests
__author__ = 'Jeff.West@yahoo.com'
# Module-level SQS connection/queue handles, initialised in main() and read
# by the pool workers in check_exists().
sqs_conn = None
sqs_queue = None
# THIS WAS USED TO TAKE MESSAGES OUT OF THE DEAD LETTER AND TEST WHETHER THEY EXISTED OR NOT
def total_seconds(td):
    """Return the timedelta *td* as a float number of seconds.

    Equivalent to timedelta.total_seconds(); kept as an explicit formula for
    compatibility with very old interpreters.
    """
    microseconds = td.microseconds + (td.seconds + td.days * 24.0 * 3600) * 10.0 ** 6
    return microseconds / 10.0 ** 6
def total_milliseconds(td):
    """Return the timedelta *td* as a whole number of milliseconds.

    Fixes two defects in the original:
      * ``td.days`` was ignored, so any delta of a day or more was wrong
        (timedelta normalises days/seconds/microseconds separately);
      * ``/ 1000`` floor-divided under Python 2 but yields a float under
        Python 3 -- ``// 1000`` keeps the integral result on both.
    """
    return (td.microseconds + (td.seconds + td.days * 86400) * 1000000) // 1000
def get_time_remaining(count, rate):
    """Format the ETA for *count* items at *rate* items/sec as 'H:MM:SS'.

    Returns the string 'NaN' when the rate is zero (no progress to
    extrapolate from).
    """
    if rate == 0:
        return 'NaN'
    remaining = count * 1.0 / rate
    minutes, secs = divmod(remaining, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
def parse_args():
    """Parse command-line options; returns a dict with the 'config' file path.

    Only one option is supported: -c/--config, defaulting to '4g.json'.
    """
    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
    parser.add_argument('-c', '--config',
                        help='The queue to load into',
                        type=str,
                        default='4g.json')
    my_args = parser.parse_args(sys.argv[1:])
    print str(my_args)
    return vars(my_args)
def check_exists(sqs_message):
    """Check whether the entity referenced by a dead-letter message still exists.

    A 404 from the entity URL is the expected outcome (the index event landed
    in the DLQ because the entity was already deleted); anything else is
    printed as an error.  The message is deleted from the queue either way.
    """
    # checks whether an entity is deleted. if the entity is found then it prints an error message.
    # this was used when there were many messages going to DLQ and the reason was because the entity had been deleted
    try:
        message = json.loads(sqs_message.get_body())
    except ValueError:
        print 'Unable to decode JSON: %s' % sqs_message.get_body()
        return
    try:
        for event_name, event_data in message.iteritems():
            entity_id_scope = event_data.get('entityIdScope')
            app_id = entity_id_scope.get('applicationScope', {}).get('application', {}).get('uuid')
            entity_id_scope = entity_id_scope.get('id')
            entity_id = entity_id_scope.get('uuid')
            entity_type = entity_id_scope.get('type')
            # NOTE(review): this first url is immediately overwritten below -- dead code.
            url = 'http://localhost:8080/{app_id}/{entity_type}/{entity_id}'.format(
                app_id=app_id,
                entity_id=entity_id,
                entity_type=entity_type
            )
            url = 'https://{host}/{basepath}/{app_id}/{entity_type}/{entity_id}'.format(
                host='REPLACE',
                basepath='REPLACE',
                app_id=app_id,
                entity_id=entity_id,
                entity_type=entity_type
            )
            # NOTE(review): hard-coded bearer token -- replace before running.
            r = requests.get(url=url,
                             headers={
                                 'Authorization': 'Bearer XCA'
                             })
            if r.status_code != 404:
                print 'ERROR/FOUND [%s]: %s' % (r.status_code, url)
            else:
                print '[%s]: %s' % (r.status_code, url)
            # The message is removed from the queue regardless of the lookup result.
            deleted = sqs_conn.delete_message_from_handle(sqs_queue, sqs_message.receipt_handle)
            if not deleted:
                print 'no delete!'
    except KeyboardInterrupt, e:
        raise e
def main():
    """Drain the configured SQS dead-letter queue, checking each message's entity.

    Reads SQS connection settings from the JSON config file named by
    -c/--config, then repeatedly long-polls the queue in batches of 10 and
    fans each batch out to a worker pool running check_exists().
    """
    global sqs_conn, sqs_queue
    args = parse_args()
    start_time = datetime.datetime.utcnow()
    first_start_time = start_time
    print "first start: %s" % first_start_time
    with open(args.get('config'), 'r') as f:
        config = json.load(f)
    sqs_config = config.get('sqs')
    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
    # NOTE(review): queue name is hard-coded -- consider moving it into the config file.
    queue_name = 'baas20sr_usea_baas20sr_usea_index_all_dead'
    sqs_queue = sqs_conn.get_queue(queue_name)
    last_size = sqs_queue.count()
    print 'Last Size: ' + str(last_size)
    pool = Pool(10)
    keep_going = True
    while keep_going:
        # Long-poll for up to 10 messages at a time.
        sqs_messages = sqs_queue.get_messages(
            num_messages=10,
            visibility_timeout=10,
            wait_time_seconds=10)
        if len(sqs_messages) > 0:
            pool.map(check_exists, sqs_messages)
        else:
            # Empty receive after a 10s wait: treat the queue as drained.
            print 'DONE!'
            pool.terminate()
            keep_going = False
# Script entry point.
if __name__ == '__main__':
    main()
| apache-2.0 |
LeslieZhu/tango_with_django_project | tango_with_django_project/rango/views.py | 2 | 10530 | from django.shortcuts import render
from django.http import HttpResponse
from rango.models import Category,Page
from rango.forms import CategoryForm
from django.contrib.auth.decorators import login_required
from datetime import datetime
def index(request):
    """Render the Rango homepage: top categories/pages plus a session visit counter."""
    top_categories = Category.objects.order_by('-likes')[:15]
    top_pages = Page.objects.order_by('-views')[:15]
    context_dict = {'categories': top_categories, 'pages': top_pages}
    # Session-based visit counting: bump the counter at most once per
    # distinct visit, keyed off the timestamp stored on the last request.
    visits = request.session.get('visits')
    if not visits:
        visits = 1
    reset_last_visit_time = False
    last_visit = request.session.get('last_visit')
    if last_visit:
        # str(datetime.now()) has microseconds; [:-7] drops them for strptime.
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
        if (datetime.now() - last_visit_time).seconds > 0:
            visits += 1
            reset_last_visit_time = True
    else:
        # No last_visit entry yet: create it below with the current time.
        reset_last_visit_time = True
    if reset_last_visit_time:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = visits
    context_dict['visits'] = visits
    return render(request, 'rango/index.html', context_dict)
def about(request):
    """Render the about page, exposing the session visit counter to the template."""
    context_dict = {
        'visits': request.session.get('visits'),
        'last_visit': request.session.get('last_visit'),
    }
    return render(request, 'rango/about.html', context_dict)
def category(request, category_name_slug):
    """Render one category page, listing the pages filed under it.

    An unknown slug is not an error: the context is left without a
    'category' entry and the template shows its "no category" message.
    """
    context_dict = {}
    try:
        # .get() raises Category.DoesNotExist for an unknown slug.
        matched = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        pass
    else:
        context_dict['category_name'] = matched.name
        context_dict['category_name_slug'] = category_name_slug
        # filter() returns every page attached to this category.
        context_dict['pages'] = Page.objects.filter(category=matched)
        # The template checks for 'category' to verify the category exists.
        context_dict['category'] = matched
    return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
    """Show (GET) or process (POST) the add-category form; renders index on success."""
    # A HTTP POST?
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        # Have we been provided with a valid form?
        if form.is_valid():
            # Save the new category to the database.
            form.save(commit=True)
            # Now call the index() view.
            # The user will be shown the homepage.
            return index(request)
        else:
            # The supplied form contained errors - just print them to the terminal.
            print form.errors
    else:
        # If the request was not a POST, display the form to enter details.
        form = CategoryForm()
    # Bad form (or form details), no form supplied...
    # Render the form with error messages (if any).
    return render(request, 'rango/add_category.html', {'form': form})
from rango.forms import PageForm
@login_required
def add_page(request, category_name_slug):
    """Show (GET) or process (POST) the add-page form for the given category.

    A page whose title already exists in the category is silently skipped
    (the form is simply re-rendered).
    """
    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if cat:
                page = form.save(commit=False)
                # Only save when no page with this title exists in the category.
                if not Page.objects.filter(title = page.title,category=cat):
                    page.category = cat
                    page.views = 0
                    page.save()
                    # probably better to use a redirect here.
                    return category(request, category_name_slug)
        else:
            print form.errors
    else:
        form = PageForm()
    context_dict = {'form':form, 'category': cat}
    return render(request, 'rango/add_page.html', context_dict)
from rango.forms import UserForm, UserProfileForm
def register(request):
    """Handle user sign-up: create a User and its UserProfile (optional picture)."""
    # A boolean value for telling the template whether the registration was successful.
    # Set to False initially. Code changes value to True when registration succeeds.
    registered = False
    # If it's a HTTP POST, we're interested in processing form data.
    if request.method == 'POST':
        # Attempt to grab information from the raw form information.
        # Note that we make use of both UserForm and UserProfileForm.
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        # If the two forms are valid...
        if user_form.is_valid() and profile_form.is_valid():
            # Save the user's form data to the database.
            user = user_form.save()
            # Now we hash the password with the set_password method.
            # Once hashed, we can update the user object.
            user.set_password(user.password)
            user.save()
            # Now sort out the UserProfile instance.
            # Since we need to set the user attribute ourselves, we set commit=False.
            # This delays saving the model until we're ready to avoid integrity problems.
            profile = profile_form.save(commit=False)
            profile.user = user
            # Did the user provide a profile picture?
            # If so, we need to get it from the input form and put it in the UserProfile model.
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            # Now we save the UserProfile model instance.
            profile.save()
            # Update our variable to tell the template registration was successful.
            registered = True
        # Invalid form or forms - mistakes or something else?
        # Print problems to the terminal.
        # They'll also be shown to the user.
        else:
            print user_form.errors, profile_form.errors
    # Not a HTTP POST, so we render our form using two ModelForm instances.
    # These forms will be blank, ready for user input.
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Render the template depending on the context.
    return render(request,
            'rango/register.html',
            {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
def user_login(request):
    """Authenticate a username/password POST; GET just renders the login form."""
    # If the request is a HTTP POST, try to pull out the relevant information.
    if request.method == 'POST':
        # Gather the username and password provided by the user.
        # This information is obtained from the login form.
        # We use request.POST.get('<variable>') as opposed to request.POST['<variable>'],
        # because the request.POST.get('<variable>') returns None, if the value does not exist,
        # while the request.POST['<variable>'] will raise key error exception
        username = request.POST.get('username')
        password = request.POST.get('password')
        # Use Django's machinery to attempt to see if the username/password
        # combination is valid - a User object is returned if it is.
        user = authenticate(username=username, password=password)
        # If we have a User object, the details are correct.
        # If None (Python's way of representing the absence of a value), no user
        # with matching credentials was found.
        if user:
            # Is the account active? It could have been disabled.
            if user.is_active:
                # If the account is valid and active, we can log the user in.
                # We'll send the user back to the homepage.
                login(request, user)
                return HttpResponseRedirect('/rango/')
            else:
                # An inactive account was used - no logging in!
                return HttpResponse("Your Rango account is disabled.")
        else:
            # Bad login details were provided. So we can't log the user in.
            print "Invalid login details: {0}, {1}".format(username, password)
            return HttpResponse("Invalid login details supplied.")
    # The request is not a HTTP POST, so display the login form.
    # This scenario would most likely be a HTTP GET.
    else:
        # No context variables to pass to the template system, hence the
        # blank dictionary object...
        return render(request, 'rango/login.html', {})
@login_required
def restricted(request):
    """Demo view that only authenticated users can reach."""
    message = "Since you're logged in, you can see this text!"
    return HttpResponse(message)
from django.contrib.auth import logout
# Use the login_required() decorator to ensure only those logged in can access the view.
@login_required
def user_logout(request):
    """Log the current user out and redirect to the Rango homepage."""
    # @login_required guarantees request.user is authenticated here.
    logout(request)
    return HttpResponseRedirect('/rango/')
from rango.bing_search import run_query
def search(request):
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
# Run our Bing function to get the results list!
result_list = run_query(query)
return render(request, 'rango/search.html', {'result_list': result_list})
| gpl-2.0 |
2014cdag2/w17x1 | static/Brython3.1.3-20150514-095342/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    # Capture the traceback of the import error currently being handled and
    # wrap it in a synthetic 'ModuleImportFailure' test that re-raises it.
    message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
    return _make_failed_test('ModuleImportFailure', name, ImportError(message),
                             suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    # Wrap a load_tests() failure as a synthetic 'LoadTestsFailure' test case.
    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Synthesise a one-test suite whose single test re-raises *exception*.

    The TestCase subclass is built dynamically so the failing test carries
    the original module/test name in its id().
    """
    def testFailure(self):
        raise exception
    TestClass = type(classname, (case.TestCase,), {methodname: testFailure})
    return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
    """os.path.splitext()[0] variant that also strips Jython's '$py.class' suffix."""
    suffix = '$py.class'
    if path.lower().endswith(suffix):
        return path[:-len(suffix)]
    return os.path.splitext(path)[0]
class TestLoader(object):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Method-name prefix that marks a method as a test.
    testMethodPrefix = 'test'
    # Three-way comparison used to order discovered test-method names.
    sortTestMethodsUsing = staticmethod(util.three_way_cmp)
    # Factory used to wrap collected tests into a suite.
    suiteClass = suite.TestSuite
    # Remembered by discover() so load_tests hooks can re-enter discovery
    # without passing top_level_dir again.
    _top_level_dir = None
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                                " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        # A class with no test_* methods but a runTest() is a single-test case.
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite
    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, case.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        # The load_tests protocol: a module-level load_tests(loader, tests,
        # pattern) hook may replace or extend the collected tests.
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, None)
            except Exception as e:
                # A broken hook becomes a synthetic failing test, not a crash.
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Import the longest importable dotted prefix of the name; the
            # remaining parts are resolved as attributes below.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        # Dispatch on what the name finally resolved to.
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, case.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.FunctionType) and
              isinstance(parent, type) and
              issubclass(parent, case.TestCase)):
            name = parts[-1]
            inst = parent(name)
            # static methods follow a different path
            if not isinstance(getattr(inst, name), types.FunctionType):
                return self.suiteClass([inst])
        elif isinstance(obj, suite.TestSuite):
            return obj
        if callable(obj):
            test = obj()
            if isinstance(test, suite.TestSuite):
                return test
            elif isinstance(test, case.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            return attrname.startswith(prefix) and \
                callable(getattr(testCaseClass, attrname))
        testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
        if self.sortTestMethodsUsing:
            # sortTestMethodsUsing is a three-way cmp; adapt it to a key.
            testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
        return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them and return all
        tests found within them. Only test files that match the pattern will
        be loaded. (Using shell style pattern matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                # A non-top start directory must be a package.
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    self._top_level_dir = self._get_directory_containing_module(top_part)
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
    def _get_directory_containing_module(self, module_name):
        """Return the directory that makes *module_name* importable."""
        module = sys.modules[module_name]
        full_path = os.path.abspath(module.__file__)
        if os.path.basename(full_path).lower().startswith('__init__.py'):
            return os.path.dirname(os.path.dirname(full_path))
        else:
            # here we have been given a module rather than a package - so
            # all we can do is search the *same* directory the module is in
            # should an exception be raised instead
            return os.path.dirname(full_path)
    def _get_name_from_path(self, path):
        """Convert a filesystem path below _top_level_dir into a dotted module name."""
        path = _jython_aware_splitext(os.path.normpath(path))
        _relpath = os.path.relpath(path, self._top_level_dir)
        assert not os.path.isabs(_relpath), "Path must be within the project"
        assert not _relpath.startswith('..'), "Path must be within the project"
        name = _relpath.replace(os.path.sep, '.')
        return name
    def _get_module_from_name(self, name):
        """Import *name* and return the module object."""
        __import__(name)
        return sys.modules[name]
    def _match_path(self, path, full_path, pattern):
        # override this method to use alternative matching strategy
        return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    # Import failure becomes a synthetic failing test.
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = _jython_aware_splitext(os.path.realpath(mod_file))
                    fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
                    if realpath.lower() != fullpath_noext.lower():
                        # The imported module came from a different file than
                        # the one discovered: likely a name shadowed by an
                        # installed copy.
                        module_dir = os.path.dirname(realpath)
                        mod_name = _jython_aware_splitext(os.path.basename(full_path))
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception as e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Module-level singleton loader, mirroring unittest.defaultTestLoader.
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Return a TestLoader configured with *prefix*, *sortUsing* and,
    optionally, a custom *suiteClass*."""
    new_loader = TestLoader()
    new_loader.sortTestMethodsUsing = sortUsing
    new_loader.testMethodPrefix = prefix
    if suiteClass:
        new_loader.suiteClass = suiteClass
    return new_loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
    """Return the sorted test method names found on *testCaseClass*."""
    loader = _makeLoader(prefix, sortUsing)
    return loader.getTestCaseNames(testCaseClass)


def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
              suiteClass=suite.TestSuite):
    """Return a suite containing every test defined on *testCaseClass*."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromTestCase(testCaseClass)


def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
                  suiteClass=suite.TestSuite):
    """Return a suite with the tests from every TestCase found in *module*."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromModule(module)
| agpl-3.0 |
numenta/nupic.geospatial | server.py | 3 | 2729 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
from flask import Flask, request
from tools.preprocess_data import preprocess
from model.geospatial_anomaly import runGeospatialAnomaly
from tools.anomaly_to_js_data import postprocess

app = Flask(__name__)

# Pipeline artifact locations: raw upload -> preprocessed CSV ->
# model output -> JS data file consumed by the visualization page.
DIR_OUTPUT = "output"
FILE_DATA = "data.csv"
FILE_PROCESSED_DATA = "processed_data.csv"
FILE_MODEL_OUTPUT = "model_output.csv"
DIR_STATIC_JS = os.path.join("static", "js")
FILE_JS_DATA = "data.js"
@app.route('/')
def visualize():
    """Serve the anomaly-visualization page."""
    return app.send_static_file('visualize.html')


@app.route('/simulate')
def simulate():
    """Serve the route-simulation page."""
    return app.send_static_file('simulate.html')
@app.route('/process', methods=['POST'])
def process():
    """Accept raw GPS CSV data in the POST body, run the full
    preprocess -> anomaly-model -> JS-data pipeline and report completion.

    Returns the literal string "Done." once all three stages finish.
    """
    dataFile = os.path.join(DIR_OUTPUT, FILE_DATA)
    processedDataFile = os.path.join(DIR_OUTPUT, FILE_PROCESSED_DATA)
    modelOutputFile = os.path.join(DIR_OUTPUT, FILE_MODEL_OUTPUT)
    jsDataFile = os.path.join(DIR_STATIC_JS, FILE_JS_DATA)

    # request.data is a byte string; write in binary mode (and via a
    # context manager so the handle is always closed).  Text mode raised
    # TypeError under Python 3.
    with open(dataFile, 'wb') as f:
        f.write(request.data)

    preprocess(dataFile, processedDataFile)
    runGeospatialAnomaly(processedDataFile, modelOutputFile, verbose=True)
    postprocess(modelOutputFile, jsDataFile)

    return "Done."
@app.route('/js/<path:path>')
def js(path):
    """Serve bundled JavaScript assets."""
    return app.send_static_file(os.path.join('js', path))


@app.route('/css/<path:path>')
def css(path):
    """Serve stylesheet assets."""
    return app.send_static_file(os.path.join('css', path))


@app.route('/img/<path:path>')
def img(path):
    """Serve image assets."""
    return app.send_static_file(os.path.join('img', path))


# Disable cache
@app.after_request
def add_header(response):
    """Force max-age=0 so a regenerated data.js is always re-fetched."""
    response.cache_control.max_age = 0
    return response


if __name__ == "__main__":
    # Make sure the pipeline's working directory exists, then serve with
    # Flask's debug server on all interfaces.
    if not os.path.exists(DIR_OUTPUT):
        os.makedirs(DIR_OUTPUT)
    app.run(debug=True, host="0.0.0.0")
| agpl-3.0 |
Sean3Don/inkscape | share/extensions/dxf_templates.py | 7 | 3541 | #!/usr/bin/env python
r14_header = ''' 0
SECTION
2
HEADER
9
$ACADVER
1
AC1014
9
$HANDSEED
5
FFFF
9
$MEASUREMENT
70
1
0
ENDSEC
0
SECTION
2
TABLES
0
TABLE
2
VPORT
5
8
330
0
100
AcDbSymbolTable
70
4
0
VPORT
5
2E
330
8
100
AcDbSymbolTableRecord
100
AcDbViewportTableRecord
2
*ACTIVE
70
0
10
0.0
20
0.0
11
1.0
21
1.0
12
210.0
22
148.5
13
0.0
23
0.0
14
10.0
24
10.0
15
10.0
25
10.0
16
0.0
26
0.0
36
1.0
17
0.0
27
0.0
37
0.0
40
341.0
41
1.24
42
50.0
43
0.0
44
0.0
50
0.0
51
0.0
71
0
72
100
73
1
74
3
75
0
76
0
77
0
78
0
0
ENDTAB
0
TABLE
2
LTYPE
5
5
330
0
100
AcDbSymbolTable
70
1
0
LTYPE
5
14
330
5
100
AcDbSymbolTableRecord
100
AcDbLinetypeTableRecord
2
BYBLOCK
70
0
3
72
65
73
0
40
0.0
0
LTYPE
5
15
330
5
100
AcDbSymbolTableRecord
100
AcDbLinetypeTableRecord
2
BYLAYER
70
0
3
72
65
73
0
40
0.0
0
LTYPE
5
16
330
5
100
AcDbSymbolTableRecord
100
AcDbLinetypeTableRecord
2
CONTINUOUS
70
0
3
Solid line
72
65
73
0
40
0.0
0
ENDTAB
0
TABLE
'''
r14_style = ''' 0
ENDTAB
0
TABLE
2
STYLE
5
3
330
0
100
AcDbSymbolTable
70
1
0
STYLE
5
11
330
3
100
AcDbSymbolTableRecord
100
AcDbTextStyleTableRecord
2
STANDARD
70
0
40
0.0
41
1.0
50
0.0
71
0
42
2.5
3
txt
4
0
ENDTAB
0
TABLE
2
VIEW
5
6
330
0
100
AcDbSymbolTable
70
0
0
ENDTAB
0
TABLE
2
UCS
5
7
330
0
100
AcDbSymbolTable
70
0
0
ENDTAB
0
TABLE
2
APPID
5
9
330
0
100
AcDbSymbolTable
70
2
0
APPID
5
12
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD
70
0
0
ENDTAB
0
TABLE
2
DIMSTYLE
5
A
330
0
100
AcDbSymbolTable
70
1
0
DIMSTYLE
105
27
330
A
100
AcDbSymbolTableRecord
100
AcDbDimStyleTableRecord
2
ISO-25
70
0
3
4
5
6
7
40
1.0
41
2.5
42
0.625
43
3.75
44
1.25
45
0.0
46
0.0
47
0.0
48
0.0
140
2.5
141
2.5
142
0.0
143
0.03937007874016
144
1.0
145
0.0
146
1.0
147
0.625
71
0
72
0
73
0
74
0
75
0
76
0
77
1
78
8
170
0
171
3
172
1
173
0
174
0
175
0
176
0
177
0
178
0
270
2
271
2
272
2
273
2
274
3
340
11
275
0
280
0
281
0
282
0
283
0
284
8
285
0
286
0
287
3
288
0
0
ENDTAB
0
TABLE
2
BLOCK_RECORD
5
1
330
0
100
AcDbSymbolTable
70
1
0
BLOCK_RECORD
5
1F
330
1
100
AcDbSymbolTableRecord
100
AcDbBlockTableRecord
2
*MODEL_SPACE
0
BLOCK_RECORD
5
1B
330
1
100
AcDbSymbolTableRecord
100
AcDbBlockTableRecord
2
*PAPER_SPACE
0
ENDTAB
0
ENDSEC
0
SECTION
2
BLOCKS
0
BLOCK
5
20
330
1F
100
AcDbEntity
8
0
100
AcDbBlockBegin
2
*MODEL_SPACE
70
0
10
0.0
20
0.0
30
0.0
3
*MODEL_SPACE
1
0
ENDBLK
5
21
330
1F
100
AcDbEntity
8
0
100
AcDbBlockEnd
0
BLOCK
5
1C
330
1B
100
AcDbEntity
67
1
8
0
100
AcDbBlockBegin
2
*PAPER_SPACE
1
0
ENDBLK
5
1D
330
1B
100
AcDbEntity
67
1
8
0
100
AcDbBlockEnd
0
ENDSEC
0
SECTION
2
ENTITIES
'''
r14_footer = ''' 0
ENDSEC
0
SECTION
2
OBJECTS
0
DICTIONARY
5
C
330
0
100
AcDbDictionary
3
ACAD_GROUP
350
D
3
ACAD_MLINESTYLE
350
17
0
DICTIONARY
5
D
330
C
100
AcDbDictionary
0
DICTIONARY
5
1A
330
C
100
AcDbDictionary
0
DICTIONARY
5
17
330
C
100
AcDbDictionary
3
STANDARD
350
18
0
DICTIONARY
5
19
330
C
100
AcDbDictionary
0
ENDSEC
0
EOF'''
| gpl-2.0 |
wilebeast/FireFox-OS | B2G/gecko/testing/marionette/client/marionette/tests/unit/test_emulator.py | 2 | 2495 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_test import MarionetteTestCase
from errors import JavascriptException, MarionetteException
class TestEmulatorContent(MarionetteTestCase):
    """Exercises runEmulatorCmd from the content (non-chrome) context."""

    def test_emulator_cmd(self):
        # `avd name` should asynchronously report the AVD build string
        # followed by a trailing "OK" acknowledgement.
        self.marionette.set_script_timeout(10000)
        expected = ["<build>",
                    "OK"]
        result = self.marionette.execute_async_script("""
        runEmulatorCmd("avd name", marionetteScriptFinished)
        """);
        self.assertEqual(result, expected)

    # disabled due to bug 758329
    # def test_emulator_order(self):
    #     self.marionette.set_script_timeout(10000)
    #     self.assertRaises(MarionetteException,
    #                       self.marionette.execute_async_script,
    #                       """runEmulatorCmd("gsm status", function(result) {});
    #                       marionetteScriptFinished(true);
    #                       """);
class TestEmulatorChrome(TestEmulatorContent):
    """Re-runs the inherited content tests with Marionette in chrome context."""

    def setUp(self):
        super(TestEmulatorChrome, self).setUp()
        self.marionette.set_context("chrome")
class TestEmulatorScreen(MarionetteTestCase):
    """Verifies the emulator screen-orientation API round-trips every value."""

    def setUp(self):
        MarionetteTestCase.setUp(self)

        self.screen = self.marionette.emulator.screen
        self.screen.initialize()

    def test_emulator_orientation(self):
        self.assertEqual(self.screen.orientation, self.screen.SO_PORTRAIT_PRIMARY,
                         'Orientation has been correctly initialized.')

        # Cycle through every supported orientation (ending back at
        # portrait-primary) and verify each assignment sticks.
        transitions = [
            (self.screen.SO_PORTRAIT_SECONDARY, 'portrait-secondary'),
            (self.screen.SO_LANDSCAPE_PRIMARY, 'landscape-primary'),
            (self.screen.SO_LANDSCAPE_SECONDARY, 'landscape-secondary'),
            (self.screen.SO_PORTRAIT_PRIMARY, 'portrait-primary'),
        ]
        for orientation, label in transitions:
            self.screen.orientation = orientation
            self.assertEqual(self.screen.orientation, orientation,
                             'Orientation has been set to %s' % label)
| apache-2.0 |
yujunglo/teletraan | deploy-agent/tests/unit/deploy/common/test_config.py | 8 | 2099 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import os.path
import tempfile
import unittest
import tests
from deployd.common.config import Config
from deployd.common.types import DeployStatus, OpCode, DeployStage
from deployd.types.ping_response import PingResponse
class TestConfigFunctions(tests.TestCase):

    @classmethod
    def setUpClass(cls):
        # Write a throw-away "variables" file exercising the three quoting
        # styles.  NOTE(review): nothing in this class reads it back —
        # confirm whether it is still needed.
        cls.dirname = tempfile.mkdtemp()
        env_filename = os.path.join(cls.dirname, "variables")
        lines = ['env1 = \"test1\"\n',
                 'env2 = \'test2\'\n',
                 'env3 = test3\n']
        with open(env_filename, 'w') as f:
            f.writelines(lines)
        # Config reads its base directory from the reader; stub it to /tmp.
        config_reader = mock.Mock()
        config_reader.get = mock.Mock(return_value="/tmp")
        cls.config = Config(config_reader=config_reader)

    def test_get_target(self):
        # Build a minimal NOOP ping response carrying one deploy goal and
        # check that update_variables exports it into the process
        # environment and that get_target derives the install path.
        deploy_goal = {}
        deploy_goal['deployId'] = '123'
        deploy_goal['stageName'] = 'beta'
        deploy_goal['envName'] = 'pinboard'
        deploy_goal['deployStage'] = DeployStage.SERVING_BUILD
        ping_response = {'deployGoal': deploy_goal, 'opCode': OpCode.NOOP}
        response = PingResponse(jsonValue=ping_response)
        self.config.update_variables(DeployStatus(response))
        self.assertEqual(os.environ['DEPLOY_ID'], '123')
        self.assertEqual(os.environ['ENV_NAME'], 'pinboard')
        self.assertEqual(os.environ['STAGE_NAME'], 'beta')
        self.assertEqual(self.config.get_target(), '/tmp/pinboard')


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
gully/PyKE | pyke/kepfourier.py | 2 | 1484 | from .utils import PyKEArgumentHelpFormatter
import numpy as np
def ft(x, y, f1, f2, df, verbose):
    """
    Compute the Fourier transform of a signal ``y`` with support ``x``
    in the bandwidth between the frequencies ``f1`` to ``f2``. ``df``
    is the frequency of resolution.

    Parameters
    ----------
    x : array-like
        Support of the signal (usually time)
    y : array-like
        Signal values
    f1, f2 : float
        Initial and last frequencies
    df : float
        Frequency of resolution
    verbose : bool
        If True, print the period and power computed at every step.

    Returns
    -------
    fr : ndarray
        Frequency support
    power : ndarray
        Power spectrum
    """
    # Accept any array-like input; the boolean masking below requires
    # ndarrays (the original implementation failed on plain lists).
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    # Drop samples where either coordinate is NaN.
    notnans = (~np.isnan(x)) & (~np.isnan(y))
    x = x[notnans]
    y = y[notnans]

    len_x = len(x)
    fr, power = [], []
    nstep = 0
    for freq in np.arange(f1, f2, df):
        omega = 2.0 * np.pi * freq
        # Vectorized projection onto cos/sin replaces the original
        # O(n_freq * n_samples) pure-Python inner loop.
        ft_real = np.dot(y, np.cos(omega * x))
        ft_imag = np.dot(y, np.sin(omega * x))
        fr.append(freq)
        power.append((ft_real ** 2 + ft_imag ** 2) / len_x ** 2)
        nstep += 1
        if verbose:
            print('Step: {0} Period: {1} (d) Power: {2}'
                  .format(nstep, 1.0 / fr[-1], power[-1]))
    fr = np.array(fr, dtype='float32')
    power = np.array(power, dtype='float32')
    return fr, power
| mit |
fbradyirl/home-assistant | tests/components/zwave/test_workaround.py | 4 | 2561 | """Test Z-Wave workarounds."""
from homeassistant.components.zwave import const, workaround
from tests.mock.zwave import MockNode, MockValue
def test_get_device_no_component_mapping():
    """Test that None is returned."""
    # A blank manufacturer id matches no entry in the workaround table.
    node = MockNode(manufacturer_id=" ")
    value = MockValue(data=0, node=node)
    assert workaround.get_device_component_mapping(value) is None


def test_get_device_component_mapping():
    """Test that component is returned."""
    node = MockNode(manufacturer_id="010f", product_type="0b00")
    value = MockValue(data=0, node=node, command_class=const.COMMAND_CLASS_SENSOR_ALARM)
    assert workaround.get_device_component_mapping(value) == "binary_sensor"


def test_get_device_component_mapping_mti():
    """Test that component is returned."""
    # GE Fan controller
    node = MockNode(manufacturer_id="0063", product_type="4944", product_id="3034")
    value = MockValue(
        data=0, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    assert workaround.get_device_component_mapping(value) == "fan"

    # GE Dimmer: same manufacturer/type but a different product id must
    # NOT match the fan mapping.
    node = MockNode(manufacturer_id="0063", product_type="4944", product_id="3031")
    value = MockValue(
        data=0, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL
    )
    assert workaround.get_device_component_mapping(value) is None


def test_get_device_no_mapping():
    """Test that no device mapping is returned."""
    node = MockNode(manufacturer_id=" ")
    value = MockValue(data=0, node=node)
    assert workaround.get_device_mapping(value) is None


def test_get_device_mapping_mt():
    """Test that device mapping mt is returned."""
    # Keyed on manufacturer id + product type only.
    node = MockNode(manufacturer_id="0047", product_type="5a52")
    value = MockValue(data=0, node=node)
    assert workaround.get_device_mapping(value) == "workaround_no_position"


def test_get_device_mapping_mtii():
    """Test that device mapping mtii is returned."""
    # Keyed on manufacturer id + product type + product id + value index.
    node = MockNode(manufacturer_id="013c", product_type="0002", product_id="0002")
    value = MockValue(data=0, node=node, index=0)
    assert workaround.get_device_mapping(value) == "trigger_no_off_event"


def test_get_device_mapping_mti_instance():
    """Test that device mapping mti_instance is returned."""
    # Keyed on manufacturer/type/product plus the value *instance*; only
    # instance 1 carries the workaround.
    node = MockNode(manufacturer_id="013c", product_type="0001", product_id="0005")
    value = MockValue(data=0, node=node, instance=1)
    assert workaround.get_device_mapping(value) == "refresh_node_on_update"

    value = MockValue(data=0, node=node, instance=2)
    assert workaround.get_device_mapping(value) is None
| apache-2.0 |
einarhuseby/arctic | arctic/_util.py | 3 | 1846 | from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from pymongo.errors import OperationFailure
import string
import logging
logger = logging.getLogger(__name__)
def indent(s, num_spaces):
    """Return *s* with every line (including empty ones) prefixed by
    *num_spaces* spaces.

    Rewritten with ``str`` methods: the original used the deprecated
    ``string.split``/``string.join`` module functions, which were removed
    in Python 3.
    """
    prefix = num_spaces * ' '
    return '\n'.join(prefix + line for line in s.split('\n'))
def are_equals(o1, o2, **kwargs):
    """Best-effort equality check.

    DataFrames are compared with ``assert_frame_equal`` (extra keyword
    arguments such as ``check_dtype`` are forwarded to it); everything
    else falls back to ``==``.  Any exception is treated as "not equal".
    """
    try:
        if isinstance(o1, DataFrame):
            # Bug fix: kwargs must be unpacked — the previous code passed
            # the dict itself as the third positional argument, silently
            # misapplying options like check_dtype.
            assert_frame_equal(o1, o2, **kwargs)
            return True
        return o1 == o2
    except Exception:
        # Deliberate best-effort semantics: comparison failures of any
        # kind (including assert_frame_equal raising) mean "not equal".
        return False
def enable_sharding(arctic, library_name, hashed=False):
    """Enable MongoDB sharding on 'symbol' for a library's top-level
    collection.

    Parameters
    ----------
    arctic : Arctic connection
    library_name : str
        Name of the library whose collection should be sharded.
    hashed : bool
        Use hashed sharding instead of range sharding.
    """
    c = arctic._conn
    lib = arctic[library_name]._arctic_lib
    dbname = lib._db.name
    library_name = lib.get_top_level_collection().name
    try:
        c.admin.command('enablesharding', dbname)
    # Python 2.6+/3-compatible 'as' syntax (was the py2-only comma form).
    except OperationFailure as e:
        # Sharding may already be enabled for the database; only that
        # specific failure is tolerated.
        if 'failed: already enabled' not in str(e):
            raise
    if not hashed:
        logger.info("Range sharding 'symbol' on: " + dbname + '.' + library_name)
        c.admin.command('shardCollection', dbname + '.' + library_name,
                        key={'symbol': 1})
    else:
        logger.info("Hash sharding 'symbol' on: " + dbname + '.' + library_name)
        c.admin.command('shardCollection', dbname + '.' + library_name,
                        key={'symbol': 'hashed'})
def enable_powerof2sizes(arctic, library_name):
    """Turn on MongoDB's usePowerOf2Sizes allocation for a library's
    top-level collection and all of its sub-collections."""
    lib = arctic[library_name]._arctic_lib
    top_level = lib.get_top_level_collection()
    lib._db.command({"collMod": top_level.name, 'usePowerOf2Sizes': "true"})
    logger.info("usePowerOf2Sizes enabled for %s", top_level.name)

    prefix = "%s." % top_level.name
    for coll in top_level.database.collection_names():
        if coll.startswith(prefix):
            lib._db.command({"collMod": coll, 'usePowerOf2Sizes': "true"})
            logger.info("usePowerOf2Sizes enabled for %s", coll)
| lgpl-2.1 |
projectcalico/calico-nova | nova/api/openstack/compute/contrib/rescue.py | 7 | 4012 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
from oslo.config import cfg
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import utils
CONF = cfg.CONF
authorize = exts.extension_authorizer('compute', 'rescue')
class RescueController(wsgi.Controller):
    """Adds the 'rescue'/'unrescue' server actions to the servers resource."""

    def __init__(self, ext_mgr, *args, **kwargs):
        super(RescueController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.ext_mgr = ext_mgr

    @wsgi.action('rescue')
    def _rescue(self, req, id, body):
        """Rescue an instance."""
        context = req.environ["nova.context"]
        authorize(context)

        # Use the caller-supplied admin password if present, otherwise
        # generate a fresh one; either way it is returned to the caller.
        if body['rescue'] and 'adminPass' in body['rescue']:
            password = body['rescue']['adminPass']
        else:
            password = utils.generate_password()

        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            # An alternate rescue image may be requested, but only when the
            # extended-rescue-with-image extension is loaded.
            rescue_image_ref = None
            if self.ext_mgr.is_loaded("os-extended-rescue-with-image"):
                if body['rescue'] and 'rescue_image_ref' in body['rescue']:
                    rescue_image_ref = body['rescue']['rescue_image_ref']
            self.compute_api.rescue(context, instance,
                rescue_password=password, rescue_image_ref=rescue_image_ref)
        # Map compute-layer failures onto the HTTP responses API callers
        # expect: locked/invalid-state/volume problems -> 409, an
        # unrescuable instance -> 400.
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'rescue', id)
        except exception.InvalidVolume as volume_error:
            raise exc.HTTPConflict(explanation=volume_error.format_message())
        except exception.InstanceNotRescuable as non_rescuable:
            raise exc.HTTPBadRequest(
                explanation=non_rescuable.format_message())

        return {'adminPass': password}

    @wsgi.action('unrescue')
    def _unrescue(self, req, id, body):
        """Unrescue an instance."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.unrescue(context, instance)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'unrescue',
                                                                  id)
        # 202 Accepted: unrescue proceeds asynchronously.
        return webob.Response(status_int=202)
class Rescue(exts.ExtensionDescriptor):
    """Instance rescue mode."""

    name = "Rescue"
    alias = "os-rescue"
    namespace = "http://docs.openstack.org/compute/ext/rescue/api/v1.1"
    updated = "2011-08-18T00:00:00Z"

    def get_controller_extensions(self):
        # Attach the rescue/unrescue actions to the existing 'servers'
        # resource rather than registering a new top-level resource.
        controller = RescueController(self.ext_mgr)
        extension = exts.ControllerExtension(self, 'servers', controller)
        return [extension]
| apache-2.0 |
sarakha63/persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playvid.py | 41 | 2818 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
    """Extractor for playvid.com video pages."""

    _VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
    _TEST = {
        'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
        'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
        'info_dict': {
            'id': 'RnmBNgtrrJu',
            'ext': 'mp4',
            'title': 'md5:9256d01c6317e3f703848b5906880dc8',
            'duration': 82,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Site-rendered error block (e.g. removed video) -> hard failure
        # with the page's own message.
        m_error = re.search(
            r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
        if m_error:
            raise ExtractorError(clean_html(m_error.group('msg')), expected=True)

        video_title = None
        duration = None
        video_thumbnail = None
        formats = []

        # most of the information is stored in the flashvars
        flashvars = self._html_search_regex(
            r'flashvars="(.+?)"', webpage, 'flashvars')

        # flashvars is a URL-encoded '&'-separated key=value blob; walk
        # each entry and pick out the video_vars[...] fields.
        infos = compat_urllib_parse.unquote(flashvars).split(r'&')
        for info in infos:
            videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
            if videovars_match:
                key = videovars_match.group(1)
                val = videovars_match.group(2)

                if key == 'title':
                    video_title = compat_urllib_parse.unquote_plus(val)
                if key == 'duration':
                    try:
                        duration = int(val)
                    except ValueError:
                        pass
                if key == 'big_thumb':
                    video_thumbnail = val

                # Keys like 'video_urls][720p' carry one downloadable
                # format per resolution (']' because the outer bracket was
                # consumed by the video_vars[...] match above).
                videourl_match = re.match(
                    r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
                if videourl_match:
                    height = int(videourl_match.group('resolution'))
                    formats.append({
                        'height': height,
                        'url': val,
                    })
        self._sort_formats(formats)

        # Extract title - should be in the flashvars; if not, look elsewhere
        if video_title is None:
            video_title = self._html_search_regex(
                r'<title>(.*?)</title', webpage, 'title')

        return {
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'duration': duration,
            'description': None,
            'age_limit': 18
        }
| gpl-3.0 |
pliu55/RSEM | pRSEM/Transcript.py | 3 | 6028 | __doc__="""
peng 20131009
Data structure copied from RSEM, made some name changes
"""
class Transcript:
    """One transcript parsed from an RSEM .TI file (Python 2 code:
    uses dict.has_key)."""

    def __init__(self):
        self.transcript_id = None
        self.gene_id = None
        self.gene = None
        self.transcript_group = None
        self.chrom = None ## RSEM Transcript's string seqname
        self.strand = None
        self.length = None
        self.exon_ranges = []; ## RSEM Transcript's vector<Interval> structure
        self.gtf_attr = {}; ## RSEM Transcript's string left
        self.gtf_additional_info = None;
        self.start = None; ## genomic starting postion,
                           ## regardless of strand direction
                           ## always have a number smaller than self.end
        self.end = None; ## genomic ending postion
        self.tss = None; ## genomic coordinate of transcription starting site
        self.tes = None; ## genomic coordinate of transcription ending site

        ## mappability
        self.ave_mpp_around_TSS = None ## [TSS-flanking_width, TSS+flanking_width]
        self.ave_mpp_around_body = None ## (TSS+flanking_width, TES-flanking_width)
        self.ave_mpp_around_TES = None ## [TES-flanking_width, TES+flanking_width]

    def __str__(self):
        # Serializes in the same multi-line layout used by RSEM .TI blocks.
        s = "%s\n%s\n%s\n%s %d\n" % (self.transcript_id, self.gene_id, self.chrom,
                                     self.strand, self.length);
        s += "%d" % len(self.exon_ranges);
        for (start, end) in self.exon_ranges:
            s += " %d %d" % (start, end);
        s += "\n";
        for key in self.gtf_attr.keys():
            for val in self.gtf_attr[key]:
                s += '%s "%s"; ' % (key, val);
        s = s.rstrip();
        return s;

    def constructFromRSEMTI(self, ti_lines):
        """
        construct Transcript from the 6 lines from RSEM .TI file
        """
        self.quicklyConstructFromRSEMTI(ti_lines);
        # The 6th line holds ';'-separated 'key "value"' GTF attributes.
        feature_words = ti_lines[5].rstrip(';').split(';');
        for feature_word in feature_words:
            # NOTE(review): the lstrip() result is discarded — this looks
            # like it was meant to be feature_word = feature_word.lstrip().
            feature_word.lstrip();
            # NOTE(review): split() assumes the value contains no spaces —
            # a quoted value with whitespace would raise here; confirm.
            (key, val) = feature_word.split();
            if not self.gtf_attr.has_key(key):
                self.gtf_attr[key] = [];
            self.gtf_attr[key].append(val.strip('"'));

    def quicklyConstructFromRSEMTI(self, ti_lines):
        """
        quickly construct Transcript from the 6 lines from RSEM .TI file, the last
        line won't be parsed.
        """
        self.transcript_id = ti_lines[0].split("\t")[0]
        self.gene_id = ti_lines[1].split("\t")[0]
        self.chrom = ti_lines[2];
        (self.strand, self.length) = ti_lines[3].split();
        self.length = int(self.length);
        # Line 5: exon count followed by start/end pairs.
        words = ti_lines[4].split();
        for j in range(0, int(words[0])):
            start = int(words[j*2+1]);
            end = int(words[j*2+2]);
            self.exon_ranges.append( (start, end) );
        # Genomic extremes come from the first/last exon; TSS/TES then
        # depend on strand orientation.
        self.start = self.exon_ranges[0][0];
        self.end = self.exon_ranges[-1][-1];
        if self.strand == '+':
            self.tss = self.start
            self.tes = self.end
        elif self.strand == '-':
            self.tss = self.end
            self.tes = self.start
        self.gtf_additional_info = ti_lines[5];

    def defineTSSAndTES(self):
        """
        define TSS and TES from start/end according to strand, if they are
        not already set
        """
        if (self.tss is None) or (self.tes is None):
            if self.strand == '+':
                self.tss = self.start;
                self.tes = self.end;
            elif self.strand == '-':
                self.tss = self.end;
                self.tes = self.start;

    def calculateMappability(self, bin_bigwigsummary, fbigwig, width=500,
                             quiet=True):
        """
        calculate average mappability for a transcript's
        TSS region:  [TSS-width, TSS+width],
        body region: [start+width+1, end-width-1],
        TES region:  [TES-width, TES+width]
        if start+width+1 > end-width-1, then define body region as
        [end-width-1, start+width+1]

        assigns self.ave_mpp_around_TSS, self.ave_mpp_around_body and
        self.ave_mpp_around_TES (via bigWigSummary over *fbigwig*).
        """
        import Util
        if (self.tss is None) or (self.tes is None):
            self.defineTSSAndTES()
        self.ave_mpp_around_TSS = Util.calculateMappability('mean', self.chrom,
                                  self.tss - width, self.tss + width,
                                  bin_bigwigsummary, fbigwig, quiet)
        # The body window is flipped when the flanks overlap so that the
        # interval passed downstream is always low-to-high; a degenerate
        # (empty) body is defined to have mappability 1.0.
        if (self.start + width + 1) < (self.end - width - 1):
            self.ave_mpp_around_body = Util.calculateMappability('mean', self.chrom,
                                       self.start+width+1, self.end-width-1,
                                       bin_bigwigsummary, fbigwig, quiet)
        elif (self.start + width + 1) > (self.end - width - 1):
            self.ave_mpp_around_body = Util.calculateMappability('mean', self.chrom,
                                       self.end-width-1, self.start+width+1,
                                       bin_bigwigsummary, fbigwig, quiet)
        elif (self.start + width + 1) == (self.end - width - 1):
            self.ave_mpp_around_body = 1.0
        self.ave_mpp_around_TES = Util.calculateMappability('mean', self.chrom,
                                  self.tes - width, self.tes + width,
                                  bin_bigwigsummary, fbigwig, quiet)
def _readTranscriptsFromTI(fin, quickly):
    """Shared worker for readRSEMTI/quicklyReadRSEMTI.

    Parses RSEM's .TI file (a count line followed by 6-line transcript
    blocks) and returns a list of Transcript objects.  When *quickly* is
    True the per-transcript GTF attribute line is left unparsed.
    """
    import Util
    lines = Util.readFile(fin)
    (ntranscripts, foo) = lines[0].split()
    ntranscripts = int(ntranscripts)
    transcripts = []
    for i in range(0, ntranscripts):
        tr = Transcript()
        block = lines[i*6+1:i*6+7]
        if quickly:
            tr.quicklyConstructFromRSEMTI(block)
        else:
            tr.constructFromRSEMTI(block)
        transcripts.append(tr)
        if (i > 0) and (i % 20000 == 0):
            # Parenthesized form works under both Python 2 and Python 3
            # (the original used the py2-only print statement).
            print("processed %d transcripts" % i)
    return transcripts


def readRSEMTI(fin):
    """
    read RSEM's .ti file, return a list of Transcript objects
    """
    return _readTranscriptsFromTI(fin, quickly=False)


def quicklyReadRSEMTI(fin):
    """
    read RSEM's .ti file without parsing the additional information line (the
    last line in a transcript's block); return a list of Transcript objects
    """
    return _readTranscriptsFromTI(fin, quickly=True)
| gpl-3.0 |
nevermoreluo/privateoverseas | overseas/migrations/0001_initial.py | 1 | 3314 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-05 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agid', models.PositiveIntegerField(unique=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=200)),
('api_correlation_id', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Geo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metro', models.CharField(blank=True, max_length=100, null=True)),
('region', models.CharField(max_length=100)),
('requests', models.DecimalField(decimal_places=2, max_digits=20)),
('throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('hit_rate', models.DecimalField(decimal_places=2, max_digits=20)),
('status_4XX', models.DecimalField(decimal_places=2, max_digits=20)),
('status_5XX', models.DecimalField(decimal_places=2, max_digits=20)),
('time', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='NetworkIdentifiers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ni', models.CharField(max_length=100, unique=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scid', models.CharField(max_length=50, unique=True)),
('active', models.BooleanField(default=True)),
('access_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.AccessGroup')),
],
),
migrations.AddField(
model_name='networkidentifiers',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.Service'),
),
migrations.AddField(
model_name='geo',
name='ni',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.NetworkIdentifiers'),
),
]
| gpl-3.0 |
jaskaye17/nomadpad | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_luabuiltins.py | 275 | 6863 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a gioven function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| mit |
basicthinker/THNVM | tests/configs/simple-timing-mp.py | 69 | 2376 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
# Number of timing-mode CPUs in the simulated multiprocessor system.
nb_cores = 4
# Build a syscall-emulation system with simple timing CPUs and instantiate
# the simulation root (BaseSESystem comes from base_config).
root = BaseSESystem(mem_mode='timing', cpu_class=TimingSimpleCPU,
                    num_cpus=nb_cores).create_root()
| bsd-3-clause |
Jgarcia-IAS/localizacion | openerp/addons/website_event/models/event.py | 89 | 5542 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import re
from openerp.addons.website.models.website import slug
class event(osv.osv):
    """Event model extended for the website frontend.

    Adds SEO metadata, website publication with message tracking, a
    Twitter hashtag, and an optional dedicated website menu whose pages
    (Introduction / Location / Register) are generated per event.
    """
    _name = 'event.event'
    _inherit = ['event.event', 'website.seo.metadata']

    # Post a tracked message whenever the event is (un)published on the site.
    _track = {
        'website_published': {
            'website_event.mt_event_published': lambda self, cr, uid, obj, ctx=None: obj.website_published,
            'website_event.mt_event_unpublished': lambda self, cr, uid, obj, ctx=None: not obj.website_published
        },
    }

    def _get_new_menu_pages(self, cr, uid, event, context=None):
        """Create the default website pages for *event* and return a list of
        ``(menu label, url)`` tuples pointing at them."""
        context = context or {}
        todo = [
            (_('Introduction'), 'website_event.template_intro'),
            (_('Location'), 'website_event.template_location')
        ]
        web = self.pool.get('website')
        result = []
        for name, path in todo:
            name2 = name + ' ' + event.name
            newpath = web.new_page(cr, uid, name2, path, ispage=False, context=context)
            url = "/event/" + slug(event) + "/page/" + newpath
            result.append((name, url))
        return result

    def _set_show_menu(self, cr, uid, ids, name, value, arg, context=None):
        """Inverse method of ``show_menu``: build the dedicated website menu
        tree when enabled, delete it when disabled.

        NOTE(review): ``ids`` is a single id here (fields.function inverse
        signature), hence the ``[ids]`` wrapping below.
        """
        menuobj = self.pool.get('website.menu')
        eventobj = self.pool.get('event.event')
        for event in self.browse(cr, uid, [ids], context=context):
            if event.menu_id and not value:
                menuobj.unlink(cr, uid, [event.menu_id.id], context=context)
            elif value and not event.menu_id:
                root = menuobj.create(cr, uid, {
                    'name': event.name
                }, context=context)
                tocreate = self._get_new_menu_pages(cr, uid, event, context)
                tocreate.append((_('Register'), '/event/%s/register' % slug(event)))
                sequence = 0
                for name, url in tocreate:
                    menuobj.create(cr, uid, {
                        'name': name,
                        'url': url,
                        'parent_id': root,
                        'sequence': sequence
                    }, context=context)
                    sequence += 1
                eventobj.write(cr, uid, [event.id], {'menu_id': root}, context=context)
        return True

    def _get_show_menu(self, cr, uid, ids, field_name, arg, context=None):
        """Getter of ``show_menu``: an event has a dedicated menu iff
        ``menu_id`` is set."""
        res = dict.fromkeys(ids, '')
        for event in self.browse(cr, uid, ids, context=context):
            res[event.id] = bool(event.menu_id)
        return res

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the canonical website url (``/event/<slug>``) per event."""
        res = dict.fromkeys(ids, '')
        for event in self.browse(cr, uid, ids, context=context):
            res[event.id] = "/event/" + slug(event)
        return res

    def _default_hashtag(self, cr, uid, context=None):
        """Default Twitter hashtag: the current user's company name,
        lowercased with separators and punctuation stripped.

        Fix: the default was the mutable ``context={}``, which is shared
        between calls; use ``None`` instead (``browse`` accepts it).
        """
        name = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.name
        return re.sub("[- \\.\\(\\)\\@\\#\\&]+", "", name).lower()

    _columns = {
        'twitter_hashtag': fields.char('Twitter Hashtag'),
        'website_published': fields.boolean('Visible in Website', copy=False),
        # TDE TODO FIXME: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', ('model', '=', self._name), ('type', '=', 'comment')
            ],
            string='Website Messages',
            help="Website communication history",
        ),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'show_menu': fields.function(_get_show_menu, fnct_inv=_set_show_menu, type='boolean', string='Dedicated Menu'),
        'menu_id': fields.many2one('website.menu', 'Event Menu'),
    }
    _defaults = {
        'show_menu': False,
        'twitter_hashtag': _default_hashtag
    }

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Static Google map image for the event address, or None if the
        event has no address set."""
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0], context=context).address_id.google_map_img()
        return None

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Google maps link for the event address, or None if the event has
        no address set."""
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0], context=context).address_id.google_map_link()
        return None
| agpl-3.0 |
gitcoinco/web | app/dashboard/migrations/0123_auto_20200617_1549.py | 1 | 4319 | # Generated by Django 2.2.4 on 2020-06-17 15:49
import app.utils
from django.db import migrations, models
import django.db.models.deletion
import economy.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the PollMedia model, two
    # hackathon-registration flags and a question "hook"/media link, and
    # rewrites the (generated) choices of activity_type / question_type.
    # Do not edit by hand beyond comments; regenerate with makemigrations.

    dependencies = [
        ('dashboard', '0122_auto_20200615_1510'),
    ]

    operations = [
        migrations.CreateModel(
            name='PollMedia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True, default=economy.models.get_time)),
                ('modified_on', models.DateTimeField(default=economy.models.get_time)),
                ('name', models.CharField(max_length=350)),
                ('image', models.ImageField(blank=True, help_text='Poll media asset', null=True, upload_to=app.utils.get_upload_filename)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='hackathonregistration',
            name='looking_project',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='hackathonregistration',
            name='looking_team_members',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='question',
            name='hook',
            field=models.CharField(choices=[('NO_ACTION', 'No trigger any action'), ('TOWNSQUARE_INTRO', 'Create intro on Townsquare'), ('LOOKING_TEAM_PROJECT', 'Looking for team or project')], default='NO_ACTION', max_length=50),
        ),
        migrations.AlterField(
            model_name='activity',
            name='activity_type',
            field=models.CharField(blank=True, choices=[('wall_post', 'Wall Post'), ('status_update', 'Update status'), ('new_bounty', 'New Bounty'), ('start_work', 'Work Started'), ('stop_work', 'Work Stopped'), ('work_submitted', 'Work Submitted'), ('work_done', 'Work Done'), ('worker_approved', 'Worker Approved'), ('worker_rejected', 'Worker Rejected'), ('worker_applied', 'Worker Applied'), ('increased_bounty', 'Increased Funding'), ('killed_bounty', 'Canceled Bounty'), ('new_tip', 'New Tip'), ('receive_tip', 'Tip Received'), ('bounty_abandonment_escalation_to_mods', 'Escalated checkin from @gitcoinbot about bounty status'), ('bounty_abandonment_warning', 'Checkin from @gitcoinbot about bounty status'), ('bounty_removed_slashed_by_staff', 'Dinged and Removed from Bounty by Staff'), ('bounty_removed_by_staff', 'Removed from Bounty by Staff'), ('bounty_removed_by_funder', 'Removed from Bounty by Funder'), ('new_crowdfund', 'New Crowdfund Contribution'), ('new_grant', 'New Grant'), ('update_grant', 'Updated Grant'), ('killed_grant', 'Cancelled Grant'), ('negative_contribution', 'Negative Grant Contribution'), ('new_grant_contribution', 'Contributed to Grant'), ('new_grant_subscription', 'Subscribed to Grant'), ('killed_grant_contribution', 'Cancelled Grant Contribution'), ('new_kudos', 'New Kudos'), ('created_kudos', 'Created Kudos'), ('receive_kudos', 'Receive Kudos'), ('joined', 'Joined Gitcoin'), ('played_quest', 'Played Quest'), ('beat_quest', 'Beat Quest'), ('created_quest', 'Created Quest'), ('updated_avatar', 'Updated Avatar'), ('mini_clr_payout', 'Mini CLR Payout'), ('leaderboard_rank', 'Leaderboard Rank'), ('consolidated_leaderboard_rank', 'Consolidated Leaderboard Rank'), ('consolidated_mini_clr_payout', 'Consolidated CLR Payout'), ('hackathon_registration', 'Hackathon Registration'), ('hackathon_new_hacker', 'Hackathon Registration'), ('new_hackathon_project', 'New Hackathon Project'), ('flagged_grant', 'Flagged Grant')], db_index=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='question',
            name='question_type',
            field=models.CharField(choices=[('SINGLE_OPTION', 'Single option'), ('SINGLE_CHOICE', 'Single Choice'), ('MULTIPLE_CHOICE', 'Multiple Choices'), ('OPEN', 'Open')], max_length=50),
        ),
        migrations.AddField(
            model_name='question',
            name='header',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='dashboard.PollMedia'),
        ),
    ]
| agpl-3.0 |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/django/db/models/fields/files.py | 111 | 15938 | import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
    """A File subclass bound to a model instance and its FileField.

    Delegates actual storage operations (open/save/delete/size/url) to the
    field's storage backend, and keeps the owning model instance's field
    attribute in sync.  ``_committed`` tracks whether the current content
    has been written to storage yet.
    """

    def __init__(self, instance, field, name):
        super(FieldFile, self).__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatibility.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash must agree with __eq__, which compares by name only.
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.
    def _require_file(self):
        """Raise ValueError if no file is associated with this value."""
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        # Lazily open the underlying file from storage on first access.
        self._require_file()
        if not hasattr(self, '_file') or self._file is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)

    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)

    def _get_size(self):
        # Uncommitted content isn't in storage yet, so ask the file object.
        self._require_file()
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)
    size = property(_get_size)

    def open(self, mode='rb'):
        self._require_file()
        self.file.open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        """Save *content* to storage under *name* and update the instance.

        If *save* is True, also persist the owning model instance.
        """
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)

        # Update the filesize cache
        self._size = content.size
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        """Delete the file from storage and clear the instance attribute.

        If *save* is True, also persist the owning model instance.
        """
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.name, self.name)

        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    def _get_closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed
    closed = property(_get_closed)

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
    """
    The descriptor for the file attribute on the model instance. Returns a
    FieldFile when accessed so you can do stuff like::

        >>> instance.file.size

    Assigns a file object on assignment so you can do::

        >>> instance.file = File(...)
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance=None, owner=None):
        # Class-level access (instance is None) is not meaningful here.
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))

        # This is slightly complicated, so worth an explanation.
        # instance.file`needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.

        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = instance.__dict__[self.field.name]

        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, six.string_types) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.name] = attr

        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to the. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.name] = file_copy

        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage

        # That was fun, wasn't it?
        return instance.__dict__[self.field.name]

    def __set__(self, instance, value):
        # Store the raw value; __get__ normalizes it to a FieldFile lazily.
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """Model field that stores a file path and delegates file operations to
    a storage backend (``default_storage`` unless one is supplied)."""

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor

    description = _("File")

    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # A file path is neither unique nor a sensible primary key.
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))

        self.storage = storage or default_storage
        self.upload_to = upload_to
        if callable(upload_to):
            # A callable upload_to replaces generate_filename wholesale.
            self.generate_filename = upload_to

        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FileField"

    def get_prep_lookup(self, lookup_type, value):
        # Allow lookups against File objects by comparing on their name.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return six.text_type(value)

    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        file = super(FileField, self).pre_save(model_instance, add)
        if file and not file._committed:
            # Commit the file to storage prior to saving the model
            file.save(file.name, file, save=False)
        return file

    def contribute_to_class(self, cls, name):
        # Install the descriptor so attribute access yields a FieldFile.
        super(FileField, self).contribute_to_class(cls, name)
        setattr(cls, self.name, self.descriptor_class(self))

    def get_directory_name(self):
        """Expand strftime() patterns in upload_to into a directory name."""
        return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))

    def get_filename(self, filename):
        """Return a storage-sanitized basename for *filename*."""
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))

    def generate_filename(self, instance, filename):
        # Default naming strategy; overridden when upload_to is callable.
        return os.path.join(self.get_directory_name(), self.get_filename(filename))

    def save_form_data(self, instance, data):
        # Important: None means "no change", other false value means "clear"
        # This subtle distinction (rather than a more explicit marker) is
        # needed because we need to consume values that are also sane for a
        # regular (non Model-) Form to find in its cleaned_data dictionary.
        if data is not None:
            # This value will be converted to unicode and stored in the
            # database, so leaving False as-is is not acceptable.
            if not data:
                data = ''
            setattr(instance, self.name, data)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
    """
    Just like the FileDescriptor, but for ImageFields. The only difference is
    assigning the width/height to the width_field/height_field, if appropriate.
    """
    def __set__(self, instance, value):
        # Capture the prior value before the base class overwrites it.
        previous_file = instance.__dict__.get(self.field.name)
        super(ImageFileDescriptor, self).__set__(instance, value)

        # To prevent recalculating image dimensions when we are instantiating
        # an object from the database (bug #11084), only update dimensions if
        # the field had a value before this assignment.  Since the default
        # value for FileField subclasses is an instance of field.attr_class,
        # previous_file will only be None when we are called from
        # Model.__init__().  The ImageField.update_dimension_fields method
        # hooked up to the post_init signal handles the Model.__init__() cases.
        # Assignment happening outside of Model.__init__() will trigger the
        # update right here.
        if previous_file is not None:
            self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for ImageFields; adds cleanup of the cached dimensions."""

    def delete(self, save=True):
        # Drop any cached image dimensions before removing the file itself;
        # the attribute may legitimately be absent.
        try:
            del self._dimensions_cache
        except AttributeError:
            pass
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """FileField specialized for images: can mirror the image's width and
    height into sibling model fields (width_field / height_field)."""

    attr_class = ImageFieldFile
    descriptor_class = ImageFileDescriptor
    description = _("Image")

    def __init__(self, verbose_name=None, name=None, width_field=None,
                 height_field=None, **kwargs):
        # Names of sibling fields that should receive the image dimensions.
        self.width_field, self.height_field = width_field, height_field
        super(ImageField, self).__init__(verbose_name, name, **kwargs)

    def contribute_to_class(self, cls, name):
        super(ImageField, self).contribute_to_class(cls, name)
        # Attach update_dimension_fields so that dimension fields declared
        # after their corresponding image field don't stay cleared by
        # Model.__init__, see bug #11196.
        signals.post_init.connect(self.update_dimension_fields, sender=cls)

    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
        """
        Updates field's width and height fields, if defined.

        This method is hooked up to model's post_init signal to update
        dimensions after instantiating a model instance. However, dimensions
        won't be updated if the dimensions fields are already populated. This
        avoids unnecessary recalculation when loading an object from the
        database.

        Dimensions can be forced to update with force=True, which is how
        ImageFileDescriptor.__set__ calls this method.
        """
        # Nothing to update if the field doesn't have have dimension fields.
        has_dimension_fields = self.width_field or self.height_field
        if not has_dimension_fields:
            return

        # getattr will call the ImageFileDescriptor's __get__ method, which
        # coerces the assigned value into an instance of self.attr_class
        # (ImageFieldFile in this case).
        file = getattr(instance, self.attname)

        # Nothing to update if we have no file and not being forced to update.
        if not file and not force:
            return

        dimension_fields_filled = not(
            (self.width_field and not getattr(instance, self.width_field))
            or (self.height_field and not getattr(instance, self.height_field))
        )
        # When both dimension fields have values, we are most likely loading
        # data from the database or updating an image field that already had
        # an image stored.  In the first case, we don't want to update the
        # dimension fields because we are already getting their values from the
        # database.  In the second case, we do want to update the dimensions
        # fields and will skip this return because force will be True since we
        # were called from ImageFileDescriptor.__set__.
        if dimension_fields_filled and not force:
            return

        # file should be an instance of ImageFieldFile or should be None.
        if file:
            width = file.width
            height = file.height
        else:
            # No file, so clear dimensions fields.
            width = None
            height = None

        # Update the width and height fields.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.ImageField}
        defaults.update(kwargs)
        return super(ImageField, self).formfield(**defaults)
| mit |
gigawhitlocks/zulip | zerver/templatetags/minified_js.py | 118 | 1402 | from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
register = Library()
class MinifiedJSNode(Node):
    """Render <script> tags for a JS bundle declared in settings.JS_SPECS.

    In DEBUG mode each source file gets its own tag; otherwise a single tag
    for the minified output file is emitted.
    """
    def __init__(self, sourcefile):
        self.sourcefile = sourcefile

    def render(self, context):
        spec = settings.JS_SPECS[self.sourcefile]
        if settings.DEBUG:
            scripts = spec['source_filenames']
        else:
            scripts = [spec['output_filename']]
        tags = []
        for script in scripts:
            url = staticfiles_storage.url(script)
            tags.append('<script type="text/javascript" src="%s" charset="utf-8"></script>'
                        % url)
        return '\n'.join(tags)
@register.tag
def minified_js(parser, token):
    """Template tag usage: {% minified_js "specname" %}.

    Validates that exactly one quoted argument naming an entry in
    settings.JS_SPECS was supplied, then returns a MinifiedJSNode.
    """
    try:
        tag_name, sourcefile = token.split_contents()
    except ValueError:
        # Bug fix: the unpacking above failed before tag_name was bound, so
        # referencing it here raised NameError instead of the intended
        # TemplateSyntaxError.  Derive the tag name from the raw token.
        raise TemplateSyntaxError("%s tag requires an argument"
                                  % token.contents.split()[0])
    if not (sourcefile[0] == sourcefile[-1] and sourcefile[0] in ('"', "'")):
        raise TemplateSyntaxError("%s tag should be quoted" % tag_name)

    sourcefile = sourcefile[1:-1]
    if sourcefile not in settings.JS_SPECS:
        raise TemplateSyntaxError("%s tag invalid argument: no JS file %s"
                                  % (tag_name, sourcefile))
    return MinifiedJSNode(sourcefile)
| apache-2.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/perf/benchmarks/sunspider.py | 27 | 5929 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import json
import os
from metrics import power
from telemetry import benchmark
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
# Entry page of the hosted SunSpider 1.0.2 benchmark driver.
_URL = 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html'

# Human-readable description per SunSpider sub-benchmark, attached to the
# reported values below so dashboards can explain what each number measures.
DESCRIPTIONS = {
    '3d-cube':
        'Pure JavaScript computations of the kind you might use to do 3d '
        'rendering, but without the rendering. This ends up mostly hitting '
        'floating point math and array access.',
    '3d-morph':
        'Pure JavaScript computations of the kind you might use to do 3d '
        'rendering, but without the rendering. This ends up mostly hitting '
        'floating point math and array access.',
    '3d-raytrace':
        'Pure JavaScript computations of the kind you might use to do 3d '
        'rendering, but without the rendering. This ends up mostly hitting '
        'floating point math and array access.',
    'access-binary-trees': 'Array, object property and variable access.',
    'access-fannkuch': 'Array, object property and variable access.',
    'access-nbody': 'Array, object property and variable access.',
    'access-nsieve': 'Array, object property and variable access.',
    'bitops-3bit-bits-in-byte':
        'Bitwise operations, these can be useful for various things '
        'including games, mathematical computations, and various kinds of '
        'encoding/decoding. It\'s also the only kind of math in JavaScript '
        'that is done as integer, not floating point.',
    'bitops-bits-in-byte':
        'Bitwise operations, these can be useful for various things '
        'including games, mathematical computations, and various kinds of '
        'encoding/decoding. It\'s also the only kind of math in JavaScript '
        'that is done as integer, not floating point.',
    'bitops-bitwise-and':
        'Bitwise operations, these can be useful for various things '
        'including games, mathematical computations, and various kinds of '
        'encoding/decoding. It\'s also the only kind of math in JavaScript '
        'that is done as integer, not floating point.',
    'bitops-nsieve-bits':
        'Bitwise operations, these can be useful for various things '
        'including games, mathematical computations, and various kinds of '
        'encoding/decoding. It\'s also the only kind of math in JavaScript '
        'that is done as integer, not floating point.',
    'controlflow-recursive':
        'Control flow constructs (looping, recursion, conditionals). Right '
        'now it mostly covers recursion, as the others are pretty well covered '
        'by other tests.',
    'crypto-aes': 'Real cryptography code related to AES.',
    'crypto-md5': 'Real cryptography code related to MD5.',
    'crypto-sha1': 'Real cryptography code related to SHA1.',
    'date-format-tofte': 'Performance of JavaScript\'s "date" objects.',
    'date-format-xparb': 'Performance of JavaScript\'s "date" objects.',
    'math-cordic': 'Various mathematical type computations.',
    'math-partial-sums': 'Various mathematical type computations.',
    'math-spectral-norm': 'Various mathematical type computations.',
    'regexp-dna': 'Regular expressions performance.',
    'string-base64': 'String processing.',
    'string-fasta': 'String processing',
    'string-tagcloud': 'String processing code to generate a giant "tagcloud".',
    'string-unpack-code': 'String processing code to extracting compressed JS.',
    'string-validate-input': 'String processing.',
}
class _SunspiderMeasurement(page_test.PageTest):
    """Runs the SunSpider page, collects per-benchmark timings and power."""

    def __init__(self):
        super(_SunspiderMeasurement, self).__init__()
        self._power_metric = None

    def CustomizeBrowserOptions(self, options):
        power.PowerMetric.CustomizeBrowserOptions(options)

    def WillStartBrowser(self, platform):
        self._power_metric = power.PowerMetric(platform)

    def DidNavigateToPage(self, page, tab):
        self._power_metric.Start(page, tab)

    def ValidateAndMeasurePage(self, page, tab, results):
        # Wait until the driver has redirected to results.html and the
        # `output` variable holding the raw results exists.
        tab.WaitForJavaScriptExpression(
            'window.location.pathname.indexOf("results.html") >= 0'
            '&& typeof(output) != "undefined"', 300)

        self._power_metric.Stop(page, tab)
        self._power_metric.AddResults(tab, results)

        js_get_results = 'JSON.stringify(output);'
        js_results = json.loads(tab.EvaluateJavaScript(js_get_results))

        # js_results is a list of runs, each a map of benchmark name -> time:
        # [{'3d-cube': v1, ...}, {'3d-cube': v3, ...}, ...].
        # Pivot it into per-benchmark value lists plus a per-run total.
        per_test = collections.defaultdict(list)
        totals = []
        for run in js_results:
            totals.append(sum(run.itervalues()))
            for name, value in run.iteritems():
                per_test[name].append(value)

        for name, values in per_test.iteritems():
            results.AddValue(list_of_scalar_values.ListOfScalarValues(
                results.current_page, name, 'ms', values, important=False,
                description=DESCRIPTIONS.get(name)))
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, 'Total', 'ms', totals,
            description='Totals of run time for each different type of benchmark '
                        'in sunspider'))
class Sunspider(benchmark.Benchmark):
  """Apple's SunSpider JavaScript benchmark."""
  # Measurement class that drives the pages and records the results.
  test = _SunspiderMeasurement
  def CreatePageSet(self, options):
    # Single-page set pointing at the hosted SunSpider driver; JavaScript
    # determinism is disabled because timings are read from the results
    # page rather than from a deterministic replay.
    ps = page_set.PageSet(
      archive_data_file='../page_sets/data/sunspider.json',
      make_javascript_deterministic=False,
      file_path=os.path.abspath(__file__))
    # NOTE(review): _URL is defined elsewhere in this file (not visible here).
    ps.AddPageWithDefaultRunNavigate(_URL)
    return ps
| bsd-3-clause |
vFense/vFenseAgent-nix | agent/deps/mac/Python-2.7.5/lib/python2.7/_weakrefset.py | 62 | 5607 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
    """A set that holds weak references to its elements.

    An element is discarded automatically once no strong reference to it
    remains.  Removals that would occur while the set is being iterated
    are deferred (via _IterationGuard) and committed afterwards.
    """
    def __init__(self, data=None):
        # Underlying storage: a set of weakrefs to the elements.
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # Weakref death callback; 'item' is the now-dead weakref.
            # Only a weak reference to the WeakSet itself is captured, so
            # the callback does not keep the set alive.
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Iteration in progress: defer the removal.
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        # Set of active _IterationGuard instances.
        self._iterating = set()
        if data is not None:
            self.update(data)
    def _commit_removals(self):
        # Flush removals deferred while iteration was in progress.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())
    def __iter__(self):
        # Guard the whole iteration so concurrent element deaths do not
        # mutate self.data underneath us.
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    yield item
    def __len__(self):
        # Entries queued for removal are already dead, so exclude them.
        return len(self.data) - len(self._pending_removals)
    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # Non-weakrefable objects can never be members.
            return False
        return wr in self.data
    def __reduce__(self):
        # Pickle as (class, (list of live elements,), instance dict).
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))
    # Mutable, therefore unhashable.
    __hash__ = None
    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        # The weakref carries self._remove as its death callback.
        self.data.add(ref(item, self._remove))
    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()
    def copy(self):
        return self.__class__(self)
    def pop(self):
        # Remove and return an arbitrary live element; KeyError if empty.
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                # Skip references whose targets have already died.
                return item
    def remove(self, item):
        # Remove *item*; KeyError if it is not present.
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))
    def discard(self, item):
        # Remove *item* if present; no error otherwise.
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))
    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)
    def __ior__(self, other):
        self.update(other)
        return self
    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference
    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self
    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection
    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self
    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset
    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)
    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset
    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)
    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            # Elements coming from 'other' need the removal callback too.
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self
    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union
    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
| lgpl-3.0 |
0x414c/lastgraph | lastslice/slice.py | 2 | 6976 | #!/usr/bin/python
import os
import sys
import web
import time
import random
import datetime
import threading
from StringIO import StringIO
FILEROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(FILEROOT, ".."))
sys.path.insert(1, os.path.join(FILEROOT, "..", "lib"))
os.environ['DJANGO_SETTINGS_MODULE'] = "settings"
from colorsys import *
from graphication import *
from graphication.wavegraph import WaveGraph
from graphication.color import hex_to_rgba
from PIL import Image
import lastslice.shortslice_css as slice_style
import lastslice.longslice_css as long_style
from lastgui.fetch import fetcher
from django.core.cache import cache
errfile = open("/tmp/sliceerr.txt", "a")
urls = (
"/slice/([^/]+)/", "Slice",
"/slice/([^/]+)/(\d+)/(\d+)/", "Slice",
"/slice/([^/]+)/(\d+)/(\d+)/([^/]+)/", "Slice",
"/longslice/([^/]+)/", "LongSlice",
"/longslice/([^/]+)/.pdf", "LongSlicePDF",
"/longslice/([^/]+)/(\d+)/(\d+)/", "LongSlice",
"/longslice/([^/]+)/(\d+)/(\d+)/([^/]+)/", "LongSlice",
"/colours/([^/]+)/", "Colours",
)
fetcher.debug = False
class DataError(StandardError): pass
def rgba_to_hex(r, g, b, a):
    """Convert float RGBA components in [0, 1] to an 8-digit hex string.

    Each component is scaled to 0-255 and rendered as two lowercase hex
    digits, e.g. rgba_to_hex(1, 0, 0, 1) -> 'ff0000ff'.
    """
    # int() is required: the '%x' conversion rejects floats (deprecated on
    # Python 2, a TypeError on Python 3), and r*255 etc. are floats here.
    return "%02x%02x%02x%02x" % (int(r * 255), int(g * 255),
                                 int(b * 255), int(a * 255))
class ThreadedWeek(threading.Thread):
    # Fetches one week of last.fm artist data on its own thread so that
    # several weeks can be downloaded concurrently.
    def __init__(self, user, start, end):
        threading.Thread.__init__(self)
        self.user = user
        self.range = start, end  # (start, end) timestamps of the week
    def run(self):
        # Result is stored on the instance; read it after join().
        self.data = list(fetcher.weekly_artists(self.user, self.range[0], self.range[1]))
def get_data(user, length=4):
    """Fetch the last `length` weeks of artist data for `user`.

    Returns (data, weeks) where `data` is one artist/playcount list per
    week and `weeks` the matching (start, end) timestamp pairs, or
    (None, None) if the user's week list could not be fetched.
    """
    # NOTE(review): the memcache-based locking below is commented out, so
    # the cache key and the "locked" spin loop are currently dead code.
    cache_key = 'user_%s:%s' % (length, user.replace(" ","+"))
    data = None #cache.get(cache_key)
    while data == "locked":
        time.sleep(0.01)
    if not data:
        #cache.set(cache_key, "locked", 5)
        try:
            weeks = list(fetcher.weeks(user))
        except:
            # Best-effort error logging; never let logging itself crash us.
            import traceback
            try:
                errfile.write(traceback.format_exc())
                errfile.flush()
            except:
                pass
            return None, None
        # One fetch thread per week, started concurrently, then joined.
        threads = [ThreadedWeek(user, start, end) for start, end in weeks[-length:]]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        data = ([thread.data for thread in threads], weeks[-length:])
        #cache.set(cache_key, data, 30)
    return data
def get_series(user, length=4, limit=15):
    """Build a graphication SeriesSet of the user's top artists per week.

    Returns None if the user's data could not be fetched after three tries.
    """
    if ":" in user:
        # Usernames arriving in URL-safe form use '_' in place of '+'.
        user = user.replace("_", "+")
    # Retry the fetch up to three times before giving up.
    data, weeks = get_data(user, length)
    if not data and not weeks:
        data, weeks = get_data(user, length)
    if not data and not weeks:
        data, weeks = get_data(user, length)
    if not data and not weeks:
        return None
    # Collect every artist seen in any week...
    artists = {}
    for week in data:
        for artist, plays in week:
            artists[artist] = []
    # ...then build one play-count list per artist, aligned with `weeks`.
    for week in data:
        week = dict(week)
        for artist in artists:
            plays = week.get(artist, 0)
            if plays < 2:
                plays = 0  # suppress single plays as noise
            artists[artist].append(plays)
    # Sort artists by their busiest week, highest first.
    artists = artists.items()
    artists.sort(key=lambda (x,y):max(y))
    artists.reverse()
    # Interpolate colours between pink (#ec2d60) and blue (#0c4da2) in HSV
    # space, alternating alpha so adjacent bands stay distinguishable.
    sh, ss, sv = rgb_to_hsv(*hex_to_rgba("ec2d60")[:3])
    eh, es, ev = rgb_to_hsv(*hex_to_rgba("0c4da2")[:3])
    a = True
    ad = 0.3
    th, ts, tv = (eh-sh)/float(limit), (es-ss)/float(limit), (ev-sv)/float(limit)
    series_set = SeriesSet()
    # NOTE(review): the slice is hard-coded to 15 rather than `limit`.
    for artist, data in artists[:15]:
        series_set.add_series(Series(
            artist,
            dict([(datetime.datetime.fromtimestamp(weeks[i][0]), x) for i, x in enumerate(data)]),
            rgba_to_hex(*(hsv_to_rgb(sh, ss, sv) + (a and 1 or 1-ad,))),
        ))
        sh += th
        ss += ts
        sv += tv
        a = not a
        ad += (0.6/limit)
    return series_set
class Slice(object):
    """web.py handler: renders a small PNG wavegraph (default 230x138)."""
    def GET(self, username, width=230, height=138, labels=False):
        web.header("Content-Type", "image/png")
        width = int(width)
        height = int(height)
        series_set = get_series(username)
        output = FileOutput(padding=0, style=slice_style)
        if series_set:
            # Create the output
            scale = AutoWeekDateScale(series_set, short_labels=True)
            # OK, render that.
            wg = WaveGraph(series_set, scale, slice_style, bool(labels), vertical_scale=False)
            output.add_item(wg, x=0, y=0, width=width, height=height)
        else:
            # Fetch failed: render an error label instead of a graph.
            output.add_item(Label("invalid username"), x=0, y=0, width=width, height=height)
        # web.py captures stdout, so print streams the PNG to the client.
        print output.stream('png').read()
class LongSlice(object):
    """web.py handler: renders the large PNG wavegraph with a watermark."""
    def GET(self, username, width=1200, height=400, labels=False):
        web.header("Content-Type", "image/png")
        width = int(width)
        height = int(height)
        # 12 weeks of history, up to 25 artists.
        series_set = get_series(username, 12, 25)
        # Create the output
        output = FileOutput(padding=0, style=long_style)
        if series_set:
            scale = AutoWeekDateScale(series_set, year_once=False)
            # OK, render that.
            wg = WaveGraph(series_set, scale, long_style, not bool(labels), textfix=True)
            output.add_item(wg, x=0, y=0, width=width, height=height)
        else:
            # Fetch failed: render an error label instead of a graph.
            output.add_item(Label("invalid username"), x=0, y=0, width=width, height=height)
        # Load it into a PIL image
        img = Image.open(output.stream('png'))
        # Load the watermark
        mark = Image.open(os.path.join(os.path.dirname(__file__), "watermark.png"))
        # Combine them
        nw, nh = img.size
        nh += 40  # extra strip below the graph for the watermark
        out = Image.new("RGB", (nw, nh), "White")
        out.paste(img, (0,0))
        out.paste(mark, (width-210, height+10))
        # Stream the result
        outf = StringIO()
        out.save(outf, "png")
        outf.seek(0)
        print outf.read()
class LongSlicePDF(object):
def GET(self, username, width=1200, height=400, labels=False):
web.header("Content-Type", "application/x-pdf")
width = int(width)
height = int(height)
series_set = get_series(username, 12, 25)
# Create the output
output = FileOutput(padding=0, style=long_style)
scale = AutoWeekDateScale(series_set)
# OK, render that.
wg = WaveGraph(series_set, scale, long_style, not bool(labels), textfix=True)
output.add_item(wg, x=0, y=0, width=width, height=height)
print output.stream('pdf').read()
class Colours:
def GET(self, username):
series_set = get_series(username)
for series in series_set:
print "%s,%s" % (series.title, series.color)
#web.webapi.internalerror = web.debugerror
if __name__ == "__main__": web.run(urls, globals())
| bsd-3-clause |
aashish24/VTK-old | Examples/Infovis/Python/Rcalculator_mst.py | 9 | 2498 |
# Python examples script that uses the R calculator filter to find the
# maxiumum spanning tree of a random input graph by inverting the edge
# weights of the graph in R. The MST algorithm then finds the maximum
# spanning tree instead of the minimum spanning tree.
# VTK must be built with VTK_USE_GNU_R turned on for this example to work!
from vtk import *
if __name__ == "__main__":
    # Generate a random graph with 20 vertices and a random number of edges
    source = vtkRandomGraphSource()
    source.SetNumberOfVertices(20)
    source.SetEdgeProbability(0.2)
    source.SetUseEdgeProbability(True)
    source.SetStartWithTree(True)
    source.IncludeEdgeWeightsOn()
    source.AllowParallelEdgesOn()
    # Create RCalculatorFilter for interaction with R
    rcalculator = vtkRCalculatorFilter()
    rcalculator.SetInputConnection(source.GetOutputPort())
    # Display R output on the terminal
    rcalculator.SetRoutput(1)
    # Copy edge weight array to R as variable ew
    rcalculator.PutArray("edge weight","ew")
    # Invert the edge weight data.  Kruskal's *minimum* spanning tree over
    # 1/w is then the *maximum* spanning tree over the original weights w.
    rcalculator.SetRscript("ew = 1.0/ew\n")
    # Copy edge weight array back to VTK.
    rcalculator.GetArray("edge weight","ew")
    # Find the minimal spanning tree (will be maximal spanning tree)
    mstTreeSelection = vtkBoostKruskalMinimumSpanningTree()
    mstTreeSelection.SetInputConnection(rcalculator.GetOutputPort())
    mstTreeSelection.SetEdgeWeightArrayName("edge weight")
    mstTreeSelection.NegateEdgeWeightsOn()
    mstTreeSelection.Update()
    # Create a graph layout view
    view = vtkGraphLayoutView()
    view.AddRepresentationFromInputConnection(rcalculator.GetOutputPort())
    view.SetColorVertices(True)
    view.SetEdgeLabelArrayName("edge weight")
    view.SetEdgeLabelVisibility(True)
    view.SetEdgeColorArrayName("edge weight")
    view.SetColorEdges(True)
    view.SetLayoutStrategyToSimple2D()
    view.SetVertexLabelFontSize(14)
    view.SetEdgeLabelFontSize(12)
    # Make sure the representation is using a pedigree id selection
    view.GetRepresentation(0).SetSelectionType(2)
    # Set the selection to be the MST
    view.GetRepresentation(0).GetAnnotationLink().SetCurrentSelection(mstTreeSelection.GetOutput())
    # Set the theme on the view
    theme = vtkViewTheme.CreateMellowTheme()
    theme.SetLineWidth(5)
    theme.SetPointSize(10)
    theme.SetCellOpacity(1)
    theme.SetSelectedCellColor(1,0,1)
    view.ApplyViewTheme(theme)
    theme.FastDelete()
    view.GetRenderWindow().SetSize(600, 600)
    view.ResetCamera()
    # Blocks in the interactor's event loop until the window is closed.
    view.Render()
    view.GetInteractor().Start()
| bsd-3-clause |
isabernardes/Heriga | Herigaenv/lib/python2.7/site-packages/django/conf/locale/az/formats.py | 1059 | 1267 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): this is the 'az' (Azerbaijani) locale, yet the display
# formats below use the Cyrillic abbreviation 'г.' as in the Russian
# locale — they appear copied from ru/formats.py; confirm against upstream.
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y',  # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| mit |
zstackio/zstack-woodpecker | integrationtest/vm/mini/paths/path54.py | 1 | 2373 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the scripted test path for this case.

    The dict carries the initial formation template plus an ordered list
    of TestAction steps (VM / volume / image / backup operations) that the
    test harness replays in sequence.  The expected final state is
    described in the module-level string below this function.
    """
    return dict(initial_formation="template5", path_list=[
        [TestAction.add_image, 'image1', 'root', os.environ.get('isoForVmUrl')],
        [TestAction.create_vm_by_image, 'image1', 'iso', 'vm1'],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
        [TestAction.detach_volume, 'volume1'],
        [TestAction.create_mini_vm, 'vm2', 'memory=random'],
        [TestAction.resize_data_volume, 'volume1', 5*1024*1024],
        [TestAction.attach_volume, 'vm2', 'volume1'],
        [TestAction.create_mini_vm, 'vm3', 'network=random'],
        [TestAction.create_volume, 'volume2', 'flag=thin,scsi'],
        [TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.stop_vm, 'vm2'],
        [TestAction.use_volume_backup, 'volume1-backup1'],
        [TestAction.start_vm, 'vm2'],
        [TestAction.delete_image, 'image2'],
        [TestAction.recover_image, 'image2'],
        [TestAction.delete_image, 'image2'],
        [TestAction.expunge_image, 'image2'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
        [TestAction.reboot_vm, 'vm2'],
        [TestAction.resize_data_volume, 'volume2', 5*1024*1024],
        [TestAction.create_volume, 'volume3', 'size=random', 'flag=scsi'],
        [TestAction.delete_volume, 'volume3'],
        [TestAction.stop_vm, 'vm3'],
        [TestAction.add_image, 'image3', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.delete_volume, 'volume1'],
        [TestAction.expunge_volume, 'volume1'],
        [TestAction.reboot_vm, 'vm2'],
        [TestAction.create_vm_backup, 'vm2', 'vm2-backup3'],
        [TestAction.resize_volume, 'vm3', 5*1024*1024],
        [TestAction.delete_image, 'image1'],
        [TestAction.delete_vm_backup, 'vm2-backup3'],
        [TestAction.add_image, 'image4', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.create_volume, 'volume4', 'flag=scsi'],
        [TestAction.attach_volume, 'vm2', 'volume2'],
    ])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:['vm3']
Enadbled:['volume1-backup1', 'vm1-backup2', 'image3', 'image4']
attached:['volume2']
Detached:['volume4']
Deleted:['volume3', 'vm2-backup3', 'image1']
Expunged:['volume1', 'image2']
Ha:[]
Group:
vm_backup1:['vm1-backup2']---vm1_
'''
| apache-2.0 |
albertomurillo/ansible | lib/ansible/modules/cloud/rackspace/rax_cdb_database.py | 102 | 4470 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
name:
description:
- Name to give to the database
character_set:
description:
- Set of symbols and encodings
default: 'utf8'
collate:
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: "Simon JAILLET (@jails)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
def find_database(instance, name):
    """Return the named database on *instance*, or False if lookup fails."""
    try:
        return instance.get_database(name)
    except Exception:
        return False
def save_database(module, cdb_id, name, character_set, collate):
    """Ensure a database named *name* exists on the Cloud Databases instance.

    Exits the Ansible module via exit_json (changed=True only when the
    database had to be created) or fail_json on any API error.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    except Exception as e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    database = find_database(instance, name)
    if not database:
        try:
            database = instance.create_database(name=name,
                                                character_set=character_set,
                                                collate=collate)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    # exit_json terminates module execution.
    module.exit_json(changed=changed, action='create',
                     database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
    """Delete the named database from the instance, if it is present.

    Exits the Ansible module via exit_json (changed=True only when a
    deletion actually happened) or fail_json on any API error.
    """
    try:
        instance = pyrax.cloud_databases.get(cdb_id)
    except Exception as exc:
        module.fail_json(msg='%s' % exc.message)

    database = find_database(instance, name)
    changed = False
    if database:
        try:
            database.delete()
        except Exception as exc:
            module.fail_json(msg='%s' % exc.message)
        else:
            changed = True

    module.exit_json(changed=changed, action='delete',
                     database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
    """Dispatch to the create or delete handler based on *state*."""
    if state == 'absent':
        delete_database(module, cdb_id, name)
    elif state == 'present':
        save_database(module, cdb_id, name, character_set, collate)
def main():
    """Ansible entry point: parse arguments and apply the requested state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            cdb_id=dict(type='str', required=True),
            name=dict(type='str', required=True),
            character_set=dict(type='str', default='utf8'),
            collate=dict(type='str', default='utf8_general_ci'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )
    # pyrax import is attempted at module load; bail out cleanly if absent.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    cdb_id = module.params.get('cdb_id')
    name = module.params.get('name')
    character_set = module.params.get('character_set')
    collate = module.params.get('collate')
    state = module.params.get('state')
    setup_rax_module(module, pyrax)
    rax_cdb_database(module, state, cdb_id, name, character_set, collate)
if __name__ == '__main__':
main()
| gpl-3.0 |
StephenKing/summerschool-2015-ryu | ryu/contrib/ovs/process.py | 56 | 1467 | # Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
def _signal_status_msg(type_, signr):
s = "%s by signal %d" % (type_, signr)
for name in signal.__dict__:
if name.startswith("SIG") and getattr(signal, name) == signr:
return "%s (%s)" % (s, name)
return s
def status_msg(status):
    """Given 'status', which is a process status in the form reported by
    waitpid(2) and returned by process_status(), returns a string describing
    how the process terminated."""
    if os.WIFEXITED(status):
        msg = "exit status %d" % os.WEXITSTATUS(status)
    elif os.WIFSIGNALED(status):
        msg = _signal_status_msg("killed", os.WTERMSIG(status))
    elif os.WIFSTOPPED(status):
        msg = _signal_status_msg("stopped", os.WSTOPSIG(status))
    else:
        msg = "terminated abnormally (%x)" % status

    # A core dump can accompany any of the cases above.
    if os.WCOREDUMP(status):
        msg += ", core dumped"
    return msg
| apache-2.0 |
Aaron1992/v2ex | mail.py | 20 | 2098 | #!/usr/bin/env python
# coding=utf-8
import logging
import re
from v2ex.babel import Member
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.ext.webapp.util import run_wsgi_app
from twitter.oauthtwitter import OAuthApi
from twitter.oauth import OAuthToken
from config import twitter_consumer_key as CONSUMER_KEY
from config import twitter_consumer_secret as CONSUMER_SECRET
def extract_address(raw):
    """Return the bare address from a header value like 'Name <a@b.com>'.

    Values without angle brackets are returned unchanged.
    """
    if '<' not in raw:
        return raw
    return re.findall('<(.+)>', raw)[0]
class MailHandler(InboundMailHandler):
    """App Engine inbound-mail hook: tweets the body of mail sent to tweet@."""
    def receive(self, message):
        # Only plain-text parts are considered.
        bodies = message.bodies(content_type = 'text/plain')
        for body in bodies:
            to = extract_address(message.to)
            sender = extract_address(message.sender.lower())
            # The address local-part selects the action; only 'tweet' is handled.
            if to[0:5].lower() == 'tweet':
                # Look up the member by the sender's e-mail address.
                q = db.GqlQuery("SELECT * FROM Member WHERE email = :1", sender)
                if q.count() == 1:
                    member = q[0]
                    if member.twitter_oauth == 1:
                        access_token = OAuthToken.from_string(member.twitter_oauth_string)
                        twitter = OAuthApi(CONSUMER_KEY, CONSUMER_SECRET, access_token)
                        status = body[1].decode()
                        # Twitter's 140-character limit: truncate, don't reject.
                        if len(status) > 140:
                            status = status[0:140]
                        try:
                            logging.info("About to send tweet: " + status)
                            twitter.PostUpdate(status.encode('utf-8'))
                            logging.info("Successfully tweet: " + status)
                        except:
                            # Best-effort: log and drop the mail on failure.
                            logging.error("Failed to tweet for " + member.username)
                    else:
                        logging.error("User " + sender + " doesn't have Twitter link.")
# WSGI app wiring the App Engine inbound-mail URL to MailHandler.
application = webapp.WSGIApplication([
    MailHandler.mapping()
], debug=True)
def main():
    # CGI entry point invoked by the App Engine runtime.
    run_wsgi_app(application)
if __name__ == "__main__":
main() | bsd-3-clause |
absoludity/servo | components/script/dom/bindings/codegen/ply/ply/yacc.py | 319 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
    def func_code(f):
        # Python 2: a function's code object lives on .func_code.
        return f.func_code
else:
    def func_code(f):
        # Python 3: renamed to .__code__.
        return f.__code__

# Compatibility
try:
    MAXINT = sys.maxint
except AttributeError:
    # Python 3 removed sys.maxint; sys.maxsize is the closest equivalent.
    MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
    """Import and return the lex module for the running Python version."""
    if sys.version_info[0] < 3:
        # Python 2: 'lex' is importable as a sibling module.
        import lex
    else:
        # Python 3: only importable as part of the 'ply' package.
        import ply.lex as lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a logging-module logger.

    Writes printf-style formatted messages to the wrapped file object;
    used by PLY for outputs such as the parser.out file when the caller
    does not supply a real logger.
    """
    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels share one code path; only the prefix differs.
        self.f.write(prefix + (msg % args) + "\n")

    def debug(self, msg, *args, **kwargs):
        self._emit("", msg, args)
    info = debug

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Every attribute access returns the logger itself, and calling it also
    # returns itself, so arbitrary chains like nl.debug("x") silently no-op.
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Render a parse result for debug logging, truncated to resultlimit."""
    text = repr(r)
    if '\n' in text:
        # Re-repr to collapse embedded newlines onto one line.
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Render a parser stack entry for debug output.

    Short reprs (< 16 chars) are shown verbatim; anything longer is
    abbreviated to the value's type and identity.
    """
    text = repr(r)
    if '\n' in text:
        # Re-repr to collapse embedded newlines onto one line.
        text = repr(text)
    if len(text) < 16:
        return text
    return "<%s @ 0x%x>" % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    # Container for a grammar symbol during parsing.  Attributes such as
    # .type, .value, .lineno and .lexpos are attached dynamically.
    def __str__(self): return self.type
    def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols matched by a grammar rule.

    Index lookup and assignment read/write the ``.value`` attribute of the
    underlying YaccSymbol objects; a negative index reaches into the
    parser's symbol stack.  lineno()/linespan() return line information for
    an item and lexpos()/lexspan() return positional information.
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        # Negative indices refer to the parser's own symbol stack
        return self.slice[n].value if n >= 0 else self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 slicing protocol support
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        """Starting line number of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        """Set the starting line number of item n."""
        self.slice[n].lineno = lineno

    def linespan(self, n):
        """Return (startline, endline) covered by item n."""
        first = getattr(self.slice[n], "lineno", 0)
        last = getattr(self.slice[n], "endlineno", first)
        return first, last

    def lexpos(self, n):
        """Starting lex position of item n (0 if not recorded)."""
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        """Return (lexpos, endlexpos) covered by item n."""
        first = getattr(self.slice[n], "lexpos", 0)
        last = getattr(self.slice[n], "endlexpos", first)
        return first, last

    def error(self):
        """Signal a syntax error from inside a grammar rule."""
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.

    Runs tokens from a lexer against the LR tables held in ``lrtab``.
    For speed, three near-identical copies of the parse loop exist:
    parsedebug() is the instrumented master copy, parseopt() is the
    optimized copy with position tracking, and parseopt_notrack() is the
    optimized copy with tracking removed.  parse() dispatches to the
    appropriate loop.
    """
    def __init__(self,lrtab,errorf):
        # lrtab  - object carrying the generated parsing tables
        # errorf - user-supplied p_error() handler (may be None)
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf

    def errok(self):
        """Signal (typically from p_error) that error recovery succeeded."""
        self.errorok = 1

    def restart(self):
        """Discard the current stacks and return to the initial state."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)

    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Parse ``input`` and return the value of the start symbol.

        input     - optional string handed to lexer.input()
        lexer     - lexer object; defaults to the ply.lex module's lexer
        debug     - truthy value / logger enabling the debugging engine
        tracking  - enable line/position tracking on reductions
        tokenfunc - alternative token source replacing lexer.token
        """
        if debug or yaccdevel:
            if isinstance(debug,int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse().  All changes made to the
    # parsing engine should be made here.   For the non-debugging version,
    # copy this code to a method parseopt() and delete all of the sections
    # enclosed in:
    #
    #      #--! DEBUG
    #      statements
    #      #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Master copy of the parse loop, instrumented with ``debug`` logging."""
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            # --! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            # --! DEBUG

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"

            # --! DEBUG
            debug.debug('Stack : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result

            if t == None:

                # --! DEBUG
                debug.error('Error : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
    # Edit the debug version above, then copy any modifications to the method
    # below while removing #--! DEBUG sections.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parse loop (debug logging removed, tracking kept)."""
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
    # code in the #--! TRACKING sections
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Fastest parse loop (debug logging and position tracking removed)."""
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re

# Regex matching grammar identifiers: letters, digits, '_' and '-'.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single production (grammar rule).

    A grammar rule is a specification such as ``expr : expr PLUS term``.

    Attributes:
      name     - name of the production, e.g. 'expr'
      prod     - tuple of right-hand-side symbols, e.g. ('expr','PLUS','term')
      prec     - production precedence level
      number   - production number
      func     - name of the function that executes on reduce
      callable - bound callable (set via bind())
      file     - file where the production function is defined
      line     - line number where the production function is defined
      len      - number of symbols on the right-hand side
      usyms    - unique symbols found in the production
    """
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Internal settings used during table construction
        self.len = len(self.prod)

        # Unique production symbols, kept in first-seen order
        uniq = []
        for symbol in self.prod:
            if symbol not in uniq:
                uniq.append(symbol)
        self.usyms = uniq

        # All LR items for the production (filled in during table generation)
        self.lr_items = []
        self.lr_next = None

        # Cached string representation, e.g. "expr -> expr PLUS term"
        if self.prod:
            self.str = "%s -> %s" % (self.name, " ".join(self.prod))
        else:
            self.str = "%s -> <empty>" % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(" + str(self) + ")"

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol; a production is always truthy
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the nth LR item for this production, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Precompute the list of productions immediately following the dot.
        # Hack: relies on the global Prodnames table.  Remove later.
        try:
            item.lr_after = Prodnames[item.prod[n+1]]
        except (IndexError, KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n-1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Bind the production function name to a callable found in pdict."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production when tables are read from files.

    Holds only what the LR parsing engine needs (name, length, bound
    callable) plus a little debugging information.
    """
    def __init__(self, str, name, len, func, file, line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    def bind(self, pdict):
        """Bind the production function name to a callable found in pdict."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a dot marking the current parse position.

    For example ``expr -> expr . PLUS term``.  Built from a Production
    ``p`` with the dot inserted at index ``n`` of the right-hand side.
    lr_next / lr_after / lr_before are filled in later during table
    construction.
    """
    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        dotted = list(p.prod)
        dotted.insert(n, ".")
        self.prod = tuple(dotted)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name, " ".join(self.prod))

    def __repr__(self):
        return "LRItem(" + str(self) + ")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost terminal in ``symbols``, or None if there is none.

    Used by add_production() to determine a rule's default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Raised when a problem is detected in the grammar specification."""
    pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
    """Add the production  prodname : syms...  to the grammar.

    func is the rule's action function; file/line locate the rule in the
    user's source for error messages.  Raises GrammarError for invalid
    names, bad literals, or incorrect %prec usage.  Returns 0 on success.
    """
    # Rule name must not collide with a token, the reserved word 'error',
    # or be a malformed identifier.
    if prodname in self.Terminals:
        raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
    if prodname == 'error':
        raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
    if not _is_identifier.match(prodname):
        raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

    # Look for literal tokens (quoted single characters) on the RHS.
    # A valid literal becomes a single-character terminal; syms is
    # rewritten in place so the quotes disappear.
    for n,s in enumerate(syms):
        if s[0] in "'\"":
            try:
                c = eval(s)   # evaluate the quoted literal from the rule text
                if (len(c) > 1):
                    raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                if not c in self.Terminals:
                    self.Terminals[c] = []
                syms[n] = c
                continue
            except SyntaxError:
                # Not a parseable literal; fall through to the identifier check.
                pass
        if not _is_identifier.match(s) and s != '%prec':
            raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

    # Determine the precedence level
    if '%prec' in syms:
        # %prec must be second-to-last, followed by a declared terminal name.
        if syms[-1] == '%prec':
            raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
        if syms[-2] != '%prec':
            raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
        precname = syms[-1]
        prodprec = self.Precedence.get(precname,None)
        if not prodprec:
            raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
        else:
            self.UsedPrecedence[precname] = 1   # mark as used for unused_precedence() diagnostics
        del syms[-2:]     # Drop %prec from the rule
    else:
        # If no %prec, precedence is determined by the rightmost terminal symbol
        precname = rightmost_terminal(syms,self.Terminals)
        prodprec = self.Precedence.get(precname,('right',0))

    # See if the rule is already in the rulemap
    map = "%s -> %s" % (prodname,syms)
    if map in self.Prodmap:
        m = self.Prodmap[map]
        raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                           "Previous definition at %s:%d" % (m.file, m.line))

    # From this point on, everything is valid. Create a new Production instance
    pnumber = len(self.Productions)
    if not prodname in self.Nonterminals:
        self.Nonterminals[prodname] = [ ]

    # Add the production number to Terminals and Nonterminals
    for t in syms:
        if t in self.Terminals:
            self.Terminals[t].append(pnumber)
        else:
            if not t in self.Nonterminals:
                self.Nonterminals[t] = [ ]
            self.Nonterminals[t].append(pnumber)

    # Create a production and add it to the list of productions
    p = Production(pnumber,prodname,syms,prodprec,func,file,line)
    self.Productions.append(p)
    self.Prodmap[map] = p

    # Add to the global productions list
    try:
        self.Prodnames[prodname].append(p)
    except KeyError:
        self.Prodnames[prodname] = [ p ]
    return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
    """Install the start symbol and the augmented production 0: S' -> start.

    Defaults to the LHS of the first declared rule.  Raises GrammarError
    if *start* is not a known nonterminal.
    """
    if not start:
        # Default: the left-hand side of the first real production.
        start = self.Productions[1].name
    if start not in self.Nonterminals:
        raise GrammarError("start symbol %s undefined" % start)
    self.Productions[0] = Production(0,"S'",[start])
    self.Nonterminals[start].append(0)
    self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol.  Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
    """Return nonterminals that cannot be reached from the start symbol."""
    # Every grammar symbol starts out unmarked.
    reachable = {}
    for sym in list(self.Terminals) + list(self.Nonterminals):
        reachable[sym] = 0

    # Iterative depth-first walk from the start symbol (production 0's RHS).
    worklist = [self.Productions[0].prod[0]]
    while worklist:
        sym = worklist.pop()
        if reachable[sym]:
            continue
        reachable[sym] = 1
        for prod in self.Prodnames.get(sym, []):
            worklist.extend(prod.prod)

    return [sym for sym in list(self.Nonterminals) if not reachable[sym]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
    """Return symbols that can never derive a string of only terminals."""
    terminates = {}

    # Every terminal (and the end marker) trivially terminates.
    for term in self.Terminals:
        terminates[term] = 1
    terminates['$end'] = 1

    # Nonterminals are assumed non-terminating until proven otherwise.
    for nonterm in self.Nonterminals:
        terminates[nonterm] = 0

    # Fixed point: a nonterminal terminates as soon as one of its
    # productions has an all-terminating right-hand side.
    changed = 1
    while changed:
        changed = 0
        for nonterm, prods in self.Prodnames.items():
            if terminates[nonterm]:
                continue        # already proven; rescanning cannot change anything
            for prod in prods:
                for sym in prod.prod:
                    if not terminates[sym]:
                        break   # this production cannot terminate (yet)
                else:
                    # Every RHS symbol terminates, so the LHS does too.
                    terminates[nonterm] = 1
                    changed = 1
                    break       # no need to examine more productions for nonterm

    # Report non-terminating symbols, except used-but-undefined ones
    # (those are diagnosed elsewhere) and the reserved 'error' symbol.
    infinite = []
    for sym, ok in terminates.items():
        if not ok and (sym in self.Prodnames or sym in self.Terminals or sym == 'error'):
            infinite.append(sym)
    return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar but not defined as tokens
# or grammar rules.  Returns a list of tuples (sym, prod) where sym is the
# symbol and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
    """Return (symbol, production) pairs for symbols used but never defined."""
    missing = []
    for prod in self.Productions:
        if not prod:
            continue    # skip the empty slot 0 before set_start()
        missing.extend(
            (sym, prod)
            for sym in prod.prod
            if sym not in self.Prodnames and sym not in self.Terminals and sym != 'error')
    return missing
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar.
# Returns a list of symbol names.
# -----------------------------------------------------------------------------
def unused_terminals(self):
    """Return terminals that appear in no production ('error' is exempt)."""
    return [term for term, uses in self.Terminals.items()
            if term != 'error' and not uses]
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable).
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
    """Return the first Production of every nonterminal that is never used."""
    return [self.Prodnames[nonterm][0]
            for nonterm, uses in self.Nonterminals.items()
            if not uses]
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term, precedence) corresponding to precedence
# rules that were never used by the grammar.  term is the terminal name and
# precedence is its associativity string ('left', 'right', or 'nonassoc').
# -----------------------------------------------------------------------------
def unused_precedence(self):
    """Return (terminal, assoc) pairs for precedence rules never used."""
    unused = []
    for term in self.Precedence:
        if term in self.Terminals or term in self.UsedPrecedence:
            continue
        unused.append((term, self.Precedence[term][0]))
    return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
    """Return FIRST1(beta) for the symbol sequence *beta* as a list."""
    result = []
    for sym in beta:
        produces_empty = 0
        # Fold the non-<empty> members of First[sym] into the result.
        for f in self.First[sym]:
            if f == '<empty>':
                produces_empty = 1
            elif f not in result:
                result.append(f)
        if not produces_empty:
            # sym cannot vanish, so later symbols contribute nothing.
            break
    else:
        # No break: every symbol can derive empty, so beta can as well.
        result.append('<empty>')
    return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols.
# -------------------------------------------------------------------------
def compute_first(self):
    """Compute FIRST1(X) for every symbol, caching the result in self.First."""
    if self.First:
        return self.First       # already computed

    # Terminals: FIRST(t) = {t}; the end marker behaves like a terminal.
    for term in self.Terminals:
        self.First[term] = [term]
    self.First['$end'] = ['$end']

    # Nonterminals start empty and grow to a fixed point.
    for nonterm in self.Nonterminals:
        self.First[nonterm] = []

    changed = 1
    while changed:
        changed = 0
        for nonterm in self.Nonterminals:
            for prod in self.Prodnames[nonterm]:
                for sym in self._first(prod.prod):
                    if sym not in self.First[nonterm]:
                        self.First[nonterm].append(sym)
                        changed = 1
    return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
    """Compute FOLLOW(B) for every nonterminal B, caching in self.Follow.

    Requires FIRST sets (computed on demand).  *start* defaults to the
    LHS of the first production.
    """
    # If already computed, return the result
    if self.Follow:
        return self.Follow

    # If first sets not computed yet, do that first.
    if not self.First:
        self.compute_first()

    # Start every nonterminal with an empty follow set.
    for k in self.Nonterminals:
        self.Follow[k] = [ ]

    if not start:
        start = self.Productions[1].name

    # Add '$end' to the follow list of the start symbol
    self.Follow[start] = [ '$end' ]

    # Standard fixed-point iteration over all productions.
    while 1:
        didadd = 0
        for p in self.Productions[1:]:
            # Scan every position of the production's RHS.
            for i in range(len(p.prod)):
                B = p.prod[i]
                if B in self.Nonterminals:
                    # B is a nonterminal: everything in FIRST of the
                    # remainder of the RHS can follow B.
                    fst = self._first(p.prod[i+1:])
                    hasempty = 0
                    for f in fst:
                        if f != '<empty>' and f not in self.Follow[B]:
                            self.Follow[B].append(f)
                            didadd = 1
                        if f == '<empty>':
                            hasempty = 1
                    if hasempty or i == (len(p.prod)-1):
                        # The remainder can vanish (or B is last), so
                        # FOLLOW(LHS) also follows B.
                        for f in self.Follow[p.name]:
                            if f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
        if not didadd: break
    return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
    """Attach to every production its list of LR items and a linked list
    (via .lr_next) from the production through each dot position."""
    for p in self.Productions:
        lastlri = p          # previous node in the lr_next chain (starts at p itself)
        i = 0                # current dot position
        lr_items = []
        while 1:
            if i > len(p):
                lri = None   # past the last position: chain terminator
            else:
                lri = LRItem(p,i)
                # Precompute the list of productions immediately following
                # the dot (lr_after) and the symbol just before it (lr_before).
                try:
                    lri.lr_after = self.Prodnames[lri.prod[i+1]]
                except (IndexError,KeyError):
                    lri.lr_after = []
                try:
                    lri.lr_before = lri.prod[i-1]
                except IndexError:
                    lri.lr_before = None

            lastlri.lr_next = lri
            if not lri: break
            lr_items.append(lri)
            lastlri = lri
            i += 1
        p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Raised when a previously generated parser table file does not match
    the current table version."""
    pass
class LRTable(object):
    """Container for LR parsing tables (action, goto, productions, method).

    This base class only knows how to *load* previously generated tables
    from a Python module or a pickle file; table construction lives in
    the derived class LRGeneratedTable.
    """
    def __init__(self):
        self.lr_action = None        # Action table
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # Table generation method ('SLR'/'LALR')

    def read_table(self, module):
        """Load tables from a parsetab module (module object or importable name).

        Returns the stored table signature.  Raises VersionError if the
        module was generated with a different table version.
        """
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                # Python 3: exec cannot create function locals; import into
                # a scratch namespace instead.
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from a pickle file previously written by the generator.

        Returns the stored table signature.  Raises VersionError on a
        table version mismatch.
        """
        try:
            import cPickle as pickle     # Python 2 fast path
        except ImportError:
            import pickle

        in_f = open(filename, "rb")
        # BUGFIX: close the file even when unpickling or the version
        # check raises (previously the handle leaked on error).
        try:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError("yacc table file version is out of date")
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
        finally:
            in_f.close()

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables
# on a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Compute F(x) = FP(x) U union of F(y) for all y with x R y, for each
    x in X, using the digraph algorithm.  Returns a dict mapping x -> F(x)."""
    N = dict((x, 0) for x in X)     # traversal numbering; 0 means unvisited
    stack = []
    F = { }
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F
def traverse(x, N, stack, F, X, R, FP):
    """One depth-first pass of the digraph algorithm starting at *x*,
    merging result sets along the relation R."""
    stack.append(x)
    depth = len(stack)
    N[x] = depth
    F[x] = FP(x)                    # F(x) <- F'(x)

    for y in R(x):                  # every y related to x
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)

    if N[x] == depth:
        # x heads a strongly connected component: every member popped off
        # the stack down to x shares x's result set and is marked finished.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError):
    """Raised for errors encountered during LR table generation."""
    pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
    """Generate the full set of LR parsing tables for *grammar*.

    method is 'SLR' or 'LALR'; log receives diagnostic output (a
    NullLogger is substituted when omitted).
    """
    if method not in ('SLR', 'LALR'):
        raise LALRError("Unsupported method %s" % method)

    self.grammar = grammar
    self.lr_method = method

    # Set up the logger
    self.log = log if log else NullLogger()

    # Internal attributes
    self.lr_action = {}                        # Action table
    self.lr_goto = {}                          # Goto table
    self.lr_productions = grammar.Productions  # Copy of grammar Production array
    self.lr_goto_cache = {}                    # Cache of computed gotos
    self.lr0_cidhash = {}                      # Cache of closures
    self._add_count = 0                        # Internal counter used to detect cycles

    # Diagnostic information filled in by the table generator
    self.sr_conflict = 0
    self.rr_conflict = 0
    self.conflicts = []                        # List of conflicts
    self.sr_conflicts = []
    self.rr_conflicts = []

    # Build the tables
    self.grammar.build_lritems()
    self.grammar.compute_first()
    self.grammar.compute_follow()
    self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
    """Return the closure of item set I as a list (insertion order kept)."""
    # A fresh counter value marks productions already added this call,
    # avoiding a membership scan.
    self._add_count += 1

    closure = I[:]
    added = 1
    while added:
        added = 0
        for item in closure:
            for prod in item.lr_after:
                if getattr(prod, "lr0_added", 0) == self._add_count:
                    continue
                # Add B --> . gamma to the closure
                closure.append(prod.lr_next)
                prod.lr0_added = self._add_count
                added = 1
    return closure
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
    """Return goto(I, x); identical goto sets always return the same object."""
    # First we look for a previously cached entry
    g = self.lr_goto_cache.get((id(I),x),None)
    if g: return g

    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result.  self.lr_goto_cache[x] is a trie keyed on the
    # identities of the advanced items; walking it reaches a unique
    # leaf dict for any given combination of items.
    s = self.lr_goto_cache.get(x,None)
    if not s:
        s = { }
        self.lr_goto_cache[x] = s

    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lr_before == x:
            # Item p advances over x; descend one trie level for item n.
            s1 = s.get(id(n),None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    # The '$end' key of the reached trie node holds the closure for this
    # exact combination of items (computed at most once).
    g = s.get('$end',None)
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs   # empty goto: cache the empty list itself
    self.lr_goto_cache[(id(I),x)] = g
    return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Return the canonical collection C of LR(0) item sets, numbering
    each set in self.lr0_cidhash by its id()."""
    C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]

    for idx, I in enumerate(C):
        self.lr0_cidhash[id(I)] = idx

    # Worklist expansion: compute goto on every symbol appearing in each
    # item set and append previously unseen sets to C.
    i = 0
    while i < len(C):
        I = C[i]
        i += 1

        # Collect all of the symbols that could possibly be in goto(I,X).
        symbols = {}
        for item in I:
            for sym in item.usyms:
                symbols[sym] = None

        for sym in symbols:
            g = self.lr0_goto(I, sym)
            if not g:
                continue
            if id(g) in self.lr0_cidhash:
                continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
    """Return a dict whose keys are the nullable nonterminals."""
    nullable = {}
    prev_count = 0
    while 1:
        for p in self.grammar.Productions[1:]:
            if p.len == 0:
                # Directly empty production.
                nullable[p.name] = 1
                continue
            for sym in p.prod:
                if sym not in nullable:
                    break
            else:
                # Every RHS symbol is nullable, so the LHS is too.
                nullable[p.name] = 1
        if len(nullable) == prev_count:
            break           # fixed point reached
        prev_count = len(nullable)
    return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions: transitions in which a dot appears immediately before a
# non-terminal.  Returns a list of tuples (state, N) where state is the
# state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
    """Return all (state, N) pairs with a dot directly before nonterminal N."""
    trans = []
    for state in range(len(C)):
        for p in C[state]:
            if p.lr_index < p.len - 1:
                t = (state, p.prod[p.lr_index + 1])
                if t[1] in self.grammar.Nonterminals:
                    if t not in trans:
                        trans.append(t)
        # BUGFIX: removed a stray "state = state + 1" here -- it uselessly
        # reassigned the for-loop variable (leftover from a while-loop
        # version) and had no effect on the result.
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
    """Return the terminals directly readable after taking transition
    *trans* = (state, N) in the LR(0) machine C."""
    # NOTE: removed an unused local "dr_set = { }" that was never read.
    state, N = trans
    terms = []

    g = self.lr0_goto(C[state], N)
    for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index + 1]
            if a in self.grammar.Terminals:
                if a not in terms:
                    terms.append(a)

    # This extra bit is to handle the start state
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')
    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
    """Return (j, a) pairs where a is an empty-deriving symbol after the
    dot in the state j reached by transition *trans* = (state, N)."""
    state, N = trans
    g = self.lr0_goto(C[state], N)
    j = self.lr0_cidhash.get(id(g), -1)

    rel = []
    for p in g:
        if p.lr_index < p.len - 1:
            sym = p.prod[p.lr_index + 1]
            if sym in empty:
                rel.append((j, sym))
    return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
    """Return (lookdict, includedict) for all nonterminal transitions.

    C is the LR(0) collection, trans the nonterminal transitions, and
    nullable the dict of empty-deriving nonterminals.
    """
    lookdict = {}          # Dictionary of lookback relations
    includedict = {}       # Dictionary of include relations

    # Make a dictionary of non-terminal transitions for O(1) membership tests
    dtrans = {}
    for t in trans:
        dtrans[t] = 1

    # Loop over all transitions and compute lookbacks and includes
    for state,N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue

            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]

                # Check to see if this symbol and state are a non-terminal transition
                if (j,t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals: break      # No forget it
                        if not p.prod[li] in nullable: break
                        li = li + 1
                    else:
                        # No break: the remaining suffix derives empty, so
                        # (state,N) INCLUDES (j,t).
                        includes.append((j,t))

                # Advance one symbol through the LR(0) machine.
                g = self.lr0_goto(C[j],t)               # Go to next set
                j = self.lr0_cidhash.get(id(g),-1)      # Go to next state

            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                # to confirm r is p advanced all the way to the end.
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j,r))

        # Record includes keyed by the *included* transition, and lookbacks
        # keyed by the transition we started from.
        for i in includes:
            if not i in includedict: includedict[i] = []
            includedict[i].append((state,N))
        lookdict[(state,N)] = lookb
    return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute Read() for every nonterminal transition via digraph()."""
    return digraph(ntrans,
                   R=lambda x: self.reads_relation(C, x, nullable),
                   FP=lambda x: self.dr_relation(C, x, nullable))
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets:
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute LALR Follow sets from the read sets and includes relation."""
    return digraph(ntrans,
                   R=lambda x: inclsets.get(x, []),
                   FP=lambda x: readsets[x])
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach each transition's follow terminals to the productions it
    looks back to (stored in p.lookaheads[state])."""
    for trans, lb in lookbacks.items():
        follow = followset.get(trans, [])
        for state, p in lb:
            laheads = p.lookaheads.setdefault(state, [])
            for a in follow:
                if a not in laheads:
                    laheads.append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Run the complete LALR lookahead pipeline over the item sets C."""
    # Nullable nonterminals
    nullable = self.compute_nullable_nonterminals()

    # Nonterminal transitions of the LR(0) machine
    trans = self.find_nonterminal_transitions(C)

    # Read sets
    readsets = self.compute_read_sets(C, trans, nullable)

    # Lookback / includes relations
    lookd, included = self.compute_lookback_includes(C, trans, nullable)

    # LALR FOLLOW sets
    followsets = self.compute_follow_sets(trans, readsets, included)

    # Attach the lookaheads to the productions
    self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Build self.lr_action and self.lr_goto, state by state.

    Shift/reduce conflicts are resolved with the grammar's precedence
    rules (default: shift); reduce/reduce conflicts favor the rule
    defined earlier.  Conflicts are recorded in self.sr_conflicts and
    self.rr_conflicts for later reporting.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto # Goto array
    action = self.lr_action # Action array
    log = self.log # Logger for output
    actionp = { } # Action production array (temporary)

    log.info("Parsing method: %s", self.lr_method)

    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()

    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ] # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")

        for p in I:
            if p.len == p.lr_index + 1:
                # Dot at the end of the production.
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    # LALR uses the computed per-state lookaheads;
                    # SLR falls back to the grammar FOLLOW sets.
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # Nonassociative at equal level: error entry.
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain reduce entry.
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1] # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # Nonassociative at equal level: error entry.
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain shift entry.
                            st_action[a] = j
                            st_actionp[a] = p

        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")

        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")

        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, modulename, outputdir='', signature=""):
    """Write the LR parsing tables out as an importable Python module.

    modulename : dotted module name for the table file; only the last
                 component is used to form the filename.
    outputdir  : directory in which the file is created.
    signature  : grammar signature stored in the file so stale tables
                 can be detected when they are reloaded.

    I/O errors are reported on stderr rather than raised, preserving
    historical PLY behavior.
    """
    basemodulename = modulename.split(".")[-1]
    filename = os.path.join(outputdir, basemodulename) + ".py"

    def _write_factored(f, table, name):
        # Emit *table* (a 2-level dict: state -> {symbol: value}) in a
        # compact factored form, symbol -> ([states], [values]), plus
        # generated code that rebuilds the full dict at import time.
        # This de-duplicates the symbol-name strings and shrinks the file.
        items = {}
        for s, nd in table.items():
            for sym, v in nd.items():
                i = items.get(sym)
                if not i:
                    i = ([], [])
                    items[sym] = i
                i[0].append(s)
                i[1].append(v)
        f.write("\n_%s_items = {" % name)
        for k, v in items.items():
            f.write("%r:([" % k)
            for i in v[0]:
                f.write("%r," % i)
            f.write("],[")
            for i in v[1]:
                f.write("%r," % i)
            f.write("]),")
        f.write("}\n")
        # The rebuild code below must carry its own indentation so the
        # generated table module is syntactically valid Python.
        f.write("""
_%s = { }
for _k, _v in _%s_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _%s: _%s[_x] = { }
      _%s[_x][_k] = _y
del _%s_items
""" % ((name,) * 6))

    def _write_flat(f, table, name):
        # Emit *table* as a single flat dict keyed by (state, symbol).
        # Larger output, but the original, simpler representation.
        f.write("\n_%s = { " % name)
        for k, v in table.items():
            f.write("(%r,%r):%r," % (k[0], k[1], v))
        f.write("}\n")

    try:
        # 'with' guarantees the handle is closed even if a write fails
        # part-way through (the original leaked the file in that case).
        with open(filename, "w") as f:
            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                _write_factored(f, self.lr_action, "lr_action")
            else:
                _write_flat(f, self.lr_action, "lr_action")

            if smaller:
                _write_factored(f, self.lr_goto, "lr_goto")
            else:
                _write_flat(f, self.lr_goto, "lr_goto")

            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write(" (%r,%r,%d,%r,%r,%d),\n" % (
                        p.str, p.name, p.len, p.func, p.file, p.line))
                else:
                    f.write(" (%r,%r,%d,None,None,None),\n" % (
                        str(p), p.name, p.len))
            f.write("]\n")
    except IOError:
        e = sys.exc_info()[1]
        sys.stderr.write("Unable to create '%s'\n" % filename)
        sys.stderr.write(str(e) + "\n")
        return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=""):
    """Pickle the LR parsing tables to *filename*.

    The stream layout (in order) is: table version, LR method,
    grammar signature, action table, goto table, production list.
    LRTable.read_pickle() reads the entries back in the same order.
    """
    try:
        import cPickle as pickle   # Python 2: faster C implementation
    except ImportError:
        import pickle              # Python 3
    # 'with' ensures the file is closed even if a dump fails part-way
    # through (the original left the handle open in that case).
    with open(filename, "wb") as outf:
        pickle.dump(__tabversion__, outf, pickle_protocol)
        pickle.dump(self.lr_method, outf, pickle_protocol)
        pickle.dump(signature, outf, pickle_protocol)
        pickle.dump(self.lr_action, outf, pickle_protocol)
        pickle.dump(self.lr_goto, outf, pickle_protocol)
        # Productions are flattened to plain tuples so the pickle does
        # not depend on the Production class being importable.
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str, p.name, p.len, p.func, p.file, p.line))
            else:
                outp.append((str(p), p.name, p.len, None, None, None))
        pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of a caller *levels* frames up the stack.

    The result is the caller's module globals overlaid with its locals
    (locals win).  This is how yacc() discovers grammar definitions when
    no explicit module is supplied.
    """
    # Raise and immediately catch a throwaway exception purely to obtain
    # a traceback whose tb_frame is the current stack frame; this avoids
    # relying on the CPython-specific sys._getframe().
    try:
        raise RuntimeError
    except RuntimeError:
        frame = sys.exc_info()[2].tb_frame
    for _ in range(levels):
        frame = frame.f_back
    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar rule docstring into production data.

    Returns a list of (file, line, prodname, syms) tuples, one per
    production.  A line starting with '|' continues the most recently
    named rule.  Raises SyntaxError on malformed input, with messages
    pointing at *file* and the offending source line.
    """
    grammar = []
    lastp = None
    # Walk the docstring line by line; the enumerate offset keeps the
    # reported line numbers anchored to the function's source location.
    for offset, raw in enumerate(doc.splitlines(), start=1):
        dline = line + offset
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # Continuation of the previous rule
                if lastp is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                prodname = lastp
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                lastp = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a missing ':' token) becomes a SyntaxError
            # carrying the offending rule text.
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file, dline, raw.strip()))
    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collect and validate a parser specification from a symbol table.

    Scans *pdict* (typically a module namespace captured by
    get_caller_module_dict) for the 'start' symbol, the p_error handler,
    the token list, the precedence table, and all p_* rule functions.
    After get_all() / validate_all(), the gathered data drives grammar
    construction in yacc().

    NOTE(review): relies on module-level helpers defined elsewhere in
    this file -- PlyLogger (stderr logger) and func_code() (presumably a
    Python 2/3-compatible accessor for a function's code object; confirm).
    """
    def __init__(self, pdict, log=None):
        self.pdict = pdict        # Symbol table to scan
        self.start = None         # Declared start symbol, if any
        self.error_func = None    # p_error handler, if any
        self.tokens = None        # Declared token list
        self.files = {}           # Source files in which rules were found
        self.grammar = []         # Collected (funcname, production) pairs
        self.error = 0            # Set to 1 when any validation fails

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Run every get_* step, populating the attributes above."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run every validate_* step; return nonzero if anything failed."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 digest of the grammar specification.

        The digest covers the start symbol, precedence table, token
        list, and rule docstrings, and is compared against the signature
        stored in previously generated tables to detect staleness.
        """
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5          # Very old Python fallback
        try:
            sig = md5()
            if self.start:
                # latin-1 encoding -- presumably chosen so any byte value
                # round-trips without error; TODO confirm.
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    # f[3] is the rule function's docstring (the grammar text)
                    sig.update(f[3].encode('latin-1'))
        except (TypeError, ValueError):
            # Non-encodable data is simply left out of the signature.
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_files(self):
        """Warn about duplicated p_rulename() definitions in source files.

        NOTE(review): returns 1 for a non-.py file but implicitly returns
        None otherwise; callers ignore the return value, so this is only
        a cosmetic inconsistency.
        """
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for filename in self.files.keys():
            base, ext = os.path.splitext(filename)
            if ext != '.py': return 1          # No idea. Assume it's okay.

            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue                       # Source unavailable; skip the check.

            counthash = { }
            for linen, l in enumerate(lines):
                linen += 1                     # Report 1-based line numbers
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename, linen, name, prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check that p_error, if present, is a 1-argument callable."""
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1       # Bound methods also receive 'self'
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            if (func_code(self.error_func).co_argcount != 1 + ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument", efile, eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        """Fetch the 'tokens' declaration; flag it if missing or malformed."""
        tokens = self.pdict.get("tokens", None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return

        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get("precedence", None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten 'precedence' into (term, assoc, level) tuples in self.preclist.

        Levels are 1-based and increase with position in the table.
        """
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return

                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)", p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc, str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term, str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    preclist.append((term, assoc, level + 1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect every p_* rule function as (line, file, name, docstring)."""
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line, file, name, item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check rule-function signatures and parse their grammar docstrings.

        Populates self.grammar with (funcname, production) pairs and
        warns about likely mistakes (missing docstrings, p_ names that
        are not functions, rule-shaped functions lacking the p_ prefix).
        """
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2        # self + token argument
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument", file, line, func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)", file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n, v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue          # Lexer rules are handled by lex, not yacc
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v, types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v, types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    # Heuristic: a docstring whose second word is ':' looks
                    # like a grammar rule missing the p_ prefix.
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno, n)
                except Exception:
                    pass

        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file, outputdir='',
         debuglog=None, errorlog=None, picklefile=None):
    """Build an LR parser from grammar rules found in *module* (or, if no
    module is given, in the caller's namespace).

    Previously generated tables (from *tabmodule* or *picklefile*) are
    reused when their stored signature matches the current grammar or
    when *optimize* is set; otherwise the tables are regenerated with
    LRGeneratedTable and optionally written back out.  Returns an
    LRParser instance and also exports its parse method as the
    module-global 'parse'.

    Raises YaccError if the grammar specification is invalid.
    """
    global parse                 # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        # Two levels up: past this function to its caller.
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        # NOTE(review): this stringifies the whole exc_info() tuple; the
        # other handlers use sys.exc_info()[1] -- looks unintentional,
        # though harmless (warning text only). Confirm before changing.
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Missing/unreadable tables are expected on first run; fall
        # through and regenerate them.
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile, "w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s", str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s", str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule", prod.file, prod.line, sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n, p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable", u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)

        # Deduplicate reduce/reduce reports; the same (state, rule,
        # rejected) triple can appear once per conflicting lookahead.
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected, state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state, id(rule), id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule, outputdir, signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile, signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser
| mpl-2.0 |
bryx-inc/boto | tests/integration/awslambda/__init__.py | 586 | 1123 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
memo/tensorflow | tensorflow/python/kernel_tests/sparse_reshape_op_test.py | 48 | 12887 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
  """Tests for sparse_ops.sparse_reshape.

  Covers: identity reshapes, inferred (-1) dimensions, rank changes in
  both directions, error reporting for mismatched sizes, static shape
  propagation, and equivalence with dense np.reshape semantics.
  """

  def _SparseTensorPlaceholder(self):
    """Return a SparseTensor of placeholders (int64 indices/shape, float64 values)."""
    return sparse_tensor.SparseTensor(
        array_ops.placeholder(dtypes.int64),
        array_ops.placeholder(dtypes.float64),
        array_ops.placeholder(dtypes.int64))

  def _SparseTensorValue_5x6(self):
    """Fixture: a 5x6 sparse tensor with 6 nonzero float64 values."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
                    [3, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
    shape = np.array([5, 6]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensorValue_2x3x4(self):
    """Fixture: a rank-3 (2x3x4) sparse tensor with 7 nonzero values."""
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
                    [1, 1, 3], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 111, 113, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def testStaticShapeInfoPreserved(self):
    """Static shape of the output reflects the requested new shape."""
    sp_input = sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_5x6())
    self.assertAllEqual((5, 6), sp_input.get_shape())
    sp_output = sparse_ops.sparse_reshape(sp_input, shape=(1, 5, 2, 3))
    self.assertAllEqual((1, 5, 2, 3), sp_output.get_shape())

  def testSameShape(self):
    """Reshaping to the same shape is the identity."""
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedSameShape(self):
    """Identity reshape also works with placeholder (fed) inputs."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testWorksWellWithTfShape(self):
    """sparse_reshape accepts the int32 output of tf.shape as new shape."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      shape = array_ops.shape(sp_input)  # tf.shape generates int32 output
      sp_output = sparse_ops.sparse_reshape(sp_input, shape)

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedSameShapeWithInferredDim(self):
    """[-1, 6] infers the first dimension and leaves the tensor unchanged."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)

  def testFeedNewShapeSameRank(self):
    """5x6 -> 3x10 remaps indices row-major while keeping values in order."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
                                    [2, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [3, 10])

  def testFeedNewShapeSameRankWithInferredDim(self):
    """Same as testFeedNewShapeSameRank but with the column count inferred."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
                                    [2, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [3, 10])

  def testUpRank(self):
    """Rank 2 -> rank 3 (5x6 -> 2x3x5)."""
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedUpRank(self):
    """Rank increase with placeholder inputs."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedUpRankWithInferredDim(self):
    """Rank increase with a -1 (inferred) middle dimension."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
                                    [1, 1, 0], [1, 1, 1]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [2, 3, 5])

  def testFeedDownRank(self):
    """Rank 3 -> rank 2 (2x3x4 -> 6x4)."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_2x3x4()
      sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
                                    [4, 3], [5, 2]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [6, 4])

  def testFeedDownRankWithInferredDim(self):
    """Rank decrease with the trailing dimension inferred."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_2x3x4()
      sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices,
                          np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
                                    [4, 3], [5, 2]]))
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, [6, 4])

  def testFeedMultipleInferredDims(self):
    """More than one -1 in the target shape is rejected at run time."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
      with self.assertRaisesOpError("only one output shape size may be -1"):
        sess.run(sp_output, {sp_input: input_val})

  def testProvideStaticallyMismatchedSizes(self):
    """A statically known size mismatch fails at graph-construction time."""
    input_val = self._SparseTensorValue_5x6()
    sp_input = sparse_tensor.SparseTensor.from_value(input_val)
    with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
      sparse_ops.sparse_reshape(sp_input, [4, 7])

  def testFeedMismatchedSizes(self):
    """A fed size mismatch (30 != 28 elements) fails at run time."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
      with self.assertRaisesOpError(
          "Input to reshape is a tensor with 30 dense values"):
        sess.run(sp_output, {sp_input: input_val})

  def testFeedMismatchedSizesWithInferredDim(self):
    """An inferred dim that is not an exact divisor fails at run time."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6()
      sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
      with self.assertRaisesOpError("requested shape requires a multiple"):
        sess.run(sp_output, {sp_input: input_val})

  def testFeedPartialShapes(self):
    """Static shape inference fills in whatever is known about rank/nnz."""
    with self.test_session(use_gpu=False):
      # Incorporate new rank into shape information if known
      sp_input = self._SparseTensorPlaceholder()
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])

      # Incorporate known shape information about input indices in output
      # indices
      sp_input = self._SparseTensorPlaceholder()
      sp_input.indices.set_shape([5, None])
      sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])

      # Even if new_shape has no shape information, we know the ranks of
      # output indices and shape
      sp_input = self._SparseTensorPlaceholder()
      sp_input.indices.set_shape([5, None])
      new_shape = array_ops.placeholder(dtypes.int64)
      sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
      self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
      self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])

  def testFeedDenseReshapeSemantics(self):
    """Randomized check that sparse reshape matches dense np.reshape."""
    with self.test_session(use_gpu=False) as sess:
      # Compute a random rank-5 initial shape and new shape, randomly sparsify
      # it, and check that the output of SparseReshape has the same semantics
      # as a dense reshape.
      factors = np.array([2] * 4 + [3] * 4 + [5] * 4)  # 810k total elements
      orig_rank = np.random.randint(2, 7)
      orig_map = np.random.randint(orig_rank, size=factors.shape)
      orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
      new_rank = np.random.randint(2, 7)
      new_map = np.random.randint(new_rank, size=factors.shape)
      new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]

      orig_dense = np.random.uniform(size=orig_shape)
      orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
      orig_values = orig_dense[orig_dense < 0.5]

      new_dense = np.reshape(orig_dense, new_shape)
      new_indices = np.transpose(np.nonzero(new_dense < 0.5))
      new_values = new_dense[new_dense < 0.5]

      sp_input = self._SparseTensorPlaceholder()
      input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
                                                  orig_shape)
      sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, new_indices)
      self.assertAllEqual(output_val.values, new_values)
      self.assertAllEqual(output_val.dense_shape, new_shape)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
wscullin/spack | var/spack/repos/builtin.mock/packages/multivalue_variant/package.py | 3 | 2032 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class MultivalueVariant(Package):
    """Mock package exercising single- and multi-valued variants."""

    homepage = "http://www.llnl.gov"
    url = "http://www.llnl.gov/mpileaks-1.0.tar.gz"

    # NOTE(review): versions are declared as floats (1.0, 2.1, ...);
    # Spack version() directives conventionally take strings -- confirm intended.
    version(1.0, 'foobarbaz')
    version(2.1, 'foobarbaz')
    version(2.2, 'foobarbaz')
    version(2.3, 'foobarbaz')

    variant('debug', default=False, description='Debug variant')

    # 'foo' may take any combination of the listed values (multi=True).
    variant(
        'foo',
        description='Multi-valued variant',
        values=('bar', 'baz', 'barbaz'),
        multi=True
    )

    # 'fee' takes exactly one of the listed values (multi=False).
    variant(
        'fee',
        description='Single-valued variant',
        default='bar',
        values=('bar', 'baz', 'barbaz'),
        multi=False
    )

    depends_on('mpi')
    depends_on('callpath')
    depends_on('a')
    # Conditional dependency: pin a@1.0 only when fee=barbaz is selected.
    depends_on('a@1.0', when='fee=barbaz')

    def install(self, spec, prefix):
        # Mock package: nothing to build or install.
        pass
| lgpl-2.1 |
yangdongsheng/autotest | utils/build_externals.py | 6 | 5474 | #!/usr/bin/python
#
# Please keep this code python 2.4 compatible and stand alone.
"""
Fetch, build and install external Python library dependancies.
This fetches external python libraries, builds them using your host's
python and installs them under our own autotest/site-packages/ directory.
Usage? Just run it.
utils/build_externals.py
"""
import compileall, logging, os, shutil, sys, tempfile, time, urllib2
import subprocess, re
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import logging_config, logging_manager
from autotest.client.shared import utils
from autotest.utils import external_packages
# bring in site packages as well
utils.import_site_module(__file__, 'autotest.utils.site_external_packages')
# Where package source be fetched to relative to the top of the autotest tree.
PACKAGE_DIR = 'ExternalSource'
# Where packages will be installed to relative to the top of the autotest tree.
INSTALL_DIR = 'site-packages'
# Installs all packages, even if the system already has the version required
INSTALL_ALL = False
# Want to add more packages to fetch, build and install? See the class
# definitions at the end of external_packages.py for examples of how to do it.
class BuildExternalsLoggingConfig(logging_config.LoggingConfig):
    """Logging setup for this script: verbose output to the console only."""

    def configure_logging(self, results_dir=None, verbose=False):
        # results_dir is accepted for interface compatibility with the base
        # class but unused; everything goes to the console.
        super(BuildExternalsLoggingConfig, self).configure_logging(
            use_console=True,
            verbose=verbose)
def main():
    """
    Find all ExternalPackage classes defined in this file and ask them to
    fetch, build and install themselves.

    Returns the number of errors encountered (0 means success), suitable
    for use as a process exit status.
    """
    logging_manager.configure_logging(BuildExternalsLoggingConfig(),
                                      verbose=True)
    # Python 2 octal literal: files we create are group/other readable.
    os.umask(022)
    top_of_tree = external_packages.find_top_of_autotest_tree()
    package_dir = os.path.join(top_of_tree, PACKAGE_DIR)
    install_dir = os.path.join(top_of_tree, INSTALL_DIR)

    # Make sure the install_dir is in our python module search path
    # as well as the PYTHONPATH being used by all our setup.py
    # install subprocesses.
    if install_dir not in sys.path:
        sys.path.insert(0, install_dir)
    env_python_path_varname = 'PYTHONPATH'
    env_python_path = os.environ.get(env_python_path_varname, '')
    if install_dir+':' not in env_python_path:
        os.environ[env_python_path_varname] = ':'.join([
            install_dir, env_python_path])

    fetched_packages, fetch_errors = fetch_necessary_packages(package_dir,
                                                              install_dir)
    install_errors = build_and_install_packages(fetched_packages, install_dir)

    # Byte compile the code after it has been installed in its final
    # location as .pyc files contain the path passed to compile_dir().
    # When printing exception tracebacks, python uses that path first to look
    # for the source code before checking the directory of the .pyc file.
    # Don't leave references to our temporary build dir in the files.
    logging.info('compiling .py files in %s to .pyc', install_dir)
    compileall.compile_dir(install_dir, quiet=True)

    # Some things install with whacky permissions, fix that.
    external_packages.system("chmod -R a+rX '%s'" % install_dir)

    errors = fetch_errors + install_errors
    for error_msg in errors:
        logging.error(error_msg)
    return len(errors)
def fetch_necessary_packages(dest_dir, install_dir):
    """
    Fetches all ExternalPackages into dest_dir.

    @param dest_dir: Directory the packages should be fetched into.
    @param install_dir: Directory where packages will later installed.

    @returns A tuple containing two lists:
             * A list of ExternalPackage instances that were fetched and
               need to be installed.
             * A list of error messages for any failed fetches.
    """
    requested_names = sys.argv[1:]
    fetch_errors = []
    fetched = []
    for pkg_class in external_packages.ExternalPackage.subclasses:
        pkg = pkg_class()
        # If package names were given on the command line, only process
        # those; otherwise consider every known package.
        if requested_names and pkg.name.lower() not in requested_names:
            continue
        if not pkg.is_needed(install_dir):
            logging.info('A new %s is not needed on this system.',
                         pkg.name)
            if not INSTALL_ALL:
                continue
            logging.info('Installing anyways...')
        if pkg.fetch(dest_dir):
            fetched.append(pkg)
        else:
            msg = 'Unable to download %s' % pkg.name
            logging.error(msg)
            fetch_errors.append(msg)
    return fetched, fetch_errors
def build_and_install_packages(packages, install_dir):
    """
    Builds and installs all packages into install_dir.

    @param packages - A list of already fetched ExternalPackage instances.
    @param install_dir - Directory the packages should be installed into.

    @returns A list of error messages for any installs that failed.
    """
    failures = []
    for pkg in packages:
        if pkg.build_and_install(install_dir):
            continue
        message = 'Unable to build and install %s' % pkg.name
        logging.error(message)
        failures.append(message)
    return failures
if __name__ == '__main__':
    # Exit status is the number of fetch/install errors (0 == success).
    sys.exit(main())
| gpl-2.0 |
maalmeida1837/deepdive | examples/spouse_example/postgres/plpy_extractor/udf/ext_has_spouse.py | 15 | 3244 | #! /usr/bin/env python
#! /usr/bin/env python
# Imports written in this area is useless, just for local debugging
import ddext
from ddext import SD
import itertools
import os
from collections import defaultdict
# input: sentences.id, p1.id, p1.text, p2.id, p2.text
# output: has_spouse
# returns:
def init():
    """Declare the extractor's imports, input columns and output schema.

    Called once by the plpy extractor framework before run() is invoked.
    """
    # APP_HOME is stashed in the shared dict SD so run() can find the
    # dictionary files; plpy functions do not share ordinary globals.
    SD['APP_HOME'] = os.environ['APP_HOME']
    # SD['json'] = __import__('json')
    ddext.import_lib('csv')
    ddext.import_lib('os')
    # from collections import defaultdict
    ddext.import_lib('defaultdict', 'collections')

    # Other examples of import_lib:
    # # "from collections import defaultdict as defdict":
    # ddext.import_lib('defaultdict', 'collections', 'defdict')
    # # "import defaultdict as defdict":
    # ddext.import_lib('defaultdict', as_name='defdict')

    # Input commands MUST HAVE CORRECT ORDER:
    # SAME AS SELECT ORDER, and SAME AS "run" ARGUMENT ORDER
    ddext.input('sentence_id', 'text')
    ddext.input('p1_id', 'text')
    ddext.input('p1_text', 'text')
    ddext.input('p2_id', 'text')
    ddext.input('p2_text', 'text')

    # Returns commands MUST HAVE CORRECT ORDER
    ddext.returns('person1_id', 'text')
    ddext.returns('person2_id', 'text')
    ddext.returns('sentence_id', 'text')
    ddext.returns('description', 'text')
    ddext.returns('is_true', 'boolean')
    ddext.returns('relation_id', 'text')
def run(sentence_id, p1_id, p1_text, p2_id, p2_text):
    """Label one candidate person pair via distant supervision.

    Yields a single row: (person1_id, person2_id, sentence_id, description,
    is_true, relation_id), where is_true is True for known spouses, False
    for known non-spouses or identical/overlapping names, and None when
    the dictionaries say nothing about the pair.
    """
    ####### NOTICE: SHARED MEMORY ########
    # If you really need shared memory / global dir, do "from ddext import SD."
    # Use SD as the global shared dict.

    # Load the spouse dictionary for distant supervision
    spouses = defaultdict(lambda: None)
    if 'spouses' in SD:
        spouses = SD['spouses']
    else:  # Read data from file once, and share it
        SD['spouses'] = spouses
        # Read dict from file: MAKE SURE YOUR DATABASE SERVER
        # HAVE THE ACCESS TO FILE!
        # Please use absolute path!
        with open (SD['APP_HOME'] + "/input/spouses.csv") as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                # Keyed and valued by lower-cased, stripped names.
                spouses[line[0].strip().lower()] = line[1].strip().lower()

    if 'non_spouses' in SD:
        non_spouses = SD['non_spouses']
    else:
        non_spouses = set()
        SD['non_spouses'] = non_spouses
        lines = open(SD['APP_HOME'] + '/input/non-spouses.tsv').readlines()
        for line in lines:
            name1, name2, relation = line.strip().split('\t')
            non_spouses.add((name1, name2))  # Add a non-spouse relation pair

    # NOTICE: PLPY DOES NOT ALLOW overwritting input arguments!!!
    # will return "UnboundLocalError".
    p1_t = p1_text.strip()
    p2_t = p2_text.strip()
    p1_text_lower = p1_t.lower()
    p2_text_lower = p2_t.lower()

    # None = unknown label; True/False only when a dictionary matches.
    is_true = None
    if spouses[p1_text_lower] == p2_text_lower:
        is_true = True
    elif spouses[p2_text_lower] == p1_text_lower:
        is_true = True
    # same person
    elif (p1_t == p2_t) or (p1_t in p2_t) or (p2_t in p1_t):
        is_true = False
    elif (p1_text_lower, p2_text_lower) in non_spouses:
        is_true = False
    elif (p2_text_lower, p1_text_lower) in non_spouses:
        is_true = False

    # Must return a tuple of arrays.
    yield [p1_id, p2_id, sentence_id, "%s-%s" % (p1_t, p2_t), is_true, "%s_%s" % (p1_id, p2_id)]
    # return [[p1_id, p2_id, sentence_id, "%s-%s" % (p1_t, p2_t), is_true]]
| apache-2.0 |
rynomster/django | tests/utils_tests/test_termcolors.py | 337 | 6461 | import unittest
from django.utils.termcolors import (
DARK_PALETTE, DEFAULT_PALETTE, LIGHT_PALETTE, NOCOLOR_PALETTE, PALETTES,
colorize, parse_color_setting,
)
class TermColorTests(unittest.TestCase):
    """Tests for parse_color_setting() and colorize() from django.utils.termcolors."""

    def test_empty_string(self):
        self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])

    def test_simple_palette(self):
        self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
        self.assertEqual(parse_color_setting('nocolor'), None)

    def test_fg(self):
        self.assertEqual(parse_color_setting('error=green'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))

    def test_fg_bg(self):
        self.assertEqual(parse_color_setting('error=green/blue'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))

    def test_fg_opts(self):
        self.assertEqual(parse_color_setting('error=green,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green,bold,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink', 'bold')}))

    def test_fg_bg_opts(self):
        self.assertEqual(parse_color_setting('error=green/blue,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green/blue,bold,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')}))

    def test_override_palette(self):
        self.assertEqual(parse_color_setting('light;error=green'),
                         dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}))

    def test_override_nocolor(self):
        self.assertEqual(parse_color_setting('nocolor;error=green'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))

    def test_reverse_override(self):
        # A palette name later in the string wipes earlier role settings.
        self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])

    def test_multiple_roles(self):
        self.assertEqual(parse_color_setting('error=green;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'}))

    def test_override_with_multiple_roles(self):
        self.assertEqual(parse_color_setting('light;error=green;sql_field=blue'),
                         dict(PALETTES[LIGHT_PALETTE], ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'}))

    def test_empty_definition(self):
        self.assertEqual(parse_color_setting(';'), None)
        self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
        self.assertEqual(parse_color_setting(';;;'), None)

    def test_empty_options(self):
        self.assertEqual(parse_color_setting('error=green,'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,,,'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,,blink,,'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))

    def test_bad_palette(self):
        self.assertEqual(parse_color_setting('unknown'), None)

    def test_bad_role(self):
        self.assertEqual(parse_color_setting('unknown='), None)
        self.assertEqual(parse_color_setting('unknown=green'), None)
        self.assertEqual(parse_color_setting('unknown=green;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))

    def test_bad_color(self):
        self.assertEqual(parse_color_setting('error='), None)
        self.assertEqual(parse_color_setting('error=;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))
        self.assertEqual(parse_color_setting('error=unknown'), None)
        self.assertEqual(parse_color_setting('error=unknown;sql_field=blue'),
                         dict(PALETTES[NOCOLOR_PALETTE], SQL_FIELD={'fg': 'blue'}))
        self.assertEqual(parse_color_setting('error=green/unknown'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green/blue/something'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
        self.assertEqual(parse_color_setting('error=green/blue/something,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)}))

    def test_bad_option(self):
        self.assertEqual(parse_color_setting('error=green,unknown'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=green,unknown,blink'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))

    def test_role_case(self):
        self.assertEqual(parse_color_setting('ERROR=green'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('eRrOr=green'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))

    def test_color_case(self):
        self.assertEqual(parse_color_setting('error=GREEN'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=GREEN/BLUE'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))
        self.assertEqual(parse_color_setting('error=gReEn'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green'}))
        self.assertEqual(parse_color_setting('error=gReEn/bLuE'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'bg': 'blue'}))

    def test_opts_case(self):
        self.assertEqual(parse_color_setting('error=green,BLINK'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))
        self.assertEqual(parse_color_setting('error=green,bLiNk'),
                         dict(PALETTES[NOCOLOR_PALETTE], ERROR={'fg': 'green', 'opts': ('blink',)}))

    def test_colorize_empty_text(self):
        # NOTE(review): opts=('noreset') is a plain string, not a 1-tuple;
        # it appears to work because 'noreset' in 'noreset' is True -- verify.
        self.assertEqual(colorize(text=None), '\x1b[m\x1b[0m')
        self.assertEqual(colorize(text=''), '\x1b[m\x1b[0m')
        self.assertEqual(colorize(text=None, opts=('noreset')), '\x1b[m')
        self.assertEqual(colorize(text='', opts=('noreset')), '\x1b[m')
| bsd-3-clause |
mikebsg01/Programming-Contests | ACM-ICPC/Training/20190427/B.py | 3 | 1037 | from sys import stdin
from datetime import datetime as dt, timedelta
def readLine():
    # Next input line with surrounding whitespace stripped.
    return stdin.readline().strip()

def readInt():
    # Single integer on its own line.
    return int(readLine())

def readInts():
    # Whitespace-separated integers from one line.
    return list(map(int, readLine().split()))
def daysBetween(d1, d2):
    """Days between two 'MM-DD' dates, d1 before d2.

    When d1 compares after d2 the interval wraps New Year, so d1 is placed
    in 2018 and d2 in 2019; otherwise both fall in 2019.
    """
    start_year = '2019' if d1 <= d2 else '2018'
    start = dt.strptime('%s-%s' % (start_year, d1), '%Y-%m-%d')
    end = dt.strptime('2019-%s' % d2, '%Y-%m-%d')
    return abs((end - start).days)
def yesterday(d):
    """Return the 'MM-DD' string for the day before *d* (evaluated in 2019)."""
    parsed = dt.strptime('2019-%s' % d, '%Y-%m-%d')
    return (parsed - timedelta(days=1)).strftime('%m-%d')
def getIdx(N, A):
    """Index of the first of the N sorted 'MM-DD' dates after '10-28', else 0.

    Lexicographic comparison works because the dates are zero-padded.
    """
    hits = [i for i in range(N) if A[i] > '10-28']
    return hits[0] if hits else 0
def main():
    # Read N dates; each input line is '<token> MM-DD' and only the date
    # part is kept.
    N = readInt()
    A = [None for i in range(N)]
    for i in range(N):
        s, d = readLine().split(' ')
        A[i] = d
    A.sort()
    # Start at the first date after '10-28' so the cyclic scan lines up
    # with daysBetween()'s year-wrap assumption.
    idx = getIdx(N, A)
    longest = daysBetween(A[idx - 1], A[idx])
    ans = A[idx]
    i = (idx + 1) % N
    # Walk the sorted dates cyclically, tracking the largest gap between
    # consecutive dates and the date that ends that gap.
    while i != idx:
        res = daysBetween(A[i - 1], A[i])
        if res > longest:
            longest = res
            ans = A[i]
        i = (i + 1) % N
    # The answer is the day just before the date ending the longest gap.
    print(yesterday(ans))

if __name__ == '__main__':
    main()
lzw120/django | tests/regressiontests/test_client_regress/urls.py | 65 | 2288 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
# URLconf for the test-client regression tests: plain views plus chains of
# RedirectViews (multi-hop, circular, self- and cross-host redirects).
urlpatterns = patterns('',
    (r'^no_template_view/$', views.no_template_view),
    (r'^staff_only/$', views.staff_only_view),
    (r'^get_view/$', views.get_view),
    (r'^request_data/$', views.request_data),
    (r'^request_data_extended/$', views.request_data, {'template':'extended.html', 'data':'bacon'}),
    url(r'^arg_view/(?P<name>.+)/$', views.view_with_argument, name='arg_view'),
    (r'^login_protected_redirect_view/$', views.login_protected_redirect_view),
    (r'^redirects/$', RedirectView.as_view(url='/test_client_regress/redirects/further/')),
    (r'^redirects/further/$', RedirectView.as_view(url='/test_client_regress/redirects/further/more/')),
    (r'^redirects/further/more/$', RedirectView.as_view(url='/test_client_regress/no_template_view/')),
    (r'^redirect_to_non_existent_view/$', RedirectView.as_view(url='/test_client_regress/non_existent_view/')),
    (r'^redirect_to_non_existent_view2/$', RedirectView.as_view(url='/test_client_regress/redirect_to_non_existent_view/')),
    (r'^redirect_to_self/$', RedirectView.as_view(url='/test_client_regress/redirect_to_self/')),
    (r'^circular_redirect_1/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_2/')),
    (r'^circular_redirect_2/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_3/')),
    (r'^circular_redirect_3/$', RedirectView.as_view(url='/test_client_regress/circular_redirect_1/')),
    (r'^redirect_other_host/$', RedirectView.as_view(url='https://otherserver:8443/test_client_regress/no_template_view/')),
    (r'^set_session/$', views.set_session_view),
    (r'^check_session/$', views.check_session_view),
    (r'^request_methods/$', views.request_methods_view),
    (r'^check_unicode/$', views.return_unicode),
    (r'^parse_unicode_json/$', views.return_json_file),
    (r'^check_headers/$', views.check_headers),
    (r'^check_headers_redirect/$', RedirectView.as_view(url='/test_client_regress/check_headers/')),
    (r'^body/$', views.body),
    (r'^read_all/$', views.read_all),
    (r'^read_buffer/$', views.read_buffer),
    (r'^request_context_view/$', views.request_context_view),
)
| bsd-3-clause |
payeldillip/django | django/contrib/gis/gdal/prototypes/ds.py | 349 | 4403 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int64_output, int_output,
srs_output, void_output, voidptr_output,
)
c_int_p = POINTER(c_int)  # shortcut type

# Each assignment below wraps one OGR C entry point from the GDAL shared
# library in a typed ctypes prototype (see prototypes.generation helpers).

# Driver Routines
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii')

# DataSource
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])

# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect,
    [c_void_p, c_double, c_double, c_double, c_double], errcheck=False
)

# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])

# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime,
    [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
if GDAL_VERSION >= (2, 0):
    # 64-bit integer fields only exist from GDAL 2.0 onward.
    get_field_as_integer64 = int64_output(lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])

# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
| bsd-3-clause |
mdsafwan/Deal-My-Stuff | Lib/site-packages/django/contrib/gis/utils/ogrinspect.py | 82 | 9146 | """
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
# Requires GDAL to use.
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
    """
    Given a DataSource, generates a dictionary that may be used
    for invoking the LayerMapping utility.

    Keyword Arguments:
     `geom_name` => The name of the geometry field to use for the model.

     `layer_key` => The key for specifying which layer in the DataSource to use;
       defaults to 0 (the first layer). May be an integer index or a string
       identifier for the layer.

     `multi_geom` => Boolean (default: False) - specify as multigeometry.
    """
    if isinstance(data_source, six.string_types):
        # A string is treated as a path and opened as a DataSource.
        data_source = DataSource(data_source)
    elif not isinstance(data_source, DataSource):
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    field_mapping = {}

    # Model field names mirror the OGR field names lower-cased; a trailing
    # underscore is padded out so the name stays a sensible identifier.
    for ogr_field in data_source[layer_key].fields:
        model_field = ogr_field.lower()
        if model_field.endswith('_'):
            model_field += 'field'
        field_mapping[model_field] = ogr_field

    geom_type = data_source[layer_key].geom_type
    # Geometry type numbers 1-3 (Point/Line/Polygon) have MULTI variants.
    prefix = 'MULTI' if multi_geom and geom_type.num in (1, 2, 3) else ''
    field_mapping[geom_name] = prefix + str(geom_type).upper()
    return field_mapping
def ogrinspect(*args, **kwargs):
    """
    Given a data source (either a string or a DataSource object) and a string
    model name this function will generate a GeoDjango model.

    Usage:

    >>> from django.contrib.gis.utils import ogrinspect
    >>> ogrinspect('/path/to/shapefile.shp','NewModel')

    ...will print model definition to stout

    or put this in a python script and use to redirect the output to a new
    model like:

    $ python generate_model.py > myapp/models.py

    # generate_model.py
    from django.contrib.gis.utils import ogrinspect
    shp_file = 'data/mapping_hacks/world_borders.shp'
    model_name = 'WorldBorders'

    print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
                     geom_name='shapes', blank=True))

    Required Arguments
     `datasource` => string or DataSource object to file pointer

     `model name` => string of name of new model class to create

    Optional Keyword Arguments
     `geom_name` => For specifying the model name for the Geometry Field.
       Otherwise will default to `geom`

     `layer_key` => The key for specifying which layer in the DataSource to use;
       defaults to 0 (the first layer). May be an integer index or a string
       identifier for the layer.

     `srid` => The SRID to use for the Geometry Field. If it can be determined,
       the SRID of the datasource is used.

     `multi_geom` => Boolean (default: False) - specify as multigeometry.

     `name_field` => String - specifies a field name to return for the
       `__unicode__`/`__str__` function (which will be generated if specified).

     `imports` => Boolean (default: True) - set to False to omit the
       `from django.contrib.gis.db import models` code from the
       autogenerated models thus avoiding duplicated imports when building
       more than one model by batching ogrinspect()

     `decimal` => Boolean or sequence (default: False). When set to True
       all generated model fields corresponding to the `OFTReal` type will
       be `DecimalField` instead of `FloatField`. A sequence of specific
       field names to generate as `DecimalField` may also be used.

     `blank` => Boolean or sequence (default: False). When set to True all
       generated model fields will have `blank=True`. If the user wants to
       give specific fields to have blank, then a list/tuple of OGR field
       names may be used.

     `null` => Boolean (default: False) - When set to True all generated
       model fields will have `null=True`. If the user wants to specify
       give specific fields to have null, then a list/tuple of OGR field
       names may be used.

    Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
    """
    # join() accepts the generator directly; wrapping it in another
    # generator expression was redundant.
    return '\n'.join(_ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
                multi_geom=False, name_field=None, imports=True,
                decimal=False, blank=False, null=False):
    """
    Helper routine for `ogrinspect` that generates GeoDjango models corresponding
    to the given data source. See the `ogrinspect` docstring for more details.

    This is a generator: it yields the model module one source line at a time.
    """
    # Getting the DataSource
    if isinstance(data_source, six.string_types):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    # Getting the layer corresponding to the layer key and getting
    # a string listing of all OGR fields in the Layer.
    layer = data_source[layer_key]
    ogr_fields = layer.fields

    # Creating lists from the `null`, `blank`, and `decimal`
    # keyword arguments.
    def process_kwarg(kwarg):
        # A sequence selects specific fields; a truthy scalar selects all
        # fields; anything falsy selects none.
        if isinstance(kwarg, (list, tuple)):
            return [s.lower() for s in kwarg]
        elif kwarg:
            return [s.lower() for s in ogr_fields]
        else:
            return []
    null_fields = process_kwarg(null)
    blank_fields = process_kwarg(blank)
    decimal_fields = process_kwarg(decimal)

    # Gets the `null` and `blank` keywords for the given field name.
    def get_kwargs_str(field_name):
        kwlist = []
        if field_name.lower() in null_fields:
            kwlist.append('null=True')
        if field_name.lower() in blank_fields:
            kwlist.append('blank=True')
        if kwlist:
            # Leading ', ' lets the result be appended after a first argument.
            return ', ' + ', '.join(kwlist)
        else:
            return ''

    # For those wishing to disable the imports.
    if imports:
        yield '# This is an auto-generated Django model module created by ogrinspect.'
        yield 'from django.contrib.gis.db import models'
        yield ''

    yield 'class %s(models.Model):' % model_name

    for field_name, width, precision, field_type in zip(
            ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
        # The model field name.
        mfield = field_name.lower()
        if mfield[-1:] == '_':
            mfield += 'field'

        # Getting the keyword args string.
        kwargs_str = get_kwargs_str(field_name)

        if field_type is OFTReal:
            # By default OFTReals are mapped to `FloatField`, however, they
            # may also be mapped to `DecimalField` if specified in the
            # `decimal` keyword.
            if field_name.lower() in decimal_fields:
                yield '    %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
                    mfield, width, precision, kwargs_str
                )
            else:
                # kwargs_str[2:] drops the leading ', ' for the first argument.
                yield '    %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger:
            yield '    %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTString:
            yield '    %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
        elif field_type is OFTDate:
            yield '    %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTDateTime:
            yield '    %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
            yield '    %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
        else:
            raise TypeError('Unknown field type %s in %s' % (field_type, mfield))

    # TODO: Autodetection of multigeometry types (see #7218).
    gtype = layer.geom_type
    if multi_geom and gtype.num in (1, 2, 3):
        geom_field = 'Multi%s' % gtype.django
    else:
        geom_field = gtype.django

    # Setting up the SRID keyword string.
    if srid is None:
        if layer.srs is None:
            srid_str = 'srid=-1'
        else:
            srid = layer.srs.srid
            if srid is None:
                srid_str = 'srid=-1'
            elif srid == 4326:
                # WGS84 is already the default.
                srid_str = ''
            else:
                srid_str = 'srid=%s' % srid
    else:
        srid_str = 'srid=%s' % srid

    yield '    %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
    yield '    objects = models.GeoManager()'

    if name_field:
        yield ''
        yield '    def __%s__(self): return self.%s' % (
            'str' if six.PY3 else 'unicode', name_field)
cournape/ensetuptools | setuptools/command/setopt.py | 1 | 4977 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config

    `kind` must be one of "local", "global", or "user"
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # Per-user config is a dotfile on POSIX, plain name elsewhere.
        if os.name == 'posix':
            dot = '.'
        else:
            dot = ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name. A ``None`` value means to delete the entire section,
    while a dictionary lists settings to be changed or deleted in that section.
    A setting of ``None`` means to delete that setting.
    """
    # Prefer the Python 2 module name (this code predates Python 3) but
    # fall back to the Python 3 spelling so the function works on both.
    try:
        from ConfigParser import RawConfigParser
    except ImportError:
        from configparser import RawConfigParser
    log.debug("Reading configuration from %s", filename)
    opts = RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug("Deleting %s.%s from %s",
                              section, option, filename)
                    opts.remove_option(section, option)
                    # Drop the section entirely once its last option is gone.
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug("Setting %s.%s to %r in %s",
                              section, option, value, filename)
                    opts.set(section, option, value)

    log.info("Writing %s", filename)
    if not dry_run:
        # try/finally ensures the handle is closed even if write() fails
        # (the original leaked it on error); plain try/finally keeps
        # compatibility with very old Pythons that lack 'with' for files.
        f = open(filename, 'w')
        try:
            opts.write(f)
        finally:
            f.close()
class option_base(Command):
    """Abstract base class for commands that mess with config files"""

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        # All three targets start unset; finalize_options() resolves them.
        self.global_config = None
        self.user_config = None
        self.filename = None

    def finalize_options(self):
        # Gather every config-file destination the user selected.
        targets = []
        if self.global_config:
            targets.append(config_file('global'))
        if self.user_config:
            targets.append(config_file('user'))
        if self.filename is not None:
            targets.append(self.filename)
        # Fall back to the project-local setup.cfg when nothing was chosen.
        if not targets:
            targets.append(config_file('local'))
        # Exactly one destination may be in effect at a time.
        if len(targets) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                targets
            )
        self.filename, = targets
class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        # Command-specific options, all unset until parsed.
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        # Option names use dashes on the command line but underscores in
        # config files; translate before writing the setting out.
        key = self.option.replace('-', '_')
        edit_config(
            self.filename,
            {self.command: {key: self.set_value}},
            self.dry_run,
        )
| bsd-3-clause |
airbnb/superset | superset/migrations/versions/46f444d8b9b7_remove_coordinator_from_druid_cluster_.py | 5 | 1660 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""remove_coordinator_from_druid_cluster_model.py
Revision ID: 46f444d8b9b7
Revises: 4ce8df208545
Create Date: 2018-11-26 00:01:04.781119
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "46f444d8b9b7"  # this migration's unique id
down_revision = "4ce8df208545"  # the migration this one follows
def upgrade():
    """Drop the coordinator-related columns from the ``clusters`` table."""
    removed_columns = ("coordinator_host", "coordinator_endpoint", "coordinator_port")
    with op.batch_alter_table("clusters") as batch_op:
        for column_name in removed_columns:
            batch_op.drop_column(column_name)
def downgrade():
    """Restore the coordinator columns removed by this migration."""
    restored_columns = (
        sa.Column("coordinator_host", sa.String(length=256), nullable=True),
        sa.Column("coordinator_port", sa.Integer(), nullable=True),
        sa.Column("coordinator_endpoint", sa.String(length=256), nullable=True),
    )
    for column in restored_columns:
        op.add_column("clusters", column)
| apache-2.0 |
itdc/sublimetext-itdchelper | itdchelper/asanalib/requests/packages/charade/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ration = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR
# Detection threshold: per the analysis above, the typical distribution
# ratio is about 25% of the ideal (3.79), so 0.9 separates GB2312 text
# from random byte sequences (ratio ~0.157).
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
# Number of entries in the frequency-order table that follows.
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
pwmarcz/django | django/contrib/sessions/backends/cache.py | 102 | 2499 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import caches
from django.utils.six.moves import xrange
# Namespace prefix for session entries, so they cannot collide with other
# users of the same (possibly shared) cache backend.
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
    """
    Session backend that keeps every session in a Django cache backend.
    """

    def __init__(self, session_key=None):
        # Resolve the configured cache alias once per store instance.
        self._cache = caches[settings.SESSION_CACHE_ALIAS]
        super(SessionStore, self).__init__(session_key)

    @property
    def cache_key(self):
        # Prefix the session key so it is namespaced within the cache.
        return KEY_PREFIX + self._get_or_create_session_key()

    def load(self):
        try:
            data = self._cache.get(self.cache_key, None)
        except Exception:
            # Some backends (e.g. memcache) raise an exception on invalid
            # cache keys. If this happens, reset the session. See #17810.
            data = None
        if data is None:
            # Nothing stored under this key -- start a brand new session.
            self.create()
            return {}
        return data

    def create(self):
        # Because a cache can fail silently (e.g. memcache), we don't know if
        # we are failing to create a new session because of a key collision or
        # because the cache is missing. So we try for a (large) number of times
        # and then raise an exception. That's the risk you shoulder if using
        # cache backing.
        for attempt in xrange(10000):
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                # Key collision -- draw another key and retry.
                continue
            else:
                self.modified = True
                return
        raise RuntimeError(
            "Unable to create a new session key. "
            "It is likely that the cache is unavailable.")

    def save(self, must_create=False):
        # cache.add refuses to overwrite an existing key (returns a falsy
        # result), which is exactly how a must-create save detects collisions.
        write = self._cache.add if must_create else self._cache.set
        stored = write(self.cache_key,
                       self._get_session(no_load=must_create),
                       self.get_expiry_age())
        if must_create and not stored:
            raise CreateError

    def exists(self, session_key):
        return (KEY_PREFIX + session_key) in self._cache

    def delete(self, session_key=None):
        if session_key is None:
            if self.session_key is None:
                # Nothing loaded and nothing given -- nothing to delete.
                return
            session_key = self.session_key
        self._cache.delete(KEY_PREFIX + session_key)

    @classmethod
    def clear_expired(cls):
        # The cache backend expires entries on its own; nothing to do here.
        pass
| bsd-3-clause |
v6ak/qubes-core-admin | core/storage/__init__.py | 2 | 15124 | #!/usr/bin/python2
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013 Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
from __future__ import absolute_import
import ConfigParser
import os
import os.path
import shutil
import subprocess
import sys
import qubes.qubesutils
from qubes.qubes import QubesException, defaults, system_path
# System-wide configuration file describing the available storage pools.
CONFIG_FILE = '/etc/qubes/storage.conf'
class QubesVmStorage(object):
    """
    Class for handling VM virtual disks. This is base class for all other
    implementations, mostly with Xen on Linux in mind.

    NOTE(review): several methods reference self.private_img, self.root_img
    and self.volatile_img, which are not assigned in this __init__ --
    presumably subclasses set them; confirm before subclassing.
    """
    def __init__(self, vm,
            private_img_size = None,
            root_img_size = None,
            modules_img = None,
            modules_img_rw = False):
        # The owning VM and the directory that holds all of its image files.
        self.vm = vm
        self.vmdir = vm.dir_path

        # Fall back to the system-wide defaults when no explicit size given.
        if private_img_size:
            self.private_img_size = private_img_size
        else:
            self.private_img_size = defaults['private_img_size']

        if root_img_size:
            self.root_img_size = root_img_size
        else:
            self.root_img_size = defaults['root_img_size']

        # Fixed Xen virtual device names for the four standard disks.
        self.root_dev = "xvda"
        self.private_dev = "xvdb"
        self.volatile_dev = "xvdc"
        self.modules_dev = "xvdd"

        # For now compute this path still in QubesVm
        self.modules_img = modules_img
        self.modules_img_rw = modules_img_rw

        # Additional drive (currently used only by HVM)
        self.drive = None

    def format_disk_dev(self, path, script, vdev, rw=True, type="disk",
                        domain=None):
        """Render one libvirt-style <disk> XML element for the given backing
        device path. Returns an empty string when path is None, so callers
        can always concatenate the result unconditionally.
        """
        if path is None:
            return ''
        template = " <disk type='block' device='{type}'>\n" \
                " <driver name='phy'/>\n" \
                " <source dev='{path}'/>\n" \
                " <target dev='{vdev}' bus='xen'/>\n" \
                "{params}" \
                " </disk>\n"
        params = ""
        if not rw:
            params += " <readonly/>\n"
        if domain:
            # Disk is backed by another domain rather than dom0.
            params += " <backenddomain name='%s'/>\n" % domain
        if script:
            # Attach an optional device setup script element.
            params += " <script path='%s'/>\n" % script
        return template.format(path=path, vdev=vdev, type=type, params=params)

    def get_config_params(self):
        """Collect the per-device XML snippets used by the domain config."""
        args = {}
        args['rootdev'] = self.root_dev_config()
        args['privatedev'] = self.private_dev_config()
        args['volatiledev'] = self.volatile_dev_config()
        args['otherdevs'] = self.other_dev_config()
        return args

    def root_dev_config(self):
        # Subclasses must render the XML for the root disk.
        raise NotImplementedError

    def private_dev_config(self):
        # Subclasses must render the XML for the private disk.
        raise NotImplementedError

    def volatile_dev_config(self):
        # Subclasses must render the XML for the volatile disk.
        raise NotImplementedError

    def other_dev_config(self):
        """XML for the auxiliary disk slot (xvdd): either the kernel-modules
        image or, failing that, the extra drive attached to the VM."""
        if self.modules_img is not None:
            return self.format_disk_dev(self.modules_img,
                    None,
                    self.modules_dev,
                    self.modules_img_rw)
        elif self.drive is not None:
            # self.drive is encoded as "type:backend-domain:path";
            # "hd" is normalized to the libvirt device type "disk".
            (drive_type, drive_domain, drive_path) = self.drive.split(":")
            if drive_type == "hd":
                drive_type = "disk"

            # Only plain disks are writable; other types (e.g. cdrom)
            # stay read-only.
            writable = False
            if drive_type == "disk":
                writable = True

            # "dom0" means the local domain -- no backenddomain element.
            if drive_domain.lower() == "dom0":
                drive_domain = None
            return self.format_disk_dev(drive_path, None,
                    self.modules_dev,
                    rw=writable,
                    type=drive_type,
                    domain=drive_domain)
        else:
            return ''

    def _copy_file(self, source, destination):
        """
        Effective file copy, preserving sparse files etc.
        """
        # TODO: Windows support
        # We prefer to use Linux's cp, because it nicely handles sparse files
        retcode = subprocess.call (["cp", "--reflink=auto", source, destination])
        if retcode != 0:
            raise IOError ("Error while copying {0} to {1}".\
                    format(source, destination))

    def get_disk_utilization(self):
        # Actual on-disk usage of the whole VM directory.
        return qubes.qubesutils.get_disk_usage(self.vmdir)

    def get_disk_utilization_private_img(self):
        # Actual on-disk usage of just the private image.
        return qubes.qubesutils.get_disk_usage(self.private_img)

    def get_private_img_sz(self):
        # Apparent (logical) size of the private image; 0 when missing.
        if not os.path.exists(self.private_img):
            return 0
        return os.path.getsize(self.private_img)

    def resize_private_img(self, size):
        raise NotImplementedError

    def create_on_disk_private_img(self, verbose, source_template = None):
        raise NotImplementedError

    def create_on_disk_root_img(self, verbose, source_template = None):
        raise NotImplementedError

    def create_on_disk(self, verbose, source_template = None):
        """Create the VM directory and all of its disk images."""
        if source_template is None:
            source_template = self.vm.template

        # Group-writable files so other tools in the qubes group can manage
        # them; the caller's umask is restored at the end.
        old_umask = os.umask(002)

        if verbose:
            print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
        os.mkdir (self.vmdir)
        self.create_on_disk_private_img(verbose, source_template)
        self.create_on_disk_root_img(verbose, source_template)
        self.reset_volatile_storage(verbose, source_template)

        os.umask(old_umask)

    def clone_disk_files(self, src_vm, verbose):
        """Copy another VM's private image (and root image, when the source
        is updateable) into this VM's freshly-created directory."""
        if verbose:
            print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
        os.mkdir (self.vmdir)

        if src_vm.private_img is not None and self.private_img is not None:
            if verbose:
                print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\
                        format(src_vm.private_img, self.private_img)
            self._copy_file(src_vm.private_img, self.private_img)

        if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None:
            if verbose:
                print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\
                        format(src_vm.root_img, self.root_img)
            self._copy_file(src_vm.root_img, self.root_img)

        # TODO: modules?

    def rename(self, old_name, new_name):
        """Move the VM directory and retarget the stored image paths."""
        old_vmdir = self.vmdir
        new_vmdir = os.path.join(os.path.dirname(self.vmdir), new_name)
        os.rename(self.vmdir, new_vmdir)
        self.vmdir = new_vmdir
        # Rewrite each image path to point inside the renamed directory.
        if self.private_img:
            self.private_img = self.private_img.replace(old_vmdir, new_vmdir)
        if self.root_img:
            self.root_img = self.root_img.replace(old_vmdir, new_vmdir)
        if self.volatile_img:
            self.volatile_img = self.volatile_img.replace(old_vmdir, new_vmdir)

    def verify_files(self):
        """Raise QubesException when any expected image file is missing."""
        if not os.path.exists (self.vmdir):
            raise QubesException (
                "VM directory doesn't exist: {0}".\
                format(self.vmdir))

        if self.root_img and not os.path.exists (self.root_img):
            raise QubesException (
                "VM root image file doesn't exist: {0}".\
                format(self.root_img))

        if self.private_img and not os.path.exists (self.private_img):
            raise QubesException (
                "VM private image file doesn't exist: {0}".\
                format(self.private_img))

        if self.modules_img is not None:
            if not os.path.exists(self.modules_img):
                raise QubesException (
                    "VM kernel modules image does not exists: {0}".\
                    format(self.modules_img))

    def remove_from_disk(self):
        # Delete the whole VM directory tree, images included.
        shutil.rmtree (self.vmdir)

    def reset_volatile_storage(self, verbose = False, source_template = None):
        """Discard and recreate the volatile image for template-based VMs;
        create it for standalone VMs only when it is missing."""
        if source_template is None:
            source_template = self.vm.template

        # Re-create only for template based VMs
        if source_template is not None and self.volatile_img:
            if os.path.exists(self.volatile_img):
                os.remove(self.volatile_img)

        # For StandaloneVM create it only if not already exists (eg after backup-restore)
        if self.volatile_img and not os.path.exists(self.volatile_img):
            if verbose:
                print >> sys.stderr, "--> Creating volatile image: {0}...".\
                        format(self.volatile_img)
            # Helper script takes the root image size in MiB.
            subprocess.check_call([system_path["prepare_volatile_img_cmd"],
                self.volatile_img, str(self.root_img_size / 1024 / 1024)])

    def prepare_for_vm_startup(self, verbose):
        """Run before VM start: refresh the volatile image and recreate the
        private image if it disappeared."""
        self.reset_volatile_storage(verbose=verbose)

        if self.private_img and not os.path.exists (self.private_img):
            print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\
                format(self.private_img)
            self.create_on_disk_private_img(verbose=False)
def dump(o):
""" Returns a string represention of the given object
Args:
o (object): anything that response to `__module__` and `__class__`
Given the class :class:`qubes.storage.QubesVmStorage` it returns
'qubes.storage.QubesVmStorage' as string
"""
return o.__module__ + '.' + o.__class__.__name__
def load(string):
    """ Given a dotted full module string representation of a class it loads it

    Args:
        string (str) i.e. 'qubes.storage.xen.QubesXenVmStorage'

    Returns:
        type

    See also:
        :func:`qubes.storage.dump`
    """
    if not isinstance(string, str):
        # This is a hack which allows giving a real class to a vm instead of a
        # string as string_class parameter.  (isinstance replaces the old
        # `type(string) is str` check, which broke for str subclasses.)
        return string

    # Split "pkg.module.Klass" into the module path and the class name;
    # rpartition avoids the old `components[-1:][0]` convolution.
    module_path, _, klass_name = string.rpartition(".")
    module = __import__(module_path, fromlist=[klass_name])
    return getattr(module, klass_name)
def get_pool(name, vm):
    """ Instantiate the storage implementation configured for pool *name*.

    Args:
        name (str): section name in the storage config file
        vm: the QubesVm instance the storage belongs to

    Returns:
        An instance of the configured `QubesVmStorage` subclass, constructed
        with the options from the pool's config section.
    """
    config = _get_storage_config_parser()
    klass = _get_pool_klass(name, config)

    # Every option except the implementation selectors ('driver'/'class')
    # is forwarded to the storage class as a keyword argument.
    keys = [k for k in config.options(name) if k != 'driver' and k != 'class']
    config_kwargs = dict((k, config.get(name, k)) for k in keys)

    if name == 'default':
        # Start from the built-in defaults and let explicit config values
        # override them.  Bug fix: this previously did kwargs.update(keys),
        # i.e. updated the dict with a *list of option names*, which raises
        # at runtime and discarded the configured values.
        kwargs = defaults['pool_config'].copy()
        kwargs.update(config_kwargs)
    else:
        kwargs = config_kwargs

    return klass(vm, **kwargs)
def pool_exists(name):
    """ Return True when a storage pool called *name* is configured. """
    try:
        _get_pool_klass(name)
    except StoragePoolException:
        return False
    return True
def add_pool(name, **kwargs):
    """ Register a new storage pool section in the config file.

    All keyword arguments become options of the new section.
    """
    parser = _get_storage_config_parser()
    parser.add_section(name)
    for option, value in kwargs.iteritems():
        parser.set(name, option, value)
    _write_config(parser)
def remove_pool(name):
    """ Drop the given pool's section from the config file. """
    parser = _get_storage_config_parser()
    parser.remove_section(name)
    _write_config(parser)
def _write_config(config):
    """ Persist *config* to CONFIG_FILE, replacing its previous contents. """
    with open(CONFIG_FILE, 'w') as handle:
        config.write(handle)
def _get_storage_config_parser():
    """ Build a RawConfigParser pre-loaded from the storage CONFIG_FILE.

    Returns:
        RawConfigParser
    """
    parser = ConfigParser.RawConfigParser()
    parser.read(CONFIG_FILE)
    return parser
def _get_pool_klass(name, config=None):
    """ Returns the storage klass for the specified pool.

    Args:
        name: The pool name.
        config: If ``config`` is not specified
                `_get_storage_config_parser()` is called.

    Returns:
        type: A class inheriting from `QubesVmStorage`

    Raises:
        StoragePoolException: when the pool section is missing or names no
            usable implementation.
    """
    if config is None:
        config = _get_storage_config_parser()

    if not config.has_section(name):
        raise StoragePoolException('Unknown storage pool ' + name)

    if config.has_option(name, 'class'):
        # A fully qualified class path takes precedence over a driver alias.
        klass = load(config.get(name, 'class'))
    elif config.has_option(name, 'driver'):
        pool_driver = config.get(name, 'driver')
        klass = defaults['pool_drivers'][pool_driver]
    else:
        raise StoragePoolException('Unknown storage pool driver ' + name)
    return klass
class StoragePoolException(QubesException):
    """ Raised when a storage pool is missing or misconfigured. """
    pass
class Pool(object):
    """ Base class for file-backed storage pools rooted at a directory.

    Constructing a Pool creates the pool root directory and its per-VM-type
    subdirectories ('appvms', 'servicevms', 'vm-templates') when missing.
    """
    def __init__(self, vm, dir_path):
        assert vm is not None
        assert dir_path is not None

        self.vm = vm
        self.dir_path = dir_path
        # Make sure the pool root exists before computing paths inside it.
        self.create_dir_if_not_exists(self.dir_path)

        self.vmdir = self.vmdir_path(vm, self.dir_path)

        appvms_path = os.path.join(self.dir_path, 'appvms')
        self.create_dir_if_not_exists(appvms_path)

        servicevms_path = os.path.join(self.dir_path, 'servicevms')
        self.create_dir_if_not_exists(servicevms_path)

        vm_templates_path = os.path.join(self.dir_path, 'vm-templates')
        self.create_dir_if_not_exists(vm_templates_path)

    def vmdir_path(self, vm, pool_dir):
        """ Returns the path to vmdir depending on the type of the VM.

        The default QubesOS file storage saves the vm images in three
        different directories depending on the ``QubesVM`` type:

        * ``appvms`` for ``QubesAppVm`` or ``QubesHvm``
        * ``vm-templates`` for ``QubesTemplateVm`` or ``QubesTemplateHvm``
        * ``servicevms`` for any subclass of ``QubesNetVm``

        Args:
            vm: a QubesVM
            pool_dir: the root directory of the pool

        Returns:
            string (str) absolute path to the directory where the vm files
                are stored
        """
        if vm.is_appvm():
            subdir = 'appvms'
        elif vm.is_template():
            subdir = 'vm-templates'
        elif vm.is_netvm():
            subdir = 'servicevms'
        elif vm.is_disposablevm():
            subdir = 'appvms'
            # DisposableVMs are stored under their template's name with a
            # '-dvm' suffix rather than their own name, hence the early
            # return here.
            return os.path.join(pool_dir, subdir, vm.template.name + '-dvm')
        else:
            raise QubesException(vm.type() + ' unknown vm type')

        return os.path.join(pool_dir, subdir, vm.name)

    def create_dir_if_not_exists(self, path):
        """ Create *path* when it does not exist yet.

        This method does not create any parent directories.
        """
        if not os.path.exists(path):
            os.mkdir(path)
| gpl-2.0 |
sodafree/backend | build/lib.linux-i686-2.7/django/contrib/gis/gdal/feature.py | 92 | 3941 | # The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
    "A class that wraps an OGR Feature, needs to be instantiated from a Layer object."

    #### Python 'magic' routines ####
    def __init__(self, feat, fdefn):
        "Initializes on the pointers for the feature and the layer definition."
        if not feat or not fdefn:
            raise OGRException('Cannot create OGR Feature, invalid pointer given.')
        self.ptr = feat
        self._fdefn = fdefn

    def __del__(self):
        "Releases a reference to this object."
        if self._ptr: capi.destroy_feature(self._ptr)

    def __getitem__(self, index):
        """
        Gets the Field object at the specified index, which may be either
        an integer or the Field's string label.  Note that the Field object
        is not the field's _value_ -- use the `get` method instead to
        retrieve the value (e.g. an integer) instead of a Field instance.
        """
        if isinstance(index, basestring):
            i = self.index(index)
        else:
            # Valid integer indexes run from 0 to num_fields - 1.  The old
            # check (`index > self.num_fields`) was off by one and let the
            # invalid value `num_fields` through to the C library.
            if index < 0 or index >= self.num_fields:
                raise OGRIndexError('index out of range')
            i = index
        return Field(self.ptr, i)

    def __iter__(self):
        "Iterates over each field in the Feature."
        for i in xrange(self.num_fields):
            yield self[i]

    def __len__(self):
        "Returns the count of fields in this feature."
        return self.num_fields

    def __str__(self):
        "The string name of the feature."
        return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)

    def __eq__(self, other):
        "Does equivalence testing on the features."
        return bool(capi.feature_equal(self.ptr, other._ptr))

    #### Feature Properties ####
    @property
    def fid(self):
        "Returns the feature identifier."
        return capi.get_fid(self.ptr)

    @property
    def layer_name(self):
        "Returns the name of the layer for the feature."
        return capi.get_feat_name(self._fdefn)

    @property
    def num_fields(self):
        "Returns the number of fields in the Feature."
        return capi.get_feat_field_count(self.ptr)

    @property
    def fields(self):
        "Returns a list of fields in the Feature."
        return [capi.get_field_name(capi.get_field_defn(self._fdefn, i))
                for i in xrange(self.num_fields)]

    @property
    def geom(self):
        "Returns the OGR Geometry for this Feature."
        # Clone the geometry so this object owns an independent copy.
        geom_ptr = capi.get_feat_geom_ref(self.ptr)
        return OGRGeometry(geom_api.clone_geom(geom_ptr))

    @property
    def geom_type(self):
        "Returns the OGR Geometry Type for this Feature."
        return OGRGeomType(capi.get_fd_geom_type(self._fdefn))

    #### Feature Methods ####
    def get(self, field):
        """
        Returns the value of the field, instead of an instance of the Field
        object.  May take a string of the field name or a Field object as
        parameters.
        """
        field_name = getattr(field, 'name', field)
        return self[field_name].value

    def index(self, field_name):
        "Returns the index of the given field name."
        i = capi.get_field_index(self.ptr, field_name)
        if i < 0: raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
        return i
| bsd-3-clause |
adieu/django-nonrel | tests/regressiontests/m2m_regress/models.py | 92 | 1930 | from django.db import models
from django.contrib.auth import models as auth
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
class SelfRefer(models.Model):
    # Two symmetrical self-referencing M2Ms; no related_name is needed
    # because symmetrical relations are not explicitly reversible.
    name = models.CharField(max_length=10)
    references = models.ManyToManyField('self')
    related = models.ManyToManyField('self')

    def __unicode__(self):
        return self.name
class Tag(models.Model):
    # Simple named tag; serves as the target of several M2M fields in this
    # module.
    name = models.CharField(max_length=10)

    def __unicode__(self):
        return self.name
# Regression for #11956 -- a many to many to the base class
class TagCollection(Tag):
    # M2M pointing at this model's own concrete base class (regression for
    # #11956).
    tags = models.ManyToManyField(Tag, related_name='tag_collections')

    def __unicode__(self):
        return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
class Entry(models.Model):
    name = models.CharField(max_length=10)
    topics = models.ManyToManyField(Tag)
    # related_name is required here because both M2Ms target Tag and would
    # otherwise clash on the reverse accessor.
    related = models.ManyToManyField(Tag, related_name="similar")

    def __unicode__(self):
        return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
    # Inherits the self-referential M2M fields from SelfRefer.
    pass
class SelfReferChildSibling(SelfRefer):
    # Second subclass of the same self-referential base model.
    pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
class Line(models.Model):
    # Target of Worksheet.lines below.
    name = models.CharField(max_length=100)
class Worksheet(models.Model):
    # The M2M "owning" side deliberately uses a CharField primary key
    # instead of an AutoField.
    id = models.CharField(primary_key=True, max_length=100)
    # NOTE(review): null=True has no effect on a ManyToManyField; kept
    # unchanged since this module exists to pin historical behavior.
    lines = models.ManyToManyField(Line, blank=True, null=True)
# Regression for #11226 -- A model with the same name that another one to
# which it has a m2m relation. This shouldn't cause a name clash between
# the automatically created m2m intermediary table FK field names when
# running syncdb
class User(models.Model):
    # Deliberately shares its name with auth.User, to which it also has an
    # M2M -- checks that the auto-created through table's FK column names do
    # not clash during syncdb.
    name = models.CharField(max_length=30)
    friends = models.ManyToManyField(auth.User)
| bsd-3-clause |
MonicaHsu/truvaluation | venv/lib/python2.7/popen2.py | 304 | 8416 | """Spawn a command with pipes to its stdin, stdout, and optionally stderr.
The normal os.popen(cmd, mode) call spawns a shell command and provides a
file interface to just the input or output of the process depending on
whether mode is 'r' or 'w'. This module provides the functions popen2(cmd)
and popen3(cmd) which return two or three pipes to the spawned command.
"""
import os
import sys
import warnings
# Importing this module is itself deprecated in favour of subprocess.
warnings.warn("The popen2 module is deprecated. Use the subprocess module.",
              DeprecationWarning, stacklevel=2)

__all__ = ["popen2", "popen3", "popen4"]

try:
    # Highest file descriptor number the OS allows; used when closing
    # inherited descriptors in the forked child.
    MAXFD = os.sysconf('SC_OPEN_MAX')
except (AttributeError, ValueError):
    # os.sysconf missing or the name unsupported -- fall back to a
    # conservative default.
    MAXFD = 256

# Popen3/Popen4 instances that have not yet been waited on.
_active = []
def _cleanup():
    """Poll every tracked child once and drop those that have exited."""
    for proc in _active[:]:
        if proc.poll(_deadstate=sys.maxint) >= 0:
            try:
                _active.remove(proc)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
class Popen3:
    """Class representing a child process. Normally, instances are created
    internally by the functions popen2() and popen3()."""

    # Exit status of the child; -1 means "child not completed yet".
    sts = -1 # Child not completed yet

    def __init__(self, cmd, capturestderr=False, bufsize=-1):
        """The parameter 'cmd' is the shell command to execute in a
        sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments
        will be passed directly to the program without shell intervention (as
        with os.spawnv()). If 'cmd' is a string it will be passed to the shell
        (as with os.system()). The 'capturestderr' flag, if true, specifies
        that the object should capture standard error output of the child
        process. The default is false. If the 'bufsize' parameter is
        specified, it specifies the size of the I/O buffers to/from the child
        process."""
        # Reap any previously-finished children before forking a new one.
        _cleanup()
        self.cmd = cmd
        p2cread, p2cwrite = os.pipe()
        c2pread, c2pwrite = os.pipe()
        if capturestderr:
            errout, errin = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            # Child: wire the pipe ends onto stdin/stdout (and stderr when
            # requested), then exec -- _run_child never returns.
            os.dup2(p2cread, 0)
            os.dup2(c2pwrite, 1)
            if capturestderr:
                os.dup2(errin, 2)
            self._run_child(cmd)
        # Parent: close the descriptors belonging to the child and wrap the
        # remaining pipe ends in buffered file objects.
        os.close(p2cread)
        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
        os.close(c2pwrite)
        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
        if capturestderr:
            os.close(errin)
            self.childerr = os.fdopen(errout, 'r', bufsize)
        else:
            self.childerr = None

    def __del__(self):
        # In case the child hasn't been waited on, check if it's done.
        self.poll(_deadstate=sys.maxint)
        if self.sts < 0:
            if _active is not None:
                # Child is still running, keep us alive until we can wait on it.
                _active.append(self)

    def _run_child(self, cmd):
        # Executed in the forked child only; either execs *cmd* or exits.
        if isinstance(cmd, basestring):
            cmd = ['/bin/sh', '-c', cmd]
        # Close every descriptor above stderr so the child does not inherit
        # stray pipe ends.
        os.closerange(3, MAXFD)
        try:
            os.execvp(cmd[0], cmd)
        finally:
            # exec failed -- terminate the child without running any parent
            # cleanup code.
            os._exit(1)

    def poll(self, _deadstate=None):
        """Return the exit status of the child process if it has finished,
        or -1 if it hasn't finished yet."""
        if self.sts < 0:
            try:
                pid, sts = os.waitpid(self.pid, os.WNOHANG)
                # pid will be 0 if self.pid hasn't terminated
                if pid == self.pid:
                    self.sts = sts
            except os.error:
                # waitpid failed (e.g. child already reaped); record the
                # caller-provided fallback status, if any.
                if _deadstate is not None:
                    self.sts = _deadstate
        return self.sts

    def wait(self):
        """Wait for and return the exit status of the child process."""
        if self.sts < 0:
            pid, sts = os.waitpid(self.pid, 0)
            # This used to be a test, but it is believed to be
            # always true, so I changed it to an assertion - mvl
            assert pid == self.pid
            self.sts = sts
        return self.sts
class Popen4(Popen3):
    # Variant of Popen3 that merges the child's stderr into its stdout, so
    # there is never a separate childerr stream.
    childerr = None

    def __init__(self, cmd, bufsize=-1):
        # Reap any previously-finished children before forking a new one.
        _cleanup()
        self.cmd = cmd
        p2cread, p2cwrite = os.pipe()
        c2pread, c2pwrite = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            # Child: stdin from the parent; stdout and stderr both go down
            # the same pipe back to the parent.  _run_child never returns.
            os.dup2(p2cread, 0)
            os.dup2(c2pwrite, 1)
            os.dup2(c2pwrite, 2)
            self._run_child(cmd)
        # Parent: close the child-side descriptors and wrap ours in buffered
        # file objects.
        os.close(p2cread)
        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
        os.close(c2pwrite)
        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
# Platform split: on Windows/OS2 delegate to os.popen2/3/4; on Unix use
# the fork/exec based Popen3/Popen4 classes defined above.  Note that in
# both branches the returned tuple order is (child_stdout, child_stdin),
# which is the reverse of what os.popen2() itself returns.
if sys.platform[:3] == "win" or sys.platform == "os2emx":
    # Some things don't make sense on non-Unix platforms.
    del Popen3, Popen4

    def popen2(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout, child_stdin) are returned."""
        # Swap the (stdin, stdout) pair from os.popen2 into this module's
        # (stdout, stdin) convention.
        w, r = os.popen2(cmd, mode, bufsize)
        return r, w

    def popen3(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout, child_stdin, child_stderr) are returned."""
        w, r, e = os.popen3(cmd, mode, bufsize)
        return r, w, e

    def popen4(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout_stderr, child_stdin) are returned."""
        w, r = os.popen4(cmd, mode, bufsize)
        return r, w
else:
    # Unix branch: 'mode' is accepted only for signature compatibility
    # with the Windows branch and is ignored.
    def popen2(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout, child_stdin) are returned."""
        inst = Popen3(cmd, False, bufsize)
        return inst.fromchild, inst.tochild

    def popen3(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout, child_stdin, child_stderr) are returned."""
        inst = Popen3(cmd, True, bufsize)
        return inst.fromchild, inst.tochild, inst.childerr

    def popen4(cmd, bufsize=-1, mode='t'):
        """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd' may
        be a sequence, in which case arguments will be passed directly to the
        program without shell intervention (as with os.spawnv()).  If 'cmd' is a
        string it will be passed to the shell (as with os.system()).  If
        'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
        file objects (child_stdout_stderr, child_stdin) are returned."""
        inst = Popen4(cmd, bufsize)
        return inst.fromchild, inst.tochild

    __all__.extend(["Popen3", "Popen4"])
| mit |
vrenaville/ngo-addons-backport | addons/stock/wizard/stock_inventory_merge.py | 57 | 3815 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_inventory_merge(osv.osv_memory):
    # Transient wizard that merges several draft physical inventories
    # into one new "Merged inventory", summing quantities per
    # (location, product, uom) key.
    _name = "stock.inventory.merge"
    _description = "Merge Inventory"

    def fields_view_get(self, cr, uid, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """
        Changes the view dynamically
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: New arch of view.
        """
        if context is None:
            context={}
        res = super(stock_inventory_merge, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        # Merging only makes sense with at least two inventories selected.
        if context.get('active_model','') == 'stock.inventory' and len(context['active_ids']) < 2:
            raise osv.except_osv(_('Warning!'),
                _('Please select multiple physical inventories to merge in the list view.'))
        return res

    def do_merge(self, cr, uid, ids, context=None):
        """ To merge selected Inventories.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return:
        """
        invent_obj = self.pool.get('stock.inventory')
        invent_line_obj = self.pool.get('stock.inventory.line')
        invent_lines = {}
        if context is None:
            context = {}
        # Aggregate quantities of all selected inventories, keyed by
        # (location, product, uom); only draft inventories may be merged.
        for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):
            if inventory.state == "done":
                raise osv.except_osv(_('Warning!'),
                    _('Merging is only allowed on draft inventories.'))

            for line in inventory.inventory_line_id:
                key = (line.location_id.id, line.product_id.id, line.product_uom.id)
                if key in invent_lines:
                    invent_lines[key] += line.product_qty
                else:
                    invent_lines[key] = line.product_qty

        # Create one new inventory holding the aggregated lines.
        new_invent = invent_obj.create(cr, uid, {
            'name': 'Merged inventory'
        }, context=context)

        for key, quantity in invent_lines.items():
            invent_line_obj.create(cr, uid, {
                    'inventory_id': new_invent,
                    'location_id': key[0],
                    'product_id': key[1],
                    'product_uom': key[2],
                    'product_qty': quantity,
            })
        return {'type': 'ir.actions.act_window_close'}

# Register the wizard model with the ORM (legacy OpenERP idiom).
stock_inventory_merge()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PandaPayProject/PandaPay | qa/rpc-tests/invalidblockrequest.py | 38 | 4237 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        # NOTE(review): does not call super().__init__(); presumably the
        # framework performs its own setup elsewhere — confirm against
        # ComparisonTestFramework.
        self.num_nodes = 1

    def run_test(self):
        # Wire up the comparison TestManager and run the generated tests.
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        sync_masternodes(self.nodes)
        test.run()

    def get_tests(self):
        # Generator of TestInstance objects consumed by TestManager.
        if self.tip is None:
            # Parse the best block hash as a Python 2 long (the trailing
            # "L" forces a long literal; base 0 honours the "0x" prefix).
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # Mine 100 blocks so the coinbase reaches spendable maturity.
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test

        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend).  Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1

        # b'0x51' is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN)
        tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN)

        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)

        # Mutate block 2: appending a duplicate of the last tx leaves the
        # merkle root (and therefore the header/hash) unchanged.
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)

        self.tip = block2.sha256
        # The mutated block must be rejected, then the original accepted.
        yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
        height += 1

        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 1000 * COIN # Too high!
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()

        yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])


if __name__ == '__main__':
    InvalidBlockRequestTest().main()
| mit |
joacub/zf-joacub-uploader-twb | vendor/jQuery-File-Upload/server/gae-python/main.py | 168 | 5596 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
# Module-wide upload policy knobs.
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1  # bytes
MAX_FILE_SIZE = 5000000  # bytes
# Image content types eligible for blobstore image serving URLs.
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80'  # max width / height
EXPIRATION_TIME = 300  # seconds; uploaded blobs are deleted after this
def cleanup(blob_keys):
    """Deferred task: delete the given blobs (demo uploads are temporary)."""
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    # Implements the jQuery File Upload server protocol: CORS headers,
    # multipart uploads into the blobstore, JSON result payloads and
    # delete-by-key support.

    def initialize(self, request, response):
        """Attach permissive CORS headers to every response."""
        super(UploadHandler, self).initialize(request, response)
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
        self.response.headers[
            'Access-Control-Allow-Headers'
        ] = 'Content-Type, Content-Range, Content-Disposition'

    def validate(self, file):
        """Check size/type limits; on failure set file['error'], return False."""
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def get_file_size(self, file):
        """Return the byte size of an open file object; position is reset."""
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        """Write raw 'data' into the blobstore; return the new blob key."""
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)

    def handle_upload(self):
        """Process every uploaded field and return a list of result dicts."""
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            if type(fieldStorage) is unicode:
                # Plain form fields (not file parts) arrive as unicode.
                continue
            result = {}
            # Strip any Windows-style directory prefix from the filename.
            result['name'] = re.sub(
                r'^.*\\',
                '',
                fieldStorage.filename
            )
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith(
                                'https'
                            )
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    except:  # Could not get an image serving url
                        pass
                if not 'url' in result:
                    # Fall back to our own DownloadHandler route.
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Demo behaviour: schedule deletion of everything just uploaded
        # after EXPIRATION_TIME seconds via a deferred task.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results

    def options(self):
        pass  # CORS preflight; headers are already set in initialize()

    def head(self):
        pass

    def get(self):
        self.redirect(WEBSITE)

    def post(self):
        # Method override for clients that cannot issue a real DELETE.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            # iframe-transport fallback: redirect with the JSON result
            # substituted into the caller-provided URL template.
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)

    def delete(self):
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    # Serves a stored blob back to the client as a forced download.

    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
        else:
            # Prevent browsers from MIME-sniffing the content-type:
            self.response.headers['X-Content-Type-Options'] = 'nosniff'
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
            # Send the file forcing a download dialog:
            self.send_blob(key, save_as=filename, content_type='application/octet-stream')
# WSGI entry point: uploads on "/", downloads on "/<key>/<filename>".
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
| bsd-3-clause |
gabrielkrell/sendgrid-python | examples/campaigns/campaigns.py | 2 | 4132 | import sendgrid
import json
import os
# Worked examples for every SendGrid v3 Campaigns endpoint.  Each section
# issues a live API call using the SENDGRID_API_KEY environment variable.
# NOTE: "test_url_param" and the numeric ids below are placeholders —
# replace them with real campaign/list/segment/sender ids before running.
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))

##################################################
# Create a Campaign #
# POST /campaigns #

data = {
  "categories": [
    "spring line"
  ],
  "custom_unsubscribe_url": "",
  "html_content": "<html><head><title></title></head><body><p>Check out our spring line!</p></body></html>",
  "ip_pool": "marketing",
  "list_ids": [
    110,
    124
  ],
  "plain_content": "Check out our spring line!",
  "segment_ids": [
    110
  ],
  "sender_id": 124451,
  "subject": "New Products for Spring!",
  "suppression_group_id": 42,
  "title": "March Newsletter"
}
response = sg.client.campaigns.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Retrieve all Campaigns #
# GET /campaigns #

params = {'limit': 1, 'offset': 1}
response = sg.client.campaigns.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Update a Campaign #
# PATCH /campaigns/{campaign_id} #

data = {
  "categories": [
    "summer line"
  ],
  "html_content": "<html><head><title></title></head><body><p>Check out our summer line!</p></body></html>",
  "plain_content": "Check out our summer line!",
  "subject": "New Products for Summer!",
  "title": "May Newsletter"
}
campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Retrieve a single campaign #
# GET /campaigns/{campaign_id} #

campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).get()
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Delete a Campaign #
# DELETE /campaigns/{campaign_id} #

campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).delete()
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Update a Scheduled Campaign #
# PATCH /campaigns/{campaign_id}/schedules #

data = {
  "send_at": 1489451436
}
campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Schedule a Campaign #
# POST /campaigns/{campaign_id}/schedules #

data = {
  "send_at": 1489771528
}
campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# View Scheduled Time of a Campaign #
# GET /campaigns/{campaign_id}/schedules #

campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.get()
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Unschedule a Scheduled Campaign #
# DELETE /campaigns/{campaign_id}/schedules #

campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.delete()
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Send a Campaign #
# POST /campaigns/{campaign_id}/schedules/now #

campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.now.post()
print(response.status_code)
print(response.body)
print(response.headers)

##################################################
# Send a Test Campaign #
# POST /campaigns/{campaign_id}/schedules/test #

data = {
  "to": "your.email@example.com"
}
campaign_id = "test_url_param"
response = sg.client.campaigns._(campaign_id).schedules.test.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
| mit |
LS80/script.module.livestreamer | lib/livestreamer/plugins/livestation.py | 34 | 2666 | import re
from livestreamer.plugin import Plugin, PluginError, PluginOptions
from livestreamer.plugin.api import http, validate
from livestreamer.stream import HLSStream
# Endpoints for the optional livestation.com login flow.
LOGIN_PAGE_URL = "http://www.livestation.com/en/users/new"
LOGIN_POST_URL = "http://www.livestation.com/en/sessions.json"

# Scrapers for the CSRF token, the HLS playlist URL and supported URLs.
_csrf_token_re = re.compile("<meta content=\"([^\"]+)\" name=\"csrf-token\"")
_hls_playlist_re = re.compile("<meta content=\"([^\"]+.m3u8)\" property=\"og:video\" />")
_url_re = re.compile("http(s)?://(\w+\.)?livestation.com")

# Extract the CSRF token from the login page (None when absent).
_csrf_token_schema = validate.Schema(
    validate.transform(_csrf_token_re.search),
    validate.any(None, validate.get(1))
)
# Extract and sanity-check the .m3u8 playlist URL from a channel page.
_hls_playlist_schema = validate.Schema(
    validate.transform(_hls_playlist_re.search),
    validate.any(
        None,
        validate.all(
            validate.get(1),
            validate.url(scheme="http", path=validate.endswith(".m3u8"))
        )
    )
)
# Shape of the JSON login response; "errors.base" holds failure messages.
_login_schema = validate.Schema({
    "email": validate.text,
    validate.optional("errors"): validate.all(
        {
            "base": [validate.text]
        },
        validate.get("base"),
    )
})
class Livestation(Plugin):
    # Livestreamer plugin for livestation.com.  Optionally logs in with
    # the configured email/password before resolving the HLS playlist.
    options = PluginOptions({
        "email": "",
        "password": ""
    })

    @classmethod
    def can_handle_url(self, url):
        return _url_re.match(url)

    def _authenticate(self, email, password):
        """Log in to livestation.com; raises PluginError on any failure."""
        csrf_token = http.get(LOGIN_PAGE_URL, schema=_csrf_token_schema)
        if not csrf_token:
            raise PluginError("Unable to find CSRF token")

        data = {
            "authenticity_token": csrf_token,
            "channel_id": "",
            "commit": "Login",
            "plan_id": "",
            "session[email]": email,
            "session[password]": password,
            "utf8": "\xE2\x9C\x93", # Check Mark Character
        }
        # 422 is accepted so validation errors can be reported nicely.
        res = http.post(LOGIN_POST_URL, data=data, acceptable_status=(200, 422))
        result = http.json(res, schema=_login_schema)
        errors = result.get("errors")
        if errors:
            errors = ", ".join(errors)
            raise PluginError("Unable to authenticate: {0}".format(errors))

        self.logger.info("Successfully logged in as {0}", result["email"])

    def _get_streams(self):
        # Authenticate only when both credentials are configured.
        login_email = self.options.get("email")
        login_password = self.options.get("password")
        if login_email and login_password:
            self._authenticate(login_email, login_password)

        hls_playlist = http.get(self.url, schema=_hls_playlist_schema)
        if not hls_playlist:
            return

        return HLSStream.parse_variant_playlist(self.session, hls_playlist)

__plugin__ = Livestation
| bsd-2-clause |
WenZhuang/pyspider | pyspider/database/base/taskdb.py | 57 | 2609 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-08 10:28:48
# task schema
# Reference schema of a task record, kept here as living documentation.
# NOTE: this is a bare dict expression — it is evaluated and discarded at
# import time and has no runtime effect.
{
    'task': {
        'taskid': str,  # new, not change
        'project': str,  # new, not change
        'url': str,  # new, not change
        'status': int,  # change
        'schedule': {
            'priority': int,
            'retries': int,
            'retried': int,
            'exetime': int,
            'age': int,
            'itag': str,
            # 'recrawl': int
        },  # new and restart
        'fetch': {
            'method': str,
            'headers': dict,
            'data': str,
            'timeout': int,
            'save': dict,
        },  # new and restart
        'process': {
            'callback': str,
        },  # new and restart
        'track': {
            'fetch': {
                'ok': bool,
                'time': int,
                'status_code': int,
                'headers': dict,
                'encoding': str,
                'content': str,
            },
            'process': {
                'ok': bool,
                'time': int,
                'follows': int,
                'outputs': int,
                'logs': str,
                'exception': str,
            },
            'save': object,  # jsonable object saved by processor
        },  # finish
        'lastcrawltime': int,  # keep between request
        'updatetime': int,  # keep between request
    }
}
class TaskDB(object):
    """Abstract base class for task storage backends.

    Concrete backends must implement the CRUD-style methods below; the
    status constants and the string/int converters are shared by all of
    them.
    """

    # Task status codes shared by every backend.
    ACTIVE = 1
    SUCCESS = 2
    FAILED = 3
    BAD = 4

    projects = set()  # names of projects known to this taskdb

    def load_tasks(self, status, project=None, fields=None):
        raise NotImplementedError

    def get_task(self, project, taskid, fields=None):
        raise NotImplementedError

    def status_count(self, project):
        '''
        return a dict
        '''
        raise NotImplementedError

    def insert(self, project, taskid, obj={}):
        raise NotImplementedError

    def update(self, project, taskid, obj={}, **kwargs):
        raise NotImplementedError

    def drop(self, project):
        raise NotImplementedError

    @staticmethod
    def status_to_string(status):
        """Map a numeric status to its name ('UNKNOWN' when unrecognised)."""
        names = {
            1: 'ACTIVE',
            2: 'SUCCESS',
            3: 'FAILED',
            4: 'BAD',
        }
        return names.get(status, 'UNKNOWN')

    @staticmethod
    def status_to_int(status):
        """Map a status name to its numeric code (BAD when unrecognised)."""
        codes = {
            'ACTIVE': 1,
            'SUCCESS': 2,
            'FAILED': 3,
            'BAD': 4,
        }
        return codes.get(status, 4)
| apache-2.0 |
kingvuplus/Gui3 | lib/python/Plugins/SystemPlugins/FastScan/plugin.py | 59 | 13191 | # -*- coding: utf-8 -*-
from os import path as os_path, walk as os_walk, unlink as os_unlink
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import config, ConfigSelection, ConfigYesNo, getConfigListEntry, ConfigSubsection, ConfigText
from Components.ConfigList import ConfigListScreen
from Components.NimManager import nimmanager
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ProgressBar import ProgressBar
from Components.ServiceList import refreshServiceList
from Components.ActionMap import ActionMap
from enigma import eFastScan, eDVBFrontendParametersSatellite, eTimer
# Persisted plugin settings.
config.misc.fastscan = ConfigSubsection()
# repr() of the last (tuner, provider, hd, keepnumbering, keepsettings)
# tuple; read back with eval() elsewhere in this module.
config.misc.fastscan.last_configuration = ConfigText(default = "()")
# When enabled, a scan is scheduled automatically while in standby.
config.misc.fastscan.auto = ConfigYesNo(default = False)
class FastScanStatus(Screen):
    # Progress screen: frees the tuner, drives eFastScan and shows the
    # scan progress/result; restores the previous service on close.
    skin = """
    <screen position="150,115" size="420,180" title="Fast Scan">
        <widget name="frontend" pixmap="icons/scan-s.png" position="5,5" size="64,64" transparent="1" alphatest="on" />
        <widget name="scan_state" position="10,120" zPosition="2" size="400,30" font="Regular;18" />
        <widget name="scan_progress" position="10,155" size="400,15" pixmap="progress_big.png" borderWidth="2" borderColor="#cccccc" />
    </screen>"""

    def __init__(self, session, scanTuner=0, transponderParameters=None, scanPid=900, keepNumbers=False, keepSettings=False, providerName='Favorites'):
        Screen.__init__(self, session)
        self.setTitle(_("Fast Scan"))

        self.scanPid = scanPid
        self.scanTuner = scanTuner
        self.transponderParameters = transponderParameters
        self.keepNumbers = keepNumbers
        self.keepSettings = keepSettings
        self.providerName = providerName
        self.isDone = False

        self.onClose.append(self.__onClose)

        self["frontend"] = Pixmap()
        self["scan_progress"] = ProgressBar()
        self["scan_state"] = Label(_("scan state"))

        # Free the tuner: close PiP and stop the live service, remembering
        # the current service so it can be restored afterwards.
        if self.session.pipshown:
            from Screens.InfoBar import InfoBar
            InfoBar.instance and hasattr(InfoBar.instance, "showPiP") and InfoBar.instance.showPiP()
        self.prevservice = self.session.nav.getCurrentlyPlayingServiceReference()
        self.session.nav.stopService()

        self["actions"] = ActionMap(["OkCancelActions"],
            {
                "ok": self.ok,
                "cancel": self.cancel
            })

        self.onFirstExecBegin.append(self.doServiceScan)

    def __onClose(self):
        # Detach the C++ callbacks before dropping the scan object.
        self.scan.scanCompleted.get().remove(self.scanCompleted)
        self.scan.scanProgress.get().remove(self.scanProgress)
        del self.scan

    def doServiceScan(self):
        self["scan_state"].setText(_('Scanning %s...') % (self.providerName))
        self["scan_progress"].setValue(0)
        self.scan = eFastScan(self.scanPid, self.providerName, self.transponderParameters, self.keepNumbers, self.keepSettings)
        self.scan.scanCompleted.get().append(self.scanCompleted)
        self.scan.scanProgress.get().append(self.scanProgress)
        fstfile = None
        fntfile = None
        # If pre-downloaded fastscan tables (*_FST*/*_FNT* .bin files) are
        # found under /tmp, scan from those files instead of tuning; the
        # files are consumed (deleted) afterwards.
        for root, dirs, files in os_walk('/tmp/'):
            for f in files:
                if f.endswith('.bin'):
                    if '_FST' in f:
                        fstfile = os_path.join(root, f)
                    elif '_FNT' in f:
                        fntfile = os_path.join(root, f)
        if fstfile and fntfile:
            self.scan.startFile(fntfile, fstfile)
            os_unlink(fstfile)
            os_unlink(fntfile)
        else:
            self.scan.start(self.scanTuner)

    def scanProgress(self, progress):
        self["scan_progress"].setValue(progress)

    def scanCompleted(self, result):
        # A negative result signals failure; otherwise it is the number
        # of channels found.
        self.isDone = True
        if result < 0:
            self["scan_state"].setText(_('Scanning failed!'))
        else:
            self["scan_state"].setText(ngettext('List version %d, found %d channel', 'List version %d, found %d channels', result) % (self.scan.getVersion(), result))

    def restoreService(self):
        if self.prevservice:
            self.session.nav.playService(self.prevservice)

    def ok(self):
        if self.isDone:
            self.cancel()

    def cancel(self):
        if self.isDone:
            refreshServiceList()
        self.restoreService()
        self.close()
class FastScanScreen(ConfigListScreen, Screen):
    # Setup screen: choose tuner, provider and options, then launch
    # FastScanStatus to perform the actual scan.
    skin = """
    <screen position="100,115" size="520,290" title="Fast Scan">
        <widget name="config" position="10,10" size="500,250" scrollbarMode="showOnDemand" />
        <widget name="introduction" position="10,265" size="500,25" font="Regular;20" halign="center" />
    </screen>"""

    # (display name, (index into 'transponders', pid passed to eFastScan,
    #                 provider offers an HD list at pid+1 — see startScan))
    providers = [
        ('Canal Digitaal', (1, 900, True)),
        ('TV Vlaanderen', (1, 910, True)),
        ('TéléSAT', (0, 920, True)),
        ('HD Austria', (0, 950, False)),
        ('Skylink Czech Republic', (1, 30, False)),
        ('Skylink Slovak Republic', (1, 31, False)),
        ('AustriaSat Magyarország Eutelsat 9E', (2, 951, False)),
        ('AustriaSat Magyarország Astra 3', (1, 951, False)),
        ('TéléSAT Astra3', (1, 920, True)),
        ('HD Austria Astra3', (1, 950, False)),
        ('Canal Digitaal Astra 1', (0, 900, True)),
        ('TV Vlaanderen Astra 1', (0, 910, True))]

    # DVB-S(2) tuning data referenced by the first element of each
    # provider entry: (frequency, symbol rate, FEC, orbital position,
    # polarisation, inversion, system, modulation, rolloff, pilot).
    transponders = ((12515000, 22000000, eDVBFrontendParametersSatellite.FEC_5_6, 192,
        eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.Inversion_Unknown,
        eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_QPSK,
        eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_Off),
        (12070000, 27500000, eDVBFrontendParametersSatellite.FEC_3_4, 235,
        eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.Inversion_Unknown,
        eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_QPSK,
        eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_Off),
        (12074000, 27500000, eDVBFrontendParametersSatellite.FEC_3_4, 90,
        eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.Inversion_Unknown,
        eDVBFrontendParametersSatellite.System_DVB_S2, eDVBFrontendParametersSatellite.Modulation_8PSK,
        eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_On))

    def __init__(self, session, nimList):
        Screen.__init__(self, session)

        self["actions"] = ActionMap(["SetupActions", "MenuActions"],
        {
            "ok": self.keyGo,
            "save": self.keySave,
            "cancel": self.keyCancel,
            "menu": self.closeRecursive,
        }, -2)

        providerList = list(x[0] for x in self.providers)

        # Restore the last used settings; fall back to sane defaults when
        # nothing was stored or the stored provider no longer exists.
        lastConfiguration = eval(config.misc.fastscan.last_configuration.value)
        if not lastConfiguration or not tuple(x for x in self.providers if x[0] == lastConfiguration[1]):
            lastConfiguration = (nimList[0][0], providerList[0], True, True, False)

        self.scan_nims = ConfigSelection(default = lastConfiguration[0], choices = nimList)
        self.scan_provider = ConfigSelection(default = lastConfiguration[1], choices = providerList)
        self.scan_hd = ConfigYesNo(default = lastConfiguration[2])
        self.scan_keepnumbering = ConfigYesNo(default = lastConfiguration[3])
        self.scan_keepsettings = ConfigYesNo(default = lastConfiguration[4])

        self.list = []
        self.tunerEntry = getConfigListEntry(_("Tuner"), self.scan_nims)
        self.list.append(self.tunerEntry)

        self.scanProvider = getConfigListEntry(_("Provider"), self.scan_provider)
        self.list.append(self.scanProvider)

        self.scanHD = getConfigListEntry(_("HD list"), self.scan_hd)
        self.list.append(self.scanHD)

        self.list.append(getConfigListEntry(_("Use fastscan channel numbering"), self.scan_keepnumbering))

        self.list.append(getConfigListEntry(_("Use fastscan channel names"), self.scan_keepsettings))

        self.list.append(getConfigListEntry(_("Enable auto fast scan"), config.misc.fastscan.auto))

        ConfigListScreen.__init__(self, self.list)
        self["config"].list = self.list
        self["config"].l.setList(self.list)

        self.finished_cb = None

        self["introduction"] = Label(_("Select your provider, and press OK to start the scan"))

    def saveConfiguration(self):
        # Stored as a repr()'d tuple (Python 2 backtick syntax) and read
        # back with eval() — see __init__ and FastScanAuto().
        config.misc.fastscan.last_configuration.value = `(self.scan_nims.value, self.scan_provider.value, self.scan_hd.value, self.scan_keepnumbering.value, self.scan_keepsettings.value)`
        config.misc.fastscan.save()

    def keySave(self):
        self.saveConfiguration()
        self.close()

    def keyGo(self):
        self.saveConfiguration()
        self.startScan()

    def getTransponderParameters(self, number):
        """Build an eDVBFrontendParametersSatellite from transponders[number]."""
        transponderParameters = eDVBFrontendParametersSatellite()
        transponderParameters.frequency = self.transponders[number][0]
        transponderParameters.symbol_rate = self.transponders[number][1]
        transponderParameters.fec = self.transponders[number][2]
        transponderParameters.orbital_position = self.transponders[number][3]
        transponderParameters.polarisation = self.transponders[number][4]
        transponderParameters.inversion = self.transponders[number][5]
        transponderParameters.system = self.transponders[number][6]
        transponderParameters.modulation = self.transponders[number][7]
        transponderParameters.rolloff = self.transponders[number][8]
        transponderParameters.pilot = self.transponders[number][9]
        return transponderParameters

    def startScan(self):
        parameters = tuple(x[1] for x in self.providers if x[0] == self.scan_provider.value)[0]
        pid = parameters[1]
        if self.scan_hd.value and parameters[2]:
            pid += 1  # the HD channel list is carried on the next pid
        if self.scan_nims.value:
            self.session.open(FastScanStatus, scanTuner = int(self.scan_nims.value),
                transponderParameters = self.getTransponderParameters(parameters[0]),
                scanPid = pid, keepNumbers = self.scan_keepnumbering.value, keepSettings = self.scan_keepsettings.value,
                providerName = self.scan_provider.getText())

    def keyCancel(self):
        self.close()
class FastScanAutoScreen(FastScanScreen):
    # Headless variant used for the unattended scan while in standby.
    # Reuses the Standby skin and closes itself with True on success (or
    # deliberate abort) and False on failure — see restartScanAutoStartTimer.

    def __init__(self, session, lastConfiguration):
        print "[AutoFastScan] start"
        Screen.__init__(self, session)
        self.skinName="Standby"

        self["actions"] = ActionMap( [ "StandbyActions" ],
        {
            "power": self.Power,
            "discrete_on": self.Power
        }, -1)

        self.onClose.append(self.__onClose)

        parameters = tuple(x[1] for x in self.providers if x[0] == lastConfiguration[1])
        if parameters:
            parameters = parameters[0]
            pid = parameters[1]
            if lastConfiguration[2] and parameters[2]:
                pid += 1  # HD list is carried on the next pid
            self.scan = eFastScan(pid, lastConfiguration[1], self.getTransponderParameters(parameters[0]), lastConfiguration[3], lastConfiguration[4])
            self.scan.scanCompleted.get().append(self.scanCompleted)
            self.scan.start(int(lastConfiguration[0]))
        else:
            # Saved provider no longer exists: skip this run.
            self.scan = None
            self.close(True)

    def __onClose(self):
        if self.scan:
            self.scan.scanCompleted.get().remove(self.scanCompleted)
            del self.scan

    def scanCompleted(self, result):
        print "[AutoFastScan] completed result = ", result
        refreshServiceList()
        self.close(result>0)

    def Power(self):
        # Abort the scan and forward the power key to the standby screen.
        from Screens.Standby import inStandby
        inStandby.Power()
        print "[AutoFastScan] aborted due to power button pressed"
        self.close(True)

    def createSummary(self):
        from Screens.Standby import StandbySummary
        return StandbySummary
def FastScanMain(session, **kwargs):
    """Entry point of the "Fast Scan" menu item.

    Opens the scan screen with the list of usable DVB-S tuners, or shows an
    error message when a recording is running or no suitable tuner exists.
    """
    if session.nav.RecordTimer.isRecording():
        session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to scan."), MessageBox.TYPE_ERROR)
        return

    # Collect every sat tuner that is configured (i.e. not set to "nothing").
    usable = []
    for slot in nimmanager.nim_slots:
        if not slot.isCompatible("DVB-S") or slot.config_mode == "nothing":
            continue
        if slot.config_mode in ("loopthrough", "satposdepends"):
            root_id = nimmanager.sec.getRoot(slot.slot_id, int(slot.config.connectedTo.value))
            if slot.type == nimmanager.nim_slots[root_id].type:
                # Same tuner type connected to itself (DVB-S<->DVB-S or
                # DVB-S2<->DVB-S2): the linked slot adds nothing, skip it.
                continue
        usable.append((str(slot.slot), slot.friendly_full_description))

    if usable:
        session.open(FastScanScreen, usable)
    else:
        session.open(MessageBox, _("No suitable sat tuner found!"), MessageBox.TYPE_ERROR)
# Session reference captured by startSession(); needed to open screens from the timer.
Session = None
# Timer driving the periodic automatic fast scan while in standby.
FastScanAutoStartTimer = eTimer()
def restartScanAutoStartTimer(reply=False):
    """Re-arm the automatic scan timer.

    :param reply: result of the last scan attempt. A failed scan (False)
        is retried after one hour; a successful/aborted one (True) is
        rescheduled for the next day.
    """
    if not reply:
        # Fix: original message was garbled ("Scan was not succesfully retry
        # in one hour"); parenthesised print works in Python 2 and 3.
        print("[AutoFastScan] Scan was not successful, retrying in one hour")
        FastScanAutoStartTimer.startLongTimer(3600)
    else:
        FastScanAutoStartTimer.startLongTimer(86400)
def FastScanAuto():
    """Timer callback: rerun the last manual scan configuration unattended."""
    # NOTE(review): eval() on a config value -- assumed to be a repr()'d tuple
    # written only by this plugin; ast.literal_eval would be safer, verify the
    # stored format before changing.
    lastConfiguration = eval(config.misc.fastscan.last_configuration.value)
    if not lastConfiguration or Session.nav.RecordTimer.isRecording():
        # Nothing stored yet, or a recording is running: retry in an hour.
        restartScanAutoStartTimer()
    else:
        # The screen closes with the scan result, which re-arms the timer.
        Session.openWithCallback(restartScanAutoStartTimer, FastScanAutoScreen, lastConfiguration)

# Fire the automatic scan whenever the timer expires.
FastScanAutoStartTimer.callback.append(FastScanAuto)
def leaveStandby():
    # Leaving standby cancels any pending automatic scan.
    FastScanAutoStartTimer.stop()
def standbyCountChanged(value):
    """Standby-counter notifier: schedule an automatic scan shortly after
    entering standby and make sure it is cancelled again on wake-up."""
    if config.misc.fastscan.auto.value:
        from Screens.Standby import inStandby
        inStandby.onClose.append(leaveStandby)
        FastScanAutoStartTimer.startLongTimer(90)
def startSession(session, **kwargs):
    """SESSIONSTART hook: remember the session and watch the standby counter."""
    global Session
    Session = session
    config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call=False)
def FastScanStart(menuid, **kwargs):
    """Menu hook: contribute the "Fast Scan" entry to the scan menu only."""
    if menuid != "scan":
        return []
    return [(_("Fast Scan"), FastScanMain, "fastscan", None)]
def Plugins(**kwargs):
    """Plugin registration: menu entry plus session-start hook."""
    # Only advertise the plugin on receivers that actually have a sat tuner.
    if nimmanager.hasNimType("DVB-S"):
        return [PluginDescriptor(name=_("Fast Scan"), description="Scan Dutch/Belgian sat provider", where = PluginDescriptor.WHERE_MENU, fnc=FastScanStart),
                PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART], fnc=startSession)]
    else:
        return []
| gpl-2.0 |
hkariti/ansible | lib/ansible/modules/network/nxos/nxos_linkagg.py | 2 | 12721 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata, consumed by ansible-doc and the CI matrix.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_linkagg
extends_documentation_fragment: nxos
version_added: "2.5"
short_description: Manage link aggregation groups on Cisco NXOS devices.
description:
- This module provides declarative management of link aggregation groups
on Cisco NXOS devices.
author:
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.0(3)I5(1).
- C(state=absent) removes the portchannel config and interface if it
already exists. If members to be removed are not explicitly
passed, all existing members (if any), are removed.
- Members must be a list.
- LACP needs to be enabled first if active/passive modes are used.
options:
group:
description:
- Channel-group number for the port-channel
Link aggregation group.
required: true
mode:
description:
- Mode for the link aggregation group.
required: false
default: on
choices: ['active','passive','on']
min_links:
description:
- Minimum number of ports required up
before bringing up the link aggregation group.
required: false
default: null
members:
description:
- List of interfaces that will be managed in the link aggregation group.
required: false
default: null
force:
description:
- When true it forces link aggregation group members to match what
is declared in the members param. This can be used to remove members.
required: false
choices: [True, False]
default: False
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
required: false
default: present
choices: ['present','absent']
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
default: no
"""
EXAMPLES = """
- name: create link aggregation group
nxos_linkagg:
group: 99
state: present
- name: delete link aggregation group
nxos_linkagg:
group: 99
state: absent
- name: set link aggregation group to members
nxos_linkagg:
group: 10
min_links: 3
mode: active
members:
- Ethernet1/2
- Ethernet1/4
- name: remove link aggregation group from Ethernet1/2
nxos_linkagg:
group: 10
min_links: 3
mode: active
members:
- Ethernet1/4
- name: Create aggregate of linkagg definitions
nxos_linkagg:
aggregate:
- { group: 3 }
- { group: 100, min_links: 3 }
- name: Remove aggregate of linkagg definitions
nxos_linkagg:
aggregate:
- { group: 3 }
- { group: 100, min_links: 3 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface port-channel 30
- lacp min-links 5
- interface Ethernet2/1
- channel-group 30 mode active
- no interface port-channel 30
"""
import re
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
def execute_show_command(command, module):
    """Run *command* on the device and return the command output list.

    For CLI (``cliconf``) connections the structured variant of the
    port-channel summary is requested by appending ``| json`` so the output
    can be parsed the same way as native NXAPI output.
    """
    device_info = get_capabilities(module)
    network_api = device_info.get('network_api', 'nxapi')

    if network_api == 'cliconf' and 'show port-channel summary' in command:
        command += ' | json'

    # Fix: the original only assigned ``body`` inside the 'cliconf'/'nxapi'
    # branches, so any other network_api value raised UnboundLocalError.
    # Both branches executed the exact same call, so run it unconditionally.
    body = run_commands(module, [command])
    return body
def search_obj_in_list(group, lst):
    """Return the first entry of *lst* whose 'group' equals *group*, else None."""
    matches = (entry for entry in lst if entry['group'] == group)
    return next(matches, None)
def map_obj_to_commands(updates, module):
    """Compute the NX-OS configuration commands turning *have* into *want*.

    :param updates: tuple (want, have) of port-channel object lists as built
        by map_params_to_obj()/map_config_to_obj().
    :param module: AnsibleModule; only params['purge'] and params['force']
        are read here.
    :returns: ordered list of CLI command strings (empty when idempotent).

    Note: each ``w`` dict is mutated (its 'state' key is removed) so that the
    purge comparison below only sees configuration keys.
    """
    commands = list()
    want, have = updates
    purge = module.params['purge']
    force = module.params['force']

    for w in want:
        group = w['group']
        mode = w['mode']
        min_links = w['min_links']
        members = w.get('members') or []
        state = w['state']
        del w['state']

        obj_in_have = search_obj_in_list(group, have)

        if state == 'absent':
            if obj_in_have:
                # Detach every member that is not explicitly kept, then drop
                # the port-channel interface itself.
                members_to_remove = list(set(obj_in_have['members']) - set(members))
                if members_to_remove:
                    for m in members_to_remove:
                        commands.append('interface {0}'.format(m))
                        commands.append('no channel-group {0}'.format(obj_in_have['group']))
                        commands.append('exit')
                commands.append('no interface port-channel {0}'.format(group))

        elif state == 'present':
            if not obj_in_have:
                # New port-channel: create it, set min-links, attach members.
                commands.append('interface port-channel {0}'.format(group))
                # min_links went through str(); an absent value is the string 'None'.
                if min_links != 'None':
                    commands.append('lacp min-links {0}'.format(min_links))
                commands.append('exit')

                if members:
                    for m in members:
                        commands.append('interface {0}'.format(m))
                        if force:
                            commands.append('channel-group {0} force mode {1}'.format(group, mode))
                        else:
                            commands.append('channel-group {0} mode {1}'.format(group, mode))
            else:
                if members:
                    if not obj_in_have['members']:
                        # Existing but empty port-channel: attach all members.
                        for m in members:
                            commands.append('interface port-channel {0}'.format(group))
                            commands.append('exit')
                            commands.append('interface {0}'.format(m))
                            if force:
                                commands.append('channel-group {0} force mode {1}'.format(group, mode))
                            else:
                                commands.append('channel-group {0} mode {1}'.format(group, mode))
                    elif set(members) != set(obj_in_have['members']):
                        # Membership differs: add missing members...
                        missing_members = list(set(members) - set(obj_in_have['members']))
                        for m in missing_members:
                            commands.append('interface port-channel {0}'.format(group))
                            commands.append('exit')
                            commands.append('interface {0}'.format(m))
                            if force:
                                commands.append('channel-group {0} force mode {1}'.format(group, mode))
                            else:
                                commands.append('channel-group {0} mode {1}'.format(group, mode))

                        # ...and detach members no longer wanted.
                        superfluous_members = list(set(obj_in_have['members']) - set(members))
                        for m in superfluous_members:
                            commands.append('interface port-channel {0}'.format(group))
                            commands.append('exit')
                            commands.append('interface {0}'.format(m))
                            commands.append('no channel-group {0}'.format(group))

    if purge:
        # Remove any configured port-channel not mentioned in want.
        for h in have:
            obj_in_want = search_obj_in_list(h['group'], want)
            if not obj_in_want:
                commands.append('no interface port-channel {0}'.format(h['group']))

    return commands
def map_params_to_obj(module):
    """Normalise the module parameters into a list of 'want' objects.

    Every entry carries string-typed 'group' and 'min_links' (str(None) gives
    the literal 'None', which map_obj_to_commands treats as "unset"). With
    ``aggregate``, missing per-item keys are filled in-place from the
    top-level parameters.
    """
    params = module.params
    aggregate = params.get('aggregate')

    if not aggregate:
        return [{
            'group': str(params['group']),
            'mode': params['mode'],
            'min_links': str(params['min_links']),
            'members': params['members'],
            'state': params['state']
        }]

    objects = []
    for item in aggregate:
        # Fill unset keys from the common module arguments (in place, as the
        # original did).
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        entry = item.copy()
        entry['group'] = str(entry['group'])
        entry['min_links'] = str(entry['min_links'])
        objects.append(entry)
    return objects
def parse_min_links(module, group):
    """Return the configured 'lacp min-links' of port-channel *group*, or None."""
    section = get_config(module, flags=['| section interface.port-channel{0}'.format(group)])
    found = re.search(r'lacp min-links (\S+)', section, re.M)
    return found.group(1) if found else None
def parse_mode(module, m):
    """Return the channel-group mode configured on member interface *m*, or None."""
    section = get_config(module, flags=['| section interface.{0}'.format(m)])
    found = re.search(r'mode (\S+)', section, re.M)
    return found.group(1) if found else None
def get_members(channel):
    """Return the member interface names of one ROW_channel entry.

    The structured device output uses a bare dict for a single member and a
    list for several; both shapes are normalised here. An entry without a
    TABLE_member (no members) yields an empty list.
    """
    if 'TABLE_member' not in channel:
        return []
    rows = channel['TABLE_member']['ROW_member']
    if isinstance(rows, dict):
        # Single member collapses to a bare dict.
        rows = [rows]
    if not isinstance(rows, list):
        return []
    return [row.get('port') for row in rows]


def parse_members(output, group):
    """Return the member list of port-channel *group* from the summary output.

    Fix: the original left ``members`` unassigned when no channel matched
    *group* (or when ROW_channel was a dict for a different group), raising
    UnboundLocalError; an empty list is now returned in those cases.
    """
    members = []
    channels = output['TABLE_channel']['ROW_channel']
    if isinstance(channels, dict):
        # A single configured port-channel is rendered as a bare dict.
        channels = [channels]
    if isinstance(channels, list):
        for channel in channels:
            if channel['group'] == group:
                members = get_members(channel)
    return members
def parse_channel_options(module, output, channel):
    """Build the 'have' representation of one configured port-channel."""
    obj = {}
    group = channel['group']
    obj['group'] = group
    # Fix: use the same key spelling as the 'want' objects ('min_links', not
    # 'min-links') so both sides of the comparison share one schema. Nothing
    # in this module read the hyphenated key.
    obj['min_links'] = parse_min_links(module, group)
    members = parse_members(output, group)
    obj['members'] = members
    # All members share the channel-group mode; querying one interface is
    # enough. The original queried every member and kept only the last
    # answer, issuing one device round-trip per member for the same result.
    if members:
        obj['mode'] = parse_mode(module, members[-1])
    return obj
def map_config_to_obj(module):
    """Return the list of port-channels currently configured on the device.

    Parses the structured output of 'show port-channel summary'; returns an
    empty list when the device reports no output or no channels.
    """
    objs = list()
    output = execute_show_command('show port-channel summary', module)[0]
    if not output:
        return list()

    try:
        channels = output['TABLE_channel']['ROW_channel']
    except KeyError:
        # No port-channel table at all: nothing configured.
        return objs

    if channels:
        if isinstance(channels, list):
            for channel in channels:
                obj = parse_channel_options(module, output, channel)
                objs.append(obj)
        elif isinstance(channels, dict):
            # A single port-channel is rendered as a bare dict.
            obj = parse_channel_options(module, output, channels)
            objs.append(obj)

    return objs
def main():
    """Main entry point for module execution.

    Builds the argument spec (flat parameters plus the ``aggregate`` list),
    diffs the desired state against the device configuration and pushes the
    resulting commands unless running in check mode.
    """
    element_spec = dict(
        group=dict(type='int'),
        mode=dict(required=False, choices=['on', 'active', 'passive'], default='on', type='str'),
        min_links=dict(required=False, default=None, type='int'),
        members=dict(required=False, default=None, type='list'),
        force=dict(required=False, default=False, type='bool'),
        state=dict(required=False, choices=['absent', 'present'], default='present')
    )

    aggregate_spec = deepcopy(element_spec)
    # Inside aggregate the group is mandatory per item.
    aggregate_spec['group'] = dict(required=True)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
        purge=dict(default=False, type='bool')
    )

    argument_spec.update(element_spec)
    argument_spec.update(nxos_argument_spec)

    required_one_of = [['group', 'aggregate']]
    mutually_exclusive = [['group', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    # Boilerplate warning plumbing; nothing populates it at the moment, so
    # the 'warnings' key is never emitted.
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True

    module.exit_json(**result)
| gpl-3.0 |
kosgroup/odoo | odoo/service/server.py | 5 | 36250 | #-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import random
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest
import werkzeug.serving
from werkzeug.debug import DebuggedApplication
if os.name == 'posix':
# Unix only for workers
import fcntl
import resource
import psutil
else:
# Windows shim
signal.SIGHUP = -1
# Optional process names for workers
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import odoo
from odoo.modules.registry import Registry
from odoo.release import nt_service_name
import odoo.tools.config as config
from odoo.tools import stripped_sys_argv, dumpstacks, log_ormcache_stats
_logger = logging.getLogger(__name__)
try:
import watchdog
from watchdog.observers import Observer
from watchdog.events import FileCreatedEvent, FileModifiedEvent, FileMovedEvent
except ImportError:
watchdog = None
SLEEP_INTERVAL = 60 # 1 min
def memory_info(process):
    """Return (rss, vms) for *process*, across psutil API generations.

    psutil < 2.0 only offers get_memory_info(); psutil >= 3.0 only offers
    memory_info(); whichever accessor exists is used.
    """
    getter = getattr(process, 'memory_info', None)
    if getter is None:
        getter = process.get_memory_info
    stats = getter()
    return (stats.rss, stats.vms)
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
    """Mix-in routing the socket server's error report to the odoo logger
    instead of stderr, ignoring harmless client disconnects."""

    def handle_error(self, request, client_address):
        t, e, _ = sys.exc_info()
        if t == socket.error and e.errno == errno.EPIPE:
            # broken pipe, ignore error: the client went away mid-response
            return
        _logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
    """ werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    uses this class, sets the socket and calls process_request() manually.
    """
    def __init__(self, app):
        # Host/port are dummies: the real listen socket belongs to PreforkServer.
        werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)

    def server_bind(self):
        # We don't bind because we use the listen socket of PreforkServer#socket
        # instead; close the dummy socket the base class created.
        if self.socket:
            self.socket.close()

    def server_activate(self):
        # Don't listen: accepting happens on PreforkServer#socket.
        pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
    """WSGI request handler that tags its worker thread with a stable name."""

    def setup(self):
        # flag the current thread as handling a http request
        super(RequestHandler, self).setup()
        me = threading.currentThread()
        me.name = 'odoo.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
    """ werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environment; this is used by autoreload to keep the listen
    socket open when a reload happens.
    """
    def __init__(self, host, port, app):
        super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
                                                           handler=RequestHandler)

    def server_bind(self):
        # systemd-style socket activation: LISTEN_FDS/LISTEN_PID hand us an
        # already-bound fd; only honour it if it was addressed to this process.
        envfd = os.environ.get('LISTEN_FDS')
        if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
            self.reload_socket = True
            self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd)) ? it seem python duplicate the fd.
        else:
            self.reload_socket = False
            super(ThreadedWSGIServerReloadable, self).server_bind()

    def server_activate(self):
        # An inherited socket is already listening; don't listen() twice.
        if not self.reload_socket:
            super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# FileSystem Watcher for autoreload and cache invalidation
#----------------------------------------------------------
class FSWatcher(object):
    """Watch the addons directories and restart the server whenever a
    syntactically valid .py file is created, modified or moved there."""

    def __init__(self):
        self.observer = Observer()
        for path in odoo.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        # Called by watchdog for every filesystem event in the watched trees.
        if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
            if not event.is_directory:
                # For a move, the interesting path is the destination.
                path = getattr(event, 'dest_path', event.src_path)
                if path.endswith('.py'):
                    try:
                        # Fix: close the file handle deterministically; the
                        # original open(path, 'rb').read() relied on refcounting
                        # and leaked the handle on non-CPython / on errors.
                        with open(path, 'rb') as sourcefile:
                            source = sourcefile.read() + '\n'
                        # Only reload when the new code at least compiles.
                        compile(source, path, 'exec')
                    except SyntaxError:
                        _logger.error('autoreload: python code change detected, SyntaxError in %s', path)
                    else:
                        _logger.info('autoreload: python code updated, autoreload activated')
                        restart()

    def start(self):
        self.observer.start()
        _logger.info('AutoReload watcher running')

    def stop(self):
        self.observer.stop()
        self.observer.join()
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
    """Shared state and helpers for the Threaded, Gevent and Prefork servers."""

    def __init__(self, app):
        # TODO Change the xmlrpc_* options to http_*
        self.app = app
        # config
        self.interface = config['xmlrpc_interface'] or '0.0.0.0'
        self.port = config['xmlrpc_port']
        # runtime
        self.pid = os.getpid()

    def close_socket(self, sock):
        """ Closes a socket instance cleanly
        :param sock: the network socket to close
        :type sock: socket.socket
        """
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error, e:
            if e.errno == errno.EBADF:
                # Werkzeug > 0.9.6 closes the socket itself (see commit
                # https://github.com/mitsuhiko/werkzeug/commit/4d8ca089)
                return
            # On OSX, socket shutdowns both sides if any side closes it
            # causing an error 57 'Socket is not connected' on shutdown
            # of the other side (or something), see
            # http://bugs.python.org/issue4397
            # note: stdlib fixed test, not behavior
            if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
                raise
        sock.close()
class ThreadedServer(CommonServer):
    """Default server: the werkzeug HTTP daemon plus daemon cron threads,
    with the main thread parked waiting for termination signals."""

    def __init__(self, app):
        super(ThreadedServer, self).__init__(app)
        self.main_thread_id = threading.currentThread().ident
        # Variable keeping track of the number of calls to the signal handler defined
        # below. This variable is monitored by ``quit_on_signals()``.
        self.quit_signals_received = 0

        #self.socket = None
        self.httpd = None

    def signal_handler(self, sig, frame):
        if sig in [signal.SIGINT, signal.SIGTERM]:
            # shutdown on kill -INT or -TERM
            self.quit_signals_received += 1
            if self.quit_signals_received > 1:
                # logging.shutdown was already called at this point.
                sys.stderr.write("Forced shutdown.\n")
                os._exit(0)
        elif sig == signal.SIGHUP:
            # restart on kill -HUP
            odoo.phoenix = True
            self.quit_signals_received += 1

    def cron_thread(self, number):
        # Endless polling loop run by each cron thread: acquire pending jobs
        # on every ready registry until none is left, then sleep again.
        while True:
            time.sleep(SLEEP_INTERVAL + number)     # Steve Reich timing style
            registries = odoo.modules.registry.Registry.registries
            _logger.debug('cron%d polling for jobs', number)
            for db_name, registry in registries.iteritems():
                while registry.ready:
                    acquired = odoo.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
                    if not acquired:
                        break

    def cron_spawn(self):
        """ Start the above runner function in a daemon thread.

        The thread is a typical daemon thread: it will never quit and must be
        terminated when the main process exits - with no consequence (the processing
        threads it spawns are not marked daemon).
        """
        # Force call to strptime just before starting the cron thread
        # to prevent time.strptime AttributeError within the thread.
        # See: http://bugs.python.org/issue7980
        datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
        for i in range(odoo.tools.config['max_cron_threads']):
            # Fix: pass the thread number as an argument instead of closing
            # over the loop variable (``def target(): self.cron_thread(i)``
            # read ``i`` late, so several threads could get the same number).
            t = threading.Thread(target=self.cron_thread, args=(i,),
                                 name="odoo.service.cron.cron%d" % i)
            t.setDaemon(True)
            t.start()
            _logger.debug("cron%d started!" % i)

    def http_thread(self):
        # Trampoline keeping ``self.app`` late-bound (the WSGI app may be
        # replaced, e.g. wrapped by the debugger).
        def app(e, s):
            return self.app(e, s)
        self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
        self.httpd.serve_forever()

    def http_spawn(self):
        t = threading.Thread(target=self.http_thread, name="odoo.service.httpd")
        t.setDaemon(True)
        t.start()
        _logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)

    def start(self, stop=False):
        _logger.debug("Setting signal handlers")
        if os.name == 'posix':
            signal.signal(signal.SIGINT, self.signal_handler)
            signal.signal(signal.SIGTERM, self.signal_handler)
            signal.signal(signal.SIGCHLD, self.signal_handler)
            signal.signal(signal.SIGHUP, self.signal_handler)
            signal.signal(signal.SIGQUIT, dumpstacks)
            signal.signal(signal.SIGUSR1, log_ormcache_stats)
        elif os.name == 'nt':
            import win32api
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)

        test_mode = config['test_enable'] or config['test_file']
        if test_mode or (config['xmlrpc'] and not stop):
            # some tests need the http daemon to be available...
            self.http_spawn()

        if not stop:
            # only relevant if we are not in "--stop-after-init" mode
            self.cron_spawn()

    def stop(self):
        """ Shutdown the WSGI server. Wait for non daemon threads.
        """
        _logger.info("Initiating shutdown")
        _logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")

        if self.httpd:
            self.httpd.shutdown()
            self.close_socket(self.httpd.socket)

        # Manually join() all threads before calling sys.exit() to allow a second signal
        # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
        # threading.Thread.join() should not mask signals (at least in python 2.5).
        me = threading.currentThread()
        _logger.debug('current thread: %r', me)
        for thread in threading.enumerate():
            _logger.debug('process %r (%r)', thread, thread.isDaemon())
            if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
                while thread.isAlive():
                    _logger.debug('join and sleep')
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)

        _logger.debug('--')
        odoo.modules.registry.Registry.delete_all()
        logging.shutdown()

    def run(self, preload=None, stop=False):
        """ Start the http server and the cron thread then wait for a signal.

        The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
        a second one if any will force an immediate exit.
        """
        self.start(stop=stop)

        rc = preload_registries(preload)

        if stop:
            self.stop()
            return rc

        # Wait for a first signal to be handled. (time.sleep will be interrupted
        # by the signal handler.) The try/except is for the win32 case.
        try:
            while self.quit_signals_received == 0:
                time.sleep(60)
        except KeyboardInterrupt:
            pass

        self.stop()

    def reload(self):
        # Ask ourselves to restart (handled as SIGHUP by signal_handler).
        os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
    """Evented (gevent) server used for the longpolling port; runs as a child
    process of the prefork master and kills itself when its parent dies."""

    def __init__(self, app):
        super(GeventServer, self).__init__(app)
        # Longpolling listens on its own port, separate from the HTTP port.
        self.port = config['longpolling_port']
        self.httpd = None

    def watch_parent(self, beat=4):
        # Greenlet polling the parent pid; exits this process when the
        # original master is gone (e.g. after a restart).
        import gevent
        ppid = os.getppid()
        while True:
            if ppid != os.getppid():
                pid = os.getpid()
                _logger.info("LongPolling (%s) Parent changed", pid)
                # suicide !!
                os.kill(pid, signal.SIGTERM)
                return
            gevent.sleep(beat)

    def start(self):
        import gevent
        from gevent.wsgi import WSGIServer

        if os.name == 'posix':
            signal.signal(signal.SIGQUIT, dumpstacks)
            signal.signal(signal.SIGUSR1, log_ormcache_stats)

        gevent.spawn(self.watch_parent)
        self.httpd = WSGIServer((self.interface, self.port), self.app)
        _logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
        try:
            self.httpd.serve_forever()
        except:
            # Bare except is deliberate: log whatever escaped, then re-raise.
            _logger.exception("Evented Service (longpolling): uncaught error during main loop")
            raise

    def stop(self):
        import gevent
        self.httpd.stop()
        gevent.shutdown()

    def run(self, preload, stop):
        # preload/stop are accepted for interface parity with the other
        # servers but are not used here.
        self.start()
        self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
method between workers but we plan to replace it by a more intelligent
dispatcher to will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = config['xmlrpc'] and \
(config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
self.cron_timeout = config['limit_time_real_cron'] or None
if self.cron_timeout == -1:
self.cron_timeout = self.timeout
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = [sys.executable, sys.argv[0], 'gevent'] + nargs[1:]
popen = subprocess.Popen(cmd)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid == self.long_polling_pid:
self.long_polling_pid = None
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
odoo.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGUSR1:
# log ormcache stats on kill -SIGUSR1
log_ormcache_stats()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("%s (%s) timeout after %ss",
worker.__class__.__name__,
pid,
worker.watchdog_timeout)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
if config['xmlrpc']:
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
if not self.long_polling_pid:
self.long_polling_spawn()
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
# wakeup pipe, python doesnt throw EINTR when a syscall is interrupted
# by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
if self.address:
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGINT)
while self.workers and time.time() < limit:
try:
self.process_signals()
except KeyboardInterrupt:
_logger.info("Forced shutdown.")
break
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
if self.socket:
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Empty the cursor pool, we dont want them to be shared among forked workers.
odoo.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
class Worker(object):
    """Base class of the prefork worker processes: watchdog pinging, resource
    limits (memory, CPU time, request count) and the main work loop."""

    def __init__(self, multi):
        # multi is the owning PreforkServer
        self.multi = multi
        self.watchdog_time = time.time()
        self.watchdog_pipe = multi.pipe_new()
        # Can be set to None if no watchdog is desired.
        self.watchdog_timeout = multi.timeout
        self.ppid = os.getpid()
        self.pid = None
        self.alive = True
        # should we rename into lifetime ?
        self.request_max = multi.limit_request
        self.request_count = 0

    def setproctitle(self, title=""):
        setproctitle('odoo: %s %s %s' % (self.__class__.__name__, self.pid, title))

    def close(self):
        # release both ends of the watchdog pipe (called by the master)
        os.close(self.watchdog_pipe[0])
        os.close(self.watchdog_pipe[1])

    def signal_handler(self, sig, frame):
        # SIGINT: finish the current request, then exit the run() loop.
        self.alive = False

    def sleep(self):
        # Block until the shared listen socket is readable (or beat expires).
        try:
            select.select([self.multi.socket], [], [], self.multi.beat)
        except select.error, e:
            if e[0] not in [errno.EINTR]:
                raise

    def process_limit(self):
        # If our parent changed: suicide
        if self.ppid != os.getppid():
            _logger.info("Worker (%s) Parent changed", self.pid)
            self.alive = False
        # check for lifetime
        if self.request_count >= self.request_max:
            _logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
            self.alive = False
        # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
        rss, vms = memory_info(psutil.Process(os.getpid()))
        if vms > config['limit_memory_soft']:
            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
            self.alive = False      # Commit suicide after the request.

        # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))

        # SIGXCPU (exceeded CPU time) signal handler will raise an exception.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We dont suicide in such case
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))

    def process_work(self):
        # Overridden by subclasses to do one unit of work.
        pass

    def start(self):
        self.pid = os.getpid()
        self.setproctitle()
        _logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
        # Reseed the random number generator
        random.seed()
        if self.multi.socket:
            # Prevent fd inheritance: close_on_exec
            flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
            fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
            # reset blocking status
            self.multi.socket.setblocking(0)
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    def stop(self):
        pass

    def run(self):
        try:
            self.start()
            while self.alive:
                self.process_limit()
                # ping the master's watchdog so we are not killed on timeout
                self.multi.pipe_ping(self.watchdog_pipe)
                self.sleep()
                self.process_work()
            _logger.info("Worker (%s) exiting. request_count: %s, registry count: %s.",
                         self.pid, self.request_count,
                         len(odoo.modules.registry.Registry.registries))
            self.stop()
        except Exception:
            _logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
            # should we use 3 to abort everything ?
            sys.exit(1)
class WorkerHTTP(Worker):
    """ HTTP Request workers: each accepts and serves one connection at a time. """
    def process_request(self, client, addr):
        """Serve one accepted connection synchronously.

        client: the accepted socket; addr: the peer address.
        """
        client.setblocking(1)
        client.settimeout(0.5)
        # Disable Nagle so small responses are not delayed.
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance: close_on_exec
        flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(client, fcntl.F_SETFD, flags)
        # do request using BaseWSGIServerNoBind monkey patched with socket
        self.server.socket = client
        # tolerate broken pipe when the http client closes the socket before
        # receiving the full reply
        try:
            self.server.process_request(client, addr)
        except IOError, e:
            if e.errno != errno.EPIPE:
                raise
        self.request_count += 1
    def process_work(self):
        # Accept one pending connection, if any; the listening socket is
        # non-blocking, so EAGAIN just means "nothing to do right now".
        try:
            client, addr = self.multi.socket.accept()
            self.process_request(client, addr)
        except socket.error, e:
            if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
                raise
    def start(self):
        Worker.start(self)
        # One WSGI server per worker; it serves already-accepted sockets
        # (no bind/listen of its own).
        self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
    """Cron worker: runs scheduled jobs, one database per loop iteration."""
    def __init__(self, multi):
        super(WorkerCron, self).__init__(multi)
        # process_work() below process a single database per call.
        # The variable db_index is keeping track of the next database to
        # process.
        self.db_index = 0
        self.watchdog_timeout = multi.cron_timeout  # Use a distinct value for CRON Worker
    def sleep(self):
        # Really sleep once all the databases have been processed.
        if self.db_index == 0:
            interval = SLEEP_INTERVAL + self.pid % 10  # chorus effect
            time.sleep(interval)
    def _db_list(self):
        """Return the list of database names to poll for cron jobs."""
        if config['db_name']:
            db_names = config['db_name'].split(',')
        else:
            db_names = odoo.service.db.list_dbs(True)
        return db_names
    def process_work(self):
        """Acquire and run pending cron jobs on the next database (round-robin)."""
        rpc_request = logging.getLogger('odoo.netsvc.rpc.request')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        _logger.debug("WorkerCron (%s) polling for jobs", self.pid)
        db_names = self._db_list()
        # Idiom: truthiness test instead of len().
        if db_names:
            # Advance the round-robin cursor, then process that database.
            self.db_index = (self.db_index + 1) % len(db_names)
            db_name = db_names[self.db_index]
            self.setproctitle(db_name)
            if rpc_request_flag:
                start_time = time.time()
                start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
            import odoo.addons.base as base
            base.ir.ir_cron.ir_cron._acquire_job(db_name)
            odoo.modules.registry.Registry.delete(db_name)
            # dont keep cursors in multi database mode
            if len(db_names) > 1:
                odoo.sql_db.close_db(db_name)
            if rpc_request_flag:
                run_time = time.time() - start_time
                end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
                vms_diff = (end_vms - start_vms) / 1024
                logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
                    (db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
                _logger.debug("WorkerCron (%s) %s", self.pid, logline)
            self.request_count += 1
            # Warn when worker recycling (limit_request) will kick in before
            # every database got a turn in this round.
            # Bug fix: error message typo "dabatases" -> "databases".
            if self.request_count >= self.request_max and self.request_max < len(db_names):
                _logger.error("There are more databases to process than allowed "
                              "by the `limit_request` configuration variable: %s more.",
                              len(db_names) - self.request_max)
        else:
            self.db_index = 0
    def start(self):
        os.nice(10)  # mommy always told me to be nice with others...
        Worker.start(self)
        if self.multi.socket:
            # Cron workers never accept HTTP connections.
            self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
# Module-level handle on the running server instance; set by start().
server = None
def load_server_wide_modules():
    """Import every module listed in ``odoo.conf.server_wide_modules``.

    Failures are logged (with an extra hint for the common `web` case)
    but never abort the loop, so one broken module does not block the
    others from loading.
    """
    for module_name in odoo.conf.server_wide_modules:
        try:
            odoo.modules.module.load_openerp_module(module_name)
        except Exception:
            if module_name == 'web':
                msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
            else:
                msg = ''
            _logger.exception('Failed to load server-wide module `%s`.%s', module_name, msg)
def _reexec(updated_modules=None):
    """Replace the current openerp-server process with a fresh copy.

    Re-runs the interpreter with (nearly) the same command line; when
    *updated_modules* is given, a ``-u`` flag is appended so the new
    process updates those modules on boot.  Under a Windows NT service
    the service is bounced via ``net`` first.
    """
    if odoo.tools.osutil.is_running_as_nt_service():
        subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
    executable_name = os.path.basename(sys.executable)
    argv = stripped_sys_argv()
    if updated_modules:
        argv += ["-u", ','.join(updated_modules)]
    # os.execv expects argv[0] to be the program name itself.
    if not argv or argv[0] != executable_name:
        argv.insert(0, executable_name)
    os.execv(sys.executable, argv)
def load_test_file_yml(registry, test_file):
    """Run a YAML test file against *registry*'s database.

    The file is imported in 'test' mode inside a fresh cursor; the
    transaction is committed only when the ``test_commit`` option is
    set, otherwise it is rolled back so the test leaves no trace.
    """
    with registry.cursor() as cr:
        # Bug fix: open() instead of the deprecated file() builtin
        # (file() was removed in Python 3; open() is the documented API).
        odoo.tools.convert_yaml_import(cr, 'base', open(test_file), 'test', {}, 'init')
        if config['test_commit']:
            _logger.info('test %s has been commited', test_file)
            cr.commit()
        else:
            _logger.info('test %s has been rollbacked', test_file)
            cr.rollback()
def load_test_file_py(registry, test_file):
    """Run the unittest suite of an already-imported python test file.

    The module whose ``__file__`` matches *test_file*'s path is looked up
    in sys.modules (so the file must have been imported beforehand); its
    tests are run and the outcome is folded into the registry's
    assertion report when that report supports it.
    """
    # Locate python module based on its filename and run the tests
    test_path, _ = os.path.splitext(os.path.abspath(test_file))
    for mod_name, mod_mod in sys.modules.items():
        if mod_mod:
            # Compare extension-less paths so .py/.pyc both match.
            mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
            if test_path == mod_path:
                suite = unittest.TestSuite()
                for t in unittest.TestLoader().loadTestsFromModule(mod_mod):
                    suite.addTest(t)
                _logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
                # TestStream routes unittest output through the odoo logger.
                stream = odoo.modules.module.TestStream()
                result = unittest.TextTestRunner(verbosity=2, stream=stream).run(suite)
                success = result.wasSuccessful()
                if hasattr(registry._assertion_report, 'report_result'):
                    registry._assertion_report.report_result(success)
                if not success:
                    _logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
    """Preload registries for the given databases, possibly run a test file.

    Returns 0 on success, a positive count of databases whose test run
    reported failures, or -1 when a database failed to initialize
    (which aborts the remaining databases).
    """
    # TODO: move all config checks to args dont check tools.config here
    config = odoo.tools.config
    test_file = config['test_file']
    dbnames = dbnames or []
    rc = 0
    for dbname in dbnames:
        try:
            update_module = config['init'] or config['update']
            registry = Registry.new(dbname, update_module=update_module)
            # run test_file if provided
            if test_file:
                _logger.info('loading test file %s', test_file)
                with odoo.api.Environment.manage():
                    # Dispatch on extension: YAML vs python test files.
                    if test_file.endswith('yml'):
                        load_test_file_yml(registry, test_file)
                    elif test_file.endswith('py'):
                        load_test_file_py(registry, test_file)
            if registry._assertion_report.failures:
                rc += 1
        except Exception:
            _logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
            return -1
    return rc
def start(preload=None, stop=False):
    """ Start the odoo http server and cron processor.

    preload: database names to preload; stop: shut down after preloading.
    Returns the server's exit code (0 on success).
    """
    global server
    load_server_wide_modules()
    # Pick the server flavour: gevent (evented), prefork (workers) or threaded.
    if odoo.evented:
        server = GeventServer(odoo.service.wsgi_server.application)
    elif config['workers']:
        server = PreforkServer(odoo.service.wsgi_server.application)
    else:
        server = ThreadedServer(odoo.service.wsgi_server.application)
    watcher = None
    if 'reload' in config['dev_mode']:
        # Watch the filesystem for code changes and trigger a restart.
        if watchdog:
            watcher = FSWatcher()
            watcher.start()
        else:
            _logger.warning("'watchdog' module not installed. Code autoreload feature is disabled")
    if 'werkzeug' in config['dev_mode']:
        # Wrap the WSGI app in werkzeug's interactive debugger.
        server.app = DebuggedApplication(server.app, evalex=True)
    rc = server.run(preload, stop)
    # like the legend of the phoenix, all ends with beginnings
    if getattr(odoo, 'phoenix', False):
        # A reload was requested: stop the watcher and re-exec ourselves.
        if watcher:
            watcher.stop()
        _reexec()
    return rc if rc else 0
def restart():
    """ Restart the server
    """
    if os.name == 'nt':
        # run in a thread to let the current thread return response to the caller.
        threading.Thread(target=_reexec).start()
    else:
        # NOTE(review): presumably the server installs a SIGHUP handler that
        # performs the actual restart — confirm in the server classes.
        os.kill(server.pid, signal.SIGHUP)
| gpl-3.0 |
Jannes123/inasafe | safe/storage/layer.py | 7 | 3762 | """**Class Layer**
"""
from safe.common.utilities import verify
from projection import Projection
class Layer(object):
    """Base class shared by all geospatial layer types (Raster, Vector)."""

    def __init__(self, name=None, projection=None,
                 keywords=None, style_info=None,
                 sublayer=None):
        """Common constructor for all types of layers

        See docstrings for class Raster and class Vector for details.
        """
        # Validate and store the layer name (string or None only).
        msg = ('Specified name must be a string or None. '
               'I got %s with type %s' % (name, str(type(name))[1:-1]))
        verify(isinstance(name, basestring) or name is None, msg)
        self.name = name

        # Normalise whatever was given into a Projection instance.
        self.projection = Projection(projection)

        # Keywords default to an empty dict; anything else must be a dict.
        if keywords is None:
            keywords = {}
        else:
            msg = ('Specified keywords must be either None or a '
                   'dictionary. I got %s' % keywords)
            verify(isinstance(keywords, dict), msg)
        self.keywords = keywords

        # Same contract for the style information.
        if style_info is None:
            style_info = {}
        else:
            msg = ('Specified style_info must be either None or a '
                   'dictionary. I got %s' % style_info)
            verify(isinstance(style_info, dict), msg)
        self.style_info = style_info

        # Remaining attributes start out unset.
        self.sublayer = sublayer
        self.filename = None
        self.data = None

    def __ne__(self, other):
        """Inequality defined as the negation of (subclass) equality."""
        return not self == other

    def get_name(self):
        """Return this layer's name."""
        return self.name

    def set_name(self, name):
        """Set this layer's name."""
        self.name = name

    def get_filename(self):
        """Return the filename associated with this layer (None if unset)."""
        return self.filename

    def get_projection(self, proj4=False):
        """Return projection of this layer as a string."""
        return self.projection.get_projection(proj4)

    def get_keywords(self, key=None):
        """Return keyword data.

        Args:
            * key (optional): If specified value will be returned for key only

        Without *key* a copy of the whole keyword dictionary is returned;
        an unknown *key* raises an exception listing the available options.
        """
        if key is None:
            return self.keywords.copy()
        if key not in self.keywords:
            msg = ('Keyword %s does not exist in %s: Options are '
                   '%s' % (key, self.get_name(), self.keywords.keys()))
            raise Exception(msg)
        return self.keywords[key]

    def get_style_info(self):
        """Return the style_info dictionary."""
        return self.style_info

    def get_impact_summary(self):
        """Return the 'impact_summary' keyword, or '' when absent."""
        return self.keywords.get('impact_summary', '')

    def get_total_needs(self):
        """Return the 'total_needs' keyword, or '' when absent."""
        return self.keywords.get('total_needs', '')

    def get_style_type(self):
        """Return the style type of this layer, or None if not defined."""
        if self.style_info is None:
            return None
        return self.style_info.get('style_type', None)

    # Layer properties used to identify their types
    @property
    def is_inasafe_spatial_object(self):
        """Always True: marks instances as InaSAFE spatial layers."""
        return True

    @property
    def is_raster(self):
        """True when this instance is (a subclass of) Raster."""
        return 'Raster' in str(self.__class__)

    @property
    def is_vector(self):
        """True when this instance is (a subclass of) Vector."""
        return 'Vector' in str(self.__class__)
| gpl-3.0 |
eloquence/unisubs | apps/teams/models.py | 1 | 128528 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from collections import defaultdict
from itertools import groupby
from math import ceil
import csv
import datetime
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.files import File
from django.db import models
from django.db.models import query, Q
from django.db.models.signals import post_save, post_delete, pre_delete
from django.http import Http404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, ugettext
from haystack import site
from haystack.query import SQ
import teams.moderation_const as MODERATION
from caching import ModelCacheManager
from comments.models import Comment
from auth.models import UserLanguage, CustomUser as User
from auth.providers import get_authentication_provider
from messages import tasks as notifier
from subtitles import shims
from subtitles.signals import language_deleted
from teams.moderation_const import WAITING_MODERATION, UNMODERATED, APPROVED
from teams.permissions_const import (
TEAM_PERMISSIONS, PROJECT_PERMISSIONS, ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER,
ROLE_CONTRIBUTOR
)
from teams import tasks
from teams import workflows
from teams.signals import api_subtitles_approved, api_subtitles_rejected
from utils import DEFAULT_PROTOCOL
from utils import translation
from utils.amazon import S3EnabledImageField, S3EnabledFileField
from utils.panslugify import pan_slugify
from utils.searching import get_terms
from utils.text import fmt
from videos.models import (Video, VideoUrl, SubtitleVersion, SubtitleLanguage,
Action)
from videos.tasks import video_changed_tasks
from subtitles.models import (
SubtitleVersion as NewSubtitleVersion,
SubtitleLanguage as NewSubtitleLanguage,
SubtitleNoteBase,
ORIGIN_IMPORTED
)
from subtitles import pipeline
from functools import partial
logger = logging.getLogger(__name__)
# Separate logger for messages emitted from celery tasks.
celery_logger = logging.getLogger('celery.task')
# Optional cutoff used by billing code; None unless overridden in settings.
BILLING_CUTOFF = getattr(settings, 'BILLING_CUTOFF', None)
# Teams
class TeamQuerySet(query.QuerySet):
    """QuerySet for Team with helpers that annotate rows via raw SQL subqueries."""
    def add_members_count(self):
        """Add _members_count field to this query

        This can be used to order/filter the query and also avoids a query in
        when Team.members_count() is called.
        """
        # Correlated subquery counting TeamMember rows per team.
        select = {
            '_members_count': (
                'SELECT COUNT(1) '
                'FROM teams_teammember tm '
                'WHERE tm.team_id=teams_team.id'
            )
        }
        return self.extra(select=select)
    def add_videos_count(self):
        """Add _videos_count field to this query

        This can be used to order/filter the query and also avoids a query in
        when Team.video_count() is called.
        """
        # Correlated subquery counting TeamVideo rows per team.
        select = {
            '_videos_count': (
                'SELECT COUNT(1) '
                'FROM teams_teamvideo tv '
                'WHERE tv.team_id=teams_team.id'
            )
        }
        return self.extra(select=select)
    def add_user_is_member(self, user):
        """Add user_is_member field to this query """
        if not user.is_authenticated():
            # Anonymous users are never members; emit a constant false column.
            return self.extra(select={'user_is_member': 0})
        select = {
            'user_is_member': (
                'EXISTS (SELECT 1 '
                'FROM teams_teammember tm '
                'WHERE tm.team_id=teams_team.id '
                'AND tm.user_id=%s)'
            )
        }
        return self.extra(select=select, select_params=[user.id])
class TeamManager(models.Manager):
    """Default manager for Team; hides deleted teams."""
    def get_query_set(self):
        """Return a QS of all non-deleted teams."""
        return TeamQuerySet(Team).filter(deleted=False)
    def for_user(self, user, exclude_private=False):
        """Return the teams visible for the given user.

        If exclude_private is True, then we will exclude private teams, even
        if the user can apply to them.
        """
        # policies where we should show the team, even if they're not visible
        visible_policies = [Team.OPEN, Team.APPLICATION]
        q = models.Q(is_visible=True)
        if not exclude_private:
            q |= models.Q(membership_policy__in=visible_policies)
        if user.is_authenticated():
            # Members can always see their own teams.
            user_teams = TeamMember.objects.filter(user=user)
            q |= models.Q(id__in=user_teams.values('team_id'))
        return self.get_query_set().filter(q)
    def with_recent_billing_record(self, day_range):
        """Find teams that have had a new video recently"""
        start_date = (datetime.datetime.now() -
                      datetime.timedelta(days=day_range))
        team_ids = list(BillingRecord.objects
                        .order_by()
                        .filter(created__gt=start_date)
                        .values_list('team_id', flat=True)
                        .distinct())
        return Team.objects.filter(id__in=team_ids)
    def needs_new_video_notification(self, notify_interval):
        # Teams on this notification interval that have had a video added
        # since the last time we notified them.
        return (self.filter(
            notify_interval=notify_interval,
            teamvideo__created__gt=models.F('last_notification_time'))
            .distinct())
class Team(models.Model):
    # --- Membership policy constants ---
    APPLICATION = 1
    INVITATION_BY_MANAGER = 2
    INVITATION_BY_ALL = 3
    OPEN = 4
    INVITATION_BY_ADMIN = 5
    MEMBERSHIP_POLICY_CHOICES = (
        (OPEN, _(u'Open')),
        (APPLICATION, _(u'Application')),
        (INVITATION_BY_ALL, _(u'Invitation by any team member')),
        (INVITATION_BY_MANAGER, _(u'Invitation by manager')),
        (INVITATION_BY_ADMIN, _(u'Invitation by admin')),
    )
    # --- Video policy constants (who may add/edit videos) ---
    VP_MEMBER = 1
    VP_MANAGER = 2
    VP_ADMIN = 3
    VIDEO_POLICY_CHOICES = (
        (VP_MEMBER, _(u'Any team member')),
        (VP_MANAGER, _(u'Managers and admins')),
        (VP_ADMIN, _(u'Admins only'))
    )
    # --- Task-assignment and subtitling policy choices ---
    TASK_ASSIGN_CHOICES = (
        (10, 'Any team member'),
        (20, 'Managers and admins'),
        (30, 'Admins only'),
    )
    TASK_ASSIGN_NAMES = dict(TASK_ASSIGN_CHOICES)
    # Reverse mapping: label -> id.
    TASK_ASSIGN_IDS = dict([choice[::-1] for choice in TASK_ASSIGN_CHOICES])
    SUBTITLE_CHOICES = (
        (10, 'Anyone'),
        (20, 'Any team member'),
        (30, 'Only managers and admins'),
        (40, 'Only admins'),
    )
    SUBTITLE_NAMES = dict(SUBTITLE_CHOICES)
    SUBTITLE_IDS = dict([choice[::-1] for choice in SUBTITLE_CHOICES])
    # --- New-video notification intervals ---
    NOTIFY_DAILY = 'D'
    NOTIFY_HOURLY = 'H'
    NOTIFY_INTERVAL_CHOICES = (
        (NOTIFY_DAILY, _('Daily')),
        (NOTIFY_HOURLY, _('Hourly')),
    )
    name = models.CharField(_(u'name'), max_length=250, unique=True)
    slug = models.SlugField(_(u'slug'), unique=True)
    description = models.TextField(_(u'description'), blank=True, help_text=_('All urls will be converted to links. Line breaks and HTML not supported.'))
    logo = S3EnabledImageField(verbose_name=_(u'logo'), blank=True,
                               upload_to='teams/logo/',
                               default='',
                               thumb_sizes=[(280, 100), (100, 100)])
    square_logo = S3EnabledImageField(verbose_name=_(u'square logo'),
                                      blank=True,
                                      default='',
                                      upload_to='teams/square-logo/',
                                      thumb_sizes=[(100, 100), (48, 48)])
    is_visible = models.BooleanField(_(u'videos public?'), default=True)
    videos = models.ManyToManyField(Video, through='TeamVideo', verbose_name=_('videos'))
    users = models.ManyToManyField(User, through='TeamMember', related_name='teams', verbose_name=_('users'))
    points = models.IntegerField(default=0, editable=False)
    applicants = models.ManyToManyField(User, through='Application', related_name='applicated_teams', verbose_name=_('applicants'))
    created = models.DateTimeField(auto_now_add=True)
    highlight = models.BooleanField(default=False)
    video = models.ForeignKey(Video, null=True, blank=True, related_name='intro_for_teams', verbose_name=_(u'Intro Video'))
    application_text = models.TextField(blank=True)
    page_content = models.TextField(_(u'Page content'), blank=True, help_text=_(u'You can use markdown. This will replace Description.'))
    is_moderated = models.BooleanField(default=False)
    header_html_text = models.TextField(blank=True, default='', help_text=_(u"HTML that appears at the top of the teams page."))
    last_notification_time = models.DateTimeField(editable=False, default=datetime.datetime.now)
    notify_interval = models.CharField(max_length=1,
                                       choices=NOTIFY_INTERVAL_CHOICES,
                                       default=NOTIFY_DAILY)
    auth_provider_code = models.CharField(_(u'authentication provider code'),
                                          max_length=24, blank=True, default="")
    # code value from one the TeamWorkflow subclasses
    # Since other apps can add workflow types, let's use this system to avoid
    # conflicts:
    #   - Core types are defined in the teams app and 1 char long
    #   - Extention types are defined on other apps.  They are 2 chars long,
    #     with the first one being unique to the app.
    workflow_type = models.CharField(max_length=2, default='O')
    # Enabling Features
    projects_enabled = models.BooleanField(default=False)
    # Deprecated field that enables the tasks workflow
    workflow_enabled = models.BooleanField(default=False)
    # Policies and Permissions
    membership_policy = models.IntegerField(_(u'membership policy'),
                                            choices=MEMBERSHIP_POLICY_CHOICES,
                                            default=OPEN)
    video_policy = models.IntegerField(_(u'video policy'),
                                       choices=VIDEO_POLICY_CHOICES,
                                       default=VP_MEMBER)
    # The values below here are mostly specific to the tasks workflow and will
    # probably be deleted.
    task_assign_policy = models.IntegerField(_(u'task assignment policy'),
                                             choices=TASK_ASSIGN_CHOICES,
                                             default=TASK_ASSIGN_IDS['Any team member'])
    subtitle_policy = models.IntegerField(_(u'subtitling policy'),
                                          choices=SUBTITLE_CHOICES,
                                          default=SUBTITLE_IDS['Anyone'])
    translate_policy = models.IntegerField(_(u'translation policy'),
                                           choices=SUBTITLE_CHOICES,
                                           default=SUBTITLE_IDS['Anyone'])
    max_tasks_per_member = models.PositiveIntegerField(_(u'maximum tasks per member'),
                                                       default=None, null=True, blank=True)
    task_expiration = models.PositiveIntegerField(_(u'task expiration (days)'),
                                                  default=None, null=True, blank=True)
    deleted = models.BooleanField(default=False)
    partner = models.ForeignKey('Partner', null=True, blank=True,
                                related_name='teams')
    objects = TeamManager()
    all_objects = models.Manager()  # For accessing deleted teams, if necessary.
    cache = ModelCacheManager()
    class Meta:
        ordering = ['name']
        verbose_name = _(u'Team')
        verbose_name_plural = _(u'Teams')
    def __init__(self, *args, **kwargs):
        models.Model.__init__(self, *args, **kwargs)
        # Per-instance cache of TeamMember lookups, keyed by user id.
        self._member_cache = {}
    def save(self, *args, **kwargs):
        """Save the team, invalidating caches and bootstrapping new teams."""
        creating = self.pk is None
        super(Team, self).save(*args, **kwargs)
        self.cache.invalidate()
        if creating:
            # create a default project
            self.default_project
            # setup our workflow
            self.new_workflow.setup_team()
    def __unicode__(self):
        return self.name or self.slug
    def is_tasks_team(self):
        # True when the deprecated tasks workflow is enabled for this team.
        return self.workflow_enabled
    @property
    def new_workflow(self):
        # Lazily resolved TeamWorkflow instance for this team's workflow_type.
        if not hasattr(self, '_new_workflow'):
            self._new_workflow = workflows.TeamWorkflow.get_workflow(self)
        return self._new_workflow
    def is_old_style(self):
        # "O" is the original/default workflow type code.
        return self.workflow_type == "O"
    def get_tasks_page_url(self):
        """URL of this team's tasks page."""
        return reverse('teams:team_tasks', kwargs={
            'slug': self.slug,
        })
    def languages(self, members_joined_since=None):
        """Returns the languages spoken by the members of the team

        Optionally restricted to members who joined in the last
        *members_joined_since* days.
        """
        if members_joined_since:
            users = self.members_since(members_joined_since)
        else:
            users = self.users.all()
        return UserLanguage.objects.filter(user__in=users).values_list('language', flat=True)
    def active_users(self, since=None, published=True):
        """(author, subtitle_language) pairs for subtitle versions on team videos.

        since: only count versions created in the last *since* days.
        published: restrict to publicly visible versions.
        """
        sv = NewSubtitleVersion.objects.filter(video__in=self.videos.all())
        if published:
            sv = sv.filter(Q(visibility_override='public') | Q(visibility='public'))
        if since:
            sv = sv.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=since))
        return sv.exclude(author__username="anonymous").values_list('author', 'subtitle_language')
    def get_default_message(self, name):
        # Default message text for *name*, with %(team)s interpolated.
        return fmt(Setting.MESSAGE_DEFAULTS.get(name, ''), team=self)
    def get_messages(self, names):
        """Fetch messages from the settings objects

        This method fetches the messages associated with names and interpolates
        them to replace %(team)s with the team name.

        Returns:
            dict mapping names to message text
        """
        messages = {
            name: self.get_default_message(name)
            for name in names
        }
        # Team-specific settings override the defaults when non-empty.
        for setting in self.settings.with_names(names):
            if setting.data:
                messages[setting.key_name] = setting.data
        return messages
    def render_message(self, msg):
        """Return a string of HTML representing a team header for a notification.

        TODO: Get this out of the model and into a templatetag or something.
        """
        author_page = msg.author.get_absolute_url() if msg.author else ''
        context = {
            'team': self,
            'msg': msg,
            'author': msg.author,
            'author_page': author_page,
            'team_page': self.get_absolute_url(),
            "STATIC_URL": settings.STATIC_URL,
        }
        return render_to_string('teams/_team_message.html', context)
    def is_open(self):
        """Return whether this team's membership is open to the public."""
        return self.membership_policy == self.OPEN
    def is_by_application(self):
        """Return whether this team's membership is by application only."""
        return self.membership_policy == self.APPLICATION
    def get_workflow(self):
        """Return the workflow for the given team.

        A workflow will always be returned.  If one isn't specified for the
        team a default (unsaved) one will be populated with default values
        and returned.

        TODO: Refactor this behaviour into something less confusing.
        """
        return Workflow.get_for_target(self.id, 'team')
    @property
    def auth_provider(self):
        """Return the authentication provider class for this Team, or None.

        No DB queries are used, so this is safe to call many times.
        """
        if not self.auth_provider_code:
            return None
        else:
            return get_authentication_provider(self.auth_provider_code)
    # Thumbnails (all return None when the corresponding image is unset)
    def logo_thumbnail(self):
        """URL for a kind-of small version of this team's logo, or None."""
        if self.logo:
            return self.logo.thumb_url(100, 100)
    def logo_thumbnail_medium(self):
        """URL for a medium version of this team's logo, or None."""
        if self.logo:
            return self.logo.thumb_url(280, 100)
    def square_logo_thumbnail(self):
        """URL for this team's square logo, or None."""
        if self.square_logo:
            return self.square_logo.thumb_url(100, 100)
    def square_logo_thumbnail_small(self):
        """URL for a small version of this team's square logo, or None."""
        if self.square_logo:
            return self.square_logo.thumb_url(48, 48)
    # URLs
    @models.permalink
    def get_absolute_url(self):
        return ('teams:dashboard', [self.slug])
    def get_site_url(self):
        """Return the full, absolute URL for this team, including http:// and the domain."""
        return '%s://%s%s' % (DEFAULT_PROTOCOL,
                              Site.objects.get_current().domain,
                              self.get_absolute_url())
    # Membership and roles
    def get_member(self, user):
        """Get a TeamMember object for a user or None."""
        if not user.is_authenticated():
            return None
        # Serve repeated lookups from the per-instance cache.
        if user.id in self._member_cache:
            return self._member_cache[user.id]
        try:
            member = self.members.get(user=user)
        except TeamMember.DoesNotExist:
            member = None
        self._member_cache[user.id] = member
        return member
    def user_is_member(self, user):
        """True if *user* belongs to this team (uses a cached member-id list)."""
        members = self.cache.get('members')
        if members is None:
            members = list(self.members.values_list('user_id', flat=True))
            self.cache.set('members', members)
        return user.id in members
    def uncache_member(self, user):
        # Drop a stale entry from the per-instance member cache.
        try:
            del self._member_cache[user.id]
        except KeyError:
            pass
    def user_can_view_videos(self, user):
        # Videos are visible when the team is public or the user is a member.
        return self.is_visible or self.user_is_member(user)
    def _is_role(self, user, role=None):
        """Return whether the given user has the given role in this team.

        Safe to use with null or unauthenticated users.

        If no role is given, simply return whether the user is a member of
        this team at all.

        TODO: Change this to use the stuff in teams.permissions.
        """
        if not user or not user.is_authenticated():
            return False
        qs = self.members.filter(user=user)
        if role:
            qs = qs.filter(role=role)
        return qs.exists()
    def can_bulk_approve(self, user):
        # Only owners and admins may bulk-approve.
        return self.is_owner(user) or self.is_admin(user)
    def is_owner(self, user):
        """
        Return whether the given user is an owner of this team.
        """
        return self._is_role(user, TeamMember.ROLE_OWNER)
    def is_admin(self, user):
        """Return whether the given user is an admin of this team."""
        return self._is_role(user, TeamMember.ROLE_ADMIN)
    def is_manager(self, user):
        """Return whether the given user is a manager of this team."""
        return self._is_role(user, TeamMember.ROLE_MANAGER)
    def is_member(self, user):
        """Return whether the given user is a member of this team."""
        return self._is_role(user)
    def is_contributor(self, user, authenticated=True):
        """Return whether the given user is a contributor of this team, False otherwise.

        NOTE(review): the *authenticated* parameter is currently unused.
        """
        return self._is_role(user, TeamMember.ROLE_CONTRIBUTOR)
    def can_see_video(self, user, team_video=None):
        """I have no idea.

        TODO: Figure out what this thing is, and if it's still necessary.
        """
        if not user.is_authenticated():
            return False
        return self.is_member(user)
    def fetch_video_actions(self, video_language=None):
        """Fetch the Action objects for this team's videos

        Args:
            video_language: only actions for videos with this
                primary_audio_language_code
        """
        video_q = TeamVideo.objects.filter(team=self).values_list('video_id')
        if video_language is not None:
            video_q = video_q.filter(
                video__primary_audio_language_code=video_language)
        return Action.objects.filter(video_id__in=video_q)
    # Moderation
    def moderates_videos(self):
        """Return whether this team moderates videos in some way, False otherwise.

        Moderation means the team restricts who can create subtitles and/or
        translations.
        """
        if self.subtitle_policy != Team.SUBTITLE_IDS['Anyone']:
            return True
        if self.translate_policy != Team.SUBTITLE_IDS['Anyone']:
            return True
        return False
    def video_is_moderated_by_team(self, video):
        """Return whether this team moderates the given video."""
        return video.moderated_by == self
    # Item counts
    @property
    def members_count(self):
        """Return the number of members of this team.

        Caches the result in-object for performance.  _members_count may
        also have been pre-filled by TeamQuerySet.add_members_count().
        """
        if not hasattr(self, '_members_count'):
            setattr(self, '_members_count', self.users.count())
        return self._members_count
    def members_count_since(self, joined_since):
        """Return the number of members of this team who joined the last n days.
        """
        return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since)).count()
    def members_since(self, joined_since):
        """ Returns the members who joined the team the last n days
        """
        return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since))
    @property
    def videos_count(self):
        """Return the number of videos of this team.

        Caches the result in-object for performance.
        """
        if not hasattr(self, '_videos_count'):
            setattr(self, '_videos_count', self.teamvideo_set.count())
        return self._videos_count
    def videos_count_since(self, added_since = None):
        """Return the number of videos of this team added the last n days.

        NOTE(review): the default of None would make timedelta() raise;
        callers must pass a number of days.
        """
        return self.teamvideo_set.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since)).count()
    def videos_since(self, added_since):
        """Returns the videos of this team added the last n days.
        """
        return self.videos.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since))
    def unassigned_tasks(self, sort=None):
        # Open, unassigned Approve tasks; optionally ordered by *sort*.
        qs = Task.objects.filter(team=self, deleted=False, completed=None, assignee=None, type=Task.TYPE_IDS['Approve'])
        if sort is not None:
            qs = qs.order_by(sort)
        return qs
    def get_task(self, task_pk):
        return Task.objects.get(pk=task_pk)
    def get_tasks(self, task_pks):
        # select_related avoids N+1 queries when rendering task lists.
        return Task.objects.filter(pk__in=task_pks).select_related('new_subtitle_version', 'new_subtitle_version__subtitle_language', 'team_video', 'team_video__video', 'team_video__video__teamvideo', 'workflow')
    def _count_tasks(self):
        qs = Task.objects.filter(team=self, deleted=False, completed=None)
        # quick, check, are there more than 1000 tasks, if so return 1001, and
        # let the UI display > 1000
        if qs[1000:1001].exists():
            return 1001
        else:
            return qs.count()
    @property
    def tasks_count(self):
        """Return the number of incomplete, undeleted tasks of this team.

        Caches the result in-object for performance.

        Note: the count is capped at 1001 tasks.  If a team has more than
        that, we generally just want to display "> 1000".  Use
        get_tasks_count_display() to do that.
        """
        if not hasattr(self, '_tasks_count'):
            setattr(self, '_tasks_count', self._count_tasks())
        return self._tasks_count
    def get_tasks_count_display(self):
        """Get a string to display for our tasks count."""
        if self.tasks_count <= 1000:
            return unicode(self.tasks_count)
        else:
            return ugettext('> 1000')
# Applications (people applying to join)
def application_message(self):
"""Return the membership application message for this team, or '' if none exists."""
try:
return self.settings.get(key=Setting.KEY_IDS['messages_application']).data
except Setting.DoesNotExist:
return ''
    @property
    def applications_count(self):
        """Return the number of open membership applications to this team.

        Caches the result in-object for performance.
        """
        if not hasattr(self, '_applications_count'):
            setattr(self, '_applications_count', self.applications.count())
        return self._applications_count
# Language pairs
def _lang_pair(self, lp, suffix):
return SQ(content="{0}_{1}_{2}".format(lp[0], lp[1], suffix))
    def get_videos_for_languages_haystack(self, language=None,
                                          num_completed_langs=None,
                                          project=None, user=None, query=None,
                                          sort=None, exclude_language=None):
        """Search this team's videos in the haystack index.

        All filters are optional:
          language / exclude_language: include/exclude videos with this
              completed language code
          num_completed_langs: exact number of completed languages
          project: limit results to one project
          user: controls member vs. public visibility (see
              get_videos_for_user)
          query: free-text query, split into terms
          sort: one of 'name', 'subs', 'time', optionally '-' prefixed;
              defaults to '-time'

        Returns a haystack SearchQuerySet.
        """
        qs = self.get_videos_for_user(user)
        if project:
            qs = qs.filter(project_pk=project.pk)
        if query:
            for term in get_terms(query):
                qs = qs.auto_query(qs.query.clean(term).decode('utf-8'))
        if language:
            qs = qs.filter(video_completed_langs=language)
        if exclude_language:
            qs = qs.exclude(video_completed_langs=exclude_language)
        if num_completed_langs is not None:
            qs = qs.filter(num_completed_langs=num_completed_langs)
        # Map public sort keys onto index fields.
        # NOTE(review): an unrecognized sort key makes this order_by(None)
        # because dict.get() has no default here — confirm that is intended.
        qs = qs.order_by({
             'name':  'video_title_exact',
            '-name': '-video_title_exact',
             'subs':  'num_completed_langs',
            '-subs': '-num_completed_langs',
             'time':  'team_video_create_date',
            '-time': '-team_video_create_date',
        }.get(sort or '-time'))
        return qs
    def get_videos_for_user(self, user):
        """Return haystack results for this team's videos visible to `user`.

        Authenticated members get the member-scoped index results; everyone
        else gets the public results.
        """
        # Imported here, not at module level — presumably to avoid a
        # circular import with the search index module.
        from teams.search_indexes import TeamVideoLanguagesIndex
        is_member = (user and user.is_authenticated()
                     and self.members.filter(user=user).exists())
        if is_member:
            return TeamVideoLanguagesIndex.results_for_members(self).filter(team_id=self.id)
        else:
            return TeamVideoLanguagesIndex.results().filter(team_id=self.id)
# Projects
    @property
    def default_project(self):
        """Return the default project for this team.

        If it doesn't already exist it will be created.

        TODO: Move the creation into a signal on the team to avoid creating
        multiple default projects here?
        """
        try:
            return Project.objects.get(team=self, slug=Project.DEFAULT_NAME)
        except Project.DoesNotExist:
            # Lazily create the default project on first access.
            p = Project(team=self,name=Project.DEFAULT_NAME)
            p.save()
            return p
    @property
    def has_projects(self):
        """Return whether this team has projects other than the default one.

        Every team has at least the default project, hence the > 1.
        """
        return self.project_set.count() > 1
# Readable/writeable language codes
    def get_writable_langs(self):
        """Return a list of language code strings that are writable for this team.

        This value may come from memcache.
        """
        return TeamLanguagePreference.objects.get_writable(self)
    def get_readable_langs(self):
        """Return a list of language code strings that are readable for this team.

        This value may come from memcache.
        """
        return TeamLanguagePreference.objects.get_readable(self)
def get_team_languages(self, since=None):
query_sl = NewSubtitleLanguage.objects.filter(video__in=self.videos.all())
new_languages = []
if since:
query_sl = query_sl.filter(id__in=NewSubtitleVersion.objects.filter(video__in=self.videos.all(),
created__gt=datetime.datetime.now() - datetime.timedelta(days=since)).order_by('subtitle_language').values_list('subtitle_language', flat=True).distinct())
new_languages = list(NewSubtitleLanguage.objects.filter(video__in=self.videos_since(since)).values_list('language_code', 'subtitles_complete'))
query_sl = query_sl.values_list('language_code', 'subtitles_complete')
languages = list(query_sl)
def first_member(x):
return x[0]
complete_languages = map(first_member, filter(lambda x: x[1], languages))
incomplete_languages = map(first_member, filter(lambda x: not x[1], languages))
new_languages = map(first_member, new_languages)
if since:
return (complete_languages, incomplete_languages, new_languages)
else:
return (complete_languages, incomplete_languages)
# This needs to be constructed after the model definition since we need a
# reference to the class itself.
# NOTE(review): assigning to Team._meta.permissions after class creation
# bypasses normal Meta handling — confirm these custom permissions are still
# picked up (e.g. by syncdb/migrations) on Django upgrades.
Team._meta.permissions = TEAM_PERMISSIONS
# Project
class ProjectManager(models.Manager):
    def for_team(self, team_identifier):
        """Return all non-default projects for the given team.

        The team_identifier passed may be an actual Team object, a string
        (or unicode string) containing a team slug, or the primary key of
        a team as an integer.

        Raises Team.DoesNotExist if no matching team can be found.
        """
        if hasattr(team_identifier, "pk"):
            # Already a Team instance (or something that quacks like one).
            team = team_identifier
        elif isinstance(team_identifier, int):
            team = Team.objects.get(pk=team_identifier)
        else:
            # Treat anything else as a slug.  The previous
            # isinstance(team_identifier, str) check missed unicode slugs
            # (common in this Python 2 codebase), which silently left
            # `team` unbound and raised NameError on the line below.
            team = Team.objects.get(slug=team_identifier)
        return Project.objects.filter(team=team).exclude(name=Project.DEFAULT_NAME)
class Project(models.Model):
    """A grouping of TeamVideos within a Team.

    All team videos belong to a project, whether the team has enabled them
    or not; the default project is just a convenience UI that pretends to
    be part of the team.  If this ever gets changed, you need to change
    migrations/0044.
    """
    DEFAULT_NAME = "_root"

    team = models.ForeignKey(Team)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(blank=True)  # stamped in save()
    name = models.CharField(max_length=255, null=False)
    description = models.TextField(blank=True, null=True, max_length=2048)
    guidelines = models.TextField(blank=True, null=True, max_length=2048)
    slug = models.SlugField(blank=True)  # derived from name in save() if empty
    order = models.PositiveIntegerField(default=0)
    workflow_enabled = models.BooleanField(default=False)

    objects = ProjectManager()

    def __unicode__(self):
        if self.is_default_project:
            # The default project is an internal placeholder; don't show
            # its "_root" name to users.
            return u"---------"
        return u"%s" % (self.name)

    def display(self, default_project_label=None):
        """Return a display label, substituting default_project_label for
        the default project when one is given."""
        if self.is_default_project and default_project_label is not None:
            return default_project_label
        return self.__unicode__()

    def save(self, slug=None, *args, **kwargs):
        """Save the project, stamping `modified` and deriving `slug`.

        An explicit `slug` argument always wins; otherwise an empty slug is
        derived from the project name.
        """
        self.modified = datetime.datetime.now()
        if slug is not None:
            self.slug = pan_slugify(slug)
        elif not self.slug:
            self.slug = pan_slugify(self.name)
        super(Project, self).save(*args, **kwargs)

    @property
    def is_default_project(self):
        """Return whether this project is a default project for a team."""
        return self.name == Project.DEFAULT_NAME

    def get_site_url(self):
        """Return the full, absolute URL for this project, including http:// and the domain."""
        return '%s://%s%s' % (DEFAULT_PROTOCOL, Site.objects.get_current().domain, self.get_absolute_url())

    @models.permalink
    def get_absolute_url(self):
        return ('teams:project_video_list', [self.team.slug, self.slug])

    @property
    def videos_count(self):
        """Return the number of videos in this project.

        Caches the result in-object for performance.
        """
        if not hasattr(self, '_videos_count'):
            setattr(self, '_videos_count', TeamVideo.objects.filter(project=self).count())
        return self._videos_count

    def _count_tasks(self):
        """Count this project's incomplete, undeleted tasks, capped at 1001.

        BUG FIX: this used to read `tasks.filter(team_video__project=self)`,
        but `tasks` at that point referred to the module-level celery tasks
        module (there was only a dead, unused local in tasks_count), which
        raised AttributeError.  Build the queryset explicitly instead,
        mirroring Team._count_tasks.
        """
        qs = Task.objects.filter(team=self.team, deleted=False,
                                 completed=None, team_video__project=self)
        # quick, check, are there more than 1000 tasks, if so return 1001, and
        # let the UI display > 1000
        if qs[1000:1001].exists():
            return 1001
        else:
            return qs.count()

    @property
    def tasks_count(self):
        """Return the number of incomplete, undeleted tasks in this project.

        Caches the result in-object for performance.  Capped at 1001; see
        _count_tasks().
        """
        if not hasattr(self, '_tasks_count'):
            setattr(self, '_tasks_count', self._count_tasks())
        return self._tasks_count

    class Meta:
        unique_together = (
            ("team", "name",),
            ("team", "slug",),
        )
        permissions = PROJECT_PERMISSIONS
# TeamVideo
class TeamVideo(models.Model):
    """The membership of a Video in a Team (and a Project within it).

    Carries the team-specific metadata for the video: description,
    thumbnail, who added it, and which project it belongs to.
    """
    THUMBNAIL_SIZE = (288, 162)

    team = models.ForeignKey(Team)
    video = models.OneToOneField(Video)
    description = models.TextField(blank=True,
        help_text=_(u'Use this space to explain why you or your team need to '
                    u'caption or subtitle this video. Adding a note makes '
                    u'volunteers more likely to help out!'))
    thumbnail = S3EnabledImageField(upload_to='teams/video_thumbnails/', null=True, blank=True,
        help_text=_(u'We automatically grab thumbnails for certain sites, e.g. Youtube'),
        thumb_sizes=(THUMBNAIL_SIZE, (120,90),))
    all_languages = models.BooleanField(_('Need help with all languages'), default=False,
        help_text=_(u'If you check this, other languages will not be displayed.'))
    added_by = models.ForeignKey(User, null=True)
    # this is an auto_add like field, but done on the model save so the
    # admin doesn't throw a fit
    created = models.DateTimeField(blank=True)
    partner_id = models.CharField(max_length=100, blank=True, default="")
    project = models.ForeignKey(Project)

    class Meta:
        unique_together = (('team', 'video'),)

    def __unicode__(self):
        return unicode(self.video)

    @models.permalink
    def get_absolute_url(self):
        return ('teams:team_video', [self.pk])

    def get_tasks_page_url(self):
        """Return the team's tasks page URL filtered down to this video."""
        return "%s?team_video=%s" % (self.team.get_tasks_page_url(), self.pk)

    def get_thumbnail(self):
        """Return the best available thumbnail URL.

        Preference order: team-video thumbnail, then the video's own
        thumbnail, then a static placeholder image.
        """
        if self.thumbnail:
            return self.thumbnail.thumb_url(*TeamVideo.THUMBNAIL_SIZE)
        video_thumb = self.video.get_thumbnail(fallback=False)
        if video_thumb:
            return video_thumb
        return "%simages/video-no-thumbnail-medium.png" % settings.STATIC_URL

    def _original_language(self):
        """Return (and cache on self) the video's original subtitle
        language code, or None when the video has no subtitle language."""
        if not hasattr(self, 'original_language_code'):
            sub_lang = self.video.subtitle_language()
            setattr(self, 'original_language_code', None if not sub_lang else sub_lang.language)
        return getattr(self, 'original_language_code')

    def save(self, *args, **kwargs):
        """Save, defaulting the project and stamping `created` on first save.

        Also invalidates the video's caches, since team membership affects
        how the video is served.
        """
        if not hasattr(self, "project"):
            self.project = self.team.default_project
        # Sanity check: a team video may only live in one of its own
        # team's projects.
        assert self.project.team == self.team, \
            "%s: Team (%s) is not equal to project's (%s) team (%s)"\
            % (self, self.team, self.project, self.project.team)
        if not self.pk:
            self.created = datetime.datetime.now()
        self.video.cache.invalidate()
        self.video.clear_team_video_cache()
        super(TeamVideo, self).save(*args, **kwargs)

    def is_checked_out(self, ignore_user=None):
        '''Return whether this video is checked out in a task.

        If a user is given, checkouts by that user will be ignored. This
        provides a way to ask "can user X check out or work on this task?".

        This is similar to the writelocking done on Videos and
        SubtitleLanguages.
        '''
        tasks = self.task_set.filter(
            # Find all tasks for this video which:
            deleted=False,           # - Aren't deleted
            assignee__isnull=False,  # - Are assigned to someone
            language="",             # - Aren't specific to a language
            completed__isnull=True,  # - Are unfinished
        )
        if ignore_user:
            tasks = tasks.exclude(assignee=ignore_user)
        return tasks.exists()

    # Convenience functions
    def subtitles_started(self):
        """Return whether subtitles have been started for this video."""
        from subtitles.models import SubtitleLanguage
        return (SubtitleLanguage.objects.having_nonempty_versions()
                                        .filter(video=self.video)
                                        .exists())

    def subtitles_finished(self):
        """Return whether at least one set of subtitles has been finished for this video."""
        qs = (self.video.newsubtitlelanguage_set.having_public_versions()
                                                .filter(subtitles_complete=True))
        # A language only counts as finished if its subtitles are synced.
        for lang in qs:
            if lang.is_synced():
                return True
        return False

    def get_workflow(self):
        """Return the appropriate Workflow for this TeamVideo."""
        return Workflow.get_for_team_video(self)

    def move_to(self, new_team, project=None):
        """
        Moves this TeamVideo to a new team.

        This method expects you to have run the correct permissions checks.
        """
        old_team = self.team
        if old_team == new_team and project == self.project:
            # Nothing to do: same team and same project.
            return
        within_team = (old_team == new_team)
        # these imports are here to avoid circular imports, hacky
        from teams.signals import api_teamvideo_new
        from teams.signals import video_moved_from_team_to_team
        from videos import metadata_manager
        # For now, we'll just delete any tasks associated with the moved video.
        if not within_team:
            self.task_set.update(deleted=True)
        # We move the video by just switching the team, instead of deleting and
        # recreating it.
        self.team = new_team
        # projects are always team dependent:
        if project:
            self.project = project
        else:
            self.project = new_team.default_project
        self.save()
        if not within_team:
            # We need to make any as-yet-unmoderated versions public.
            # TODO: Dedupe this and the team video delete signal.
            video = self.video
            video.newsubtitleversion_set.extant().update(visibility='public')
            video.is_public = new_team.is_visible
            video.moderated_by = new_team if new_team.moderates_videos() else None
            video.save()
            TeamVideoMigration.objects.create(from_team=old_team,
                                              to_team=new_team,
                                              to_project=self.project)
            # Update search data and other things
            video_changed_tasks.delay(video.pk)
            # Create any necessary tasks.
            autocreate_tasks(self)
            # fire a http notification that a new video has hit this team:
            api_teamvideo_new.send(self)
            video_moved_from_team_to_team.send(sender=self,
                                               destination_team=new_team,
                                               video=self.video)

    def get_task_for_editor(self, language_code):
        """Return (and cache) the review/approve task the editor should
        attach to for this language, or None."""
        if not hasattr(self, '_editor_task'):
            self._editor_task = self._get_task_for_editor(language_code)
        return self._editor_task

    def _get_task_for_editor(self, language_code):
        task_set = self.task_set.incomplete().filter(language=language_code)
        # 2533: We can get 2 review tasks if we include translate/transcribe
        # tasks in the results.  This is because when we have a task id and
        # the user clicks endorse, we do the following:
        #   - save the subtitles
        #   - save the task, setting subtitle_version to the version that we
        #     just saved
        #
        # However, the task code creates a task on both of those steps.  I'm
        # not sure exactly what the old editor does to make this not happen,
        # but it's safest to just not send task_id in that case
        task_set = task_set.filter(type__in=(Task.TYPE_IDS['Review'],
                                             Task.TYPE_IDS['Approve']))
        # This assumes there is only 1 incomplete tasks at once, hopefully
        # that's a good enough assumption to hold until we dump tasks for the
        # collab model.
        tasks = list(task_set[:1])
        if tasks:
            return tasks[0]
        else:
            return None

    @staticmethod
    def get_videos_non_language_ids(team, language_code, non_empty_language_code=False):
        """Return ids of the team's videos whose primary audio language is
        not `language_code`.

        NOTE(review): with non_empty_language_code=True the exclusion uses
        `__gt=language_code` rather than equality — looks like it is meant
        to also skip empty codes, but confirm against the callers.
        """
        if non_empty_language_code:
            return TeamVideo.objects.filter(
                team=team).exclude(
                video__primary_audio_language_code__gt=language_code).values_list('id', flat=True)
        return TeamVideo.objects.filter(
            team=team).exclude(
            video__primary_audio_language_code=language_code).values_list('id', flat=True)
class TeamVideoMigration(models.Model):
    """Audit record of a TeamVideo move between teams/projects."""
    from_team = models.ForeignKey(Team, related_name='+')
    to_team = models.ForeignKey(Team, related_name='+')
    to_project = models.ForeignKey(Project, related_name='+')
    datetime = models.DateTimeField()

    def __init__(self, *args, **kwargs):
        # Default the timestamp here (instead of auto_now_add) so callers
        # may pass an explicit datetime and tests can patch now().
        if 'datetime' not in kwargs:
            kwargs['datetime'] = self.now()
        models.Model.__init__(self, *args, **kwargs)

    @staticmethod
    def now():
        # Make now a function so we can patch it in the unittests
        return datetime.datetime.now()
def _create_translation_tasks(team_video, subtitle_version=None):
    """Create any translation tasks that should be autocreated for this video.

    One Translate task is created per preferred team language that is
    neither already complete nor already covered by an existing task.

    subtitle_version should be the original SubtitleVersion that these tasks
    will probably be translating from.  (Currently unused here; kept for the
    callers' interface.)
    """
    preferred_langs = TeamLanguagePreference.objects.get_preferred(team_video.team)
    for lang in preferred_langs:
        # Don't create tasks for languages that are already complete.
        sl = team_video.video.subtitle_language(lang)
        if sl and sl.is_complete_and_synced():
            continue
        # Don't create tasks for languages that already have one.  This
        # includes review/approve tasks and such.
        # Doesn't matter if it's complete or not.
        task_exists = Task.objects.not_deleted().filter(
            team=team_video.team, team_video=team_video, language=lang
        ).exists()
        if task_exists:
            continue
        # Otherwise, go ahead and create it.
        task = Task(team=team_video.team, team_video=team_video,
                    language=lang, type=Task.TYPE_IDS['Translate'])
        # we should only update the team video after all tasks for
        # this video are saved, else we end up with a lot of
        # wasted tasks
        task.save(update_team_video_index=False)
    # Re-index the team video once, after the whole batch of tasks.
    tasks.update_one_team_video.delay(team_video.pk)
def autocreate_tasks(team_video):
    """Create the initial tasks a team's workflow asks for on this video.

    Creates a Subtitle task when the workflow autocreates subtitles and no
    completed public subtitles (and no tasks) exist yet; otherwise creates
    Translate tasks for the team's preferred languages when configured.
    """
    workflow = Workflow.get_for_team_video(team_video)
    existing_subtitles = team_video.video.completed_subtitle_languages(public_only=True)

    # We may need to create a transcribe task, if there are no existing subs.
    if workflow.autocreate_subtitle and not existing_subtitles:
        if not team_video.task_set.not_deleted().exists():
            original_language = team_video.video.primary_audio_language_code
            Task(team=team_video.team,
                 team_video=team_video,
                 subtitle_version=None,
                 language= original_language or '',
                 type=Task.TYPE_IDS['Subtitle']
            ).save()

    # If there are existing subtitles, we may need to create translate tasks.
    #
    # TODO: This sets the "source version" for the translations to an arbitrary
    #       language's version.  In practice this probably won't be a problem
    #       because most teams will transcribe one language and then send to a
    #       new team for translation, but we can probably be smarter about this
    #       if we spend some time.
    if workflow.autocreate_translate and existing_subtitles:
        _create_translation_tasks(team_video)
def team_video_save(sender, instance, created, **kwargs):
    """Update the Solr index for this team video (post_save handler).

    The re-index happens asynchronously via a celery task.

    TODO: Rename this to something more specific.
    """
    tasks.update_one_team_video.delay(instance.id)
def team_video_delete(sender, instance, **kwargs):
    """Perform necessary actions for when a TeamVideo is deleted.

    Removes the video from the search index, publishes any private subtitle
    versions, clears moderation, and refreshes metadata/search/caches.

    TODO: Split this up into separate signals.
    """
    from videos import metadata_manager
    # not using an async task for this since the async task
    # could easily execute way after the instance is gone,
    # and backend.remove requires the instance.
    tv_search_index = site.get_index(TeamVideo)
    tv_search_index.backend.remove(instance)
    try:
        video = instance.video
        # we need to publish all unpublished subs for this video:
        NewSubtitleVersion.objects.filter(video=video,
                                          visibility='private').update(visibility='public')
        # Once out of the team, the video is public and unmoderated again.
        video.is_public = True
        video.moderated_by = None
        video.save()
        metadata_manager.update_metadata(video.pk)
        video.update_search_index()
    except Video.DoesNotExist:
        # The video itself may already be gone (cascading delete).
        pass
    if instance.video_id is not None:
        Video.cache.invalidate_by_pk(instance.video_id)
def on_language_deleted(sender, **kwargs):
    """When a language is deleted, delete all tasks associated with it.

    `sender` is the deleted subtitle language; its video may or may not
    belong to a team.
    """
    team_video = sender.video.get_team_video()
    if not team_video:
        # Not a team video: nothing to clean up.
        return
    Task.objects.filter(team_video=team_video,
                        language=sender.language_code).delete()
    # check if there are no more source languages for the video, and in that
    # case delete all transcribe tasks.  Don't delete:
    # - transcribe tasks that have already been started
    # - review tasks
    # - approve tasks
    if not sender.video.has_public_version():
        # filtering on new_subtitle_version=None excludes all 3 cases where we
        # don't want to delete tasks
        Task.objects.filter(team_video=team_video,
                            new_subtitle_version=None).delete()
def team_video_autocreate_task(sender, instance, created, raw, **kwargs):
    """Create subtitle/translation tasks for a newly added TeamVideo.

    Post-save handler; ignores plain updates (created=False) and fixture
    loads (raw=True).
    """
    if not created or raw:
        return
    autocreate_tasks(instance)
def team_video_add_video_moderation(sender, instance, created, raw, **kwargs):
    """Set the .moderated_by attribute on a newly created TeamVideo's Video, if necessary.

    Post-save handler; only fires for genuine creations (not fixture loads)
    on teams that moderate their videos.
    """
    if not created or raw:
        return
    if not instance.team.moderates_videos():
        return
    instance.video.moderated_by = instance.team
    instance.video.save()
def team_video_rm_video_moderation(sender, instance, **kwargs):
    """Clear the .moderated_by attribute on a newly deleted TeamVideo's Video, if necessary."""
    try:
        # when removing a video, this may be triggered by the FK constraint
        # and the video will already be removed
        instance.video.moderated_by = None
        instance.video.save()
    except Video.DoesNotExist:
        pass
# Wire up the TeamVideo lifecycle handlers defined above.  Each connect()
# uses a dispatch_uid so re-importing this module never double-registers
# a handler.
post_save.connect(team_video_save, TeamVideo, dispatch_uid="teams.teamvideo.team_video_save")
post_save.connect(team_video_autocreate_task, TeamVideo, dispatch_uid='teams.teamvideo.team_video_autocreate_task')
post_save.connect(team_video_add_video_moderation, TeamVideo, dispatch_uid='teams.teamvideo.team_video_add_video_moderation')
post_delete.connect(team_video_delete, TeamVideo, dispatch_uid="teams.teamvideo.team_video_delete")
post_delete.connect(team_video_rm_video_moderation, TeamVideo, dispatch_uid="teams.teamvideo.team_video_rm_video_moderation")
language_deleted.connect(on_language_deleted, dispatch_uid="teams.subtitlelanguage.language_deleted")
# TeamMember
class TeamMemberManager(models.Manager):
    """Manager for TeamMember; also used for related-field lookups."""
    use_for_related_fields = True

    def create_first_member(self, team, user):
        """Make sure that new teams always have an 'owner' member."""
        tm = TeamMember(team=team, user=user, role=ROLE_OWNER)
        tm.save()
        return tm

    def admins(self):
        """Return members whose role is owner or admin."""
        return self.filter(role__in=(ROLE_OWNER, ROLE_ADMIN))
class TeamMember(models.Model):
    """A user's membership in a team, with a role controlling permissions."""
    # Role constants re-exported on the class for convenient access.
    ROLE_OWNER = ROLE_OWNER
    ROLE_ADMIN = ROLE_ADMIN
    ROLE_MANAGER = ROLE_MANAGER
    ROLE_CONTRIBUTOR = ROLE_CONTRIBUTOR

    ROLES = (
        (ROLE_OWNER, _("Owner")),
        (ROLE_MANAGER, _("Manager")),
        (ROLE_ADMIN, _("Admin")),
        (ROLE_CONTRIBUTOR, _("Contributor")),
    )

    team = models.ForeignKey(Team, related_name='members')
    user = models.ForeignKey(User, related_name='team_members')
    role = models.CharField(max_length=16, default=ROLE_CONTRIBUTOR, choices=ROLES, db_index=True)
    created = models.DateTimeField(default=datetime.datetime.now, null=True,
                                   blank=True)

    objects = TeamMemberManager()

    def __unicode__(self):
        return u'%s' % self.user

    def save(self, *args, **kwargs):
        """Save and invalidate the cached Team, since membership changed."""
        super(TeamMember, self).save(*args, **kwargs)
        Team.cache.invalidate_by_pk(self.team_id)

    def delete(self):
        """Delete and invalidate the cached Team, since membership changed."""
        super(TeamMember, self).delete()
        Team.cache.invalidate_by_pk(self.team_id)

    def project_narrowings(self):
        """Return any project narrowings applied to this member."""
        return self.narrowings.filter(project__isnull=False)

    def language_narrowings(self):
        """Return any language narrowings applied to this member."""
        return self.narrowings.filter(project__isnull=True)

    def project_narrowings_fast(self):
        """Return any project narrowings applied to this member.

        Caches the result in-object for speed (via narrowings_fast).
        """
        return [n for n in self.narrowings_fast() if n.project]

    def language_narrowings_fast(self):
        """Return any language narrowings applied to this member.

        Caches the result in-object for speed (via narrowings_fast).
        """
        return [n for n in self.narrowings_fast() if n.language]

    def narrowings_fast(self):
        """Return any narrowings (both project and language) applied to this member.

        Caches the result in-object for speed.
        """
        if hasattr(self, '_cached_narrowings'):
            if self._cached_narrowings is not None:
                return self._cached_narrowings
        self._cached_narrowings = self.narrowings.all()
        return self._cached_narrowings

    def has_max_tasks(self):
        """Return whether this member has the maximum number of tasks.

        Always False when the team sets no per-member limit.
        """
        max_tasks = self.team.max_tasks_per_member
        if max_tasks:
            if self.user.task_set.incomplete().filter(team=self.team).count() >= max_tasks:
                return True
        return False

    def is_manager(self):
        """Test if the user is a manager or above."""
        return self.role in (ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER)

    def is_admin(self):
        """Test if the user is an admin or owner."""
        return self.role in (ROLE_OWNER, ROLE_ADMIN)

    class Meta:
        unique_together = (('team', 'user'),)
def clear_tasks(sender, instance, *args, **kwargs):
    """Unassign all of a member's incomplete tasks on their team.

    Used (via pre_delete on TeamMember) when a user is removed from a team.
    """
    (instance.team.task_set.incomplete()
        .filter(assignee=instance.user)
        .update(assignee=None))
pre_delete.connect(clear_tasks, TeamMember, dispatch_uid='teams.members.clear-tasks-on-delete')
# MembershipNarrowing
class MembershipNarrowing(models.Model):
    """Represent narrowings that can be made on memberships.

    A single MembershipNarrowing can apply to a project or a language, but not both.
    """
    member = models.ForeignKey(TeamMember, related_name="narrowings")
    project = models.ForeignKey(Project, null=True, blank=True)
    language = models.CharField(max_length=24, blank=True,
                                choices=translation.ALL_LANGUAGE_CHOICES)
    added_by = models.ForeignKey(TeamMember, related_name="narrowing_includer", null=True, blank=True)
    # NOTE(review): blank=None looks like a typo for blank=True — confirm.
    created = models.DateTimeField(auto_now_add=True, blank=None)
    modified = models.DateTimeField(auto_now=True, blank=None)

    def __unicode__(self):
        if self.project:
            return u"Permission restriction for %s to project %s " % (self.member, self.project)
        else:
            return u"Permission restriction for %s to language %s " % (self.member, self.language)

    def save(self, *args, **kwargs):
        """Save after asserting uniqueness, then invalidate the cached Team."""
        # Cannot have duplicate narrowings for a language.
        if self.language:
            duplicate_exists = MembershipNarrowing.objects.filter(
                member=self.member, language=self.language
            ).exclude(id=self.id).exists()
            assert not duplicate_exists, "Duplicate language narrowing detected!"
        # Cannot have duplicate narrowings for a project.
        if self.project:
            duplicate_exists = MembershipNarrowing.objects.filter(
                member=self.member, project=self.project
            ).exclude(id=self.id).exists()
            assert not duplicate_exists, "Duplicate project narrowing detected!"
        super(MembershipNarrowing, self).save(*args, **kwargs)
        Team.cache.invalidate_by_pk(self.member.team_id)

    def delete(self):
        """Delete and invalidate the cached Team."""
        super(MembershipNarrowing, self).delete()
        Team.cache.invalidate_by_pk(self.member.team_id)
class TeamSubtitleNote(SubtitleNoteBase):
    """A subtitle note scoped to a team (see SubtitleNoteBase)."""
    team = models.ForeignKey(Team, related_name='+')
class ApplicationInvalidException(Exception):
    """Raised when an Application state change is attempted from an invalid state."""
    pass
class ApplicationManager(models.Manager):
    def can_apply(self, team, user):
        """Return whether `user` may apply to join `team`.

        A user can apply only if they are not yet a member, the team hasn't
        previously said no to them (application denied, or membership
        removed), and no application is currently pending.
        """
        sour_application_exists = self.filter(team=team, user=user, status__in=[
            Application.STATUS_MEMBER_REMOVED, Application.STATUS_DENIED,
            Application.STATUS_PENDING]).exists()
        if sour_application_exists:
            return False
        return not team.is_member(user)

    def open(self, team=None, user=None):
        """Return pending applications, optionally narrowed by team and/or user."""
        qs = self.filter(status=Application.STATUS_PENDING)
        if team:
            qs = qs.filter(team=team)
        if user:
            qs = qs.filter(user=user)
        return qs
# Application
class Application(models.Model):
    """A user's request to join a team.

    Applications are never deleted; instead their integer `status` records
    the full lifecycle (pending / approved / denied / member removed /
    member left), and `history` keeps a free-text audit log.
    """
    team = models.ForeignKey(Team, related_name='applications')
    user = models.ForeignKey(User, related_name='team_applications')
    note = models.TextField(blank=True)

    # Integer status codes; see STATUSES for their display names.
    # (The old comment here described Invite's None/True/False scheme,
    # which does not apply to Application.)
    STATUS_PENDING, STATUS_APPROVED, STATUS_DENIED, STATUS_MEMBER_REMOVED,\
    STATUS_MEMBER_LEFT = xrange(0, 5)
    STATUSES = (
        (STATUS_PENDING, u"Pending"),
        (STATUS_APPROVED, u"Approved"),
        (STATUS_DENIED, u"Denied"),
        (STATUS_MEMBER_REMOVED, u"Member Removed"),
        (STATUS_MEMBER_LEFT, u"Member Left"),
    )
    # Reverse mapping: display name -> status id.
    STATUSES_IDS = dict([choice[::-1] for choice in STATUSES])

    status = models.PositiveIntegerField(default=STATUS_PENDING, choices=STATUSES)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(blank=True, null=True)

    # free text keeping a log of changes to this application
    history = models.TextField(blank=True, null=True)

    objects = ApplicationManager()

    class Meta:
        unique_together = (('team', 'user', 'status'),)

    def approve(self, author, interface):
        """Approve the application.

        This will create an appropriate TeamMember if this application has
        not been already acted upon.  Raises ApplicationInvalidException
        unless the application is pending or the member previously left.
        """
        if self.status not in (Application.STATUS_PENDING, Application.STATUS_MEMBER_LEFT):
            raise ApplicationInvalidException("")
        member, created = TeamMember.objects.get_or_create(team=self.team, user=self.user)
        if created:
            notifier.team_member_new.delay(member.pk)
        self.modified = datetime.datetime.now()
        self.status = Application.STATUS_APPROVED
        self.save(author=author, interface=interface)
        return self

    def deny(self, author, interface):
        """Mark the application as denied and queue the denial notification.

        Raises ApplicationInvalidException unless the application is pending.
        """
        if self.status != Application.STATUS_PENDING:
            raise ApplicationInvalidException("")
        self.modified = datetime.datetime.now()
        self.status = Application.STATUS_DENIED
        self.save(author=author, interface=interface)
        notifier.team_application_denied.delay(self.pk)
        return self

    def on_member_leave(self, author, interface):
        """Mark that the member left the team; they may reapply later."""
        self.status = Application.STATUS_MEMBER_LEFT
        self.save(author=author, interface=interface)

    def on_member_removed(self, author, interface):
        """Mark that the member was removed; they cannot reapply."""
        self.status = Application.STATUS_MEMBER_REMOVED
        self.save(author=author, interface=interface)

    def _generate_history_line(self, new_status, author=None, interface=None):
        """Build one audit-log line, e.g. "Approved by X from web UI (<now>)"."""
        author = author or "?"
        interface = interface or "web UI"
        # Use `is not None` (not truthiness): STATUS_PENDING is 0 and must
        # not be treated as a missing status.
        new_status = new_status if new_status is not None else Application.STATUS_PENDING
        status_name = None
        for value, name in Application.STATUSES:
            if value == new_status:
                status_name = name
        # Previously an unknown status left `status` unbound and raised a
        # confusing NameError; fail with a clear assertion instead.
        assert status_name is not None, "Unknown application status: %r" % new_status
        return u"%s by %s from %s (%s)\n" % (status_name, author, interface, datetime.datetime.now())

    def save(self, dispatches_http_callback=True, author=None, interface=None, *args, **kwargs):
        """
        Saves the model, but also appends a line on the history for that
        model, like these:
            - CoolGuy Approved through the web UI.
            - Arthur Left team through the web UI.
        This way, we can keep one application per user per team, never
        delete them (so the messages stay current) and we still can
        track history.
        """
        self.history = (self.history or "") + self._generate_history_line(self.status, author, interface)
        super(Application, self).save(*args, **kwargs)
        if dispatches_http_callback:
            from teams.signals import api_application_new
            api_application_new.send(self)

    def __unicode__(self):
        return "Application: %s - %s - %s" % (self.team.slug, self.user.username, self.get_status_display())
# Invites
class InviteExpiredException(Exception):
    """Raised when acting on an invite that was already accepted or denied."""
    pass
class InviteManager(models.Manager):
    def pending_for(self, team, user):
        """Return this team/user's invites that have not yet been acted upon."""
        return self.filter(team=team, user=user, approved=None)

    def acted_on(self, team, user):
        """Return this team/user's invites that were accepted or denied.

        BUG FIX: the original used `approved__notnull=True`, which is not a
        valid Django field lookup and raises FieldError at query time; the
        correct lookup for "approved is not NULL" is `approved__isnull=False`.
        """
        return self.filter(team=team, user=user, approved__isnull=False)
class Invite(models.Model):
    """An invitation for a user to join a team with a given role."""
    team = models.ForeignKey(Team, related_name='invitations')
    user = models.ForeignKey(User, related_name='team_invitations')
    note = models.TextField(blank=True, max_length=200)
    author = models.ForeignKey(User)
    role = models.CharField(max_length=16, choices=TeamMember.ROLES,
                            default=TeamMember.ROLE_CONTRIBUTOR)
    # None -> not acted upon
    # True -> Approved
    # False -> Rejected
    approved = models.NullBooleanField(default=None)

    objects = InviteManager()

    def accept(self):
        """Accept this invitation.

        Creates an appropriate TeamMember record, sends a notification for
        new members, and marks the invite as approved.  Raises
        InviteExpiredException if the invite was already acted upon.
        """
        if self.approved is not None:
            raise InviteExpiredException("")
        self.approved = True
        member, created = TeamMember.objects.get_or_create(
            team=self.team, user=self.user, role=self.role)
        if created:
            notifier.team_member_new.delay(member.pk)
        self.save()
        return True

    def deny(self):
        """Deny this invitation.

        Could be useful to send a notification here in the future.
        Raises InviteExpiredException if the invite was already acted upon.
        """
        if self.approved is not None:
            raise InviteExpiredException("")
        self.approved = False
        self.save()

    def message_json_data(self, data, msg):
        # Invite messages cannot be replied to.
        data['can-reply'] = False
        return data
# Workflows
class Workflow(models.Model):
    """Task-workflow settings attached to a team, a project, or one video.

    Lookup resolves to the most specific matching workflow; see
    get_for_target() below.
    """
    REVIEW_CHOICES = (
        (00, "Don't require review"),
        (10, 'Peer must review'),
        (20, 'Manager must review'),
        (30, 'Admin must review'),
    )
    REVIEW_NAMES = dict(REVIEW_CHOICES)
    # Reverse mapping: display name -> id.
    REVIEW_IDS = dict([choice[::-1] for choice in REVIEW_CHOICES])

    APPROVE_CHOICES = (
        (00, "Don't require approval"),
        (10, 'Manager must approve'),
        (20, 'Admin must approve'),
    )
    APPROVE_NAMES = dict(APPROVE_CHOICES)
    # Reverse mapping: display name -> id.
    APPROVE_IDS = dict([choice[::-1] for choice in APPROVE_CHOICES])

    team = models.ForeignKey(Team)
    # Scope fields: presumably at most one of project/team_video is set per
    # row (team-wide, per-project, or per-video) — confirm with callers.
    project = models.ForeignKey(Project, blank=True, null=True)
    team_video = models.ForeignKey(TeamVideo, blank=True, null=True)

    autocreate_subtitle = models.BooleanField(default=False)
    autocreate_translate = models.BooleanField(default=False)

    review_allowed = models.PositiveIntegerField(
        choices=REVIEW_CHOICES, verbose_name='reviewers', default=0)
    approve_allowed = models.PositiveIntegerField(
        choices=APPROVE_CHOICES, verbose_name='approvers', default=0)

    created = models.DateTimeField(auto_now_add=True, editable=False)
    modified = models.DateTimeField(auto_now=True, editable=False)

    class Meta:
        unique_together = ('team', 'project', 'team_video')
    @classmethod
    def _get_target_team(cls, id, type):
        """Return the team for the given target.

        The target is identified by id (its PK as an integer) and type (a string
        of 'team_video', 'project', or 'team').
        """
        if type == 'team_video':
            return TeamVideo.objects.select_related('team').get(pk=id).team
        elif type == 'project':
            return Project.objects.select_related('team').get(pk=id).team
        else:
            return Team.objects.get(pk=id)
    @classmethod
    def get_for_target(cls, id, type, workflows=None):
        '''Return the most specific Workflow for the given target.

        Resolution order: video-specific workflow, then its project's
        workflow (if the project enables workflows), then the team-wide
        workflow (if the team enables workflows), else a fresh default
        Workflow for the team.

        If workflows is given, it should be a QS or List of all Workflows for
        the TeamVideo's team. This will let you look it up yourself once and
        use it in many of these calls to avoid hitting the DB each time.

        If workflows is not given it will be looked up with one DB query.
        '''
        if not workflows:
            team = Workflow._get_target_team(id, type)
            workflows = list(Workflow.objects.filter(team=team.id)
                                             .select_related('project', 'team',
                                                             'team_video'))
        else:
            team = workflows[0].team

        default_workflow = Workflow(team=team)

        if not workflows:
            return default_workflow

        if type == 'team_video':
            try:
                return [w for w in workflows
                        if w.team_video and w.team_video.id == id][0]
            except IndexError:
                # If there's no video-specific workflow for this video, there
                # might be a workflow for its project, so we'll start looking
                # for that instead.
                team_video = TeamVideo.objects.get(pk=id)
                id, type = team_video.project_id, 'project'

        if type == 'project':
            try:
                return [w for w in workflows
                        if w.project and w.project.workflow_enabled
                        and w.project.id == id and not w.team_video][0]
            except IndexError:
                # If there's no project-specific workflow for this project,
                # there might be one for its team, so we'll fall through.
                pass

        if not team.workflow_enabled:
            return default_workflow

        # Team-wide workflow: the row with neither project nor video set.
        return [w for w in workflows
                if (not w.project) and (not w.team_video)][0]
@classmethod
def get_for_team_video(cls, team_video, workflows=None):
'''Return the most specific Workflow for the given team_video.
If workflows is given, it should be a QuerySet or List of all Workflows
for the TeamVideo's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
NOTE: This function caches the workflow for performance reasons. If the
workflow changes within the space of a single request that
_cached_workflow should be cleared.
'''
if not hasattr(team_video, '_cached_workflow'):
team_video._cached_workflow = Workflow.get_for_target(
team_video.id, 'team_video', workflows)
return team_video._cached_workflow
@classmethod
def get_for_project(cls, project, workflows=None):
'''Return the most specific Workflow for the given project.
If workflows is given, it should be a QuerySet or List of all Workflows
for the Project's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
'''
return Workflow.get_for_target(project.id, 'project', workflows)
@classmethod
def add_to_team_videos(cls, team_videos):
'''Add the appropriate Workflow objects to each TeamVideo as .workflow.
This will only perform one DB query, and it will add the most specific
workflow possible to each TeamVideo.
This only exists for performance reasons.
'''
if not team_videos:
return []
workflows = list(Workflow.objects.filter(team=team_videos[0].team))
for tv in team_videos:
tv.workflow = Workflow.get_for_team_video(tv, workflows)
def get_specific_target(self):
"""Return the most specific target that this workflow applies to."""
return self.team_video or self.project or self.team
def __unicode__(self):
target = self.get_specific_target()
return u'Workflow %s for %s (%s %d)' % (
self.pk, target, target.__class__.__name__, target.pk)
# Convenience functions for checking if a step of the workflow is enabled.
@property
def review_enabled(self):
"""Return whether any form of review is enabled for this workflow."""
return True if self.review_allowed else False
@property
def approve_enabled(self):
"""Return whether any form of approval is enabled for this workflow."""
return True if self.approve_allowed else False
@property
def requires_review_or_approval(self):
"""Return whether a given workflow requires review or approval."""
return self.approve_enabled or self.review_enabled
@property
def requires_tasks(self):
"""Return whether a given workflow requires the use of tasks."""
return (self.requires_review_or_approval or self.autocreate_subtitle
or self.autocreate_translate)
# Tasks
class TaskManager(models.Manager):
    """Manager exposing convenience querysets over Task workflow states."""
    def not_deleted(self):
        """Tasks whose deleted flag is unset."""
        return self.get_query_set().filter(deleted=False)
    def incomplete(self):
        """Undeleted tasks with no completion timestamp."""
        return self.not_deleted().filter(completed=None)
    def complete(self):
        """Undeleted tasks that carry a completion timestamp."""
        return self.not_deleted().filter(completed__isnull=False)
    def _type(self, types, completed=None, approved=None):
        """Undeleted tasks restricted by type label(s), completion, approval.

        types -- labels from Task.TYPE_CHOICES (e.g. ['Review']).
        completed -- True for completed only, False for incomplete only,
                     None to skip the completion filter entirely.
        approved -- None, or a label from Task.APPROVED_CHOICES.
        """
        qs = self.not_deleted().filter(
            type__in=[Task.TYPE_IDS[label] for label in types])
        # The == comparisons are deliberate: None must fall through both
        # branches and leave the queryset unfiltered on completion.
        if completed == False:
            qs = qs.filter(completed=None)
        elif completed == True:
            qs = qs.filter(completed__isnull=False)
        if approved:
            qs = qs.filter(approved=Task.APPROVED_IDS[approved])
        return qs
    def incomplete_subtitle(self):
        """Incomplete, undeleted Subtitle tasks."""
        return self._type(['Subtitle'], False)
    def incomplete_translate(self):
        """Incomplete, undeleted Translate tasks."""
        return self._type(['Translate'], False)
    def incomplete_review(self):
        """Incomplete, undeleted Review tasks."""
        return self._type(['Review'], False)
    def incomplete_approve(self):
        """Incomplete, undeleted Approve tasks."""
        return self._type(['Approve'], False)
    def incomplete_subtitle_or_translate(self):
        """Incomplete, undeleted Subtitle or Translate tasks."""
        return self._type(['Subtitle', 'Translate'], False)
    def incomplete_review_or_approve(self):
        """Incomplete, undeleted Review or Approve tasks."""
        return self._type(['Review', 'Approve'], False)
    def complete_subtitle(self):
        """Completed, undeleted Subtitle tasks."""
        return self._type(['Subtitle'], True)
    def complete_translate(self):
        """Completed, undeleted Translate tasks."""
        return self._type(['Translate'], True)
    def complete_review(self, approved=None):
        """Completed, undeleted Review tasks.

        approved, if given, must be a Task.APPROVED_CHOICES label (such
        as 'Rejected') and further narrows the result.
        """
        return self._type(['Review'], True, approved)
    def complete_approve(self, approved=None):
        """Completed, undeleted Approve tasks.

        approved, if given, must be a Task.APPROVED_CHOICES label (such
        as 'Rejected') and further narrows the result.
        """
        return self._type(['Approve'], True, approved)
    def complete_subtitle_or_translate(self):
        """Completed, undeleted Subtitle or Translate tasks."""
        return self._type(['Subtitle', 'Translate'], True)
    def complete_review_or_approve(self, approved=None):
        """Completed, undeleted Review or Approve tasks.

        approved, if given, must be a Task.APPROVED_CHOICES label (such
        as 'Rejected') and further narrows the result.
        """
        return self._type(['Review', 'Approve'], True, approved)
    def all_subtitle(self):
        """All undeleted Subtitle tasks."""
        return self._type(['Subtitle'])
    def all_translate(self):
        """All undeleted Translate tasks."""
        return self._type(['Translate'])
    def all_review(self):
        """All undeleted Review tasks."""
        return self._type(['Review'])
    def all_approve(self):
        """All undeleted Approve tasks."""
        return self._type(['Approve'])
    def all_subtitle_or_translate(self):
        """All undeleted Subtitle or Translate tasks."""
        return self._type(['Subtitle', 'Translate'])
    def all_review_or_approve(self):
        """All undeleted Review or Approve tasks."""
        return self._type(['Review', 'Approve'])
class Task(models.Model):
    """One unit of moderation work (subtitle/translate/review/approve).

    Completing a task may publish a subtitle version and/or create the
    next task in the team's workflow chain (see the _complete_* methods).
    """
    TYPE_CHOICES = (
        (10, 'Subtitle'),
        (20, 'Translate'),
        (30, 'Review'),
        (40, 'Approve'),
    )
    TYPE_NAMES = dict(TYPE_CHOICES)
    # Reverse mapping: type label -> integer id.
    TYPE_IDS = dict([choice[::-1] for choice in TYPE_CHOICES])
    APPROVED_CHOICES = (
        (10, 'In Progress'),
        (20, 'Approved'),
        (30, 'Rejected'),
    )
    APPROVED_NAMES = dict(APPROVED_CHOICES)
    APPROVED_IDS = dict([choice[::-1] for choice in APPROVED_CHOICES])
    # Approval states that represent a final review/approve decision.
    APPROVED_FINISHED_IDS = (20, 30)
    type = models.PositiveIntegerField(choices=TYPE_CHOICES)
    team = models.ForeignKey(Team)
    team_video = models.ForeignKey(TeamVideo)
    language = models.CharField(max_length=16,
                                choices=translation.ALL_LANGUAGE_CHOICES,
                                blank=True, db_index=True)
    assignee = models.ForeignKey(User, blank=True, null=True)
    subtitle_version = models.ForeignKey(SubtitleVersion, blank=True, null=True)
    new_subtitle_version = models.ForeignKey(NewSubtitleVersion,
                                             blank=True, null=True)
    # The original source version being reviewed or approved.
    #
    # For example, if person A creates two versions while working on a subtitle
    # task:
    #
    #   v1  v2
    # --o---o
    #   s   s
    #
    # and then the reviewer and approver make some edits
    #
    #   v1  v2  v3  v4  v5
    # --o---o---o---o---o
    #   s   s   r   r   a
    #       *
    #
    # the review_base_version will be v2.  Once approved, if an edit is made it
    # needs to be approved as well, and the same thing happens:
    #
    #   v1  v2  v3  v4  v5  v6  v7
    # --o---o---o---o---o---o---o
    #   s   s   r   r   a   e   a
    #                   *
    #
    # This is used when rejecting versions, and may be used elsewhere in the
    # future as well.
    review_base_version = models.ForeignKey(SubtitleVersion, blank=True,
                                            null=True,
                                            related_name='tasks_based_on')
    new_review_base_version = models.ForeignKey(NewSubtitleVersion, blank=True,
                                                null=True,
                                                related_name='tasks_based_on_new')
    deleted = models.BooleanField(default=False)
    # TODO: Remove this field.
    public = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    modified = models.DateTimeField(auto_now=True, editable=False)
    completed = models.DateTimeField(blank=True, null=True)
    expiration_date = models.DateTimeField(blank=True, null=True)
    # Arbitrary priority for tasks. Some teams might calculate this
    # on complex criteria and expect us to be able to sort tasks on it.
    # Higher numbers mean higher priority
    priority = models.PositiveIntegerField(blank=True, default=0, db_index=True)
    # Review and Approval -specific fields
    approved = models.PositiveIntegerField(choices=APPROVED_CHOICES,
                                           null=True, blank=True)
    # Free-form text from the reviewer/approver; posted as a comment on the
    # subtitle language when the task completes (see _add_comment).
    body = models.TextField(blank=True, default="")
    objects = TaskManager()
    def __unicode__(self):
        return u'Task %s (%s) for %s' % (self.id or "unsaved",
                                         self.get_type_display(),
                                         self.team_video)
    @property
    def summary(self):
        """
        Return a brief summary of the task
        """
        output = unicode(self.team_video)
        if self.body:
            # Append the first line of the body, truncated to 20 characters.
            output += unicode(self.body.split('\n',1)[0].strip()[:20])
        return output
    @staticmethod
    def now():
        """datetime.datetime.now as a method
        This lets us patch it in the unittests.
        """
        return datetime.datetime.now()
    def is_subtitle_task(self):
        # True when this task's type is 'Subtitle'.
        return self.type == Task.TYPE_IDS['Subtitle']
    def is_translate_task(self):
        # True when this task's type is 'Translate'.
        return self.type == Task.TYPE_IDS['Translate']
    def is_review_task(self):
        # True when this task's type is 'Review'.
        return self.type == Task.TYPE_IDS['Review']
    def is_approve_task(self):
        # True when this task's type is 'Approve'.
        return self.type == Task.TYPE_IDS['Approve']
    @property
    def workflow(self):
        '''Return the most specific workflow for this task's TeamVideo.'''
        return Workflow.get_for_team_video(self.team_video)
    @staticmethod
    def add_cached_video_urls(tasks):
        """Add the cached_video_url attribute to a list of tasks
        cached_video_url is the URL as a string for the video.
        """
        team_video_pks = [t.team_video_id for t in tasks]
        video_urls = (VideoUrl.objects
                      .filter(video__teamvideo__id__in=team_video_pks)
                      .filter(primary=True))
        video_url_map = dict((vu.video_id, vu.effective_url)
                             for vu in video_urls)
        for t in tasks:
            # May be None when the video has no primary VideoUrl row.
            t.cached_video_url = video_url_map.get(t.team_video.video_id)
    def _add_comment(self, lang_ct=None):
        """Add a comment on the SubtitleLanguage for this task with the body as content."""
        if self.body.strip():
            if lang_ct is None:
                lang_ct = ContentType.objects.get_for_model(NewSubtitleLanguage)
            comment = Comment(
                content=self.body,
                object_pk=self.new_subtitle_version.subtitle_language.pk,
                content_type=lang_ct,
                submit_date=self.completed,
                user=self.assignee,
            )
            comment.save()
            notifier.send_video_comment_notification.delay(
                comment.pk, version_pk=self.new_subtitle_version.pk)
    def future(self):
        """Return whether this task expires in the future.
        NOTE(review): assumes expiration_date is set -- comparing None
        against a datetime raises TypeError; confirm callers check first.
        """
        return self.expiration_date > self.now()
    # Functions related to task completion.
    def _send_back(self, sends_notification=True):
        """Handle "rejection" of this task.
        This will:
        * Create a new task with the appropriate type (translate or subtitle).
        * Try to reassign it to the previous assignee, leaving it unassigned
          if that's not possible.
        * Send a notification unless sends_notification is given as False.
        NOTE: This function does not modify the *current* task in any way.
        """
        # when sending back, instead of always sending back
        # to the first step (translate/subtitle) go to the
        # step before this one:
        # Translate/Subtitle -> Review -> Approve
        # also, you can just send back approve and review tasks.
        if self.type == Task.TYPE_IDS['Approve'] and self.workflow.review_enabled:
            type = Task.TYPE_IDS['Review']
        else:
            is_primary = (self.new_subtitle_version
                              .subtitle_language
                              .is_primary_audio_language())
            if is_primary:
                type = Task.TYPE_IDS['Subtitle']
            else:
                type = Task.TYPE_IDS['Translate']
        # let's guess which assignee should we use
        # by finding the last user that did this task type
        previous_task = Task.objects.complete().filter(
            team_video=self.team_video, language=self.language, team=self.team, type=type
        ).order_by('-completed')[:1]
        if previous_task:
            assignee = previous_task[0].assignee
        else:
            assignee = None
        # The target assignee may have left the team in the mean time.
        if not self.team.members.filter(user=assignee).exists():
            assignee = None
        task = Task(team=self.team, team_video=self.team_video,
                    language=self.language, type=type,
                    assignee=assignee)
        task.new_subtitle_version = self.new_subtitle_version
        task.set_expiration()
        task.save()
        if sends_notification:
            # notify original submiter (assignee of self)
            notifier.reviewed_and_sent_back.delay(self.pk)
        return task
    def complete_approved(self, user):
        """Mark a review/approve task as Approved and complete it.
        :param user: user who is approving the task
        :returns: next task in the workflow.
        """
        self.assignee = user
        self.approved = Task.APPROVED_IDS['Approved']
        return self.complete()
    def complete_rejected(self, user):
        """Mark a review/approve task as Rejected and complete it.
        :param user: user who is rejecting the task
        :returns: next task in the workflow.
        """
        self.assignee = user
        self.approved = Task.APPROVED_IDS['Rejected']
        return self.complete()
    def complete(self):
        '''Mark as complete and return the next task in the process if applicable.'''
        self.completed = self.now()
        self.save()
        # Dispatch on the human-readable type name to the matching handler.
        return { 'Subtitle': self._complete_subtitle,
                 'Translate': self._complete_translate,
                 'Review': self._complete_review,
                 'Approve': self._complete_approve,
               }[Task.TYPE_NAMES[self.type]]()
    def _can_publish_directly(self, subtitle_version):
        """Check whether this completed work may skip review/approval.
        NOTE(review): the mapping below sends Subtitle (10) and Translate
        (20) tasks to look for completed 'Approved' Review tasks, and
        Review (30) tasks to look for Approve ones; an Approve task (40)
        would map to None -- presumably this is never called for Approve
        tasks; confirm before reusing.
        """
        from teams.permissions import can_publish_edits_immediately
        type = {10: 'Review',
                20: 'Review',
                30: 'Approve'}.get(self.type)
        tasks = (Task.objects._type([type], True, 'Approved')
                             .filter(language=self.language))
        return (can_publish_edits_immediately(self.team_video,
                                              self.assignee,
                                              self.language) and
                subtitle_version and
                subtitle_version.previous_version() and
                subtitle_version.previous_version().is_public() and
                subtitle_version.subtitle_language.is_complete_and_synced() and
                tasks.exists())
    def _find_previous_assignee(self, type):
        """Find the previous assignee for a new review/approve task for this video.
        NOTE: This is different than finding out the person to send a task back
        to! This is for saying "who reviewed this task last time?".
        For now, we'll assign the review/approval task to whomever did it last
        time (if it was indeed done), but only if they're still eligible to
        perform it now.
        """
        from teams.permissions import can_review, can_approve
        if type == 'Approve':
            # Check if this is a post-publish edit.
            # According to #1039 we don't wanna auto-assign the assignee
            version = self.get_subtitle_version()
            if (version and
                version.is_public() and
                version.subtitle_language.is_complete_and_synced()):
                return None
            type = Task.TYPE_IDS['Approve']
            can_do = can_approve
        elif type == 'Review':
            type = Task.TYPE_IDS['Review']
            # Reviewers are allowed to re-review their own work here.
            can_do = partial(can_review, allow_own=True)
        else:
            return None
        last_task = self.team_video.task_set.complete().filter(
            language=self.language, type=type
        ).order_by('-completed')[:1]
        if last_task:
            candidate = last_task[0].assignee
            if candidate and can_do(self.team_video, candidate, self.language):
                return candidate
        # Implicitly returns None when no suitable candidate is found.
    def _complete_subtitle(self):
        """Handle the messy details of completing a subtitle task."""
        sv = self.get_subtitle_version()
        # TL;DR take a look at #1206 to know why i did this
        if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
            if self.workflow.review_enabled:
                task = Task(team=self.team, team_video=self.team_video,
                            new_subtitle_version=sv,
                            new_review_base_version=sv,
                            language=self.language, type=Task.TYPE_IDS['Review'],
                            assignee=self._find_previous_assignee('Review'))
                task.set_expiration()
                task.save()
            elif self.workflow.approve_enabled:
                task = Task(team=self.team, team_video=self.team_video,
                            new_subtitle_version=sv,
                            new_review_base_version=sv,
                            language=self.language, type=Task.TYPE_IDS['Approve'],
                            assignee=self._find_previous_assignee('Approve'))
                task.set_expiration()
                task.save()
        else:
            # Subtitle task is done, and there is no approval or review
            # required, so we mark the version as approved.
            sv.publish()
            # We need to make sure this is updated correctly here.
            from videos import metadata_manager
            metadata_manager.update_metadata(self.team_video.video.pk)
            if self.workflow.autocreate_translate:
                # TODO: Switch to autocreate_task?
                _create_translation_tasks(self.team_video, sv)
            task = None
        return task
    def _complete_translate(self):
        """Handle the messy details of completing a translate task."""
        sv = self.get_subtitle_version()
        # TL;DR take a look at #1206 to know why i did this
        if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
            if self.workflow.review_enabled:
                task = Task(team=self.team, team_video=self.team_video,
                            new_subtitle_version=sv,
                            new_review_base_version=sv,
                            language=self.language, type=Task.TYPE_IDS['Review'],
                            assignee=self._find_previous_assignee('Review'))
                task.set_expiration()
                task.save()
            elif self.workflow.approve_enabled:
                # The review step may be disabled. If so, we check the approve step.
                task = Task(team=self.team, team_video=self.team_video,
                            new_subtitle_version=sv,
                            new_review_base_version=sv,
                            language=self.language, type=Task.TYPE_IDS['Approve'],
                            assignee=self._find_previous_assignee('Approve'))
                task.set_expiration()
                task.save()
        else:
            sv.publish()
            # We need to make sure this is updated correctly here.
            from videos import metadata_manager
            metadata_manager.update_metadata(self.team_video.video.pk)
            task = None
        return task
    def _complete_review(self):
        """Handle the messy details of completing a review task."""
        approval = self.approved == Task.APPROVED_IDS['Approved']
        sv = self.get_subtitle_version()
        if approval:
            self._ensure_language_complete(sv.subtitle_language)
        self._add_comment()
        task = None
        if self.workflow.approve_enabled:
            # Approval is enabled, so...
            if approval:
                # If the reviewer thought these subtitles were good we create
                # the next task.
                task = Task(team=self.team, team_video=self.team_video,
                            new_subtitle_version=sv,
                            new_review_base_version=sv,
                            language=self.language, type=Task.TYPE_IDS['Approve'],
                            assignee=self._find_previous_assignee('Approve'))
                task.set_expiration()
                task.save()
                # Notify the appropriate users.
                notifier.reviewed_and_pending_approval.delay(self.pk)
            else:
                # Otherwise we send the subtitles back for improvement.
                task = self._send_back()
        else:
            # Approval isn't enabled, so the ruling of this Review task
            # determines whether the subtitles go public.
            if approval:
                # Make these subtitles public!
                self.new_subtitle_version.publish()
                # If the subtitles are okay, go ahead and autocreate translation
                # tasks if necessary.
                if self.workflow.autocreate_translate:
                    _create_translation_tasks(self.team_video, sv)
                # Notify the appropriate users and external services.
                notifier.reviewed_and_published.delay(self.pk)
            else:
                # Send the subtitles back for improvement.
                task = self._send_back()
        # Before we go, we need to record who reviewed these subtitles, so if
        # necessary we can "send back" to them later.
        if self.assignee:
            sv.set_reviewed_by(self.assignee)
        return task
    def do_complete_approve(self, lang_ct=None):
        # Public wrapper around _complete_approve.
        return self._complete_approve(lang_ct=lang_ct)
    def _complete_approve(self, lang_ct=None):
        """Handle the messy details of completing an approve task."""
        approval = self.approved == Task.APPROVED_IDS['Approved']
        sv = self.get_subtitle_version()
        if approval:
            self._ensure_language_complete(sv.subtitle_language)
        self._add_comment(lang_ct=lang_ct)
        if approval:
            # The subtitles are acceptable, so make them public!
            self.new_subtitle_version.publish()
            # Create translation tasks if necessary.
            if self.workflow.autocreate_translate:
                _create_translation_tasks(self.team_video, sv)
            task = None
            # Notify the appropriate users.
            notifier.approved_notification.delay(self.pk, approval)
        else:
            # Send the subtitles back for improvement.
            task = self._send_back()
        # Before we go, we need to record who approved these subtitles, so if
        # necessary we can "send back" to them later.
        if self.assignee:
            sv.set_approved_by(self.assignee)
        # Fire the external API signal matching the decision.
        if approval:
            api_subtitles_approved.send(sv)
        else:
            api_subtitles_rejected.send(sv)
        return task
    def _ensure_language_complete(self, subtitle_language):
        # Flag the language as complete if it is not already marked so.
        if not subtitle_language.subtitles_complete:
            subtitle_language.subtitles_complete = True
            subtitle_language.save()
    def get_perform_url(self):
        """Return a URL for whatever dialog is used to perform this task."""
        return reverse('teams:perform_task', args=(self.team.slug, self.id))
    def tasks_page_perform_link_text(self):
        """Get the link text for perform link on the tasks page."""
        if self.assignee:
            return _('Resume')
        else:
            return _('Start now')
    def get_widget_url(self):
        """Get the URL to edit the video for this task. """
        return reverse("subtitles:subtitle-editor", kwargs={
            "video_id": self.team_video.video.video_id,
            "language_code": self.language
        })
    def needs_start_dialog(self):
        """Check if this task needs the start dialog.
        The only time we need it is when a user is starting a
        transcribe/translate task. We don't need it for review/approval, or
        if the task is being resumed.
        """
        # We use the start dialog for select two things:
        #   - primary audio language
        #   - language of the subtitles
        return (self.language == '' or
                self.team_video.video.primary_audio_language_code == '')
    def get_reviewer(self):
        """For Approve tasks, return the last user to Review these subtitles.
        May be None if this task is not an Approve task, or if we can't figure
        out the last reviewer for any reason.
        """
        if self.get_type_display() == 'Approve':
            previous = Task.objects.complete().filter(
                team_video=self.team_video,
                language=self.language,
                team=self.team,
                type=Task.TYPE_IDS['Review']).order_by('-completed')[:1]
            if previous:
                return previous[0].assignee
    def set_expiration(self):
        """Set the expiration_date of this task. Does not save().
        Requires that self.team and self.assignee be set correctly.
        """
        if not self.assignee or not self.team.task_expiration:
            self.expiration_date = None
        else:
            limit = datetime.timedelta(days=self.team.task_expiration)
            self.expiration_date = self.now() + limit
    def get_subtitle_version(self):
        """ Gets the subtitle version related to this task.
        If the task has a subtitle_version attached, return it and
        if not, try to find it through the subtitle language of the video.
        Note: we need this since we don't attach incomplete subtitle_version
        to the task (and if we do we need to set the status to unmoderated and
        that causes the version to get published).
        """
        # autocreate sets the subtitle_version to another
        # language's subtitle_version and that was breaking
        # not only the interface but the new upload method.
        if (self.new_subtitle_version and
            self.new_subtitle_version.language_code == self.language):
            return self.new_subtitle_version
        if not hasattr(self, "_subtitle_version"):
            # Cache the lookup on the instance; may legitimately be None.
            language = self.team_video.video.subtitle_language(self.language)
            self._subtitle_version = (language.get_tip(public=False)
                                      if language else None)
        return self._subtitle_version
    def is_blocked(self):
        """Return whether this task is "blocked".
        "Blocked" means that it's a translation task but the source language
        isn't ready to be translated yet.
        """
        subtitle_version = self.get_subtitle_version()
        if not subtitle_version:
            return False
        source_language = subtitle_version.subtitle_language.get_translation_source_language()
        if not source_language:
            return False
        can_perform = (source_language and
                       source_language.is_complete_and_synced())
        if self.get_type_display() != 'Translate':
            if self.get_type_display() in ('Review', 'Approve'):
                # review and approve tasks will be blocked if they're
                # a translation and they have a draft and the source
                # language no longer has published version
                if not can_perform or source_language.language_code == self.language:
                    return True
        return not can_perform
    def save(self, update_team_video_index=True, *args, **kwargs):
        # NOTE(review): computed but never used below -- candidate for removal.
        is_review_or_approve = self.get_type_display() in ('Review', 'Approve')
        if self.language:
            if not self.language in translation.ALL_LANGUAGE_CODES:
                raise ValidationError(
                    "Subtitle Language should be a valid code.")
        result = super(Task, self).save(*args, **kwargs)
        if update_team_video_index:
            # Reindex the team video asynchronously (Celery task).
            tasks.update_one_team_video.delay(self.team_video.pk)
        Video.cache.invalidate_by_pk(self.team_video.video_id)
        return result
# Settings
class SettingManager(models.Manager):
    """Manager for Setting rows; also usable from Team.settings related sets."""
    use_for_related_fields = True
    def guidelines(self):
        """Settings whose key name begins with 'guidelines_'."""
        wanted = [key for key, name in Setting.KEY_CHOICES
                  if name.startswith('guidelines_')]
        return self.get_query_set().filter(key__in=wanted)
    def messages(self):
        """Settings whose key name begins with 'messages_'."""
        wanted = [key for key, name in Setting.KEY_CHOICES
                  if name.startswith('messages_')]
        return self.get_query_set().filter(key__in=wanted)
    def messages_guidelines(self):
        """Settings that are team messages, guidelines, or page text."""
        return self.get_query_set().filter(key__in=Setting.MESSAGE_KEYS)
    def with_names(self, names):
        """Settings whose key names appear in the given iterable."""
        return self.filter(key__in=[Setting.KEY_IDS[name] for name in names])
    def all_messages(self):
        """Map every message key name to its stored or default text."""
        messages = {}
        for key in Setting.MESSAGE_KEYS:
            name = Setting.KEY_NAMES[key]
            messages[name] = self.instance.get_default_message(name)
        # Stored, non-empty values override the defaults.
        for setting in self.messages_guidelines():
            if setting.data:
                messages[setting.key_name] = setting.data
        return messages
class Setting(models.Model):
    """A single keyed piece of per-team text or flag (one row per key+team)."""
    KEY_CHOICES = (
        (100, 'messages_invite'),
        (101, 'messages_manager'),
        (102, 'messages_admin'),
        (103, 'messages_application'),
        (104, 'messages_joins'),
        (200, 'guidelines_subtitle'),
        (201, 'guidelines_translate'),
        (202, 'guidelines_review'),
        # 300s means if this team will block those notifications
        (300, 'block_invitation_sent_message'),
        (301, 'block_application_sent_message'),
        # NOTE(review): 'denided' [sic] is a persisted key name; renaming
        # it would require a data migration, so it is left untouched.
        (302, 'block_application_denided_message'),
        (303, 'block_team_member_new_message'),
        (304, 'block_team_member_leave_message'),
        (305, 'block_task_assigned_message'),
        (306, 'block_reviewed_and_published_message'),
        (307, 'block_reviewed_and_pending_approval_message'),
        (308, 'block_reviewed_and_sent_back_message'),
        (309, 'block_approved_message'),
        (310, 'block_new_video_message'),
        # 400 is for text displayed on web pages
        (401, 'pagetext_welcome_heading'),
    )
    KEY_NAMES = dict(KEY_CHOICES)
    # Reverse mapping: key name -> integer id.
    KEY_IDS = dict([choice[::-1] for choice in KEY_CHOICES])
    # Keys whose values are user-visible message/guideline/page text.
    MESSAGE_KEYS = [
        key for key, name in KEY_CHOICES
        if name.startswith('messages_') or name.startswith('guidelines_')
        or name.startswith('pagetext_')
    ]
    MESSAGE_DEFAULTS = {
        'pagetext_welcome_heading': _("Help %(team)s reach a world audience"),
    }
    key = models.PositiveIntegerField(choices=KEY_CHOICES)
    data = models.TextField(blank=True)
    team = models.ForeignKey(Team, related_name='settings')
    created = models.DateTimeField(auto_now_add=True, editable=False)
    modified = models.DateTimeField(auto_now=True, editable=False)
    objects = SettingManager()
    class Meta:
        unique_together = (('key', 'team'),)
    def __unicode__(self):
        return u'%s - %s' % (self.team, self.key_name)
    @property
    def key_name(self):
        """Return the key name for this setting.
        TODO: Remove this and replace with get_key_display()?
        """
        return Setting.KEY_NAMES[self.key]
# TeamLanguagePreferences
class TeamLanguagePreferenceManager(models.Manager):
    """Manager computing readable/writable/preferred language sets per team."""
    def _generate_writable(self, team):
        """Return the set of language codes that are writeable for this team."""
        rows = self.for_team(team).filter(allow_writes=False,
                                          preferred=False).values("language_code")
        blocked = {row['language_code'] for row in rows}
        return translation.ALL_LANGUAGE_CODES - blocked
    def _generate_readable(self, team):
        """Return the set of language codes that are readable for this team."""
        rows = self.for_team(team).filter(allow_reads=False,
                                          preferred=False).values("language_code")
        blocked = {row['language_code'] for row in rows}
        return translation.ALL_LANGUAGE_CODES - blocked
    def _generate_preferred(self, team):
        """Return the set of language codes that are preferred for this team."""
        rows = self.for_team(team).filter(preferred=True).values("language_code")
        return {row['language_code'] for row in rows}
    def for_team(self, team):
        """Return a QS of all language preferences for the given team."""
        return self.get_query_set().filter(team=team)
    def on_changed(cls, sender, instance, *args, **kwargs):
        """Invalidate cached language preferences when a row changes.

        NOTE(review): despite the 'cls' name, this is connected to
        post_save as a bound manager method, so the first argument is
        actually the manager instance.
        TODO: Refactor this out of the manager...
        """
        from teams.cache import invalidate_lang_preferences
        invalidate_lang_preferences(instance.team)
    def get_readable(self, team):
        """Readable language codes for the team (may come from memcache)."""
        from teams.cache import get_readable_langs
        return get_readable_langs(team)
    def get_writable(self, team):
        """Writeable language codes for the team (may come from memcache)."""
        from teams.cache import get_writable_langs
        return get_writable_langs(team)
    def get_preferred(self, team):
        """Preferred language codes for the team (may come from memcache)."""
        from teams.cache import get_preferred_langs
        return get_preferred_langs(team)
class TeamLanguagePreference(models.Model):
    """Represent language preferences for a given team.
    First, TLPs may mark a language as "preferred". If that's the case then the
    other attributes of this model are irrelevant and can be ignored.
    "Preferred" languages will have translation tasks automatically created for
    them when subtitles are added.
    If preferred is False, the TLP describes a *restriction* on the language
    instead. Writing in that language may be prevented, or both reading and
    writing may be prevented.
    (Note: "writing" means not only writing new subtitles but also creating
    tasks, etc)
    This is how the restriction settings should interact. TLP means that we
    have created a TeamLanguagePreference for that team and language.
    |                 Action                 | NO  | allow_read=True,  | allow_read=False, |
    |                                        | TLP | allow_write=False | allow_write=False |
    ========================================================================================
    | assignable as tasks                    |  X  |                   |                   |
    | assignable as narrowing                |  X  |                   |                   |
    | listed on the widget for viewing       |  X  |         X         |                   |
    | listed on the widget for improving     |  X  |                   |                   |
    | returned from the api read operations  |  X  |         X         |                   |
    | upload / write operations from the api |  X  |                   |                   |
    | show up on the start dialog            |  X  |                   |                   |
    +----------------------------------------+-----+-------------------+-------------------+
    Remember, this table only applies if preferred=False. If the language is
    preferred the "restriction" attributes are effectively garbage. Maybe we
    should make the column nullable to make this more clear?
    allow_read=True, allow_write=True, preferred=False is invalid. Just remove
    the row all together.
    """
    team = models.ForeignKey(Team, related_name="lang_preferences")
    language_code = models.CharField(max_length=16)
    # The two restriction flags below only matter when preferred=False.
    allow_reads = models.BooleanField(default=False)
    allow_writes = models.BooleanField(default=False)
    preferred = models.BooleanField(default=False)
    objects = TeamLanguagePreferenceManager()
    class Meta:
        unique_together = ('team', 'language_code')
    def clean(self, *args, **kwargs):
        # An all-permissive row is meaningless: absence of a row already
        # grants full access (see the class docstring).
        if self.allow_reads and self.allow_writes:
            raise ValidationError("No sense in having all allowed, just remove the preference for this language.")
        # Preferred rows must not carry restriction flags.
        if self.preferred and (self.allow_reads or self.allow_writes):
            raise ValidationError("Cannot restrict a preferred language.")
        super(TeamLanguagePreference, self).clean(*args, **kwargs)
    def __unicode__(self):
        return u"%s preference for team %s" % (self.language_code, self.team)
# Invalidate/update the cached language preferences whenever a row changes.
post_save.connect(TeamLanguagePreference.objects.on_changed, TeamLanguagePreference)


# TeamNotificationSettings
class TeamNotificationSettingManager(models.Manager):
    def notify_team(self, team_pk, event_name, **kwargs):
        """Notify the given team of a given event.

        Looks up the notification settings that apply to the team (partner
        settings take precedence over team settings), instantiates the
        notifier class, and sends the appropriate notification.

        If the notification settings have an email target, an email is sent.
        If the http settings are filled in, the request is sent.

        Takes only primitive arguments, so it is safe to run as a Celery
        task.
        """
        try:
            team = Team.objects.get(pk=team_pk)
        except Team.DoesNotExist:
            logger.error("A pk for a non-existent team was passed in.",
                         extra={"team_pk": team_pk, "event_name": event_name})
            return

        # Partner-level settings win over team-level ones.
        lookup = {'partner': team.partner} if team.partner else {'team': team}
        try:
            settings = self.get(**lookup)
        except TeamNotificationSetting.DoesNotExist:
            # No settings configured: nothing to notify.
            return

        settings.notify(event_name, **kwargs)
class TeamNotificationSetting(models.Model):
    """Info on how a team should be notified of changes to its videos.

    For now, a team can be notified by having a http request sent with the
    payload as the notification information.  This cannot be hardcoded since
    teams might have different urls for each environment.

    Some teams have strict requirements on mapping video ids to their
    internal values, and also their own language codes.  Therefore we need
    to configure a class that can do the correct mapping.

    TODO: allow email notifications
    """
    # Event names sent to partners.  These strings are part of the external
    # notification protocol, so don't rename them casually.
    EVENT_VIDEO_NEW = "video-new"
    EVENT_VIDEO_EDITED = "video-edited"
    EVENT_LANGUAGE_NEW = "language-new"
    EVENT_LANGUAGE_EDITED = "language-edit"
    EVENT_LANGUAGE_DELETED = "language-deleted"
    EVENT_SUBTITLE_NEW = "subs-new"
    EVENT_SUBTITLE_APPROVED = "subs-approved"
    EVENT_SUBTITLE_REJECTED = "subs-rejected"
    EVENT_APPLICATION_NEW = 'application-new'

    # Settings can be attached to a single team or to a whole partner
    # (partner settings take precedence in TeamNotificationSettingManager).
    team = models.OneToOneField(Team, related_name="notification_settings",
            null=True, blank=True)
    partner = models.OneToOneField('Partner',
            related_name="notification_settings", null=True, blank=True)

    # the url to post the callback notifing partners of new video activity
    request_url = models.URLField(blank=True, null=True)
    basic_auth_username = models.CharField(max_length=255, blank=True, null=True)
    basic_auth_password = models.CharField(max_length=255, blank=True, null=True)

    # not being used, here to avoid extra migrations in the future
    email = models.EmailField(blank=True, null=True)

    # integers mapping to classes, see unisubs-integration/notificationsclasses.py
    notification_class = models.IntegerField(default=1,)

    objects = TeamNotificationSettingManager()

    def get_notification_class(self):
        """Return the notifier class for `notification_class`, or None.

        Returns None (implicitly) when the integration package is not
        installed.  NOTE(review): a KeyError from an unknown
        `notification_class` id is not caught here and would propagate --
        confirm that's intended.
        """
        try:
            from ted.notificationclasses import NOTIFICATION_CLASS_MAP

            return NOTIFICATION_CLASS_MAP[self.notification_class]
        except ImportError:
            logger.exception("Apparently unisubs-integration is not installed")

    def notify(self, event_name, **kwargs):
        """Resolve the notification class for this setting and fire the
        notification.

        Returns (success, content) from the HTTP request when `request_url`
        is configured, otherwise None.
        """
        notification_class = self.get_notification_class()
        if not notification_class:
            logger.error("Could not find notification class %s" % self.notification_class)
            return
        notification = notification_class(self.team, self.partner,
                event_name, **kwargs)
        if self.request_url:
            success, content = notification.send_http_request(
                self.request_url,
                self.basic_auth_username,
                self.basic_auth_password
            )
            return success, content
        # FIXME: spec and test this, for now just return
        return

    def __unicode__(self):
        if self.partner:
            return u'NotificationSettings for partner %s' % self.partner
        return u'NotificationSettings for team %s' % self.team
class BillingReport(models.Model):
    """A requested billing export for a set of teams over a date range.

    `type` selects which generate_rows_type_* method builds the rows;
    process() renders them to CSV and stores the result in `csv_file`
    (with the S3 storage backend that uploads it to S3).
    """
    # use BillingRecords to signify completed work
    TYPE_BILLING_RECORD = 2
    # use approval tasks to signify completed work
    TYPE_APPROVAL = 3
    # Like TYPE_APPROVAL, but centered on the users who subtitle/review the
    # work
    TYPE_APPROVAL_FOR_USERS = 4
    TYPE_CHOICES = (
        (TYPE_BILLING_RECORD, 'Crowd sourced'),
        (TYPE_APPROVAL, 'Professional services'),
        (TYPE_APPROVAL_FOR_USERS, 'On-demand translators'),
    )
    teams = models.ManyToManyField(Team, related_name='billing_reports')
    start_date = models.DateField()
    end_date = models.DateField()
    csv_file = S3EnabledFileField(blank=True, null=True,
            upload_to='teams/billing/')
    # When processing finished (UTC); None while the report is pending.
    processed = models.DateTimeField(blank=True, null=True)
    type = models.IntegerField(choices=TYPE_CHOICES,
            default=TYPE_BILLING_RECORD)

    def __unicode__(self):
        # M2M access requires a pk, so unsaved instances report 0 teams
        # instead of raising.
        if hasattr(self, 'id') and self.id is not None:
            team_count = self.teams.all().count()
        else:
            team_count = 0
        return "%s teams (%s - %s)" % (team_count,
                self.start_date.strftime('%Y-%m-%d'),
                self.end_date.strftime('%Y-%m-%d'))

    def _get_approved_tasks(self):
        """Approve-tasks accepted within the report window for these teams."""
        return Task.objects.complete_approve().filter(
            approved=Task.APPROVED_IDS['Approved'],
            team__in=self.teams.all(),
            completed__range=(self.start_date, self.end_date))

    def _report_date(self, datetime):
        # `datetime` here is a datetime *instance* (the parameter name
        # shadows the module).
        return datetime.strftime('%Y-%m-%d %H:%M:%S')

    def generate_rows_type_approval(self):
        """Build header + one row per approved approve-task."""
        header = (
            'Team',
            'Video Title',
            'Video ID',
            'Project',
            'Language',
            'Minutes',
            'Original',
            'Translation?',
            'Approver',
            'Date',
        )
        rows = [header]
        for approve_task in self._get_approved_tasks():
            video = approve_task.team_video.video
            project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
            version = approve_task.new_subtitle_version
            language = version.subtitle_language
            # Most recent subtitle/translate task for the same video and
            # language, used below to tell translations from transcriptions.
            # NOTE(review): unlike generate_rows_type_approval_for_users,
            # this [0] is unguarded -- an approve task with no matching
            # subtitle task (e.g. one created manually) would raise
            # IndexError here.  Confirm whether that can happen.
            subtitle_task = (Task.objects.complete_subtitle_or_translate()
                             .filter(team_video=approve_task.team_video,
                                     language=approve_task.language)
                             .order_by('-completed'))[0]
            rows.append((
                approve_task.team.name,
                video.title_display(),
                video.video_id,
                project,
                approve_task.language,
                get_minutes_for_version(version, False),
                language.is_primary_audio_language(),
                subtitle_task.type==Task.TYPE_IDS['Translate'],
                unicode(approve_task.assignee),
                self._report_date(approve_task.completed),
            ))
        return rows

    def generate_rows_type_approval_for_users(self):
        """Like the approval report, but one row per (user, task) involved in
        each approved piece of work, sorted by user name."""
        header = (
            'User',
            'Task Type',
            'Team',
            'Video Title',
            'Video ID',
            'Project',
            'Language',
            'Minutes',
            'Original',
            'Approver',
            'Note',
            'Date',
            'Pay Rate',
        )
        data_rows = []
        for approve_task in self._get_approved_tasks():
            video = approve_task.team_video.video
            project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
            version = approve_task.get_subtitle_version()
            language = version.subtitle_language
            # Collect the approve task plus, when present, the matching
            # subtitle/translate task and review task.
            all_tasks = [approve_task]
            try:
                all_tasks.append((Task.objects.complete_subtitle_or_translate()
                                  .filter(team_video=approve_task.team_video,
                                          language=approve_task.language)
                                  .order_by('-completed'))[0])
            except IndexError:
                # no subtitling task, probably the review task was manually
                # created.
                pass
            try:
                all_tasks.append((Task.objects.complete_review()
                                  .filter(team_video=approve_task.team_video,
                                          language=approve_task.language)
                                  .order_by('-completed'))[0])
            except IndexError:
                # review not enabled
                pass
            for task in all_tasks:
                data_rows.append((
                    unicode(task.assignee),
                    task.get_type_display(),
                    approve_task.team.name,
                    video.title_display(),
                    video.video_id,
                    project,
                    language.language_code,
                    get_minutes_for_version(version, False),
                    language.is_primary_audio_language(),
                    unicode(approve_task.assignee),
                    unicode(task.body),
                    self._report_date(task.completed),
                    task.assignee.pay_rate_code,
                ))
        # Sort by user name so each translator's rows are contiguous.
        data_rows.sort(key=lambda row: row[0])
        return [header] + data_rows

    def generate_rows_type_billing_record(self):
        """Concatenate per-team BillingRecord CSV rows (header only for the
        first team)."""
        rows = []
        for i,team in enumerate(self.teams.all()):
            rows = rows + BillingRecord.objects.csv_report_for_team(team,
                self.start_date, self.end_date, add_header=i == 0)
        return rows

    def generate_rows(self):
        """Dispatch to the row generator matching `type`.

        Raises ValueError for an unknown report type.
        """
        if self.type == BillingReport.TYPE_BILLING_RECORD:
            rows = self.generate_rows_type_billing_record()
        elif self.type == BillingReport.TYPE_APPROVAL:
            rows = self.generate_rows_type_approval()
        elif self.type == BillingReport.TYPE_APPROVAL_FOR_USERS:
            rows = self.generate_rows_type_approval_for_users()
        else:
            raise ValueError("Unknown type: %s" % self.type)
        return rows

    def convert_unicode_to_utf8(self, rows):
        """UTF-8-encode every unicode cell so csv.writer (Python 2) can
        write the rows; non-unicode values pass through unchanged."""
        def _convert(value):
            if isinstance(value, unicode):
                return value.encode("utf-8")
            else:
                return value
        return [tuple(_convert(v) for v in row) for row in rows]

    def process(self):
        """Generate the rows (including headers), write them to a temp CSV
        file and assign that file to `csv_file` (with the S3 storage backend
        this uploads it to S3).  Always stamps `processed` and saves, even
        when row generation fails.
        """
        try:
            rows = self.generate_rows()
        except StandardError:
            # StandardError is Python 2 only.  Any failure is logged and the
            # report is marked processed with no file attached.
            logger.error("Error generating billing report: (id: %s)", self.id)
            self.csv_file = None
        else:
            self.csv_file = self.make_csv_file(rows)
        self.processed = datetime.datetime.utcnow()
        self.save()

    def make_csv_file(self, rows):
        """Write `rows` to a file under /tmp and return it wrapped in a File.

        NOTE(review): the handle returned via File(open(fn, 'r')) is never
        explicitly closed and the temp file is left on disk; this relies on
        the storage layer / GC to clean up.
        """
        rows = self.convert_unicode_to_utf8(rows)
        fn = '/tmp/bill-%s-teams-%s-%s-%s-%s.csv' % (
            self.teams.all().count(),
            self.start_str, self.end_str,
            self.get_type_display(), self.pk)
        with open(fn, 'w') as f:
            writer = csv.writer(f)
            writer.writerows(rows)
        return File(open(fn, 'r'))

    @property
    def start_str(self):
        # Compact YYYYMMDD form used in the generated filename.
        return self.start_date.strftime("%Y%m%d")

    @property
    def end_str(self):
        return self.end_date.strftime("%Y%m%d")
class BillingReportGenerator(object):
    """Build the rows for a TYPE_BILLING_RECORD report.

    NOTE(review): __init__ groups records with itertools.groupby, which only
    groups *consecutive* items -- `all_records` must already be ordered so
    that records for the same video are adjacent; confirm the caller's
    queryset guarantees that.
    """
    def __init__(self, all_records, add_header=True):
        if add_header:
            self.rows = [self.header()]
        else:
            self.rows = []
        all_records = list(all_records)
        self.make_language_number_map(all_records)
        self.make_languages_without_records(all_records)
        for video, records in groupby(all_records, lambda r: r.video):
            records = list(records)
            if video:
                # For real (non-deleted) videos also emit placeholder rows
                # for completed languages that never got a billing record.
                for lang in self.languages_without_records.get(video.id, []):
                    self.rows.append(
                        self.make_row_for_lang_without_record(video, lang))
            for r in records:
                self.rows.append(self.make_row(video, r))

    def header(self):
        """Column names; must stay aligned with make_row and
        make_row_for_lang_without_record."""
        return [
            'Video Title',
            'Video ID',
            'Project',
            'Language',
            'Minutes',
            'Original',
            'Language number',
            'Team',
            'Created',
            'Source',
            'User',
        ]

    def make_row(self, video, record):
        """One CSV row for a billing record; `video` may be None when the
        video was deleted after the record was created."""
        return [
            (video and video.title_display()) or "----",
            (video and video.video_id) or "deleted",
            (record.project.name if record.project else 'none'),
            (record.new_subtitle_language and record.new_subtitle_language.language_code) or "----",
            record.minutes,
            record.is_original,
            (self.language_number_map and (record.id in self.language_number_map) and self.language_number_map[record.id]) or "----",
            record.team.slug,
            record.created.strftime('%Y-%m-%d %H:%M:%S'),
            record.source,
            record.user.username,
        ]

    def make_language_number_map(self, records):
        """Map record id -> 1-based ordinal of that record among *all*
        billing records for its video, ordered by creation time (not just
        the records included in this report)."""
        self.language_number_map = {}
        videos = set(r.video for r in records)
        video_counts = dict((v and v.id, 0) for v in videos)
        qs = (BillingRecord.objects
              .filter(video__in=videos)
              .order_by('created'))
        for record in qs:
            vid = record.video and record.video.id
            video_counts[vid] += 1
            self.language_number_map[record.id] = video_counts[vid]

    def make_languages_without_records(self, records):
        """Map video id -> completed subtitle languages that have no billing
        record at all.

        Uses a raw EXISTS subquery via .extra() because this anti-join is
        awkward to express in the ORM of this vintage.
        """
        self.languages_without_records = {}
        videos = [r.video for r in records]
        language_ids = [r.new_subtitle_language_id for r in records]
        no_billing_record_where = """\
    NOT EXISTS (
        SELECT 1
        FROM teams_billingrecord br
        WHERE br.new_subtitle_language_id = subtitles_subtitlelanguage.id
    )"""
        qs = (NewSubtitleLanguage.objects
              .filter(video__in=videos, subtitles_complete=True)
              .exclude(id__in=language_ids).
              extra(where=[no_billing_record_where]))
        for lang in qs:
            vid = lang.video_id
            if vid not in self.languages_without_records:
                self.languages_without_records[vid] = [lang]
            else:
                self.languages_without_records[vid].append(lang)

    def make_row_for_lang_without_record(self, video, language):
        """Placeholder row (0 minutes, unknown team/source/user) for a
        completed language that has no billing record."""
        return [
            video.title_display(),
            video.video_id,
            'none',
            language.language_code,
            0,
            language.is_primary_audio_language(),
            0,
            'unknown',
            language.created.strftime('%Y-%m-%d %H:%M:%S'),
            'unknown',
            'unknown',
        ]
class BillingRecordManager(models.Manager):
    def data_for_team(self, team, start, end):
        """Billing records for `team` created within [start, end]."""
        return self.filter(team=team, created__gte=start, created__lte=end)

    def csv_report_for_team(self, team, start, end, add_header=True):
        """Return report rows (optionally including the header row) for one
        team over the given date range."""
        all_records = self.data_for_team(team, start, end)
        generator = BillingReportGenerator(all_records, add_header)
        return generator.rows

    def insert_records_for_translations(self, billing_record):
        """
        IF you've translated from an incomplete language, and later on that
        language is completed, we must check if any translations are now
        complete and therefore should have billing records with them.

        Returns the list of records actually created.
        """
        translations = billing_record.new_subtitle_language.get_dependent_subtitle_languages()
        inserted = []
        for translation in translations:
            version = translation.get_tip(public=False)
            if version:
                inserted.append(self.insert_record(version))
        # insert_record returns None when no record was needed; drop those.
        return filter(bool, inserted)

    def insert_record(self, version):
        """
        Figures out if this version qualifies for a billing record, and
        if so creates one.  This should be self contained, e.g. safe to call
        for any version.  No records should be created if not needed, and it
        won't create multiples.

        If this language has translations it will check if any of those are
        now eligible for BillingRecords and create them accordingly.

        Returns (new_record, records_from_translations) when a record was
        created, otherwise None.
        """
        from teams.models import BillingRecord
        celery_logger.debug('insert billing record')
        language = version.subtitle_language
        video = language.video
        tv = video.get_team_video()
        if not tv:
            celery_logger.debug('not a team video')
            return
        if not language.is_complete_and_synced(public=False):
            celery_logger.debug('language not complete')
            return
        try:
            # we already have a record
            previous_record = BillingRecord.objects.get(video=video,
                    new_subtitle_language=language)
            # make sure we update it
            celery_logger.debug('a billing record for this language exists')
            previous_record.is_original = \
                video.primary_audio_language_code == language.language_code
            previous_record.save()
            return
        except BillingRecord.DoesNotExist:
            pass
        # Skip languages that already had other versions before the cutoff.
        # NOTE(review): BILLING_CUTOFF is defined elsewhere in this module;
        # presumably such work was billed under the old scheme -- confirm.
        if NewSubtitleVersion.objects.filter(
                subtitle_language=language,
                created__lt=BILLING_CUTOFF).exclude(
                pk=version.pk).exists():
            celery_logger.debug('an older version exists')
            return
        is_original = language.is_primary_audio_language()
        source = version.origin
        team = tv.team
        project = tv.project
        new_record = BillingRecord.objects.create(
            video=video,
            project = project,
            new_subtitle_version=version,
            new_subtitle_language=language,
            is_original=is_original, team=team,
            created=version.created,
            source=source,
            user=version.author)
        from_translations = self.insert_records_for_translations(new_record)
        return new_record, from_translations
def get_minutes_for_version(version, round_up_to_integer):
    """
    Return the number of minutes covered by the subtitles in ``version``.

    The duration is measured from the first cue with a timestamp to the last
    cue with a timestamp.  Returns 0 when there are no subtitles or none of
    them carry timing information.  When ``round_up_to_integer`` is true the
    result is rounded up to a whole number of minutes (for billing).
    """
    subtitles = version.get_subtitles()
    if len(subtitles) == 0:
        return 0

    start_ms = None
    for cue in subtitles:
        if cue.start_time is not None:
            start_ms = cue.start_time
            break
        # An end time without a start time shouldn't happen, but fall back
        # to it just in case.
        if cue.end_time is not None:
            start_ms = cue.end_time
            break
    if start_ms is None:
        # No cue carries any timing information.
        return 0

    end_ms = None
    for cue in reversed(subtitles):
        if cue.end_time is not None:
            end_ms = cue.end_time
            break
        # Symmetric fallback: a cue without an end time shouldn't happen
        # either, but use its start time just in case.
        if cue.start_time is not None:
            end_ms = cue.start_time
            break
    if end_ms is None:
        return 0

    # Timestamps are in milliseconds.
    minutes = ((end_ms - start_ms) / 1000.0) / 60.0
    if round_up_to_integer:
        minutes = int(ceil(minutes))
    return minutes
class BillingRecord(models.Model):
    """One billable unit of completed subtitling work.

    The legacy (`subtitle_version`/`subtitle_language`) and new
    (`new_subtitle_version`/`new_subtitle_language`) subtitle FKs coexist;
    the code in this module only uses the `new_*` pair.
    """
    # The billing record should still exist if the video is deleted
    video = models.ForeignKey(Video, blank=True, null=True, on_delete=models.SET_NULL)
    project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.SET_NULL)
    subtitle_version = models.ForeignKey(SubtitleVersion, null=True,
            blank=True, on_delete=models.SET_NULL)
    new_subtitle_version = models.ForeignKey(NewSubtitleVersion, null=True,
            blank=True, on_delete=models.SET_NULL)
    subtitle_language = models.ForeignKey(SubtitleLanguage, null=True,
            blank=True, on_delete=models.SET_NULL)
    new_subtitle_language = models.ForeignKey(NewSubtitleLanguage, null=True,
            blank=True, on_delete=models.SET_NULL)
    # Billed duration; computed lazily in save() when left unset.
    minutes = models.FloatField(blank=True, null=True)
    is_original = models.BooleanField(default=False)
    team = models.ForeignKey(Team)
    created = models.DateTimeField()
    source = models.CharField(max_length=255)
    user = models.ForeignKey(User)

    objects = BillingRecordManager()

    class Meta:
        # At most one billing record per (video, language) pair.
        unique_together = ('video', 'new_subtitle_language')

    def __unicode__(self):
        return "%s - %s" % (self.video and self.video.video_id,
                self.new_subtitle_language and self.new_subtitle_language.language_code)

    def save(self, *args, **kwargs):
        # Compute minutes on first save; an explicit 0.0 is preserved (the
        # second clause stops 0.0 from being treated as "unset").
        if not self.minutes and self.minutes != 0.0:
            self.minutes = self.get_minutes()
        assert self.minutes is not None
        return super(BillingRecord, self).save(*args, **kwargs)

    def get_minutes(self):
        # Always rounds up to a whole minute for billing purposes.
        return get_minutes_for_version(self.new_subtitle_version, True)
class Partner(models.Model):
    """A content partner that can own and administer multiple teams."""
    name = models.CharField(_(u'name'), max_length=250, unique=True)
    slug = models.SlugField(_(u'slug'), unique=True)
    can_request_paid_captions = models.BooleanField(default=False)

    # The `admins` field specifies users who can do just about anything within
    # the partner realm.
    # NOTE: null=True has no effect on a ManyToManyField; kept to avoid a
    # spurious migration.
    admins = models.ManyToManyField('auth.CustomUser',
            related_name='managed_partners', blank=True, null=True)

    def __unicode__(self):
        return self.name

    def is_admin(self, user):
        """Return whether `user` is one of this partner's admins.

        Fixed to use an EXISTS query instead of the previous
        `user in self.admins.all()`, which fetched and instantiated every
        admin row just to do a membership test.
        """
        return self.admins.filter(pk=user.pk).exists()
| agpl-3.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec backed by the charmap tables generated below.
    # (This module is generated by gencodec.py; regenerate rather than edit.)

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk encodes independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding needs no state between chunks.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines the stateless Codec with the buffered stream writer.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines the stateless Codec with the buffered stream reader.
    pass
### encodings module API
def getregentry():
    # Registry hook used by the encodings package to look up 'cp1026'.
    return codecs.CodecInfo(
        name='cp1026',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
wiki05/youtube-dl | youtube_dl/extractor/nytimes.py | 116 | 4325 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
parse_iso8601,
)
class NYTimesBaseIE(InfoExtractor):
    def _extract_video_from_id(self, video_id):
        """Download the NYTimes video API JSON and map it to an info dict."""
        video_data = self._download_json(
            'http://www.nytimes.com/svc/video/api/v2/video/%s' % video_id,
            video_id, 'Downloading video JSON')

        title = video_data['headline']
        description = video_data.get('summary')
        duration = float_or_none(video_data.get('duration'), 1000)
        uploader = video_data['byline']
        # Strip the trailing 8 characters (presumably a UTC-offset suffix --
        # matches the original parsing) before ISO-8601 parsing.
        timestamp = parse_iso8601(video_data['publication_date'][:-8])

        def get_file_size(file_size):
            # The API reports sizes either as a plain int or as a
            # {'value': ...} mapping; anything else counts as unknown (0).
            if isinstance(file_size, int):
                return file_size
            if isinstance(file_size, dict):
                return int(file_size.get('value', 0))
            return 0

        formats = []
        for rendition in video_data['renditions']:
            formats.append({
                'url': rendition['url'],
                'format_id': rendition.get('type'),
                'vcodec': rendition.get('video_codec'),
                'width': int_or_none(rendition.get('width')),
                'height': int_or_none(rendition.get('height')),
                'filesize': get_file_size(rendition.get('fileSize')),
            })
        self._sort_formats(formats)

        thumbnails = []
        for image in video_data['images']:
            thumbnails.append({
                'url': 'http://www.nytimes.com/%s' % image['url'],
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'uploader': uploader,
            'duration': duration,
            'formats': formats,
            'thumbnails': thumbnails,
        }
class NYTimesIE(NYTimesBaseIE):
    # Matches nytimes.com video pages and graphics8 embed iframes; the
    # numeric video id is captured directly from the URL.
    _VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
        'md5': '18a525a510f942ada2720db5f31644c0',
        'info_dict': {
            'id': '100000002847155',
            'ext': 'mov',
            'title': 'Verbatim: What Is a Photocopier?',
            'description': 'md5:93603dada88ddbda9395632fdc5da260',
            'timestamp': 1398631707,
            'upload_date': '20140427',
            'uploader': 'Brett Weiner',
            'duration': 419,
        }
    }, {
        'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The id comes straight from the URL; no page download needed.
        video_id = self._match_id(url)
        return self._extract_video_from_id(video_id)
class NYTimesArticleIE(NYTimesBaseIE):
    # The (.(?<!video))*? construct rejects paths containing "video",
    # leaving those URLs to NYTimesIE above.
    _VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?'

    _TESTS = [{
        'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0',
        'md5': 'e2076d58b4da18e6a001d53fd56db3c9',
        'info_dict': {
            'id': '100000003628438',
            'ext': 'mov',
            'title': 'New Minimum Wage: $70,000 a Year',
            'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.',
            'timestamp': 1429033037,
            'upload_date': '20150414',
            'uploader': 'Matthew Williams',
        }
    }, {
        'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Article pages embed the real video id in a data-videoid attribute,
        # so fetch the page first, then delegate to the shared extractor.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_id = self._html_search_regex(r'data-videoid="(\d+)"', webpage, 'video id')
        return self._extract_video_from_id(video_id)
| unlicense |
goldyliang/VLC_Subtitle_Navigation | extras/misc/stackhandler.py | 99 | 9554 | #!/usr/bin/python
#####################################################################
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2011-2012 Ludovic Fauvet <etix@videolan.org>
# Jean-Baptiste Kempf <jb@videolan.org>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
#####################################################################
#
# This script can be started in two ways:
# - Without any arguments:
# The script will search for stacktrace in the WORKDIR, process
# them and dispatch them in their respective subdirectories.
# - With a stacktrace as only argument:
# The script will write the output on stdout and exit immediately
# after the stacktrace has been processed.
# The input file will stay in place, untouched.
#
# NOTE: Due to a bug in the mingw32-binutils > 2.19 the section
# .gnu_debuglink in the binary file is trimmed thus preventing
# gdb to find the associated symbols. This script will
# work around this issue and rerun gdb for each dbg file.
#
#####################################################################
# Build configuration: every path below is derived from the VLC version
# under test.
VLC_VERSION = "2.0.3"
# Path to the vlc.exe image handed to gdb via --exec.
# BUG FIX: a '+' was missing between "/vlc-" and VLC_VERSION, which made
# this line a SyntaxError and prevented the whole script from loading.
VLC_BIN = "/home/videolan/vlc/" + VLC_VERSION + "/vlc-" + VLC_VERSION + "/vlc.exe"
VLC_BASE_DIR = "/home/videolan/vlc/" + VLC_VERSION + "/vlc-" + VLC_VERSION + "/"
VLC_SYMBOLS_DIR = "/home/videolan/vlc/" + VLC_VERSION + "/symbols-" + VLC_VERSION + "/"
# Directory scanned for incoming crash reports when running in batch mode.
WORKDIR = "/srv/ftp/crashes-win32"
# Crash reports are named with a 14-digit timestamp (YYYYmmddHHMMSS).
FILE_MATCH = r"^\d{14}$"
# Reports bigger than this many bytes are discarded as bogus.
FILE_MAX_SIZE = 10000
# gdb command template; the %(...)s placeholders are filled in findSymbols().
GDB_CMD = "gdb --exec=%(VLC_BIN)s --symbols=%(VLC_SYMBOLS_DIR)s%(DBG_FILE)s.dbg --batch -x %(BATCH_FILE)s"
EMAIL_TO = "bugreporter -- videolan.org"
EMAIL_FROM = "crashes@crash.videolan.org"
EMAIL_SUBJECT = "[CRASH] New Win32 crash report"
# %-template rendered by genEmailBody().
EMAIL_BODY = \
"""
Dear Bug Squasher,
This crash has been reported automatically and might be incomplete and/or broken.
Windows version: %(WIN32_VERSION)s
%(STACKTRACE)s
Truly yours,
a python script.
"""
import os, sys, re, tempfile
import string, shlex, subprocess
import smtplib, datetime, shutil
import traceback
from email.mime.text import MIMEText
def processFile(filename):
    """Process one crash-report file end to end.

    Validates the report, resolves the stacktrace through gdb, mails the
    result, and finally removes or archives the input file depending on
    the outcome. Sets the module-level ``win32_version`` as a side effect.
    """
    # CONSISTENCY FIX: the rest of the file uses the print(...) call form;
    # this line used a Python 2 print statement.
    print("Processing " + filename)
    global win32_version
    f = open(filename, 'r')
    # Read (and repair) the input file: drop any non-printable bytes.
    content = "".join(filter(lambda x: x in string.printable, f.read()))
    f.close()
    if os.path.getsize(filename) < 10:
        print("File empty")
        os.remove(filename)
        return
    # Check if VLC version matches the one this script is configured for.
    if not isValidVersion(content):
        print("Invalid VLC version")
        moveFile(filename, outdated = True)
        return
    # Get Windows version (module global, consumed by genEmailBody).
    win32_version = getWinVersion(content) or 'unknown'
    # Map eip <--> library
    mapping = mapLibraries(content)
    if not mapping:
        print("Stacktrace not found")
        os.remove(filename)
        return
    # Associate all eip to their respective lib
    # lib1
    # `- 0x6904f020
    # - 0x6927d37c
    # lib2
    # `- 0x7e418734
    # - 0x7e418816
    # - 0x7e42bf15
    sortedEIP, delta_libs = sortEIP(content, mapping)
    # Compute the stacktrace using GDB
    eipmap = findSymbols(sortedEIP)
    # Generate the body of the email
    body = genEmailBody(mapping, eipmap, delta_libs)
    # Send the email
    sendEmail(body)
    # Print the output
    print(body)
    # Finally archive the stacktrace
    moveFile(filename, outdated = False)
def isValidVersion(content):
    """Return True if the report was produced by the VLC version this
    script is configured for (VLC_VERSION)."""
    pattern = re.compile(r"^VLC=%s " % VLC_VERSION, re.MULTILINE)
    # search() returns a match object or None; convert directly instead of
    # the redundant `True if res else False` conditional.
    return pattern.search(content) is not None
def getWinVersion(content):
    """Extract the reported Windows version from an "OS=..." line.

    Returns the version string, or None when no such line is present.
    """
    match = re.search(r"^OS=(.*)$", content, re.MULTILINE)
    if match is None:
        return None
    return match.group(1)
def getDiffAddress(content, name):
    """Compute the relocation delta of a plugin DLL.

    Looks up the load address reported for *name* in the crash dump and
    subtracts the preferred ImageBase read from the on-disk binary via
    objdump. Returns the integer delta, or None when the plugin is not
    mentioned in the report or the file is missing locally.
    """
    plugin_name_section = content.find(name)
    if plugin_name_section < 0:
        return None
    # The load address precedes the name on the same "addr|path" line.
    begin_index = content.rfind("\n", 0, plugin_name_section) + 1
    end_index = content.find("|", begin_index)
    # Map the Windows plugin path onto the local unix build tree.
    # NOTE(review): assumes *name* always contains 'plugins\' -- if it does
    # not, rfind returns -1 and the whole path is kept; confirm callers.
    tmp_index = name.rfind('plugins\\')
    libname = name[tmp_index :].replace("\\", "/")
    full_path = VLC_BASE_DIR + libname
    if not os.path.isfile(full_path):
        return None
    # Ask objdump for the preferred ImageBase of the DLL (hex string).
    cmd = "objdump -p " + full_path + " |grep ImageBase -|cut -f2-"
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).stdout.read().strip()
    # Actual load address minus preferred base, both parsed as hex.
    diff = int(content[begin_index:end_index], 16) - int(p, 16)
    return diff
def mapLibraries(content):
    """Parse the "[stacktrace]" section of a crash report.

    Returns a list of (address, library_path) string tuples, one per
    frame line of the form "hexaddr|path", or None when the section is
    missing or contains no parsable entries.
    """
    marker = content.find("[stacktrace]")
    if marker < 0:
        return None
    frame_re = re.compile(r"^([0-9a-fA-F]+)\|(.+)$")
    mapping = []
    for line in content[marker:].splitlines():
        frame = frame_re.match(line)
        print(line)
        if frame is not None:
            mapping.append(frame.group(1, 2))
    # An empty mapping is treated the same as a missing section.
    return mapping if mapping else None
def sortEIP(content, mapping):
    """Group crash addresses by library.

    Returns (libs, libs_address): *libs* maps a bare DLL file name to the
    list of its rebased addresses, *libs_address* maps it to the
    relocation delta that was subtracted.
    """
    libs = {}
    libs_address = {}
    for address, path in mapping:
        # Bare file name, without the Windows directory part.
        libname = path[path.rfind('\\') + 1:]
        if libname not in libs:
            libs[libname] = []
        # The delta is recomputed for every frame (as the original code
        # did); the value from the last frame of each library wins.
        delta = getDiffAddress(content, path)
        libs_address[libname] = delta if delta is not None else 0
        libs[libname].append(int(address, 16) - libs_address[libname])
    return libs, libs_address
def findSymbols(sortedEIP):
    """Resolve raw addresses to symbol names using gdb.

    *sortedEIP* maps a dbg-file base name to a list of integer addresses
    (as produced by sortEIP). One gdb process is spawned per library,
    fed a batch file of "p/a <addr>" commands, and its output is parsed
    back into a dict mapping each address to gdb's symbolic description.
    """
    eipmap = {}
    for k, v in sortedEIP.items():
        # Create the gdb batchfile
        batchfile = tempfile.NamedTemporaryFile(mode="w")
        batchfile.write("set print symbol-filename on\n")
        # Append all eip for this lib
        for eip in v:
            batchfile.write('p/a %s\n' % hex(eip))
        batchfile.flush()
        # Generate the command line
        cmd = GDB_CMD % {"VLC_BIN": VLC_BIN, "VLC_SYMBOLS_DIR": VLC_SYMBOLS_DIR, "DBG_FILE": k, "BATCH_FILE": batchfile.name}
        args = shlex.split(cmd)
        # Start GDB and get result
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Parse result: gdb prints one "$N = <symbol>" line per command,
        # in the same order the addresses were written to the batch file,
        # so the counter pairs each output line with v[cnt].
        gdb_pattern = re.compile(r"^\$\d+ = (.+)$")
        cnt = 0
        while p.poll() == None:
            o = p.stdout.readline()
            if o != b'':
                o = bytes.decode(o)
                m = gdb_pattern.match(o)
                if m is not None:
                    #print("LINE: [%s]" % m.group(1))
                    eipmap[v[cnt]] = m.group(1)
                    cnt += 1
        # Closing the NamedTemporaryFile also deletes it from disk.
        batchfile.close()
    return eipmap
def genEmailBody(mapping, eipmap, delta_libs):
    """Render the report email body from the resolved symbols.

    Uses the module-level ``win32_version`` set by processFile().
    """
    lines = []
    for position, (address, path) in enumerate(mapping):
        libname = path[path.rfind('\\') + 1:]
        print(int(address, 16), delta_libs[libname])
        # eipmap is keyed by the rebased address computed in sortEIP.
        symbol = eipmap[int(address, 16) - delta_libs[libname]]
        lines.append("%d. %s [in %s]\n" % (position, symbol, path))
    stacktrace = "".join(lines).rstrip('\n')
    return EMAIL_BODY % {"STACKTRACE": stacktrace, "WIN32_VERSION": win32_version}
def sendEmail(body):
    """Mail *body* to the bug-reporting address through the local MTA."""
    message = MIMEText(body)
    message['Subject'] = EMAIL_SUBJECT
    message['From'] = EMAIL_FROM
    message['To'] = EMAIL_TO
    server = smtplib.SMTP()
    server.connect("127.0.0.1")
    server.sendmail(EMAIL_FROM, [EMAIL_TO], message.as_string())
    server.quit()
def moveFile(filename, outdated=False):
    """Archive a processed report under WORKDIR/<YYYYmmdd>[/outdated]."""
    day_dir = os.path.join(WORKDIR, datetime.datetime.now().strftime("%Y%m%d"))
    if not os.path.isdir(day_dir):
        os.mkdir(day_dir)
    dest_dir = day_dir
    if outdated:
        # Reports from other VLC versions go to a separate subfolder.
        dest_dir = os.path.join(day_dir, "outdated")
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir)
    shutil.move(filename, os.path.join(dest_dir, os.path.basename(filename)))
### ENTRY POINT ###
# Two modes (see the header comment): with exactly one argument, process
# that single file and leave it in place; otherwise scan WORKDIR in batch
# mode and dispatch every report found there.
batch = len(sys.argv) != 2
if batch:
    print("Running in batch mode")
input_files = []
if not batch:
    if not os.path.isfile(sys.argv[1]):
        exit("file does not exists")
    input_files.append(sys.argv[1])
else:
    file_pattern = re.compile(FILE_MATCH)
    entries = os.listdir(WORKDIR)
    for entry in entries:
        path_entry = WORKDIR + "/" + entry
        if not os.path.isfile(path_entry):
            continue
        # Delete anything that does not look like a timestamped report.
        if not file_pattern.match(entry):
            print(entry)
            os.remove(path_entry)
            continue
        # Oversized files are assumed bogus and removed as well.
        if os.path.getsize(path_entry) > FILE_MAX_SIZE:
            print("%s is too big" % entry)
            os.remove(path_entry)
            continue
        input_files.append(path_entry)
if not len(input_files):
    exit("Nothing to process")
# Start processing each file; one broken report must not stop the batch.
for input_file in input_files:
    try:
        processFile(input_file)
    except Exception as ex:
        print(traceback.format_exc())
| gpl-2.0 |
apanju/odoo | addons/website_event_track/models/event.py | 300 | 8344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
import pytz
class event_track_tag(osv.osv):
    """Free-form tag that can be attached to an event track."""
    _name = "event.track.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Track Tag', translate=True)
    }
class event_tag(osv.osv):
    """Free-form tag that can be attached to an event."""
    _name = "event.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Tag', translate=True)
    }
#
# Tracks: conferences
#
class event_track_stage(osv.osv):
    """Kanban stage for event tracks (e.g. proposal, accepted, ...),
    ordered by its sequence number."""
    _name = "event.track.stage"
    _order = 'sequence'
    _columns = {
        'name': fields.char('Track Stage', translate=True),
        'sequence': fields.integer('Sequence')
    }
    _defaults = {
        'sequence': 0
    }
class event_track_location(osv.osv):
    """Room/location in which a track takes place."""
    _name = "event.track.location"
    _columns = {
        'name': fields.char('Track Rooms')
    }
class event_track(osv.osv):
    """A talk/session scheduled within an event, with a kanban workflow
    (stage, priority) and a public website page."""
    _name = "event.track"
    _description = 'Event Tracks'
    _order = 'priority, date'
    _inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata']

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: public website URL of each track."""
        res = dict.fromkeys(ids, '')
        for track in self.browse(cr, uid, ids, context=context):
            res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track))
        return res

    _columns = {
        'name': fields.char('Track Title', required=True, translate=True),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'speaker_ids': fields.many2many('res.partner', string='Speakers'),
        'tag_ids': fields.many2many('event.track.tag', string='Tags'),
        'stage_id': fields.many2one('event.track.stage', 'Stage'),
        'description': fields.html('Track Description', translate=True),
        'date': fields.datetime('Track Date'),
        'duration': fields.float('Duration', digits=(16, 2)),
        'location_id': fields.many2one('event.track.location', 'Location'),
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'color': fields.integer('Color Index'),
        'priority': fields.selection([('3', 'Low'), ('2', 'Medium (*)'), ('1', 'High (**)'), ('0', 'Highest (***)')], 'Priority', required=True),
        'website_published': fields.boolean('Available in the website', copy=False),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'image': fields.related('speaker_ids', 'image', type='binary', readonly=True)
    }

    def set_priority(self, cr, uid, ids, priority, context=None):
        """Kanban helper: set the priority of the given tracks.

        BUG FIX: the mutable default ``context={}`` was replaced by
        ``None`` to avoid sharing one dict across calls.
        """
        return self.write(cr, uid, ids, {'priority': priority})

    def _default_stage_id(self, cr, uid, context=None):
        """Default stage: the first stage by sequence order, if any.

        BUG FIX: mutable default ``context={}`` replaced by ``None``.
        """
        stage_obj = self.pool.get('event.track.stage')
        ids = stage_obj.search(cr, uid, [], context=context)
        return ids and ids[0] or False

    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'website_published': lambda self, cr, uid, ctx: False,
        'duration': lambda *args: 1.5,
        'stage_id': _default_stage_id,
        'priority': '2'
    }

    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """Make the kanban view show every stage column, even empty ones."""
        stage_obj = self.pool.get('event.track.stage')
        result = stage_obj.name_search(cr, uid, '', context=context)
        return result, {}

    _group_by_full = {
        'stage_id': _read_group_stage_ids,
    }
#
# Events
#
class event_event(osv.osv):
    """Extend events with tracks, sponsors, a news blog and the related
    website menu entries."""
    _inherit = "event.event"

    def _list_tz(self, cr, uid, context=None):
        """Selection values for the event timezone field."""
        # put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
        return [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]

    def _count_tracks(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: number of tracks per event."""
        return {
            event.id: len(event.track_ids)
            for event in self.browse(cr, uid, ids, context=context)
        }

    def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None):
        """Functional field: de-duplicated ids of the tags used by the
        event's tracks."""
        res = dict((res_id, []) for res_id in ids)
        for event in self.browse(cr, uid, ids, context=context):
            for track in event.track_ids:
                res[event.id] += [tag.id for tag in track.tag_ids]
            res[event.id] = list(set(res[event.id]))
        return res

    _columns = {
        'tag_ids': fields.many2many('event.tag', string='Tags'),
        'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True),
        'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True),
        'blog_id': fields.many2one('blog.blog', 'Event Blog'),
        'show_track_proposal': fields.boolean('Talks Proposals'),
        'show_tracks': fields.boolean('Multiple Tracks'),
        'show_blog': fields.boolean('News'),
        'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'),
        'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'),
        'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."),
        'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64),
    }
    _defaults = {
        'show_track_proposal': False,
        'show_tracks': False,
        'show_blog': False,
        'timezone_of_event': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).tz,
    }

    def _get_new_menu_pages(self, cr, uid, event, context=None):
        """Extend the website menu entries generated for *event*."""
        context = context or {}
        result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context)
        if event.show_tracks:
            result.append((_('Talks'), '/event/%s/track' % slug(event)))
            result.append((_('Agenda'), '/event/%s/agenda' % slug(event)))
        if event.blog_id:
            # BUG FIX: 'blog_ig' is not a field on this model (the field is
            # 'blog_id', see _columns above), so this line always raised.
            # NOTE(review): the URL also looks like it is missing a '/'
            # between '/blogpost' and the slug -- confirm against the
            # website_blog controller routes before changing it.
            result.append((_('News'), '/blogpost' + slug(event.blog_id)))
        if event.show_track_proposal:
            result.append((_('Talk Proposals'), '/event/%s/track_proposal' % slug(event)))
        return result
#
# Sponsors
#
class event_sponsors_type(osv.osv):
    """Sponsoring level (gold, silver, ...), ordered by sequence."""
    _name = "event.sponsor.type"
    _order = "sequence"
    _columns = {
        "name": fields.char('Sponsor Type', required=True, translate=True),
        "sequence": fields.integer('Sequence')
    }
class event_sponsors(osv.osv):
    """A partner sponsoring an event at a given sponsoring level."""
    _name = "event.sponsor"
    _order = "sequence"
    _columns = {
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True),
        'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True),
        'url': fields.text('Sponsor Website'),
        # Stored copy of the type's sequence so sponsors sort by level.
        'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True),
        'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary')
    }
    def has_access_to_partner(self, cr, uid, ids, context=None):
        """Return True when the current user can read the partner record
        of every sponsor in *ids* (the count of readable partners equals
        the number of sponsors), False otherwise."""
        partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)]
        return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
| agpl-3.0 |
codedsk/hubcheck | hubcheck/pageobjects/po_time_overview_page.py | 1 | 1349 | from hubcheck.pageobjects.po_time_base_page import TimeBasePage
from hubcheck.pageobjects.basepageelement import Link
class TimeOverviewPage(TimeBasePage):
    """Page object for the hub's /time/overview page.

    Thin wrapper delegating all actions to the hub-specific TimeOverview
    component loaded through the page-object catalog.
    """
    def __init__(self,browser,catalog,groupid=None):
        super(TimeOverviewPage,self).__init__(browser,catalog)
        self.path = "/time/overview"
        # load hub's classes
        TimeOverviewPage_Locators = self.load_class('TimeOverviewPage_Locators')
        TimeOverview = self.load_class('TimeOverview')
        # update this object's locator
        self.locators.update(TimeOverviewPage_Locators.locators)
        # setup page object's components
        self.overview = TimeOverview(self,{'base':'overview'})
    def get_active_hubs_count(self):
        """Return the number of active hubs shown in the overview."""
        return self.overview.get_active_hubs_count()
    def get_active_tasks_count(self):
        """Return the number of active tasks shown in the overview."""
        return self.overview.get_active_tasks_count()
    def get_total_hours_count(self):
        """Return the total recorded hours shown in the overview."""
        return self.overview.get_total_hours_count()
    def goto_hubs(self):
        """Navigate to the hubs listing."""
        self.overview.goto_hubs()
    def goto_tasks(self):
        """Navigate to the tasks listing."""
        self.overview.goto_tasks()
    def goto_records(self):
        """Navigate to the time records listing."""
        self.overview.goto_records()
class TimeOverviewPage_Locators_Base(object):
    """Default CSS locators for TimeOverviewPage; hubs may override
    these via the class catalog."""
    # Maps component names to selenium-style locator strings.
    locators = {
        'overview' : "css=#plg_time_overview",
    }
| mit |
madeso/prettygood | dotnet/Tagger/TagValidator.py | 1 | 2567 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MusicBrainz;
using PrettyGood.Util;
namespace Tagger
{
class TagValidator
{
public bool validate(IdTag tag)
{
Artist artist = null;
if (string.IsNullOrEmpty(tag.Artist) == false) artist = getArtist(tag.Artist);
if (artist == null) return false;
Release album = null;
if (string.IsNullOrEmpty(tag.Album) == false) album = getRelease(artist, tag.Album);
Track track = null;
if (string.IsNullOrEmpty(tag.Title))
{
int num = int.Parse(tag.TrackNumber.RemoveLeadingZeros());
num %= 100;
track = album.GetTracks()[num];
}
else
{
foreach (var t in Track.Query(tag.Title, artist.GetName()))
{
track = t;
break;
}
}
if (track == null) return false;
if (album == null)
{
foreach (var r in track.GetReleases())
{
album = r;
break;
}
}
tag.Artist = artist.GetName();
tag.Album = album.GetTitle();
tag.TrackNumber = track.GetTrackNumber(album).ToString();
tag.TotalTracks = album.GetTracks().Count.ToString();
//tag.Year = album.GetReleaseRelations()[0].BeginDate;
return true;
}
private Release getRelease(Artist artist, string a)
{
string album = a.ToLower();
foreach (Release r in artist.GetReleases())
{
if (album == r.GetTitle().ToLower()) return r;
}
return null;
}
private Artist getArtist(string art)
{
string artist = art.ToLower();
if (artists.ContainsKey(artist)) return artists[artist];
Artist info = null;
System.Threading.Thread.Sleep(500);
foreach (Artist a in Artist.Query(artist))
{
string name = a.GetName();
if (artist.Contains(name.ToLower()))
{
info = a;
break;
}
}
artists.Add(artist, info);
return info;
}
Dictionary<string, Artist> artists = new Dictionary<string, Artist>();
}
}
| mit |
gangadhar-kadam/sms-erpnext | patches/march_2013/p08_create_aii_accounts.py | 5 | 4022 | import webnotes
def execute():
	"""Patch entry point for auto-inventory accounting (aii).

	Reloads the Company doctype, creates the chart of accounts where
	missing, then the aii group/ledger accounts and cost center, and
	finally sets each company's default accounts (order matters).
	"""
	webnotes.reload_doc("setup", "doctype", "company")
	create_chart_of_accounts_if_not_exists()
	add_group_accounts()
	add_ledger_accounts()
	add_aii_cost_center()
	set_default_accounts()
def set_default_accounts():
	"""Let every company document (re)compute its default account links."""
	companies = webnotes.conn.sql_list("select name from `tabCompany`")
	for company_name in companies:
		webnotes.get_obj("Company", company_name).set_default_accounts()
def _check(parent_account, company):
	"""Resolve *parent_account* to an existing account.

	When the account does not exist yet, fall back to the company's
	matching root account (selected by P&L flag and debit/credit side);
	otherwise the name is returned unchanged.
	"""
	def _get_root(is_pl_account, debit_or_credit):
		# Root accounts are those with an empty parent_account.
		res = webnotes.conn.sql("""select name from `tabAccount`
			where company=%s and is_pl_account = %s and debit_or_credit = %s
			and ifnull(parent_account, "") ="" """, (company, is_pl_account, debit_or_credit))
		return res and res[0][0] or None

	if webnotes.conn.exists("Account", parent_account):
		return parent_account
	fallbacks = [
		("Current Assets", ("No", "Debit")),
		("Direct Expenses", ("Yes", "Debit")),
		("Current Liabilities", ("No", "Credit")),
	]
	for prefix, root_args in fallbacks:
		if parent_account.startswith(prefix):
			return _get_root(*root_args)
	return parent_account
def add_group_accounts():
	"""Create the aii group accounts under the standard root accounts.

	Row layout: [account_name, parent_account_name, group_or_ledger,
	account_type]. _check remaps the parent to a root account when it
	does not exist for a given company.
	"""
	accounts_to_add = [
		["Stock Assets", "Current Assets", "Group", ""],
		["Stock Expenses", "Direct Expenses", "Group", "Expense Account"],
		["Stock Liabilities", "Current Liabilities", "Group", ""],
	]
	add_accounts(accounts_to_add, _check)
def add_ledger_accounts():
	"""Create the aii ledger accounts under the groups added by
	add_group_accounts(). Row layout as in add_group_accounts."""
	accounts_to_add = [
		["Stock In Hand", "Stock Assets", "Ledger", ""],
		["Cost of Goods Sold", "Stock Expenses", "Ledger", "Expense Account"],
		["Stock Adjustment", "Stock Expenses", "Ledger", "Expense Account"],
		["Expenses Included In Valuation", "Stock Expenses", "Ledger", "Expense Account"],
		["Stock Received But Not Billed", "Stock Liabilities", "Ledger", ""],
	]
	add_accounts(accounts_to_add)
def add_accounts(accounts_to_add, check_fn=None):
	"""Create the given accounts for every company, skipping existing ones.

	accounts_to_add: list of [name, parent_name, group_or_ledger, type].
	check_fn: optional hook remapping the parent account per company.
	"""
	for company, abbr in webnotes.conn.sql("""select name, abbr from `tabCompany`"""):
		count = webnotes.conn.sql("""select count(name) from `tabAccount`
			where company=%s and ifnull(parent_account, '')=''""", company)[0][0]
		if count > 4:
			# A non-standard chart (more than 4 root accounts) is skipped.
			# BUG FIX: the message was missing the spaces around the company
			# name ("Companyfoohas more than...").
			webnotes.errprint("Company " + company +
				" has more than 4 root accounts. cannot apply patch to this company.")
			continue
		for account_name, parent_account_name, group_or_ledger, account_type in accounts_to_add:
			if not webnotes.conn.sql("""select name from `tabAccount` where account_name = %s
				and company = %s""", (account_name, company)):
				parent_account = "%s - %s" % (parent_account_name, abbr)
				if check_fn:
					parent_account = check_fn(parent_account, company)
				account = webnotes.bean({
					"doctype": "Account",
					"account_name": account_name,
					"parent_account": parent_account,
					"group_or_ledger": group_or_ledger,
					"account_type": account_type,
					"company": company
				})
				account.insert()
def add_aii_cost_center():
	"""Create an "Auto Inventory Accounting" ledger cost center under the
	root cost center of every company that does not have one yet."""
	for company, abbr in webnotes.conn.sql("""select name, abbr from `tabCompany`"""):
		if not webnotes.conn.sql("""select name from `tabCost Center` where cost_center_name =
			'Auto Inventory Accounting' and company_name = %s""", company):
			# The root cost center is the one without a parent.
			# BUG FIX: the filter key was garbled as "parent_cost_center['']",
			# which is not a field name and could never match a record.
			parent_cost_center = webnotes.conn.get_value("Cost Center",
				{"parent_cost_center": '', "company_name": company})
			if not parent_cost_center:
				# BUG FIX: missing space before "does" in the message.
				webnotes.errprint("Company " + company + " does not have a root cost center")
				continue
			cc = webnotes.bean({
				"doctype": "Cost Center",
				"cost_center_name": "Auto Inventory Accounting",
				"parent_cost_center": parent_cost_center,
				"group_or_ledger": "Ledger",
				"company_name": company
			})
			cc.insert()
def create_chart_of_accounts_if_not_exists():
	"""For companies without a single account, clear the receivables and
	payables groups and re-save the company so that its chart of accounts
	gets (re)created by the Company controller."""
	for row in webnotes.conn.sql("select name from `tabCompany`"):
		company_name = row[0]
		if webnotes.conn.sql("select * from `tabAccount` where company = %s", company_name):
			continue
		webnotes.conn.sql("""update `tabCompany` set receivables_group = '',
			payables_group = '' where name = %s""", company_name)
		webnotes.bean("Company", company_name).save()
| agpl-3.0 |
CodeForAfrica/grano | grano/plugins.py | 4 | 2079 | import logging
from stevedore.enabled import EnabledExtensionManager
from grano.core import app
log = logging.getLogger(__name__)
# Entry-point namespaces (setuptools groups) in which grano plugins can
# register themselves; one stevedore manager is created per namespace.
NAMESPACES = [
    'grano.startup',
    'grano.periodic',
    'grano.entity.change',
    'grano.relation.change',
    'grano.schema.change',
    'grano.project.change'
]

# Process-wide registry of stevedore managers, keyed by namespace and
# populated lazily on the first _get_manager() call.
PLUGINS = {'LOADED': False, 'MANAGERS': {}}
def _get_manager(namespace):
    """Return the stevedore extension manager for *namespace*.

    On first use, managers for ALL namespaces are created and cached in
    PLUGINS; only plugins listed in the app's PLUGINS/DEFAULT_PLUGINS
    configuration are enabled.
    """
    default_plugins = app.config.get('DEFAULT_PLUGINS', [])
    enabled_plugins = app.config.get('PLUGINS', []) + default_plugins
    available_plugins = set()
    assert namespace in NAMESPACES, \
        '%s not one of %r' % (namespace, NAMESPACES)
    if not PLUGINS['LOADED']:
        for namespace_ in NAMESPACES:
            # check_func both records every advertised plugin (for the
            # log lines below) and decides whether it gets enabled.
            def check_func(ext):
                available_plugins.add(ext.name)
                return ext.name in enabled_plugins
            mgr = EnabledExtensionManager(
                namespace=namespace_,
                check_func=check_func,
                propagate_map_exceptions=False,
                invoke_on_load=True)
            PLUGINS['MANAGERS'][namespace_] = mgr
        PLUGINS['LOADED'] = True
        log.info("Enabled: %s", ", ".join(sorted(enabled_plugins)))
        log.info("Available: %s", ", ".join(sorted(available_plugins)))
    return PLUGINS['MANAGERS'][namespace]
def notify_plugins(namespace, callback):
    """Notify all active plugins about an event in a given namespace.

    ``callback`` is invoked once with each available plugin instance.
    """
    def _invoke(extension, data):
        callback(extension.obj)
    try:
        _get_manager(namespace).map(_invoke, None)
    except RuntimeError:
        # stevedore raises RuntimeError when no extensions are loaded.
        pass
def list_plugins():
    """List all available plugins, grouped by the namespace in which
    they're made available; namespaces with no plugins map to []."""
    plugins = {}
    for namespace in NAMESPACES:
        names = plugins.setdefault(namespace, [])
        try:
            _get_manager(namespace).map(
                lambda ext, _data: names.append(ext.name), None)
        except RuntimeError:
            # No extensions loaded for this namespace; keep the empty list.
            pass
    return plugins
| mit |
40323210/bg6_cdw11 | static/plugin/liquid_tags/test_notebook.py | 311 | 3042 | import re
from pelican.tests.support import unittest
from . import notebook
class TestNotebookTagRegex(unittest.TestCase):
    """Unit tests for the notebook liquid-tag argument regex
    (notebook.FORMAT): source path plus optional cells[a:b] and
    language[name] arguments."""

    def get_argdict(self, markup):
        """Run notebook.FORMAT against *markup* and return the captured
        (src, start, end, language) tuple, or None when nothing matches."""
        match = notebook.FORMAT.search(markup)
        if match:
            argdict = match.groupdict()
            src = argdict['src']
            start = argdict['start']
            end = argdict['end']
            language = argdict['language']
            return src, start, end, language
        return None

    def test_basic_notebook_tag(self):
        """Bare path: only src is captured."""
        markup = u'path/to/thing.ipynb'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)

    def test_basic_notebook_tag_insensitive_to_whitespace(self):
        """Surrounding whitespace must not leak into the captures."""
        markup = u'   path/to/thing.ipynb '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)

    def test_notebook_tag_with_cells(self):
        """cells[1:5] populates start and end (as strings)."""
        markup = u'path/to/thing.ipynb cells[1:5]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertIsNone(language)

    def test_notebook_tag_with_alphanumeric_language(self):
        markup = u'path/to/thing.ipynb language[python3]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertEqual(language, u'python3')

    def test_notebook_tag_with_symbol_in_name_language(self):
        """Language short names may contain '+' and '-' characters."""
        for short_name in [u'c++', u'cpp-objdump', u'c++-objdumb', u'cxx-objdump']:
            markup = u'path/to/thing.ipynb language[{}]'.format(short_name)
            src, start, end, language = self.get_argdict(markup)
            self.assertEqual(src, u'path/to/thing.ipynb')
            self.assertIsNone(start)
            self.assertIsNone(end)
            self.assertEqual(language, short_name)

    def test_notebook_tag_with_language_and_cells(self):
        """cells[...] and language[...] can be combined."""
        markup = u'path/to/thing.ipynb cells[1:5] language[julia]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')

    def test_notebook_tag_with_language_and_cells_and_weird_spaces(self):
        markup = u'   path/to/thing.ipynb   cells[1:5]    language[julia]    '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')
if __name__ == '__main__':
unittest.main() | agpl-3.0 |
mne-tools/mne-tools.github.io | 0.21/_downloads/4bd3132f565f8eeb8f92269a858f1f3f/plot_source_alignment.py | 2 | 17152 | # -*- coding: utf-8 -*-
"""
.. _plot_source_alignment:
Source alignment and coordinate frames
======================================
This tutorial shows how to visually assess the spatial alignment of MEG sensor
locations, digitized scalp landmark and sensor locations, and MRI volumes. This
alignment process is crucial for computing the forward solution, as is
understanding the different coordinate frames involved in this process.
.. contents:: Page contents
:local:
:depth: 2
Let's start out by loading some data.
"""
import os.path as op
import numpy as np
import nibabel as nib
from scipy import linalg
import mne
from mne.io.constants import FIFF
# Load the MNE "sample" dataset: the raw recording, the precomputed
# head<->MRI transform ("trans"), and the surface source space.
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(raw_fname)
trans = mne.read_trans(trans_fname)
src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',
                                     'sample-oct-6-src.fif'))

# load the T1 file and change the header information to the correct units
t1w = nib.load(op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz'))
t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)
# 10 = mm (spatial) + s (temporal) per the NIfTI-1 xyzt_units codes
t1w.header['xyzt_units'] = np.array(10, dtype='uint8')
t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)
###############################################################################
# .. raw:: html
#
# <style>
# .pink {color:DarkSalmon; font-weight:bold}
# .blue {color:DeepSkyBlue; font-weight:bold}
# .gray {color:Gray; font-weight:bold}
# .magenta {color:Magenta; font-weight:bold}
# .purple {color:Indigo; font-weight:bold}
# .green {color:LimeGreen; font-weight:bold}
# .red {color:Red; font-weight:bold}
# </style>
#
# .. role:: pink
# .. role:: blue
# .. role:: gray
# .. role:: magenta
# .. role:: purple
# .. role:: green
# .. role:: red
#
#
# Understanding coordinate frames
# -------------------------------
# For M/EEG source imaging, there are three **coordinate frames** that must be
# brought into alignment using two 3D `transformation matrices <wiki_xform_>`_
# that define how to rotate and translate points in one coordinate frame
# to their equivalent locations in another. The three main coordinate frames
# are:
#
# * :blue:`"meg"`: the coordinate frame for the physical locations of MEG
# sensors
# * :gray:`"mri"`: the coordinate frame for MRI images, and scalp/skull/brain
# surfaces derived from the MRI images
# * :pink:`"head"`: the coordinate frame for digitized sensor locations and
# scalp landmarks ("fiducials")
#
#
# Each of these are described in more detail in the next section.
#
# A good way to start visualizing these coordinate frames is to use the
# `mne.viz.plot_alignment` function, which is used for creating or inspecting
# the transformations that bring these coordinate frames into alignment, and
# displaying the resulting alignment of EEG sensors, MEG sensors, brain
# sources, and conductor models. If you provide ``subjects_dir`` and
# ``subject`` parameters, the function automatically loads the subject's
# Freesurfer MRI surfaces. Important for our purposes, passing
# ``show_axes=True`` to `~mne.viz.plot_alignment` will draw the origin of each
# coordinate frame in a different color, with axes indicated by different sized
# arrows:
#
# * shortest arrow: (**R**)ight / X
# * medium arrow: forward / (**A**)nterior / Y
# * longest arrow: up / (**S**)uperior / Z
#
# Note that all three coordinate systems are **RAS** coordinate frames and
# hence are also `right-handed`_ coordinate systems. Finally, note that the
# ``coord_frame`` parameter sets which coordinate frame the camera
# should initially be aligned with. Let's take a look:
# Plot the co-registered sensors, digitization points, and head surface,
# drawing the origin and axes of each coordinate frame (colors per legend).
fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
                             subjects_dir=subjects_dir, surfaces='head-dense',
                             show_axes=True, dig=True, eeg=[], meg='sensors',
                             coord_frame='meg')
mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.))

# the transforms store translations in meters; scale by 1000 for mm display
print('Distance from head origin to MEG origin: %0.1f mm'
      % (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))
print('Distance from head origin to MRI origin: %0.1f mm'
      % (1000 * np.linalg.norm(trans['trans'][:3, 3])))
dists = mne.dig_mri_distances(raw.info, trans, 'sample',
                              subjects_dir=subjects_dir)
print('Distance from %s digitized points to head surface: %0.1f mm'
      % (len(dists), 1000 * np.mean(dists)))
###############################################################################
# Coordinate frame definitions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# 1. Neuromag/Elekta/MEGIN head coordinate frame ("head", :pink:`pink axes`)
# The head coordinate frame is defined through the coordinates of
# anatomical landmarks on the subject's head: usually the Nasion (`NAS`_),
# and the left and right preauricular points (`LPA`_ and `RPA`_).
# Different MEG manufacturers may have different definitions of the head
# coordinate frame. A good overview can be seen in the
# `FieldTrip FAQ on coordinate systems`_.
#
# For Neuromag/Elekta/MEGIN, the head coordinate frame is defined by the
# intersection of
#
# 1. the line between the LPA (:red:`red sphere`) and RPA
# (:purple:`purple sphere`), and
#    2. the line perpendicular to this LPA-RPA line that goes through
# the Nasion (:green:`green sphere`).
#
# The axes are oriented as **X** origin→RPA, **Y** origin→NAS,
# **Z** origin→upward (orthogonal to X and Y).
#
# .. note:: The required 3D coordinates for defining the head coordinate
# frame (NAS, LPA, RPA) are measured at a stage separate from
# the MEG data recording. There exist numerous devices to
# perform such measurements, usually called "digitizers". For
# example, see the devices by the company `Polhemus`_.
#
# 2. MEG device coordinate frame ("meg", :blue:`blue axes`)
# The MEG device coordinate frame is defined by the respective MEG
# manufacturers. All MEG data is acquired with respect to this coordinate
# frame. To account for the anatomy and position of the subject's head, we
# use so-called head position indicator (HPI) coils. The HPI coils are
# placed at known locations on the scalp of the subject and emit
# high-frequency magnetic fields used to coregister the head coordinate
# frame with the device coordinate frame.
#
# From the Neuromag/Elekta/MEGIN user manual:
#
# The origin of the device coordinate system is located at the center
# of the posterior spherical section of the helmet with X axis going
# from left to right and Y axis pointing front. The Z axis is, again
# normal to the plane with positive direction up.
#
# .. note:: The HPI coils are shown as :magenta:`magenta spheres`.
# Coregistration happens at the beginning of the recording and
# the head↔meg transformation matrix is stored in
# ``raw.info['dev_head_t']``.
#
# 3. MRI coordinate frame ("mri", :gray:`gray axes`)
# Defined by Freesurfer, the "MRI surface RAS" coordinate frame has its
# origin at the center of a 256×256×256 1mm anisotropic volume (though the
# center may not correspond to the anatomical center of the subject's
# head).
#
# .. note:: We typically align the MRI coordinate frame to the head
# coordinate frame through a
# `rotation and translation matrix <wiki_xform_>`_,
# that we refer to in MNE as ``trans``.
#
# A bad example
# ^^^^^^^^^^^^^
# Let's try using `~mne.viz.plot_alignment` with ``trans=None``, which
# (incorrectly!) equates the MRI and head coordinate frames.
# With trans=None the head and MRI frames are (incorrectly) equated, so the
# surfaces end up misaligned with the digitized points.
mne.viz.plot_alignment(raw.info, trans=None, subject='sample', src=src,
                       subjects_dir=subjects_dir, dig=True,
                       surfaces=['head-dense', 'white'], coord_frame='meg')
###############################################################################
# A good example
# ^^^^^^^^^^^^^^
# Here is the same plot, this time with the ``trans`` properly defined
# (using a precomputed transformation matrix).
# Same plot, now with the proper head<->MRI transform: surfaces and
# digitization points line up.
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
                       src=src, subjects_dir=subjects_dir, dig=True,
                       surfaces=['head-dense', 'white'], coord_frame='meg')
###############################################################################
# Visualizing the transformations
# -------------------------------
# Let's visualize these coordinate frames using just the scalp surface; this
# will make it easier to see their relative orientations. To do this we'll
# first load the Freesurfer scalp surface, then apply a few different
# transforms to it. In addition to the three coordinate frames discussed above,
# we'll also show the "mri_voxel" coordinate frame. Unlike MRI Surface RAS,
# "mri_voxel" has its origin in the corner of the volume (the left-most,
# posterior-most coordinate on the inferior-most MRI slice) instead of at the
# center of the volume. "mri_voxel" is also **not** an RAS coordinate system:
# rather, its XYZ directions are based on the acquisition order of the T1 image
# slices.
# the head surface is stored in "mri" coordinate frame
# (origin at center of volume, units=mm)
# the head surface is stored in "mri" coordinate frame
# (origin at center of volume, units=mm)
seghead_rr, seghead_tri = mne.read_surface(
    op.join(subjects_dir, 'sample', 'surf', 'lh.seghead'))

# to put the scalp in the "head" coordinate frame, we apply the inverse of
# the precomputed `trans` (which maps head → mri)
# (move=True applies the translation part of the transform, not just rotation)
mri_to_head = linalg.inv(trans['trans'])
scalp_pts_in_head_coord = mne.transforms.apply_trans(
    mri_to_head, seghead_rr, move=True)

# to put the scalp in the "meg" coordinate frame, we use the inverse of
# raw.info['dev_head_t']
head_to_meg = linalg.inv(raw.info['dev_head_t']['trans'])
scalp_pts_in_meg_coord = mne.transforms.apply_trans(
    head_to_meg, scalp_pts_in_head_coord, move=True)

# The "mri_voxel"→"mri" transform is embedded in the header of the T1 image
# file. We'll invert it and then apply it to the original `seghead_rr` points.
# No unit conversion necessary: this transform expects mm and the scalp surface
# is defined in mm.
vox_to_mri = t1_mgh.header.get_vox2ras_tkr()
mri_to_vox = linalg.inv(vox_to_mri)
scalp_points_in_vox = mne.transforms.apply_trans(
    mri_to_vox, seghead_rr, move=True)
###############################################################################
# Now that we've transformed all the points, let's plot them. We'll use the
# same colors used by `~mne.viz.plot_alignment` and use :green:`green` for the
# "mri_voxel" coordinate frame:
def add_head(renderer, points, color, opacity=0.95):
    """Render the scalp mesh with vertices *points* in the given color."""
    xs, ys, zs = points.T  # split the (n_vertices, 3) array into coordinates
    renderer.mesh(xs, ys, zs, triangles=seghead_tri, color=color,
                  opacity=opacity)
renderer = mne.viz.backends.renderer.create_3d_figure(
    size=(600, 600), bgcolor='w', scene=False)
# one copy of the scalp per frame: gray=mri, blue=meg, pink=head,
# green=mri_voxel
add_head(renderer, seghead_rr, 'gray')
add_head(renderer, scalp_pts_in_meg_coord, 'blue')
add_head(renderer, scalp_pts_in_head_coord, 'pink')
add_head(renderer, scalp_points_in_vox, 'green')
mne.viz.set_3d_view(figure=renderer.figure, distance=800,
                    focalpoint=(0., 30., 30.), elevation=105, azimuth=180)
renderer.show()
###############################################################################
# The relative orientations of the coordinate frames can be inferred by
# observing the direction of the subject's nose. Notice also how the origin of
# the :green:`mri_voxel` coordinate frame is in the corner of the volume
# (above, behind, and to the left of the subject), whereas the other three
# coordinate frames have their origin roughly in the center of the head.
#
# Example: MRI defacing
# ^^^^^^^^^^^^^^^^^^^^^
# For a real-world example of using these transforms, consider the task of
# defacing the MRI to preserve subject anonymity. If you know the points in
# the "head" coordinate frame (as you might if you're basing the defacing on
# digitized points) you would need to transform them into "mri" or "mri_voxel"
# in order to apply the blurring or smoothing operations to the MRI surfaces or
# images. Here's what that would look like (we'll use the nasion landmark as a
# representative example):
# get the nasion (a digitized cardinal point, stored in "head" coordinates)
nasion = [p for p in raw.info['dig'] if
          p['kind'] == FIFF.FIFFV_POINT_CARDINAL and
          p['ident'] == FIFF.FIFFV_POINT_NASION][0]
assert nasion['coord_frame'] == FIFF.FIFFV_COORD_HEAD
nasion = nasion['r']  # get just the XYZ values

# transform it from head to MRI space (recall that `trans` is head → mri)
nasion_mri = mne.transforms.apply_trans(trans, nasion, move=True)
# then transform to voxel space, after converting from meters to millimeters
nasion_vox = mne.transforms.apply_trans(
    mri_to_vox, nasion_mri * 1e3, move=True)

# plot it to make sure the transforms worked
renderer = mne.viz.backends.renderer.create_3d_figure(
    size=(400, 400), bgcolor='w', scene=False)
add_head(renderer, scalp_points_in_vox, 'green', opacity=1)
renderer.sphere(center=nasion_vox, color='orange', scale=10)
mne.viz.set_3d_view(figure=renderer.figure, distance=600.,
                    focalpoint=(0., 125., 250.), elevation=45, azimuth=180)
renderer.show()
###############################################################################
# Defining the head↔MRI ``trans`` using the GUI
# ---------------------------------------------
# You can try creating the head↔MRI transform yourself using
# :func:`mne.gui.coregistration`.
#
# * First you must load the digitization data from the raw file
# (``Head Shape Source``). The MRI data is already loaded if you provide the
# ``subject`` and ``subjects_dir``. Toggle ``Always Show Head Points`` to see
# the digitization points.
# * To set the landmarks, toggle ``Edit`` radio button in ``MRI Fiducials``.
# * Set the landmarks by clicking the radio button (LPA, Nasion, RPA) and then
# clicking the corresponding point in the image.
# * After doing this for all the landmarks, toggle ``Lock`` radio button. You
# can omit outlier points, so that they don't interfere with the finetuning.
#
# .. note:: You can save the fiducials to a file and pass
# ``mri_fiducials=True`` to plot them in
# :func:`mne.viz.plot_alignment`. The fiducials are saved to the
# subject's bem folder by default.
# * Click ``Fit Head Shape``. This will align the digitization points to the
# head surface. Sometimes the fitting algorithm doesn't find the correct
# alignment immediately. You can try first fitting using LPA/RPA or fiducials
# and then align according to the digitization. You can also finetune
# manually with the controls on the right side of the panel.
# * Click ``Save As...`` (lower right corner of the panel), set the filename
# and read it with :func:`mne.read_trans`.
#
# For more information, see step by step instructions
# `in these slides
# <https://www.slideshare.net/mne-python/mnepython-coregistration>`_.
# Uncomment the following line to align the data yourself.
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)
###############################################################################
# .. _plot_source_alignment_without_mri:
#
# Alignment without MRI
# ---------------------
# The surface alignments above are possible if you have the surfaces available
# from Freesurfer. :func:`mne.viz.plot_alignment` automatically searches for
# the correct surfaces from the provided ``subjects_dir``. Another option is
# to use a :ref:`spherical conductor model <eeg_sphere_model>`. It is
# passed through ``bem`` parameter.
# Fit a spherical conductor model to the digitization and build a volumetric
# grid source space inside it — no Freesurfer surfaces required.
sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
    raw.info, eeg='projected', bem=sphere, src=src, dig=True,
    surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True)
###############################################################################
# It is also possible to use :func:`mne.gui.coregistration`
# to warp a subject (usually ``fsaverage``) to subject digitization data, see
# `these slides
# <https://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
#
# .. _right-handed: https://en.wikipedia.org/wiki/Right-hand_rule
# .. _wiki_xform: https://en.wikipedia.org/wiki/Transformation_matrix
# .. _NAS: https://en.wikipedia.org/wiki/Nasion
# .. _LPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _RPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _Polhemus: https://polhemus.com/scanning-digitizing/digitizing-products/
# .. _FieldTrip FAQ on coordinate systems: http://www.fieldtriptoolbox.org/faq/how_are_the_different_head_and_mri_coordinate_systems_defined/ # noqa:E501
| bsd-3-clause |
chainer/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_creation.py | 3 | 46107 | import io
import sys
import tempfile
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
# Scalar inputs exercised by the array()/asarray() tests below: ints, floats,
# bools, a 0-dim numpy array, and non-finite floats.
_array_params_list = [
    -2,
    1,
    -1.5,
    2.3,
    True,
    False,
    numpy.array(1),
    float('inf'),
    float('nan'),
]
def _array_params(list):
return list + [
list,
[list, list],
(list, list),
tuple(list),
(tuple(list), tuple(list)),
[tuple(list), tuple(list)],
]
# Traverses the entries in `obj` recursively and returns `True` if all of the
# entries are finite numbers.
def _is_all_finite(obj):
if isinstance(obj, (tuple, list)):
return all(_is_all_finite(o) for o in obj)
else:
return numpy.isfinite(obj)
def _get_default_dtype(value):
if isinstance(value, bool):
return 'bool_'
if isinstance(value, int):
return 'int32'
if isinstance(value, float):
return 'float32'
assert False
# A special parameter object used to represent an unspecified argument.
class Unspecified(object):
    """Sentinel distinguishing "argument omitted" from an explicit None."""
    pass
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('obj', _array_params(_array_params_list))
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None, Unspecified))
def test_array_from_tuple_or_list(xp, obj, dtype_spec, device):
    """xp.array(obj[, dtype]) gives identical results in numpy/chainerx."""
    # numpy does not understand chainerx dtype objects; pass the name instead
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Skip nan/inf -> integer conversion that would cause a cast error.
    if (not _is_all_finite(obj)
            and dtype_spec not in (None, Unspecified)
            and chainerx.dtype(dtype_spec).kind not in ('f', 'c')):
        return chainerx.testing.ignore()
    if dtype_spec is Unspecified:
        return xp.array(obj)
    else:
        return xp.array(obj, dtype_spec)
@pytest.mark.parametrize('obj', _array_params(_array_params_list))
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_array_from_python_tuple_or_list_with_device(obj, device):
    """An explicit device argument yields the same data as the default."""
    a = chainerx.array(obj, 'float32', device=device)
    b = chainerx.array(obj, 'float32')
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
def _check_array_from_numpy_array(a_chx, a_np, device=None):
    """Check a chainerx array that was constructed from a numpy array.

    Verifies a zero buffer offset, the expected device, and that
    converting back to numpy recovers equal data.
    """
    assert a_chx.offset == 0
    array_utils.check_device(a_chx, device)

    # recovered data should be equal
    a_np_recovered = chainerx.to_numpy(a_chx)
    chainerx.testing.assert_array_equal_ex(
        a_chx, a_np_recovered, strides_check=False)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_array(xp, shape, dtype, device):
    """xp.array copies numpy input; data survives freeing the source."""
    a_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    a_xp = xp.array(a_np)

    if xp is chainerx:
        _check_array_from_numpy_array(a_xp, a_np, device)

    # test possibly freed memory
    a_np_copy = a_np.copy()
    del a_np
    chainerx.testing.assert_array_equal_ex(
        a_xp, a_np_copy, strides_check=False)

    return a_xp
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_non_contiguous_array(xp, shape, dtype, device):
    """Non-contiguous (padded) numpy input is copied correctly."""
    a_np = array_utils.create_dummy_ndarray(numpy, shape, dtype, padding=True)
    a_xp = xp.array(a_np)

    if xp is chainerx:
        _check_array_from_numpy_array(a_xp, a_np, device)

    # test possibly freed memory
    a_np_copy = a_np.copy()
    del a_np
    chainerx.testing.assert_array_equal_ex(
        a_xp, a_np_copy, strides_check=False)

    return a_xp
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_positive_offset_array(xp, device):
    """A numpy view with a nonzero offset into its base is copied correctly."""
    a_np = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')[1, 1:]
    a_xp = xp.array(a_np)

    if xp is chainerx:
        _check_array_from_numpy_array(a_xp, a_np, device)

    # test possibly freed memory
    a_np_copy = a_np.copy()
    del a_np
    chainerx.testing.assert_array_equal_ex(a_xp, a_np_copy)

    return a_xp
def _array_from_numpy_array_with_dtype(xp, shape, src_dtype, dst_dtype_spec):
    """Create a dummy numpy array of src_dtype and convert it via xp.array."""
    # numpy does not accept chainerx dtype objects; use the name instead
    if xp is numpy and isinstance(dst_dtype_spec, chainerx.dtype):
        dst_dtype_spec = dst_dtype_spec.name
    source = array_utils.create_dummy_ndarray(numpy, shape, src_dtype)
    return xp.array(source, dtype=dst_dtype_spec)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes)
def test_array_from_numpy_array_with_dtype(
        xp, shape, src_dtype, dst_dtype, device):
    """Conversion between every pair of dtypes matches numpy."""
    return _array_from_numpy_array_with_dtype(xp, shape, src_dtype, dst_dtype)
@chainerx.testing.numpy_chainerx_array_equal()
@chainerx.testing.parametrize_dtype_specifier(
    'dst_dtype_spec', additional_args=(None,))
def test_array_from_numpy_array_with_dtype_spec(xp, shape, dst_dtype_spec):
    """Every accepted dtype specifier form (including None) matches numpy."""
    return _array_from_numpy_array_with_dtype(
        xp, shape, 'float32', dst_dtype_spec)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_array_from_numpy_array_with_device(shape, device):
    """An explicit device argument yields the same data as the default."""
    # NOTE(review): the `shape` fixture is unused here; the input is fixed
    # at (2,) — confirm whether this is intentional.
    orig = array_utils.create_dummy_ndarray(numpy, (2, ), 'float32')
    a = chainerx.array(orig, device=device)
    b = chainerx.array(orig)
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array(shape, dtype, copy, device):
    """copy=False returns the input itself; copy=True returns a new array."""
    t = array_utils.create_dummy_ndarray(chainerx, shape, dtype, device=device)
    a = chainerx.array(t, copy=copy)
    if not copy:
        assert t is a
    else:
        assert t is not a
        chainerx.testing.assert_array_equal_ex(a, t, strides_check=False)
        assert a.device is t.device
        assert a.is_contiguous
def _check_array_from_chainerx_array_with_dtype(
        shape, src_dtype, dst_dtype_spec, copy, device=None):
    """Check chainerx.array(chainerx_array, dtype=..., copy=...).

    The input must be returned as-is only when no copy, no dtype change,
    and no device transfer is required; otherwise a converted copy on
    the default device is expected.
    """
    t = array_utils.create_dummy_ndarray(
        chainerx, shape, src_dtype, device=device)
    a = chainerx.array(t, dtype=dst_dtype_spec, copy=copy)

    src_dtype = chainerx.dtype(src_dtype)
    dst_dtype = src_dtype if dst_dtype_spec is None else chainerx.dtype(
        dst_dtype_spec)
    device = chainerx.get_device(device)

    if (not copy
            and src_dtype == dst_dtype
            and device is chainerx.get_default_device()):
        assert t is a
    else:
        assert t is not a
        chainerx.testing.assert_array_equal_ex(a, t.astype(dst_dtype))
        assert a.dtype == dst_dtype
        assert a.device is chainerx.get_default_device()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array_with_dtype(
        shape, src_dtype, dst_dtype, copy, device):
    """dtype conversion of chainerx input, over all dtype pairs."""
    _check_array_from_chainerx_array_with_dtype(
        shape, src_dtype, dst_dtype, copy, device)
@chainerx.testing.parametrize_dtype_specifier(
    'dst_dtype_spec', additional_args=(None,))
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array_with_dtype_spec(
        shape, dst_dtype_spec, copy):
    """Every accepted dtype specifier form works for chainerx input."""
    _check_array_from_chainerx_array_with_dtype(
        shape, 'float32', dst_dtype_spec, copy)
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes + (None, ))
@pytest.mark.parametrize('copy', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
    'dst_device_spec',
    [None, 'native:1', chainerx.get_device('native:1'), 'native:0'])
def test_array_from_chainerx_array_with_device(
        src_dtype, dst_dtype, copy, device, dst_device_spec):
    """Device transfer combined with dtype conversion and the copy flag."""
    t = array_utils.create_dummy_ndarray(
        chainerx, (2,), src_dtype, device=device)
    a = chainerx.array(t, dtype=dst_dtype, copy=copy, device=dst_device_spec)

    dst_device = chainerx.get_device(dst_device_spec)

    # the input is returned as-is only when nothing needs to change
    if (not copy
            and (dst_dtype is None or src_dtype == dst_dtype)
            and (dst_device_spec is None or device is dst_device)):
        assert t is a
    else:
        assert t is not a

    if dst_dtype is None:
        dst_dtype = t.dtype
    chainerx.testing.assert_array_equal_ex(
        a, t.to_device(dst_device).astype(dst_dtype))
    assert a.dtype == chainerx.dtype(dst_dtype)
    assert a.device is dst_device
def test_asarray_from_python_tuple_or_list():
    """asarray on Python sequences behaves like array(copy=False)."""
    obj = _array_params_list
    a = chainerx.asarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
def test_asarray_from_numpy_array_with_zero_copy():
    """asarray shares the buffer with a compatible numpy array (zero copy)."""
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    obj_refcount_before = sys.getrefcount(obj)

    a = chainerx.asarray(obj, dtype='float32')

    # the chainerx array holds a reference keeping the numpy array alive
    assert sys.getrefcount(obj) == obj_refcount_before + 1
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test buffer is shared (zero copy)
    a += a
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test possibly freed memory
    obj_copy = obj.copy()
    del obj
    chainerx.testing.assert_array_equal_ex(obj_copy, a, strides_check=False)

    # test possibly freed memory (the other way)
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    a = chainerx.asarray(obj, dtype='float32')
    a_copy = a.copy()
    del a
    chainerx.testing.assert_array_equal_ex(a_copy, obj, strides_check=False)
def test_asarray_from_numpy_array_with_copy():
    """asarray copies when a dtype conversion is required."""
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    a = chainerx.asarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device

    # test buffer is not shared
    a += a
    assert not numpy.array_equal(obj, chainerx.to_numpy(a))
@pytest.mark.parametrize('dtype', ['int32', 'float32'])
def test_asarray_from_chainerx_array(dtype):
    """asarray returns the input itself when the dtype already matches."""
    obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
    a = chainerx.asarray(obj, dtype=dtype)
    if a.dtype == obj.dtype:
        assert a is obj
    else:
        assert a is not obj
    e = chainerx.array(obj, dtype=dtype, copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_asarray_with_device(device):
    """An explicit device argument yields the same data as the default."""
    a = chainerx.asarray([0, 1], 'float32', device)
    b = chainerx.asarray([0, 1], 'float32')
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes + (None, ))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
    'dst_device_spec',
    [None, 'native:1', chainerx.get_device('native:1'), 'native:0'])
def test_asarray_from_chainerx_array_with_device(
        src_dtype, dst_dtype, device, dst_device_spec):
    """asarray returns the input itself when no conversion is needed."""
    t = array_utils.create_dummy_ndarray(
        chainerx, (2,), src_dtype, device=device)
    a = chainerx.asarray(t, dtype=dst_dtype, device=dst_device_spec)

    dst_device = chainerx.get_device(dst_device_spec)

    if ((dst_dtype is None or src_dtype == dst_dtype)
            and (dst_device_spec is None or device is dst_device)):
        assert t is a
    else:
        assert t is not a

    if dst_dtype is None:
        dst_dtype = t.dtype
    chainerx.testing.assert_array_equal_ex(
        a, t.to_device(dst_device).astype(dst_dtype))
    assert a.dtype == chainerx.dtype(dst_dtype)
    assert a.device is dst_device
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('padding', [False, True])
def test_ascontiguousarray_from_chainerx_array(device, shape, dtype, padding):
    """ascontiguousarray returns contiguous data, same object if possible."""
    np_arr = array_utils.create_dummy_ndarray(
        numpy, shape, dtype, padding=padding)
    obj = chainerx.testing._fromnumpy(np_arr, keepstrides=True, device=device)
    a = chainerx.ascontiguousarray(obj)
    if not padding and shape != ():  # () will be reshaped to (1,)
        assert a is obj
    e = np_arr
    chainerx.testing.assert_array_equal_ex(e, a, strides_check=False)
    assert a.is_contiguous
    assert e.dtype.name == a.dtype.name
def test_ascontiguousarray_from_chainerx_array_device():
    """ascontiguousarray preserves the input's (non-default) device."""
    with chainerx.using_device(chainerx.get_device('native:0')):
        dev = chainerx.get_device('native:1')  # Non default one
        assert chainerx.get_default_device() is not dev
        a = chainerx.arange(10, device=dev)
        b = chainerx.ascontiguousarray(a)
        assert b.is_contiguous is True
        assert b.device is dev
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('padding', [False, True])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_ascontiguousarray_with_dtype(xp, device, shape, padding, dtype_spec):
    """ascontiguousarray with a dtype argument matches numpy."""
    obj = array_utils.create_dummy_ndarray(xp, shape, 'int32', padding=padding)
    # numpy does not accept chainerx dtype objects; use the name instead
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    a = xp.ascontiguousarray(obj, dtype=dtype_spec)
    if xp is chainerx:
        assert a.is_contiguous
    return a
def test_asanyarray_from_python_tuple_or_list():
    """asanyarray on Python sequences behaves like array(copy=False)."""
    obj = _array_params_list
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
def test_asanyarray_from_numpy_array():
    """asanyarray on numpy input behaves like array(copy=False)."""
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
def test_asanyarray_from_numpy_subclass_array():
    """numpy ndarray subclasses are accepted by asanyarray."""
    class Subclass(numpy.ndarray):
        pass
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32').view(Subclass)
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
@pytest.mark.parametrize('dtype', ['int32', 'float32'])
def test_asanyarray_from_chainerx_array(dtype):
    """asanyarray returns the input itself when the dtype already matches."""
    obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
    a = chainerx.asanyarray(obj, dtype=dtype)
    if a.dtype == obj.dtype:
        assert a is obj
    else:
        assert a is not obj
    e = chainerx.array(obj, dtype=dtype, copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_asanyarray_with_device(device):
    """An explicit device argument yields the same data as the default."""
    a = chainerx.asanyarray([0, 1], 'float32', device)
    b = chainerx.asanyarray([0, 1], 'float32')
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None, Unspecified))
def test_empty(xp, shape_as_sequence_or_int, dtype_spec, device):
    """empty() matches numpy for every dtype specifier form."""
    # numpy does not accept chainerx dtype objects; use the name instead
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    if dtype_spec is Unspecified:
        a = xp.empty(shape_as_sequence_or_int)
    else:
        a = xp.empty(shape_as_sequence_or_int, dtype_spec)
    # contents are uninitialized; fill so the comparison is deterministic
    a.fill(0)
    # normalize the default dtype: numpy results are cast to float32
    if dtype_spec in (None, Unspecified):
        a = dtype_utils.cast_if_numpy_array(xp, a, 'float32')
    return a
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_empty_with_device(device):
    """empty() honors an explicit device argument."""
    a = chainerx.empty((2,), 'float32', device)
    b = chainerx.empty((2,), 'float32')
    array_utils.check_device(a, device)
    assert a.dtype == b.dtype
    assert a.shape == b.shape
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_empty_like(xp, shape, dtype, device):
    """empty_like matches the prototype's shape and dtype."""
    t = xp.empty(shape, dtype)
    a = xp.empty_like(t)
    # contents are uninitialized; fill so the comparison is deterministic
    a.fill(0)
    return a
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_empty_like_with_device(device):
    """empty_like honors an explicit device argument."""
    t = chainerx.empty((2,), 'float32')
    a = chainerx.empty_like(t, device)
    b = chainerx.empty_like(t)
    array_utils.check_device(a, device)
    assert a.dtype == b.dtype
    assert a.shape == b.shape
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None, Unspecified))
def test_zeros(xp, shape_as_sequence_or_int, dtype_spec, device):
    """zeros() matches numpy for every dtype specifier form."""
    # numpy does not accept chainerx dtype objects; use the name instead
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    if dtype_spec is Unspecified:
        out = xp.zeros(shape_as_sequence_or_int)
    else:
        out = xp.zeros(shape_as_sequence_or_int, dtype_spec)
    # normalize the default dtype: numpy results are cast to float32
    if dtype_spec in (None, Unspecified):
        out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
    return out
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_zeros_with_device(device):
    """zeros() honors an explicit device argument."""
    a = chainerx.zeros((2,), 'float32', device=device)
    b = chainerx.zeros((2,), 'float32')
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_zeros_like(xp, shape, dtype, device):
t = xp.empty(shape, dtype)
return xp.zeros_like(t)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_zeros_like_with_device(device):
t = chainerx.empty((2,), 'float32')
a = chainerx.zeros_like(t, device)
b = chainerx.zeros_like(t)
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None, Unspecified))
def test_ones(xp, shape_as_sequence_or_int, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Omit the dtype argument entirely when it is Unspecified.
    extra = () if dtype_spec is Unspecified else (dtype_spec,)
    result = xp.ones(shape_as_sequence_or_int, *extra)
    # With no dtype given, align numpy's float64 default with chainerx's.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_ones_with_device(device):
    on_device = chainerx.ones((2,), 'float32', device)
    reference = chainerx.ones((2,), 'float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_ones_like(xp, shape, dtype, device):
    template = xp.empty(shape, dtype)
    return xp.ones_like(template)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_ones_like_with_device(device):
    # Dropped the unused `shape` fixture parameter: it only multiplied the
    # number of test runs and no sibling *_like_with_device test takes it.
    t = chainerx.empty((2,), 'float32')
    a = chainerx.ones_like(t, device)
    b = chainerx.ones_like(t)
    array_utils.check_device(a, device)
    chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
    'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_full(xp, shape_as_sequence_or_int, value, device):
    result = xp.full(shape_as_sequence_or_int, value)
    # The default dtype follows from the fill value's Python type.
    return dtype_utils.cast_if_numpy_array(
        xp, result, _get_default_dtype(value))
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
    'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_full_with_dtype(xp, shape, dtype_spec, value, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    return xp.full(shape, value, dtype_spec)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_full_with_device(device):
    on_device = chainerx.full((2,), 1, 'float32', device)
    reference = chainerx.full((2,), 1, 'float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
    'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_full_like(xp, shape, dtype, value, device):
    template = xp.empty(shape, dtype)
    return xp.full_like(template, value)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_full_like_with_device(device):
    template = chainerx.empty((2,), 'float32')
    on_device = chainerx.full_like(template, 1, device)
    reference = chainerx.full_like(template, 1)
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
def _is_bool_spec(dtype_spec):
    """Return True if `dtype_spec` denotes the bool dtype (None -> False).

    Used by the arange tests to skip overlong bool ranges.
    """
    return (dtype_spec is not None
            and chainerx.dtype(dtype_spec) == chainerx.bool_)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('stop', [-2, 0, 0.1, 3, 3.2, False, True])
@pytest.mark.parametrize_device(['native:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None,))
def test_arange_stop(xp, stop, dtype_spec, device):
    # TODO(hvy): xp.arange(True) should return an ndarray of type int64
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Overlong bool ranges are covered by test_arange_invalid_too_long_bool.
    if _is_bool_spec(dtype_spec) and stop > 2:
        return chainerx.testing.ignore()
    if isinstance(stop, bool) and dtype_spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    result = xp.arange(stop, dtype=dtype_spec)
    # Without an explicit dtype, align numpy's default with chainerx's.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(
            xp, result, _get_default_dtype(stop))
    return result
@chainerx.testing.numpy_chainerx_allclose(
    atol=1e-7, float16_rtol=5e-3, float16_atol=5e-3)
@pytest.mark.parametrize('start,stop', [
    (0, 0),
    (0, 3),
    (-3, 2),
    (2, 0),
    (-2.2, 3.4),
    (True, True),
    (False, False),
    (True, False),
    (False, True),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None,))
def test_arange_start_stop(xp, start, stop, dtype_spec, device):
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Overlong bool ranges are covered by test_arange_invalid_too_long_bool.
    if _is_bool_spec(dtype_spec) and abs(stop - start) > 2:
        return chainerx.testing.ignore()
    has_bool_endpoint = isinstance(start, bool) or isinstance(stop, bool)
    if has_bool_endpoint and dtype_spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    result = xp.arange(start, stop, dtype=dtype_spec)
    # Without an explicit dtype, align numpy's default with chainerx's.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(
            xp, result, _get_default_dtype(stop))
    return result
@chainerx.testing.numpy_chainerx_allclose(float16_rtol=1e-3)
@pytest.mark.parametrize('start,stop,step', [
    (0, 3, 1),
    (0, 0, 2),
    (0, 1, 2),
    (3, -1, -2),
    (-1, 3, -2),
    (3., 2., 1.2),
    (2., -1., 1.),
    (1, 4, -1.2),
    # (4, 1, -1.2), # TODO(niboshi): Fix it (or maybe NumPy bug?)
    (False, True, True),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
    'dtype_spec', additional_args=(None,))
def test_arange_start_stop_step(xp, start, stop, step, dtype_spec, device):
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # Overlong bool ranges are covered by test_arange_invalid_too_long_bool.
    if _is_bool_spec(dtype_spec) and abs((stop - start) / step) > 2:
        return chainerx.testing.ignore()
    has_bool_arg = any(isinstance(v, bool) for v in (start, stop, step))
    if has_bool_arg and dtype_spec is None:
        # TODO(niboshi): This pattern needs dtype promotion.
        return chainerx.testing.ignore()
    result = xp.arange(start, stop, step, dtype=dtype_spec)
    # Without an explicit dtype, align numpy's default with chainerx's.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(
            xp, result, _get_default_dtype(step))
    return result
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_arange_with_device(device):
    def check(*args, **kwargs):
        on_device = chainerx.arange(*args, device=device, **kwargs)
        reference = chainerx.arange(*args, **kwargs)
        array_utils.check_device(on_device, device)
        chainerx.testing.assert_array_equal_ex(on_device, reference)
    # Exercise the stop / start,stop / start,stop,step signatures, each
    # without and with an explicit dtype.
    for args in [(3,), (0, 3), (0, 3, 2)]:
        check(*args)
        check(*args, dtype='float32')
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_arange_invalid_too_long_bool(device):
    def check_raises(mod, err):
        with pytest.raises(err):
            mod.arange(3, dtype='bool_')
        with pytest.raises(err):
            mod.arange(1, 4, 1, dtype='bool_')
        # A bool range of size <= 2 is valid and must not raise.
        mod.arange(1, 4, 2, dtype='bool_')
    check_raises(chainerx, chainerx.DtypeError)
    check_raises(numpy, ValueError)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_arange_invalid_zero_step(device):
    def check_raises(mod, err):
        with pytest.raises(err):
            mod.arange(1, 3, 0)
    check_raises(chainerx, chainerx.ChainerxError)
    check_raises(numpy, ZeroDivisionError)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
@pytest.mark.parametrize('n', [0, 1, 2, 257])
def test_identity(xp, n, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    result = xp.identity(n, dtype_spec)
    # Align numpy's float64 default with chainerx's float32 default.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_identity_with_device(device):
    on_device = chainerx.identity(3, 'float32', device)
    reference = chainerx.identity(3, 'float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.DimensionError))
# NOTE(review): both parametrized devices are 'native:0'; sibling invalid-arg
# tests use ['native:1', 'native:0'] -- confirm whether the duplicate is
# intentional.
@pytest.mark.parametrize('device', ['native:0', 'native:0'])
def test_identity_invalid_negative_n(xp, device):
    # A negative size must raise: ValueError from NumPy, DimensionError from
    # ChainerX; the decorator accepts either.
    xp.identity(-1, 'float32')
@chainerx.testing.numpy_chainerx_array_equal(accept_error=(TypeError,))
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_identity_invalid_n_type(xp, device):
    # A non-integer size must raise TypeError on both backends.
    xp.identity(3.0, 'float32')
# TODO(hvy): Add tests with non-ndarray but array-like inputs when supported.
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
    (0, 0, 0),
    (0, 0, 1),
    (2, 1, -2),
    (2, 1, -1),
    (2, 1, 0),
    (2, 1, 1),
    (2, 1, 2),
    (3, 4, -4),
    (3, 4, -1),
    (3, 4, 1),
    (3, 4, 4),
    (6, 3, 1),
    (6, 3, -1),
    (3, 6, 1),
    (3, 6, -1),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_eye(xp, N, M, k, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    result = xp.eye(N, M, k, dtype_spec)
    # Align numpy's float64 default with chainerx's float32 default.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
    (3, None, 1),
    (3, 4, None),
    (3, None, None),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_eye_with_default(xp, N, M, k, dtype_spec, device):
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # A None parameter means "leave it out so eye's default applies".
    if M is None and k is None:
        return xp.eye(N, dtype=dtype_spec)
    if M is None:
        return xp.eye(N, k=k, dtype=dtype_spec)
    if k is None:
        return xp.eye(N, M=M, dtype=dtype_spec)
    assert False  # the parametrization never sets both M and k
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_eye_with_device(device):
    on_device = chainerx.eye(1, 2, 1, 'float32', device)
    reference = chainerx.eye(1, 2, 1, 'float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('N,M', [
    (-1, 2),
    (1, -1),
    (-2, -1),
])
# NOTE(review): both parametrized devices are 'native:0'; sibling invalid-arg
# tests use ['native:1', 'native:0'] -- confirm whether the duplicate is
# intentional.
@pytest.mark.parametrize('device', ['native:0', 'native:0'])
def test_eye_invalid_negative_N_M(xp, N, M, device):
    # Negative dimensions must raise: ValueError from NumPy,
    # DimensionError from ChainerX; the decorator accepts either.
    xp.eye(N, M, 1, 'float32')
@chainerx.testing.numpy_chainerx_array_equal(accept_error=(TypeError,))
@pytest.mark.parametrize('N,M,k', [
    (1.0, 2, 1),
    (2, 1.0, 1),
    (2, 3, 1.0),
    (2.0, 1.0, 1),
])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_eye_invalid_NMk_type(xp, N, M, k, device):
    # Any float among N, M, k must raise TypeError on both backends.
    xp.eye(N, M, k, 'float32')
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(4,), (2, 3), (6, 5)])
@pytest.mark.parametrize('transpose', [False, True])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_diag(xp, k, shape, transpose, device):
    source = xp.arange(
        array_utils.total_size(shape), dtype='int32').reshape(shape)
    if transpose:
        # Transposing yields non-contiguous input for multi-dimensional
        # shapes.
        source = source.T
    return xp.diag(source, k)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(), (2, 1, 2), (2, 0, 1)])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_diag_invalid_ndim(xp, k, shape, device):
    # diag rejects 0-d and >=3-d inputs.
    source = xp.arange(
        array_utils.total_size(shape), dtype='int32').reshape(shape)
    return xp.diag(source, k)
# TODO(hvy): Add tests with non-ndarray but array-like inputs when supported.
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(), (4,), (2, 3), (6, 5), (2, 0)])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_diagflat(xp, k, shape, device):
    source = xp.arange(
        array_utils.total_size(shape), dtype='int32').reshape(shape)
    return xp.diagflat(source, k)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(2, 1, 2), (2, 0, 1)])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_diagflat_invalid_ndim(xp, k, shape, device):
    # diagflat rejects inputs with more than two dimensions.
    source = xp.arange(
        array_utils.total_size(shape), dtype='int32').reshape(shape)
    return xp.diagflat(source, k)
@chainerx.testing.numpy_chainerx_allclose(float16_rtol=1e-3)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('start,stop', [
    (0, 0),
    (0, 1),
    (1, 0),
    (-1, 0),
    (0, -1),
    (1, -1),
    (-13.3, 352.5),
    (13.3, -352.5),
])
@pytest.mark.parametrize('num', [0, 1, 2, 257])
@pytest.mark.parametrize('endpoint', [True, False])
@pytest.mark.parametrize('range_type', [float, int])
def test_linspace(xp, start, stop, num, endpoint, range_type, dtype, device):
    # Coerce the endpoints so both int and float inputs are exercised.
    lo = range_type(start)
    hi = range_type(stop)
    return xp.linspace(lo, hi, num, endpoint=endpoint, dtype=dtype)
# Check only for closeness to numpy not the dtype
# as the default float of numpy and chainerx may differ.
@chainerx.testing.numpy_chainerx_allclose(dtype_check=False, float16_rtol=1e-7)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('start,stop', [
    (0, 0),
    (0, 1),
    (1, 0),
    (-1, 0),
    (0, -1),
    (1, -1),
    (-13.3, 352.5),
    (13.3, -352.5),
])
@pytest.mark.parametrize('num', [0, 1, 2, 257])
@pytest.mark.parametrize('endpoint', [True, False])
@pytest.mark.parametrize('range_type', [float, int])
def test_linspace_default_dtype(xp, start, stop, num, endpoint,
                                range_type, device):
    # Same as test_linspace but with no dtype argument at all.
    lo = range_type(start)
    hi = range_type(stop)
    return xp.linspace(lo, hi, num, endpoint=endpoint)
@chainerx.testing.numpy_chainerx_allclose()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_linspace_dtype_spec(xp, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    return xp.linspace(3, 5, 10, dtype=dtype_spec)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_linspace_with_device(device):
    on_device = chainerx.linspace(3, 5, 10, dtype='float32', device=device)
    reference = chainerx.linspace(3, 5, 10, dtype='float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.ChainerxError))
# NOTE(review): both parametrized devices are 'native:0'; sibling invalid-arg
# tests use ['native:1', 'native:0'] -- confirm whether the duplicate is
# intentional.
@pytest.mark.parametrize('device', ['native:0', 'native:0'])
def test_linspace_invalid_num(xp, device):
    # A negative sample count must raise on both backends.
    xp.linspace(2, 4, -1)
@pytest.mark.parametrize_device(['native:0'])
def test_frombuffer_from_numpy_array(device):
    """frombuffer must share memory with the source and keep it alive."""
    # padding=False yields a contiguous buffer, which frombuffer requires.
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32', padding=False)
    a_chx = chainerx.frombuffer(obj, obj.dtype)
    a_np = numpy.frombuffer(obj, obj.dtype)
    chainerx.testing.assert_array_equal_ex(a_np, a_chx)
    assert a_chx.device is chainerx.get_device(device)
    # Mutating the source in place must be visible through the chainerx
    # array, proving the buffer is shared rather than copied.
    obj += obj
    chainerx.testing.assert_array_equal_ex(obj.ravel(), a_chx)
    # Dropping the last reference to the source must not free the memory
    # backing a_chx; copy the expected values first, then delete.
    obj_copy = obj.copy()
    del obj
    chainerx.testing.assert_array_equal_ex(obj_copy.ravel(), a_chx)
@pytest.mark.parametrize_device(['cuda:0'])
def test_frombuffer_from_numpy_array_with_cuda(device):
    src = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    # Host memory cannot be wrapped on a CUDA device.
    with pytest.raises(chainerx.ChainerxError):
        chainerx.frombuffer(src, src.dtype)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.ChainerxError))
def test_frombuffer_from_numpy_array_with_noncontiguous(xp):
    # padding=True produces a non-contiguous array, which frombuffer
    # rejects.
    src = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32', padding=True)
    return xp.frombuffer(src, src.dtype)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(ValueError, chainerx.ChainerxError))
@pytest.mark.parametrize('count', [-1, 0, 1, 3, 4])
@pytest.mark.parametrize('offset', [-1, 0, 1, 4, 3 * 4, 3 * 4 + 4])
def test_frombuffer_from_numpy_array_with_offset_count(xp, count, offset):
    # Valid and out-of-range offset/count combinations; invalid ones must
    # raise on both backends.
    src = array_utils.create_dummy_ndarray(numpy, (3,), 'int32')
    return xp.frombuffer(src, src.dtype, count=count, offset=offset)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_frombuffer_from_device_buffer(device):
    dtype = 'int32'
    device_buffer = chainerx.testing._DeviceBuffer(
        [1, 2, 3, 4, 5, 6], (2, 3), dtype)
    actual = chainerx.frombuffer(device_buffer, dtype)
    expected = chainerx.array([1, 2, 3, 4, 5, 6], dtype)
    chainerx.testing.assert_array_equal_ex(expected, actual)
    assert actual.device is chainerx.get_device(device)
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_frombuffer_with_device(device):
    src = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32', padding=False)
    on_device = chainerx.frombuffer(src, src.dtype, device=device)
    reference = chainerx.frombuffer(src, src.dtype)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
    array_utils.check_device(on_device, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 2])
@pytest.mark.parametrize('sep', ['', 'a'])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromfile(xp, count, sep, dtype_spec, device):
    """Round-trip an array through a temporary file via tofile/fromfile."""
    # Skip if bool_ dtype and text mode
    if numpy.dtype(dtype_spec) == numpy.bool_ and sep == 'a':
        pytest.skip(
            'numpy.fromfile does not work with bool_ dtype and text mode')
    # numpy.dtype/arange need the name form of a chainerx dtype.
    if isinstance(dtype_spec, chainerx.dtype):
        numpy_dtype_spec = dtype_spec.name
    else:
        numpy_dtype_spec = dtype_spec
    data = numpy.arange(2, dtype=numpy_dtype_spec)
    # Use the file as a context manager so the handle (and the backing
    # temporary file) is released; the original leaked it.
    with tempfile.TemporaryFile() as f:
        # Write array data to the temporary file, then rewind and read it
        # back through xp.fromfile.
        data.tofile(f, sep=sep)
        f.seek(0)
        if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
            dtype_spec = numpy_dtype_spec
        return xp.fromfile(f, dtype=dtype_spec, count=count, sep=sep)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_loadtxt(xp, dtype_spec, device):
    """Exercise loadtxt with comments, converters, skiprows and usecols."""
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    txt = '''// Comment to be ignored.
1 2 3 4
5 6 7 8
'''
    txt = io.StringIO(txt)
    # Converter that is used to add 1 to each element in the 3rd column.
    def converter(element_str):
        return float(element_str) + 1
    # skiprows=2 skips the comment line and the first data row; usecols
    # keeps columns 1 and 3, with the converter applied to column 3.
    return xp.loadtxt(
        txt, dtype=dtype_spec, comments='//', delimiter=' ',
        converters={3: converter}, skiprows=2, usecols=(1, 3), unpack=False,
        ndmin=2, encoding='bytes')
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 5])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromiter(xp, count, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    squares = (value * value for value in range(5))
    return xp.fromiter(squares, dtype=dtype_spec, count=count)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 3])
@pytest.mark.parametrize('sep', [' ', 'a'])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromstring(xp, count, sep, dtype_spec, device):
    # NOTE(review): sibling tests convert the dtype only for numpy
    # ('if xp is numpy and isinstance(...)'); here the conversion is
    # unconditional -- confirm whether chainerx.fromstring requires the
    # name form as well.
    if isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    elements = ['1', '2', '3']
    string = sep.join(elements)
    return xp.fromstring(string, dtype=dtype_spec, count=count, sep=sep)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape', [(2, 2), [2, 2]])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromfunction(xp, shape, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    def build_cell(i, j, addend):
        return i * j + addend
    # Extra keyword arguments (here `addend`) must be forwarded by
    # fromfunction to the callback.
    return xp.fromfunction(build_cell, shape, dtype=dtype_spec, addend=2)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_copy(xp, shape, dtype, device, is_module):
    source = array_utils.create_dummy_ndarray(xp, shape, dtype)
    # Exercise both the module-level and the method spelling of copy().
    return xp.copy(source) if is_module else source.copy()
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('shape,k', [
    ((2,), -1),
    ((2,), 0),
    ((2,), 1),
    ((3, 3), -1),
    ((3, 3), 0),
    ((3, 3), 1),
    ((4, 3), -1),
    ((4, 3), 0),
    ((4, 3), 1),
    ((4, 3), 5),
    ((4, 3), -5),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_tri(xp, shape, k, dtype_spec, device):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    result = xp.tri(*shape, k=k, dtype=dtype_spec)
    # Align numpy's float64 default with chainerx's float32 default.
    if dtype_spec in (None, Unspecified):
        result = dtype_utils.cast_if_numpy_array(xp, result, 'float32')
    return result
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
    (3, None, 1),
    (3, 4, None),
    (3, None, None),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_tri_with_default(xp, N, M, k, dtype_spec, device):
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    # A None parameter means "leave it out so tri's default applies".
    if M is None and k is None:
        return xp.tri(N, dtype=dtype_spec)
    if M is None:
        return xp.tri(N, k=k, dtype=dtype_spec)
    if k is None:
        return xp.tri(N, M=M, dtype=dtype_spec)
    assert False  # the parametrization never sets both M and k
@pytest.mark.parametrize(
    'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_tri_with_device(device):
    on_device = chainerx.tri(1, 2, 1, 'float32', device)
    reference = chainerx.tri(1, 2, 1, 'float32')
    array_utils.check_device(on_device, device)
    chainerx.testing.assert_array_equal_ex(on_device, reference)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product({
        'shape': [(3, 3), (4, 3), (2, 3, 4)],
        'k': [0, 1, -1, 5, -5]
    }) +
    chainer.testing.product({
        'shape': [(3,)],
        'k': [0, 1, -1, 5, -5],
        'skip_backward_test': [True],
        'skip_double_backward_test': [True]
    })
))
class TestTrilTriu(op_utils.NumpyOpTest):
    """Forward/backward test for tril and triu over various shapes and k."""
    def setup(self, float_dtype):
        self.dtype = float_dtype
        # float16 backward occasionally exceeds the default tolerances.
        if float_dtype == 'float16':
            loose = {'rtol': 2e-3, 'atol': 2e-3}
            self.check_backward_options.update(loose)
            self.check_double_backward_options.update(loose)
    def generate_inputs(self):
        return numpy.random.random(self.shape).astype(self.dtype),
    def forward_xp(self, inputs, xp):
        a, = inputs
        # Check both triangular halves from the same input.
        return xp.tril(a, self.k), xp.triu(a, self.k)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('indexing', ['xy', 'ij'])
@chainer.testing.parameterize_pytest('input_lens', [
    # Test up to 4 inputs to check `indexing` behaviors
    # 'xy': (1, 0, 2, ..., N-1)
    # 'ij': (0, 1, 2, ..., N-1)
    (7,),
    (6, 4),
    (2, 3, 5),
    (4, 3, 2, 5),
])
class TestMeshgrid(op_utils.NumpyOpTest):
    """Forward/backward test for meshgrid with 1..4 inputs."""
    check_numpy_strides_compliance = False
    def setup(self, dtype):
        # Gradient checks only make sense for floating-point dtypes.
        if numpy.dtype(dtype).kind != 'f':
            self.skip_backward_test = True
            self.skip_double_backward_test = True
        self.dtype = dtype
        if dtype == 'float16':
            self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
    def generate_inputs(self):
        def make_axis(size):
            # One monotone 1-D axis with random negative/positive endpoints.
            return numpy.linspace(
                numpy.random.uniform(-5, -1),
                numpy.random.uniform(1, 5),
                size).astype(self.dtype)
        return tuple(make_axis(size) for size in self.input_lens)
    def forward_xp(self, inputs, xp):
        return tuple(xp.meshgrid(*inputs, indexing=self.indexing))
@chainerx.testing.numpy_chainerx_array_equal()
def test_meshgrid_no_array(xp):
    # With no inputs meshgrid must still succeed.
    return xp.meshgrid()
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('indexing', ['xy', 'ij'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_meshgrid_one_array_multi_dim(xp, indexing, dtype_spec):
    # NumPy accepts dtype names but not chainerx.dtype objects.
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    grid_input = xp.ones((3, 3), dtype=dtype_spec)
    return xp.meshgrid(grid_input, indexing=indexing)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('indexing', ['xy', 'ij'])
def test_meshgrid_no_array_indexing(xp, indexing):
    # indexing alone, with no arrays, must also be accepted.
    return xp.meshgrid(indexing=indexing)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(
        chainerx.ChainerxError, ValueError))
def test_meshgrid_invalid_kwarg_value(xp):
    axis = array_utils.create_dummy_ndarray(xp, (10,), 'float32')
    # 'xz' is not a valid indexing mode; both backends must raise.
    return xp.meshgrid(axis, axis, indexing='xz')
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(
        chainerx.ChainerxError, TypeError))
def test_meshgrid_invalid_kwarg(xp):
    axis = array_utils.create_dummy_ndarray(xp, (10,), 'float32')
    # Unknown keyword arguments must be rejected.
    return xp.meshgrid(axis, axis, indexing='xy', invalid_arg=0)
| mit |
clasnake/recommender | similarity.py | 1 | 6369 | from __future__ import division
from math import sqrt
def sim_distance(prefs, item1, item2):
    """Euclidean-distance-based similarity of two items' rating vectors.

    Returns a score in (0, 1]: 1.0 for identical ratings on every shared
    key, approaching 0 as ratings diverge, and 0 when the two items have
    no rated keys in common.
    """
    # Keys rated by both items, computed once (the original scanned
    # prefs[item1] a second time to redo the same membership test).
    shared = [item for item in prefs[item1] if item in prefs[item2]]
    if not shared:
        return 0
    sum_of_squares = sum(
        pow(prefs[item1][item] - prefs[item2][item], 2) for item in shared)
    return 1 / (1 + sqrt(sum_of_squares))
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
    """Pearson correlation of p1 and p2 over co-rated items, 7 d.p.

    Returns 0 when there are no co-rated items or when either vector has
    zero variance on the shared items.
    """
    shared = [it for it in prefs[p1] if it in prefs[p2]]
    n = len(shared)
    if n == 0:
        return 0
    ratings1 = [prefs[p1][it] for it in shared]
    ratings2 = [prefs[p2][it] for it in shared]
    sum1 = float(sum(ratings1))
    sum2 = float(sum(ratings2))
    sum1_sq = float(sum(r * r for r in ratings1))
    sum2_sq = float(sum(r * r for r in ratings2))
    p_sum = float(sum(a * b for a, b in zip(ratings1, ratings2)))
    # Numerator: covariance term; denominator: product of std-dev terms.
    num = float(p_sum - (sum1 * sum2 / n))
    den = float(sqrt((sum1_sq - sum1 ** 2 / n) * (sum2_sq - sum2 ** 2 / n)))
    if den == 0:
        return 0
    return round(float(num / den), 7)
def sim_pearson1(prefs, person1, person2):
    """Pearson correlation via population covariance / standard deviations.

    Returns 0 when there are no shared items or when either side has zero
    standard deviation on the shared items.
    """
    shared = [it for it in prefs[person1] if it in prefs[person2]]
    n = len(shared)
    if n == 0:
        return 0
    mean1 = sum(prefs[person1][it] for it in shared) / n
    mean2 = sum(prefs[person2][it] for it in shared) / n
    covariance = sum(
        (prefs[person1][it] - mean1) * (prefs[person2][it] - mean2)
        for it in shared) / n
    sd1 = sqrt(sum((prefs[person1][it] - mean1) ** 2 for it in shared) / n)
    sd2 = sqrt(sum((prefs[person2][it] - mean2) ** 2 for it in shared) / n)
    if sd1 * sd2 == 0:
        return 0
    return covariance / (sd1 * sd2)
def sim_pearson_improved(prefs, person1, person2):
    """Pearson correlation damped by the shared-item ratio.

    The plain Pearson score over co-rated items is multiplied by
    n / |union of items rated by either person|, so a high correlation
    computed from a tiny overlap is discounted.

    Returns 0 when there are no shared items or either side has zero
    standard deviation on the shared items.
    """
    shared = [it for it in prefs[person1] if it in prefs[person2]]
    n = len(shared)
    if n == 0:
        return 0
    # len() replaces the original hand-rolled counting loops.
    total_count = len(prefs[person1]) + len(prefs[person2]) - n
    mean1 = sum(prefs[person1][it] for it in shared) / n
    mean2 = sum(prefs[person2][it] for it in shared) / n
    covariance = sum(
        (prefs[person1][it] - mean1) * (prefs[person2][it] - mean2)
        for it in shared) / n
    sd1 = sqrt(sum((prefs[person1][it] - mean1) ** 2 for it in shared) / n)
    sd2 = sqrt(sum((prefs[person2][it] - mean2) ** 2 for it in shared) / n)
    if sd1 * sd2 == 0:
        return 0
    # Damp the correlation by the overlap fraction.
    return (covariance / (sd1 * sd2)) * (float(n) / float(total_count))
def sim_cosine(prefs, item1, item2):
    """Cosine similarity of two items over their co-rated keys.

    Returns 0 when there is no overlap, or when either vector has zero
    norm on the shared keys (the original raised ZeroDivisionError there;
    the guard matches the den == 0 handling in sim_pearson).
    """
    shared = [it for it in prefs[item1] if it in prefs[item2]]
    if not shared:
        return 0
    x = sqrt(sum(prefs[item1][it] ** 2 for it in shared))
    y = sqrt(sum(prefs[item2][it] ** 2 for it in shared))
    if x * y == 0:
        # Degenerate all-zero vector: report no similarity instead of
        # crashing.
        return 0
    xy = sum(prefs[item1][it] * prefs[item2][it] for it in shared)
    return xy / (x * y)
def sim_cosine_improved(prefs, item1, item2):
    """Cosine similarity damped by the shared-key ratio.

    The cosine over co-rated keys is multiplied by
    n / |union of keys rated for either item|, discounting similarities
    computed from small overlaps.

    Returns 0 when there is no overlap or when either vector has zero
    norm on the shared keys (the original raised ZeroDivisionError there).
    """
    shared = [it for it in prefs[item1] if it in prefs[item2]]
    n = len(shared)
    if n == 0:
        return 0
    # len() replaces the original hand-rolled counting loops.
    total_count = len(prefs[item1]) + len(prefs[item2]) - n
    x = sqrt(sum(prefs[item1][it] ** 2 for it in shared))
    y = sqrt(sum(prefs[item2][it] ** 2 for it in shared))
    if x * y == 0:
        # Degenerate all-zero vector: report no similarity instead of
        # crashing.
        return 0
    xy = sum(prefs[item1][it] * prefs[item2][it] for it in shared)
    return (xy / (x * y)) * (float(n) / float(total_count))
def sim_Jaccard(s1, s2, length):
    """Ratio of positions where both vectors carry '1' to the rest.

    NOTE(review): this is not the textbook Jaccard index (the denominator
    is `length - count`, not the size of the union), and it raises
    ZeroDivisionError when every position is a shared '1' -- confirm the
    intended semantics with callers before changing either.
    """
    # '1\n' matches flags read from file lines that kept their newline.
    count = sum(
        1 for i in range(length)
        if (s1[i] == '1' and s2[i] == '1')
        or (s1[i] == '1\n' and s2[i] == '1\n'))
    return count / (length - count)
def sim_itemType(s1, s2, length):
    """Count of positions where both vectors carry '1', divided by 5.

    The fixed divisor 5 is the number of type slots this score is
    normalized against.
    """
    # '1\n' matches flags read from file lines that kept their newline.
    matches = sum(
        1 for i in range(length)
        if (s1[i] == '1' and s2[i] == '1')
        or (s1[i] == '1\n' and s2[i] == '1\n'))
    return matches / 5
def sim_cosine_improved_tag(prefs, item1, item2, movieTags):
    """Overlap-damped cosine similarity with a tag-overlap shortcut.

    If the two items share at least 5 tags, a fixed similarity of 0.8 is
    returned without consulting the ratings; otherwise the result is the
    cosine over co-rated keys damped by n / |union of rated keys|.

    Returns 0 when there is no rating overlap or either vector has zero
    norm on the shared keys (the original raised ZeroDivisionError there).
    """
    # Tag overlap, counting occurrences in item1's tag list (as before).
    common = sum(1 for tag in movieTags[item1] if tag in movieTags[item2])
    if common >= 5:
        return 0.8
    shared = [it for it in prefs[item1] if it in prefs[item2]]
    n = len(shared)
    if n == 0:
        return 0
    # len() replaces the original hand-rolled counting loops.
    total_count = len(prefs[item1]) + len(prefs[item2]) - n
    x = sqrt(sum(prefs[item1][it] ** 2 for it in shared))
    y = sqrt(sum(prefs[item2][it] ** 2 for it in shared))
    if x * y == 0:
        # Degenerate all-zero vector: report no similarity instead of
        # crashing.
        return 0
    xy = sum(prefs[item1][it] * prefs[item2][it] for it in shared)
    return (xy / (x * y)) * (float(n) / float(total_count))
#def sim_pearson_improved_typeAdded(prefs,item1,item2):
# pearson_improved=sim_pearson_improved(prefs,item1,item2)
# item_type=itemSimSet[item1][item2]
# return 0.9*(pearson_improved+1)/2.0+0.1*item_type
| mit |
Gagaro/django | tests/foreign_object/models/empty_join.py | 97 | 3211 | from django.db import models
from django.db.models.fields.related import (
ForeignObjectRel, ReverseManyToOneDescriptor,
)
from django.db.models.lookups import StartsWith
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible
class CustomForeignObjectRel(ForeignObjectRel):
    """
    Define some extra Field methods so this Rel acts more like a Field, which
    lets us use ReverseManyToOneDescriptor in both directions.
    """
    @property
    def foreign_related_fields(self):
        # Left-hand member of each (lhs, rhs) pair in the field's
        # related_fields.
        return tuple(lhs for lhs, _ in self.field.related_fields)
    def get_attname(self):
        # Fields expose an attname; reuse the relation's name for it.
        return self.name
class StartsWithRelation(models.ForeignObject):
    """
    A ForeignObject whose join condition is a StartsWith comparison rather
    than the usual column equality. Logically this behaves as a
    many-to-many relation and installs a ReverseManyToOneDescriptor on
    both sides of the relation.
    """
    auto_created = False

    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False

    rel_class = CustomForeignObjectRel

    def __init__(self, *args, **kwargs):
        # Deletions are never cascaded through this synthetic relation.
        kwargs['on_delete'] = models.DO_NOTHING
        super(StartsWithRelation, self).__init__(*args, **kwargs)

    @property
    def field(self):
        """
        Return the reverse relation so ReverseManyToOneDescriptor works
        from either direction.
        """
        return self.remote_field

    def get_extra_restriction(self, where_class, alias, related_alias):
        # Join on remote_col STARTSWITH local_col instead of equality.
        remote_field = self.remote_field.model._meta.get_field(self.to_fields[0])
        local_field = self.model._meta.get_field(self.from_fields[0])
        return StartsWith(remote_field.get_col(alias), local_field.get_col(related_alias))

    def get_joining_columns(self, reverse_join=False):
        # No column-equality pairs at all; the join condition comes solely
        # from get_extra_restriction() above.
        return ()

    def get_path_info(self):
        to_opts = self.remote_field.model._meta
        from_opts = self.model._meta
        return [PathInfo(from_opts, to_opts, (to_opts.pk,), self, False, False)]

    def get_reverse_path_info(self):
        to_opts = self.model._meta
        from_opts = self.remote_field.model._meta
        return [PathInfo(from_opts, to_opts, (to_opts.pk,), self.remote_field, False, False)]

    def contribute_to_class(self, cls, name, virtual_only=False):
        super(StartsWithRelation, self).contribute_to_class(cls, name, virtual_only)
        # Also expose the descriptor on the defining model itself so the
        # relation is traversable from both ends.
        setattr(cls, self.name, ReverseManyToOneDescriptor(self))
class BrokenContainsRelation(StartsWithRelation):
    """
    Intentionally broken variant: it produces no extra restriction (and,
    via StartsWithRelation, no joining columns either), so any query that
    joins through it makes ``Join.as_sql()`` raise.
    """

    def get_extra_restriction(self, where_class, alias, related_alias):
        # Returning None leaves the join without any condition at all.
        return None
@python_2_unicode_compatible
class SlugPage(models.Model):
    # Pages relate purely through slug prefixes: every page whose slug
    # starts with this page's slug counts as one of its descendants.
    slug = models.CharField(max_length=20)
    descendants = StartsWithRelation(
        'self',
        from_fields=['slug'],
        to_fields=['slug'],
        related_name='ascendants',
    )
    # Deliberately unusable relation -- see BrokenContainsRelation.
    containers = BrokenContainsRelation(
        'self',
        from_fields=['slug'],
        to_fields=['slug'],
    )

    class Meta:
        ordering = ['slug']

    def __str__(self):
        return 'SlugPage {}'.format(self.slug)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.