code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
from dal import autocomplete
from dashboard.models import PUC, PUCKind
from django.db.models import Q
class PUCAutocomplete(autocomplete.Select2QuerySetView):
    """Select2 autocomplete endpoint for PUCs.

    Supports an optional "<kind>:<text>" query syntax: text before the
    first colon narrows by PUC kind (prefix match), the remainder is
    matched against the three PUC category levels.
    """

    def get_queryset(self):
        """Return PUCs matching the current search text.

        Returns an empty queryset (not None) when there is no search
        text, so downstream filtering/pagination always gets a queryset.
        """
        if not self.q:
            return PUC.objects.none()
        # maxsplit=1 so search text containing further colons survives
        # intact (the original split(":") silently truncated it).
        parts = self.q.split(":", 1)
        if len(parts) > 1:
            kind_filter = Q(kind__name__istartswith=parts[0])
            term = parts[1]
        else:
            kind_filter = Q()
            term = parts[0]
        return PUC.objects.filter(
            kind_filter,
            Q(gen_cat__icontains=term)
            | Q(prod_fam__icontains=term)
            | Q(prod_type__icontains=term),
        )

    def get_result_label(self, result):
        """Prefix the PUC's string form with its kind code."""
        return result.kind.code + ": " + str(result)
|
HumanExposure/factotum
|
dashboard/views/puc_autocomplete.py
|
Python
|
gpl-3.0
| 790
|
from unittest import TestCase
from unittest.mock import Mock
from grapher import errors
from grapher.parsers import QueryParser
from grapher.parsers import query
from nose_parameterized import parameterized
class QueryParserTest(TestCase):
    """Unit tests for QueryParser.parse() against a mocked flask request."""

    def setUp(self):
        # Swap the module-level flask `request` for a mock whose
        # args.get we can program per test.
        mock_request = Mock()
        mock_request.args = Mock()
        mock_request.args.get = Mock()
        query.request = mock_request

    @parameterized.expand([
        ({},
         {'query': {}, 'skip': 0, 'limit': None}),
        ({'skip': '2'},
         {'query': {}, 'skip': 2, 'limit': None}),
        ({
            'query': '{"test":"test 1"}',
            'skip': '0',
            'limit': '10'
        },
         {'query': {'test': 'test 1'}, 'skip': 0, 'limit': 10}),
    ])
    def test_parse(self, request_query, expected):
        # dict.get mimics request.args.get: value if present, else None.
        query.request.args.get.side_effect = request_query.get
        self.assertEqual(QueryParser.parse(), expected)

    def test_invalid_query(self):
        # Malformed JSON in the 'query' parameter must raise BadRequestError.
        query.request.args.get.return_value = 'invalid$query:{{{}'
        with self.assertRaises(errors.BadRequestError):
            QueryParser.parse()
|
lucasdavid/grapher
|
tests/parsers/query_test.py
|
Python
|
mit
| 1,148
|
#!flask/bin/python
"""Initialize a new translation catalog for the app with pybabel.

Usage: tr_init <language-code>
"""
import os
import sys

# pybabel lives inside the project's virtualenv; the path differs by platform.
if sys.platform == 'win32':
    pybabel = 'flask\\Scripts\\pybabel'
else:
    pybabel = 'flask/bin/pybabel'

if len(sys.argv) != 2:
    # print() form works in both Python 2 and 3 (the original bare
    # `print` statement was Python-2 only).
    print("usage: tr_init <language-code>")
    sys.exit(1)

# Extract translatable strings, then create the language's catalog from them.
os.system(pybabel +
          ' extract -F babel.cfg -k lazy_gettext -o messages.pot app')
os.system(pybabel +
          ' init -i messages.pot -d app/translations -l ' + sys.argv[1])
# The .pot template is only an intermediate artifact.
os.unlink('messages.pot')
|
wasmith63/bluebird
|
tr_init.py
|
Python
|
bsd-3-clause
| 440
|
import sys
sys.path.insert(0, "..")

from mcda.types import Criteria
from xml.etree import ElementTree
# Star import supplies the fixtures `c` (criteria) and `a` used below.
from data_ticino_new import *

# Round-trip the criteria through their XMCDA XML representation.
crit = c.to_xmcda()
ElementTree.dump(crit)
crit2 = Criteria()
crit2.from_xmcda(crit)
print(crit2)

# Dump the XMCDA serialization of `a`; it is indexed below, so it must
# return a sequence with at least two elements.
xmcda = a.to_xmcda()
ElementTree.dump(xmcda[0])
ElementTree.dump(xmcda[1])
|
oso/qgis-etri
|
tests/test_mcda.py
|
Python
|
gpl-3.0
| 308
|
#!/usr/bin/env python3
"""Generate recognition-test shell config scripts for every combination of
language, speaker gender, LM tool, n-gram order and LM type."""
import os
import glob

for lang in ("sme", "est", "fin"):
    for gender in ("M", "F"):
        # The acoustic model depends only on (lang, gender): look it up
        # once here instead of inside the innermost loops.
        pattern = os.environ["GROUP_DIR"] + "/p/sami/models/{}_{}/hmm/*_22.ph".format(lang, gender)
        # sorted() makes the "first" match deterministic across runs.
        possible_ams = sorted(glob.glob(pattern))
        if not possible_ams:
            raise RuntimeError("no acoustic model matching " + pattern)
        am = possible_ams[0][:-3]  # strip the ".ph" suffix
        for tool in ("s", "v"):
            # Sami has LMs for a wider range of n-gram orders.
            orders = range(3, 14) if lang == "sme" else range(5, 10)
            for order in orders:
                for lm_type in ("m", "w"):  # renamed from `type` (shadowed builtin)
                    name = "{}{}_cow_{}_{}g_{}".format(lang, gender, tool, order, lm_type)
                    with open(name + ".sh", 'w') as f:
                        print("export TEST_NAME='{}'".format(name), file=f)
                        print("export TEST_DIR=$GROUP_DIR/p/sami/recog_tests/complete_wikipedia/", file=f)
                        print(file=f)
                        print("export TEST_LM_SCALES=30", file=f)
                        print(file=f)
                        print("export TEST_AM={}".format(am), file=f)
                        print("export TEST_LM=$GROUP_DIR/p/sami/lmmodels/complete_wikipedia/{}".format(name), file=f)
                        print("export TEST_TRN=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.trn".format(lang, gender), file=f)
                        print("export TEST_WAVLIST=$GROUP_DIR/p/sami/audio_data/{}_{}/devel200.scp".format(lang, gender), file=f)
                        print("export ONE_BYTE_ENCODING=ISO-8859-10", file=f)
|
phsmit/iwclul2016-scripts
|
04_recognize/configs/complete_wikipedia/gen_configs.py
|
Python
|
bsd-3-clause
| 1,519
|
from .zoo import get_model # noqa: F401
from .weights_fetcher import fetch_weights # noqa: F401
from .git_cloner import GitCloneError # noqa: F401
from .model_loader import ModelLoader # noqa: F401
|
bethgelab/foolbox
|
foolbox/zoo/__init__.py
|
Python
|
mit
| 202
|
#
# SelfTest/Util/test_strxor.py: Self-test for XORing
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from binascii import unhexlify, hexlify
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Util.strxor import strxor, strxor_c
class StrxorTests(unittest.TestCase):
    # Self-tests for strxor(term1, term2): byte-wise XOR of two
    # equal-length byte strings, optionally into a caller-supplied buffer.

    def test1(self):
        # Basic XOR; the operation is commutative.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"383d4ba020573314395b")
        result = unhexlify(b"c70ed123c59a7fcb6f12")
        self.assertEqual(strxor(term1, term2), result)
        self.assertEqual(strxor(term2, term1), result)

    def test2(self):
        # Empty inputs yield an empty result.
        es = b""
        self.assertEqual(strxor(es, es), es)

    def test3(self):
        # x XOR x == all-zero bytes.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        all_zeros = b"\x00" * len(term1)
        self.assertEqual(strxor(term1, term1), all_zeros)

    def test_wrong_length(self):
        # Terms of different lengths must be rejected.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"ff339a83e5cd4cdf564990")
        self.assertRaises(ValueError, strxor, term1, term2)

    def test_bytearray(self):
        # bytearray inputs are accepted.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term1_ba = bytearray(term1)
        term2 = unhexlify(b"383d4ba020573314395b")
        result = unhexlify(b"c70ed123c59a7fcb6f12")
        self.assertEqual(strxor(term1_ba, term2), result)

    def test_memoryview(self):
        # memoryview inputs are accepted.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term1_mv = memoryview(term1)
        term2 = unhexlify(b"383d4ba020573314395b")
        result = unhexlify(b"c70ed123c59a7fcb6f12")
        self.assertEqual(strxor(term1_mv, term2), result)

    def test_output_bytearray(self):
        """Verify result can be stored in pre-allocated memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"383d4ba020573314395b")
        original_term1 = term1[:]
        original_term2 = term2[:]
        expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
        output = bytearray(len(term1))
        result = strxor(term1, term2, output=output)
        # In-place mode returns None and must not modify the inputs.
        self.assertEqual(result, None)
        self.assertEqual(output, expected_xor)
        self.assertEqual(term1, original_term1)
        self.assertEqual(term2, original_term2)

    def test_output_memoryview(self):
        """Verify result can be stored in pre-allocated memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"383d4ba020573314395b")
        original_term1 = term1[:]
        original_term2 = term2[:]
        expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
        output = memoryview(bytearray(len(term1)))
        result = strxor(term1, term2, output=output)
        self.assertEqual(result, None)
        self.assertEqual(output, expected_xor)
        self.assertEqual(term1, original_term1)
        self.assertEqual(term2, original_term2)

    def test_output_overlapping_bytearray(self):
        """Verify result can be stored in overlapping memory"""
        term1 = bytearray(unhexlify(b"ff339a83e5cd4cdf5649"))
        term2 = unhexlify(b"383d4ba020573314395b")
        original_term2 = term2[:]
        expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
        result = strxor(term1, term2, output=term1)
        self.assertEqual(result, None)
        self.assertEqual(term1, expected_xor)
        self.assertEqual(term2, original_term2)

    def test_output_overlapping_memoryview(self):
        """Verify result can be stored in overlapping memory"""
        term1 = memoryview(bytearray(unhexlify(b"ff339a83e5cd4cdf5649")))
        term2 = unhexlify(b"383d4ba020573314395b")
        original_term2 = term2[:]
        expected_xor = unhexlify(b"c70ed123c59a7fcb6f12")
        result = strxor(term1, term2, output=term1)
        self.assertEqual(result, None)
        self.assertEqual(term1, expected_xor)
        self.assertEqual(term2, original_term2)

    def test_output_ro_bytes(self):
        """Verify result cannot be stored in read-only memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"383d4ba020573314395b")
        self.assertRaises(TypeError, strxor, term1, term2, output=term1)

    def test_output_ro_memoryview(self):
        """Verify result cannot be stored in read-only memory"""
        term1 = memoryview(unhexlify(b"ff339a83e5cd4cdf5649"))
        term2 = unhexlify(b"383d4ba020573314395b")
        self.assertRaises(TypeError, strxor, term1, term2, output=term1)

    def test_output_incorrect_length(self):
        """Verify result cannot be stored in memory of incorrect length"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term2 = unhexlify(b"383d4ba020573314395b")
        output = bytearray(len(term1) - 1)
        self.assertRaises(ValueError, strxor, term1, term2, output=output)

    # Python 2.6 lacks full memoryview support: drop those tests there.
    # (This runs in the class body, so the methods never exist on 2.6.)
    import sys
    if sys.version[:3] == "2.6":
        del test_memoryview
        del test_output_memoryview
        del test_output_overlapping_memoryview
        del test_output_ro_memoryview
class Strxor_cTests(unittest.TestCase):
    """Self-tests for strxor_c(term, c): XOR every byte of *term* with the
    integer constant *c* (0 <= c < 256)."""

    def test1(self):
        # XOR with a non-trivial constant.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        result = unhexlify(b"be72dbc2a48c0d9e1708")
        self.assertEqual(strxor_c(term1, 65), result)

    def test2(self):
        # XOR with 0 is the identity.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        self.assertEqual(strxor_c(term1, 0), term1)

    def test3(self):
        # Empty input yields empty output.
        self.assertEqual(strxor_c(b"", 90), b"")

    def test_wrong_range(self):
        # The constant must fit in a single byte.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        self.assertRaises(ValueError, strxor_c, term1, -1)
        self.assertRaises(ValueError, strxor_c, term1, 256)

    def test_bytearray(self):
        # bytearray inputs are accepted.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term1_ba = bytearray(term1)
        result = unhexlify(b"be72dbc2a48c0d9e1708")
        self.assertEqual(strxor_c(term1_ba, 65), result)

    def test_memoryview(self):
        # memoryview inputs are accepted.
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        term1_mv = memoryview(term1)
        result = unhexlify(b"be72dbc2a48c0d9e1708")
        self.assertEqual(strxor_c(term1_mv, 65), result)

    def test_output_bytearray(self):
        """Verify result can be stored in pre-allocated memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        original_term1 = term1[:]
        expected_result = unhexlify(b"be72dbc2a48c0d9e1708")
        output = bytearray(len(term1))
        result = strxor_c(term1, 65, output=output)
        # In-place mode returns None and must not modify the input.
        self.assertEqual(result, None)
        self.assertEqual(output, expected_result)
        self.assertEqual(term1, original_term1)

    def test_output_memoryview(self):
        """Verify result can be stored in pre-allocated memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        original_term1 = term1[:]
        expected_result = unhexlify(b"be72dbc2a48c0d9e1708")
        output = memoryview(bytearray(len(term1)))
        result = strxor_c(term1, 65, output=output)
        self.assertEqual(result, None)
        self.assertEqual(output, expected_result)
        self.assertEqual(term1, original_term1)

    def test_output_overlapping_bytearray(self):
        """Verify result can be stored in overlapping memory"""
        term1 = bytearray(unhexlify(b"ff339a83e5cd4cdf5649"))
        expected_xor = unhexlify(b"be72dbc2a48c0d9e1708")
        result = strxor_c(term1, 65, output=term1)
        self.assertEqual(result, None)
        self.assertEqual(term1, expected_xor)

    def test_output_overlapping_memoryview(self):
        """Verify result can be stored in overlapping memory"""
        term1 = memoryview(bytearray(unhexlify(b"ff339a83e5cd4cdf5649")))
        expected_xor = unhexlify(b"be72dbc2a48c0d9e1708")
        result = strxor_c(term1, 65, output=term1)
        self.assertEqual(result, None)
        self.assertEqual(term1, expected_xor)

    def test_output_ro_bytes(self):
        """Verify result cannot be stored in read-only memory"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        self.assertRaises(TypeError, strxor_c, term1, 65, output=term1)

    def test_output_ro_memoryview(self):
        """Verify result cannot be stored in read-only memory"""
        # The original also built an unused second term here; removed.
        term1 = memoryview(unhexlify(b"ff339a83e5cd4cdf5649"))
        self.assertRaises(TypeError, strxor_c, term1, 65, output=term1)

    def test_output_incorrect_length(self):
        """Verify result cannot be stored in memory of incorrect length"""
        term1 = unhexlify(b"ff339a83e5cd4cdf5649")
        output = bytearray(len(term1) - 1)
        self.assertRaises(ValueError, strxor_c, term1, 65, output=output)

    # Python 2.6 lacks full memoryview support: drop those tests there.
    import sys
    if sys.version[:3] == "2.6":
        del test_memoryview
        del test_output_memoryview
        del test_output_overlapping_memoryview
        del test_output_ro_memoryview
def get_tests(config={}):
    """Collect every test case from this module into a single list."""
    return list_test_cases(StrxorTests) + list_test_cases(Strxor_cTests)
if __name__ == '__main__':
    def suite():
        """Default test suite looked up by name via unittest.main."""
        return unittest.TestSuite(get_tests())

    unittest.main(defaultTest='suite')
|
cloudera/hue
|
desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/SelfTest/Util/test_strxor.py
|
Python
|
apache-2.0
| 10,618
|
#!/usr/bin/env python
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
"""runs upgma or nj on a distance matrix file, writes the resulting tree
with distances to specified output file
"""
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from optparse import OptionParser
from qiime.parse import parse_distmat
from scipy.cluster.hierarchy import linkage
from skbio.tree import TreeNode
from skbio.stats.distance import DistanceMatrix
from skbio.tree import nj
import os.path
def multiple_file_upgma(input_dir, output_dir):
    """Run single_file_upgma on every non-hidden file in *input_dir*.

    Each tree is written to *output_dir* as 'upgma_<basename>.tre';
    the output directory is created if necessary.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    visible = (f for f in os.listdir(input_dir) if not f.startswith('.'))
    for fname in visible:
        base, _ext = os.path.splitext(fname)
        single_file_upgma(os.path.join(input_dir, fname),
                          os.path.join(output_dir, 'upgma_' + base + '.tre'))
def single_file_upgma(input_file, output_file):
    """Cluster the distance matrix in *input_file* with UPGMA and write
    the resulting tree to *output_file* in Newick format.

    Raises RuntimeError when the matrix does not yield a usable tree
    (e.g. a single-sample input).
    """
    # read in dist matrix
    dist_mat = DistanceMatrix.read(input_file)
    # SciPy uses average as UPGMA:
    # http://docs.scipy.org/doc/scipy/reference/generated/
    # scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage
    linkage_matrix = linkage(dist_mat.condensed_form(), method='average')
    tree = TreeNode.from_linkage_matrix(linkage_matrix, dist_mat.ids)
    # 'with' guarantees the handle is closed even when writing fails
    # (the original leaked it on exception).
    with open(output_file, 'w') as f:
        try:
            f.write(tree.to_newick(with_distances=True))
        except AttributeError:
            # The original guarded this with `if c is None`, but `c` was
            # never defined, so the error path raised NameError instead.
            raise RuntimeError("""input file %s did not make a UPGMA tree.
 Ensure it has more than one sample present""" % (str(input_file),))
def single_file_nj(input_file, output_file):
    """Build a neighbour-joining tree from the distance matrix in
    *input_file* and write it to *output_file* in Newick format.
    """
    dm = DistanceMatrix.read(input_file)
    tree = nj(dm)
    # Context manager closes the file even if serialization raises
    # (the original left the handle open on error).
    with open(output_file, 'w') as f:
        f.write(tree.to_newick(with_distances=True))
def multiple_file_nj(input_dir, output_dir):
    """Run single_file_nj on every non-hidden file in *input_dir*.

    Each tree is written to *output_dir* as 'nj_<basename>.tre';
    the output directory is created if necessary.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    visible = (f for f in os.listdir(input_dir) if not f.startswith('.'))
    for fname in visible:
        base, _ext = os.path.splitext(fname)
        single_file_nj(os.path.join(input_dir, fname),
                       os.path.join(output_dir, 'nj_' + base + '.tre'))
|
adamrp/qiime
|
qiime/hierarchical_cluster.py
|
Python
|
gpl-2.0
| 2,603
|
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Richard Dymond (rjdymond@gmail.com)
#
# This file is part of Pyskool.
#
# Pyskool is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Pyskool is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyskool. If not, see <http://www.gnu.org/licenses/>.
import sys
import os.path
from argparse import ArgumentParser
from .game import Game
from . import version, package_dir, user_dir, images_subdir, sounds_subdir, skoolsound
from .image import get_images, SKOOL_DAZE, BACK_TO_SKOOL
from .sdini import SDIniMaker
from .btsini import BTSIniMaker
# Map launcher script name -> sound set to generate with --create-sounds.
SOUNDS = {
    'skool_daze.py': skoolsound.SKOOL_DAZE,
    'back_to_skool.py': skoolsound.BACK_TO_SKOOL,
    'skool_daze_take_too.py': skoolsound.SKOOL_DAZE_TAKE_TOO,
    'ezad_looks.py': skoolsound.EZAD_LOOKS,
    'back_to_skool_daze.py': skoolsound.BACK_TO_SKOOL_DAZE
}

# Shown by --search-dirs; the placeholders are filled with the actual
# images/sounds/ini subdirectory names at print time.
SEARCH_DIRS_MSG = """
Pyskool will search the following directories for 'pyskool.ini' and the
'{images}', '{sounds}' and '{ini}' subdirectories:
""".lstrip()

# Lower-cased basename of the launcher script; selects the game variant.
prog = os.path.basename(sys.argv[0].lower())
def info(text):
    """Write an informational message to standard output."""
    sys.stdout.write('{0}\n'.format(text))
def error(text):
    """Write an error message to standard error and exit with status 1."""
    sys.stderr.write('{0}\n'.format(text))
    sys.exit(1)
def find(name, search_dirs, directory=False):
    """Return the first path under *search_dirs* that contains *name*.

    Looks for a file by default, or a directory when *directory* is True.
    When nothing matches, exits via error() after listing the searched
    locations.
    """
    check = os.path.isdir if directory else os.path.isfile
    for candidate in (os.path.join(d, name) for d in search_dirs):
        if check(candidate):
            return candidate
    kind = 'directory' if directory else 'file'
    msg = ["Cannot find %s '%s' in any of these locations:" % (kind, name)]
    msg.extend(" %s" % d for d in search_dirs)
    if directory:
        msg.append("Have you run '{0} --setup' yet?".format(prog))
    error('\n'.join(msg))
def find_dir(dname, search_dirs):
    """Locate a required directory (thin wrapper around find())."""
    return find(dname, search_dirs, directory=True)
def main(scripts_dir):
    """Parse command-line options and start Pyskool.

    The game variant (Skool Daze, Back to Skool, ...) is selected by the
    basename of the launcher script (module-level `prog`). Locates data
    files, optionally creates images/ini/sound files, then runs the game.

    :param scripts_dir: Directory containing the launcher scripts; it is
        added to the search path for data files.
    """
    # Human-readable game name for each launcher script.
    names = {}
    names['skool_daze.py'] = 'Skool Daze'
    names['back_to_skool.py'] = 'Back to Skool'
    names['skool_daze_take_too.py'] = 'Skool Daze Take Too'
    names['ezad_looks.py'] = 'Ezad Looks'
    names['back_to_skool_daze.py'] = 'Back to Skool Daze'
    name = names.get(prog, 'Unknown')
    # e.g. 'ini/skool_daze' (launcher basename minus the '.py' suffix).
    default_ini_dir = os.path.join('ini', prog[:-3])
    parser = ArgumentParser(
        usage='%(prog)s [options]',
        description="Start Pyskool in {} mode.".format(name),
        add_help=False
    )
    group = parser.add_argument_group('Options')
    group.add_argument("--version", action="version", version=version,
                       help="show Pyskool's version number and exit")
    group.add_argument("-h", "--help", action="help",
                       help="show this help message and exit")
    group.add_argument("-c", "--cheat", dest="cheat", action="store_true",
                       help="enable cheat keys")
    group.add_argument("--config", dest="config", metavar="P,V", action="append",
                       help="set the value of the configuration parameter P to V; this option may be used multiple times")
    group.add_argument("--create-images", dest="create_images", action="store_true",
                       help="create the images required by the game and exit")
    group.add_argument("--create-ini", dest="create_ini", action="store_true",
                       help="create the ini files required by the game and exit")
    group.add_argument("--create-sounds", dest="create_sounds", action="store_true",
                       help="create the sound files required by the game and exit")
    group.add_argument("--force", dest="force", action="store_true",
                       help="overwrite existing images, ini files and sound files")
    group.add_argument("-i", "--inidir", dest="inidir",
                       help="read ini files from this directory instead of %s" % default_ini_dir)
    group.add_argument("-l", "--load", dest="savefile",
                       help="load a saved game from the specified file")
    group.add_argument("--package-dir", dest="package_dir", action="store_true",
                       help="show path to pyskool package directory and exit")
    group.add_argument("-q", "--quick-start", dest="quick_start", action="store_true",
                       help="start the game quickly")
    group.add_argument("-r", "--load-last", dest="savedir",
                       help="load the most recently saved game in the specified directory")
    group.add_argument("--sample-rate", dest="sample_rate", metavar="RATE", type=int, default=44100,
                       help="set the sample rate of the sound files created by --create-sounds (default: 44100)")
    group.add_argument("-s", "--scale", dest="scale", type=int,
                       help="scale graphics by this factor (1=original Speccy size)")
    group.add_argument("--search-dirs", dest="search_dirs", action="store_true",
                       help="show the locations that Pyskool searches for data files and exit")
    group.add_argument("--setup", dest="setup", action="store_true",
                       help="create the images, ini files and sound files required by the game and exit")
    options, unknown_args = parser.parse_known_args()
    if unknown_args:
        # Unrecognized options: show the full help text and exit(2).
        parser.exit(2, parser.format_help())
    # Set the search path for 'pyskool.ini', the 'images' directory, the
    # 'sounds' directory, and the 'ini' directory
    cwd = os.getcwd()
    search_dirs = [
        cwd,
        os.path.join(os.sep, 'usr', 'share', 'pyskool'),
        os.path.join(package_dir, 'data')
    ]
    if scripts_dir not in search_dirs:
        search_dirs.insert(1, scripts_dir)
    # Attempt to create ~/.pyskool if it doesn't exist
    pyskool_dir = user_dir
    if not os.path.isdir(user_dir):
        try:
            os.makedirs(user_dir)
            info("Created directory '%s'" % user_dir)
        except OSError as e:
            # Fall back to the current directory when ~/.pyskool cannot
            # be created.
            info("Could not create directory '%s': %s" % (user_dir, e.strerror))
            pyskool_dir = cwd
    if pyskool_dir not in search_dirs:
        search_dirs.insert(1, pyskool_dir)
    # Show package directory if requested
    if options.package_dir:
        info(package_dir)
        sys.exit(0)
    # Show search directories if requested
    if options.search_dirs:
        info(SEARCH_DIRS_MSG.format(images=images_subdir, sounds=sounds_subdir, ini=default_ini_dir))
        for search_dir in search_dirs:
            info('- {0}'.format(search_dir))
        sys.exit(0)
    # Get images if requested
    if options.setup or options.create_images:
        images_ini = find('images.ini', search_dirs)
        info("Using ini file at %s" % images_ini)
        if prog == 'skool_daze.py':
            get_images(images_ini, SKOOL_DAZE, 0, user_dir, True, options.force)
        elif prog == 'skool_daze_take_too.py':
            get_images(images_ini, SKOOL_DAZE, 1, user_dir, True, options.force)
        elif prog == 'ezad_looks.py':
            get_images(images_ini, SKOOL_DAZE, 2, user_dir, True, options.force)
        elif prog == 'back_to_skool.py':
            get_images(images_ini, BACK_TO_SKOOL, 0, user_dir, True, options.force)
        elif prog == 'back_to_skool_daze.py':
            get_images(images_ini, BACK_TO_SKOOL, 1, user_dir, True, options.force)
    # Create ini files if requested
    if options.setup or options.create_ini:
        ini_maker = None
        if prog == 'skool_daze.py':
            ini_maker = SDIniMaker(0)
        elif prog == 'skool_daze_take_too.py':
            ini_maker = SDIniMaker(1)
        elif prog == 'ezad_looks.py':
            ini_maker = SDIniMaker(2)
        elif prog == 'back_to_skool.py':
            ini_maker = BTSIniMaker(0)
        elif prog == 'back_to_skool_daze.py':
            ini_maker = BTSIniMaker(1)
        if ini_maker:
            odir = os.path.join(user_dir, 'ini', prog[:-3])
            if not os.path.isdir(odir):
                os.makedirs(odir)
            ini_maker.write_ini_files(odir, True, options.force)
    # Create sound files if requested
    if options.setup or options.create_sounds:
        if prog in SOUNDS:
            odir = os.path.join(user_dir, 'sounds')
            skoolsound.create_sounds(SOUNDS[prog], odir, True, options.force, options.sample_rate)
    # The --setup/--create-* options only do their work, then exit.
    if options.setup or options.create_images or options.create_ini or options.create_sounds:
        sys.exit(0)
    # Look for 'pyskool.ini'
    pyskool_ini = find('pyskool.ini', search_dirs)
    info("Using ini file at %s" % pyskool_ini)
    # Look for the directory that contains the game ini files
    if options.inidir:
        ini_dir = os.path.abspath(options.inidir)
        if not os.path.isdir(ini_dir):
            error('%s: directory does not exist' % ini_dir)
    else:
        ini_dir = find_dir(default_ini_dir, search_dirs)
    info("Using game ini files in %s" % ini_dir)
    # Look for the 'images' subdirectory
    images_dir = find_dir(images_subdir, search_dirs)
    info("Using images in %s" % images_dir)
    # Look for the 'sounds' subdirectory
    sounds_dir = find_dir(sounds_subdir, search_dirs)
    info("Using sounds in %s" % sounds_dir)
    # Look for a saved game (if specified on the command line)
    sav_file = None
    if options.savefile:
        if not os.path.isfile(options.savefile):
            error('%s: file not found' % options.savefile)
        sav_file = os.path.abspath(options.savefile)
    elif options.savedir:
        if not os.path.isdir(options.savedir):
            error('%s: directory does not exist' % options.savedir)
        sav_files = [f for f in os.listdir(options.savedir) if f.endswith('.sav') and os.path.isfile(os.path.join(options.savedir, f))]
        if sav_files:
            # NOTE(review): assumes .sav filenames sort so that the most
            # recent is last — confirm against the save-file naming scheme.
            sav_files.sort()
            sav_file = os.path.abspath(os.path.join(options.savedir, sav_files[-1]))
        else:
            info("No saved games found in %s" % options.savedir)
    # Change to the directory where games will be saved/loaded and screenshots
    # will be dumped
    os.chdir(user_dir)
    info("Using %s to save/load games" % os.getcwd())
    game = Game(pyskool_ini, images_dir, sounds_dir, ini_dir, options, version, sav_file)
    game.play()
|
skoolkid/pyskool
|
pyskool/run.py
|
Python
|
gpl-3.0
| 10,238
|
# -*- coding: utf-8 -*-
import wikipedia, re
import pagegenerators
import MySQLdb as mysqldb
import config

# Source data comes from English Wikipedia; the generated report is
# written to Persian Wikipedia.
faSite = wikipedia.getSite('fa')
enSite=wikipedia.getSite('en')
def numbertopersian(a):
    """Return *a* as a string with ASCII digits replaced by Persian
    digits and '.' replaced by the Persian decimal separator."""
    text = str(a)
    for latin, persian in zip(u'0123456789.', u'۰۱۲۳۴۵۶۷۸۹٫'):
        text = text.replace(latin, persian)
    return text
def data2fa(number, strict=False):
    """Resolve a Wikidata numeric item id to a Persian title.

    Preference order: the fawiki sitelink, then (unless *strict*) the
    'fa' label, then the 'en' label. Returns "" when nothing is usable.
    """
    data = wikipedia.DataPage(int(number))
    try:
        items = data.get()
    except Exception:
        # Missing item or fetch failure: treat as "no title".
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return ""
    if isinstance(items['links'], list):
        # Some responses encode "no sitelinks" as an empty list.
        items['links'] = {}
    # dict.has_key() was removed in Python 3; `in` works everywhere.
    if 'fawiki' in items['links']:
        return items['links']['fawiki']['name']
    if strict:
        return ""
    if 'fa' in items['label']:
        return items['label']['fa']
    try:
        return items['label']['en']
    except Exception:
        return ""
def ClaimFinder(our_Site,page_title,claim_num,more):
    # Collect the Persian titles of every value of property `claim_num`
    # on `page_title`'s Wikidata item, joined Persian-style.
    # Returns u' ' when the item cannot be fetched, False when no
    # matching claims were found, otherwise the joined string.
    # NOTE(review): the `more` parameter is unused here — confirm callers.
    fa_result=False
    fa_result_more=[]
    en_wdata=wikipedia.DataPage(wikipedia.Page(our_Site,page_title))
    try:
        items=en_wdata.get()
    except:
        # Missing or broken Wikidata item: single-space sentinel.
        return u' '
    if items['claims']:
        case=items['claims']
        for i in case:
            if i['m'][1]==claim_num:
                # Resolve each matching claim's target item to a Persian title.
                fa_result=data2fa(i[u'm'][3][u'numeric-id'])
                fa_result_more.append(fa_result)
    if fa_result_more:
        fa_result=u' '
        lenth=len(fa_result_more)-1
        if lenth>0:
            # Join with the Persian comma u'، ', using u' و ' ("and")
            # before the last element; the leading separator added by the
            # first iteration is sliced off afterwards.
            for i in fa_result_more:
                if i==fa_result_more[lenth]:
                    join=u' و '
                else:
                    join=u'، '
                fa_result+=join+i
            fa_result=fa_result[2:]
        else:
            fa_result=fa_result_more[0]
    if fa_result:
        wikipedia.output(u'\03{lightgreen}All Claims '+str(claim_num)+u' ='+fa_result+u'\03{default}')
        fa_result=fa_result.strip()
    return fa_result
# Build the wikitable header for the report page.
savetext = u"{{ویکیپدیا:مقالههای مهم ایجادنشده/فهرست/بالا}}\n"+u"آخرین به روز رسانی: ~~~~~"+u"\n{| class=\"wikitable sortable\"\n! نام !! تعداد{{سخ}}میانویکیها" + u" !! موضوع مقاله"
# sql part
# Mainspace pages with >= 35 interwiki links and no Persian ('fa') link,
# most-linked first.
query="Select /* SLOW_OK */ page_title AS title, cid From (Select ll_from As page_id, count(ll_lang) As cid From langlinks Group By ll_from Having count(ll_lang)>=35 And max(ll_lang='fa')=0) As sq Natural Join page Where page_namespace=0 Order By cid DESC,page_title;"
conn = mysqldb.connect('enwiki.labsdb', db = enSite.dbName(),
                       user = config.db_username,
                       passwd = config.db_password)
cursor = conn.cursor()
wikipedia.output(u'Executing query:\n%s' % query)
query = query.encode(enSite.encoding())
cursor.execute(query)
while True:
    try:
        # fetchone() returns None when exhausted; unpacking it then
        # raises TypeError, which ends the loop below.
        pageTitle, pagelinkCount = cursor.fetchone()
        print pageTitle, pagelinkCount
    except TypeError:
        # Limit reached or no more results
        break
    if pageTitle:
        # Normalize the DB title: decode, underscores to spaces, strip
        # surrounding quotes and whitespace.
        pageTitle = unicode(pageTitle, enSite.encoding())
        pageTitle = re.sub(ur"_",u" ",pageTitle)
        pageTitle = re.sub(ur"(^\"|\"$)",u"",pageTitle)
        pageTitle = re.sub(ur"(^\s*|\s$)",u"",pageTitle)
        if pageTitle != u"":
            type=False
            try:
                # Append a table row; the last cell is the Persian label
                # of the article's P31 ("instance of") values.
                savetext = savetext + u"\n|-\n| [[:en:" + pageTitle + u"|]] || " + numbertopersian(pagelinkCount) + u" || " + ClaimFinder(enSite,pageTitle,31,True)
            except:
                # On any Wikidata failure, emit the row without a subject.
                savetext = savetext + u"\n|-\n| [[:en:" + pageTitle + u"|]] || " + numbertopersian(pagelinkCount) + u" || "
# pywikipedia part
savetext = savetext + "\n|}"
wikipedia.output(savetext)
# Publish the report page, then touch the signature page with a timestamp.
page = wikipedia.Page(faSite,u"ویکیپدیا:گزارش دیتابیس/مقالههای مهم ایجادنشده")
page.put(savetext,u"ربات: به روز رسانی")
sign_page=u'ویکیپدیا:گزارش دیتابیس/مقالههای مهم ایجادنشده/امضا'
madak=u'~~~~~'
sign_page=wikipedia.Page(faSite,sign_page)
sign_page.put(madak,u'ربات:تاریخ بروز رسانی')
|
PersianWikipedia/fawikibot
|
Compat codes/zziwneededpage.py
|
Python
|
mit
| 4,252
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
    """Root of the exception hierarchy raised by this OpenAPI client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raised when a value has the wrong type.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the
                offending item; None if unset
            valid_classes (tuple): the primitive classes the item should
                be an instance of; None if unset
            key_type (bool): True for a dict key, False for a dict value
                or list item; None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        located_msg = "{0} at {1}".format(msg, render_path(path_to_item)) if path_to_item else msg
        super(ApiTypeError, self).__init__(located_msg)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Raised when a received value is invalid.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict; None if unset
        """
        self.path_to_item = path_to_item
        located_msg = "{0} at {1}".format(msg, render_path(path_to_item)) if path_to_item else msg
        super(ApiValueError, self).__init__(located_msg)
class ApiAttributeError(OpenApiException, AttributeError):
    def __init__(self, msg, path_to_item=None):
        """Raised when an attribute reference or assignment fails.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict; None if unset
        """
        self.path_to_item = path_to_item
        if not path_to_item:
            located_msg = msg
        else:
            located_msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiAttributeError, self).__init__(located_msg)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Raised for missing or invalid mapping keys.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict; None if unset
        """
        self.path_to_item = path_to_item
        located_msg = msg
        if path_to_item:
            located_msg += " at {0}".format(render_path(path_to_item))
        super(ApiKeyError, self).__init__(located_msg)
class ApiException(OpenApiException):
    """HTTP-level API error carrying status, reason, headers and body."""

    def __init__(self, status=None, reason=None, http_resp=None):
        # Prefer details from an actual HTTP response when one is given;
        # otherwise fall back to the explicit status/reason arguments.
        if not http_resp:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None
        else:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()

    def __str__(self):
        """Custom error messages for exception"""
        lines = ["({0})\n".format(self.status),
                 "Reason: {0}\n".format(self.reason)]
        if self.headers:
            lines.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            lines.append("HTTP response body: {0}\n".format(self.body))
        return "".join(lines)
# Convenience subclasses of ApiException: identical construction, but
# distinct types let callers catch specific error categories
# (presumably mapped from HTTP status codes by the generated client —
# confirm in the api_client code).

class NotFoundException(ApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        super(NotFoundException, self).__init__(status, reason, http_resp)

class UnauthorizedException(ApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        super(UnauthorizedException, self).__init__(status, reason, http_resp)

class ForbiddenException(ApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        super(ForbiddenException, self).__init__(status, reason, http_resp)

class ServiceException(ApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        super(ServiceException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
    """Returns a string representation of a path"""
    def _render_part(part):
        # Integer indices render as [i]; any other key renders as ['key'].
        if isinstance(part, six.integer_types):
            return "[{0}]".format(part)
        return "['{0}']".format(part)

    return "".join(_render_part(part) for part in path_to_item)
|
cliffano/swaggy-jenkins
|
clients/python-legacy/generated/openapi_client/exceptions.py
|
Python
|
mit
| 5,136
|
from django.test import TestCase
from community.constants import (CONTENT_CONTRIBUTOR, CONTENT_MANAGER,
USER_CONTENT_MANAGER, COMMUNITY_ADMIN,
DEFAULT_COMMUNITY_ACTIVE_PAGE,
COMMUNITY_PRESENCE_CHOICES, COMMUNITY_TYPES_CHOICES,
COMMUNITY_CHANNEL_CHOICES, YES_NO_CHOICES,
ORDER_NULL_MSG, ORDER_ALREADY_EXISTS_MSG, SLUG_ALREADY_EXISTS_MSG,
ORDER_NULL, SLUG_ALREADY_EXISTS, ORDER_ALREADY_EXISTS, OK,
SUCCESS_MSG)
class ConstantsTestCase(TestCase):
    """Verify the community constants' rendered values and choice lists."""

    def setUp(self):
        # Placeholder values substituted into the message templates.
        self.foo = "Foo"
        self.bar = "Bar"

    def test_content_contributor_constant(self):
        """CONTENT_CONTRIBUTOR renders with the community name prefixed."""
        self.assertEqual(CONTENT_CONTRIBUTOR.format(self.foo),
                         "Foo: Content Contributor")

    def test_content_manager_constant(self):
        """CONTENT_MANAGER renders with the community name prefixed."""
        self.assertEqual(CONTENT_MANAGER.format(self.foo),
                         "Foo: Content Manager")

    def test_user_content_manager_constant(self):
        """USER_CONTENT_MANAGER renders with the community name prefixed."""
        self.assertEqual(USER_CONTENT_MANAGER.format(self.foo),
                         "Foo: User and Content Manager")

    def test_community_admin_constant(self):
        """COMMUNITY_ADMIN renders with the community name prefixed."""
        self.assertEqual(COMMUNITY_ADMIN.format(self.foo),
                         "Foo: Community Admin")

    def test_default_active_community_page_constant(self):
        self.assertEqual('news', DEFAULT_COMMUNITY_ACTIVE_PAGE)

    def test_community_presence_choices_constant(self):
        """Community presence field choices match, order-insensitively."""
        expected_choices = [
            ('Facebook Page', 'Facebook Page'),
            ('Facebook Group', 'Facebook Group'),
            ('Twitter', 'Twitter'),
            ('Instagram', 'Instagram'),
            ('Other', 'Other')
        ]
        self.assertCountEqual(expected_choices, COMMUNITY_PRESENCE_CHOICES)

    def test_community_types_choices_constant(self):
        """Community types field choices match, order-insensitively."""
        expected_choices = [
            ('Affinity Group', 'Affinity Group (Latinas in Computing, LGBT, etc)'),
            ('Special Interest Group',
             'Special Interest Group (Student Researchers, Systers in Government,'
             'Women in Cyber Security, etc) '),
            ('Email list', 'Email list (using Mailman3)'),
            ('Other', 'Other')
        ]
        self.assertCountEqual(expected_choices, COMMUNITY_TYPES_CHOICES)

    def test_community_channel_choices_constant(self):
        """Community channel field choices match, order-insensitively."""
        expected_choices = [
            ('Existing Social Media Channels ', 'Existing Social Media Channels '),
            ('Request New Social Media Channels ',
             'Request New Social Media Channels ')
        ]
        self.assertCountEqual(expected_choices, COMMUNITY_CHANNEL_CHOICES)

    def test_yes_no_choices_constant(self):
        """Yes/no field choices match, order-insensitively."""
        self.assertCountEqual([('Yes', 'Yes'), ('No', 'No')], YES_NO_CHOICES)

    def test_order_null_msg_constant(self):
        """ORDER_NULL_MSG is the expected user-facing text."""
        self.assertEqual(ORDER_NULL_MSG,
                         "Order is not set, Please choose an order.")

    def test_success_msg_constant(self):
        """SUCCESS_MSG is the expected user-facing text."""
        self.assertEqual(SUCCESS_MSG, "Community created successfully!")

    def test_order_already_exists_msg_constant(self):
        """ORDER_ALREADY_EXISTS_MSG interpolates both placeholders."""
        self.assertEqual(
            ORDER_ALREADY_EXISTS_MSG.format(self.foo, self.bar),
            "Order Foo already exists, please choose an order other than Bar.")

    def test_slug_already_exists_msg_constant(self):
        """SLUG_ALREADY_EXISTS_MSG interpolates both placeholders."""
        self.assertEqual(
            SLUG_ALREADY_EXISTS_MSG.format(self.foo, self.bar),
            "Slug Foo already exists, please choose a slug other than Bar.")

    def test_order_null_constant(self):
        self.assertEqual(ORDER_NULL, "order_null")

    def test_slug_already_exists_constant(self):
        self.assertEqual(SLUG_ALREADY_EXISTS, "slug_already_exists")

    def test_order_already_exists_constant(self):
        self.assertEqual(ORDER_ALREADY_EXISTS, "order_already_exists")

    def test_ok_constant(self):
        self.assertEqual(OK, "success")
|
payal97/portal
|
systers_portal/community/tests/test_constants.py
|
Python
|
gpl-2.0
| 5,312
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
# revision identifiers, used by Alembic.
# Alembic migration identity: this revision and the revision it follows.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependencies
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable start/end timestamp columns to the dag_run table."""
    # Keep end_date first: the add order determines the physical column
    # order on backends that append new columns at the end of the table.
    for column_name in ('end_date', 'start_date'):
        op.add_column('dag_run',
                      sa.Column(column_name, sa.DateTime(), nullable=True))
def downgrade():
    """Drop the start/end timestamp columns added by this revision."""
    for column_name in ('start_date', 'end_date'):
        op.drop_column('dag_run', column_name)
|
danielvdende/incubator-airflow
|
airflow/migrations/versions/4446e08588_dagrun_start_end.py
|
Python
|
apache-2.0
| 1,396
|
# Copyright (C) 2007-2012 University of Dundee & Open Microscopy Environment.
# All Rights Reserved.
import turbogears, cherrypy, re
from turbogears import controllers, expose, validators
from docutils.core import publish_parts
from time import gmtime, strftime
import os
from OmeValidator import *
# from model import *
# import logging
# log = logging.getLogger("validator.controllers")
# Default upload dir is ./uploads relative to the working directory; the
# "validator.uploads" config key overrides it. The directory is created
# eagerly at import time so upload handlers can assume it exists.
UPLOAD_DIR = cherrypy.config.get("validator.uploads", os.path.join(os.getcwd(),"uploads"))
if not os.path.exists(UPLOAD_DIR):
    os.makedirs(UPLOAD_DIR)
class Root(controllers.Root):
    """TurboGears controller exposing the OME file validator web pages.

    Control flow note: ``turbogears.redirect`` is raised, so any call to it
    aborts the current handler immediately.
    """

    @expose(template="validator.templates.validator")
    def index(self):
        """Render the main validator page."""
        return dict()

    @expose("validator.templates.upload")
    def error(self, message):
        """Flash *message* as an error and bounce the user to the start page."""
        turbogears.flash("ERROR: %s " % message)
        raise turbogears.redirect("/")

    @expose()
    def upload(self, upload_file, **keywords):
        """Store an uploaded OME-TIFF/XML file in this session's upload dir.

        The stored name is prefixed with a UTC timestamp; on success the
        file name is appended to the session's ``filenames`` list.
        """
        # check type of file (only XML and TIFF)
        sufix = upload_file.filename.split('.')
        size = len(sufix)
        if sufix[size-1] not in ('tif', 'tiff', 'xml', 'ome'):
            if upload_file.filename == '':
                msg = 'FileError: no file selected.'
            else:
                msg = "FileError: File %s could not be uploaded because is not OME-TIFF file." % upload_file.filename
            raise turbogears.redirect("error", message=msg)
        time = strftime("%Y-%m-%d-%H-%M-%S_", gmtime())
        try:
            dirpath = os.getcwd()+'/'+UPLOAD_DIR+'/'+cherrypy.session.id + '/'
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            # target file, disambiguated by the timestamp prefix
            target_file_name = os.path.join(os.getcwd(), UPLOAD_DIR, cherrypy.session.id, time+upload_file.filename)
            # open file in binary mode for writing
            f = open(target_file_name, 'wb')
        except IOError:
            msg = "IOError: File %s could not be uploaded" % upload_file.filename
            raise turbogears.redirect("error", message=msg)
        except Exception:
            # was a bare except:, which also swallowed SystemExit and
            # KeyboardInterrupt
            msg = "UnexpectedError: File could not be uploaded"
            raise turbogears.redirect("error", message=msg)
        else:
            try:
                try:
                    # copy the upload in 1 KiB chunks
                    bytes = upload_file.file.read(1024)
                    while bytes:
                        f.write(bytes)
                        bytes = upload_file.file.read(1024)
                except IOError:
                    msg = "IOError: File %s could not be written" % upload_file.filename
                    raise turbogears.redirect("error", message=msg)
                except AttributeError:
                    msg = "AttributeError: File %s could not be read" % upload_file.filename
                    raise turbogears.redirect("error", message=msg)
                except Exception:
                    msg = "UnexpectedError: File %s could not be read" % upload_file.filename
                    raise turbogears.redirect("error", message=msg)
            finally:
                # always release the target handle; the original leaked it
                # whenever reading/writing failed and a redirect was raised
                f.close()
        # Session variable initialization (or recall if exists)
        cherrypy.session['filenames'] = cherrypy.session.get('filenames', [])
        cherrypy.session['dirpath'] = cherrypy.session.get('dirpath', 0)
        # Record the stored file for this session
        cherrypy.session.get('filenames', []).append(time+upload_file.filename)
        cherrypy.session['dirpath'] = dirpath
        # Report success and show the file list
        turbogears.flash("File uploaded successfully: %s." % (time+upload_file.filename))
        raise turbogears.redirect("/fileslist")

    @expose(html="validator.templates.fileslist")
    def fileslist(self):
        """List the files uploaded during this session."""
        filenames = cherrypy.session.get('filenames', [])
        if filenames:
            return dict(uploadlist=filenames)
        raise turbogears.redirect("/")

    @expose(html="validator.templates.result")
    def result(self, filename):
        """Validate a previously uploaded *filename* and render the report."""
        # list from session
        filenames = cherrypy.session.get('filenames', [])
        if not filenames:
            raise turbogears.redirect("/")
        filepath = cherrypy.session.get('dirpath', 0)+filename
        if not os.path.isfile(filepath):
            # fixed garbled message (was "File could valid")
            msg = "UnexpectedError: File could not be validated"
            raise turbogears.redirect("error", message=msg)
        sufix = filename.split('.')
        size = len(sufix)
        if sufix[size-1] in ('tif', 'tiff'):
            result = XmlReport.validateTiff(filepath)
        else:
            result = XmlReport.validateFile(filepath)
        # Map the document namespace to a human-readable schema label.
        if result.theNamespace == 'http://www.openmicroscopy.org/XMLschemas/OME/FC/ome.xsd':
            if result.isOmeTiff:
                schema = 'http://www.openmicroscopy.org/XMLschemas/OME/FC/ome.xsd (OME-TIFF variant)'
            else:
                schema = 'http://www.openmicroscopy.org/XMLschemas/OME/FC/ome.xsd (Standard)'
        else:
            schema_labels = {
                "http://www.openmicroscopy.org/Schemas/OME/2007-06":
                    "http://www.openmicroscopy.org/Schemas/OME/2007-06 (Standard V2)",
                "http://www.openmicroscopy.org/Schemas/OME/2008-02":
                    "http://www.openmicroscopy.org/Schemas/OME/2008-02 (Standard V2)",
                "http://www.openmicroscopy.org/Schemas/OME/2008-09":
                    "http://www.openmicroscopy.org/Schemas/OME/2008-09 (Standard V1)",
                "http://www.openmicroscopy.org/Schemas/OME/2009-09":
                    "http://www.openmicroscopy.org/Schemas/OME/2009-09 (Standard V1)",
                "http://www.openmicroscopy.org/Schemas/OME/2010-04":
                    "http://www.openmicroscopy.org/Schemas/OME/2010-04 (Standard V1)",
                "http://www.openmicroscopy.org/Schemas/OME/2010-06":
                    "http://www.openmicroscopy.org/Schemas/OME/2010-06 (Standard V1)",
                "http://www.openmicroscopy.org/Schemas/OME/2011-06":
                    "http://www.openmicroscopy.org/Schemas/OME/2011-06 (Standard V1) - Pre-Release",
            }
            schema = schema_labels.get(
                result.theNamespace,
                "No schema found - using http://www.openmicroscopy.org/Schemas/OME/2010-06 (Standard V1)")
        return dict(filepath=filename, result=result, schema=schema)

    @expose()
    def default(self, name):
        """Fall back to the main page for any unknown URL."""
        return self.index()
|
rleigh-dundee/openmicroscopy
|
components/validator/WebApp/validator/controllers.py
|
Python
|
gpl-2.0
| 5,828
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
#Filename: dict_zip.py
names = ['raymond', 'rachel', 'matthew']
colors = ['red', 'green', 'blue']
# Pair each name with the colour at the same position and build a mapping.
d = {name: color for name, color in zip(names, colors)}
print(d)
# >>> {'raymond': 'red', 'rachel': 'green', 'matthew': 'blue'}
|
klose911/klose911.github.io
|
src/python/src/pythonic/dict/dict_zip.py
|
Python
|
apache-2.0
| 292
|
"""
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_bytes, force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
    "Wraps an OGR Data Source object."
    # GDALBase calls this to free the underlying OGR handle when the
    # Python object is garbage-collected.
    destructor = capi.destroy_ds
    def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
        # The write flag (OGR expects an int: 1 = writable, 0 = read-only).
        if write:
            self._write = 1
        else:
            self._write = 0
        # See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
        self.encoding = encoding
        Driver.ensure_registered()
        if isinstance(ds_input, str):
            # The data source driver is a void pointer; OGROpen fills it in
            # with the driver that recognised the file.
            ds_driver = Driver.ptr_type()
            try:
                # OGROpen will auto-detect the data source type.
                ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
            except GDALException:
                # Making the error message more clear rather than something
                # like "Invalid pointer returned from OGROpen".
                raise GDALException('Could not open the datasource at "%s"' % ds_input)
        elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
            # Caller passed in raw ctypes pointers directly (internal use).
            ds = ds_input
        else:
            raise GDALException('Invalid data source input type: %s' % type(ds_input))
        if ds:
            self.ptr = ds
            self.driver = Driver(ds_driver)
        else:
            # Raise an exception if the returned pointer is NULL
            raise GDALException('Invalid data source file "%s"' % ds_input)
    def __getitem__(self, index):
        "Allows use of the index [] operator to get a layer at the index."
        # String indexes look layers up by name; integers by position.
        if isinstance(index, str):
            try:
                layer = capi.get_layer_by_name(self.ptr, force_bytes(index))
            except GDALException:
                raise IndexError('Invalid OGR layer name given: %s.' % index)
        elif isinstance(index, int):
            if 0 <= index < self.layer_count:
                layer = capi.get_layer(self._ptr, index)
            else:
                raise IndexError('Index out of range when accessing layers in a datasource: %s.' % index)
        else:
            raise TypeError('Invalid index type: %s' % type(index))
        # The Layer keeps a reference to this DataSource so the underlying
        # OGR handle stays alive while the layer is in use.
        return Layer(layer, self)
    def __len__(self):
        "Return the number of layers within the data source."
        return self.layer_count
    def __str__(self):
        "Return OGR GetName and Driver for the Data Source."
        return '%s (%s)' % (self.name, self.driver)
    @property
    def layer_count(self):
        "Return the number of layers in the data source."
        return capi.get_layer_count(self._ptr)
    @property
    def name(self):
        "Return the name of the data source."
        name = capi.get_ds_name(self._ptr)
        return force_text(name, self.encoding, strings_only=True)
|
edmorley/django
|
django/contrib/gis/gdal/datasource.py
|
Python
|
bsd-3-clause
| 4,490
|
#!/usr/bin/python3
# -*- coding: utf-8
import logging
import warnings
from shapely import geometry as geopy
import feedinlib.models as models
import feedinlib.powerplants as plants
from oemof.db import coastdat
import oemof.db as db
try:
from matplotlib import pyplot as plt
except ImportError:
plt = None
# Feel free to remove or change these lines: they only quieten runtime
# warnings and raise the log level for this example run.
warnings.simplefilter(action="ignore", category=RuntimeWarning)
logging.getLogger().setLevel(logging.INFO)
# Specification of the wind model: names and meanings of the parameters
# every wind power plant definition below must provide.
required_parameter_wind = {
    'h_hub': 'height of the hub in meters',
    'd_rotor': 'diameter of the rotor in meters',
    'wind_conv_type': 'wind converter according to the list in the csv file.',
    'data_height': 'dictionary containing the heights of the data model',
}
# Specification of the pv model
required_parameter_pv = {
    'azimuth': 'Azimuth angle of the pv module',
    'tilt': 'Tilt angle of the pv module',
    'module_name': 'According to the sandia module library.',
    'albedo': 'Albedo value',
}
# Specification of the weather data set CoastDat2.
# NOTE(review): values look like measurement heights in meters for each
# weather variable (e.g. wind speed at 10 m) — confirm against the
# feedinlib weather-model documentation.
coastDat2 = {
    'dhi': 0,
    'dirhi': 0,
    'pressure': 0,
    'temp_air': 2,
    'v_wind': 10,
    'Z0': 0,
}
# Specification of the pv module
advent210 = {
    'module_name': 'Advent_Solar_Ventura_210___2008_',
    'azimuth': 180,
    'tilt': 30,
    'albedo': 0.2,
}
# Specification of the pv module
yingli210 = {
    'module_name': 'Yingli_YL210__2008__E__',
    'azimuth': 180,
    'tilt': 30,
    'albedo': 0.2,
}
loc_berlin = {'tz': 'Europe/Berlin', 'latitude': 52.5, 'longitude': 13.5}
# Specifications of the wind turbines
enerconE126 = {
    'h_hub': 135,
    'd_rotor': 127,
    'wind_conv_type': 'ENERCON E 126 7500',
    'data_height': coastDat2,
}
vestasV90 = {
    'h_hub': 105,
    'd_rotor': 90,
    'wind_conv_type': 'VESTAS V 90 3000',
    'data_height': coastDat2,
}
year = 2010
# Fetch weather from the coastdat database: one dataset for a single point
# and one for every grid cell intersecting the polygon below.
conn = db.connection()
my_weather_single = coastdat.get_weather(
    conn, geopy.Point(loc_berlin['longitude'], loc_berlin['latitude']), year
)
geo = geopy.Polygon([(12.2, 52.2), (12.2, 51.6), (13.2, 51.6), (13.2, 52.2)])
multi_weather = coastdat.get_weather(conn, geo, year)
my_weather = multi_weather[0]
# my_weather = my_weather_single
# Initialise different power plants
E126_power_plant = plants.WindPowerPlant(**enerconE126)
V90_power_plant = plants.WindPowerPlant(**vestasV90)
# Create a feedin series for a specific powerplant under specific weather
# conditions. One can define the number of turbines or the over all capacity.
# If no multiplier is set, the time series will be for one turbine.
E126_feedin = E126_power_plant.feedin(weather=my_weather, number=2)
V90_feedin = V90_power_plant.feedin(
    weather=my_weather, installed_capacity=(15 * 10 ** 6)
)
E126_feedin.name = 'E126'
V90_feedin.name = 'V90'
# Plot when matplotlib is available; otherwise print the series.
if plt:
    E126_feedin.plot(legend=True)
    V90_feedin.plot(legend=True)
    plt.show()
else:
    print(V90_feedin)
# Apply the model
yingli_module = plants.Photovoltaic(**yingli210)
advent_module = plants.Photovoltaic(**advent210)
# Apply the pv plant: the same module scaled four different ways
# (unit count, area, peak power, and the unscaled default).
pv_feedin1 = yingli_module.feedin(weather=my_weather, number=30000)
pv_feedin2 = yingli_module.feedin(weather=my_weather, area=15000)
pv_feedin3 = yingli_module.feedin(weather=my_weather, peak_power=15000)
pv_feedin4 = yingli_module.feedin(weather=my_weather)
pv_feedin5 = advent_module.feedin(weather=my_weather)
pv_feedin4.name = 'Yingli'
pv_feedin5.name = 'Advent'
# Output
if plt:
    pv_feedin4.plot(legend=True)
    pv_feedin5.plot(legend=True)
    plt.show()
else:
    print(pv_feedin5)
# Use directly methods of the model: list supported converter types and
# fetch the power-coefficient curve of one of them.
w_model = models.SimpleWindTurbine()
w_model.get_wind_pp_types()
cp_values = models.SimpleWindTurbine().fetch_cp_values(
    wind_conv_type='ENERCON E 126 7500'
)
if plt:
    plt.plot(
        cp_values.loc[0, :][2:55].index, cp_values.loc[0, :][2:55].values, '*'
    )
    plt.show()
else:
    print(cp_values.loc[0, :][2:55].values)
logging.info('Done!')
|
oemof/oemof_pg
|
src/oemof/db/examples/feedinlib_example_coastdat.py
|
Python
|
gpl-3.0
| 3,962
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record API."""
from __future__ import absolute_import, print_function
import uuid
import os
from invenio_records.api import Record
from invenio_jsonschemas import current_jsonschemas
from .minters import kwid_minter
class Keyword(Record):
    """Define API for a keywords."""

    # JSON Schema every keyword record is validated against.
    _schema = 'keywords/keyword-v1.0.0.json'

    @classmethod
    def create(cls, data, id_=None, **kwargs):
        """Create a keyword."""
        data['$schema'] = current_jsonschemas.path_to_url(cls._schema)
        key_id = data.get('key_id', None)
        name = data.get('name', None)
        data.setdefault('deleted', False)
        if not id_:
            # Mint a fresh identifier for brand-new keywords.
            id_ = uuid.uuid4()
            kwid_minter(id_, data)
        suggestion_payload = {'key_id': key_id, 'name': name}
        data['suggest_name'] = {
            'input': name,
            'payload': suggestion_payload,
        }
        return super(Keyword, cls).create(data=data, id_=id_, **kwargs)

    @property
    def ref(self):
        """Get the url."""
        return Keyword.get_ref(self['key_id'])

    @classmethod
    def get_id(cls, ref):
        """Get the ID from the reference."""
        return os.path.basename(ref)

    @classmethod
    def get_ref(cls, id_):
        """Get reference from an ID."""
        return 'https://cds.cern.ch/api/keywords/' + str(id_)
class Category(Record):
    """Define API for a category."""

    # JSON Schema every category record is validated against.
    _schema = 'categories/category-v1.0.0.json'

    @classmethod
    def create(cls, data, id_=None, **kwargs):
        """Create a category."""
        data['$schema'] = current_jsonschemas.path_to_url(cls._schema)
        name = data.get('name', None)
        types = data.get('types', [])
        data['suggest_name'] = {
            'input': name,
            'payload': {'types': types},
        }
        return super(Category, cls).create(data=data, id_=id_, **kwargs)
|
omelkonian/cds
|
cds/modules/records/api.py
|
Python
|
gpl-2.0
| 2,845
|
"""Tests for `maas.client.flesh.machines`."""
from functools import partial
from operator import itemgetter
import yaml
from .testing import TestCaseWithProfile
from .. import ArgumentParser, machines, tabular
from ...enum import NodeStatus, PowerState
from ...testing import make_name_without_spaces
from ...viscera.testing import bind
from ...viscera.machines import Machine, Machines
from ...viscera.resource_pools import ResourcePool
from ...viscera.tags import Tag, Tags
from ...viscera.users import User
from ...viscera.zones import Zone
def make_origin():
    """Make origin for machines.

    Binds together every viscera object type the machine tests touch so
    cross-references between them resolve (see ``viscera.testing.bind``).
    """
    return bind(Machines, Machine, User, ResourcePool, Zone, Tag, Tags)
class TestMachines(TestCaseWithProfile):
    """Tests for `cmd_machines`."""
    def test_returns_table_with_machines(self):
        """The command renders one table row per machine from the handler."""
        origin = make_origin()
        parser = ArgumentParser()
        # Two fixture machines as raw handler payloads (wire format).
        machine_objs = [
            {
                "hostname": make_name_without_spaces(),
                "architecture": "amd64/generic",
                "status": NodeStatus.READY.value,
                "status_name": NodeStatus.READY.name,
                "owner": None,
                "power_state": PowerState.OFF.value,
                "cpu_count": 2,
                "memory": 1024,
                "pool": {"id": 1, "name": "pool1", "description": "pool1"},
                "zone": {"id": 1, "name": "zone1", "description": "zone1"},
            },
            {
                "hostname": make_name_without_spaces(),
                "architecture": "i386/generic",
                "status": NodeStatus.DEPLOYED.value,
                "status_name": NodeStatus.DEPLOYED.name,
                "owner": make_name_without_spaces(),
                "power_state": PowerState.ON.value,
                "cpu_count": 4,
                "memory": 4096,
                "pool": {"id": 2, "name": "pool2", "description": "pool2"},
                "zone": {"id": 2, "name": "zone2", "description": "zone2"},
            },
        ]
        origin.Machines._handler.read.return_value = machine_objs
        cmd = machines.cmd_machines(parser)
        subparser = machines.cmd_machines.register(parser)
        options = subparser.parse_args([])
        # Render to YAML so the output is easy to parse back.
        output = yaml.safe_load(
            cmd.execute(origin, options, target=tabular.RenderTarget.yaml)
        )
        # assertEqual (assertEquals is a deprecated alias)
        self.assertEqual(
            [
                {"name": "hostname", "title": "Hostname"},
                {"name": "power", "title": "Power"},
                {"name": "status", "title": "Status"},
                {"name": "owner", "title": "Owner"},
                {"name": "architecture", "title": "Arch"},
                {"name": "cpus", "title": "#CPUs"},
                {"name": "memory", "title": "RAM"},
                {"name": "pool", "title": "Resource pool"},
                {"name": "zone", "title": "Zone"},
            ],
            output["columns"],
        )
        # Expected rows, sorted by hostname to match the command's ordering.
        machines_output = sorted(
            [
                {
                    "hostname": machine["hostname"],
                    "power": machine["power_state"],
                    "status": machine["status_name"],
                    "owner": machine["owner"] if machine["owner"] else "(none)",
                    "architecture": machine["architecture"],
                    "cpus": machine["cpu_count"],
                    "memory": machine["memory"],
                    "pool": machine["pool"]["name"],
                    "zone": machine["zone"]["name"],
                }
                for machine in machine_objs
            ],
            key=itemgetter("hostname"),
        )
        self.assertEqual(machines_output, output["data"])
    def test_calls_handler_with_hostnames(self):
        """Positional hostname arguments are forwarded to the handler."""
        origin = make_origin()
        parser = ArgumentParser()
        origin.Machines._handler.read.return_value = []
        subparser = machines.cmd_machines.register(parser)
        cmd = machines.cmd_machines(parser)
        hostnames = [make_name_without_spaces() for _ in range(3)]
        options = subparser.parse_args(hostnames)
        cmd.execute(origin, options, target=tabular.RenderTarget.yaml)
        origin.Machines._handler.read.assert_called_once_with(hostname=hostnames)
class TestMachine(TestCaseWithProfile):
    """Tests for `cmd_machine`."""
    def setUp(self):
        # Build one fixture machine and pre-bind the command so each test
        # only has to pick a render target.
        super().setUp()
        origin = make_origin()
        parser = ArgumentParser()
        self.hostname = make_name_without_spaces()
        machine_objs = [
            {
                "hostname": self.hostname,
                "architecture": "amd64/generic",
                "status": NodeStatus.READY.value,
                "status_name": NodeStatus.READY.name,
                "owner": None,
                "power_state": PowerState.OFF.value,
                "cpu_count": 2,
                "memory": 1024,
                "pool": {"id": 1, "name": "pool1", "description": "pool1"},
                "zone": {"id": 1, "name": "zone1", "description": "zone1"},
                "tag_names": ["tag1", "tag2"],
                "distro_series": "",
                "power_type": "Manual",
            },
        ]
        origin.Machines._handler.read.return_value = machine_objs
        cmd = machines.cmd_machine(parser)
        subparser = machines.cmd_machine.register(parser)
        options = subparser.parse_args([machine_objs[0]["hostname"]])
        self.cmd = partial(cmd.execute, origin, options)
    def test_yaml_machine_details_with_tags(self):
        # YAML render must surface the machine's tag names.
        yaml_output = yaml.safe_load(self.cmd(target=tabular.RenderTarget.yaml))
        self.assertEqual(yaml_output.get("tags"), ["tag1", "tag2"])
    def test_plain_machine_details_with_tags(self):
        # Plain render is compared byte-for-byte against the expected table,
        # so the literal below (padding included) must not be reformatted.
        plain_output = self.cmd(target=tabular.RenderTarget.plain)
        self.assertEqual(
            plain_output,
            f"""\
+---------------+-------------+
| Hostname      | {self.hostname} |
| Status        | READY       |
| Image         | (none)      |
| Power         | Off         |
| Power Type    | Manual      |
| Arch          | amd64       |
| #CPUs         | 2           |
| RAM           | 1.0 GB      |
| Interfaces    | 0 physical  |
| IP addresses  |             |
| Resource pool | pool1       |
| Zone          | zone1       |
| Owner         | (none)      |
| Tags          | tag1        |
|               | tag2        |
+---------------+-------------+""",
        )
|
alburnum/alburnum-maas-client
|
maas/client/flesh/tests/test_machines.py
|
Python
|
agpl-3.0
| 6,399
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from PyQt5.Qt import QToolButton, QAction, pyqtSignal, QIcon
from calibre.gui2.actions import InterfaceAction
from calibre.utils.icu import sort_key
class SortAction(QAction):
    """Menu action that emits ``sort_requested`` with its key/direction."""

    sort_requested = pyqtSignal(object, object)

    def __init__(self, text, key, ascending, parent):
        QAction.__init__(self, text, parent)
        self.key = key
        self.ascending = ascending
        # The action object is itself callable, so route triggered to it.
        self.triggered.connect(self)

    def __call__(self):
        self.sort_requested.emit(self.key, self.ascending)
class SortByAction(InterfaceAction):
    # Toolbar/menu action that rebuilds a "sort by field" menu on demand.
    name = 'Sort By'
    action_spec = (_('Sort By'), 'arrow-up.png', _('Sort the list of books'), None)
    action_type = 'current'
    popup_type = QToolButton.InstantPopup
    action_add_menu = True
    dont_add_to = frozenset([
        'toolbar', 'toolbar-device', 'context-menu-device', 'toolbar-child',
        'menubar', 'menubar-device', 'context-menu-cover-browser'])
    def genesis(self):
        # One-time setup: icon used to mark the currently active sort field.
        self.sorted_icon = QIcon(I('ok.png'))
    def location_selected(self, loc):
        # Sorting only makes sense in the library view, not on a device.
        self.qaction.setEnabled(loc == 'library')
    def update_menu(self):
        """Rebuild the sort menu from the library's sortable fields."""
        menu = self.qaction.menu()
        # Disconnect old actions before clearing so no stale signal
        # connections keep them alive.
        for action in menu.actions():
            action.sort_requested.disconnect()
        menu.clear()
        lv = self.gui.library_view
        m = lv.model()
        db = m.db
        try:
            sort_col, order = m.sorted_on
        except TypeError:
            # No current sort recorded; fall back to date ascending.
            sort_col, order = 'date', True
        fm = db.field_metadata
        def get_name(k):
            # Display name for a field key; 'cover' gets a friendlier label.
            ans = fm[k]['name']
            if k == 'cover':
                ans = _('Has cover')
            return ans
        name_map = {get_name(k):k for k in fm.sortable_field_keys() if fm[k]['name']}
        self._sactions = []
        for name in sorted(name_map, key=sort_key):
            key = name_map[name]
            # Internal/bookkeeping fields are not offered for sorting.
            if key in {'sort', 'series_sort', 'formats', 'path'}:
                continue
            if key == 'ondevice' and self.gui.device_connected is None:
                continue
            ascending = None
            if key == sort_col:
                # Selecting the active field again reverses the sort order.
                name = _('%s [reverse current sort]') % name
                ascending = not order
            sac = SortAction(name, key, ascending, menu)
            if key == sort_col:
                sac.setIcon(self.sorted_icon)
            sac.sort_requested.connect(self.sort_requested)
            menu.addAction(sac)
    def sort_requested(self, key, ascending):
        # ascending is None for a non-active field: let the view pick the
        # sensible default direction; otherwise apply the explicit one.
        if ascending is None:
            self.gui.library_view.intelligent_sort(key, True)
        else:
            self.gui.library_view.sort_by_named_field(key, ascending)
|
sharad/calibre
|
src/calibre/gui2/actions/sort.py
|
Python
|
gpl-3.0
| 2,866
|
'''EffectWidget
============
(highly experimental)
Experiment to make an EffectWidget, exposing part of the shader
pipeline so users can write and easily apply their own glsl effects.
Basic idea: Take implementation inspiration from shadertree example,
draw children to Fbo and apply custom shader to a RenderContext.
Effects
-------
An effect is a string representing part of a glsl fragment shader. It
must implement a function :code:`effect` as below::
vec4 effect( vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
...
}
The parameters are:
- **color**: The normal colour of the current pixel (i.e. texture sampled at tex_coords)
- **texture**: The texture containing the widget's normal background
- **tex_coords**: The normal texture_coords used to access texture
- **coords**: The pixel indices of the current pixel.
The shader code also has access to two useful uniform variables,
:code:`time` containing the time (in seconds) since the program start,
and :code:`resolution` containing the shape (x pixels, y pixels) of
the widget.
'''
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import StringProperty, ObjectProperty, ListProperty
from kivy.graphics import (RenderContext, Fbo, Color, Rectangle,
Translate, PushMatrix, PopMatrix,
ClearColor, ClearBuffers)
from kivy.base import EventLoop
from kivy.graphics.opengl import *
from kivy.graphics import *
Builder.load_string('''
<EffectWidget>:
canvas:
Color:
rgba: 1, 1, 1, 0
Rectangle:
texture: self.texture
pos: self.pos
size: self.size
''')
shader_header = '''
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
uniform sampler2D texture1;
uniform sampler2D texture3;
uniform sampler2D texture4;
uniform sampler2D texture5;
'''
shader_uniforms = '''
uniform vec2 resolution;
uniform float time;
'''
shader_footer_trivial = '''
void main (void){
gl_FragColor = frag_color * texture2D(texture0, tex_coord0);
}
'''
shader_footer_effect = '''
void main (void){
vec4 normal_color = frag_color * texture2D(texture0, tex_coord0);
vec4 effect_color = effect(normal_color, texture0, tex_coord0,
gl_FragCoord.xy);
gl_FragColor = effect_color;
}
'''
effect_trivial = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return color;
}
'''
effect_monochrome = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
float mag = 1.0/3.0 * (color.x + color.y + color.z);
return vec4(mag, mag, mag, color.w);
}
'''
effect_red = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return vec4(color.x, 0.0, 0.0, 1.0);
}
'''
effect_green = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return vec4(0.0, color.y, 0.0, 1.0);
}
'''
effect_blue = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return vec4(0.0, 0.0, color.z, 1.0);
}
'''
effect_invert = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return vec4(1.0 - color.xyz, 1.0);
}
'''
effect_mix = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return vec4(color.z, color.x, color.y, 1.0);
}
'''
effect_flash = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
return color * abs(sin(time));
}
'''
effect_blur_h = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
float dt = 0.5 * 1.0 / resolution.x;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x - 4.0*dt, tex_coords.y)) * 0.05;
sum += texture2D(texture, vec2(tex_coords.x - 3.0*dt, tex_coords.y)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x - 2.0*dt, tex_coords.y)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x - dt, tex_coords.y)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y)) * 0.16;
sum += texture2D(texture, vec2(tex_coords.x + dt, tex_coords.y)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x + 2.0*dt, tex_coords.y)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x + 3.0*dt, tex_coords.y)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x + 4.0*dt, tex_coords.y)) * 0.05;
return sum;
}
'''
effect_blur_v = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
float dt = 0.5 * 1.0 / resolution.y;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 4.0*dt)) * 0.05;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 3.0*dt)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 2.0*dt)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - dt)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y)) * 0.16;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + dt)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 2.0*dt)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 3.0*dt)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 4.0*dt)) * 0.05;
return sum;
}
'''
effect_postprocessing = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
vec2 q = tex_coords * vec2(1, -1);
vec2 uv = 0.5 + (q-0.5);//*(0.9);// + 0.1*sin(0.2*time));
vec3 oricol = texture2D(texture,vec2(q.x,1.0-q.y)).xyz;
vec3 col;
col.r = texture2D(texture,vec2(uv.x+0.003,-uv.y)).x;
col.g = texture2D(texture,vec2(uv.x+0.000,-uv.y)).y;
col.b = texture2D(texture,vec2(uv.x-0.003,-uv.y)).z;
col = clamp(col*0.5+0.5*col*col*1.2,0.0,1.0);
//col *= 0.5 + 0.5*16.0*uv.x*uv.y*(1.0-uv.x)*(1.0-uv.y);
col *= vec3(0.8,1.0,0.7);
col *= 0.9+0.1*sin(10.0*time+uv.y*1000.0);
col *= 0.97+0.03*sin(110.0*time);
float comp = smoothstep( 0.2, 0.7, sin(time) );
//col = mix( col, oricol, clamp(-2.0+2.0*q.x+3.0*comp,0.0,1.0) );
return vec4(col,1.0);
}
'''
effect_plasma = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
float x = coords.x;
float y = coords.y;
float mov0 = x+y+cos(sin(time)*2.)*100.+sin(x/100.)*1000.;
float mov1 = y / resolution.y / 0.2 + time;
float mov2 = x / resolution.x / 0.2;
float c1 = abs(sin(mov1+time)/2.+mov2/2.-mov1-mov2+time);
float c2 = abs(sin(c1+sin(mov0/1000.+time)+sin(y/40.+time)+
sin((x+y)/100.)*3.));
float c3 = abs(sin(c2+cos(mov1+mov2+c2)+cos(mov2)+sin(x/1000.)));
return vec4( 0.5*(c1 + color.z), 0.5*(c2 + color.x),
0.5*(c3 + color.y), 1.0);
}
'''
effect_pixelate = '''
vec4 effect(vec4 vcolor, sampler2D texture, vec2 texcoord, vec2 pixel_coords)
{
if (texture2D(texture4, texcoord).x <= 0.50)
return vcolor;
vec2 pixelSize = 10.0 / resolution;
vec2 xy = floor(texcoord/pixelSize)*pixelSize + pixelSize/2.0;
return texture2D(texture, xy);
}
'''
effect_waterpaint = '''
/*
Themaister's Waterpaint shader
Placed in the public domain.
(From this thread: http://board.byuu.org/viewtopic.php?p=30483#p30483
PD declaration here: http://board.byuu.org/viewtopic.php?p=30542#p30542 )
modified by slime73 for use with love2d and mari0
*/
vec4 compress(vec4 in_color, float threshold, float ratio)
{
vec4 diff = in_color - vec4(threshold);
diff = clamp(diff, 0.0, 100.0);
return in_color - (diff * (1.0 - 1.0/ratio));
}
vec4 effect( vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
if (texture2D(texture4, tex_coords).x <= 0.50)
return color;
vec2 textureSize = resolution;
float x = 0.5 * (1.0 / textureSize.x);
float y = 0.5 * (1.0 / textureSize.y);
vec2 dg1 = vec2( x, y);
vec2 dg2 = vec2(-x, y);
vec2 dx = vec2(x, 0.0);
vec2 dy = vec2(0.0, y);
vec3 c00 = texture2D(texture, tex_coords - dg1).xyz;
vec3 c01 = texture2D(texture, tex_coords - dx).xyz;
vec3 c02 = texture2D(texture, tex_coords + dg2).xyz;
vec3 c10 = texture2D(texture, tex_coords - dy).xyz;
vec3 c11 = texture2D(texture, tex_coords).xyz;
vec3 c12 = texture2D(texture, tex_coords + dy).xyz;
vec3 c20 = texture2D(texture, tex_coords - dg2).xyz;
vec3 c21 = texture2D(texture, tex_coords + dx).xyz;
vec3 c22 = texture2D(texture, tex_coords + dg1).xyz;
vec2 texsize = textureSize;
vec3 first = mix(c00, c20, fract(tex_coords.x * texsize.x + 0.5));
vec3 second = mix(c02, c22, fract(tex_coords.x * texsize.x + 0.5));
vec3 mid_horiz = mix(c01, c21, fract(tex_coords.x * texsize.x + 0.5));
vec3 mid_vert = mix(c10, c12, fract(tex_coords.y * texsize.y + 0.5));
vec3 res = mix(first, second, fract(tex_coords.y * texsize.y + 0.5));
vec4 final = vec4(0.26 * (res + mid_horiz + mid_vert) + 3.5 * abs(res - mix(mid_horiz, mid_vert, 0.5)), 1.0);
final = compress(final, 0.8, 5.0);
final.a = 1.0;
return final;
}
'''
effect_fxaa = '''
vec4 effect( vec4 color, sampler2D buf0, vec2 texCoords, vec2 coords)
{
return vec4(texture2D(texture0, texCoords));
return vec4(texture2D(texture3, texCoords).g , 0, 0, 1.0);
if (texture2D(texture4, texCoords).x <= 0.50)
return color;
vec2 frameBufSize = resolution;
float FXAA_SPAN_MAX = 8.0;
float FXAA_REDUCE_MUL = 1.0/8.0;
float FXAA_REDUCE_MIN = 1.0/128.0;
vec3 rgbNW=texture2D(buf0,texCoords+(vec2(-1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbNE=texture2D(buf0,texCoords+(vec2(1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbSW=texture2D(buf0,texCoords+(vec2(-1.0,1.0)/frameBufSize)).xyz;
vec3 rgbSE=texture2D(buf0,texCoords+(vec2(1.0,1.0)/frameBufSize)).xyz;
vec3 rgbM=texture2D(buf0,texCoords).xyz;
vec3 luma=vec3(0.299, 0.587, 0.114);
float lumaNW = dot(rgbNW, luma);
float lumaNE = dot(rgbNE, luma);
float lumaSW = dot(rgbSW, luma);
float lumaSE = dot(rgbSE, luma);
float lumaM = dot(rgbM, luma);
float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
vec2 dir;
dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
float dirReduce = max(
(lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL),
FXAA_REDUCE_MIN);
float rcpDirMin = 1.0/(min(abs(dir.x), abs(dir.y)) + dirReduce);
dir = min(vec2( FXAA_SPAN_MAX, FXAA_SPAN_MAX),
max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX),
dir * rcpDirMin)) / frameBufSize;
vec3 rgbA = (1.0/2.0) * (
texture2D(buf0, texCoords.xy + dir * (1.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (2.0/3.0 - 0.5)).xyz);
vec3 rgbB = rgbA * (1.0/2.0) + (1.0/4.0) * (
texture2D(buf0, texCoords.xy + dir * (0.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (3.0/3.0 - 0.5)).xyz);
float lumaB = dot(rgbB, luma);
vec4 return_color;
if((lumaB < lumaMin) || (lumaB > lumaMax)){
return_color = vec4(rgbA, color.w);
}else{
return_color = vec4(rgbB, color.w);
}
return return_color;
}
'''
effect_bloom = '''
vec4 effect( vec4 color, sampler2D bgl_RenderedTexture, vec2 texcoord, vec2 coords)
{
if (texture2D(texture4, texcoord).x <= 0.50)
return color;
vec4 sum = vec4(0);
vec4 return_color;
//vec2 texcoord = vec2(gl_TexCoord[0]);
int j;
int i;
//float glow_threshold = 0.25;
float glow_threshold = 0.23;
//float r_color = texture2D(texture4, texcoord).x;
for( i= -4 ;i < 4; i++)
{
for (j = -3; j < 3; j++)
{
sum += texture2D(bgl_RenderedTexture, texcoord + vec2(j, i)*0.004) * glow_threshold;
}
}
if (texture2D(bgl_RenderedTexture, texcoord).r < 0.3)
{
return_color = sum*sum*0.012 + texture2D(bgl_RenderedTexture, texcoord);
}
else
{
if (texture2D(bgl_RenderedTexture, texcoord).r < 0.5)
{
return_color = sum*sum*0.009 + texture2D(bgl_RenderedTexture, texcoord);
}
else
{
return_color = sum*sum*0.0075 + texture2D(bgl_RenderedTexture, texcoord);
}
}
return_color.a = 1.0;
return return_color;
}
'''
effect_dop = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
//if (texture2D(texture4, tex_coords).x <= 0.50)
// return color;
float value = 0.5;
float depth = texture2D(texture5, tex_coords).z;
value = 4.0 * (1.0 - depth);
value = 2.5-((1.0-depth)*500.0);
//value = 2.0 - depth*3.0;
//if (value < 4.0) value = 0.0;
//if (value > 5.0) value = 5.0;
if (depth < 1.0) value = 0.0;
//value = -30.0 + depth*20.0;
value = (depth)*4.0;
float dt = value * 1.0 / resolution.x;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x - 4.0*dt, tex_coords.y)) * 0.05;
sum += texture2D(texture, vec2(tex_coords.x - 3.0*dt, tex_coords.y)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x - 2.0*dt, tex_coords.y)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x - dt, tex_coords.y)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y)) * 0.16;
sum += texture2D(texture, vec2(tex_coords.x + dt, tex_coords.y)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x + 2.0*dt, tex_coords.y)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x + 3.0*dt, tex_coords.y)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x + 4.0*dt, tex_coords.y)) * 0.05;
return sum;
}
'''
effect_dop_v = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
//if (texture2D(texture4, tex_coords).x <= 0.50)
// return color;
float value = 0.5;
float depth = texture2D(texture5, tex_coords).z;
value = 4.0 * (1.0 - depth);
value = 2.5-((1.0-depth)*500.0);
//value = 2.0 - (depth)*3.0;
//if (value < 4.0) value = 0.0;
//if (value > 5.0) value = 5.0;
if (depth < 1.0) value = 0.0;
//value = -30.0 + depth*20.0;
value = (depth)*4.0;
float dt = value * 1.0 / resolution.x;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 4.0*dt)) * 0.05;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 3.0*dt)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 2.0*dt)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - dt)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y)) * 0.16;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + dt)) * 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 2.0*dt)) * 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 3.0*dt)) * 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 4.0*dt)) * 0.05;
return sum;
}
'''
effect_motion = '''
vec4 effect(vec4 color, sampler2D motion, vec2 uv, vec2 uv2)
{
//if (texture2D(texture4, uv).x <= 0.50)
// return color;
//vec2 speed = 2. * texture2D(texture5, uv).rg - 1.;
vec3 speedInfo = texture2D(texture5, uv).rgb;
vec2 speed = (2. * speedInfo.xy - 1.) * pow(speedInfo.z, 2.);
float intensity = 0.13;
vec2 offset = intensity * speed;
vec3 c = vec3(0.);
float inc = 0.05;
float weight = 0.;
for (float i = 0.; i <= 1.; i += inc)
{
c += texture2D(motion, uv + i * offset).rgb;
weight += 1.;
}
c /= weight;
return vec4(c, 1.);
return vec4(1);
}
'''
effect_bloom_b = '''
vec4 effect( vec4 color, sampler2D bgl_RenderedTexture, vec2 texcoord, vec2 coords)
{
//if (texture2D(texture4, texcoord).x <= 0.50)
// return color;
vec4 sum = vec4(0);
vec4 return_color;
//vec2 texcoord = vec2(gl_TexCoord[0]);
int j;
int i;
//float glow_threshold = 0.25;
float glow_threshold = 0.10;
//float glow_threshold = time/10.0;
//float r_color = texture2D(texture4, texcoord).x;
for( i= -4 ;i < 4; i++)
{
for (j = -3; j < 3; j++)
{
sum += texture2D(bgl_RenderedTexture, texcoord + vec2(j, i)*0.004) * glow_threshold;
}
}
if (texture2D(bgl_RenderedTexture, texcoord).r < 0.3)
{
return_color = sum*sum*0.012 + texture2D(bgl_RenderedTexture, texcoord);
}
else
{
if (texture2D(bgl_RenderedTexture, texcoord).r < 0.5)
{
return_color = sum*sum*0.009 + texture2D(bgl_RenderedTexture, texcoord);
}
else
{
return_color = sum*sum*0.0075 + texture2D(bgl_RenderedTexture, texcoord);
}
}
return_color.a = 1.0;
return return_color;
}
'''
# Fixed off-screen render resolution (width, height) in pixels.  Every Fbo
# in this module renders at this size regardless of the widget's own size.
C_SIZE = (1366, 768)
class EffectFbo(Fbo):
    """An Fbo that can swap in a new fragment shader at runtime.

    If the replacement shader fails to compile, the previous shader is
    restored before an exception is raised, so the Fbo never ends up in a
    broken state.
    """

    def __init__(self, *args, **kwargs):
        super(EffectFbo, self).__init__(*args, **kwargs)
        # Rectangle that draws the previous pipeline stage's texture;
        # assigned later by the owning EffectWidget.
        self.texture_rectangle = None

    def set_fs(self, value):
        """Install ``value`` as this Fbo's fragment shader source.

        Rolls back to the previous shader and raises if compilation of
        the new source fails.
        """
        target = self.shader
        previous_source = target.fs
        target.fs = value
        if target.success:
            return
        target.fs = previous_source
        raise Exception('failed')
class EffectWidget(FloatLayout):
    """Layout that renders its children into an Fbo chain and applies one
    GLSL effect per entry of `effects`, exposing the final result as
    `texture`."""

    # Fragment shader source for the widget's own RenderContext.
    fs = StringProperty(None)

    # Texture of the final Fbo
    texture = ObjectProperty(None)

    # Rectangle clearing Fbo
    fbo_rectangle = ObjectProperty(None)

    # List of effect strings
    effects = ListProperty([])

    # One extra Fbo for each effect
    fbo_list = ListProperty([])

    # NOTE(review): `effect_mask` is never read anywhere in this class;
    # __init__ stores the corresponding kwarg under `self.mask_effect`
    # instead -- confirm which attribute name is intended.
    effect_mask = None
    motion_effect = None

    def __init__(self, **kwargs):
        # Make sure opengl context exists
        EventLoop.ensure_window()

        self.mask_effect = kwargs.get("mask_effect", None)
        self.motion_effect = kwargs.get("motion_effect", None)
        # NOTE(review): `fbo_canvas` is read from the "motion_effect"
        # kwarg -- the same key as `motion_effect` above.  This looks like
        # a copy/paste slip; confirm the intended kwarg name.
        self.fbo_canvas = kwargs.get("motion_effect", None)

        # The widget draws through its own RenderContext so a custom
        # fragment shader can be attached via `fs`.
        self.canvas = RenderContext(use_parent_projection=True,
                                    use_parent_modelview=True,
                                    with_depthbuffer=True)

        self.size = C_SIZE
        with self.canvas:
            #self._viewport = Rectangle(size=(800,600), pos=self.pos)
            self.fbo = Fbo(size=C_SIZE, with_depthbuffer=True, compute_normal_mat=True,
                           clear_color=(0.3, 0.3, 0.7, 1))

        # Before drawing children: shift them to Fbo coordinates and bind
        # the auxiliary textures (canvas=1, mask=4, motion=5) that the
        # effect shaders sample.
        with self.fbo.before:
            #Rectangle(size=(800, 600))
            PushMatrix()
            self.fbo_translation = Translate(-self.x, -self.y, 0)
            BindTexture(texture=self.fbo_canvas.texture, index=1)
            BindTexture(texture=self.mask_effect.texture, index=4)
            BindTexture(texture=self.motion_effect.texture, index=5)

        with self.fbo:
            Color(0, 0, 0)
            BindTexture(texture=self.fbo_canvas.texture, index=1)
            BindTexture(texture=self.mask_effect.texture, index=4)
            BindTexture(texture=self.motion_effect.texture, index=5)
            self.fbo_rectangle = Rectangle(size=C_SIZE)
            self._instructions = InstructionGroup()

        with self.fbo.after:
            PopMatrix()
            self.cbs = Callback(self.reset_gl_context)

        super(EffectWidget, self).__init__(**kwargs)
        self.size = C_SIZE

        # Push the time/resolution/texture-unit uniforms every frame.
        Clock.schedule_interval(self.update_glsl, 0)

        self._instructions.add(Callback(self.setup_gl_context))

        self.refresh_fbo_setup()
        # Keep every effect Fbo redrawing each frame.
        Clock.schedule_interval(self.update_fbos, 0)

    def on_pos(self, *args):
        # Keep the Fbo translation in sync so children render at origin.
        self.fbo_translation.x = -self.x
        self.fbo_translation.y = -self.y

    def on_size(self, instance, value):
        # Render size is pinned to C_SIZE; only the Fbo plumbing is
        # refreshed when the widget is resized.
        self.fbo.size = C_SIZE
        self.fbo_rectangle.size = C_SIZE
        self.refresh_fbo_setup()
        #self._viewport.texture = self.fbo.texture
        #self._viewport.size = value

    def setup_gl_context(self, *args):
        # Enable depth testing while the children are drawn into the Fbo.
        glEnable(GL_DEPTH_TEST)
        self.fbo.clear_buffer()
        #for fbo in self.fbo_list:
        #    fbo.clear_buffer()

    def reset_gl_context(self, *args):
        glDisable(GL_DEPTH_TEST)

    def update_glsl(self, *largs):
        """Feed time/resolution and texture-unit uniforms to every shader."""
        time = Clock.get_boottime()
        resolution = [float(size) for size in C_SIZE]
        self.canvas['time'] = time
        self.canvas['resolution'] = resolution
        self.canvas['texture4'] = 4
        self.canvas['texture5'] = 5
        self.canvas['texture1'] = 1
        for fbo in self.fbo_list:
            fbo['time'] = time
            fbo['resolution'] = resolution
            fbo['texture4'] = 4
            fbo['texture5'] = 5
            fbo['texture1'] = 1

    def on_effects(self, *args):
        self.refresh_fbo_setup()

    def update_fbos(self, *args):
        for fbo in self.fbo_list:
            fbo.ask_update()

    def refresh_fbo_setup(self, *args):
        """Rebuild the Fbo chain: one EffectFbo per effect, each sampling
        the previous stage's texture."""
        # Add/remove fbos until there is one per effect
        while len(self.fbo_list) < len(self.effects):
            with self.canvas:
                new_fbo = EffectFbo(size=C_SIZE)
            with new_fbo:
                Color(1, 1, 1, 1)
                new_fbo.texture_rectangle = Rectangle(
                    size=C_SIZE)
                new_fbo.texture_rectangle.size = C_SIZE
            self.fbo_list.append(new_fbo)
        while len(self.fbo_list) > len(self.effects):
            old_fbo = self.fbo_list.pop()
            self.canvas.remove(old_fbo)

        # Do resizing etc.
        self.fbo.size = C_SIZE
        self.fbo_rectangle.size = C_SIZE
        for i in range(len(self.fbo_list)):
            self.fbo_list[i].size = C_SIZE
            self.fbo_list[i].texture_rectangle.size = C_SIZE

        # If there are no effects, just draw our main fbo
        if len(self.fbo_list) == 0:
            self.texture = self.fbo.texture
            return

        # Chain each Fbo to sample the previous Fbo's output.
        for i in range(1, len(self.fbo_list)):
            fbo = self.fbo_list[i]
            fbo.texture_rectangle.texture = self.fbo_list[i - 1].texture

        # Compile each effect into a full fragment shader for its Fbo.
        for effect, fbo in zip(self.effects, self.fbo_list):
            fbo.set_fs(shader_header + shader_uniforms + effect +
                       shader_footer_effect)

        self.fbo_list[0].texture_rectangle.texture = self.fbo.texture
        self.texture = self.fbo_list[-1].texture

    def on_fs(self, instance, value):
        # set the fragment shader to our source code
        shader = self.canvas.shader
        old_value = shader.fs
        shader.fs = value
        if not shader.success:
            # Roll back so the canvas keeps a working shader.
            shader.fs = old_value
            raise Exception('failed')

    def add_widget(self, widget):
        # Add the widget to our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).add_widget(widget)
        #self._instructions.add(widget.canvas)
        self.canvas = c

    def remove_widget(self, widget):
        # Remove the widget from our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).remove_widget(widget)
        self.canvas = c

    def clear_widgets(self, children=None):
        # Clear widgets from our Fbo instead of the normal canvas
        c = self.canvas
        self.canvas = self.fbo
        super(EffectWidget, self).clear_widgets(children)
        self.canvas = c
class BlurEffectWidget(EffectWidget):
    """EffectWidget preconfigured with an FXAA pass followed by a bloom pass."""
    # Alternative effect chains kept from development:
    #effects = ListProperty([effect_fxaa])
    #effects = ListProperty([effect_bloom_b, effect_fxaa, effect_dop])
    effects = ListProperty([effect_fxaa, effect_bloom_b])
    #effects = ListProperty([effect_motion])
    #effects = ListProperty()
|
kpiorno/kivy3dgui
|
kivy3dgui/effectwidget.py
|
Python
|
mit
| 24,600
|
import os
import imghdr
from collections import namedtuple
import numpy as np
from .. import io, img_as_ubyte
from ..transform import resize
from ..color import color_dict
from ..io.util import file_or_url_context, is_url
from ..io.collection import ImageCollection
import six
from six.moves.urllib import request
urlopen = request.urlopen
# Convert colors from `skimage.color` to uint8 and allow access through
# dict or a named tuple.  `int(255 * c + 0.5)` rounds each float channel
# [0, 1] to the nearest integer in [0, 255].
color_dict = dict((name, tuple(int(255 * c + 0.5) for c in rgb))
                  for name, rgb in six.iteritems(color_dict))
colors = namedtuple('colors', color_dict.keys())(**color_dict)
def open(path):
    """Return Picture object from the given image path."""
    picture = Picture(path)
    return picture
def _verify_picture_index(index):
"""Raise error if picture index is not a 2D index/slice."""
if not (isinstance(index, tuple) and len(index) == 2):
raise IndexError("Expected 2D index but got {0!r}".format(index))
if all(isinstance(i, int) for i in index):
return index
# In case we need to fix the array index, convert tuple to list.
index = list(index)
for i, dim_slice in enumerate(index):
# If either index is a slice, ensure index object returns 2D array.
if isinstance(dim_slice, int):
index[i] = dim_slice = slice(dim_slice, dim_slice + 1)
return tuple(index)
def rgb_transpose(array):
    """Return RGB array with first 2 axes transposed.

    The channel axis (last axis) is left in place.
    """
    axes = (1, 0, 2)
    return np.transpose(array, axes)
def array_to_xy_origin(image):
    """Return view of image transformed from array to Cartesian origin.

    Flips the row axis (so y grows upward) and then swaps the first two
    axes, keeping the channel axis last.
    """
    flipped_rows = image[::-1]
    return np.transpose(flipped_rows, (1, 0, 2))
def xy_to_array_origin(image):
    """Return view of image transformed from Cartesian to array origin.

    Inverse of ``array_to_xy_origin``: flips the second axis and swaps
    the first two axes, keeping the channel axis last.
    """
    flipped_cols = image[:, ::-1]
    return np.transpose(flipped_cols, (1, 0, 2))
class Pixel(object):
    """A single pixel in a Picture.

    Attributes
    ----------
    pic : Picture
        The Picture object that this pixel references.
    array : array_like
        Byte array with raw image data (RGB).  Accepted for interface
        compatibility but not stored; writes go back through
        ``pic.xy_array`` instead.
    x : int
        Horizontal coordinate of this pixel (left = 0).
    y : int
        Vertical coordinate of this pixel (bottom = 0).
    rgb : tuple
        RGB tuple with red, green, and blue components (0-255)
    alpha : int
        Transparency component (0-255), 255 (opaque) by default
    """

    def __init__(self, pic, array, x, y, rgb, alpha=255):
        self._picture = pic
        self._x = x
        self._y = y
        self._red = self._validate(rgb[0])
        self._green = self._validate(rgb[1])
        self._blue = self._validate(rgb[2])
        self._alpha = self._validate(alpha)

    @property
    def x(self):
        """Horizontal location of this pixel in the parent image(left = 0)."""
        return self._x

    @property
    def y(self):
        """Vertical location of this pixel in the parent image (bottom = 0)."""
        return self._y

    @property
    def red(self):
        """The red component of the pixel (0-255)."""
        return self._red

    @red.setter
    def red(self, value):
        self._red = self._validate(value)
        self._setpixel()

    @property
    def green(self):
        """The green component of the pixel (0-255)."""
        return self._green

    @green.setter
    def green(self, value):
        self._green = self._validate(value)
        self._setpixel()

    @property
    def blue(self):
        """The blue component of the pixel (0-255)."""
        return self._blue

    @blue.setter
    def blue(self, value):
        self._blue = self._validate(value)
        self._setpixel()

    @property
    def alpha(self):
        """The transparency component of the pixel (0-255)."""
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        self._alpha = self._validate(value)
        self._setpixel()

    @property
    def rgb(self):
        """The RGB color components of the pixel (3 values 0-255)."""
        return (self.red, self.green, self.blue)

    @rgb.setter
    def rgb(self, value):
        # Accept a 4-tuple here as well; delegate to the rgba setter.
        if len(value) == 4:
            self.rgba = value
        else:
            # A 3-tuple resets alpha to fully opaque.
            self._red, self._green, self._blue \
                = (self._validate(v) for v in value)
            self._alpha = 255
            self._setpixel()

    @property
    def rgba(self):
        """The RGB color and transparency components of the pixel
        (4 values 0-255).
        """
        return (self.red, self.green, self.blue, self.alpha)

    @rgba.setter
    def rgba(self, value):
        self._red, self._green, self._blue, self._alpha \
            = (self._validate(v) for v in value)
        self._setpixel()

    def _validate(self, value):
        """Verifies that the pixel value is in [0, 255]."""
        try:
            value = int(value)
            if (value < 0) or (value > 255):
                raise ValueError()
        except ValueError:
            msg = "Expected an integer between 0 and 255, but got {0} instead!"
            raise ValueError(msg.format(value))
        return value

    def _setpixel(self):
        # Write RGB + alpha back into the parent picture and mark it dirty.
        self._picture.xy_array[self._x, self._y] = self.rgba
        self._picture._array_modified()

    def __eq__(self, other):
        if isinstance(other, Pixel):
            return self.rgba == other.rgba
        # BUG FIX: previously fell off the end and returned None for
        # non-Pixel operands; NotImplemented lets Python fall back to the
        # default comparison (identity), so `pixel == other` is a bool.
        return NotImplemented

    def __repr__(self):
        args = self.red, self.green, self.blue, self.alpha
        return "Pixel(red={0}, green={1}, blue={2}, alpha={3})".format(*args)
class Picture(object):
    """A 2-D picture made up of pixels.

    Attributes
    ----------
    path : str
        Path to an image file to load / URL of an image
    array : array
        Raw RGB or RGBA image data [0-255], with origin at top-left.
    xy_array : array
        Raw RGB or RGBA image data [0-255], with origin at bottom-left.

    Examples
    --------
    Load an image from a file:

    >>> from skimage import novice
    >>> from skimage import data
    >>> picture = novice.open(data.data_dir + '/chelsea.png')

    Load an image from a URL (the URL must start with ``http(s)://`` or
    ``ftp(s)://``):

    >>> picture = novice.open('http://scikit-image.org/_static/img/logo.png')

    Create a blank 100 pixel wide, 200 pixel tall white image:

    >>> pic = Picture.from_size((100, 200), color=(255, 255, 255))

    Use numpy to make an RGB byte array (shape is height x width x 3):

    >>> import numpy as np
    >>> data = np.zeros(shape=(200, 100, 3), dtype=np.uint8)
    >>> data[:, :, 0] = 255  # Set red component to maximum
    >>> pic = Picture(array=data)

    Get the bottom-left pixel:

    >>> pic[0, 0]
    Pixel(red=255, green=0, blue=0, alpha=255)

    Get the top row of the picture:

    >>> pic[:, pic.height-1]
    Picture(100 x 1)

    Set the bottom-left pixel to black:

    >>> pic[0, 0] = (0, 0, 0)

    Set the top row to red:

    >>> pic[:, pic.height-1] = (255, 0, 0)
    """

    def __init__(self, path=None, array=None, xy_array=None):
        self._modified = False
        self._path = None
        self._format = None

        n_args = len([a for a in [path, array, xy_array] if a is not None])
        if n_args != 1:
            msg = "Must provide a single keyword arg (path, array, xy_array)."
            # BUG FIX: the ValueError was constructed but never raised,
            # making this argument check a silent no-op.
            raise ValueError(msg)
        elif path is not None:
            if not is_url(path):
                path = os.path.abspath(path)
            self._path = path
            with file_or_url_context(path) as context:
                self.array = img_as_ubyte(io.imread(context))
                self._format = imghdr.what(context)
        elif array is not None:
            self.array = array
        elif xy_array is not None:
            self.xy_array = xy_array

        # Force RGBA internally (use max alpha)
        if self.array.shape[-1] == 3:
            self.array = np.insert(self.array, 3, values=255, axis=2)

    @staticmethod
    def from_size(size, color='black'):
        """Return a Picture of the specified size and a uniform color.

        Parameters
        ----------
        size : tuple
            Width and height of the picture in pixels.
        color : tuple or str
            RGB or RGBA tuple with the fill color for the picture [0-255] or
            a valid key in `color_dict`.
        """
        if isinstance(color, six.string_types):
            color = color_dict[color]
        rgb_size = tuple(size) + (len(color),)
        color = np.array(color, dtype=np.uint8)
        array = np.ones(rgb_size, dtype=np.uint8) * color

        # Force RGBA internally (use max alpha)
        if array.shape[-1] == 3:
            array = np.insert(array, 3, values=255, axis=2)
        return Picture(array=array)

    @property
    def array(self):
        """Image data stored as numpy array."""
        return self._array

    @array.setter
    def array(self, array):
        self._array = array.astype(np.uint8)
        self._xy_array = array_to_xy_origin(self._array)
        # Keep a pristine copy so reset()/compare() can restore/show it.
        self._array_backup = self._array.copy()

    @property
    def xy_array(self):
        """Image data stored as numpy array with origin at the bottom-left."""
        return self._xy_array

    @xy_array.setter
    def xy_array(self, array):
        self._xy_array = array
        self._array = xy_to_array_origin(array)

    def save(self, path):
        """Saves the picture to the given path.

        Parameters
        ----------
        path : str
            Path (with file extension) where the picture is saved.
        """
        io.imsave(path, self.array)
        self._modified = False
        self._path = os.path.abspath(path)
        self._format = imghdr.what(path)

    def reset(self):
        """Reset image to its original state, removing modifications.
        """
        self.array = self._array_backup

    @property
    def path(self):
        """The path to the picture."""
        return self._path

    @property
    def modified(self):
        """True if the picture has changed."""
        return self._modified

    def _array_modified(self):
        # Called by Pixel and __setitem__ after any in-place mutation.
        self._modified = True
        self._path = None

    @property
    def format(self):
        """The image format of the picture."""
        return self._format

    @property
    def size(self):
        """The size (width, height) of the picture."""
        return self.xy_array.shape[:2]

    @size.setter
    def size(self, value):
        # Don't resize if no change in size
        if (value[0] != self.width) or (value[1] != self.height):
            # skimage dimensions are flipped: y, x
            new_size = (int(value[1]), int(value[0]))
            new_array = resize(self.array, new_size, order=0,
                               preserve_range=True)
            self.array = new_array.astype(np.uint8)
            self._array_modified()

    @property
    def width(self):
        """The width of the picture."""
        return self.size[0]

    @width.setter
    def width(self, value):
        self.size = (value, self.height)

    @property
    def height(self):
        """The height of the picture."""
        return self.size[1]

    @height.setter
    def height(self, value):
        self.size = (self.width, value)

    def show(self):
        """Display the image."""
        io.imshow(self.array)
        io.show()

    def compare(self):
        """Compare the image to its unmodified version."""
        # BUG FIX: removed dead `ImageCollection` construction; it was
        # built but never used (the raw list is displayed directly).
        images = [self._array_backup, self.array]
        io.imshow_collection(images)
        io.show()

    def _makepixel(self, x, y):
        """Create a Pixel object for a given x, y location."""
        rgb = self.xy_array[x, y]
        return Pixel(self, self.array, x, y, rgb)

    def _get_channel(self, channel):
        """Return a specific dimension out of the raw image data slice."""
        return self._array[:, :, channel]

    def _set_channel(self, channel, value):
        """Set a specific dimension in the raw image data slice."""
        self._array[:, :, channel] = value

    @property
    def red(self):
        """The red component of the pixel (0-255)."""
        return self._get_channel(0).ravel()

    @red.setter
    def red(self, value):
        self._set_channel(0, value)

    @property
    def green(self):
        """The green component of the pixel (0-255)."""
        return self._get_channel(1).ravel()

    @green.setter
    def green(self, value):
        self._set_channel(1, value)

    @property
    def blue(self):
        """The blue component of the pixel (0-255)."""
        return self._get_channel(2).ravel()

    @blue.setter
    def blue(self, value):
        self._set_channel(2, value)

    @property
    def alpha(self):
        """The transparency component of the pixel (0-255)."""
        return self._get_channel(3).ravel()

    @alpha.setter
    def alpha(self, value):
        self._set_channel(3, value)

    @property
    def rgb(self):
        """The RGB color components of the pixel (3 values 0-255)."""
        return self.xy_array[:, :, :3]

    @rgb.setter
    def rgb(self, value):
        self.xy_array[:, :, :3] = value

    @property
    def rgba(self):
        """The RGBA color components of the pixel (4 values 0-255)."""
        return self.xy_array

    @rgba.setter
    def rgba(self, value):
        self.xy_array[:] = value

    def __iter__(self):
        """Iterates over all pixels in the image."""
        for x in range(self.width):
            for y in range(self.height):
                yield self._makepixel(x, y)

    def __getitem__(self, xy_index):
        """Return `Picture`s for slices and `Pixel`s for indexes."""
        xy_index = _verify_picture_index(xy_index)
        if all(isinstance(index, int) for index in xy_index):
            return self._makepixel(*xy_index)
        else:
            return Picture(xy_array=self.xy_array[xy_index])

    def __setitem__(self, xy_index, value):
        xy_index = _verify_picture_index(xy_index)
        if isinstance(value, tuple):
            self[xy_index].rgb = value
        elif isinstance(value, Picture):
            self.xy_array[xy_index] = value.xy_array
        else:
            raise TypeError("Invalid value type")
        self._array_modified()

    def __eq__(self, other):
        # NOTE(review): raising for non-Picture operands (rather than
        # returning NotImplemented) is unusual but kept for backward
        # compatibility with callers that may catch it.
        if not isinstance(other, Picture):
            raise NotImplementedError()
        return np.all(self.array == other.array)

    def __repr__(self):
        return "Picture({0} x {1})".format(*self.size)

    def _repr_png_(self):
        return self._repr_image_format('png')

    def _repr_jpeg_(self):
        return self._repr_image_format('jpeg')

    def _repr_image_format(self, format_str):
        # Render the picture to an in-memory buffer for IPython display.
        str_buffer = six.BytesIO()
        io.imsave(str_buffer, self.array, format_str=format_str)
        return_str = str_buffer.getvalue()
        str_buffer.close()
        return return_str
if __name__ == '__main__':
    # Run the docstring examples as a self-test when executed directly.
    import doctest
    doctest.testmod()
|
pratapvardhan/scikit-image
|
skimage/novice/_novice.py
|
Python
|
bsd-3-clause
| 14,924
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Salvatore J. Trimarchi <salvatore@trimarchi.co.cc>
# (http://salvatoreweb.co.cc)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal Honduran chart of accounts that can be use
# to build upon a more complex one. It also includes a chart of taxes and
# the Lempira currency.
#
# This module is based on the Guatemalan chart of accounts:
# Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This module works with OpenERP 6.0
#
# OpenERP module manifest (descriptor dict read by the module loader).
{
    'name': 'Honduras - Accounting',
    'version': '0.1',
    'category': 'Localization/Account Charts',
    'description': """Agrega una nomenclatura contable para Honduras. También incluye impuestos y la moneda Lempira. -- Adds accounting chart for Honduras. It also includes taxes and the Lempira currency""",
    'author': 'Salvatore Josue Trimarchi Pinto',
    'website': 'http://trimarchi.co.cc',
    # Modules that must be installed before this one.
    'depends': ['base', 'account', 'account_chart'],
    # Data loaded only at first installation.
    'init_xml': [],
    # Data (re)loaded on every module update.
    'update_xml': [
        'account_types.xml',
        'account_chart.xml',
        'account_tax.xml',
        'l10n_hn_base.xml',
    ],
    'demo_xml': [],
    'installable': True,
    'certificate': '00188692948445',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ksrajkumar/openerp-6.1
|
openerp/addons/l10n_hn/__openerp__.py
|
Python
|
agpl-3.0
| 2,181
|
import unittest
import selenium.webdriver
from holmium.core import Page, Element, Elements, Locators, ElementMap
class GoogleMain(Page):
    """Page object for the Google landing page.

    The class-level descriptors are evaluated lazily by holmium.core
    against the WebDriver bound at construction time.
    """
    # The main query input field.
    search_box = Element(Locators.NAME, "q", timeout=2)
    # Buttons keyed by their visible value, e.g. "Google Search".
    google_buttons = ElementMap(
        Locators.CSS_SELECTOR, ".jsb input", timeout=2,
        key=lambda e: e.get_attribute("value")
    )
    # Each result mapped to a {"link": ..., "title": ...} dict.
    search_results = Elements(
        Locators.CSS_SELECTOR, "div.g>div.rc", timeout=2,
        value = lambda el : {
            "link": el.find_element_by_css_selector("h3.r>a").get_attribute("href"),
            "title": el.find_element_by_css_selector("h3.r>a").text
        }
    )
    def search ( self, query ):
        """Clear the search box, type `query`, and submit the form.

        NOTE(review): callers chain `page.search(q).search_results`;
        presumably holmium makes page methods returning None yield the
        page instance — confirm against holmium.core docs.
        """
        self.google_buttons["Google Search"].click() # self.google_buttons behaves just like a dictionary
        self.search_box.clear() # self.search_box is now evaluated directly to a WebElement
        self.search_box.send_keys(query)
        self.search_box.submit()
class TextSearchTest(unittest.TestCase):
    """Exercises GoogleMain.search against the live Google site."""
    def setUp(self):
        # Fresh browser session per test keeps state isolated.
        self.driver = selenium.webdriver.Firefox()
        self.page = GoogleMain(self.driver, "http://www.google.com")
    def test_text_search(self):
        """Searching a common term should yield at least one result."""
        self.assertTrue(len(self.page.search("selenium").search_results) > 0)
    def test_text_search_first_result(self):
        """The top hit for 'selenium' is the SeleniumHQ homepage."""
        self.page.search("selenium") # execute the page object method search
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name.
        self.assertEqual(
            self.page.search_results[0]["title"],
            u"Selenium - Web Browser Automation"
        )
        self.assertEqual(
            self.page.search_results[0]["link"],
            u"http://www.seleniumhq.org/"
        )
    def tearDown(self):
        self.driver.quit()
|
kejkz/holmium.core
|
examples/google/test_search_text.py
|
Python
|
mit
| 1,664
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
    """Network security rule.
    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Network protocol this rule applies to. Possible values
     are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp', '*'
    :type protocol: str or :class:`SecurityRuleProtocol
     <azure.mgmt.network.v2016_12_01.models.SecurityRuleProtocol>`
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can
     also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param destination_address_prefix: The destination address prefix. CIDR or
     source IP range. Asterisk '*' can also be used to match all source IPs.
     Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet'
     can also be used.
    :type destination_address_prefix: str
    :param access: The network traffic is allowed or denied. Possible values
     are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
    :type access: str or :class:`SecurityRuleAccess
     <azure.mgmt.network.v2016_12_01.models.SecurityRuleAccess>`
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: The direction of the rule. The direction specifies if
     rule will be evaluated on incoming or outcoming traffic. Possible values
     are: 'Inbound' and 'Outbound'. Possible values include: 'Inbound',
     'Outbound'
    :type direction: str or :class:`SecurityRuleDirection
     <azure.mgmt.network.v2016_12_01.models.SecurityRuleDirection>`
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # Fields the msrest serializer requires to be present.
    _validation = {
        'protocol': {'required': True},
        'source_address_prefix': {'required': True},
        'destination_address_prefix': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }
    # Mapping of Python attributes to wire-format JSON paths/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, protocol, source_address_prefix, destination_address_prefix, access, direction, id=None, description=None, source_port_range=None, destination_port_range=None, priority=None, provisioning_state=None, name=None, etag=None):
        """Initialize a SecurityRule; required arguments mirror `_validation`."""
        super(SecurityRule, self).__init__(id=id)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/security_rule.py
|
Python
|
mit
| 5,400
|
#!/usr/bin/python
# Guillermo Torres MSc,
# ggtorrese@unal.edu.co, guigotoe@gmail.com
# Bioinformatic Research Group
# Biotechnology Institute of National University of Colombia
# 17 August 2011
# update November 2013
#* last update September 2014
#* trimming Ambioguosity "N", only the unambiougosity sequence that represent bigged than 70% of
#* whole sequence is retained, else is not considered for future analysis.
# This is written as part of IN SILCO MICROARRAY pipeline, but could be split to serve to different purposes.
#** How to use ****
#** fieDeb.py clustersimilarity(0-1) infilename
#** example: python fileDeb.py --infa path/nirs.fasta --clusID 0.85 --out genes.fa
#**
#** END ***
import os, time, sys, shutil, re
from tempfile import mkstemp
import subprocess as sub
from optparse import OptionParser
from Bio import SeqIO
## PARSING... ##
#* Options *#
# NOTE(review): this first OptionParser() instance is discarded below when
# `parser` is rebound with the usage string.
parser = OptionParser()
usage = """\nfileDeb exploits the sequence similarities, clustering them, in order to estimate a biological sequence from consensus sequence derived from a multiple alignment using UCLUST\n
REQUIREMENTS:\n** Usearch v 5.2\n
\t%prog --infa IN_fileNAME --culsID 0.85 --out outFileName\n\t%prog {-h --help}\n
Part of in silico Hybridization system by Guillermo G. Torres (ggtorrese@unal.edu.co) - Bioinformatics
Group - IBUN - National University of Colombia"""
parser = OptionParser(usage=usage,version="%prog 1.0")
# Input multi-FASTA of raw gene sequences.
parser.add_option("--infa","--input_fasta",type="string",action="store",dest="multifa",#default='nirs.fasta',#metavar=" infile_NAME",#default='nirs.fasta',
                  help="Input multifasta file with raw genes information (from Data Bases)")
# Base name for all usearch output files (.sorted, .uc, .fa, .log).
parser.add_option("--out","--out_file",type="string",action="store",dest="out_file_name",default='genes.fa',#metavar=" out_file_NAME"
                  help="Out multifasta file name of consensus gene sequences - default name = genes.fa")
# Identity threshold (0-1) passed to usearch --id.
parser.add_option("--clusID","--cluster_id",type="float",action="store",dest="clus_id",default=0.85,#metavar=" Genes clustering identity % (float)",
                  help="Denoising identity percentage - default % value = 0.85")
# When set, output cluster seed sequences instead of consensus sequences.
parser.add_option("-S","--Seed", dest="seed",action="store_true",default=False,
                  help="Extract the seeds sequences from cluster")
(o,args) = parser.parse_args()
if len(args)!=0:
    parser = OptionParser(usage="\n\t%prog --infa IN_fileNAME --culsID 0.85 --out outFileName\n\t%prog {-h --help}")
    parser.error("incorrect number of arguments\n")
path = os.getcwd()
def main():
    '''
    Filter ambiguous bases ("N") out of the input FASTA, then cluster the
    surviving sequences.

    For each record, the longest N-free fragment is kept; the record is
    retained only if that fragment covers at least 70% of the original
    sequence. Retained fragments are written to a temporary FASTA file that
    is handed to clustering() and removed afterwards.
    '''
    inf = o.multifa
    clID = o.clus_id
    out = o.out_file_name
    ## Removing ambiguosities...
    fd,entry = mkstemp(suffix='.tmp',prefix='_',dir=path) # making temporary file tuple
    for seq_record in SeqIO.parse(inf, "fasta"):
        fragments = {}
        lenghts = []
        for fragment in seq_record.seq.split('N'):
            if len(fragment) > 0:
                lenghts.append(len(fragment))
                # NOTE(review): fragments of equal length overwrite each
                # other here; only one of them can win below.
                fragments[len(fragment)] = fragment
        if len(lenghts) > 0:
            longest = max(lenghts)
            fragment_seq = fragments[longest]
            # Fraction of the original sequence covered by the fragment.
            rate = len(fragment_seq)/float(len(seq_record))
            if rate >= 0.7:
                os.write(fd,">%s\n%s\n"%(seq_record.description,fragment_seq)) # filling the temporary file with gene sequence in fasta format
    ## Clustering
    # os.write is unbuffered, so the data is on disk before usearch reads it;
    # the descriptor is closed only after clustering finishes.
    clustering(entry,clID,o.seed)
    os.close(fd)
    os.remove(entry)
    print ("Clustering Done!")
def clustering(inf,clID,out):
    '''
    Sort `inf` with usearch, then cluster it at identity `clID`.

    `out` is actually the --Seed boolean flag (despite its name): truthy
    selects --seedsout (seed sequences), falsy selects --consout
    (consensus sequences). Output file names derive from o.out_file_name.
    '''
    outf = o.out_file_name
    # HACK: shell=True with interpolated paths — fine for trusted local
    # filenames, unsafe for untrusted input.
    sorting = "usearch --sort %s --output %s.sorted" % (inf, outf)
    consenso = "usearch --cluster %s.sorted --id %s --uc %s.uc --consout %s.fa --log %s.log" % (outf, clID, outf, outf, outf)
    seeds = "usearch --cluster %s.sorted --id %s --uc %s.uc --seedsout %s.fa --log %s.log" % (outf,clID,outf,outf,outf)
    sort = sub.Popen(sorting, shell = True, close_fds = True, stdout=sub.PIPE)
    time.sleep(1)
    if sort.wait() == 0:
        # `out` is True/False; `out == 1` holds when the seed flag is set.
        if out == 1:
            cluster = sub.Popen(seeds, shell = True, close_fds = True, stdout=sub.PIPE)
        else:
            cluster = sub.Popen(consenso, shell = True, close_fds = True, stdout=sub.PIPE)
        if cluster.wait() == 0:
            # Remove the intermediate sorted file once clustering succeeds.
            os.system("rm -r %s.sorted"%(outf))
# Script entry point.
if __name__ == "__main__": main()
|
guigotoe/HISS
|
scripts/filedebugger/fileDeb.py
|
Python
|
gpl-3.0
| 4,491
|
# coding: utf-8
from django.conf import settings
from django.http import Http404
from .views import website_page
class WebsitePageMiddleware(object):
    """Fallback middleware: when a view 404s, try serving a website page
    matching the requested path instead."""
    # noinspection PyMethodMayBeStatic
    def process_response(self, request, response):
        """Return the original response unless it is a 404 that a website
        page can satisfy; unexpected errors re-raise only under DEBUG."""
        if response.status_code != 404:
            return response
        # noinspection PyBroadException
        try:
            response = website_page(request, request.path_info)
        except Http404:
            pass  # no page for this path — keep the original 404
        except Exception:
            if settings.DEBUG:
                raise
        return response
|
georgeyk/quickstartup
|
quickstartup/website/middleware.py
|
Python
|
mit
| 586
|
from .core import ImageOptim, NoImagesOptimizedError
|
derrickorama/image_optim
|
image_optim/__init__.py
|
Python
|
mit
| 52
|
# Copyright (C) 2013 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import json
import os
from frozendict import frozendict
from ycmd.utils import ReadFile
# Module-level store for the effective user options; replaced wholesale by
# SetAll and frozen so readers cannot mutate it.
_USER_OPTIONS = {}
def SetAll( new_options ):
  """Replace the global options with an immutable copy of `new_options`."""
  global _USER_OPTIONS
  _USER_OPTIONS = frozendict( new_options )
def GetAll():
  """Return the complete (frozen) options mapping."""
  return _USER_OPTIONS
def Value( key ):
  """Return the option stored under `key`; raises KeyError if unset."""
  return _USER_OPTIONS[ key ]
def LoadDefaults():
  """Reset the global options to the shipped defaults."""
  SetAll( DefaultOptions() )
def DefaultOptions():
  """Load default_settings.json shipped beside this module and return it as
  a dict, with the 'hmac_secret' entry stripped out."""
  module_dir = os.path.dirname( os.path.abspath( __file__ ) )
  settings_path = os.path.join( module_dir, 'default_settings.json' )
  defaults = json.loads( ReadFile( settings_path ) )
  # Never expose the HMAC secret through the defaults.
  defaults.pop( 'hmac_secret', None )
  return defaults
|
snakeleon/YouCompleteMe-x86
|
third_party/ycmd/ycmd/user_options_store.py
|
Python
|
gpl-3.0
| 1,516
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError, AccessError
from odoo.tests import tagged
from odoo.addons.sale_purchase.tests.common import TestCommonSalePurchaseNoChart
@tagged('-at_install', 'post_install')
class TestSalePurchase(TestCommonSalePurchaseNoChart):
    """Integration tests for the sale→purchase bridge: confirming sale orders
    containing 'service to purchase' products must create or update purchase
    orders, while quantity changes and cancellations must either adjust the
    PO or schedule follow-up activities."""
    @classmethod
    def setUpClass(cls, chart_template_ref=None):
        super().setUpClass(chart_template_ref=chart_template_ref)
        # create a generic Sale Order with 2 classical products and a purchase service
        SaleOrder = cls.env['sale.order'].with_context(tracking_disable=True)
        cls.sale_order_1 = SaleOrder.create({
            'partner_id': cls.partner_a.id,
            'partner_invoice_id': cls.partner_a.id,
            'partner_shipping_id': cls.partner_a.id,
            'pricelist_id': cls.company_data['default_pricelist'].id,
        })
        cls.sol1_service_deliver = cls.env['sale.order.line'].create({
            'name': cls.company_data['product_service_delivery'].name,
            'product_id': cls.company_data['product_service_delivery'].id,
            'product_uom_qty': 1,
            'product_uom': cls.company_data['product_service_delivery'].uom_id.id,
            'price_unit': cls.company_data['product_service_delivery'].list_price,
            'order_id': cls.sale_order_1.id,
            'tax_id': False,
        })
        cls.sol1_product_order = cls.env['sale.order.line'].create({
            'name': cls.company_data['product_order_no'].name,
            'product_id': cls.company_data['product_order_no'].id,
            'product_uom_qty': 2,
            'product_uom': cls.company_data['product_order_no'].uom_id.id,
            'price_unit': cls.company_data['product_order_no'].list_price,
            'order_id': cls.sale_order_1.id,
            'tax_id': False,
        })
        # The only line of SO 1 that should trigger a purchase order.
        cls.sol1_service_purchase_1 = cls.env['sale.order.line'].create({
            'name': cls.service_purchase_1.name,
            'product_id': cls.service_purchase_1.id,
            'product_uom_qty': 4,
            'product_uom': cls.service_purchase_1.uom_id.id,
            'price_unit': cls.service_purchase_1.list_price,
            'order_id': cls.sale_order_1.id,
            'tax_id': False,
        })
        cls.sale_order_2 = SaleOrder.create({
            'partner_id': cls.partner_a.id,
            'partner_invoice_id': cls.partner_a.id,
            'partner_shipping_id': cls.partner_a.id,
            'pricelist_id': cls.company_data['default_pricelist'].id,
        })
        cls.sol2_product_deliver = cls.env['sale.order.line'].create({
            'name': cls.company_data['product_delivery_no'].name,
            'product_id': cls.company_data['product_delivery_no'].id,
            'product_uom_qty': 5,
            'product_uom': cls.company_data['product_delivery_no'].uom_id.id,
            'price_unit': cls.company_data['product_delivery_no'].list_price,
            'order_id': cls.sale_order_2.id,
            'tax_id': False,
        })
        cls.sol2_service_order = cls.env['sale.order.line'].create({
            'name': cls.company_data['product_service_order'].name,
            'product_id': cls.company_data['product_service_order'].id,
            'product_uom_qty': 6,
            'product_uom': cls.company_data['product_service_order'].uom_id.id,
            'price_unit': cls.company_data['product_service_order'].list_price,
            'order_id': cls.sale_order_2.id,
            'tax_id': False,
        })
        # The only line of SO 2 that should trigger a purchase order.
        cls.sol2_service_purchase_2 = cls.env['sale.order.line'].create({
            'name': cls.service_purchase_2.name,
            'product_id': cls.service_purchase_2.id,
            'product_uom_qty': 7,
            'product_uom': cls.service_purchase_2.uom_id.id,
            'price_unit': cls.service_purchase_2.list_price,
            'order_id': cls.sale_order_2.id,
            'tax_id': False,
        })
    def test_sale_create_purchase(self):
        """ Confirming 2 sales orders with a service that should create a PO, then cancelling the PO should schedule 1 next activity per SO """
        self.sale_order_1.action_confirm()
        self.sale_order_2.action_confirm()
        purchase_order = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines_so1 = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_line1 = purchase_lines_so1[0]
        purchase_lines_so2 = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_2.order_line.ids)])
        purchase_line2 = purchase_lines_so2[0]
        self.assertEqual(len(purchase_order), 1, "Only one PO should have been created, from the 2 Sales orders")
        self.assertEqual(len(purchase_order.order_line), 2, "The purchase order should have 2 lines")
        self.assertEqual(len(purchase_lines_so1), 1, "Only one SO line from SO 1 should have create a PO line")
        self.assertEqual(len(purchase_lines_so2), 1, "Only one SO line from SO 2 should have create a PO line")
        self.assertEqual(len(purchase_order.activity_ids), 0, "No activity should be scheduled on the PO")
        self.assertEqual(purchase_order.state, 'draft', "The created PO should be in draft state")
        self.assertNotEqual(purchase_line1.product_id, purchase_line2.product_id, "The 2 PO line should have different products")
        self.assertEqual(purchase_line1.product_id, self.sol1_service_purchase_1.product_id, "The create PO line must have the same product as its mother SO line")
        self.assertEqual(purchase_line2.product_id, self.sol2_service_purchase_2.product_id, "The create PO line must have the same product as its mother SO line")
        purchase_order.button_cancel()
        self.assertEqual(len(self.sale_order_1.activity_ids), 1, "One activity should be scheduled on the SO 1 since the PO has been cancelled")
        self.assertEqual(self.sale_order_1.user_id, self.sale_order_1.activity_ids[0].user_id, "The activity should be assigned to the SO responsible")
        self.assertEqual(len(self.sale_order_2.activity_ids), 1, "One activity should be scheduled on the SO 2 since the PO has been cancelled")
        self.assertEqual(self.sale_order_2.user_id, self.sale_order_2.activity_ids[0].user_id, "The activity should be assigned to the SO responsible")
    def test_uom_conversion(self):
        """ Test generated PO use the right UoM according to product configuration """
        self.sale_order_2.action_confirm()
        purchase_line = self.env['purchase.order.line'].search([('sale_line_id', '=', self.sol2_service_purchase_2.id)]) # only one line
        self.assertTrue(purchase_line, "The SO line should generate a PO line")
        self.assertEqual(purchase_line.product_uom, self.service_purchase_2.uom_po_id, "The UoM on the purchase line should be the one from the product configuration")
        self.assertNotEqual(purchase_line.product_uom, self.sol2_service_purchase_2.product_uom, "As the product configuration, the UoM on the SO line should still be different from the one on the PO line")
        # NOTE(review): the factor 12 presumably reflects a dozen/unit UoM
        # pair set up in the common test class — confirm there.
        self.assertEqual(purchase_line.product_qty, self.sol2_service_purchase_2.product_uom_qty * 12, "The quantity from the SO should be converted with th UoM factor on the PO line")
    def test_no_supplier(self):
        """ Test confirming SO with product with no supplier raise Error """
        # delete the suppliers
        self.supplierinfo1.unlink()
        # confirm the SO should raise UserError
        with self.assertRaises(UserError):
            self.sale_order_1.action_confirm()
    def test_reconfirm_sale_order(self):
        """ Confirm SO, cancel it, then re-confirm it should not regenerate a purchase line """
        self.sale_order_1.action_confirm()
        purchase_order = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_line = purchase_lines[0]
        self.assertEqual(len(purchase_lines), 1, "Only one purchase line should be created on SO confirmation")
        self.assertEqual(len(purchase_order), 1, "One purchase order should have been created on SO confirmation")
        self.assertEqual(len(purchase_order.order_line), 1, "Only one line on PO, after SO confirmation")
        self.assertEqual(purchase_order, purchase_lines.order_id, "The generated purchase line should be in the generated purchase order")
        self.assertEqual(purchase_order.state, 'draft', "Generated purchase should be in draft state")
        self.assertEqual(purchase_line.price_unit, self.supplierinfo1.price, "Purchase line price is the one from the supplier")
        self.assertEqual(purchase_line.product_qty, self.sol1_service_purchase_1.product_uom_qty, "Quantity on SO line is not the same on the purchase line (same UoM)")
        self.sale_order_1.action_cancel()
        self.assertEqual(len(purchase_order.activity_ids), 1, "One activity should be scheduled on the PO since a SO has been cancelled")
        purchase_order = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_line = purchase_lines[0]
        self.assertEqual(len(purchase_lines), 1, "Always one purchase line even after SO cancellation")
        self.assertTrue(purchase_order, "Always one purchase order even after SO cancellation")
        self.assertEqual(len(purchase_order.order_line), 1, "Still one line on PO, even after SO cancellation")
        self.assertEqual(purchase_order, purchase_lines.order_id, "The generated purchase line should still be in the generated purchase order")
        self.assertEqual(purchase_order.state, 'draft', "Generated purchase should still be in draft state")
        self.assertEqual(purchase_line.price_unit, self.supplierinfo1.price, "Purchase line price is still the one from the supplier")
        self.assertEqual(purchase_line.product_qty, self.sol1_service_purchase_1.product_uom_qty, "Quantity on SO line should still be the same on the purchase line (same UoM)")
        self.sale_order_1.action_draft()
        self.sale_order_1.action_confirm()
        purchase_order = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_line = purchase_lines[0]
        self.assertEqual(len(purchase_lines), 1, "Still only one purchase line should be created even after SO reconfirmation")
        self.assertEqual(len(purchase_order), 1, "Still one purchase order should be after SO reconfirmation")
        self.assertEqual(len(purchase_order.order_line), 1, "Only one line on PO, even after SO reconfirmation")
        self.assertEqual(purchase_order, purchase_lines.order_id, "The generated purchase line should be in the generated purchase order")
        self.assertEqual(purchase_order.state, 'draft', "Generated purchase should be in draft state")
        self.assertEqual(purchase_line.price_unit, self.supplierinfo1.price, "Purchase line price is the one from the supplier")
        self.assertEqual(purchase_line.product_qty, self.sol1_service_purchase_1.product_uom_qty, "Quantity on SO line is not the same on the purchase line (same UoM)")
    def test_update_ordered_sale_quantity(self):
        """ Test the purchase order behavior when changing the ordered quantity on the sale order line.
            Increase of qty on the SO
                - If PO is draft ['draft', 'sent', 'to approve'] : increase the quantity on the PO
                - If PO is confirmed ['purchase', 'done', 'cancel'] : create a new PO
            Decrease of qty on the SO
                - If PO is draft ['draft', 'sent', 'to approve'] : next activity on the PO
                - If PO is confirmed ['purchase', 'done', 'cancel'] : next activity on the PO
        """
        self.sale_order_1.action_confirm()
        purchase_order = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_line = purchase_lines[0]
        self.assertEqual(purchase_order.state, 'draft', "The created purchase should be in draft state")
        self.assertFalse(purchase_order.activity_ids, "There is no activities on the PO")
        self.assertEqual(purchase_line.product_qty, self.sol1_service_purchase_1.product_uom_qty, "Quantity on SO line is not the same on the purchase line (same UoM)")
        # increase the ordered quantity on sale line
        self.sol1_service_purchase_1.write({'product_uom_qty': self.sol1_service_purchase_1.product_uom_qty + 12}) # product_uom_qty = 16
        self.assertEqual(purchase_line.product_qty, self.sol1_service_purchase_1.product_uom_qty, "The quantity of draft PO line should be increased as the one from the sale line changed")
        sale_line_old_quantity = self.sol1_service_purchase_1.product_uom_qty
        # decrease the ordered quantity on sale line
        self.sol1_service_purchase_1.write({'product_uom_qty': self.sol1_service_purchase_1.product_uom_qty - 3}) # product_uom_qty = 13
        self.assertEqual(len(purchase_order.activity_ids), 1, "One activity should have been created on the PO")
        self.assertEqual(purchase_order.activity_ids.user_id, purchase_order.user_id, "Activity assigned to PO responsible")
        self.assertEqual(purchase_order.activity_ids.state, 'today', "Activity is for today, as it is urgent")
        # confirm the PO
        purchase_order.button_confirm()
        # decrease the ordered quantity on sale line
        self.sol1_service_purchase_1.write({'product_uom_qty': self.sol1_service_purchase_1.product_uom_qty - 5}) # product_uom_qty = 8
        purchase_order.invalidate_cache() # Note: creating a second activity will not refresh the cache
        self.assertEqual(purchase_line.product_qty, sale_line_old_quantity, "The quantity on the PO line should not have changed.")
        self.assertEqual(len(purchase_order.activity_ids), 2, "a second activity should have been created on the PO")
        self.assertEqual(purchase_order.activity_ids.mapped('user_id'), purchase_order.user_id, "Activities assigned to PO responsible")
        self.assertEqual(purchase_order.activity_ids.mapped('state'), ['today', 'today'], "Activities are for today, as it is urgent")
        # increase the ordered quantity on sale line
        delta = 8
        self.sol1_service_purchase_1.write({'product_uom_qty': self.sol1_service_purchase_1.product_uom_qty + delta}) # product_uom_qty = 16
        self.assertEqual(purchase_line.product_qty, sale_line_old_quantity, "The quantity on the PO line should not have changed.")
        self.assertEqual(len(purchase_order.activity_ids), 2, "Always 2 activity on confirmed the PO")
        purchase_order2 = self.env['purchase.order'].search([('partner_id', '=', self.supplierinfo1.name.id), ('state', '=', 'draft')])
        purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.sale_order_1.order_line.ids)])
        purchase_lines2 = purchase_lines.filtered(lambda pol: pol.order_id == purchase_order2)
        purchase_line2 = purchase_lines2[0]
        self.assertTrue(purchase_order2, "A second PO is created by increasing sale quantity when first PO is confirmed")
        self.assertEqual(purchase_order2.state, 'draft', "The second PO is in draft state")
        self.assertNotEqual(purchase_order, purchase_order2, "The 2 PO are different")
        self.assertEqual(len(purchase_lines), 2, "The same Sale Line has created 2 purchase lines")
        self.assertEqual(len(purchase_order2.order_line), 1, "The 2nd PO has only one line")
        self.assertEqual(purchase_line2.sale_line_id, self.sol1_service_purchase_1, "The 2nd PO line came from the SO line sol1_service_purchase_1")
        self.assertEqual(purchase_line2.product_qty, delta, "The quantity of the new PO line is the quantity added on the Sale Line, after first PO confirmation")
|
rven/odoo
|
addons/sale_purchase/tests/test_sale_purchase.py
|
Python
|
agpl-3.0
| 16,599
|
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """Scatter-plot clustered 2-D points, save the figure, and show it.

    pred     -- cluster label per point (small ints, used to pick a color)
    features -- sequence of 2-element feature vectors
    poi      -- truthy flag per point; starred when mark_poi is True
    name     -- filename the figure is saved to
    """
    ### plot each cluster with a different color--add more colors for
    ### drawing more than five clusters (an out-of-range label raises
    ### IndexError)
    colors = ["b", "c", "k", "m", "g"]
    # zip the labels with the points directly instead of re-indexing both
    # sequences through enumerate (the old loop ignored its unpacked value
    # and did features[ii]/pred[ii] lookups).
    for point, cluster in zip(features, pred):
        plt.scatter(point[0], point[1], color=colors[cluster])
    ### if you like, place red stars over points that are POIs (just for funsies)
    if mark_poi:
        for point, flagged in zip(features, poi):
            if flagged:
                plt.scatter(point[0], point[1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi  = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2 in finance_features:
    plt.scatter( f1, f2 )
plt.show()
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
# NOTE: `pred` is intentionally left undefined — the student's clustering
# code is expected to define it; the NameError fallback below handles the
# unmodified starter file. (Python 2 print statement: this file is py2.)
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
    print "no predictions object named pred found, no clusters to plot"
|
jordanopensource/data-science-bootcamp
|
MachineLearning/Session3/k_means_cluster.py
|
Python
|
mit
| 2,249
|
from __future__ import absolute_import
import six
import warnings
import time
import logging
from sentry import options
from django.core.exceptions import SuspiciousOperation
from collections import namedtuple
from django.conf import settings
from requests.exceptions import RequestException, Timeout, ReadTimeout
from six.moves.urllib.parse import urlparse
from sentry.models import EventError
from sentry.exceptions import RestrictedIPAddress
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.strings import truncatechars
# Importing for backwards compatible API
from sentry.net.socket import safe_socket_connect, is_valid_url, is_safe_hostname # NOQA
from sentry.net.http import SafeSession
logger = logging.getLogger(__name__)
# TODO(dcramer): we want to change these to be constants so they are easier
# to translate/link again
# the maximum number of characters of a URL kept when exposing it in errors
MAX_URL_LENGTH = 150
# UrlResult.body **must** be bytes
UrlResult = namedtuple("UrlResult", ["url", "headers", "body", "status", "encoding"])
# In case SSL is unavailable (light builds) we can't import this here.
try:
    from OpenSSL.SSL import ZeroReturnError, Error as OpenSSLError
except ImportError:

    # Stand-in exception types so `except (..., ZeroReturnError, OpenSSLError)`
    # below still compiles; they are never raised without pyOpenSSL.
    class ZeroReturnError(Exception):
        pass

    class OpenSSLError(Exception):
        pass
class BadSource(Exception):
    """Base error for source fetching; carries a structured ``data`` payload.

    The dict passed in (or a fresh one) is stored on ``self.data`` with a
    ``type`` key guaranteed to be present.
    """

    error_type = EventError.UNKNOWN_ERROR

    def __init__(self, data=None):
        # Intentionally keep a reference to the caller's dict (not a copy),
        # matching the original aliasing behavior.
        error_data = {} if data is None else data
        error_data.setdefault("type", self.error_type)
        super(BadSource, self).__init__(error_data["type"])
        self.data = error_data
class CannotFetch(BadSource):
    """Raised when a remote resource could not be retrieved (see fetch_file)."""
    error_type = EventError.FETCH_GENERIC_ERROR
def get_server_hostname():
    """Return the hostname component of the configured ``system.url-prefix``."""
    url_prefix = options.get("system.url-prefix")
    return urlparse(url_prefix).hostname


# Backwards-compatible alias; callers may still import ``build_session``.
build_session = SafeSession
def safe_urlopen(
    url,
    method=None,
    params=None,
    data=None,
    json=None,
    headers=None,
    allow_redirects=False,
    timeout=30,
    verify_ssl=True,
    user_agent=None,
):
    """
    A slightly safer version of ``urlib2.urlopen`` which prevents redirection
    and ensures the URL isn't attempting to hit a blacklisted IP range.

    :param method: HTTP verb; defaults to POST when a body (data/json) is
        given, GET otherwise.
    :param user_agent: deprecated and ignored (warns if supplied).
    :returns: the ``requests`` Response object (body not yet read).
    """
    if user_agent is not None:
        warnings.warn("user_agent is no longer used with safe_urlopen")

    # SafeSession blocks redirects to / connections into restricted IP ranges.
    session = SafeSession()

    kwargs = {}

    # NOTE(review): truthiness check — an explicitly empty json payload
    # ({} / []) is silently dropped; presumably intentional, confirm.
    if json:
        kwargs["json"] = json
        if not headers:
            headers = {}
        headers.setdefault("Content-Type", "application/json")

    if data:
        kwargs["data"] = data

    if params:
        kwargs["params"] = params

    if headers:
        kwargs["headers"] = headers

    if method is None:
        method = "POST" if (data or json) else "GET"

    response = session.request(
        method=method,
        url=url,
        allow_redirects=allow_redirects,
        timeout=timeout,
        verify=verify_ssl,
        **kwargs
    )

    return response
def safe_urlread(response):
    """Read and return the full response body as bytes."""
    return response.content
def expose_url(url):
    """Return a short, user-presentable text form of *url* for error payloads."""
    if url is None:
        return u"<unknown>"
    # Slice comparison (not startswith) tolerates bytes input on py2/py3.
    if url[:5] == "data:":
        return u"<data url>"
    shortened = truncatechars(url, MAX_URL_LENGTH)
    if isinstance(shortened, six.binary_type):
        shortened = shortened.decode("utf-8", "replace")
    return shortened
def fetch_file(
    url,
    domain_lock_enabled=True,
    outfile=None,
    headers=None,
    allow_redirects=True,
    verify_ssl=False,
    timeout=settings.SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT,
    **kwargs
):
    """
    Pull down a URL, returning a UrlResult object.

    :param domain_lock_enabled: when True, a domain that recently failed is
        blacklisted (via cache) for 5 minutes and refused immediately.
    :param outfile: optional file-like sink; when given, the body is streamed
        into it and ``UrlResult.body`` is None.
    :raises CannotFetch: on any failure (timeout, size limit, blocked IP, ...).
    """
    # lock down domains that are problematic
    if domain_lock_enabled:
        domain = urlparse(url).netloc
        domain_key = "source:blacklist:v2:%s" % (md5_text(domain).hexdigest(),)
        domain_result = cache.get(domain_key)
        if domain_result:
            # Re-raise the cached error, but pointing at the current URL.
            domain_result["url"] = url
            raise CannotFetch(domain_result)

    logger.debug("Fetching %r from the internet", url)

    http_session = SafeSession()
    response = None

    try:
        try:
            start = time.time()
            # Streaming request so we can enforce size/time limits per chunk.
            response = http_session.get(
                url,
                allow_redirects=allow_redirects,
                verify=verify_ssl,
                headers=headers,
                timeout=timeout,
                stream=True,
                **kwargs
            )

            # Fast-fail on a declared Content-Length over the limit; a
            # missing/bogus header falls through to the streamed check below.
            try:
                cl = int(response.headers["content-length"])
            except (LookupError, ValueError):
                cl = 0
            if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE:
                raise OverflowError()

            return_body = False
            if outfile is None:
                outfile = six.BytesIO()
                return_body = True

            cl = 0

            # Only need to even attempt to read the response body if we
            # got a 200 OK
            if response.status_code == 200:
                for chunk in response.iter_content(16 * 1024):
                    # Overall wall-clock budget, independent of socket timeout.
                    if time.time() - start > settings.SENTRY_SOURCE_FETCH_TIMEOUT:
                        raise Timeout()
                    outfile.write(chunk)
                    cl += len(chunk)
                    if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE:
                        raise OverflowError()

        except Exception as exc:
            logger.debug("Unable to fetch %r", url, exc_info=True)
            # Map the failure onto a structured EventError payload.
            if isinstance(exc, RestrictedIPAddress):
                error = {"type": EventError.RESTRICTED_IP, "url": expose_url(url)}
            elif isinstance(exc, SuspiciousOperation):
                error = {"type": EventError.SECURITY_VIOLATION, "url": expose_url(url)}
            elif isinstance(exc, (Timeout, ReadTimeout)):
                error = {
                    "type": EventError.FETCH_TIMEOUT,
                    "url": expose_url(url),
                    "timeout": settings.SENTRY_SOURCE_FETCH_TIMEOUT,
                }
            elif isinstance(exc, OverflowError):
                error = {
                    "type": EventError.FETCH_TOO_LARGE,
                    "url": expose_url(url),
                    # We want size in megabytes to format nicely
                    "max_size": float(settings.SENTRY_SOURCE_FETCH_MAX_SIZE) / 1024 / 1024,
                }
            elif isinstance(exc, (RequestException, ZeroReturnError, OpenSSLError)):
                error = {
                    "type": EventError.FETCH_GENERIC_ERROR,
                    "value": six.text_type(type(exc)),
                    "url": expose_url(url),
                }
            else:
                logger.exception(six.text_type(exc))
                error = {"type": EventError.UNKNOWN_ERROR, "url": expose_url(url)}

            # TODO(dcramer): we want to be less aggressive on disabling domains
            if domain_lock_enabled:
                cache.set(domain_key, error or "", 300)
                logger.warning("source.disabled", extra=error)
            raise CannotFetch(error)

        # Success path: normalize headers to lowercase keys.
        headers = {k.lower(): v for k, v in response.headers.items()}
        encoding = response.encoding

        body = None
        if return_body:
            body = outfile.getvalue()
            outfile.close()  # we only want to close StringIO

        result = (headers, body, response.status_code, encoding)
    finally:
        if response is not None:
            response.close()

    return UrlResult(url, result[0], result[1], result[2], result[3])
|
mvaled/sentry
|
src/sentry/http.py
|
Python
|
bsd-3-clause
| 7,479
|
import parse
from flask import Blueprint, render_template
# Flask blueprint for the legacy (non-concurrent) dining views.
bpp = Blueprint('old', __name__)

# Daily-menu pages for the four Penn residential dining halls, in the order:
# Hill House, 1920 Commons, Kings Court English House, McClelland Express.
HALL_URLS = [
    "http://cms.business-services.upenn.edu/dining/hours-locations-a-menus/residential-dining/hill-house/daily-menu.html",
    "http://cms.business-services.upenn.edu/dining/hours-locations-a-menus/residential-dining/1920-commons/1920-menu.html",
    "http://cms.business-services.upenn.edu/dining/hours-locations-a-menus/residential-dining/english-house/daily-menu.html",
    "http://cms.business-services.upenn.edu/dining/hours-locations-a-menus/residential-dining/mcclelland-express/daily-menu.html"
]
@bpp.route('/old/')
@bpp.route('/<hill>/<commons>/<kings>/<mcclelland>/')
def index(hill=None, commons=None, kings=None, mcclelland=None):
    # Sequential fetch of all four halls (slower than the concurrent variant).
    # NOTE(review): the URL parameters are accepted but immediately
    # overwritten below, and the parameterized rule duplicates the one on
    # default() — only the first registration handles it; confirm intent.
    hill = parse.get_all_meals(HALL_URLS[0])
    commons = parse.get_all_meals(HALL_URLS[1])
    kings = parse.get_all_meals(HALL_URLS[2])
    mcclelland = parse.get_all_meals(HALL_URLS[3])
    return render_template('default.html', hill=hill, commons=commons, kings=kings, mcclelland=mcclelland)
@bpp.route('/')
@bpp.route('/<hill>/<commons>/<kings>/<mcclelland>/')
def default(hill=None, commons=None, kings=None, mcclelland=None):
    # Fetch all four halls concurrently; URL parameters are ignored
    # (immediately overwritten) — NOTE(review): likely vestigial.
    hill, commons, kings, mcclelland = parse.get_all_meals_concurrent(HALL_URLS)
    return render_template('default.html', hill=hill, commons=commons, kings=kings, mcclelland=mcclelland)
|
adelq/penndining
|
old.py
|
Python
|
gpl-3.0
| 1,376
|
# Copyright (C) 2001,2002 Python Software Foundation
# email package unit tests
# The specific tests now live in Lib/email/test
from email.test.test_email import suite
from email.test.test_email_renamed import suite as suite2
from test import test_support
def test_main():
    """Run both email test suites (original and renamed package layout)."""
    test_support.run_unittest(suite())
    test_support.run_unittest(suite2())

if __name__ == '__main__':
    test_main()
|
j5shi/Thruster
|
pylibs/test/test_email.py
|
Python
|
gpl-2.0
| 412
|
#!/usr/bin/env python
"""
Usage: get-addons [-m] path1 [path2 ...]
Given a list of paths, finds and returns a list of valid addons paths.
With -m flag, will return a list of modules names instead.
"""
import ast
import os
import sys
from git_run import GitRun
# Recognized Odoo manifest file names, oldest to newest naming convention.
MANIFEST_FILES = [
    '__manifest__.py',
    '__odoo__.py',
    '__openerp__.py',
    '__terp__.py',
]


def is_module(path):
    """return False if the path doesn't contain an odoo module, and the full
    path to the module manifest otherwise"""
    if not os.path.isdir(path):
        return False
    entries = os.listdir(path)
    recognized = [name for name in entries
                  if name in MANIFEST_FILES or name == '__init__.py']
    # A module is exactly one manifest plus __init__.py.
    if len(recognized) == 2 and '__init__.py' in recognized:
        manifest = next(name for name in recognized if name != '__init__.py')
        return os.path.join(path, manifest)
    return False
def get_modules(path, depth=1):
    """ Return modules of path repo (used in test_server.py)"""
    # Sorted list of the module names discovered by get_modules_info.
    return sorted(get_modules_info(path, depth).keys())
def get_modules_info(path, depth=1):
    """ Return a digest of each installable module's manifest in path repo

    :param path: directory to scan (trailing slash tolerated)
    :param depth: how many folder levels to descend into non-module dirs
    :return: dict of module name -> {application, depends, auto_install}
    """
    # Avoid empty basename when path ends with slash
    if not os.path.basename(path):
        path = os.path.dirname(path)
    modules = {}
    if os.path.isdir(path) and depth > 0:
        for module in os.listdir(path):
            manifest_path = is_module(os.path.join(path, module))
            if manifest_path:
                # Use a context manager so the manifest file handle is
                # always closed (the original leaked it).
                with open(manifest_path) as manifest_file:
                    manifest = ast.literal_eval(manifest_file.read())
                if manifest.get('installable', True):
                    modules[module] = {
                        'application': manifest.get('application'),
                        'depends': manifest.get('depends') or [],
                        'auto_install': manifest.get('auto_install'),
                    }
            else:
                # Not a module: recurse one level deeper.
                deeper_modules = get_modules_info(
                    os.path.join(path, module), depth - 1)
                modules.update(deeper_modules)
    return modules
def is_addons(path):
    """True when *path* directly contains at least one odoo module."""
    return bool(get_modules(path))
def get_addons(path, depth=1):
    """Return repositories in path. Can search in inner folders as depth."""
    if depth < 0 or not os.path.exists(path):
        return []
    if is_addons(path):
        return [path]
    found = []
    for entry in sorted(os.listdir(path)):
        candidate = os.path.join(path, entry)
        if os.path.isdir(candidate):
            found.extend(get_addons(candidate, depth - 1))
    return found
def get_modules_changed(path, ref='HEAD'):
    """Get modules changed from git diff-index {ref}
    :param path: String path of git repo
    :param ref: branch or remote/branch or sha to compare
    :return: List of paths of modules changed
    """
    repo = GitRun(os.path.join(path, '.git'))
    if ref != 'HEAD':
        # to force create branch
        fetch_ref = ref if ':' in ref else ref + ':' + ref
        repo.run(['fetch'] + fetch_ref.split('/', 1))
    changed_items = repo.get_items_changed(ref)
    # Top-level folder of every changed path that lives inside a folder.
    changed_folders = {item.split('/')[0]
                       for item in changed_items
                       if '/' in item}
    changed_modules = set(get_modules(path)) & changed_folders
    return [os.path.join(path, module) for module in changed_modules]
def get_dependencies(modules, module_name):
    """Return a set of all the dependencies in deep of the module_name.
    The module_name is included in the result."""
    deps = {module_name}
    for direct_dep in modules.get(module_name, {}).get('depends', []):
        deps.update(get_dependencies(modules, direct_dep))
    return deps
def get_dependents(modules, module_name):
    """Return a set of all the modules that are dependent of the module_name.
    The module_name is included in the result."""
    dependents = {module_name}
    for candidate, data in modules.items():
        if module_name in data.get('depends', []):
            dependents.update(get_dependents(modules, candidate))
    return dependents
def add_auto_install(modules, to_install):
    """ Append automatically installed glue modules to to_install if their
    dependencies are already present. to_install is a set. """
    # Iterate to a fixed point: installing one glue module may satisfy
    # the dependencies of another.
    changed = True
    while changed:
        changed = False
        for name, data in modules.items():
            if not data.get('auto_install') or name in to_install:
                continue
            if all(dep in to_install for dep in data.get('depends', [])):
                to_install.add(name)
                changed = True
    return to_install
def get_applications_with_dependencies(modules):
    """ Return all modules marked as application with their dependencies.
    For our purposes, l10n modules cannot be an application. """
    apps = set()
    for name, data in modules.items():
        if data.get('application') and not name.startswith('l10n_'):
            apps |= get_dependencies(modules, name)
    # Also pull in any glue modules whose dependencies are now satisfied.
    return add_auto_install(modules, apps)
def get_localizations_with_dependents(modules):
    """ Return all localization modules with the modules that depend on them
    """
    localizations = set()
    for name in modules:
        if name.startswith('l10n_'):
            localizations |= get_dependents(modules, name)
    return localizations
def main(argv=None):
    """CLI entry point: print addons paths (default) or module names (-m).

    Flags: -m (list modules), -e CSV (exclude), --only/--exclude-applications,
    --only/--exclude-localization. Remaining args are paths to scan.
    """
    if argv is None:
        argv = sys.argv
    params = argv[1:]
    if not params:
        print(__doc__)
        return 1

    list_modules = False
    # Tri-state filters: None = no filtering, True = only, False = exclude.
    application = None
    localization = None
    exclude_modules = []

    while params and params[0].startswith('-'):
        param = params.pop(0)
        if param == '-m':
            list_modules = True
        elif param == '-e':
            exclude_modules = [x for x in params.pop(0).split(',')]
        elif param == '--only-applications':
            application = True
        elif param == '--exclude-applications':
            application = False
        elif param == '--only-localization':
            localization = True
        elif param == '--exclude-localization':
            localization = False
        elif param.startswith('-'):
            raise Exception('Unknown parameter: %s' % param)

    if list_modules:
        # Merge module manifests from every given path.
        modules = {}
        for path in params:
            modules.update(get_modules_info(path))
        res = set(modules.keys())
        applications, localizations = set(), set()
        # Apply application filter (only relevant when explicitly set).
        if application is True or application is False:
            applications = get_applications_with_dependencies(modules)
            if not application:
                res -= applications
                applications = set()
        # Apply localization filter the same way.
        if localization is True or localization is False:
            localizations = get_localizations_with_dependents(modules)
            if not localization:
                res -= localizations
                localizations = set()
        if application or localization:
            res = applications | localizations
        res = sorted(list(res))
    else:
        lists = [get_addons(path) for path in params]
        res = [x for l in lists for x in l]  # flatten list of lists
    if exclude_modules:
        res = [x for x in res if x not in exclude_modules]
    print(','.join(res))
    return res

if __name__ == "__main__":
    sys.exit(main())
|
OCA/maintainer-quality-tools
|
travis/getaddons.py
|
Python
|
agpl-3.0
| 7,667
|
# encoding: utf8
from django.template.response import TemplateResponse
from posts.models import Post, Reply
def home(request):
    """Render the home page with the ten most recent replies."""
    context = {
        'recent_replies': Reply.objects.order_by('-created_at')[:10],
    }
    return TemplateResponse(request, 'home.html', context)
|
libchaos/erya
|
erya/views.py
|
Python
|
gpl-3.0
| 281
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .health_evaluation import HealthEvaluation
class ApplicationsHealthEvaluation(HealthEvaluation):
    """Represents health evaluation for applications, containing health
    evaluations for each unhealthy application that impacted current aggregated
    health state.

    :param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
     'Warning', 'Error', 'Unknown'
    :type aggregated_health_state: str or :class:`enum
     <azure.servicefabric.models.enum>`
    :param description: Description of the health evaluation, which represents
     a summary of the evaluation process.
    :type description: str
    :param kind: Polymorphic Discriminator
    :type kind: str
    :param max_percent_unhealthy_applications: Maximum allowed percentage of
     unhealthy applications from the ClusterHealthPolicy.
    :type max_percent_unhealthy_applications: int
    :param total_count: Total number of applications from the health store.
    :type total_count: long
    :param unhealthy_evaluations:
    :type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
     <azure.servicefabric.models.HealthEvaluationWrapper>`
    """

    # `kind` is the polymorphic discriminator, required for deserialization.
    _validation = {
        'kind': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'},
        'total_count': {'key': 'TotalCount', 'type': 'long'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_applications=None, total_count=None, unhealthy_evaluations=None):
        super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description)
        self.max_percent_unhealthy_applications = max_percent_unhealthy_applications
        self.total_count = total_count
        self.unhealthy_evaluations = unhealthy_evaluations
        # Fixed discriminator value for this subtype.
        self.kind = 'Applications'
|
AutorestCI/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/applications_health_evaluation.py
|
Python
|
mit
| 2,699
|
#!/usr/bin/env python3
"""One-off maintenance script: shrink oversized user avatars to 192x192 thumbnails."""
import sys
sys.path.append('..')  # fix import directory
from app import app
from app.models import User
from PIL import Image
from app.utils import rand_str

# Push a request context so the ORM/session machinery works outside a server.
ctx = app.test_request_context()
ctx.push()

users = User.query.all()
for u in users:
    if not u._avatar:
        continue
    with Image.open('../uploads/images/' + u._avatar) as img:
        image_width, image_height = img.size
        thumbnail_width = 192
        thumbnail_height = 192
        if image_width <= thumbnail_width and image_height <= thumbnail_height:
            continue
        # generate thumbnail if the avatar is too large
        original_avatar = u._avatar  # keep before reassigning for logging
        new_filename = rand_str() + '.png'
        try:
            img.thumbnail((thumbnail_width, thumbnail_height), Image.ANTIALIAS)
            img.save('../uploads/images/' + new_filename, "PNG")
        except IOError:
            print("Failed to create thumbnail from '" + original_avatar + "' to '" + new_filename + "'")
            # Bug fix: on failure, do NOT point the user record at a
            # thumbnail that was never written (the original did).
            continue
        u._avatar = new_filename
        u.save()
        # Bug fix: log the pre-update filename as "original" (the original
        # script printed u._avatar after overwriting it).
        print('User ID ' + str(u.id) + ' original ' + original_avatar + ' thumbnail ' + new_filename)
|
JING-TIME/ustc-course
|
tests/resize_avatar.py
|
Python
|
agpl-3.0
| 1,146
|
#!/usr/bin/env python
from netmiko import ConnectHandler
from getpass import getpass

def main():
    """Set the logging buffer size on a lab router and show the result."""
    # NOTE(review): hard-coded credentials in source; getpass is imported
    # but unused — presumably intended for interactive password entry.
    pynet2 = {
        'device_type': 'cisco_ios',
        'ip': '184.105.247.71',
        'username': 'pyclass',
        'password': '88newclass',
        'port': 22,
    }

    pyrtr2 = ConnectHandler(**pynet2)

    # Enter config mode, apply the logging change, then verify it.
    pyrtr2.config_mode()
    pyrtr2.send_command("logging buffered 31313")
    pyrtr2.exit_config_mode()
    print pyrtr2.send_command("show run | i logging buffered")
    pyrtr2.disconnect()

if __name__ == "__main__":
    main()
|
MattBajro/pynet
|
class4/exercise7.py
|
Python
|
apache-2.0
| 546
|
from intercourse.swf.DataTypes import*
from intercourse .swf .loaders.records import*
def load(swf,tag):
    # Reader for the SWF PlaceObject2 tag: a leading flags byte selects which
    # of the optional fields below are present in the record.
    # NOTE(review): `['coursedaft', X][-1]` always evaluates to X, and the
    # ClipActions lambda defers evaluation — both appear to be deliberate
    # obfuscation; left untouched.
    r =StructDef().\
    add(
     'PlaceFlagHasClipActions',
     'PlaceFlagHasClipDepth',
     'PlaceFlagHasName',
     'PlaceFlagHasRatio',
     'PlaceFlagHasColorTransform',
     'PlaceFlagHasMatrix',
     'PlaceFlagHasCharacter',
     'PlaceFlagMove',UB(1)
    ).add('Depth',UI16 ).addCond(
     'CharacterId',UI16,'PlaceFlagHasCharacter' ).addCond(
     'Matrix', MAT, 'PlaceFlagHasMatrix').addCond(
     'ColorTransform',CXFORMWITHALPHA ,'PlaceFlagHasColorTransform' ).addCond(
     'Ratio',UI16,'PlaceFlagHasRatio').addCond(
     'Name', ['coursedaft',STRING ] [-1 ], ['coursedaft', 'PlaceFlagHasName'][ -1 ] ).addCond(
     'ClipDepth',UI16,'PlaceFlagHasClipDepth').addCond(
     'ClipActions',lambda n:CLIPACTIONS ,'PlaceFlagHasColorTransform')

    # Hand the assembled struct definition to the SWF value reader.
    swf.valueReader.r('r',r,tag)
|
icefapper/coursedaft
|
intercourse/swf/loaders/PlaceObject2.py
|
Python
|
gpl-2.0
| 828
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeGroupRolesAction(BaseAction):
    """CLI action wrapping the QingCloud `DescribeGroupRoles` IaaS API call."""

    action = 'DescribeGroupRoles'
    command = 'describe-group-roles'
    usage = '%(prog)s [-r <group_roles> ...] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register this action's command-line options on *parser*."""

        parser.add_argument("-r", "--group-roles", dest="group_roles",
                            action="store", type=str, default=None,
                            help="an array including IDs of user group roles.")

        parser.add_argument("-s", "--status", dest="status",
                            action="store", type=str, default=None,
                            help="an array including role status.")

        parser.add_argument("-w", "--search-word", dest="search_word",
                            action="store", type=str, default=None,
                            help="the search word which can be instance id and instance name.")

        parser.add_argument("-v", "--verbose", dest="verbose",
                            action="store", type=int, default=1,
                            help="Whether to return redundant message.if it is 1, return the details of the instance related other resources.")

        parser.add_argument("-k", "--sort-key", dest="sort_key",
                            action="store", type=str, default=None,
                            help="the sort key, which defaults be create_time.")

        parser.add_argument("-R", "--reverse", dest="reverse",
                            action="store", type=int, default=0,
                            help="0 for Ascending order, 1 for Descending order.")

    @classmethod
    def build_directive(cls, options):
        """Translate parsed CLI options into the API request dict."""
        # Comma-separated CLI values become arrays via explode_array.
        directive = {
            "group_roles": explode_array(options.group_roles),
            "status": explode_array(options.status),
            "search_word": options.search_word,
            "verbose": options.verbose,
            "sort_key": options.sort_key,
            "reverse": options.reverse,
            "limit": options.limit,
            "offset": options.offset
        }

        return directive
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/collaboration/describe_group_roles.py
|
Python
|
apache-2.0
| 3,011
|
# @file Same Tree
# @brief Given 2 BT, do they have same structure and values
# https://leetcode.com/problems/same-tree/
'''
Given two binary trees, write a function to check if they are
equal or not.
Two binary trees are considered equal if they are structurally
identical and the nodes have the same value.
'''
def isSameTree(self, p, q):
    """Return True when the trees rooted at p and q match in shape and values."""
    if p is None or q is None:
        # Equal only when both subtrees are absent.
        return p is q
    return (p.val == q.val
            and self.isSameTree(p.left, q.left)
            and self.isSameTree(p.right, q.right))
# [1, 2, 3], [1, 2, 3], true
# [10,5,15], [10,5,null,null,15], false
|
ngovindaraj/Python
|
leetcode/ds_tree_same_tree.py
|
Python
|
mit
| 613
|
from errors import *
import sys
sys.path.append("core/include")
sys.path.append("core/include/simplejson")
sys.path.append("include")
import time
import gui
import os
import pygame
pygame.font.init()
import random
import pickle
import re
import textutil
import registry
import zipfile
import simplejson as json
# Convenience alias for the bitmap-font renderer.
ImgFont = textutil.ImgFont
# Optional backends: each import degrades gracefully to None when missing.
try:
    import pygame.movie as pymovie
except:
    pymovie = None
try:
    import audiere
    aud = audiere.open_device()
except:
    audiere = None
try:
    import android
except:
    android = None
# On Android use its mixer shim; otherwise the standard pygame mixer.
if android:
    import android.mixer as mixer
else:
    import pygame.mixer as mixer
from pwvlib import *
# Version metadata read from the current folder's data file.
d = get_data_from_folder(".")
__version__ = cver_s(d["version"])
VERSION = "Version "+cver_s(d["version"])
# Enable numpy-backed sound arrays when numpy is available.
try:
    import numpy
    pygame.sndarray.use_arraytype("numpy")
    pygame.use_numpy = True
except:
    numpy = None
    pygame.use_numpy = False
# Base (DS-style) screen dimensions and default animation speed.
standard_sw,standard_sh = 256,192
spd = 6
# Maps asset categories to the file extensions they may use.
ext_map = {"image": ["png", "jpg"],
           "script": ["txt"],
           "music": ["wav", "mid", "mod", "ogg", "s3m", "it", "xm"],
           "sound": ["wav", "ogg"],
           "movie": ["mpeg", "mpg"]}

def ext_for(types=["image","script","music","sound"]):
    """Yield each dotted extension ('.png', ...) for the given categories."""
    for category in types:
        for extension in ext_map[category]:
            yield "." + extension

def onlyext(p,types=["image","script","music","sound"]):
    """Returns the extension of the path"""
    return next((e for e in ext_for(types) if p.endswith(e)), "")

def noext(p,types=["image","script","music","sound"]):
    """Path without the extension, if extension is in known types"""
    if onlyext(p, types):
        return p.rsplit(".", 1)[0]
    return p

# Sanity checks run at import time.
assert noext("something.png",["image"])=="something"
assert noext("something.png",["music"])=="something.png"
assert onlyext("something.png")==".png"
def to_triplets(li):
    """Group a sequence into consecutive [a, b, c] lists, dropping leftovers."""
    # Classic grouper idiom: three references to one iterator advance in step.
    chunker = [iter(li)] * 3
    return [list(group) for group in zip(*chunker)]
def other_screen(y):
    # Translate a y coordinate onto the other display; assumes screens are
    # stacked vertically, assets.sh pixels each — TODO confirm.
    return y+(assets.num_screens-1)*assets.sh
class meta:
    """Per-art-asset metadata: sprite-sheet layout, timing, and sound cues.

    Populated from a sidecar text file of 'key value' lines via load_from.
    """
    def __init__(self):
        self.horizontal = 1            # sprite-sheet columns
        self.vertical = 1              # sprite-sheet rows
        self.length = 1                # total frame count
        self.loops = False
        self.sounds = {}               # frame index -> sound name
        self.split = None
        self.framecompress = [0,0]
        self.blinkmode = "blinknoset"
        self.offsetx = 0
        self.offsety = 0
        self.blipsound = None
        self.frameoffset = {}
        self.delays = {}               # frame index -> extra delay
        self.speed = 6
        self.blinkspeed = [100,200]
    def load_from(self,f):
        """Parse 'key value' metadata lines from file object *f* and close it."""
        text = f.read()
        text = text.decode("utf8","ignore")
        # Strip a UTF-8 BOM if present, then normalize line endings.
        text = text.replace(u'\ufeff',u'')
        lines = text.replace("\r\n","\n").split("\n")
        setlength = False
        for l in lines:
            spl = l.split(" ")
            if l.startswith("horizontal "): self.horizontal = int(spl[1])
            if l.startswith("vertical "): self.vertical = int(spl[1])
            if l.startswith("length "):
                self.length = int(spl[1])
                setlength = True
            if l.startswith("loops "): self.loops = int(spl[1])
            if l.startswith("sfx "):
                # Sound name may itself contain spaces; rejoin the remainder.
                self.sounds[int(spl[1])] = " ".join(spl[2:])
            if l.startswith("offsetx "): self.offsetx = int(spl[1])
            if l.startswith("offsety "): self.offsety = int(spl[1])
            if l.startswith("framecompress "):
                fc = l.replace("framecompress ","").split(",")
                # Single value means "compress from frame 0".
                if len(fc)==1:
                    self.framecompress = [0,int(fc[0])]
                else:
                    self.framecompress = [int(x) for x in fc]
            if l.startswith("blinksplit "):
                self.split = int(l.replace("blinksplit ",""))
            if l.startswith("blinkmode "):
                self.blinkmode = l.replace("blinkmode ","")
            if l.startswith("blipsound "):
                self.blipsound = l.replace("blipsound ","").strip()
            if l.startswith("framedelay "):
                frame,delay = l.split(" ")[1:]
                self.delays[int(frame)] = int(delay)
            if l.startswith("globaldelay "):
                self.speed = float(l.split(" ",1)[1])
            if l.startswith("blinkspeed "):
                self.blinkspeed = [int(x) for x in l.split(" ")[1:]]
        f.close()
        # Without an explicit length, derive it from the sheet layout.
        if not setlength:
            if self.vertical==1:
                self.length = self.horizontal
            else:
                self.length = self.horizontal*self.vertical
        return self
class layer(dict):
    """Dict of level -> object-name list, with reverse lookup via index()."""
    def index(self, ob):
        # Return the key whose list contains *ob*, or None when absent.
        for level, members in self.items():
            if ob in members:
                return level
# Draw-order (z) and priority layer tables, built from core/sorting.txt.
zlayers = layer()
zi = 0
ulayers = layer()
ui = 0
# NOTE(review): this file handle is opened at import time and never closed.
sort = open("core/sorting.txt")
sortmode = None
for line in sort.readlines():
    # Strip inline comments (after '#') and normalize tabs to spaces.
    line = line.strip().split("#")[0].replace("\t"," ")
    if line=="[z]":
        sortmode = "z"
    elif line=="[pri]":
        sortmode = "pri"
    elif line:
        x = line.split(" ")
        level = int(x[0])
        del x[0]
        # Skip any empty tokens produced by runs of spaces.
        while not x[0]: del x[0]
        if sortmode == "z":
            # z entries get sequential indices; the file's level is ignored.
            zlayers[zi]=x
            zi += 1
        elif sortmode == "pri":
            ulayers[level]=x
class Variables(dict):
    """Game-variable store with computed, read-only '_'-prefixed keys and
    side effects on certain writes (music fade, screen size, ...)."""
    def __getitem__(self,key):
        # Missing keys read as empty string rather than raising.
        return self.get(key,"")
    def get(self,key,*args):
        # Computed keys take precedence over stored values.
        if key.startswith("_layer_"):
            layer = zlayers.index(key[7:])
            if layer is not None:
                return str(layer)
        if key=="_version":
            return __version__
        if key=="_num_screens":
            return str(assets.num_screens)
        if key=="_debug":
            return {True:"on",False:"off"}[assets.debug_mode]
        if key=="_buildmode":
            return str(assets.cur_script.buildmode)
        return dict.get(self,key,*args)
    def __setitem__(self,key,value,*args):
        if key=="_speaking":
            dict.__setitem__(self,key,value,*args)
            # Best-effort: mirror the speaker's nametag lines.
            try:
                self["_speaking_name"] = assets.gportrait().nametag.split("\n")
            except:
                pass
        if key=="_music_fade":
            # Re-apply the music volume so the new fade takes effect.
            dict.__setitem__(self,key,value,*args)
            assets.smus(assets.gmus())
            return
        if key=="_debug":
            # Read-only computed key; writes are ignored.
            return
        if key=="_sw":
            print "SETTING SW"
            assets.sw = int(value)
            assets.make_screen()
            return
        if key=="_sh":
            assets.sh = int(value)
            assets.make_screen()
            return
        return dict.__setitem__(self,key,value,*args)
    def set(self,key,value):
        # Alias used by scripts.
        return self.__setitem__(key,value)
# Sanity check: the computed _version key must resolve at import time.
assert Variables().get("_version",None)
class ImgFrames(list):
    """Marker subclass: a list holding an animation's frame surfaces."""
    pass
class Assets(object):
    """Global engine state: screen/sound settings, caches, and asset loading."""
    lists = {}
    snds = {}            # sound cache
    art_cache = {}       # loaded image cache
    variables = Variables()
    gbamode = False
    screen_mode = "not_set"
    num_screens = 2      # DS-style dual screen by default
    screen_refresh = 1
    if android:
        # Redraw less often on Android for performance.
        screen_refresh = 3
    # Mixer configuration.
    sound_format = 44100
    sound_bits = 16
    sound_sign = -1
    sound_buffer = 4096
    sound_init = 0
    sound_volume = 100
    _music_vol = 100
    mute_sound = 0
    sw = standard_sw
    sh = standard_sh
    last_autosave = 0
    autosave_interval = 0
    path = ""
    tool_path = ""
    debug_mode = False
    game_speed = 1
    debugging = "SEARCH" #debugging mode. SEARCH for stop, STEP each line, or blank
    registry = registry.Registry(".")
    def init(self):
        # Re-create the registry rooted at the working directory.
        self.registry = registry.Registry(".")
    def get_stack(self):
        """Return [prev, current, next] script lines for each frame on the
        execution stack (placeholders for out-of-range positions)."""
        stack = []
        for s in self.stack:
            def gp(s,i):
                # Script line at offset i from the current position, or a tag.
                si = s.si+i
                if si<0:
                    return "PRE"
                if si>=len(s.scriptlines):
                    return "END"
                if not s.scriptlines:
                    return "NOSCRIPT"
                return s.scriptlines[si]
            p0 = gp(s,-1)
            p1 = gp(s,0)
            p2 = gp(s,1)
            stack.append([p0,p1,p2])
        return stack
    def smus(self,v):
        """Set the music volume (0-100), scaled by the _music_fade variable."""
        self._music_vol = v
        try:
            mixer.music.set_volume(v/100.0*(int(assets.variables.get("_music_fade","100"))/100.0))
        except:
            pass
    def gmus(self):
        # Current (unfaded) music volume.
        return self._music_vol
    music_volume = property(gmus,smus)
    def set_mute_sound(self,value):
        self.mute_sound = value
        # On Android, muting also halts any playing music.
        if android and self.mute_sound:
            self.stop_music()
    def _appendgba(self):
        # Filename suffix for GBA-specific asset variants.
        if not self.gbamode: return ""
        return "_gba"
    appendgba = property(_appendgba)
    def raw_lines(self,name,ext=".txt",start="game",use_unicode=False):
        """Read a text file relative to *start* (default: current game dir)
        and return its lines; optionally decode as UTF-8 and strip a BOM."""
        if start=="game":
            start = self.game
        if start:
            start = start+"/"
        # Don't double the extension when the caller already supplied it.
        if name.endswith(".txt"):
            ext = ""
        try:
            file = open(start+name+ext,"rU")
        except IOError:
            raise file_error("File named "+start+name+ext+" could not be read. Make sure you spelled it properly and check case.")
        text = file.read()
        if use_unicode:
            text = text.decode("utf8","ignore")
            #Replace the BOM
            text = text.replace(u'\ufeff',u'')
        return text.split("\n")
    def parse_macros(self,lines):
        """Alters lines to not include macro definitions, and returns macros"""
        macros = {}
        mode = "normal"
        i = 0
        while i<len(lines):
            line = lines[i]
            if line.startswith("macro "):
                # Start of a macro block: remove the header line in place.
                del lines[i]
                i -= 1
                mode = "macro"
                macroname = line[6:].strip()
                macrolines = []
            elif mode == "macro":
                del lines[i]
                i -= 1
                if line=="endmacro":
                    mode = "normal"
                    macros[macroname] = macrolines
                else:
                    macrolines.append(line)
            i+=1
        return macros
    def replace_macros(self,lines,macros):
        """Applies macros to lines.

        A line of the form ``{name arg1 key=val}`` is replaced in place by
        the macro body, with $0 = call position, $1..$n = positional args,
        and $key = keyword args substituted textually."""
        i = 0
        while i<len(lines):
            line = lines[i]
            if line.startswith("{"):
                del lines[i]
                i -= 1
                args = line[1:-1].split(" ")
                if macros.get(args[0],None):
                    newlines = "\n".join(macros[args[0]])
                    args = args[1:]
                    kwargs = {}
                    # Split out key=value tokens from positional arguments.
                    for a in args[:]:
                        if a.count("=")==1:
                            args.remove(a)
                            k,v = a.split("=",1)
                            kwargs[k] = v
                    newlines = newlines.replace("$0",str(i))
                    for i2 in range(len(args)):
                        newlines = newlines.replace("$%s"%(i2+1),args[i2])
                    for k in kwargs:
                        newlines = newlines.replace("$%s"%k,kwargs[k])
                    newlines = newlines.split("\n")
                    # Insert in reverse so the body ends up in order.
                    for l in reversed(newlines):
                        lines.insert(i+1,l)
            i += 1
    def open_script(self,name,macros=True,ext=".txt"):
        """Load a game script: resolve includes, gather macros from core,
        engine and game folders, expand macro calls, and return the lines."""
        lines = self.raw_lines(name,ext,use_unicode=True)
        reallines = []
        block_comment = False
        for line in lines:
            line = line.strip()
            # (Block-comment support below was disabled intentionally.)
            #~ if line.startswith("###"):
                #~ if block_comment:
                    #~ block_comment = False
                #~ else:
                    #~ block_comment = True
                #~ continue
            #~ if block_comment:
                #~ continue
            if macros and line.startswith("include "):
                # Inline the included script (without re-expanding macros).
                reallines.extend(self.open_script(line[8:].strip(),False))
            else:
                reallines.append(line)
        lines = reallines
        the_macros = {}
        # Engine-wide macros shipped under core/macros.
        for f in os.listdir("core/macros"):
            if f.endswith(".mcro"):
                mlines = self.raw_lines("core/macros/"+f,"","",True)
                parse = self.parse_macros(mlines)
                the_macros.update(parse)
        self.game = self.game.replace("\\","/")
        case = self.game
        game = self.game.rsplit("/",1)[0]
        # Game-level macros first, then case-level ones override them.
        for pth in [game,case]:
            if os.path.exists(pth+"/macros.txt"):
                the_macros.update(self.parse_macros(self.raw_lines("macros.txt","",start=pth,use_unicode=True)))
            for f in os.listdir(pth):
                if f.endswith(".mcro"):
                    the_macros.update(self.parse_macros(self.raw_lines(f,"",start=pth,use_unicode=True)))
        if macros:
            # Macros defined inline in the script take highest precedence.
            the_macros.update(self.parse_macros(lines))
        self.replace_macros(lines,the_macros)
        self.macros = the_macros
        return lines
def open_font(self,name,size):
pth = self.search_locations("fonts",name)
return pygame.font.Font(pth,size)
    # Cache of opened fonts, keyed "<filename>.<size>" (plus ".i" for
    # image fonts -- see get_image_font).
    fonts = {}
    # Built-in defaults for the "_font_<slot>" / "_font_<slot>_size"
    # variables; parsed from the table below at class-definition time.
    deffonts = {}
    for line in """set _font_tb pwinternational.ttf
set _font_update Vera.ttf
set _font_update_size 8
set _font_tb_size 10
set _font_block arial.ttf
set _font_block_size 10
set _font_nt arial.ttf
set _font_nt_size 10
set _font_list pwinternational.ttf
set _font_list_size 10
set _font_itemset pwinternational.ttf
set _font_itemset_size 10
set _font_itemset_big arial.ttf
set _font_itemset_big_size 14
set _font_itemname pwinternational.ttf
set _font_itemname_size 10
set _font_loading arial.ttf
set _font_loading_size 16
set _font_gametitle arial.ttf
set _font_gametitle_size 16
set _font_new_resume arial.ttf
set _font_new_resume_size 14""".split("\n"):
        # Each line is "set NAME VALUE".
        args = line.split(" ")
        deffonts[args[1]] = args[2]
def get_font(self,name):
defs = {}
defs.update(self.deffonts)
defs.update(self.variables)
fn = defs.get("_font_%s"%name,"pwinternational.ttf")
size = defs.get("_font_%s_size"%name,"10")
full = fn+"."+size
if full in self.fonts:
return self.fonts[full]
font = self.open_font(fn,int(size))
self.fonts[full] = font
return font
def get_image_font(self,name):
fn = self.variables.get("_font_%s"%name,"pwinternational.ttf")
size = self.variables.get("_font_%s_size"%name,"10")
full = fn+"."+size+".i"
if full in self.fonts:
return self.fonts[full]
font = self.get_font(name)
imgfont = ImgFont("fonts/p.png",font)
self.fonts[full] = imgfont
return imgfont
def Surface(self,size,flags=0):
return pygame.Surface(size,flags)
def search_locations(self,search_path,name):
self.game = self.game.replace("\\","/")
case = self.game
game = self.game.rsplit("/",1)[0]
if os.path.exists(case+"/"+search_path+"/"+name):
return case+"/"+search_path+"/"+name
if os.path.exists(game+"/"+search_path+"/"+name):
return game+"/"+search_path+"/"+name
if os.path.exists(search_path+"/"+name):
return search_path+"/"+name
    def _open_art_(self,name,key=None):
        """Load an art file through the registry and slice it into frames.

        Returns an ImgFrames list of frame surfaces, with frame metadata
        (.txt sidecar) attached as ._meta.  Results are cached per script in
        cur_script.imgcache.  *key* is an optional colorkey for non-alpha
        images.  Side effects: sets self.meta and self.real_path.
        """
        # Cache hit: restore the metadata/path side effects and return.
        if self.cur_script and self.cur_script.imgcache.has_key(name):
            img = self.cur_script.imgcache[name]
            self.meta = img._meta
            self.real_path = img.real_path
            return img
        self.meta = meta()
        pre = "data/art/"
        # Look for a metadata sidecar: "name.txt", then with the art
        # extension stripped.
        textpath = self.registry.lookup(pre+name+".txt",True)
        if not textpath:
            textpath = self.registry.lookup(pre+name.rsplit(".",1)[0]+".txt",True)
        print "lookup",pre+name
        artpath = self.registry.lookup((pre+name).replace(".zip/","/"))
        print pre+name+".txt",artpath,textpath
        if textpath:
            try:
                f = self.registry.open(textpath)
                self.meta.load_from(f)
            except:
                import traceback
                traceback.print_exc()
                raise art_error("Art textfile corrupt:"+pre+name[:-4]+".txt")
        print self.registry.open(artpath)
        texture = pygame.image.load(self.registry.open(artpath),artpath)
        # Per-pixel alpha images keep their alpha; others may use a colorkey.
        if texture.get_flags()&pygame.SRCALPHA:
            texture = texture.convert_alpha()
        else:
            texture = texture.convert()
        if key:
            texture.set_colorkey(key)
        # Slice the sheet into meta.length frames laid out in a
        # meta.horizontal x meta.vertical grid.
        img = []
        x = 0
        y = 0
        width,height = texture.get_size()
        incx = width//self.meta.horizontal
        incy = height//self.meta.vertical
        for frame in range(self.meta.length):
            img.append(texture.subsurface([[x,y],[incx,incy]]))
            x+=incx
            if x>=width:
                x=0
                y+=incy
        img = ImgFrames(img)
        img._meta = self.meta
        img.real_path = self.real_path = artpath
        if self.cur_script:
            self.cur_script.imgcache[name] = img
        return img
    def open_art(self,name,key=None):
        """Try to open an art file. Name has no extension.
        Will open gif, then png, then jpg. Returns list of
        frame images.

        Tries the bare name first (in case it already has an extension),
        then each known image extension; raises art_error when every
        candidate fails.
        """
        self.real_path = None
        tries = [name]
        for ext in ext_for(["image"]):
            tries.append(name+ext)
        for t in tries:
            try:
                return self._open_art_(t,key)
            except (IOError,ImportError,pygame.error,TypeError):
                print "there was a typeerror"
                pass
        # None of the candidates loaded -- report the last failure.
        import traceback
        traceback.print_exc()
        print "raising corrupt or missing art file"
        raise art_error("Art file corrupt or missing:"+name)
    def init_sound(self,reset=False):
        """(Re)initialize the pygame mixer.

        Returns True when the mixer is usable.  Sets self.sound_init to 1 on
        success or -1 on failure so later calls can skip the work.  Also
        resets the per-sound rate limiter used by play_sound.
        """
        self.sound_repeat_timer = {}
        # Minimum gap between repeats of the same sound; longer on Android
        # where audio latency is higher.
        self.min_sound_time = 0.01
        if android:
            self.min_sound_time = 0.1
        if reset or not self.sound_init:
            self.snds = {}
            # Tear down any half-initialized mixer first (best-effort).
            try:
                mixer.stop()
                mixer.quit()
            except:
                pass
            try:
                mixer.pre_init(self.sound_format, self.sound_sign*self.sound_bits, 2, self.sound_buffer)
                mixer.init()
                self.sound_init = 1
                self.music_volume = self._music_vol
                return True
            except:
                self.sound_init = -1
        if self.sound_init==1: return True
        return False
    def get_path(self,track,type,pre=""):
        """Resolve a media file name to a path.

        Candidate extensions for *type* are tried (when *track* has none),
        searching the case folder, the game folder, then the engine root.
        """
        tries = [track]
        #Unknown extension, make sure to check all extension types
        if noext(track)==track:
            for ext in ext_for([type]):
                tries.insert(0,track+ext)
        #Get parent game folder, in case we are in a case folder
        game = self.game.replace("\\","/").rsplit("/",1)[0]
        if pre: pre = "/"+pre+"/"
        else: pre = "/"
        for t in tries:
            if os.path.exists(self.game+pre+t):
                return self.game+pre+t
            if os.path.exists(game+pre+t):
                return game+pre+t
            if os.path.exists(pre[1:]+t):
                return pre[1:]+t
        # NOTE(review): this fallback keeps the leading "/" (e.g.
        # "/data/music/track"), unlike the pre[1:] form above -- callers
        # recover because their load attempts are wrapped in try/except, but
        # confirm whether pre[1:]+track was intended.
        return pre+track
def open_music(self,track,pre="data/music"):
p = self.get_path(track,"music",pre)
if not p:
return False
try:
mixer.music.load(p)
return True
except:
import traceback
traceback.print_exc()
return False
def open_movie(self,movie):
if not pymovie:
raise script_error("No movie player component in this pywright")
movie = self.get_path(movie,"movie","movies")
try:
mov = pymovie.Movie(movie)
return mov
except:
import traceback
traceback.print_exc()
raise art_error("Movie is missing or corrupt:"+movie)
    def list_casedir(self):
        """List the files in the current game/case directory."""
        return os.listdir(self.game)
    def play_sound(self,name,wait=False,volume=1.0,offset=0,frequency=1,layer=0):
        """Play a sound effect, with caching and rate limiting.

        Returns the mixer channel, or None when sound is disabled, the sound
        was rate-limited, or loading failed.  Sounds on layer 0 stop any
        previous playback of the same sound first.
        (wait/offset/frequency are accepted but unused in this code path.)
        """
        #self.init_sound()
        if self.sound_init == -1 or not self.sound_volume or self.mute_sound: return
        # Rate-limit rapid repeats of the same sound name.
        if name in self.sound_repeat_timer:
            if time.time()-self.sound_repeat_timer[name]<self.min_sound_time:
                return
        self.sound_repeat_timer[name] = time.time()
        path = self.get_path(name,"sound","data/sfx")
        if self.snds.get(path,None):
            snd = self.snds[path]
        else:
            try:
                # The pygame mixer can't decode mp3 sound effects; use the
                # audiere backend for those when it is available.
                if path.endswith(".mp3") and audiere:
                    snd = aud.open_file(path)
                else:
                    snd = mixer.Sound(path)
            except:
                import traceback
                traceback.print_exc()
                return
            self.snds[path] = snd
        if not layer:
            snd.stop()
        # pygame sounds take set_volume(); audiere objects expose .volume.
        try:
            snd.set_volume(float(self.sound_volume/100.0)*volume)
        except:
            snd.volume = (self.sound_volume/100.0)*volume
        channel = snd.play()
        return channel
    def play_music(self,track=None,loop=0,pre="music",reset_track=True):
        """Start (or stop) background music.

        A falsy *track* stops playback.  When *reset_track* is true the
        track name is stored in the _music_loop variable so music_update can
        restart it when it finishes.
        """
        print self.music_volume,self.variables.get("_music_fade",None)
        if reset_track:
            assets.variables["_music_loop"] = track
        self.init_sound()
        if self.sound_init == -1 or self.mute_sound: return
        # Remembered so save/load and music_update can restore playback.
        self._track=track
        self._loop=loop
        if track:
            # open_music loads the file; track becomes a success flag here.
            track = self.open_music(track,pre)
            if track:
                try:
                    mixer.music.play()
                except:
                    import traceback
                    traceback.print_exc()
        else:
            self.stop_music()
    def stop_music(self):
        """Halt music playback and clear the remembered track/loop state."""
        if self.sound_init == -1: return
        self._track = None
        self._loop = 0
        # Best-effort: the mixer may not be initialized at all.
        try:
            mixer.music.stop()
        except:
            pass
    def music_update(self):
        """Restart the looping track when the music channel goes idle."""
        mcb = mixer.music.get_busy
        # Android builds route music through a regular channel.
        if android:
            mcb = mixer.music_channel.get_busy
        if getattr(self,"_track",None) and not mcb():
            if assets.variables.get("_music_loop",None):
                self.play_music(assets.variables["_music_loop"],self._loop)
    def pause_sound(self):
        """Pause all sound-effect channels and the music stream."""
        mixer.pause()
        mixer.music.pause()
    def resume_sound(self):
        """Resume all sound-effect channels and the music stream."""
        mixer.unpause()
        mixer.music.unpause()
    def set_emotion(self,e):
        """Sets the emotion of the current portrait.

        When no portrait is on screen, one is created for the current
        character with the blink variant of emotion *e* first.
        """
        if not self.portrait:
            self.add_portrait(self.character+"/"+e+"(blink)")
        if self.portrait:
            self.portrait.set_emotion(e)
flash = 0 #Tells main to add a flash object
flashcolor = [255,255,255]
shakeargs = 0 #Tell main to add a shake object
def get_stack_top(self):
try:
return self.stack[-1]
except:
return None
cur_script = property(get_stack_top)
px = 0
py = 0
pz = None
    def gportrait(self):
        """Resolve the currently speaking portrait.

        The _speaking variable may hold a portrait object directly, or an
        identifier.  Identifiers are matched first against portrait id_names,
        then against character names; falls back to the first live portrait,
        then None.
        """
        id_name = self.variables.get("_speaking","")
        if isinstance(id_name,portrait) or not id_name:
            return id_name
        ports = []
        # First pass: match by id_name (collecting live portraits as we go).
        for p in self.cur_script.obs:
            if isinstance(p,portrait) and not getattr(p,"kill",0):
                ports.append(p)
                if getattr(p,"id_name",None)==id_name: return p
        # Second pass: match by character name.
        for p in self.cur_script.obs:
            if isinstance(p,portrait) and not getattr(p,"kill",0):
                ports.append(p)
                if getattr(p,"charname",None)==id_name: return p
        if ports: return ports[0]
        return None
    portrait = property(gportrait)
    def add_portrait(self,name,fade=False,stack=False,hide=False):
        """Create a portrait ("character/emotion") and add it to the current
        script's object list.

        Unless *stack* is set, all existing portraits are killed first.
        The class-level px/py offsets are applied, and a pending pz override
        is consumed.  The new portrait becomes the _speaking portrait.
        """
        if hide: stack = True
        # Rebind names: "assets" is this Assets object, "self" the current
        # script -- the rest of the method reads with script-local code.
        assets = self
        self = self.cur_script
        if not stack: [(lambda o:setattr(o,"kill",1))(o) for o in self.obs if isinstance(o,portrait)]
        assets.variables["_speaking"] = None
        p = portrait(name,hide)
        p.pos[0] += assets.px
        p.pos[1] += assets.py
        if fade:
            p.fade = 0
        # pz is a one-shot override; clear it after use.
        if assets.pz is not None:
            p.z = assets.pz
            assets.pz = None
        self.obs.append(p)
        assets.variables["_speaking"] = p.id_name
        if stack: p.was_stacked = True
        return p
    def clear(self):
        """Reset per-game state: variables, court-record items, music,
        script lists, and the font cache."""
        if not hasattr(self,"variables"):
            self.variables = Variables()
        self.variables.clear()
        # Drain in place so other references to the items list stay valid.
        while assets.items:
            assets.items.pop(0)
        assets.stop_music()
        assets.lists = {}
        self.fonts = {}
def save(self):
self.last_autosave = time.time()
props = {}
for reg in ["character","_track","_loop","lists"]:
if hasattr(self,reg):
props[reg] = getattr(self,reg)
#save items
items = []
for x in self.items:
items.append({"id":x.id,"page":x.page})
props["items"] = items
#save variables
vars = {}
for x in self.variables:
v = self.variables[x]
if x == "_speaking" and hasattr(v,"id_name"):
vars[x] = v.id_name
else:
vars[x] = v
props["variables"] = vars
return ["Assets",[],props,None]
    def after_load(self):
        """Rebuild live state after a save file has been deserialized.

        Recreates the file registry, turns serialized item dicts back into
        evidence objects, restores the Variables wrapper, and resumes music.
        """
        self.registry = registry.combine_registries("./"+self.game,self.show_load)
        self.last_autosave = time.time()
        itemobs = []
        for x in self.items:
            # New saves store {"id":..., "page":...}; old saves stored ids.
            if isinstance(x,dict):
                itemobs.append(evidence(x["id"],page=x["page"]))
            else:
                itemobs.append(evidence(x))
        self.items = itemobs
        # Re-wrap the plain dict loaded from disk in a Variables object.
        v = self.variables
        self.variables = Variables()
        self.variables.update(v)
        if getattr(self,"_track",None):
            self.play_music(self._track,self._loop,reset_track=False)
    def show_load(self):
        """Draw one frame of the LOADING indicator (spinner via a random
        slash character) and sleep briefly; called repeatedly during loads."""
        self.make_screen()
        txt = "LOADING " + random.choice(["/","\\","-","|"])
        txt = assets.get_font("loading").render(txt,1,[200,100,100])
        pygame.screen.blit(txt,[50,50])
        self.draw_screen(0)
        time.sleep(0.05)
    def load_game_new(self,path=None,filename="save",hide=False):
        """Load a saved game from <game>/<filename>.ns.

        Respects the _allow_saveload variable; rejects filenames containing
        path separators.  A missing save file shows a "No game to load."
        notice instead of raising.
        """
        if not vtrue(self.variables.get("_allow_saveload","true")):
            return
        if "\\" in filename or "/" in filename:
            raise script_error("Invalid save file path:'%s'"%(filename,))
        if not hide:
            self.show_load()
        if path:
            self.game = path
        try:
            f = open(self.game+"/"+filename+".ns")
        except:
            self.cur_script.obs.append(saved(text="No game to load.",ticks=240))
            return
        save_text = f.read()
        f.close()
        self.load_game_from_string(save_text)
def convert_save_string_to_ob(self,text):
def read_oldsave(s):
return eval(s)
def read_newsave(s):
return json.loads(s)
def read_save(s):
try:
return read_newsave(s)
except:
return read_oldsave(s)
return read_save(text)
    def load_game_from_string(self,save_text):
        """Rebuild the full engine state from serialized save text.

        Each entry is [classname, args, props, dest]: the object is
        constructed (or is this Assets object for "Assets"), its props are
        set, and it is placed either on the script stack or inside a stacked
        script's object list.  Afterwards only the newest root script and
        its children are kept.
        """
        self.loading_cache = {}
        things = self.convert_save_string_to_ob(save_text)
        assets.clear()
        stack = {}
        loaded = []
        for cls,args,props,dest in things:
            if cls == "Assets":
                ob = self
            else:
                # NOTE(review): eval of the stored class name -- save files
                # are trusted input here.
                ob = eval(cls)(*args)
            for k in props:
                setattr(ob,k,props[k])
            if dest:
                cont,index = dest
                if cont == "stack":
                    stack[index] = ob
                else:
                    stack[index].obs.append(ob)
                loaded.append(ob)
        # Rebuild the stack in index order.
        keys = stack.keys()
        keys.sort()
        for k in keys:
            assets.stack.append(stack[k])
        for ob in loaded:
            ob.after_load()
        #Clear stack, keep only the last root script and its children
        d = 0
        for s in reversed(assets.stack):
            if d:
                print "removing",s
                assets.stack.remove(s)
            else:
                print "keeping",s
                if not s.parent:
                    d = 1
        self.cur_script.obs.append(saved(text="Game restored",block=False))
        self.cur_script.execute_macro("load_defaults")
        self.cur_script.execute_macro("init_court_record_settings")
    def backup(self,path,save):
        """Copy an existing save into <path>/save_backup before overwriting.

        Backups are timestamped with the original file's mtime.  For
        autosaves, old backups beyond self.autosave_keep are pruned
        (oldest first).
        """
        if not os.path.exists(path+"/"+save):
            return
        if not os.path.exists(path+"/save_backup"):
            os.mkdir(path+"/save_backup")
        f = open(path+"/"+save)
        t = f.read()
        f.close()
        f = open(path+"/save_backup/"+save+"_"+repr(os.path.getmtime(path+"/"+save)),"w")
        f.write(t)
        f.close()
        # Only autosaves are rotated; manual save backups accumulate.
        if save!="autosave.ns":
            return
        autosaves = []
        for f in os.listdir(path+"/save_backup"):
            if f.startswith(save):
                autosaves.append((f,float(f.split("_")[1])))
        autosaves.sort(key=lambda s:s[1])
        print len(autosaves)+1,self.autosave_keep
        while len(autosaves)+1>self.autosave_keep:
            p,t = autosaves.pop(0)
            print "delete",p
            os.remove(path+"/save_backup/"+p)
        print "autosaves",autosaves
    def save_game(self,filename="save",hide=False):
        """Serialize the engine and all save-worthy scripts to
        <game>/<filename>.ns as JSON.

        Disabled unless _allow_saveload (or _debug) is on; filenames with
        path separators are rejected.  The previous file is backed up first.
        """
        if not vtrue(self.variables.get("_allow_saveload","true")) and not vtrue(self.variables.get("_debug","false")):
            return
        if "\\" in filename or "/" in filename:
            raise script_error("Invalid save file path:'%s'"%(filename,))
        # Redundant after the check above, but kept as belt-and-braces.
        filename = filename.replace("/","_").replace("\\","_")+".ns"
        #Collect *things* to save
        stuff = [self.save()]
        for script in self.stack:
            if script.save_me:
                stuff.append(script.save())
        self.backup(self.game,filename)
        f = open(self.game+"/"+filename,"w")
        f.write(json.dumps(stuff,indent=4))
        f.close()
        if not hide:
            self.cur_script.obs.append(saved(block=False))
    def load_game(self,path=None,filename="save",hide=False):
        """Load a game, preferring the autosave when it is newer.

        NOTE(review): when *path* is None, chkpath is "" and check_autosave
        then tests "/autosave.ns" (filesystem root) rather than self.game --
        confirm whether self.game was intended here.
        """
        self.cur_script.imgcache.clear()
        chkpath=""
        if path is not None:
            chkpath=path+"/"
        if filename == "save":
            filename = self.check_autosave(chkpath)
        self.load_game_new(path,filename,hide)
def check_autosave(self,path):
if not os.path.exists(path+"/autosave.ns"):
return "save"
if not os.path.exists(path+"/save.ns"):
return "autosave"
mt1 = os.path.getmtime(path+"/autosave.ns")
mt2 = os.path.getmtime(path+"/save.ns")
if mt1>mt2:
return 'autosave'
return "save"
def vdefault(self,var):
"""Return default value for a variable"""
if var == "_debug":
return "off"
return None
def v(self,var,default="_NOT GIVEN_"):
"""Return a variable value, or it's default value"""
if default == "_NOT_GIVEN_":
default = self.vdefault(var)
v = self.variables.get(var,default)
return v
def vtrue(self,var,default="_NOT_GIVEN_"):
v = self.v(var,default)
return vtrue(v)
    def reset_state(self):
        """Wipe variables, stop music, and empty the script stack.

        The stack is cleared with slice assignment (in place) rather than
        rebound to a new list.
        """
        self.variables.clear()
        self.stop_music()
        self.stack[:] = []
    def quit_game(self):
        """Leave the current game and return to the start script."""
        self.reset_state()
        self.make_start_script(False)
    def reset_game(self):
        """Restart the current game from scratch."""
        # Grab the game path before reset_state clears things.
        game = self.game
        self.reset_state()
        self.start_game(game)
    def start_game(self,game,script=None,mode="casemenu"):
        """Begin a game located at *game*.

        When no *script* is given, a script named after the game folder is
        preferred, else "intro".  If the chosen script does not exist and
        mode is "casemenu", the case-selection menu is shown instead.
        """
        assets.show_load()
        gamename = game
        if "/" in game:
            gamename = game.rsplit("/",1)[1]
        print "starting game",game,gamename,script,mode
        if not script:
            print "not script",game+"/"+gamename+".txt"
            if os.path.exists(game+"/"+gamename+".txt"):
                script = gamename
            else:
                script = "intro"
        print "starting game",game,script,mode
        game = os.path.normpath(game).replace("\\","/")
        self.last_autosave = time.time()
        self.clear()
        self.game = game
        self.registry = registry.combine_registries("./"+self.game,self.show_load)
        self.stack.append(self.Script())
        # No startable script: fall back to the case menu (two backgrounds,
        # one per screen, plus the menu itself).
        if mode == "casemenu" and not os.path.exists(game+"/"+script+".txt"):
            self.cur_script.obs = [bg("main"),bg("main"),case_menu(game)]
            self.cur_script.obs[1].pos = [0,192]
            return
        print "set game to",assets.game
        self.cur_script.init(script)
        self.cur_script.execute_macro("init_defaults")
        self.cur_script.execute_macro("font_defaults")
        self.cur_script.execute_macro("load_defaults")
        self.cur_script.execute_macro("init_court_record_settings")
    def addevmenu(self):
        """Open the evidence (court record) menu for the current items.

        On art errors an in-game error message is shown instead and None is
        returned; otherwise the created menu is returned.
        """
        try:
            em = evidence_menu(self.items)
            self.cur_script.add_object(em,True)
            em.update()
        except art_error,e:
            self.cur_script.obs.append(error_msg(e.value,"",0,self.cur_script))
            import traceback
            traceback.print_exc()
            return
        return em
def addscene(self,scene):
#FIXME - assets.Script should be script.Script
s = self.Script()
s.init(scene)
self.stack.append(s)
def vtrue(variable):
    """Return True for WrightScript truth values: "on", "1", or "true"
    (case-insensitive); anything else is False."""
    return variable.lower() in ("on","1","true")
# The single global Assets instance used throughout the engine.
assets = Assets()
# Macros currently being run via subscript(), used as a recursion guard.
assets.subscripts = {}
def subscript(macro):
    """Runs a macro all the way through"""
    #FIXME - limit recursion for subscripts. kind of a hack
    if macro in assets.subscripts:
        return
    script = assets.cur_script.execute_macro(macro)
    print "start subscript",macro,getattr(script,"scene","(no scene)")
    from engine.script import interpret_scripts
    interpret_scripts()
    return
    # NOTE(review): everything below is unreachable because of the return
    # above.  It looks like the original inline interpreter loop, left in
    # place after being replaced by the interpret_scripts() call -- confirm
    # before deleting.
    assets.subscripts[macro] = 1
    while script in assets.stack:
        #~ print script.scriptlines,script.si
        #~ print script.scriptlines[script.si]
        oldb = script.buildmode
        e = script.update()
        newb = script.buildmode
        if e:
            break
        if not oldb and not newb:
            break
    print "end subscript",macro,getattr(script,"scene","(no scene)")
    del assets.subscripts[macro]
class SoundEvent(object):
    """Invisible scene object that plays a named sound once, *after* a
    delay, then removes itself."""
    kill = 0
    pri = -1000000
    def __init__(self,name,after=0,volume=1.0):
        self.name = name
        self.wait = after
        self.volume = volume
        self.z = zlayers.index(self.__class__.__name__)
    def delete(self):
        """Mark this object for removal from the scene."""
        self.kill = 1
    def update(self):
        """Count down; fire the sound and self-destruct when time is up."""
        self.wait -= assets.dt
        if self.wait > 0:
            return False
        assets.play_sound(self.name,volume=self.volume)
        self.delete()
        return False
    def draw(self,*args):
        # Nothing to render -- this object is audio-only.
        pass
def color_str(rgbstring):
    """Convert a WrightScript color string to an [r, g, b] list.

    Accepts a 3-digit form (each digit 0-9, scaled to 0-255) or a 6-digit
    hex form.  A named color is first resolved through the "color_<name>"
    variable.  Any other length yields None.
    """
    if rgbstring.startswith(" "):
        rgbstring = rgbstring[1:]
    named = assets.variables.get("color_"+rgbstring,None)
    if named is not None:
        rgbstring = named
    if len(rgbstring)==3:
        return [int((int(digit)/9.0)*255) for digit in rgbstring]
    if len(rgbstring)==6:
        return [int(rgbstring[i:i+2],16) for i in (0,2,4)]
def trans_y(y):
    """Alter y value to place us in the proper screen.

    In single-screen mode the bottom screen's coordinates are shifted up by
    one screen height (192px).
    """
    if assets.num_screens==1:
        return y-192
    return y
class ws_button(gui.button):
    """A button created from WrightScript"""
    screen_setting = ""
    id_name = "_ws_button_"
    def delete(self):
        print "deleting ws_button"
        self.kill = 1
    def getrpos(self):
        """Render position, shifted to the bottom screen when requested."""
        rpos = self.rpos[:]
        if self.screen_setting == "try_bottom":
            rpos[1] = trans_y(rpos[1])
        return rpos
    def event(self,name,pos,*args):
        # Temporarily swap in the screen-adjusted position so the base
        # widget's hit testing uses on-screen coordinates.
        orpos = self.rpos[:]
        self.rpos = self.getrpos()
        ret = super(ws_button,self).event(name,pos,*args)
        self.rpos = orpos
        return ret
    def draw(self,dest):
        # Same swap as event(): draw at the screen-adjusted position.
        orpos = self.rpos[:]
        self.rpos = self.getrpos()
        super(ws_button,self).draw(dest)
        self.rpos = orpos
class ws_editbox(gui.editbox):
    """An editbox created from WrightScript"""
    screen_setting = ""
    id_name = "_ws_button_"
    def delete(self):
        print "deleting ws_button"
        self.kill = 1
    def getrpos(self):
        """Render position, shifted to the bottom screen when requested."""
        rpos = self.rpos[:]
        if self.screen_setting == "try_bottom":
            rpos[1] = trans_y(rpos[1])
        return rpos
    def event(self,name,pos,*args):
        # Temporarily swap in the screen-adjusted position so the base
        # widget's hit testing uses on-screen coordinates.
        orpos = self.rpos[:]
        self.rpos = self.getrpos()
        ret = super(ws_editbox,self).event(name,pos,*args)
        self.rpos = orpos
        return ret
    def draw(self,dest):
        # Same swap as event(): draw at the screen-adjusted position.
        orpos = self.rpos[:]
        self.rpos = self.getrpos()
        super(ws_editbox,self).draw(dest)
        self.rpos = orpos
class sprite(gui.button):
    """An animated, positioned frame-sequence object -- the base class for
    most visible scene objects (backgrounds, portraits, evidence, ...).

    Frames come from an art sheet loaded via assets.open_art; update()
    advances the animation and update()/draw() honor loop/blink modes,
    per-frame delays and sounds, flipping, rotation and scaling.
    """
    # [min, max] milliseconds(?) of random delay between blink loops --
    # units follow assets.dt; confirm against the main loop.
    blinkspeed = [100,200]
    autoclear = False
    pri = 0
    #widget stuff
    def _g_rpos(self):
        if not hasattr(self,"pos"): return [0,0]
        return self.getpos()
    rpos = property(_g_rpos)
    width,height = [assets.sw,assets.sh]
    children = []
    # Default per-frame delay.
    spd = 6
    def getpos(self):
        """Position adjusted for bottom-screen placement."""
        pos = self.pos[:]
        if self.screen_setting == "try_bottom":
            pos[1] = trans_y(pos[1])
        return pos
    def getprop(self,p):
        """Generic property getter used by the scripting layer."""
        if p in "xy":
            return self.pos["xy".index(p)]
        if p == "frame":
            return self.x
        if p == "screen_setting":
            return self.screen_setting
        return getattr(self,p,"")
    def setprop(self,p,v):
        """Generic property setter used by the scripting layer."""
        if p in "xy":
            self.pos["xy".index(p)] = float(v)
        if p in "z":
            self.z = int(v)
        if p == "frame":
            self.x = int(v)
        if p == "screen_setting":
            self.screen_setting = v
    def delete(self):
        self.kill = 1
    def makestr(self):
        """A WrightScript string to recreate the object"""
        if not getattr(self,"name",None): return ""
        xs = ys = zs = id = ""
        if self.pos[0]: xs = "x="+str(self.pos[0])+" "
        if self.pos[1]: ys = "y="+str(self.pos[1])+" "
        #Make this better (maybe only allow layer name for z?)
        if type(self.z)==type(""):
            # NOTE(review): str has no .remove() method -- this branch would
            # raise AttributeError if z is ever a string; .replace("_layer_","")
            # was probably intended.
            if zlayers.index(self.__class__.__name__)!=zlayers.index(self.z.remove("_layer_")):
                zs = "z="+self.z
        else:
            if self.z != zlayers.index(self.__class__.__name__):
                zs = "z=_layer_"+zlayers[self.z][0]
        if not getattr(self,"id_name","$$").startswith("$$"): id = "name="+self.id_name+" "
        # Only these object kinds can be expressed as a script command.
        try:
            comm = {"bg":"bg","fg":"fg","evidence":"ev"}[self.__class__.__name__]
        except KeyError:
            return ""
        return (comm+" "+self.name.split("/",1)[1]+" "+xs+ys+zs+id).strip()
    def click_down_over(self,mp):
        pass
    def load_extra(self,m):
        """Copy animation metadata *m* onto this sprite, applying any
        one-shot or global blinkspeed overrides from script variables."""
        self.sounds = m.sounds
        self.loops = m.loops
        self.split = m.split
        self.blinkmode = m.blinkmode
        self.offsetx = m.offsetx
        self.offsety = m.offsety
        self.blipsound = m.blipsound
        self.delays = m.delays
        self.spd = m.speed
        self.blinkspeed = m.blinkspeed
        if assets.variables.get("_blinkspeed_next",""):
            # One-shot override; cleared after use.
            self.blinkspeed = [int(x) for x in assets.variables["_blinkspeed_next"].split(" ")]
            assets.variables["_blinkspeed_next"] = ""
        elif assets.variables.get("_blinkspeed_global","default")!="default":
            self.blinkspeed = [int(x) for x in assets.variables["_blinkspeed_global"].split(" ")]
    def load(self,name,key=[255,0,255]):
        """Load frames either from an art file name or from a pre-built
        frame list.  Returns self for chaining."""
        self.key = key
        if type(name)==type("") or type(name)==type(u""):
            path = ""
            self.base = assets.open_art(name,key)
            self.load_extra(assets.meta)
        else:
            # *name* is already a list of frame surfaces.
            self.base = name
            self.load_extra(meta())
        self.real_path = assets.real_path
        if self.base:
            self.width,self.height = self.base[0].get_size()
        else:
            self.width,self.height = [0,0]
        self.name = name
        self.x = 0
        self.next = self.spd
        return self
        # NOTE(review): unreachable -- an older epilogue left behind the
        # return above.  self.img is therefore only set by update(); draw()
        # tolerates that via its getattr guard.
        self.img = self.base[0]
        self.name = name
        self.x = 0
        self.next = self.delays.get(0,self.spd)
        return self
    def __init__(self,x=0,y=0,flipx=0,**kwargs):
        self.spd = int(assets.variables.get("_default_frame_delay",self.spd))
        self.loopmode = ""
        self.next = self.spd
        self.pos = [x,y]
        # Scale factor; 1 = unscaled.
        self.dim = 1
        self.z = zlayers.index(self.__class__.__name__)
        self.rot = [0,0,0]
        if kwargs.get("rotx",None): self.rot[0]=int(kwargs.get("rotx"))
        if kwargs.get("roty",None): self.rot[1]=int(kwargs.get("roty"))
        if kwargs.get("rotz",None): self.rot[2]=int(kwargs.get("rotz"))
        self.sounds = {}
        self.x = 0
        self.offsetx = 0
        self.offsety = 0
        self.loops = 0
        self.loopmode = 0
        self.flipx=flipx
        self.blinkmode = "blinknoset"
        if kwargs.get("screen",None)==2:
            self.pos[1]=other_screen(self.pos[1])
        self.screen_setting=""
        self.base = []
        self.delays = {}
        # First/last frame of the playable range; end=None means all frames.
        self.start = 0
        self.end = None
    def draw(self,dest):
        if not getattr(self,"img",None): return
        img = self.img
        if self.flipx:
            img = pygame.transform.flip(img,1,0)
        pos = self.getpos()
        if hasattr(self,"offsetx"): pos[0]+=self.offsetx
        if hasattr(self,"offsety"): pos[1]+=self.offsety
        if hasattr(self,"rot"):
            if hasattr(img,"ori"):
                # GL-backed image: hand the orientation to the renderer.
                img.ori = self.rot
            elif self.rot[2]:
                # Software rotate about the image center.
                pos[0]+=img.get_width()//2
                pos[1]+=img.get_height()//2
                img = pygame.transform.rotate(img,self.rot[2]).convert_alpha()
                pos[0]-=img.get_width()//2
                pos[1]-=img.get_height()//2
        if self.dim != 1:
            # Scale about the image center.
            os = img.get_size()
            img = pygame.transform.rotozoom(img,0,self.dim)
            ns = img.get_size()
            pos[0]+=os[0]//2-ns[0]//2
            pos[1]+=os[1]//2-ns[1]//2
        dest.blit(img,pos)
    def update(self):
        """Advance the animation by assets.dt and pick the current frame."""
        if self.next>0:
            self.next-=assets.dt
            if self.next<=0:
                # Frame boundary: fire any per-frame sound, then advance.
                if self.sounds.get(self.x,None):
                    assets.play_sound(self.sounds[self.x])
                self.x += 1
                self.next += self.delays.get(self.x,self.spd)
                end = len(self.base)
                if self.end is not None:
                    end = self.end
                if self.x>=end:
                    if self.loops and (not self.loopmode or self.loopmode=="loop"):
                        self.x = 0
                        if self.loops>1:
                            self.loops -= 1
                            if self.loops == 1:
                                self.loops = 0
                    elif self.loopmode in ["blink","blinknoset"]:
                        # Blink: rewind and wait a random interval.
                        self.x = self.start
                        self.next = random.randint(self.blinkspeed[0],self.blinkspeed[1])
                    else:
                        # Hold on the final frame.
                        self.next = -1
                        self.x-=1
                        #self.x = 0
                if self.loopmode == "stop":
                    self.loops = 0
        if self.base:
            if self.x<len(self.base):
                self.img = self.base[self.x]
from soft3d import context
class surf3d(sprite):
    """A scene object hosting a software-3D rendering context.

    Redraws its surface only when one of the context's objects is flagged
    changed, and forwards clicks inside its rect to every mesh object in
    the current script.
    """
    def __init__(self,pos,sw,sh,rw,rh):
        self.id_name = "surf3d"
        self.pos = pos
        # sw/sh: internal render size; rw/rh: on-screen size.
        self.sw,self.sh=sw,sh
        self.z = 2
        self.pri = -1000
        self.width,self.height = rw,rh
        self.context = context.SoftContext(sw,sh,rw,rh)
        self.surf = self.context.draw()
        # Frames until the next redraw check.
        self.next = 5
        self.screen_setting = ""
        self.primary = True
    def click_down_over(self,pos):
        print "click",pos
        if pos[0]>=self.pos[0] and pos[0]<=self.pos[0]+self.width and pos[1]>=self.pos[1] and pos[1]<=self.pos[1]+self.height:
            for o in assets.cur_script.obs:
                if isinstance(o,mesh):
                    o.click(pos)
    def draw(self,dest):
        dest.blit(self.surf,self.getpos())
    def update(self):
        self.next -= assets.dt
        if self.next>0:
            return
        # Only re-render when something in the 3D scene changed.
        if [x for x in self.context.objects if x.changed]:
            self.surf = self.context.draw().convert()
            [setattr(x,"changed",0) for x in self.context.objects]
        self.next = 2
class mesh(sprite):
    """A clickable 3D model attached to a surf3d context.

    The model is loaded from <game>/data/art/models/; clicks are mapped
    through the software renderer's depth buffer into UV coordinates and
    matched against registered examine regions.
    """
    def __init__(self,meshfile,pos=None,rot=None,name="surf3d"):
        # Bug fix: the defaults were the mutable lists [0,0] / [0,0,0].
        # self.pos aliases the argument and sprite.setprop mutates it in
        # place, so every mesh built with the default position shared one
        # list.  (*rot* is accepted for backward compatibility but -- as in
        # the original -- is never used; self.rot is always reset below.)
        self.pos = [0,0] if pos is None else pos
        self.z = 0
        self.pri = 0
        self.id_name = "mesh"
        # Clickable UV regions: [u, v, w, h, label].
        self.regions = []
        # Label jumped to when a click matches no region.
        self.fail = "none"
        self.examine = False
        # Accumulated z translation, clamped to [minz, maxz] by trans().
        self.dz = 0
        self.rot = [0,0,0]
        self.maxz=0
        self.minz=-150
        self.meshfile=meshfile
        self.surfname = name
        self.changed = 1
        self.screen_setting = ""
    def load(self,script=None):
        """Attach to the surf3d context named self.surfname (if present in
        the script's world) and load the mesh file into it."""
        if not script:
            script = assets.cur_script
        con = None
        for o in assets.cur_script.world.all:
            if getattr(o,"id_name",None)==self.surfname:
                con = o
                break
        if not con:
            return
        self.con = con
        path = assets.game+"/data/art/models/"
        self.ob = ob = con.context.load_object(self.meshfile,path)
        # Default placement: pushed back and stood upright.
        ob.trans(z=-100)
        ob.rot(90,0,0)
        ob.changed = 1
    def trans(self,x=0,y=0,z=0):
        """Translate the model, clamping total z movement to [minz, maxz]."""
        if self.dz+z>self.maxz:
            z = self.maxz-self.dz
        elif self.dz+z<self.minz:
            z = self.minz-self.dz
        self.dz+=z
        self.ob.trans(x,y,z)
        self.ob.changed = 1
    def click(self,pos):
        """Handle a click while in examine mode.

        Screen position is scaled into the render buffer, looked up in the
        depth buffer to recover model UV, and matched against regions.
        """
        if not self.examine:
            return
        x,y = pos
        x=int((x-self.con.pos[0])*(self.con.context.s_w/float(self.con.context.r_w)))
        y=int((y-self.con.pos[1])*(self.con.context.s_h/float(self.con.context.r_h)))
        i = y*self.con.context.s_w+x
        if i>=len(pygame.depth) or i<0:
            return
        point = pygame.depth[i][1]
        if point:
            u,v = point
            for rect in self.regions:
                if u>=rect[0] and u<=rect[0]+rect[2] and v>=rect[1] and v<=rect[1]+rect[3]:
                    label = rect[4]
                    self.goto(u,v,label)
                    return
            return self.goto(u,v,self.fail)
    def goto(self,u,v,label):
        """Leave examine mode, expose the clicked UV to the script, and jump."""
        self.examine = False
        assets.variables["_examine_clickx3d"] = str(u)
        assets.variables["_examine_clicky3d"] = str(v)
        self.regions[:] = []
        assets.cur_script.goto_result(label,backup=self.fail)
    def rotate(self,axis,amount):
        """Rotate the model by *amount* degrees around axis index 0/1/2."""
        r = [0,0,0]
        r[axis] = amount
        self.ob.rot(*r)
        self.ob.changed = 1
    def draw(self,dest):
        # Rendering happens through the surf3d context, not here.
        pass
    def update(self):
        pass
class fadesprite(sprite):
    """A sprite supporting alpha fade, color inversion, tint, and greyscale.

    When any effect is active, frames are copied into "mock" surfaces and
    manipulated per draw -- via numpy surfarrays when pygame.use_numpy is
    set, else via a slower colorkey+set_alpha fallback.
    """
    real_path=None
    invert = 0
    tint = None
    greyscale = 0
    def setfade(self,val=255):
        """Set the fade level, clamped to 0..255; returns self for chaining."""
        if val<0: val = 0
        if val>255: val = 255
        if getattr(self,"fade",None) is None: self.fade = 255
        self.lastfade = self.fade
        self.fade = val
        return self
    def draw(self,dest):
        if getattr(self,"fade",None) is None: self.fade = 255
        if self.fade == 0:
            return
        # Fast path: fully opaque with no effects.
        if self.fade == 255 and not self.invert and not self.tint and not self.greyscale:
            return sprite.draw(self, dest)
        # Lazily build the effect surfaces the first time they are needed.
        if getattr(self,"img",None) and not getattr(self,"mockimg",None):
            if pygame.use_numpy:
                self.mockimg = self.img.convert_alpha()
                self.mockimg_base = [x.convert_alpha() for x in self.base]
                # Pristine alpha arrays, scaled by fade each draw.
                self.origa_base = [pygame.surfarray.array_alpha(x) for x in self.mockimg_base]
                self.draw_func = self.numpydraw
            else:
                self.draw_func = self.mockdraw
                # Colorkeyed copy whose whole-surface alpha can be set.
                ximg = pygame.Surface(self.img.get_size())
                ximg.fill([255,0,255])
                ximg.blit(self.img,[0,0])
                ximg = ximg.convert()
                ximg.set_colorkey([255,0,255])
                self.mockimg = ximg
        # Pristine RGB arrays, needed once tint/invert come into play.
        if (getattr(self,"tint",None) or getattr(self,"invert",None)) and not getattr(self,"origc_base",None) and pygame.use_numpy:
            self.origc_base = [pygame.surfarray.array3d(x) for x in self.mockimg_base]
            # print "set base foo",self.origc_base
        try:
            self.draw_func(dest)
        except Exception:
            if pygame.use_numpy:
                # Disable numpy fading permanently and fall back next frame.
                pygame.use_numpy = False
                self.mockimg = None
                import traceback
                traceback.print_exc()
                raise art_error("Problem with fading code, switching to older fade technology. Did you install NumPy?")
    def numpydraw(self,dest):
        """Apply fade/invert/tint/greyscale via numpy surfarrays, then draw."""
        if not self.mockimg_base:
            return
        # Per-pixel alpha = original alpha scaled by the fade level.
        px = pygame.surfarray.pixels_alpha(self.mockimg_base[self.x])
        px[:] = self.origa_base[self.x][:]*(self.fade/255.0)
        del px
        px = pygame.surfarray.pixels3d(self.mockimg_base[self.x])
        # Restore pristine color before re-applying invert/tint.
        if getattr(self,"origc_base",None):
            px[:] = self.origc_base[self.x]
        if self.invert:
            px[:] = 255-px[:]
            self.linvert = self.invert
        if self.tint:
            # NumPy 0.10 same_kind cast fix.
            px[:,:,0] = px[:,:,0] * self.tint[0]
            px[:,:,1] = px[:,:,1] * self.tint[1]
            px[:,:,2] = px[:,:,2] * self.tint[2]
            self.lt = self.tint
        del px
        # Temporarily swap in the effect frame for the base-class draw.
        img = self.img
        self.img = self.mockimg_base[self.x]
        if self.greyscale:
            self.lgs = self.greyscale
            # Convert via an 8-bit palette: average each palette entry,
            # preserving the magenta colorkey.
            ximg = pygame.Surface(self.img.get_size())
            ximg.fill([255,0,255])
            ximg.blit(self.img,[0,0])
            ximg = ximg.convert(8)
            pal = ximg.get_palette()
            gpal = []
            for col in pal:
                if col == (255,0,255):
                    gpal.append(col)
                    continue
                avg = (col[0]+col[1]+col[2])//3
                gpal.append([avg,avg,avg])
            ximg.set_palette(gpal)
            ximg = ximg.convert()
            ximg.set_colorkey([255,0,255])
            yimg = pygame.Surface(self.img.get_size()).convert_alpha()
            yimg.fill([0,0,0,0])
            yimg.blit(ximg,[0,0])
            self.img = yimg
        sprite.draw(self,dest)
        self.img = img
    def mockdraw(self, dest):
        """Non-numpy fallback: whole-surface alpha on the colorkeyed copy."""
        self.mockimg.set_alpha(self.fade)
        img = self.img
        self.img = self.mockimg
        sprite.draw(self,dest)
        self.img = img
    def update(self):
        sprite.update(self)
class graphic(fadesprite):
    """A fadesprite that loads its art file *name* immediately on creation."""
    def __init__(self,name,*args,**kwargs):
        fadesprite.__init__(self,*args,**kwargs)
        self.load(name)
class portrait(sprite):
autoclear = True
def get_self_image(self):
if hasattr(self,"img"):
return self.cur_sprite.img
img = property(get_self_image)
def makestr(self):
"""A WrightScript string to recreate the object"""
xs = ys = zs = id = emo = ""
if self.emoname != "normal": emo = "e="+self.emoname+" "
nt = ""
if self.pos[0]: xs = "x="+str(self.pos[0])+" "
if self.pos[1]: ys = "y="+str(self.pos[1])+" "
if type(self.z)==type(""):
if zlayers.index(self.__class__.__name__)!=zlayers.index(self.z.remove("_layer_")):
zs = "z="+self.z
else:
if self.z != zlayers.index(self.__class__.__name__):
zs = "z=_layer_"+zlayers[self.z][0]
if not getattr(self,"id_name","$$").startswith("$$"): id = "name="+self.id_name+" "
if getattr(self,"nametag",self.charname).strip("\n")!=self.charname: nt = "nametag="+self.nametag+" "
hide = {True: " hide", False: ""}[bool(self.hide)]
stack = {True: " stack", False: ""}[bool(getattr(self,"was_stacked",False))]
return ("char "+self.charname+" "+xs+ys+zs+id+emo+nt+hide+stack+getattr(self,"extrastr","")).strip()
f = open("core/blipsounds.txt")
bliplines = f.readlines()
f.close()
male = bliplines[1].strip().split(" ")
female = bliplines[4].strip().split(" ")
def __init__(self,name=None,hide=False):
self.talk_sprite = fadesprite()
self.blink_sprite = fadesprite()
self.combined = fadesprite()
super(portrait,self).__init__()
self.init(name,hide)
def init_sounds(self):
self.clicksound = assets.variables.get("char_defsound","blipmale.ogg")
if self.charname in self.female:
self.clicksound = "blipfemale.ogg"
if self.charname in self.male:
self.clicksound = "blipmale.ogg"
if hasattr(self,"talk_sprite"):
if getattr(self.talk_sprite,"blipsound",None):
self.clicksound = self.talk_sprite.blipsound
if "char_"+self.charname+"_defsound" in assets.variables:
self.clicksound = assets.variables["char_"+self.charname+"_defsound"]
    def init(self,name=None,hide=False,blinkname=None,init_basic=True):
        """Load portrait art from a "charname/emotion(mode)" name.

        name       -- "charname/emotion" with optional "(talk)", "(blink)"
                      or "(combined)" suffix; None just refreshes sounds
        hide       -- if truthy, skip art loading entirely
        blinkname  -- optional alternate emotion used only for blink frames
        init_basic -- also (re)set z layer, position, rotation and nametag

        A "(combined)" sprite sheet is split in half: first half becomes
        the talk animation, second half the blink animation.
        """
        if not name: return self.init_sounds()
        charname,rest = name.split("/",1)
        if init_basic:
            self.z = zlayers.index(self.__class__.__name__)
            self.pri = ulayers.index(self.__class__.__name__)
            self.pos = [0,0]
            self.rot = [0,0,0]
            self.name = name
            self.id_name = charname
            self.nametag = assets.variables.get("char_"+charname+"_name",charname.capitalize())+"\n"
        #super(portrait,self).__init__()
        emo = rest
        mode = ""
        # Peel a "(combined)"/"(blink)"/"(talk)" suffix off the emotion name.
        if emo.endswith("(combined)"):
            emo,mode = emo.rsplit("(combined)",1)[0],"combined)"
        elif emo.endswith("(blink)"):
            emo,mode = emo.rsplit("(blink)",1)[0],"blink)"
        elif emo.endswith("(talk)"):
            emo,mode = emo.rsplit("(talk)",1)[0],"talk)"
        blinkemo = emo
        blinkmode = mode
        if blinkname:
            blinkemo = blinkname
            blinkmode = "blink"
        # mode was stored with a trailing ")"; strip it.
        mode = mode[:-1]
        self.charname = charname
        self.emoname = emo
        self.blinkemo = blinkemo
        self.modename = mode
        self.supermode = "lipsync"
        # No emotion given: keep the portrait hidden until one is set.
        if not self.emoname: hide = "wait"
        self.hide = hide
        if self.hide: return self.init_sounds()
        self.talk_sprite = fadesprite()
        self.blink_sprite = fadesprite()
        self.combined = fadesprite()
        def shrink(t):
            # Normalize an art path to be relative to data/art/ (minus extension).
            if not t.startswith("/"):
                t = "/"+t
            return t[t.rfind("/data/art/")+5:-4]
        def loadfrom(path):
            # Resolve blink/talk/combined art under path; raises if none found.
            if not path.endswith("/"):path+="/"
            print ">",blinkemo
            print path+blinkemo+"(blink)"
            blink = assets.registry.lookup(path+blinkemo+"(blink)")
            if blink and not hasattr(self.blink_sprite,"img"):
                self.blink_sprite.load(blink.rsplit("data/art/",1)[1][:-4])
            talk = assets.registry.lookup(path+emo+"(talk)")
            if talk and not hasattr(self.talk_sprite,"img"):
                self.talk_sprite.load(talk.rsplit("data/art/",1)[1][:-4])
            combined = assets.registry.lookup(path+emo+"(combined)")
            if combined and not hasattr(self.combined,"img"):
                self.combined.load(combined.rsplit("data/art/",1)[1][:-4])
                # Split the combined sheet: first half talks, second half blinks.
                if not self.combined.split:
                    self.combined.split = len(self.combined.base)//2
                self.talk_sprite.load(self.combined.base[:self.combined.split])
                self.talk_sprite.name = self.combined.name+"_talk"
                self.blink_sprite.load(self.combined.base[self.combined.split:])
                self.blink_sprite.name = self.combined.name+"_blink"
                self.talk_sprite.loops = 1
                self.blink_sprite.loops = 1
                self.talk_sprite.delays = self.combined.delays
                self.blink_sprite.delays = self.combined.delays
                self.blink_sprite.blinkmode = self.combined.blinkmode
                self.blink_sprite.blinkspeed = self.combined.blinkspeed
                self.talk_sprite.offsetx = self.combined.offsetx
                self.talk_sprite.offsety = self.combined.offsety
                self.blink_sprite.offsetx = self.combined.offsetx
                self.blink_sprite.offsety = self.combined.offsety
            # Bare emotion art (no suffix) acts as a fallback blink sprite.
            available = assets.registry.lookup(path+emo)
            if available and not hasattr(self.blink_sprite,"img"):
                self.blink_sprite.load(available.rsplit("data/art/",1)[1][:-4])
                if self.blink_sprite.blinkmode=="blinknoset": self.blink_sprite.blinkmode = "stop"
            print blink,talk,combined,available
            if blink or talk or combined or available:
                return True
            raise art_error("Character folder %s not found"%charname)
        loadfrom("data/art/port/"+charname)
        # If only one of talk/blink exists, mirror it into the other slot.
        if hasattr(self.talk_sprite,"img") and not hasattr(self.blink_sprite,"img"):
            self.blink_sprite.img = i = self.talk_sprite.img
            self.blink_sprite.base = [i]
            self.blink_sprite.blinkmode = "stop"
            #self.blink_sprite.load(self.talk_sprite.base[:])
            #self.blink_sprite.base = [self.blink_sprite.base[0]]
        if hasattr(self.blink_sprite,"img") and not hasattr(self.talk_sprite,"img"):
            self.talk_sprite.img = i = self.blink_sprite.img
            self.talk_sprite.base = [i]
            #self.talk_sprite.load(self.blink_sprite.base[:])
            #self.talk_sprite.base = [self.talk_sprite.base[0]]
        self.blink_sprite.loopmode = self.blink_sprite.blinkmode
        self.blink_sprite.spd = int(assets.variables.get("_default_port_frame_delay",self.talk_sprite.spd))
        self.talk_sprite.spd = int(assets.variables.get("_default_port_frame_delay",self.talk_sprite.spd))
        if hasattr(self.talk_sprite,"img") and hasattr(self.blink_sprite,"img"):
            if mode=="blink":self.set_blinking()
            #if mode=="talk":self.set_talking()
        else:
            raise art_error("Can't load character "+charname+"/"+emo+"("+mode+")")
        self.blinkspeed = self.blink_sprite.blinkspeed
        #if init_basic:
        self.init_sounds()
    def setprop(self,p,v):
        """Set a scripted property: x/y position, z layer, or mode names.

        NOTE(review): the numeric branches use substring membership
        ('p in "xy"'), so p="" or p="xy" would also match — assumed to
        only ever receive single-letter keys from the script engine.
        """
        if p in "xy":
            self.pos["xy".index(p)] = float(v)
        if p in "z":
            self.z = int(v)
        if p in ["supermode","mode"]:
            setattr(self,p,v)
    def set_dim(self,amt):
        # Dim both sprites together so talk/blink swaps keep the same look.
        self.blink_sprite.dim = amt
        self.talk_sprite.dim = amt
    def get_dim(self):
        return self.blink_sprite.dim
    dim = property(get_dim,set_dim)
    def draw(self,dest):
        """Blit the active (talk or blink) sprite, bottom-centered on screen."""
        if not self.hide and getattr(self.cur_sprite,"img",None):
            # Propagate per-portrait color effects to the sprite being drawn.
            self.cur_sprite.tint = self.tint
            self.cur_sprite.greyscale = self.greyscale
            self.cur_sprite.invert = self.invert
            pos = self.pos[:]
            # Center horizontally (honoring the sprite's x offset) and
            # anchor the art to the bottom edge of the screen.
            pos[0] += (assets.sw-(self.cur_sprite.offsetx+self.cur_sprite.img.get_width()))//2
            pos[1] += (assets.sh-(self.cur_sprite.img.get_height()-self.cur_sprite.offsety))
            self.cur_sprite.pos = pos
            self.cur_sprite.rot = self.rot[:]
            self.cur_sprite.draw(dest)
    def delete(self):
        # Mark for removal; the object-list reaper checks .kill.
        self.kill = 1
    def update(self):
        # Only the visible sprite animates; returns its "still busy" flag.
        if not self.hide and getattr(self.cur_sprite,"img",None):
            return self.cur_sprite.update()
    def set_emotion(self,emo):
        """Switch to a new emotion (reloads art); no-op while explicitly hidden."""
        if self.hide and self.hide != "wait": return
        if not emo: return
        self.hide = False
        self.init(self.charname+"/"+emo+"("+self.modename+")",init_basic=False)
    def set_blink_emotion(self,emo):
        """Swap only the blink-frame art while keeping the talk emotion."""
        if self.hide and self.hide != "wait": return
        if not emo: return
        self.hide = False
        self.init(self.charname+"/"+self.emoname+"("+self.modename+")",blinkname=emo,init_basic=False)
    def set_talking(self):
        self.modename = "talk"
    def set_blinking(self):
        self.modename = "blink"
    def set_single(self):
        # Force the idle sprite regardless of textbox lipsync activity.
        self.modename = "blink"
        self.supermode = "blink"
    def set_lipsync(self):
        # Default: the textbox toggles talk/blink as characters print.
        self.supermode = "lipsync"
        self.modename = "blink"
    def get_current_sprite(self):
        """Return the sprite for the current mode; blink is the fallback."""
        if self.supermode == "lipsync":
            mode = self.modename
        else:
            mode = self.supermode
        if mode == "blink":
            return self.blink_sprite
        if mode == "talk":
            return self.talk_sprite
        if mode == "loop":
            self.blink_sprite.loopmode = "loop"
            return self.blink_sprite
        return self.blink_sprite
    cur_sprite = property(get_current_sprite)
    def setfade(self,*args):
        # Keep both sprites' fade levels in sync.
        self.blink_sprite.setfade(*args)
        self.talk_sprite.setfade(*args)
    # Color-effect defaults; instances override these via setprop/scripts.
    invert = 0
    tint = None
    greyscale = 0
class evidence(fadesprite):
    """An evidence/profile thumbnail for the court record.

    The id doubles as a script-variable prefix: <id>_pic, <id>_name and
    <id>_desc override art, display name and description.  A trailing
    "$" on the name selects the second (profiles) page of the record.
    """
    autoclear = True
    def __init__(self,name="ev",**kwargs):
        # Default placement/priority; setdefault keeps caller overrides.
        # (Replaces the deprecated dict.has_key(), removed in Python 3.)
        kwargs.setdefault("x",5)
        kwargs.setdefault("y",5)
        kwargs.setdefault("pri",50)
        if not kwargs.get("page",None):
            pages = assets.variables.get("_ev_pages","evidence profiles").split(" ")
            if len(pages)==1:
                pages = pages + pages
            # "$"-suffixed ids go to the second page (profiles).
            if name.endswith("$"):
                kwargs["page"] = pages[1]
            else:
                kwargs["page"] = pages[0]
        self.page = kwargs["page"]
        super(evidence,self).__init__(**kwargs)
        self.id = name
        self.reload()
    def reload(self):
        """(Re)load art and metadata from script variables."""
        artname = assets.variables.get(self.id+"_pic",self.id.replace("$",""))
        try:
            self.load("ev/"+artname)
        except Exception:
            # Missing/bad art: log it and fall back to a white placeholder.
            import traceback
            traceback.print_exc()
            self.img = assets.Surface([16,16])
            self.img.fill([255,255,255])
        try:
            self.small = pygame.transform.smoothscale(self.img,[36,36])
            self.scaled = pygame.transform.smoothscale(self.img,[70,70])
        except Exception:
            # smoothscale needs 24/32-bit surfaces; plain scale always works.
            self.small = pygame.transform.scale(self.img,[36,36])
            self.scaled = pygame.transform.scale(self.img,[70,70])
        self.setfade()
        self.name = assets.variables.get(self.id+"_name",self.id.replace("$",""))
        self.desc = assets.variables.get(self.id+"_desc",self.id.replace("$",""))
class penalty(fadesprite):
    """Animated courtroom health bar.

    Reads/writes the current value through the script variable `var`
    (default "penalty"), animates one point per tick toward `end`, and
    optionally flashes the chunk about to be lost (`flash_amount`).
    """
    def __init__(self,end=100,var="penalty",flash_amount=None):
        self.id_name = "penalty"
        self.var = var
        super(penalty,self).__init__()
        # healthbar strip layout: caps at x=0 and x=82, a "good" pixel
        # column at x=2 and a "bad" column at x=66, each 1px wide.
        self.gfx = assets.open_art("general/healthbar",key=[255,0,255])[0]
        self.left = self.gfx.subsurface([[0,0],[2,14]])
        self.right = self.gfx.subsurface([[82,0],[2,14]])
        self.good = self.gfx.subsurface([[2,0],[1,14]])
        self.bad = self.gfx.subsurface([[66,0],[1,14]])
        self.pos = [0,0]
        if end<0: end = 0
        self.end = end
        # Ticks to linger after reaching the target before self-deleting.
        self.delay = 50
        # Never flash more than the bar currently holds.
        self.flash_amount = min(flash_amount, self.gv()) if flash_amount else flash_amount
        self.flash_color = [255,242,129,150]
        self.flash_dir = 1
        self.change = 0
    def gv(self):
        """Current bar value from script variables (defensively parsed)."""
        v = assets.variables.get(self.var,100)
        try:
            v = int(v)
        except:
            v = 100
        return v
    def sv(self,val):
        # Script variables are stored as strings.
        assets.variables[self.var] = str(val)
    def draw(self,dest):
        """Render the bar pixel-column by pixel-column, plus the flash overlay."""
        v = self.gv()
        x = assets.sw-110
        dest.blit(self.left,[x,2]); x+=2
        if v<0: v=0
        for i in range(v):
            dest.blit(self.good,[x,2]); x+=1
        for i in range(100-v):
            dest.blit(self.bad,[x,2]); x += 1
        dest.blit(self.right,[x,2])
        if self.flash_amount:
            # Pulsing translucent overlay over the region about to be lost.
            fx = assets.sw-108+v-self.flash_amount
            fw = self.flash_amount
            fy = 4
            fh = 10
            surf = pygame.Surface([fw,fh]).convert_alpha()
            surf.fill(self.flash_color)
            dest.blit(surf,[fx,fy])
            self.flash_color[3]+=self.flash_dir*8
            if self.flash_color[3]>200 or self.flash_color[3]<100:
                self.flash_dir = -self.flash_dir
        # NOTE(review): draw() writes the clamped value back — side effect
        # in a render path; kept as-is since update() depends on it.
        self.sv(v)
    def update(self):
        """Step the value toward self.end; count down the linger delay after."""
        self.change += assets.dt
        while self.change>1:
            self.change -= 1
            v = self.gv()
            if self.end<v:
                v -= 1
                if v<0: v = 0
            elif self.end>v:
                v += 1
                if v>100: v = 100
            elif self.delay:
                self.delay -= 1
                if self.delay==0:
                    self.die()
                    self.delete()
            self.sv(v)
        if self.delay:
            return True
    def die(self):
        """On reaching zero, fire the optional "_penalty_script" handler."""
        if self.gv()<=0:
            print "bad penalty about to die"
            ps = assets.variables.get("_penalty_script","")
            if ps:
                args = []
                # "_penalty_script" may be "scriptname label".
                if " " in ps:
                    ps,label = ps.split(" ",1)
                    args.append("label="+label)
                assets.cur_script._script("script",ps,*args)
class bg(fadesprite):
    """Background art layer; loads from data/art/bg/<name>."""
    autoclear = True
    def __init__(self,name="",**kwargs):
        super(bg,self).__init__(**kwargs)
        if name:
            self.load("bg/"+name)
class fg(fadesprite):
    """Foreground effect sprite; centered on screen, loads from fg/<name>."""
    autoclear = True
    def __init__(self,name="",**kwargs):
        super(fg,self).__init__(**kwargs)
        if name:
            self.load("fg/"+name)
            # Center the art, keeping any x/y the caller passed as an offset.
            self.pos = [(assets.sw-self.img.get_width())/2+self.pos[0],(assets.sh-self.img.get_height())/2+self.pos[1]]
        self.wait = kwargs.get("wait",1)
        self.spd = int(assets.variables.get("_default_fg_frame_delay",self.spd))
    def update(self):
        super(fg,self).update()
        # Block the script (return True) while the animation is still playing.
        if self.next>=0 and self.wait:
            return True
#Keep this for saved games
class testimony_blink(fg):
    """Flashing "Testimony" indicator in the screen corner during testimony."""
    id_name = "_tb_blinker_"
    def draw(self,dest):
        # Fixed on-screen position; always fully opaque.
        self.pos[0] = 0
        self.pos[1] = 22
        self.fade = 255
        if not hasattr(self,"time"):
            self.time = 80
        self.time -= 1
        if self.time == 0:
            self.time = 80
        # Visible for 60 of every 80 ticks, and only if not disabled by script.
        if self.time>20 and vtrue(assets.variables.get("_testimony_blinker", "true")):
            w,h = self.img.get_size()
            dest.blit(pygame.transform.scale(self.img,[int(w//1.5),int(h//1.5)]),self.pos)
class textbox(gui.widget):
    """The main dialogue textbox: types out markup text page by page,
    plays blip sounds, and hosts cross-examination controls."""
    pri = 30
    def click_down_over(self,pos):
        # rpos1 only exists after the first draw(); ignore early clicks.
        if not hasattr(self,"rpos1"): return
        if getattr(self,"hidden",0): return
        if self.statement:
            # Cross-examination: left/right halves step between statements.
            # NOTE(review): mixes rpos/width (widget attrs) with
            # rpos1/width1 (drawn box) — verify intended.
            if pos[1]>=self.rpos[1] and pos[1]<=self.rpos1[1]+self.height1:
                if pos[0]>=self.rpos1[0] and pos[0]<=self.rpos1[0]+self.width/2:
                    self.k_left()
                if pos[0]>=self.rpos1[0]+self.width/2 and pos[0]<=self.rpos1[0]+self.width:
                    self.k_right()
        if pos[0]>=self.rpos1[0] and pos[0]<=self.rpos1[0]+self.width1 and pos[1]>=self.rpos1[1] and pos[1]<=self.rpos1[1]+self.height1:
            self.enter_down()
    def set_text(self,text):
        """Parse markup, expand {variable}s, word-wrap, and paginate.

        Stores 3-line pages in self.pages, and rebuilds self._text
        (plain text, newline-joined) and self._markup (flat markup
        stream) from the paged lines.
        """
        #print "SETTING TEXT:",repr(text)
        text = textutil.markup_text(text)
        #print "marked up text:",repr(text)
        text.m_replace(lambda c:hasattr(c,"variable"),lambda c:assets.variables[c.variable])
        lines = text.fulltext().split(u"\n")
        wrap = vtrue(assets.variables.get("_textbox_wrap","true"))
        # Author-controlled line breaks disable auto-wrapping by default.
        if vtrue(assets.variables.get("_textbox_wrap_avoid_controlled","true")):
            if len(lines)>1:
                wrap = False
        lines = textutil.wrap_text(lines,assets.get_image_font("tb"),assets.sw-6,wrap)
        self.pages = [lines[i:i+3] for i in xrange(0, len(lines), 3)]
        self._text = u"\n"
        self._markup = textutil.markup_text("")
        for page in self.pages:
            for line in page:
                self._text += line.fulltext()
                self._text+="\n"
                self._markup._text.extend(line.chars())
                self._markup._text.append("\n")
    # Reading .text returns the paginated plain text; assigning re-parses.
    text = property(lambda self: self._text,set_text)
    def __init__(self,text="",color=[255,255,255],delay=2,speed=1,rightp=True,leftp=False,nametag="\n"):
        """Build the textbox widget.

        text    -- initial markup text
        color   -- default text color
        delay   -- ticks between characters; speed -- characters per tick
        rightp/leftp -- show right/left continue pointers
        nametag -- speaker name ("\n" means "take it from the portrait")
        """
        self.nametag = nametag
        ImgFont.lastcolor = [255,255,255]
        gui.widget.__init__(self,[0,0],[assets.sw,assets.sh])
        self.z = zlayers.index(self.__class__.__name__)
        nametag = self.nametag
        # Prefer the speaking portrait's nametag when one is on screen.
        if assets.portrait:
            nametag = getattr(assets.portrait,"nametag",nametag) or nametag
        self.nt_full = None
        self.nt_left = None
        self.nt_text_image = None
        self.base = assets.open_art(assets.variables.get("_textbox_bg","general/textbox_2"))[0].convert_alpha()
        # Either a single full nametag plate, or stretchy left/middle/right parts.
        nt_full_image = assets.variables.get("_nt_image","")
        if nt_full_image:
            self.nt_full = assets.open_art(nt_full_image)[0].convert_alpha()
        elif nametag.strip():
            self.nt_left = assets.open_art("general/nt_left")[0].convert_alpha()
            self.nt_middle = assets.open_art("general/nt_middle")[0].convert_alpha()
            self.nt_right = assets.open_art("general/nt_right")[0].convert_alpha()
        self.nametag = nametag
        self.img = self.base.copy()
        self.go = 0
        self.text = text
        self.mwritten = []
        self.num_lines = 4
        self.next = self.num_lines
        self.color = color
        self.delay = delay
        self.speed = speed
        self.next_char = 0
        self.in_paren = 0 #Record whether we are inside parenthesis or not
        #Show pointer left and right
        self.rightp = rightp
        self.leftp = leftp
        self.rpi = fg("pointer")
        self.kill = False
        self.statement = None
        self.wait = "auto"
        self.can_skip = True
        self.blocking = not vtrue(assets.variables.get("_textbox_skipupdate","0"))
        self.made_gui = False
        self.id_name = "_textbox_"
        self.is_cross = False
    def init_cross(self):
        # Cross-examination mode: press/present buttons appear when ready.
        self.is_cross = True
    def init_normal(self):
        subscript("show_court_record_button")
    def init_gui(self):
        # Lazily build the mode-specific GUI, exactly once.
        if not self.made_gui:
            self.made_gui = True
            if self.statement:
                self.init_cross()
            else:
                self.init_normal()
    def delete(self):
        """Tear down the textbox and any helper GUI it may have shown."""
        self.kill = 1
        subscript("hide_court_record_button")
        subscript("hide_press_button")
        subscript("hide_present_button")
        if vtrue(assets.variables.get("_tb_on","off")):
            subscript("tboff")
        assets.cur_script.refresh_arrows(self)
        print "TEXTBOX DELETED"
        assets.cur_script.build_mode = True
    def gsound(self):
        # Explicit override wins; else the speaking portrait's blip; else male.
        if hasattr(self,"_clicksound"): return self._clicksound
        if assets.portrait:
            return assets.portrait.clicksound
        return "blipmale.ogg"
    def ssound(self,v):
        self._clicksound = v
    clicksound = property(gsound,ssound)
    def can_continue(self):
        """If not blocking, player cannot make text continue
        If skip mode is on (_debug mode) we can just skip the text
        Otherwise, check to see if all the text has been written out"""
        if not self.blocking:
            return
        if self.can_skip or self.nextline():
            return True
    def enter_down(self):
        # Advance: finish writing the current page instantly, or page forward.
        if not self.can_continue(): return
        if not self.nextline():
            while not self.nextline():
                self.add_character()
            #self.mwritten = self._markup._text
        else:
            self.forward()
    def k_left(self):
        # Cross-examination: step back one statement.
        if self.statement and self.nextline():
            assets.cur_script.prev_statement()
            self.forward()
    def k_right(self):
        # Cross-examination: advance to the next statement.
        if self.statement and self.nextline():
            self.forward()
    def k_z(self):
        # Press the current statement.
        if self.statement and self.nextline():
            assets.cur_script.cross = "pressed"
            assets.cur_script.clear_arrows()
            assets.cur_script.goto_result("press "+self.statement,backup=assets.variables.get("_court_fail_label",None))
            self.delete()
    def k_x(self):
        # Present evidence against the current statement.
        if self.statement and self.nextline():
            em = assets.addevmenu()
            em.fail = assets.variables.get("_court_fail_label",None)
            assets.cur_script.clear_arrows()
    def forward(self,sound=True):
        """Set last written text to the contents of the textbox
        turn off testimony blinking
        scroll to next 3 lines of text if they exist
        if there is no more text, delete textbox
        play the bloop sound"""
        t = textutil.markup_text()
        t._text = self.mwritten
        assets.variables["_last_written_text"] = t.fulltext()
        lines = self.text.split("\n")
        lines = lines[4:]
        self.set_text("\n".join(lines))
        self.mwritten = []
        self.next = self.num_lines
        self.img = self.base.copy()
        if not self.text.strip():
            self.delete()
        if sound:
            assets.play_sound("bloop.ogg",volume=0.7)
    def draw(self,dest):
        """Blit the textbox image, continue pointers, and the nametag plate."""
        self.children = []
        if not self.go or self.kill:
            return
        #For the widget
        # Script variables may override the default bottom-centered placement.
        x = assets.variables.get("_textbox_x","")
        y = assets.variables.get("_textbox_y","")
        self.rpos1 = [(assets.sw-self.img.get_width())/2,
            assets.sh-self.img.get_height()]
        if x!="":
            self.rpos1[0] = int(x)
        if y!="":
            self.rpos1[1] = int(y)
        self.width1 = self.img.get_width()
        self.height1 = self.img.get_height()
        dest.blit(self.img,
            self.rpos1)
        # Continue pointers only show once the page has fully printed.
        if self.rightp and self.nextline():
            dest.blit(self.rpi.img,[self.rpos1[0]+self.width1-16,
                self.rpos1[1]+self.height1-16])
        if getattr(self,"showleft",False) and self.nextline():
            dest.blit(pygame.transform.flip(self.rpi.img,1,0),[self.rpos1[0],
                self.rpos1[1]+self.height1-16])
        #End
        x = assets.variables.get("_nt_x","")
        y = assets.variables.get("_nt_y","")
        if self.nt_full:
            # Single prebuilt nametag plate.
            nx,ny = self.rpos1[0],(self.rpos1[1]-self.nt_full.get_height())
            if x!="":
                nx = int(x)
            if y!="":
                ny = int(y)
            dest.blit(self.nt_full,[nx,ny])
            if self.nt_text_image:
                if assets.variables.get("_nt_text_x","")!="":
                    nx += int(assets.variables.get("_nt_text_x",0))
                if assets.variables.get("_nt_text_y","")!="":
                    ny += int(assets.variables.get("_nt_text_y",0))
                dest.blit(self.nt_text_image,[nx+5,ny])
        elif self.nt_left and self.nt_text_image:
            # Stretchy plate: left cap, middle repeated to fit the text, right cap.
            nx,ny = self.rpos1[0],(self.rpos1[1]-self.nt_left.get_height())
            if x!="":
                nx = int(x)
            if y!="":
                ny = int(y)
            dest.blit(self.nt_left,[nx,ny])
            for ii in range(self.nt_text_image.get_width()+8):
                dest.blit(self.nt_middle,[nx+3+ii,ny])
            dest.blit(self.nt_right,[nx+3+ii+1,ny])
            if assets.variables.get("_nt_text_x","")!="":
                nx += int(assets.variables.get("_nt_text_x",0))
            if assets.variables.get("_nt_text_y","")!="":
                ny += int(assets.variables.get("_nt_text_y",0))
            dest.blit(self.nt_text_image,[nx+5,ny])
def add_character(self):
command = None
next_char = 1
char = self._markup._text[len(self.mwritten)]
self.mwritten.append(char)
if isinstance(char,textutil.markup_command):
command,args = char.command,char.args
if assets.cur_script.macros.get(command,None):
print "RUNNING A MACRO"
assets.variables["_return"] = ""
this = assets.cur_script
ns = assets.cur_script.execute_macro(command,args)
old = ns._endscript
s = len(self.mwritten)-1
mt = self._markup._text
self._markup._text = self.mwritten
def back(*args):
old()
print "MWRIT",s,self.mwritten
t0=[]
t1=[]
for i,c in enumerate(mt):
if i<s and not isinstance(c,textutil.markup_command):
t0.append(c)
if i>s:
t1.append(c)
t2 = [textutil.markup_command("_fullspeed","")]+t0+[textutil.markup_command("_endfullspeed","")]+list(assets.variables["_return"])+t1[:-1]
print "t0","".join([str(x) for x in t0])
print "t1","".join([str(x) for x in t1])
t = textutil.markup_text()
t._text = t2
print repr(t.fulltext())
self.set_text(t.fulltext())
self.mwritten = []
self.next_char = 0
ns._endscript = back
else:
print "no macro for",command
commands = ["sfx","sound","delay","spd","_fullspeed","_endfullspeed",
"wait","center","type","next",
"tbon","tboff",
"e","f","s","p","c"]
commands.sort(key=lambda o:len(o))
commands.reverse()
for cm in commands:
if command.startswith(cm):
nargs = command.split(cm,1)[1]
if nargs and not nargs.startswith(" "):
command,args = cm,nargs
break
print "new command:",command,args
if command == "sfx":
assets.play_sound(args)
elif command == "sound":
self.clicksound = args
elif command == "delay":
self.delay = int(args)
self.wait = "manual"
elif command == "spd":
self.speed = float(args)
elif command == "_fullspeed":
self.last_speed = self.speed
self.speed = 0
elif command == "_endfullspeed":
self.speed = self.last_speed
elif command == "wait":
self.wait = args
elif command == "center":
pass
elif command == "type":
self.clicksound = "typewriter.ogg"
self.delay = 2
self.wait = "manual"
elif command == "next":
if assets.portrait:
assets.portrait.set_blinking()
del self.mwritten[-1]
self.forward(False)
return 0
elif command=="e":
try:
assets.set_emotion(args.strip())
except:
import traceback
traceback.print_exc()
raise markup_error("No character to apply emotion to")
elif command=="f":
assets.flash = 3
assets.flashcolor = [255,255,255]
command = args.split(" ")
if len(command)>0 and command[0]:
assets.flash = int(command[0])
if len(command)>1:
assets.flashcolor = color_str(command[1])
elif command=="s":
assets.shakeargs = [x for x in args.split(" ") if x.strip()]
elif command=="p":
next_char = int(args.strip())
elif command=="c":
pass
elif command=="tbon":
assets.cur_script.tbon()
elif command=="tboff":
assets.cur_script.tboff()
else:
raise markup_error("No macro or markup command valid for:"+command)
elif isinstance(char,textutil.markup):
pass
else:
if not hasattr(self,"_lc"):
self._lc = ""
self.go = 1
if self._lc in [".?"] and char == " ":
next_char = 6
if self._lc in ["!"] and char == " ":
next_char = 8
if self._lc in [","] and char == " ":
next_char = 4
if self._lc in ["-"] and (char.isalpha() or char.isdigit()):
next_char = 4
if char in ["("]:
self.in_paren = 1
if char in [")"]:
self.in_paren = 0
if assets.portrait:
punctuation = [x for x in assets.variables.get("_punctuation",u".,?!")]
if not self.in_paren and not char in punctuation:
assets.portrait.set_talking()
if self.in_paren:
assets.portrait.set_blinking()
if str(char).strip():
assets.play_sound(self.clicksound,volume=random.uniform(0.65,1.0))
next_char = int(next_char*self.delay)
if self.wait=="manual":
if char.strip():
next_char = 5*self.delay
else:
next_char = 2
self._lc = char
return next_char
    def nextline(self):
        """Returns true if all the text waiting to be written into the textbox has been written"""
        # True when either the whole markup stream has been consumed, or
        # the written text already fills num_lines lines (page complete).
        t = textutil.markup_text()
        t._text = self.mwritten
        return not len(self.mwritten)<len(self._markup._text) or len(t.fulltext().split("\n"))>=self.num_lines
    def update(self,dt=None):
        """Advance the typewriter effect and re-render the visible page.

        dt defaults to the global frame delta.  Returns True (block the
        script) while self.blocking is set.
        """
        #assets.play_sound(self.clicksound)
        self.rpi.update()
        if self.kill: return
        if dt is None:
            dt = assets.dt
        self.next_char -= dt
        #FIXME - logic is horrendously convoluted
        # Emit up to `speed` characters per elapsed tick; add_character()
        # returns each character's delay, which feeds the countdown.
        while (not self.nextline()) and self.next_char<=0:
            #self.next_char += 1
            num_chars = max(int(self.speed),1)
            next_char = 0
            cnum = num_chars
            while (not self.nextline()) and ((not self.speed) or cnum>0):
                cnum -= 1
                ac_next = self.add_character()
                if self.speed:
                    next_char += ac_next
            if self.speed:
                self.next_char += (next_char/float(self.speed))
        # Long pauses and finished pages switch the portrait back to idle.
        if assets.portrait:
            if self.next_char>10 or self.nextline():
                assets.portrait.set_blinking()
        title = True
        self.next = 0
        if self.next==0:
            # Re-render the page: nametag line first, then body lines.
            self.img = self.base.copy()
            y, stx, inc = 6, 6, 18*(assets.sh/standard_sh)
            x = stx
            color = self.color
            center = False
            t = textutil.markup_text()
            t._text = self.mwritten
            lines = [self.nametag.replace("\n","")]+t.fulltext().split("\n")
            # NOTE(review): direct indexing — assumes "_textbox_lines" is
            # always initialized by the engine.
            nlines = assets.variables["_textbox_lines"]
            if nlines == "auto":
                if len(lines)==4:
                    nlines = "3"
                else:
                    nlines = "2"
            if not nlines:
                nlines = "3"
            nlines = int(nlines)
            if nlines == 2:
                y,inc = 8,24
            for i,line in enumerate(lines[:nlines+1]):
                if title:
                    # First entry is the nametag; rendered to its own image.
                    if line.strip():
                        ncolor = assets.variables.get("_nt_text_color","")
                        if ncolor:
                            ncolor = color_str(ncolor)
                        else:
                            ncolor = color
                        nt_image = assets.get_font("nt").render(line.replace(u"_",u" "),1,ncolor)
                        self.nt_text_image = nt_image
                    title = False
                else:
                    img = assets.get_image_font("tb").render(line,color)
                    color = ImgFont.lastcolor
                    if "{center}" in line:
                        center = not center
                    if center:
                        x = (assets.sw-img.get_width())//2
                    # In debug mode, flag lines that overflow the screen once.
                    if x+img.get_width()>assets.sw:
                        if not getattr(self,"OVERAGE",None) and vtrue(assets.variables.get("_debug","false")):
                            self.OVERAGE = x+img.get_width()-assets.sw
                            raise offscreen_text('Text Overflow:"%s" over by %s'%(line,self.OVERAGE))
                    self.img.blit(img,[x,y])
                    y+=inc
                    x = stx
            self.next = self.num_lines
        # Once a cross-exam page finishes printing, reveal press/present.
        if self.is_cross and self.nextline():
            self.is_cross = False
            subscript("show_press_button")
            subscript("show_present_button")
        if self.blocking:
            return True
        return
class uglyarrow(fadesprite):
    """Second-screen "big button" panel: advance button, cross-exam
    left/right arrows, optional scanlines and letterbox borders."""
    def __init__(self):
        # Positioned one screen-height down (the second/bottom screen).
        fadesprite.__init__(self,x=0,y=assets.sh)
        self.load(assets.variables.get("_bigbutton_bg","bg/main"))
        self.arrow = sprite(0,0).load("general/arrow_big")
        self.scanlines = fadesprite(0,0).load("fg/scanlines")
        self.border_top = fadesprite(0,0).load(assets.variables.get("_screen2_letterbox_img","general/bigbutton/border"))
        self.border_bottom = fadesprite(0,0).load(assets.variables.get("_screen2_letterbox_img","general/bigbutton/border"))
        self.scanlines.fade = 50
        self.button = None
        self.double = None
        self.textbox = None
        self.pri = ulayers.index(self.__class__.__name__)
        self.width = self.iwidth = assets.sw
        self.height = self.iheight = assets.sh
        self.high = False
        self.showleft = True
        # Cache of the last art path loaded, to avoid reloading every frame.
        self.last = None
        self.id_name = "_uglyarrow_"
    def show_unclicked(self):
        # Reload only when the configured art path changes.
        p = assets.variables.get("_bigbutton_img","general/buttonpress")
        if self.last != p:
            self.last = p
            self.button = sprite(0,0).load(p)
    def show_clicked(self):
        # Pressed state uses the "_high" variant of the button art.
        p = assets.variables.get("_bigbutton_img","general/buttonpress")
        high = noext(p)+"_high"+onlyext(p)
        if self.last != high:
            self.last = high
            self.button = sprite(0,0).load(high)
    def show_cross(self):
        # Cross-examination: replace the single button with the left/right pair.
        if not self.double:
            self.double = sprite(0,0).load(assets.variables.get("_bigbutton_cross","general/cross_exam_buttons"))
        self.button = None
    def update(self):
        self.pos[1] = assets.sh
        if self.high:
            self.show_clicked()
        else:
            self.show_unclicked()
        if self.textbox and self.textbox.statement:
            self.show_cross()
        self.arrow.update()
        return False
    def draw(self,dest):
        """Render background, button(s), arrows and screen-2 overlays."""
        fadesprite.draw(self,dest)
        if self.button:
            # Single advance button, centered on the second screen.
            self.button.pos[0] = (assets.sw-self.button.img.get_width())//2
            self.button.pos[1] = (assets.sh-self.button.img.get_height())//2+assets.sh
            self.button.draw(dest)
            self.iwidth = self.button.img.get_width()
            self.iheight = self.button.img.get_height()
            if self.can_click():
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                self.arrow.draw(dest)
        elif self.double:
            # Cross-exam button pair with a flipped arrow on each side.
            self.double.pos[0] = (assets.sw-self.double.img.get_width())//2
            self.double.pos[1] = (assets.sh-self.double.img.get_height())//2+assets.sh
            self.double.draw(dest)
            self.iwidth = self.double.img.get_width()
            self.iheight = self.double.img.get_height()
            if self.can_click():
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2-75
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                self.arrow.img = pygame.transform.flip(self.arrow.img,1,0)
                if self.showleft:
                    self.arrow.draw(dest)
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2+70
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                # Flip back so the sprite isn't mirrored cumulatively.
                self.arrow.img = pygame.transform.flip(self.arrow.img,1,0)
                self.arrow.draw(dest)
        if vtrue(assets.variables.get("_screen2_scanlines","off")):
            self.scanlines.pos = self.pos
            self.scanlines.draw(dest)
        if vtrue(assets.variables.get("_screen2_letterbox","on")):
            self.border_top.pos = self.pos
            self.border_top.draw(dest)
            self.border_bottom.pos[0] = self.pos[0]
            self.border_bottom.pos[1] = self.pos[1]+192-self.border_bottom.height
            self.border_bottom.draw(dest)
    def over(self,mp):
        """Hit-test: True over the single button, "left"/"right" over the
        cross-exam halves, None otherwise."""
        if self.button:
            if mp[0]>=self.button.pos[0] and mp[1]>=self.button.pos[1]\
            and mp[0]<=self.button.pos[0]+self.iwidth\
            and mp[1]<=self.button.pos[1]+self.iheight:
                return True
        if self.double:
            if mp[0]>=self.double.pos[0] and mp[1]>=self.double.pos[1]\
            and mp[0]<=self.double.pos[0]+self.iwidth/2\
            and mp[1]<=self.double.pos[1]+self.iheight:
                return "left"
            if mp[0]>=self.double.pos[0]+self.iwidth/2 and mp[1]>=self.double.pos[1]\
            and mp[0]<=self.double.pos[0]+self.iwidth\
            and mp[1]<=self.double.pos[1]+self.iheight:
                return "right"
    def move_over(self,mp,rel,bt):
        # Un-highlight when the pointer drags off the button.
        if not self.over(mp):
            if self.high:
                self.high = None
        else:
            if self.high == None:
                self.high = True
    def click_down_over(self,mp):
        gui.window.focused = self
        over = self.over(mp)
        if over == True and not self.high and self.can_click():
            self.high = True
        if over == "left" and self.can_click() and self.showleft:
            self.textbox.k_left()
        if over == "right" and self.can_click():
            self.textbox.k_right()
    def click_up_over(self,mp):
        # Release over the main button advances the textbox.
        if self.high:
            self.high = False
            if self.can_click():
                self.textbox.enter_down()
    def can_click(self):
        # Only clickable while a live textbox can actually advance.
        return self.textbox and not getattr(self.textbox,"kill",0) and self.textbox.can_continue()
class menu(fadesprite,gui.widget):
    """Investigation menu: the 2x2 examine/move/talk/present button grid."""
    z = 5
    fail = "none"
    id_name = "invest_menu"
    def over(self,mp):
        """Return the option name under mouse position mp, or None."""
        oy = self.getpos()[1]
        for o in self.options:
            p2 = self.opos[o]
            w,h = self.opt.get_width()//2,self.opt.get_height()//2
            if mp[0]>=p2[0] and mp[0]<=p2[0]+w and mp[1]>=p2[1]+oy and mp[1]<=p2[1]+h+oy:
                return o
    def move_over(self,pos,rel,buttons):
        # Dragging with the left button keeps selection under the cursor.
        if buttons[0]:
            self.click_down_over(pos)
    def click_down_over(self,mp):
        gui.window.focused = self
        o = self.over(mp)
        if o is not None:
            self.selected = o
    def click_up(self,mp):
        # Confirm only when the release lands on the pressed option.
        o = self.over(mp)
        if self.selected==o and o is not None:
            self.enter_down()
    def __init__(self):
        """Build the menu: fade-in overlay plus the 2x2 button sheet,
        sliced into normal and highlighted quarter images."""
        self.bg = None
        # The menu lives on the second screen (y offset 192).
        oy = 192
        fadesprite.__init__(self,x=0,y=oy)
        gui.widget.__init__(self,[0,oy],[assets.sw,assets.sh])
        self.load("general/black")
        self.max_fade = int(float(assets.variables.get("_menu_fade_level",50)))
        self.fade = 0
        if self.max_fade == 0:
            self.setfade(0)
        else:
            assets.cur_script.obs.append(fadeanim(start=0,end=self.max_fade,speed=3,wait=1,name=None,obs=[self]))
        self.options = []
        self.selected = ""
        self.opt = assets.open_art("general/talkbuttons",key=[255,0,255])[0]
        stx,sty = (assets.sw-self.opt.get_width())/2,(assets.sh-self.opt.get_height())/2
        # Grid coordinates, row-major layout, and pixel positions per option.
        self.opos_c = {"examine":[0,0],"move":[1,0],
        "talk":[0,1],"present":[1,1]}
        self.opos_l = [["examine","move"],["talk","present"]]
        self.opos = {"examine":[stx,sty],"move":[stx+self.opt.get_width()/2,sty],
        "talk":[stx,sty+self.opt.get_height()/2],"present":[stx+self.opt.get_width()/2,sty+self.opt.get_height()/2]}
        # Slice the 2x2 sheet into four quarter images (normal state).
        imgs = []
        x=y=0
        while y<self.opt.get_height():
            while x<self.opt.get_width():
                imgs.append(self.opt.subsurface([[x,y],[self.opt.get_width()//2,self.opt.get_height()//2]]))
                x+=self.opt.get_width()//2
            y+=self.opt.get_height()//2+1
            x = 0
        self.oimgs = {"examine":imgs[0],"move":imgs[1],"talk":imgs[2],"present":imgs[3]}
        # Same slicing for the highlighted sheet.
        self.opthigh = assets.open_art("general/talkbuttons_high",key=[255,0,255])[0]
        imgs = []
        x=y=0
        while y<self.opthigh.get_height():
            while x<self.opthigh.get_width():
                imgs.append(self.opthigh.subsurface([[x,y],[self.opthigh.get_width()//2,self.opthigh.get_height()//2]]))
                x+=self.opthigh.get_width()//2
            y+=self.opthigh.get_height()//2+1
            x = 0
        self.oimgshigh = {"examine":imgs[0],"move":imgs[1],"talk":imgs[2],"present":imgs[3]}
        self.open_script = True
        self.primary = True
    def init_normal(self):
        subscript("show_court_record_button")
    def delete(self):
        super(menu,self).delete()
        subscript("hide_court_record_button")
    def update(self):
        # A menu with no options left removes itself.
        if not self.options:
            self.delete()
        fadesprite.update(self)
        self.screen_setting = "try_bottom"
        return True
    def get_coord(self):
        """[col,row] of the selected option in the 2x2 grid; [0,0] fallback."""
        try:
            return self.opos_c[self.selected][:]
        except:
            return [0,0]
def k_right(self):
coord = self.get_coord()
coord[0]+=1
if coord[0]>1:
coord[0] = 0
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_left(self):
coord = self.get_coord()
coord[0]-=1
if coord[0]<0:
coord[0] = 1
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_up(self):
coord = self.get_coord()
coord[1]-=1
if coord[1]<0:
coord[1] = 1
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_down(self):
coord = self.get_coord()
coord[1]+=1
if coord[1]>1:
coord[1] = 0
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
    def enter_down(self):
        """Confirm the selected option: open a sub-scene or jump to a label."""
        if self.open_script:
            print "INITIALIZE MENU SCENE"
            assets.cur_script.init(self.scene+"."+self.selected)
        else:
            print "TRY TO JUMP TO LABEL"
            assets.cur_script.goto_result(self.selected,backup=self.fail)
        self.delete()
        subscript("sound_investigate_menu_confirm")
    def addm(self,opt):
        # Add an option; the first one added becomes the default selection.
        if opt:
            self.options.append(opt)
            if not self.selected:
                self.selected = opt
    def delm(self,opt):
        if opt in self.options:
            self.options.remove(opt)
            if self.selected == opt:
                self.selected = None
    def draw(self,dest):
        # Lazily snapshot the most recent background for the dimmed backdrop.
        if not self.bg:
            for o in reversed(assets.cur_script.obs):
                if isinstance(o,bg):
                    self.bg = o.img.copy()
                    break
        self.screen_setting = "try_bottom"
        if self.bg:
            dest.blit(self.bg,self.getpos())
        # Buttons appear only once the fade-in has finished.
        if not hasattr(self,"fade") or self.fade>=self.max_fade:
            for o in self.options:
                if self.selected == o:
                    dest.blit(self.oimgshigh[o],[self.opos[o][0],self.opos[o][1]+self.getpos()[1]])
                else:
                    dest.blit(self.oimgs[o],[self.opos[o][0],self.opos[o][1]+self.getpos()[1]])
class listmenu(fadesprite,gui.widget):
    """Vertical list of labelled choice buttons drawn over a background.

    Each entry in self.options is a dict with at least "label" (display
    text) and "result" (script label to jump to).  Confirming an option
    stores it in the "_selected" script variable and resumes the current
    script at that label.  When constructed with a tag, visited entries are
    remembered in assets.lists[tag] and displayed in parentheses (or with a
    checkmark image, if one is configured).
    """
    fail = "none"  # fallback label passed to goto_result when the target is missing
    id_name = "list_menu_id"
    def over(self,mp):
        """Return the index of the option row under mouse point mp, or None."""
        if getattr(self,"kill",0):
            return False
        x = (assets.sw-self.choice.img.get_width())/2
        y = self.getpos()[1]+30
        si = None
        i = 0
        for c in self.options:
            if mp[0]>=x and mp[1]>=y and mp[0]<=x+self.choice.width and mp[1]<=y+self.choice.height:
                si = i
            i+=1
            # Rows are stacked with a 5px gap.
            y+=self.choice.img.get_height()+5
        return si
    def move_over(self,pos,rel,buttons):
        """Dragging with the left button held behaves like pressing that row."""
        if getattr(self,"kill",0):
            return False
        if buttons[0]:
            self.click_down_over(pos)
    def click_down_over(self,mp):
        """Take focus and highlight the row under the cursor, if any."""
        if getattr(self,"kill",0):
            return False
        gui.window.focused = self
        si = self.over(mp)
        if si is not None:
            self.si = si
            self.selected = self.options[self.si]
    def click_up(self,mp):
        """Confirm the choice when the release lands on the pressed row."""
        if getattr(self,"kill",0):
            return False
        si = self.over(mp)
        if self.si==si and si is not None:
            self.enter_down()
    def __init__(self,tag=None):
        """tag: optional key into assets.lists used to remember visited entries."""
        self.pri = ulayers.index(self.__class__.__name__)
        x,y = 0,192
        gui.widget.__init__(self,[x,y],[assets.sw,assets.sh])
        fadesprite.__init__(self,x=x,y=y)
        self.load(assets.variables.get("_list_bg_image","general/black"))
        self.max_fade = int(float(assets.variables.get("_menu_fade_level",50)))
        self.fade = 0
        if assets.num_screens == 2 and not vtrue(assets.variables.get("_double_screen_list_fade","false")):
            # Dual-screen mode: show the backdrop at full opacity immediately.
            self.setfade(255)
        elif self.max_fade == 0:
            self.setfade(0)
        else:
            # Animate the darkened backdrop fading in over the scene.
            assets.cur_script.obs.append(fadeanim(start=0,end=self.max_fade,speed=3,wait=1,name=None,obs=[self]))
        self.options = []
        self.si = 0            # index of the highlighted option
        self.selected = ""     # the highlighted option dict (set lazily in draw)
        self.choice = fadesprite().load("general/talkchoice")
        self.choice_high = fadesprite().load("general/talkchoice_high")
        self.hidden = True
        self.tag = tag
        self.primary = True
    def init_normal(self):
        subscript("show_court_record_button")
    def delete(self):
        """Tear down the menu and its back button (if one was created)."""
        subscript("hide_court_record_button")
        self.kill = 1
        if hasattr(self,"bck"):
            self.bck.kill = 1
    def update(self):
        """Per-frame: lazily create the back button; cancel out when empty."""
        fadesprite.update(self)
        self.screen_setting = "try_bottom"
        if self.hidden:
            return False
        if not hasattr(self,"bck") and vtrue(assets.variables.get("_list_back_button","true")) and not getattr(self,"noback",False):
            self.bck = guiBack()
            self.bck.pri = 1000
            def k_space(b=self.bck):
                # Back button pressed: cancel the whole menu.
                self.delete()
                subscript("sound_list_menu_cancel")
                print "kill back button and self"
                assets.variables["_selected"] = "Back"
            self.bck.k_space = k_space
            assets.cur_script.obs.append(self.bck)
        if not self.options:
            # Nothing to choose from: behave as if the player backed out.
            self.k_space()
        return True
    def k_up(self):
        """Move the highlight up one row, wrapping to the bottom."""
        if getattr(self,"kill",0):
            return False
        self.si -= 1
        if self.si<0:
            self.si = len(self.options)-1
        self.selected = self.options[self.si]
        self.change_selected()
        subscript("sound_list_menu_select")
    def k_down(self):
        """Move the highlight down one row, wrapping to the top."""
        if getattr(self,"kill",0):
            return False
        self.si += 1
        if self.si>=len(self.options):
            self.si = 0
        self.selected = self.options[self.si]
        self.change_selected()
        subscript("sound_list_menu_select")
    def enter_down(self):
        """Confirm the highlighted option and jump the script to its result."""
        if getattr(self,"kill",0):
            return False
        if not self.selected:
            return
        subscript("sound_list_menu_confirm")
        if self.tag:
            # Remember that this entry has been visited.
            assets.lists[self.tag][self.selected["label"]] = 1
        if self.selected["result"] != "Back":
            assets.variables["_selected"] = self.selected["result"]
            assets.cur_script.goto_result(self.selected["result"],backup=self.fail)
        else:
            assets.variables["_selected"] = "Back"
        self.delete()
    def change_selected(self):
        """Run the option's optional on_select macro when the highlight moves."""
        scr = self.options[self.si].get("on_select",None)
        if scr:
            assets.cur_script.execute_macro(scr)
        #subscript(scr)
    def draw(self,dest):
        """Render background, option buttons, selection outline and checkmarks."""
        if getattr(self,"kill",0):
            return False
        if not self.selected and self.options:
            # First draw: default the highlight to the current index.
            self.selected = self.options[self.si]
            self.change_selected()
        fadesprite.draw(self,dest)
        x = (assets.sw-self.choice.img.get_width())/2
        y = self.getpos()[1]+30
        #self.choice.setfade(200)
        #self.choice_high.setfade(200)
        for c in self.options:
            if 0:#self.selected == c:
                img = self.choice_high.img.copy()
            else:
                img = self.choice.img.copy()
            rt = c["label"]
            # Per-option checkmark image overrides the global one.
            checkmark = assets.variables.get("_list_checked_img","general/checkmark")
            if "checkmark" in c:
                checkmark = c["checkmark"]
            try:
                checkmark = sprite().load(checkmark)
            except:
                checkmark = None
            if (not (checkmark and checkmark.width)) and self.tag and assets.lists[self.tag].get(rt,None):
                # No checkmark art available: mark visited entries textually.
                rt = "("+rt+")"
            txt = assets.get_image_font("list").render(rt,[110,20,20])
            img.blit(txt,[(img.get_width()-txt.get_width())/2,
            (img.get_height()-txt.get_height())/2])
            dest.blit(img,[x,y])
            if self.selected == c:
                # Draw the corner-bracket highlight around the selected row.
                lwi = 2
                color = color_str(assets.variables.get("_list_outline_color","ffaa45"))
                pygame.draw.line(dest,color,[x-1,y+8],[x-1,y+1],lwi)
                pygame.draw.line(dest,color,[x+1,y-2],[x+8,y-2],lwi)
                pygame.draw.line(dest,color,[x+img.get_width(),y+8],[x+img.get_width(),y+1],lwi)
                pygame.draw.line(dest,color,[x+img.get_width()-2,y-2],[x+img.get_width()-9,y-2],lwi)
                pygame.draw.line(dest,color,[x+img.get_width(),y+img.get_height()-2],[x+img.get_width(),y+img.get_height()-9],lwi)
                pygame.draw.line(dest,color,[x+img.get_width()-2,y+img.get_height()],[x+img.get_width()-9,y+img.get_height()],lwi)
                pygame.draw.line(dest,color,[x-1,y+img.get_height()-2],[x-1,y+img.get_height()-9],lwi)
                pygame.draw.line(dest,color,[x+1,y+img.get_height()],[x+8,y+img.get_height()],lwi)
            if checkmark and checkmark.width and self.tag and assets.lists[self.tag].get(rt,None):
                # Blit the visited-entry checkmark at its configured offset.
                if "check_x" in c:
                    cx = int(c["check_x"])
                else:
                    cx = int(assets.variables.get("_list_checked_x","-10"))
                if "check_y" in c:
                    cy = int(c["check_y"])
                else:
                    cy = int(assets.variables.get("_list_checked_y","-10"))
                dest.blit(checkmark.base[0],[x+cx,y+cy])
            y+=self.choice.img.get_height()+5
    def k_space(self):
        """Cancel the menu (only when a back path exists)."""
        if getattr(self,"kill",0):
            return False
        if hasattr(self,"bck") or "Back" in self.options:
            self.delete()
            subscript("sound_list_menu_cancel")
class case_menu(fadesprite,gui.widget):
    """Horizontal case-selection carousel shown at game start.

    Each case (a subdirectory of self.path containing a startable script)
    gets three stacked images in self.option_imgs: the title plate, a
    "New game" button and — when a save file exists — a "Resume Game"
    button; the menu scrolls one screen width per case.
    """
    children = []
    parent = None
    def click_down_over(self,mp):
        """Route a click to left/right paging or to the new/resume buttons."""
        # Three consecutive entries per case: title, "new", "save" (or None).
        surf,pos = self.option_imgs[self.choice*3]
        new,npos = self.option_imgs[self.choice*3+1]
        save,spos = self.option_imgs[self.choice*3+2]
        pos = pos[:]
        pos[0]-=self.x
        if mp[0]<pos[0]:
            self.k_left()
        elif mp[0]>pos[0]+surf.get_width():
            self.k_right()
        else:
            if new and mp[1]>=npos[1] and mp[1]<=npos[1]+new.get_height():
                self.enter_down()
            elif save and mp[1]>=spos[1] and mp[1]<=spos[1]+save.get_height():
                assets.game = self.path+"/"+self.options[self.choice]
                assets.load_game_menu()
                #assets.load_game(self.path+"/"+self.options[self.choice])
    def get_script(self,fullpath):
        """Return the startable script name for a case dir, or None."""
        dname = os.path.split(fullpath)[1]
        for test in [[fullpath+"/intro.txt","intro"],[fullpath+"/"+dname+".txt",dname]]:
            if os.path.exists(test[0]):
                return test[1]
    def __init__(self,path="games",**kwargs):
        """path: directory whose subdirectories are the selectable cases."""
        self.pri = kwargs.get("pri",ulayers.index(self.__class__.__name__))
        self.reload=False
        self.path = path
        fadesprite.__init__(self,screen=2)
        self.base = self.img = assets.Surface([64,64])
        self.max_fade = 150
        self.next = 0
        self.width = assets.sw
        self.height = assets.sh
        self.options = []
        # Case ordering: directory-name sort, or explicit _case_N variables.
        order = assets.variables.get("_order_cases","alphabetical")
        if order=="alphabetical":
            for d in os.listdir(path):
                full = os.path.join(path,d)
                if os.path.isdir(full):
                    if self.get_script(full):
                        self.options.append(d)
            self.options.sort()
        elif order=="variable":
            opts = {}
            for v in assets.variables.keys():
                if v.startswith("_case_"):
                    try:
                        num = int(v[6:])
                        opts[num] = assets.variables[v]
                    except:
                        continue
            self.options = []
            keys = opts.keys()
            keys.sort()
            for k in keys:
                self.options.append(opts[k])
        else:
            raise script_error("_order_cases set to '%s',"%(order,)+\
                    "only valid values are 'alphabetical' or "+\
                    "'variable'")
        self.init_options()
        self.choice = 0
        self.x = 0
        # Restore the last highlighted case index, best-effort.
        try:
            f = open(path+"/last")
            self.choice = int(f.readlines()[0].strip())
            f.close()
        except:
            pass
        if self.choice>=len(self.options):
            self.choice = 0
        self.scrolling = False
        self.arr = assets.open_art("general/arrow_right")[0]
        self.tried_case = False
        self.primary = True
    def delete(self):
        super(case_menu,self).delete()
    def init_options(self):
        """Pre-render the title plate and new/resume buttons for every case."""
        self.option_imgs = []
        base = assets.open_art("general/selection_chapter")[0].convert()
        x = self.pos[0]+assets.sw/2-base.get_width()/2
        y = self.pos[1]+assets.sh/2-base.get_height()/2
        for o in self.options:
            spr = base.copy()
            title = o.replace("_"," ")
            # Word-wrap the title to 160px, centred line by line.
            lines = [[]]
            wd_sp = 2
            for word in title.split(" "):
                word = assets.get_font("gametitle").render(word,1,[200,100,100])
                if sum([wd.get_width() for wd in lines[-1]])+wd_sp*len(lines[-1])+word.get_width()>160:
                    lines.append([])
                lines[-1].append(word)
            wd_y = spr.get_height()//2-(len(lines)*16)//2
            for line in lines:
                w = sum([wd.get_width() for wd in line])+wd_sp*len(line)
                wd_x = (spr.get_width()-w)/2
                for word in line:
                    spr.blit(word,[wd_x,wd_y])
                    wd_x += word.get_width()+wd_sp
                wd_y += 16
            self.option_imgs.append([spr,[x,y]])
            fnt = assets.get_font("new_resume")
            txt = fnt.render("New game",1,[200,100,100])
            spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
            spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
            self.option_imgs.append([spr,[x,y+60]])
            # "Resume Game" button only when any kind of save file exists.
            if os.path.exists(self.path+"/"+o+"/save.ns"):
                txt = fnt.render("Resume Game",1,[200,100,100])
                spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
                spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
                self.option_imgs.append([spr,[x,y+90]])
            elif os.path.exists(self.path+"/"+o+"/save"):
                txt = fnt.render("Resume Game",1,[200,100,100])
                spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
                spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
                self.option_imgs.append([spr,[x,y+90]])
            elif os.path.exists(self.path+"/"+o+"/autosave.ns"):
                txt = fnt.render("Resume Game",1,[200,100,100])
                spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
                spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
                self.option_imgs.append([spr,[x,y+90]])
            else:
                self.option_imgs.append([None,None])
            # Each case occupies its own screen width.
            x+=assets.sw
        self.children = self.option_imgs
    def update(self):
        """Animate the horizontal scroll toward the chosen case's screen."""
        if self.reload:
            self.option_imgs = []
            self.__init__(self.path)
        spd = (self.choice*256-self.x)/25.0
        if abs(spd)>0 and abs(spd)<10:
            # Clamp to a minimum scroll speed so it always settles.
            spd = 10*abs(spd)/spd
        spd *= assets.dt
        if self.x<self.choice*assets.sw:
            self.x+=spd
            if self.x>self.choice*assets.sw:
                self.x=self.choice*assets.sw
        if self.x>self.choice*assets.sw:
            self.x+=spd
            if self.x<self.choice*assets.sw:
                self.x=self.choice*assets.sw
        return True
    def k_right(self):
        """Select the next case, if any."""
        if self.choice<len(self.options)-1:
            self.choice += 1
            subscript("sound_case_menu_select")
        self.case_screen()
    def k_left(self):
        """Select the previous case, if any."""
        if self.choice>0:
            self.choice -= 1
            subscript("sound_case_menu_select")
        self.case_screen()
    def case_screen(self):
        """Run the case's optional case_screen.txt script as an overlay."""
        if not self.options:
            return
        if not hasattr(self,"curgame"):
            self.curgame = assets.game
        if os.path.exists(os.path.join(self.path,self.options[self.choice],"case_screen.txt")):
            scr = assets.Script()
            scr.parent = assets.cur_script
            assets.stack.append(scr)
            assets.game=self.curgame+"/"+self.options[self.choice]
            assets.registry = registry.combine_registries("./"+assets.game,assets.show_load)
            print "init: g:%s choice:%s"%(assets.game,self.options[self.choice])
            assets.cur_script.init("case_screen")
            assets.cur_script.world = scr.parent.world
    def enter_down(self):
        """Persist the highlighted index and start the chosen case fresh."""
        f = open(os.path.join(self.path,"last"),"w")
        f.write(str(self.choice))
        f.close()
        assets.start_game(self.path+"/"+self.options[self.choice],mode="nomenu")
    def draw(self,dest):
        """Blit the visible case plates/buttons and paging arrows."""
        if self.reload:
            return
        if not self.tried_case:
            self.case_screen()
            self.tried_case = 1
        for s,p in self.option_imgs:
            if not s: continue
            dest.blit(s,[p[0]-self.x,p[1]])
        if self.x==self.choice*assets.sw:
            # Only show arrows once the scroll animation has settled.
            if self.choice<len(self.options)-1:
                dest.blit(self.arr,[self.pos[0]+240,self.pos[1]+80])
            if self.choice>0:
                dest.blit(pygame.transform.flip(self.arr,1,0),[self.pos[0],self.pos[1]+80])
class examine_menu(sprite,gui.widget):
    """Crosshair-driven "examine the scene" mode.

    The player moves a crosshair over the current background(s); rectangular
    hot regions (added via addregion) select a script label, and confirming
    jumps the script there (or to self.fail when nothing is under the
    cursor).  Supports multi-screen-wide backgrounds with a scroll button.
    """
    fail = "none"  # fallback label when the player confirms over empty space
    def move_over(self,pos,rel,buttons):
        """Track the mouse as the crosshair position while focused."""
        if self.xscrolling:
            return
        if gui.window.focused == self:
            self.mx,self.my = [pos[0],pos[1]-self.getpos()[1]]
            self.highlight()
    def click_down_over(self,mp):
        """Take focus; treat clicks outside the confirm zone as cursor moves."""
        if self.xscrolling:
            return
        gui.window.focused = self
        if self.hide or self.selected == ["none"] or mp[0]<175 or mp[1]<159+self.getpos()[1]:
            self.move_over(mp,None,None)
    def click_up(self,mp):
        """Confirm at the current crosshair position on release."""
        if self.xscrolling:
            return
        if gui.window.focused == self:
            self.enter_down()
            gui.window.over = None
            gui.window.focused = None
    def __init__(self,hide=False,name="blah"):
        """hide: suppress the check overlay/back button; name: keys the
        per-menu "_xscroll_<name>" scroll-position variable."""
        self.name = name
        self.pri = ulayers.index(self.__class__.__name__)
        sprite.__init__(self)
        self.pos = [0,192]
        self.z = zlayers.index(self.__class__.__name__)
        self.width = assets.sw
        self.height = assets.sh
        gui.widget.__init__(self,self.pos,[assets.sw,assets.sh])
        self.img = assets.Surface([64,64])
        self.regions = []      # [x,y,w,h,label] hot rectangles
        self.mouse = pygame.Surface([10,10])
        self.mouse.fill([100,100,100])
        self.selected = [None] # region under the crosshair, or [None]
        self.mx,self.my = assets.sw/2,assets.sh/2
        self.check = assets.open_art("general/check"+assets.appendgba,key=[255,0,255])[0]
        self.hide = hide
        self.bg = []
        # Which sprites count as the examined background.
        if not assets.variables.get("_examine_use",None):
            self.bg = [x for x in assets.cur_script.obs if isinstance(x,bg)]
        else:
            self.bg = [x for x in assets.cur_script.obs if getattr(x,"id_name",None) == assets.variables["_examine_use"]]
        self.fg = assets.open_art("general/examinefg")[0]
        self.xscroll = 0
        self.xscrolling = 0
        # Restore the saved horizontal scroll for this menu, if any.
        scroll_amt = assets.variables.get("_xscroll_"+self.name,0)
        if scroll_amt==-1:
            self.xscroll = -1
            if self.getoffset()!=-256:
                assets.cur_script.obs.append(scroll(amtx=-256,amty=0,speed=256))
        self.blocking = not vtrue(assets.variables.get("_examine_skipupdate","0"))
        self.klefth = self.krighth = self.kuph = self.kdownh = 0
        self.screen_setting = "try_bottom"
        self.primary = True
    def init_normal(self):
        subscript("show_court_record_button")
    def delete(self):
        """Remove this menu plus its back and scroll buttons."""
        self.kill = 1
        if hasattr(self,"bck"):
            self.bck.delete()
        if hasattr(self,"scrollbut"):
            self.scrollbut.delete()
        subscript("hide_court_record_button")
    def getoffset(self):
        """Leftmost background x position (the current scroll offset)."""
        x = [o.pos[0] for o in self.bg]
        if not x:
            return 0
        x.sort()
        return x[0]
    def screens(self):
        """Count how many screen widths the background/regions span (1-3)."""
        screens = 1
        xes = [o.pos[0] for o in self.bg]
        xes.sort()
        smallest = 0
        if xes:
            smallest = xes[0]
        for mx in xes:
            x = mx-smallest
            if x>=assets.sw and screens<2:
                screens = 2
            if x>=assets.sw*2 and screens<3:
                screens = 3
        # Regions can extend the span beyond the background art.
        xes = [o[0] for o in self.regions]
        xes.sort()
        smallest = 0
        if xes:
            smallest = xes[0]
        for mx in xes:
            x = mx-smallest
            if x>=assets.sw and screens<2:
                screens = 2
            if x>=assets.sw*2 and screens<3:
                screens = 3
        self.width = screens*assets.sw
        return screens
    def addregion(self,x,y,width,height,label):
        """Register a clickable rectangle that selects the given label."""
        reg = [int(x),int(y),int(width),int(height),label]
        self.regions.append(reg)
        self.highlight()
    def highlight(self):
        """Recompute which region (if any) is under the crosshair."""
        self.selected = [None]
        for reg in self.regions:
            if self.mx>reg[0]+self.getoffset() and self.my>reg[1] and \
               self.mx<reg[0]+self.getoffset()+reg[2] and self.my<reg[1]+reg[3]:
                self.selected = reg
                return
    def draw(self,dest):
        """Draw background, crosshair (image or line cursor), bars and check."""
        self.highlight()
        if not assets.variables.get("_examine_use",None):
            [dest.blit(o.img,[o.pos[0],o.pos[1]+self.getpos()[1]]) for o in self.bg]
        my = self.my+self.getpos()[1]
        if vtrue(assets.variables.get("_examine_showcursor", "true")):
            if assets.variables.get("_examine_cursor_img","").strip():
                spr = sprite(0,0)
                spr.load(assets.variables.get("_examine_cursor_img",""))
                spr.pos[0] = self.mx-spr.width//2
                spr.pos[1] = my-spr.height//2
                spr.draw(dest)
            else:
                # Default crosshair: four lines with a gap and a small box.
                col = color_str(assets.variables.get("_examine_cursor_col","FFFFFF"))
                pygame.draw.line(dest,col,[0,my],[self.mx-5,my])
                pygame.draw.line(dest,col,[self.mx+5,my],[assets.sw,my])
                pygame.draw.line(dest,col,[self.mx,self.getpos()[1]],[self.mx,my-5])
                pygame.draw.line(dest,col,[self.mx,my+5],[self.mx,self.getpos()[1]+assets.sh])
                pygame.draw.rect(dest,col,[[self.mx-5,my-5],[10,10]],1)
        if vtrue(assets.variables.get("_examine_showbars", "true")):
            dest.blit(self.fg,[0,self.getpos()[1]])
        if self.selected != [None] and not self.hide:
            # Show the confirm checkmark when something is under the cursor.
            dest.blit(self.check,[assets.sw-self.check.get_width(),self.getpos()[1]+assets.sh-self.check.get_height()])
    def k_left(self,*args):
        self.klefth = 1
    def k_right(self,*args):
        self.krighth = 1
    def k_up(self,*args):
        self.kuph = 1
    def k_down(self,*args):
        self.kdownh = 1
    def update(self,*args):
        """Move the crosshair from held keys/joystick; manage scroll button."""
        if self.xscrolling:
            # A screen scroll was requested: enqueue it and drop the button.
            assets.cur_script.obs.append(scroll(-self.xscrolling,0,speed=16))
            self.xscrolling = 0
            if hasattr(self,"scrollbut"):
                self.scrollbut.delete()
                del self.scrollbut
            return self.blocking
        keys = pygame.key.get_pressed()
        spd = 3*assets.dt
        print spd
        d = [0,0]
        if keys[pygame.K_LEFT] or pygame.jsleft():
            d[0]-=spd
        if keys[pygame.K_RIGHT] or pygame.jsright():
            d[0]+=spd
        if keys[pygame.K_UP] or pygame.jsup():
            d[1]-=spd
        if keys[pygame.K_DOWN] or pygame.jsdown():
            d[1]+=spd
        self.klefth = self.krighth = self.kuph = self.kdownh = 0
        self.mx+=d[0]
        self.my+=d[1]
        # Clamp the crosshair 5px inside the screen edges.
        if self.mx-5<0: self.mx=5
        if self.mx+5>assets.sw: self.mx=assets.sw-5
        if self.my-5<0: self.my=5
        if self.my+5>assets.sh: self.my=assets.sh-5
        if assets.variables.get("_examine_scrolling",None)=="perceive":
            # "perceive" mode: the world moves under a fixed crosshair.
            def add(x):
                x.pos[0]-=d[0]
                x.pos[1]-=d[1]
            [add(x) for x in self.bg]
            x = float(assets.variables.get("_examine_offsetx",0))
            y = float(assets.variables.get("_examine_offsety",0))
            x-=d[0]
            y-=d[1]
            assets.variables["_examine_offsetx"] = str(x)
            assets.variables["_examine_offsety"] = str(y)
            self.highlight()
            return False
        self.highlight()
        if not hasattr(self,"bck") and not self.hide:
            self.bck = guiBack()
            self.bck.pri = 1000
            def k_space(b=self.bck):
                self.k_space()
            self.bck.k_space = k_space
            self.bck.update = lambda *x: False
            assets.cur_script.obs.append(self.bck)
        # Decide whether a left/right screen scroll is available.
        scrn = (-self.getoffset()//assets.sw)+1
        self.xscroll = None
        if scrn<self.screens():
            self.xscroll = 1
        elif scrn>1:
            self.xscroll = -1
        if not self.xscroll and hasattr(self,"scrollbut"):
            self.scrollbut.delete()
            del self.scrollbut
        if self.xscroll and not hasattr(self,"scrollbut"):
            self.scrollbut = guiScroll(self.xscroll)
            self.scrollbut.parent = self
            assets.cur_script.obs.append(self.scrollbut)
        assets.variables["_xscroll_"+self.name] = self.xscroll
        return self.blocking
    def enter_down(self):
        """Confirm: jump to the hovered region's label, or to self.fail."""
        print self.selected,self.regions,self.mx,self.my
        assets.variables["_examine_clickx"] = str(self.mx)
        assets.variables["_examine_clicky"] = str(self.my)
        print "FAIL ",self.fail
        go = self.selected[-1]
        if go == None:
            go = self.fail
        print assets.cur_script,"goto",go
        assets.cur_script.goto_result(go,backup=self.fail)
        self.delete()
    def k_space(self):
        """Cancel out of examine mode (unless cancel is hidden)."""
        if not self.hide:
            self.delete()
            subscript("sound_examine_menu_cancel")
class evidence_menu(fadesprite,gui.widget):
    """Court-record browser for evidence and profile items.

    Items are laid out into pages of up to 2 rows x 4 icons per item set
    (self.pages_set, e.g. "evidence" and "profiles").  Three UI modes:
    "overview" (icon grid), "zoomed" (single item with description), and
    "check" (detail scene).  Navigation state (current set, page and
    selected cell) is persisted in "_cr_current_*" script variables.
    During a cross-examination the selected item can be "presented",
    jumping the script to "<item id> <statement>".
    """
    fail = "none"  # fallback label when presenting has no matching target
    def click_down_over(self,mp):
        gui.window.focused = self
    def hold_down_over(self,mp):
        pass
    def move_over(self,mp,rel,buttons):
        pass
    def click_up(self,mp):
        """Hit-test a released click against grid cells, arrows and buttons."""
        mp[1]-=self.getpos()[1]
        if self.mode == "overview" and mp[0]>=36 and mp[1]>=62 and mp[0]<=218 and mp[1]<=145:
            # Click inside the icon grid: select that cell, then confirm.
            rx = (218-36)//4
            ry = (145-62)//2
            sx,sy = self.sx,self.sy
            self.sx = (mp[0]-36)/rx
            self.sy = (mp[1]-62)/ry
            try:
                self.choose()
            except:
                # Clicked an empty cell: restore the previous selection.
                self.sx,self.sy = sx,sy
                return
            self.back = False
            self.back_button.unhighlight()
            self.enter_down()
        if mp[0]>=0 and mp[0]<=78 and mp[1]>=162 and mp[1]<=192:
            # Back button area.
            if self.canback():
                self.back = True
                self.enter_down()
                self.back = False
                self.back_button.unhighlight()
        #self.sx,self.sy = [0,0]
        if mp[0]>=0 and mp[1]>=56 and mp[0]<=16 and mp[1]<=149:
            # Left page arrow.
            if self.mode=="overview" and len(self.pages)>1:
                self.page_prev()
                subscript("sound_court_record_scroll")
            if self.mode=="zoomed":
                self.k_left()
        if mp[0]>=238 and mp[1]>=56 and mp[1]<=149:
            # Right page arrow.
            if self.mode=="overview" and len(self.pages)>1:
                self.page_next()
                subscript("sound_court_record_scroll")
            if self.mode=="zoomed":
                self.k_right()
        if mp[0]>=177 and mp[1]<=29:
            # Evidence/profiles switch tab.
            self.switch = True
            self.enter_down()
            self.switch = False
        check = assets.open_art("general/check"+assets.appendgba)[0]
        chk = [assets.sw-check.get_width(),assets.sh-check.get_height()]
        if mp[0]>=chk[0] and mp[1]>=chk[1]:
            self.do_check()
        #~ self.enter_down()
        #~ gui.window.over = None
        #~ gui.window.focused = None
    def load(self,*args,**kwargs):
        fadesprite.load(self,*args,**kwargs)
    def __init__(self,items=[],gba=True):
        """items: icon objects (each with id/page/small/scaled/name/desc)."""
        subscript("sound_court_record_display")
        self.pri = ulayers.index(self.__class__.__name__)
        x,y = 0,192
        self.z = zlayers.index(self.__class__.__name__)
        print "evidence z",self.z
        fadesprite.__init__(self,x=x,y=y)
        gui.widget.__init__(self,[x,y],[assets.sw,assets.sh])
        self.items = items
        self.load(assets.variables["ev_mode_bg_evidence"]+assets.appendgba)
        self.cursor = assets.open_art(assets.variables["ev_cursor_img"])[0]
        self.page = 0
        self.sx,self.sy = [0,0]
        self.examine = None
        self.present = None
        self.back_button = guiBack()
        self.back_button.pri = 1000
        def k_space(b=self.back_button):
            # Back button: close the court record entirely.
            b.delete()
            self.delete()
            assets.variables["_selected"] = "Back"
        self.back_button.k_space = k_space
        self.back = False
        self.switch = False
        self.chosen = None
        self.mode = "overview" #overview, zoomed, check
        if assets.gbamode: self.mode = "zoomed"
        self.checking = None
        self.ev_zoom = assets.open_art(assets.variables["ev_z_bg"]+assets.appendgba)[0]
        self.scroll = 0
        self.scroll_dir = 0
        # Which item sets are browsable, minus any disabled via variables.
        self.pages_set = assets.variables.get("_ev_pages","evidence profiles").split(" ")
        for p in self.pages_set[:]:
            if not vtrue(assets.variables.get("_%s_enabled"%p,"true")):
                self.pages_set.remove(p)
        if not self.pages_set:
            self.delete()
        #Loading saved position
        self.item_set = assets.variables.get("_cr_current_item_set",self.pages_set[0])
        if self.item_set not in self.pages_set:
            self.item_set = self.pages_set[0]
        self.layout()
        if not self.pages:
            self.item_set = self.pages_set[0]
            self.layout()
        self.page = int(assets.variables.get("_cr_current_page",0))
        self.sx = int(assets.variables.get("_cr_current_selected_x",0))
        self.sy = int(assets.variables.get("_cr_current_selected_y",0))
        self.screen_setting = "try_bottom"
        self.script = assets.cur_script
        self.primary = True
    def delete(self):
        super(evidence_menu,self).delete()
        subscript("hide_present_button2")
    def update(self):
        """Keep selection valid and toggle the present button's visibility."""
        self.choose()
        #present button
        if not hasattr(self,"present_button"):
            self.present_button = False
        if self.can_present():
            if not self.present_button:
                subscript("show_present_button2")
                self.present_button = True
        else:
            if self.present_button:
                subscript("hide_present_button2")
                self.present_button = False
        if not getattr(self,"hidden",None):
            return True #Don't update anything else
    def layout(self):
        """Partition this set's items into pages of 2 rows x 4 icons."""
        self.pages = []
        lines = []
        line = []
        for icon in self.items:
            icon.reload()
            if self.item_set != icon.page:
                continue
            line.append(icon)
            if len(line)==4:
                lines.append(line)
                line = []
            if len(lines)==2:
                self.pages.append(lines)
                lines = []
        # Flush any partially filled row/page.
        if line:
            lines.append(line)
        if lines:
            self.pages.append(lines)
        self.page = 0
        self.sx = 0
        self.sy = 0
    def remember_vars(self):
        """Persist the browsing position so reopening restores it."""
        assets.variables.set("_cr_current_item_set",self.item_set)
        assets.variables.set("_cr_current_page",self.page)
        assets.variables.set("_cr_current_selected_x",self.sx)
        assets.variables.set("_cr_current_selected_y",self.sy)
    def page_prev(self):
        """Go to the previous page (wrapping), select its last cell."""
        self.page-=1
        if self.page<0:
            self.page = len(self.pages)-1
        page = self.pages[self.page]
        self.sy = len(page)-1
        self.sx = len(page[self.sy])-1
        self.remember_vars()
    def page_next(self):
        """Go to the next page (wrapping), select its first column."""
        self.page += 1
        if self.page>len(self.pages)-1:
            self.page = 0
        self.sx = 0
        page = self.pages[self.page]
        if self.sy>len(page)-1:
            self.sy = 0
        self.remember_vars()
    def set_bg(self):
        """Load the background configured for the current item set."""
        defbg = assets.variables["ev_mode_bg_evidence"]
        bg = assets.variables.get("ev_mode_bg_"+self.item_set,defbg)
        self.load(bg+assets.appendgba)
    def k_left(self):
        """Move selection left (overview wraps pages; zoomed steps items)."""
        if self.page>=len(self.pages): return
        self.set_bg()
        self.back = False
        self.back_button.unhighlight()
        self.sx-=1
        if self.mode == "overview":
            if self.sx<0:
                self.page_prev()
                page = self.pages[self.page]
                if self.sy>len(page)-1:
                    self.sy = 0
                while self.sx>len(page[self.sy])-1 and self.sx>0:
                    self.sx -= 1
            subscript("sound_court_record_scroll")
        elif self.mode == "zoomed":
            if self.sx<0 and self.sy>0:
                self.sx = 3
                self.sy -= 1
            if self.sx<0 and self.sy==0:
                self.page_prev()
            self.lastchoose = self.chosen_icon
            # Animate the slide only when more than one item exists.
            scroll = 0
            for p in self.pages:
                for line in p:
                    for icon in line:
                        scroll += 1
            if scroll>1:
                self.scroll = 256
                self.scroll_dir = 1
            subscript("sound_court_record_scroll_zoomed")
        self.choose()
    def k_right(self):
        """Move selection right (overview wraps pages; zoomed steps items)."""
        if self.page>=len(self.pages): return
        self.set_bg()
        self.back = False
        self.back_button.unhighlight()
        self.sx+=1
        if self.mode == "overview":
            page = self.pages[self.page]
            if self.sy>len(page)-1 or self.sx>len(page[self.sy])-1:
                self.page += 1
                if self.page>len(self.pages)-1:
                    self.page = 0
                self.sx = 0
                page = self.pages[self.page]
                if self.sy>len(page)-1:
                    self.sy = 0
            subscript("sound_court_record_scroll")
        elif self.mode == "zoomed":
            page = self.pages[self.page]
            if self.sx>len(page[self.sy])-1:
                self.sy += 1
                self.sx = 0
                if self.sy>len(page)-1:
                    self.sx = 0
                    self.sy = 0
                    self.page+=1
                    if self.page>=len(self.pages):
                        self.page= 0
            self.lastchoose = self.chosen_icon
            # Animate the slide only when more than one item exists.
            scroll = 0
            for p in self.pages:
                for line in p:
                    for icon in line:
                        scroll += 1
            if scroll>1:
                self.scroll = 256
                self.scroll_dir = -1
            subscript("sound_court_record_scroll_zoomed")
        self.choose()
    def k_up(self):
        """Move selection up; from the top row, highlight the switch tab."""
        self.set_bg()
        if self.mode == "overview":
            if self.sy == 0:
                self.switch = True
            elif self.sy==1:
                self.sy = 0
            if self.back:
                self.sy = 1
                if self.page>=len(self.pages) or self.sy>len(self.pages[self.page])-1:
                    self.sy = 0
            subscript("sound_court_record_scroll")
        elif not self.back:
            self.switch = True
        self.back = False
        self.back_button.unhighlight()
        self.choose()
    def k_down(self):
        """Move selection down; past the bottom row, highlight back."""
        self.set_bg()
        if self.mode == "overview":
            self.back = False
            self.back_button.unhighlight()
            if not self.switch:
                self.sy+=1
            self.switch = False
            if self.page>=len(self.pages) or self.sy>=len(self.pages[self.page]) and self.canback():
                self.back = True
                self.back_button.highlight()
            subscript("sound_court_record_scroll")
        elif self.mode == "zoomed":
            if self.switch == False and self.canback():
                self.back = True
                self.back_button.highlight()
            self.switch = False
        self.choose()
    def choose(self):
        """Clamp the selection to a real cell and set self.chosen(_icon)."""
        itback = assets.variables.get("ev_mode_bg_"+self.item_set,None)
        if not itback:
            itback = assets.variables.get("ev_mode_bg_evidence",None)
        # Background variant reflects what is highlighted (back/switch/grid).
        if self.back and self.canback():
            self.load(itback+"_back"+assets.appendgba)
        elif self.switch:
            self.load(itback+"_profile"+assets.appendgba)
        else:
            self.load(itback+assets.appendgba)
        if self.back or self.switch or self.page>=len(self.pages):
            pass
        else:
            if self.sy>=len(self.pages[self.page]): self.sy = 0
            if self.sx>len(self.pages[self.page][self.sy]): self.sx = len(self.pages[self.page][self.sy])-1
            page = self.pages[self.page]
            row = page[self.sy]
            if self.sx>=len(row): self.sx = len(row)-1
            col = row[self.sx]
            self.chosen = col.id
            self.chosen_icon = col
            self.remember_vars()
    def enter_down(self):
        """Confirm the current highlight: switch set, go back, zoom or check."""
        if self.switch:
            self.k_z()
        elif self.back and self.canback():
            subscript("sound_court_record_cancel")
            if self.mode == "overview":
                self.delete()
            elif self.mode == "zoomed":
                if assets.gbamode: self.delete()
                else: self.mode = "overview"
            elif self.mode == "check":
                self.mode = "overview"
        else:
            if self.mode == "overview":
                self.mode = "zoomed"
                subscript("sound_court_record_zoom")
            elif self.mode == "zoomed":
                #make sure we can check first
                #self.mode = "check"
                #self.mode = "overview"
                self.do_check()
    def do_check(self):
        """Run the chosen item's optional "<id>_check" detail scene."""
        if not self.chosen: return
        assets.variables["_selected"] = self.chosen
        chk = assets.variables.get(self.chosen+"_check",None)
        if chk:
            assets.addscene(chk)
            subscript("sound_court_record_check")
    def can_present(self):
        """True when the chosen item may be presented right now."""
        if not vtrue(assets.variables.get("_"+self.item_set+"_present","true")):
            #print "_"+self.item_set+"_present","is false"
            return
        if not self.chosen:
            #print "nothing chosen"
            return
        if self.back or self.switch:
            #print "have back or switch",self.back,self.switch
            return
        if not assets.cur_script.cross=="proceed":
            #print "no cur_script.cross"
            return
        if not vtrue(assets.variables.get("_allow_present_"+self.item_set,"true")):
            #print "_allow_present_"+self.item_set,"is false"
            return
        if not vtrue(assets.variables.get(self.chosen+"_presentable","true")):
            #print self.chosen+"_presentable","is false"
            return
        return True
    def k_x(self):
        """Present the chosen item during a cross-examination."""
        if not self.can_present(): return
        assets.variables["_selected"] = self.chosen
        assets.cur_script.cross = "presenting"
        self.delete()
        # Clear the testimony UI before jumping to the present handler.
        for o in assets.cur_script.obs:
            if isinstance(o,textbox):
                o.delete()
            if isinstance(o,uglyarrow):
                o.delete()
        assets.cur_script.goto_result((self.chosen+" "+assets.cur_script.statement).strip(),backup=self.fail)
    def next_screen(self):
        """Name of the item set after the current one (cyclic), or ""."""
        if len(self.pages_set)==1:
            return ""
        cur = self.pages_set.index(self.item_set)
        cur += 1
        if cur>=len(self.pages_set):
            cur = 0
        return self.pages_set[cur]
    def k_z(self):
        """Switch to the next item set (evidence <-> profiles)."""
        if len(self.pages_set)==1:
            return
        self.chosen = None
        self.item_set = self.next_screen()
        self.layout()
        self.remember_vars()
        #if not self.pages: self.item_set = modes[self.item_set]
        #self.layout()
        self.switch = False
        subscript("sound_court_record_switch")
    def k_space(self):
        """Cancel: zoomed drops to overview; overview closes the record."""
        subscript("sound_court_record_cancel")
        if self.mode=="zoomed":
            if assets.gbamode: self.delete()
            else: self.mode = "overview"
        elif self.canback():
            self.delete()
        #assets.cur_script.cross = ""
        #assets.cur_script.instatement = False
    def canback(self):
        """Whether a back action is currently available."""
        show_back = vtrue(assets.variables.get("_cr_back_button", "true")) and not getattr(self,"noback",False)
        if self.mode!="overview" or show_back:
            return True
        return False
    def draw(self,dest):
        """Render the record: labels, icon grid or zoomed item, arrows."""
        if assets.gbamode: self.mode = "zoomed"
        pos = self.getpos()
        dest.blit(self.img,pos)
        x,y=pos
        if not assets.gbamode:
            if vtrue(assets.variables["ev_show_mode_text"]):
                dest.blit(assets.get_image_font("itemset").render(self.item_set.capitalize(),[255,255,255]),
                [x+int(assets.variables["ev_mode_x"]),y+int(assets.variables["ev_mode_y"])])
            name = ""
            if self.chosen:
                name = assets.variables.get(self.chosen+"_name",self.chosen).replace("$","")
            if not assets.gbamode or self.mode != "zoomed":
                dest.blit(assets.get_image_font("itemname").render(name,[255,255,255]),
                [x+int(assets.variables["ev_currentname_x"]),y+int(assets.variables["ev_currentname_y"])])
            if vtrue(assets.variables.get("_evidence_enabled","true")) and vtrue(assets.variables.get("_profiles_enabled","true")):
                dest.blit(assets.get_font("itemset_big").render(
                self.next_screen().capitalize(),1,[255,255,255]),
                [x+int(assets.variables["ev_modebutton_x"]),y+int(assets.variables["ev_modebutton_y"])])
        page = []
        if self.pages:
            page = self.pages[self.page]
        if self.mode != "zoomed":
            # Overview: icon grid with the selection cursor and page arrows.
            cx,cy=0,0
            sx = pos[0]+int(assets.variables["ev_items_x"])
            sy = pos[1]+int(assets.variables["ev_items_y"])
            x,y = sx,sy
            w = int(assets.variables["ev_spacing_x"])
            h = int(assets.variables["ev_spacing_y"])
            for line in page:
                for icon in line:
                    icon.reload()
                    if not icon.small:
                        continue
                    dest.blit(icon.small,[x,y])
                    if not self.back and not self.switch and self.sx == cx and self.sy == cy:
                        dest.blit(self.cursor,[x-3,y-3])
                    x+=w
                    cx+=1
                x=sx
                cx=0
                y+=h
                cy+=1
            if len(self.pages)>1:
                arr = assets.open_art(assets.variables["ev_arrow_img"])[0]
                dest.blit(arr,[pos[0]+int(assets.variables["ev_rarrow_x"]),
                pos[1]+int(assets.variables["ev_rarrow_y"])])
                dest.blit(pygame.transform.flip(arr,1,0),
                [pos[0]+int(assets.variables["ev_larrow_x"]),
                pos[1]+int(assets.variables["ev_larrow_y"])])
        if self.mode == "zoomed":
            # Zoomed: single item with description, optional slide animation.
            showarrow = 0
            for p in self.pages:
                for line in p:
                    for icon in line:
                        showarrow += 1
            if showarrow>1:
                if not getattr(self,"arr",None):
                    self.arr = assets.open_art(assets.variables["ev_zarrow_img"])[0]
                dest.blit(self.arr,[pos[0]+int(assets.variables["ev_zrarrow_x"]),
                pos[1]+int(assets.variables["ev_zrarrow_y"])])
                dest.blit(pygame.transform.flip(self.arr,1,0),
                [pos[0]+int(assets.variables["ev_zlarrow_x"]),
                pos[1]+int(assets.variables["ev_zlarrow_y"])])
            if getattr(self,"chosen_icon",None) and getattr(self,"chosen",None):
                if self.scroll:
                    # Slide the old and new item cards across the screen.
                    self.scroll -= 16*assets.dt
                    if self.scroll<0:
                        self.scroll=0
                    self.draw_ev_zoom(self.lastchoose,[(256-self.scroll+pos[0])*self.scroll_dir,pos[1]],dest)
                    self.draw_ev_zoom(self.chosen_icon,[256*(-self.scroll_dir)+(256-self.scroll+pos[0])*self.scroll_dir,pos[1]],dest)
                else:
                    self.draw_ev_zoom(self.chosen_icon,pos[:],dest)
                chk = assets.variables.get(self.chosen+"_check",None)
                if chk:
                    check = assets.open_art(assets.variables["ev_check_img"]+assets.appendgba)[0]
                    dest.blit(check,[pos[0]+assets.sw-check.get_width(),pos[1]+assets.sh-check.get_height()])
            else:
                self.mode = "overview"
        if self.canback():
            self.back_button.draw(dest)
    def draw_ev_zoom(self,icon,pos,surf):
        """Blit the zoomed card for icon at pos; cards are cached per icon
        and per display mode on self as "<id>_zoom_<gbamode>"."""
        if not hasattr(self,icon.id+"_zoom_"+str(assets.gbamode)):
            if assets.gbamode:
                back_pos = [28,62]; tbpos = [106,78]; tbsize = [120,50]
            else:
                back_pos = [int(assets.variables["ev_z_icon_x"]),
                int(assets.variables["ev_z_icon_y"])];
                tbpos = [int(assets.variables["ev_z_textbox_x"]),
                int(assets.variables["ev_z_textbox_y"])];
                tbsize = [int(assets.variables["ev_z_textbox_w"]),
                int(assets.variables["ev_z_textbox_h"])]
            newsurf = assets.Surface([assets.sw,assets.sh]).convert_alpha()
            newsurf.fill([0,0,0,0])
            if assets.gbamode:
                pos[1]-=10
            newsurf.blit(self.ev_zoom,[int(assets.variables["ev_z_bg_x"]),
            int(assets.variables["ev_z_bg_y"])])
            name = icon.name
            if assets.gbamode:
                newsurf.blit(assets.get_image_font("itemname").render(name,[255,255,0]),[103,65])
            tb = textblock(icon.desc,[tbpos[0],tbpos[1]],tbsize,color_str(assets.variables["ev_z_text_col"]))
            newsurf.blit(icon.scaled,[back_pos[0],back_pos[1]])
            tb.pos = [tbpos[0],tbpos[1]]
            tb.draw(newsurf)
            setattr(self,icon.id+"_zoom_"+str(assets.gbamode),newsurf)
        else:
            newsurf = getattr(self,icon.id+"_zoom_"+str(assets.gbamode))
        surf.blit(newsurf,pos)
class textblock(sprite):
    """Word-wrapped multi-line text rendered with the "block" font.

    The text is split into paragraphs on the literal marker "{n}", and each
    paragraph is word-wrapped to fit self.size horizontally.  Drawing does
    not clip vertically: once a paragraph overflows the bottom, the rest of
    THAT paragraph is skipped, but following paragraphs still begin on new
    lines below the box (long-standing behaviour, preserved).
    """
    autoclear = True
    def __init__(self,text="",pos=None,size=None,color=None,surf=None):
        """text: raw string, "{n}" forces a line break; pos: [x,y] top-left;
        size: [w,h] wrap box; color: RGB word colour; surf: optional target.

        Fix: the previous list defaults (pos=[0,0] etc.) were shared,
        mutable objects aliased into every instance; None sentinels give
        each instance its own fresh lists with the same default values."""
        super(textblock,self).__init__()
        self.text = text
        # Pre-split into paragraphs, then words, once at construction time.
        self.lines = [x.split(" ") for x in text.split("{n}")]
        self.pos = [0,0] if pos is None else pos
        self.size = [100,100] if size is None else size
        self.surf = surf
        self.color = [255,255,255] if color is None else color
        self.width,self.height = self.size
    def update(self):
        """Static sprite: nothing to animate."""
        pass
    def draw(self,dest):
        """Blit the wrapped text onto dest: 10px line height, 4px word gap."""
        x = self.pos[0]
        y = self.pos[1]
        for line in self.lines:
            for word in line:
                if word.strip():
                    wordi = assets.get_font("block").render(word,1,self.color)
                else:
                    # Empty token (run of spaces): small transparent spacer.
                    wordi = pygame.Surface([4,10]).convert_alpha()
                    wordi.fill([0,0,0,0])
                if wordi.get_width()+x>self.pos[0]+self.size[0]:
                    # Word does not fit on the current line: wrap.
                    x = self.pos[0]
                    y += 10
                    if y>self.pos[1]+self.size[1]:
                        # Past the bottom: drop the rest of this paragraph.
                        break
                dest.blit(wordi,[x,y])
                x += wordi.get_width()+4
            # Paragraph break.
            x = self.pos[0]
            y += 10
class waitenter(sprite):
    # Invisible blocker: keeps the script paused until Enter is pressed.
    def __init__(self):
        super(waitenter,self).__init__()
        # Update priority comes from this class's slot in the global layer list.
        self.pri = ulayers.index(self.__class__.__name__)
    def draw(self,dest):
        # Nothing to render; this object only intercepts input.
        pass
    def update(self):
        # Returning True blocks script progress every frame.
        return True
    def enter_down(self):
        # Enter dismisses the blocker and lets the script resume.
        self.delete()
class delay(sprite):
    # Blocks the script for a fixed number of ticks, then removes itself.
    def __init__(self,ticks=1):
        super(delay,self).__init__()
        self.ticks = abs(ticks)
        self.pri = ulayers.index(self.__class__.__name__)
    def draw(self,dest):
        pass
    def update(self):
        # Count down while blocking; once exhausted, self-delete and unblock.
        if self.ticks>0:
            self.ticks -= assets.dt
            return True
        self.delete()
        return False
class timer(sprite):
    # Non-blocking countdown that exposes its remaining time to scripts and
    # optionally runs a named subscript when it expires.
    def __init__(self,ticks=1,run=None):
        sprite.__init__(self)
        self.ticks = abs(ticks)
        self.pri = ulayers.index(self.__class__.__name__)
        # Name of the subscript to run on expiry (also keys the script variable).
        self.run = run
        #self.script = assets.cur_script
    def update(self):
        # update() returns None implicitly, so a timer never blocks the script.
        self.ticks-=assets.dt
        # NOTE(review): "_timer_value_"+self.run raises TypeError when run is
        # None - confirm timers are always created with a run name.
        assets.variables["_timer_value_"+self.run] = str(self.ticks)
        if self.ticks<=0:
            self.delete()
            if self.run:
                subscript(self.run)
class effect(object):
    # Base class for screen/object effects; draw depth comes from the class
    # name's position in the global zlayers list.
    id_name = "_effect_"
    def __init__(self):
        self.z = zlayers.index(self.__class__.__name__)
    def delete(self):
        # Mark for removal; the owning list prunes objects with kill set.
        self.kill = 1
class scroll(effect):
    # Scrolls the current script's drawables by (amtx, amty, amtz) at `speed`.
    # The move is split into per-frame deltas along the dominant axis; when
    # the remaining distance is exhausted every object is snapped to its exact
    # end position. `filter` limits the affected objects to the top or bottom
    # screen. (Fix: removed a leftover debug `print` from update().)
    def __init__(self,amtx=1,amty=1,amtz=1,speed=1,wait=1,filter="top",ramp=-.005):
        super(scroll,self).__init__()
        # Keep the signed totals for computing exact end positions.
        self.aamtx,self.aamty,self.aamtz = amtx,amty,amtz
        self.amtx = abs(amtx)
        self.amty = abs(amty)
        self.amtz = abs(amtz)
        self.ramp = ramp
        self.dx=self.dy=self.dz=0
        # Build a direction vector scaled so the longer axis moves 1 unit per
        # step and the shorter axis keeps the slope.
        if amtx==0 and amty:
            self.dy=amty/abs(amty)
        elif amty==0 and amtx:
            self.dx = amtx/abs(amtx)
        elif amty==amtx==0:
            pass
        else:
            slope = amty/float(amtx)
            if abs(amtx)>abs(amty):
                self.dx = amtx/abs(amtx)
                self.dy = amty/float(abs(amtx))
            elif abs(amty)>abs(amtx):
                self.dx = amtx/float(abs(amty))
                self.dy = amty/abs(amty)
            else:
                self.dx=amtx/abs(amtx)
                self.dy=amty/abs(amty)
        if amtz:
            self.dz = (amtz)/abs(amtz)*speed
        self.dx*=speed
        self.dy*=speed
        self.pri = ulayers.index(self.__class__.__name__)
        self.speed = speed
        self.obs = assets.cur_script.obs
        self.filter = filter
        self.wait = wait
        self.initialized = False
        if not self.amtx and not self.amty and not self.amtz:
            # Nothing to move: never block the script.
            self.wait = False
    def draw(self,dest): pass
    def init_scroll(self):
        # Snapshot start/end positions lazily on the first update so the
        # object list is captured when scrolling actually begins.
        if self.initialized:
            return
        self.initialized = True
        self.stuff = []
        for o in self.obs:
            if not hasattr(o,"pos"):
                continue
            # y=192 is the top/bottom screen boundary.
            if self.filter=="top" and o.pos[1]>=192:
                continue
            if self.filter=="bottom" and o.pos[1]<192:
                continue
            d = {"ob":o,"start":None,"end":None}
            if hasattr(o,"pos"):
                d["start"] = o.pos[:]
                d["end"] = [int(o.pos[0]+self.aamtx),int(o.pos[1]+self.aamty)]
            self.stuff.append(d)
    def update(self):
        self.init_scroll()
        ndx,ndy,ndz = self.dx*assets.dt,self.dy*assets.dt,self.dz*assets.dt
        # Consume remaining distance, clamping the final step so we never
        # overshoot past the requested total.
        self.amtx-=abs(ndx)
        if self.amtx<0:
            ndx+=self.amtx*(self.dx/abs(self.dx))
            self.amtx=0
        self.amty-=abs(ndy)
        if self.amty<0:
            ndy+=self.amty*(self.dy/abs(self.dy))
            self.amty=0
        self.amtz-=abs(ndz)
        if self.amtz<0:
            ndz+=self.amtz*(self.dz/abs(self.dz))
            self.amtz=0
        for d in self.stuff:
            if getattr(d["ob"],"kill",0): continue
            if d["start"]:
                d["ob"].pos[0]+=ndx
                d["ob"].pos[1]+=ndy
            if isinstance(d["ob"],mesh):
                d["ob"].trans(z=ndz)
        #Ramp not really working here
        #self.dx+=self.ramp*ndx
        #self.dy+=self.ramp*ndy
        #self.dz+=self.ramp*ndz
        if self.amtx<=0 and self.amty<=0 and self.amtz<=0:
            # Done: snap every object to its exact destination.
            for d in self.stuff:
                if d["end"]:
                    d["ob"].pos[:] = d["end"]
            self.delete()
            return False
        if self.wait:
            return True
    def control_last(self):
        # Restrict the scroll to the most recently created positioned object.
        for o in reversed(assets.cur_script.obs):
            if hasattr(o,"pos") and not getattr(o,"kill",0):
                self.obs = [o]
                return
        if vtrue(assets.variables.get("_debug","false")):
            raise missing_object("Scroll: no objects found to scroll")
    def control(self,name):
        # Restrict the scroll to the object with the given id_name.
        self.filter = None
        for o in reversed(assets.cur_script.obs):
            if getattr(o,"id_name",None)==name:
                self.obs = [o]
                return
        if vtrue(assets.variables.get("_debug","false")):
            raise missing_object("Scroll: no object named "+str(name)+" found")
class zoomanim(effect):
    # Grows (or shrinks) objects' `dim` by `mag`, spread evenly over `frames`.
    def __init__(self,mag=1,frames=1,wait=1,name=None):
        # NOTE(review): the `name` parameter is unused here; object selection
        # happens via control()/control_last() - confirm intended.
        super(zoomanim,self).__init__()
        self.mag=mag
        self.pri = ulayers.index(self.__class__.__name__)
        self.frames = frames
        self.obs = assets.cur_script.obs
        # Per-frame increment so the full magnification lands after `frames`.
        self.mag_per_frame = float(self.mag)/float(self.frames)
        self.wait = wait
        self.kill = 0
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        self.frames -= assets.dt
        if self.frames <= 0:
            self.delete()
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"dim"):
                o.dim += self.mag_per_frame*assets.dt
        if self.wait:
            return True
    def control_last(self):
        # Restrict the zoom to the most recently created positioned object.
        for o in reversed(assets.cur_script.obs):
            if hasattr(o,"pos") and not getattr(o,"kill",0):
                self.obs = [o]
                return
        if vtrue(assets.variables.get("_debug","false")):
            raise missing_object("zoom: no objects found to zoom")
    def control(self,name):
        # Restrict the zoom to the object with the given id_name.
        self.filter = None
        for o in reversed(assets.cur_script.obs):
            if getattr(o,"id_name",None)==name:
                self.obs = [o]
                return
        if vtrue(assets.variables.get("_debug","false")):
            raise missing_object("zoom: no object named "+str(name)+" found")
class rotateanim(effect):
    # Rotates objects around one axis by `degrees` total, `speed` per tick.
    def __init__(self,axis="z",degrees=90,speed=1,wait=1,name=None,obs=[]):
        super(rotateanim,self).__init__()
        # Accept "x"/"y"/"z" or a numeric axis index.
        self.axis = {"x":0,"y":1,"z":2,0:0,1:1,2:2}[axis]
        self.degrees = degrees
        self.pri = ulayers.index(self.__class__.__name__)
        self.speed = speed
        # NOTE(review): obs=[] is a shared mutable default; it is only
        # filtered/replaced here, never mutated in place, so it is benign.
        self.obs = obs
        self.wait = wait
        self.kill = 0
        if name:
            self.obs = [o for o in self.obs if getattr(o,"id_name",None)==name]
            if not self.obs and vtrue(assets.variables.get("_debug","false")):
                raise missing_object("rotate: no object named "+str(name)+" found")
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        amt = self.speed*assets.dt
        # Step toward zero remaining degrees, clamping the last step; the
        # sign of the applied amount depends on the rotation direction.
        if self.degrees>0:
            self.degrees-=amt
            if self.degrees<=0:
                self.delete()
                amt+=self.degrees
            amt = -amt
        elif self.degrees<0:
            self.degrees+=amt
            if self.degrees>=0:
                self.delete()
                amt-=self.degrees
        else:
            self.delete()
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"rot"):
                o.rot[self.axis] += amt
            if hasattr(o,"rotate"):
                o.rotate(self.axis,amt)
        if self.wait:
            return True
class fadeanim(effect):
    # Fades objects between opacity percentages `start` and `end` (0-100),
    # calling each object's setfade() with the 0-255 equivalent every tick.
    def __init__(self,start=0,end=100,speed=1,wait=1,name=None,obs=[]):
        super(fadeanim,self).__init__()
        self.start = start
        self.end = end
        self.pri = ulayers.index(self.__class__.__name__)
        self.speed = speed
        self.obs = obs
        self.wait = wait
        self.kill = 0
        if name:
            self.obs = [o for o in self.obs if getattr(o,"id_name",None)==name]
            if not self.obs and vtrue(assets.variables.get("_debug","false")):
                raise missing_object("fade: no object named "+str(name)+" found")
        # Apply the initial value immediately on creation.
        self.update()
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        amt = self.speed*assets.dt
        if self.start<self.end:
            self.start+=amt
            if self.start>self.end:
                amt -= (self.start-self.end)
                self.delete()
        elif self.start>self.end:
            self.start-=amt
            if self.start<self.end:
                amt-=(self.end-self.start)
                self.delete()
        else:
            self.delete()
        # NOTE(review): on the final tick self.start can overshoot end before
        # being used below - confirm the one-frame overshoot is acceptable.
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"setfade"):
                o.setfade(int((self.start/100.0)*255.0))
        if self.wait:
            return True
class tintanim(effect):
    # Animates object tint from one RGB color toward another: each tick moves
    # every channel of `start` toward `end` by speed*dt and pushes the current
    # color (as 0..1 floats) onto each object's `tint`.
    # (Fixes: removed two leftover debug `print` statements, removed dead
    # commented-out attribute lines, and replaced the shared mutable
    # default obs=[].)
    def __init__(self,start="ffffff",end="000000",speed=1,wait=1,name=None,obs=None):
        super(tintanim,self).__init__()
        self.start = color_str(start)
        self.end = color_str(end)
        self.pri = ulayers.index(self.__class__.__name__)
        self.speed = speed
        self.obs = obs if obs is not None else []
        self.wait = wait
        self.kill = 0
        if name:
            self.obs = [o for o in self.obs if getattr(o,"id_name",None)==name]
            if not self.obs and vtrue(assets.variables.get("_debug","false")):
                raise missing_object("tint: no object named "+str(name)+" found")
        # Apply the initial tint immediately on creation.
        self.update()
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        amt = self.speed*assets.dt
        col = self.start
        done = 0
        # Step each channel toward its target; a channel counts as done once
        # it reaches (or passes) the target value.
        for r in range(3):
            if col[r]<self.end[r]:
                col[r]+=amt
                if col[r]>self.end[r]:
                    amt -= (col[r]-self.end[r])
                    done+=1
            elif col[r]>self.end[r]:
                col[r]-=amt
                if col[r]<self.end[r]:
                    amt-=(self.end[r]-col[r])
                    done+=1
            else:
                done+=1
        if done==3:
            self.delete()
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"setfade"):
                o.tint = [x/255.0 for x in col]
        if self.wait:
            return True
class invertanim(effect):
    # Steps objects' `invert` value from `start` to `end`.
    def __init__(self,start=0,end=1,speed=1,wait=0,name=None,obs=[],**kw):
        super(invertanim,self).__init__()
        self.start = start
        self.end = end
        # Deliberately shares the fadeanim layer slot.
        self.pri = ulayers.index("fadeanim")
        self.speed = speed
        self.obs = obs
        self.wait = wait
        self.kill = 0
        if name:
            self.obs = [o for o in self.obs if getattr(o,"id_name",None)==name]
            if not self.obs and vtrue(assets.variables.get("_debug","false")):
                raise missing_object("invert: no object named "+str(name)+" found")
        # Apply the initial value immediately on creation.
        self.update()
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        # NOTE(review): unlike fadeanim, the step is NOT scaled by assets.dt
        # here - confirm whether that is intentional.
        amt = self.speed
        if self.start<self.end:
            self.start+=amt
            if self.start>self.end:
                amt -= (self.start-self.end)
                self.delete()
        elif self.start>self.end:
            self.start-=amt
            if self.start<self.end:
                amt-=(self.end-self.start)
                self.delete()
        else:
            self.delete()
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"setfade"):
                o.invert = self.start
        if self.wait:
            return True
class greyscaleanim(effect):
    # Steps objects' `greyscale` value from `start` to `end`
    # (mirror of invertanim, writing a different attribute).
    def __init__(self,start=0,end=1,speed=1,wait=0,name=None,obs=[],**kw):
        super(greyscaleanim,self).__init__()
        self.start = start
        self.end = end
        # Deliberately shares the fadeanim layer slot.
        self.pri = ulayers.index("fadeanim")
        self.speed = speed
        self.obs = obs
        self.wait = wait
        self.kill = 0
        if name:
            self.obs = [o for o in self.obs if getattr(o,"id_name",None)==name]
            if not self.obs and vtrue(assets.variables.get("_debug","false")):
                raise missing_object("greyscale: no object named "+str(name)+" found")
        # Apply the initial value immediately on creation.
        self.update()
    def draw(self,dest): pass
    def update(self):
        if self.kill: return False
        # NOTE(review): step is not scaled by assets.dt (same as invertanim).
        amt = self.speed
        if self.start<self.end:
            self.start+=amt
            if self.start>self.end:
                amt -= (self.start-self.end)
                self.delete()
        elif self.start>self.end:
            self.start-=amt
            if self.start<self.end:
                amt-=(self.end-self.start)
                self.delete()
        else:
            self.delete()
        for o in self.obs:
            if getattr(o,"kill",0): continue
            if hasattr(o,"setfade"):
                o.greyscale = self.start
        if self.wait:
            return True
class flash(effect):
    # Brief full-screen white flash; lives for 5 ticks and blocks meanwhile.
    def __init__(self):
        super(flash,self).__init__()
        self.pri = ulayers.index(self.__class__.__name__)
        self.ttl = 5
        self.color = [255,255,255]
        # One reusable screen-sized overlay surface.
        self.surf = pygame.Surface(pygame.screen.get_size())
        # Optional sound effect, controlled by a script variable.
        if vtrue(assets.variables.get("_flash_sound","false")):
            assets.play_sound("Slash.ogg")
    def draw(self,dest):
        self.surf.fill(self.color)
        dest.blit(self.surf,[0,0])
    def update(self):
        self.ttl -= assets.dt
        if self.ttl<=0:
            self.delete()
        return True
class shake(effect):
    # Screen-shake: re-blits the screen at a random offset that decays each
    # tick; lasts 15 ticks and blocks the script while active.
    def __init__(self,screen_setting="top"):
        super(shake,self).__init__()
        self.pri = ulayers.index(self.__class__.__name__)
        self.ttl = 15
        self.offset = 15
        # Optional sound effect, controlled by a script variable.
        if vtrue(assets.variables.get("_shake_sound","false")):
            assets.play_sound("Shock.ogg")
        self.wait = True
        # "top" shakes only the 256x192 top screen; "both" shakes everything.
        self.screen_setting = screen_setting
    def draw(self,dest):
        o = int(self.offset)
        if self.screen_setting == "top":
            dest.subsurface([[0,0],[256,192]]).blit(dest.subsurface([[0,0],[256,192]]).copy(),[random.randint(-o,o),random.randint(-o,o)])
        elif self.screen_setting == "both":
            dest.blit(dest.copy(),[random.randint(-o,o),random.randint(-o,o)])
    def update(self):
        # Decay the shake amplitude toward a floor of 1 pixel.
        self.offset -= abs(self.offset / self.ttl)
        if self.offset < 1: self.offset = 1
        self.ttl -= assets.dt
        if self.ttl<=0: self.delete()
        return self.wait
class guiBack(sprite,gui.widget):
    # Clickable "back" button, anchored to the bottom screen by default.
    def click_down_over(self,mp):
        self.k_space()
    def __init__(self,image=None,x=None,y=None,z=None,name=None):
        sprite.__init__(self)
        gui.widget.__init__(self)
        self.pri = ulayers.index(self.__class__.__name__)
        if not image:
            image = "general/back"
        self.image = image
        # Loads the normal (non-highlighted) image into self.img.
        self.unhighlight()
        # Default: bottom-left of the lower screen.
        self.pos = [0,assets.sh+assets.sh-self.img.get_height()]
        if x is not None:
            self.pos[0] = x
        if y is not None:
            self.pos[1] = y
        if z is not None:
            self.z = z
        if name is not None:
            self.id_name = name
        # Re-init the widget now that position and size are known.
        gui.widget.__init__(self,self.pos,self.img.get_size())
        self.screen_setting = "try_bottom"
    def highlight(self):
        self.load(self.image+"_high"+assets.appendgba)
    def unhighlight(self):
        self.load(self.image+assets.appendgba)
    def k_space(self):
        # Space (or click) dismisses the button and plays the cancel sound.
        self.delete()
        subscript("sound_back_button_cancel")
    def update(self):
        return True
class guiScroll(sprite,gui.widget):
    # Arrow button that scrolls the parent examine view one screen sideways.
    def click_down_over(self,mp):
        self.k_z()
    def __init__(self,direction):
        # direction is -1 or 1; flipx mirrors the arrow image accordingly.
        sprite.__init__(self,flipx=direction+1)
        gui.widget.__init__(self)
        self.pri = ulayers.index(self.__class__.__name__)
        self.load("general/examine_scroll")
        # Centered horizontally at the bottom of the lower screen.
        self.pos = [assets.sw//2-self.img.get_width()//2,assets.sh*2-self.img.get_height()]
        gui.widget.__init__(self,self.pos,self.img.get_size())
        self.direction = direction
        self.screen_setting = "try_bottom"
    def k_z(self):
        # Kick off the parent's horizontal scroll, one screen width.
        self.delete()
        self.parent.xscrolling = self.direction*assets.sw
        subscript("sound_examine_scroll")
        #del self.parent.scrollbut
    def update(self):
        return False
class guiWait(sprite):
    # Invisible blocker that optionally runs a macro every frame while the
    # script waits, and can mute audio for the duration.
    # (Fix: removed the unused local `ns` in update().)
    id_name = "_guiWait_"
    def __init__(self,run=None, mute=False):
        sprite.__init__(self)
        # NOTE(review): guiWait does not inherit gui.widget, yet this init is
        # called on it - confirm why this works before changing it.
        gui.widget.__init__(self)
        self.width = 0
        self.height = 0
        self.pri = ulayers.index(self.__class__.__name__)
        self.pos = [0,0]
        self.run = run
        self.script = assets.cur_script
        self.mute = mute #Mute sound while waiting
        if self.mute:
            assets.pause_sound()
    def delete(self):
        self.kill = 1
        if self.mute:
            # Restore the audio that was paused on construction.
            assets.resume_sound()
    def update(self):
        if self.run:
            # Run the macro for its side effects; its return value is unused.
            self.script.execute_macro(self.run)
        return True
class saved(fadesprite):
    # Transient on-screen notice (default "Saving...") that disappears after
    # `ticks`; blocks the script while visible when `block` is set.
    def __init__(self,ticks=150,text="Saving...",block=True):
        super(saved,self).__init__()
        self.id_name = "_saved_"
        self.text = text
        self.ticks = abs(ticks)
        self.start = self.ticks
        self.pri = ulayers.index(self.__class__.__name__)
        self.pos[0] = 0
        self.pos[1] = 0
        self.block = block
        self.width = 0
        self.height = 0
    def draw(self,dest):
        # Light text with a slightly smaller dark copy offset underneath,
        # giving a simple embossed look.
        font = assets.get_font("itemset_big")
        light = font.render(self.text,1,[230,230,230])
        dark = font.render(self.text,1,[30,30,30])
        dark = pygame.transform.scale(dark,[dark.get_width()-4,dark.get_height()-4])
        dest.blit(light,self.pos)
        dest.blit(dark,[self.pos[0]+2,self.pos[1]+2])
    def update(self):
        if self.ticks<=0:
            self.delete()
            return False
        self.ticks -= assets.dt
        return self.block
class error_msg(gui.pane):
    # Click-to-dismiss error panel: wraps the message into ~35-char rows of
    # inert editboxes and appends scene/line/game context.
    def __repr__(self):
        return self.msg
    def delete(self):
        self.kill = 1
    def click_down_over(self,mp):
        self.delete()
    def __init__(self,msg,line,lineno,script):
        self.id_name = "_error_msg_"
        self.pri = ulayers.index(self.__class__.__name__)
        self.z = zlayers.index(self.__class__.__name__)
        gui.pane.__init__(self)
        # Append the failing scene/line and current game for context.
        msg+="\nscene:'"+script.scene+"', line '"+str(lineno)+"'"
        msg+="\ncurrent game:"+assets.game
        self.msg = msg
        # Hard-wrap the message: break after 35 chars (adding " - ") or at
        # explicit newlines.
        msg_lines = [""]
        for c in msg:
            msg_lines[-1]+=c
            if (len(msg_lines[-1])>35 or c == "\n") and msg_lines[-1].strip():
                if len(msg_lines[-1])>35:
                    msg_lines[-1]+=" - "
                msg_lines.append("")
        msg_lines.append(" Click to continue ")
        for msg_line in msg_lines:
            # One read-only editbox per wrapped row; its click handlers are
            # replaced with no-ops so clicks fall through to the pane.
            msg = gui.editbox(None,msg_line)
            msg.draw(assets.Surface([64,64]))
            msg.draw_back=False
            def click_down_over(self,*args):
                pass
            msg.click_down_over = click_down_over
            msg.event = click_down_over
            msg.width = 0
            self.children.append(msg)
        self.lineno = lineno
        self.script = script
        self.width=256
        self.height=len(msg_lines)*20
        # In production builds the panel silently removes itself.
        if vtrue(assets.variables.get("_production","false")):
            self.delete()
    def update(self):
        return True
class script_code(gui.pane):
    # Debug overlay listing the current script's lines in a scroll pane,
    # highlighting the line being executed in red.
    def __repr__(self):
        # NOTE(review): self.msg is never assigned in __init__, so repr()
        # would raise AttributeError - confirm whether this is ever called.
        return self.msg
    def delete(self):
        self.kill = 1
    def __init__(self,script):
        gui.pane.__init__(self)
        self.rpos = [0,0]
        self.width = 256
        self.height = 192
        # Extreme priority/z so the overlay sits above everything.
        self.pri = -10000
        self.z = 10000
        self.children.append(gui.label(script.scene))
        self.lines = gui.scrollpane([10,20])
        self.lines.rpos = [0,40]
        self.lines.width = 240
        self.lines.height = 140
        self.children.append(self.lines)
        scroll_i = None
        for i,line in enumerate(script.scriptlines+["END OF SCRIPT"]):
            color = [0,0,0]
            text = line
            if i==script.si:
                # Current line: red with a "> " marker.
                color = [255,0,0]
                text = "> "+line
            line = gui.label(text)
            line.textcol = color
            if i==script.si:
                scroll_i = line
            self.lines.pane.children.append(line)
        if scroll_i:
            # Jump the scroll pane to the executing line.
            self.lines.updatescroll()
            self.lines.scroll_to_object(scroll_i)
        self.children.append(gui.button(self,"delete"))
    def update(self):
        return True
class movie:
    # Plays a movie (with optional separate sound track) until it finishes.
    def __init__(self,name,sound=None):
        self.movie = assets.open_movie(name)
        # Movie audio is muted; sound, if any, comes from the separate track.
        self.movie.set_volume(0)
        self.size = self.movie.get_size()
        self.surf = pygame.Surface(self.size).convert()
        self.movie.set_display(self.surf)
        self.sound = sound
        if sound:
            self.sound = assets.play_sound(sound)
        self.pri = ulayers.index(self.__class__.__name__)
        self.z = zlayers.index(self.__class__.__name__)
        self.paused = 0
        self.id_name = name
    def delete(self):
        self.kill = 1
    def update(self):
        # Each update marks the frame live and (re)starts playback; once the
        # movie stops being busy, tear everything down.
        self.paused = 0
        self.movie.play()
        if self.sound:
            self.sound.unpause()
        if self.movie.get_busy(): return True
        del self.movie
        self.delete()
        if self.sound:
            self.sound.stop()
    def draw(self,dest):
        dest.blit(self.surf,[0,0])
        # If draw() runs twice without an update() in between (the scene is
        # otherwise blocked), pause the movie and sound until updates resume.
        if not self.paused:
            self.paused = 1
        elif self.paused == 1:
            if self.movie.get_busy():
                self.movie.pause()
            if self.sound:
                self.sound.pause()
|
crxtrdude/pywright
|
core/core.py
|
Python
|
bsd-3-clause
| 165,103
|
from copy import copy
from datetime import datetime
import json
import string
from django.conf import settings
from django.db import models
import commonware.log
import jinja2
from tower import ugettext as _
from uuidfield.fields import UUIDField
import amo
import amo.models
from access.models import Group
from addons.models import Addon
from bandwagon.models import Collection
from reviews.models import Review
from tags.models import Tag
from translations.fields import save_signal, TranslatedField
from users.helpers import user_link
from users.models import UserProfile
from versions.models import Version
log = commonware.log.getLogger('devhub')
table_name = lambda n: n + settings.LOG_TABLE_SUFFIX
class RssKey(models.Model):
    # Unique key tied to either one addon or one user; rows carry a creation
    # date. (Presumably used to build private RSS feed URLs - confirm.)
    key = UUIDField(db_column='rsskey', auto=True, unique=True)
    addon = models.ForeignKey(Addon, null=True, unique=True)
    user = models.ForeignKey(UserProfile, null=True, unique=True)
    created = models.DateField(default=datetime.now)
    class Meta:
        db_table = 'hubrsskeys'
class BlogPost(amo.models.ModelBase):
    # Minimal blog-post record: title, post date, and external permalink.
    title = models.CharField(max_length=255)
    date_posted = models.DateField(default=datetime.now)
    permalink = models.CharField(max_length=255)
    class Meta:
        db_table = 'blogposts'
class HubPromo(amo.models.ModelBase):
    # Translated promo copy with an audience-visibility flag.
    VISIBILITY_CHOICES = (
        (0, 'Nobody'),
        (1, 'Visitors'),
        (2, 'Developers'),
        (3, 'Visitors and Developers'),
    )
    heading = TranslatedField()
    body = TranslatedField()
    visibility = models.SmallIntegerField(choices=VISIBILITY_CHOICES)
    class Meta:
        db_table = 'hubpromos'
    def __unicode__(self):
        return unicode(self.heading)
    def flush_urls(self):
        # Cache-invalidation URL globs for this object.
        return ['*/developers*']
# Persist HubPromo's translated fields via the shared pre-save hook.
models.signals.pre_save.connect(save_signal, sender=HubPromo,
                                dispatch_uid='hubpromo_translations')
class HubEvent(amo.models.ModelBase):
    # Event listing: name, link, location, and date.
    name = models.CharField(max_length=255, default='')
    url = models.URLField(max_length=255, default='')
    location = models.CharField(max_length=255, default='')
    date = models.DateField(default=datetime.now)
    class Meta:
        db_table = 'hubevents'
    def __unicode__(self):
        return self.name
    def flush_urls(self):
        # Cache-invalidation URL globs for this object.
        return ['*/developers*']
class AddonLog(amo.models.ModelBase):
    """
    This table is for indexing the activity log by addon.
    """
    addon = models.ForeignKey(Addon)
    activity_log = models.ForeignKey('ActivityLog')
    class Meta:
        db_table = table_name('log_activity_addon')
        ordering = ('-created',)
class CommentLog(amo.models.ModelBase):
    """
    This table is for indexing the activity log by comment.
    """
    activity_log = models.ForeignKey('ActivityLog')
    comments = models.CharField(max_length=255)
    class Meta:
        db_table = table_name('log_activity_comment')
        ordering = ('-created',)
class VersionLog(amo.models.ModelBase):
    """
    This table is for indexing the activity log by version.
    """
    activity_log = models.ForeignKey('ActivityLog')
    version = models.ForeignKey(Version)
    class Meta:
        db_table = table_name('log_activity_version')
        ordering = ('-created',)
class UserLog(amo.models.ModelBase):
    """
    This table is for indexing the activity log by user.
    Note: This includes activity performed unto the user.
    """
    activity_log = models.ForeignKey('ActivityLog')
    user = models.ForeignKey(UserProfile)
    class Meta:
        db_table = table_name('log_activity_user')
        ordering = ('-created',)
class GroupLog(amo.models.ModelBase):
    """
    This table is for indexing the activity log by access group.
    """
    activity_log = models.ForeignKey('ActivityLog')
    group = models.ForeignKey(Group)
    class Meta:
        db_table = table_name('log_activity_group')
        ordering = ('-created',)
class ActivityLogManager(amo.models.ManagerBase):
    # Query helpers for ActivityLog; most join through the per-entity index
    # tables (AddonLog, VersionLog, UserLog, GroupLog).
    def for_addons(self, addons):
        """Logs indexed against the given addon (or iterable of addons)."""
        if isinstance(addons, Addon):
            addons = (addons,)
        vals = (AddonLog.objects.filter(addon__in=addons)
                        .values_list('activity_log', flat=True))
        if vals:
            return self.filter(pk__in=list(vals))
        else:
            return self.none()
    def for_version(self, version):
        """Logs indexed against the given version."""
        vals = (VersionLog.objects.filter(version=version)
                          .values_list('activity_log', flat=True))
        return self.filter(pk__in=list(vals))
    def for_group(self, group):
        """Logs indexed against the given access group."""
        return self.filter(grouplog__group=group)
    def for_user(self, user):
        """Logs indexed against (performed by or unto) the given user."""
        vals = (UserLog.objects.filter(user=user)
                       .values_list('activity_log', flat=True))
        return self.filter(pk__in=list(vals))
    def for_developer(self):
        """Logs safe to show to developers (admin/hidden actions excluded)."""
        return self.exclude(action__in=amo.LOG_ADMINS + amo.LOG_HIDE_DEVELOPER)
    def admin_events(self):
        return self.filter(action__in=amo.LOG_ADMINS)
    def editor_events(self):
        return self.filter(action__in=amo.LOG_EDITORS)
    def review_queue(self):
        # Review-queue actions, excluding the automated task user.
        qs = self._by_type()
        return (qs.filter(action__in=amo.LOG_REVIEW_QUEUE)
                  .exclude(user__id=settings.TASK_USER_ID))
    def total_reviews(self, theme=False):
        """Return the top users, and their # of reviews."""
        qs = self._by_type()
        return (qs.values('user', 'user__display_name', 'user__username')
                  .filter(action__in=([amo.LOG.THEME_REVIEW.id] if theme
                                      else amo.LOG_REVIEW_QUEUE))
                  .exclude(user__id=settings.TASK_USER_ID)
                  .annotate(approval_count=models.Count('id'))
                  .order_by('-approval_count'))
    def monthly_reviews(self, theme=False):
        """Return the top users for the month, and their # of reviews."""
        qs = self._by_type()
        now = datetime.now()
        created_date = datetime(now.year, now.month, 1)
        return (qs.values('user', 'user__display_name', 'user__username')
                  .filter(created__gte=created_date,
                          action__in=([amo.LOG.THEME_REVIEW.id] if theme
                                      else amo.LOG_REVIEW_QUEUE))
                  .exclude(user__id=settings.TASK_USER_ID)
                  .annotate(approval_count=models.Count('id'))
                  .order_by('-approval_count'))
    def user_approve_reviews(self, user):
        """All review-queue actions performed by a specific user."""
        qs = self._by_type()
        return qs.filter(action__in=amo.LOG_REVIEW_QUEUE, user__id=user.id)
    def current_month_user_approve_reviews(self, user):
        now = datetime.now()
        ago = datetime(now.year, now.month, 1)
        return self.user_approve_reviews(user).filter(created__gte=ago)
    def user_position(self, values_qs, user):
        """1-based rank of `user` within a ranked values queryset, or None."""
        try:
            return next(i for (i, d) in enumerate(list(values_qs))
                        if d.get('user') == user.id) + 1
        except StopIteration:
            return None
    def total_reviews_user_position(self, user, theme=False):
        return self.user_position(self.total_reviews(theme), user)
    def monthly_reviews_user_position(self, user, theme=False):
        return self.user_position(self.monthly_reviews(theme), user)
    def _by_type(self):
        # Joins the addon index table so callers can filter/annotate per addon.
        qs = super(ActivityLogManager, self).get_query_set()
        table = table_name('log_activity_addon')
        return qs.extra(
            tables=[table],
            where=['%s.activity_log_id=%s.id'
                   % (table, table_name('log_activity'))])
class SafeFormatter(string.Formatter):
    """A replacement for str.format that escapes interpolated values."""
    def get_field(self, *args, **kw):
        # obj is the value getting interpolated into the string.
        obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
        # HTML-escape the value before it lands in the formatted output.
        return jinja2.escape(obj), used_key
class ActivityLog(amo.models.ModelBase):
    # A single logged action with JSON-serialized arguments and details.
    # (Fixes: narrowed a bare ``except:`` in the arguments getter, and
    # replaced the mutable-default ``args=[]`` in the setter.)
    TYPES = sorted([(value.id, key) for key, value in amo.LOG.items()])
    user = models.ForeignKey('users.UserProfile', null=True)
    action = models.SmallIntegerField(choices=TYPES, db_index=True)
    _arguments = models.TextField(blank=True, db_column='arguments')
    _details = models.TextField(blank=True, db_column='details')
    objects = ActivityLogManager()
    formatter = SafeFormatter()
    class Meta:
        db_table = table_name('log_activity')
        ordering = ('-created',)
    def f(self, *args, **kw):
        """Calls SafeFormatter.format and returns a Markup string."""
        # SafeFormatter escapes everything so this is safe.
        return jinja2.Markup(self.formatter.format(*args, **kw))
    @property
    def arguments(self):
        """Deserialize _arguments back into model instances and literals.

        Returns None when the stored JSON cannot be parsed.
        """
        try:
            # d is a structure:
            # ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
            d = json.loads(self._arguments)
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit still propagate.
            log.debug('unserializing data from addon_log failed: %s' % self.id)
            return None
        objs = []
        for item in d:
            # item has only one element.
            model_name, pk = item.items()[0]
            if model_name in ('str', 'int', 'null'):
                objs.append(pk)
            else:
                (app_label, model_name) = model_name.split('.')
                model = models.loading.get_model(app_label, model_name)
                # Cope with soft deleted models.
                if hasattr(model, 'with_deleted'):
                    objs.extend(model.with_deleted.filter(pk=pk))
                else:
                    objs.extend(model.objects.filter(pk=pk))
        return objs
    @arguments.setter
    def arguments(self, args=None):
        """
        Takes an object or a tuple of objects and serializes them and stores it
        in the db as a json string.
        """
        # ``None`` default replaces a mutable ``[]`` default; None was
        # already normalized below, so behavior is unchanged.
        if args is None:
            args = []
        if not isinstance(args, (list, tuple)):
            args = (args,)
        serialize_me = []
        for arg in args:
            if isinstance(arg, basestring):
                serialize_me.append({'str': arg})
            elif isinstance(arg, (int, long)):
                serialize_me.append({'int': arg})
            elif isinstance(arg, tuple):
                # Instead of passing an addon instance you can pass a tuple:
                # (Addon, 3) for Addon with pk=3
                serialize_me.append(dict(((unicode(arg[0]._meta), arg[1]),)))
            else:
                serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))
        self._arguments = json.dumps(serialize_me)
    @property
    def details(self):
        if self._details:
            return json.loads(self._details)
    @details.setter
    def details(self, data):
        self._details = json.dumps(data)
    @property
    def log(self):
        return amo.LOG_BY_ID[self.action]
    def to_string(self, type_=None):
        """Render this entry as escaped HTML using the action's format string."""
        log_type = amo.LOG_BY_ID[self.action]
        if type_ and hasattr(log_type, '%s_format' % type_):
            format = getattr(log_type, '%s_format' % type_)
        else:
            format = log_type.format
        # We need to copy arguments so we can remove elements from it
        # while we loop over self.arguments.
        arguments = copy(self.arguments)
        addon = None
        review = None
        version = None
        collection = None
        tag = None
        group = None
        # Pull out the first argument of each known type for the named format
        # slots; whatever remains fills the positional slots.
        for arg in self.arguments:
            if isinstance(arg, Addon) and not addon:
                addon = self.f(u'<a href="{0}">{1}</a>',
                               arg.get_url_path(), arg.name)
                arguments.remove(arg)
            if isinstance(arg, Review) and not review:
                review = self.f(u'<a href="{0}">{1}</a>',
                                arg.get_url_path(), _('Review'))
                arguments.remove(arg)
            if isinstance(arg, Version) and not version:
                text = _('Version {0}')
                version = self.f(u'<a href="{1}">%s</a>' % text,
                                 arg.version, arg.get_url_path())
                arguments.remove(arg)
            if isinstance(arg, Collection) and not collection:
                collection = self.f(u'<a href="{0}">{1}</a>',
                                    arg.get_url_path(), arg.name)
                arguments.remove(arg)
            if isinstance(arg, Tag) and not tag:
                if arg.can_reverse():
                    tag = self.f(u'<a href="{0}">{1}</a>',
                                 arg.get_url_path(), arg.tag_text)
                else:
                    tag = self.f('{0}', arg.tag_text)
                # NOTE(review): unlike the other branches, Tag args are not
                # removed from ``arguments`` - confirm whether intentional.
            if isinstance(arg, Group) and not group:
                group = arg.name
                arguments.remove(arg)
        user = user_link(self.user)
        try:
            kw = dict(addon=addon, review=review, version=version,
                      collection=collection, tag=tag, user=user, group=group)
            return self.f(format, *arguments, **kw)
        except (AttributeError, KeyError, IndexError):
            # Malformed stored data: degrade gracefully instead of crashing.
            log.warning('%d contains garbage data' % (self.id or 0))
            return 'Something magical happened.'
    def __unicode__(self):
        return self.to_string()
    def __html__(self):
        return self
class SubmitStep(models.Model):
    # Per-addon integer step marker (presumably the add-on submission wizard
    # progress - confirm against the submission views).
    addon = models.ForeignKey(Addon)
    step = models.IntegerField()
    class Meta:
        db_table = 'submit_step'
|
SuriyaaKudoIsc/olympia
|
apps/devhub/models.py
|
Python
|
bsd-3-clause
| 13,462
|
#!/usr/bin/env python
"""Module containing definition of enum class PortDistributionType."""
from enum import Enum
class PortDistributionType(Enum):
    """
    Enum for every possible distribution type of port values.
    """
    # Source ports
    SPAR = 0  # arbitrary port ranges
    SPEM = 1  # exact match ports
    # Destination ports
    DPAR = 2  # arbitrary port ranges
    DPEM = 3  # exact match ports
|
classbench-ng/classbench-ng
|
lib/tuples_analyzer/calculation_parameters/enums/port_distribution_type.py
|
Python
|
gpl-3.0
| 497
|
# -*- coding: utf-8 -*-
# Django
from django.views.generic import View, TemplateView
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
# CRMA
from .models import Subscription, EmailScheduler
from .models import cancel_subscription
from .utils import decode_id
class UnsubscribeCompleted(TemplateView):
    # Static confirmation page shown after a successful unsubscribe.
    template_name = 'crma/unsubscribe_completed.html'
class UnsubscribeView(View):
    # Cancels the subscription identified by the unsubscribe key in the URL,
    # then redirects to the confirmation page.
    success_url = 'crma_unsubscribe_completed'
    def get(self, request, *args, **kwargs):
        # Normalize the key to lowercase before the lookup; unknown keys 404.
        key = kwargs['key'].lower()
        subs_info = get_object_or_404(Subscription, unsubscribe_key=key)
        cancel_subscription(subs_info)
        return redirect(reverse(self.success_url))
class ViewWebMail(View):
    # Renders a scheduled email in the browser ("view in web" link).
    template_name = 'crma/mail_viewer.html'
    def get(self, request, *args, **kwargs):
        # Read the schedule item
        scheduler_id = decode_id(kwargs['scheduler_id'])
        token = kwargs['scheduler_token']
        item = get_object_or_404(EmailScheduler, id=scheduler_id)
        # The token is forwarded to render(); presumably an access check
        # performed there - confirm in EmailScheduler.render.
        return item.render(request, token=token, template=self.template_name)
|
emencia/emencia-crma
|
crma/views.py
|
Python
|
gpl-2.0
| 1,158
|
#-*-coding=utf-8-*-
class SupportEncodings(object):
    """
    Gives the encodings supported by piconv.
    """
    supports = []

    def __init__(self):
        # Fix: the original list was missing a comma after 'ISO-2022-JP',
        # which silently concatenated it with 'WINDOWS-1252' into one bogus
        # entry ('ISO-2022-JPWINDOWS-1252') and dropped both real names.
        self.supports = [
            'ASCII', 'UTF-8', 'UTF-16', 'UTF-32',
            'BIG5', 'GBK', 'GB2312', 'GB18030',
            'EUC-JP', 'SHIFT_JIS', 'ISO-2022-JP',
            'WINDOWS-1252',
        ]

    def get_support_encodings(self):
        """Return the list of supported encoding names."""
        return self.supports

    def get_all_coded_character_set(self):
        # Placeholder: the full coded-character-set list is not implemented.
        return ['']
"""
437, 500, 500V1, 850, 851, 852, 855, 856, 857, 860, 861, 862, 863, 864, 865,
866, 866NAV, 869, 874, 904, 1026, 1046, 1047, 8859_1, 8859_2, 8859_3, 8859_4,
8859_5, 8859_6, 8859_7, 8859_8, 8859_9, 10646-1:1993, 10646-1:1993/UCS4,
ANSI_X3.4-1968, ANSI_X3.4-1986, ANSI_X3.4, ANSI_X3.110-1983, ANSI_X3.110,
ARABIC, ARABIC7, ARMSCII-8, ASCII, ASMO-708, ASMO_449, BALTIC, BIG-5,
BIG-FIVE, BIG5-HKSCS, BIG5, BIG5HKSCS, BIGFIVE, BRF, BS_4730, CA, CN-BIG5,
CN-GB, CN, CP-AR, CP-GR, CP-HU, CP037, CP038, CP273, CP274, CP275, CP278,
CP280, CP281, CP282, CP284, CP285, CP290, CP297, CP367, CP420, CP423, CP424,
CP437, CP500, CP737, CP770, CP771, CP772, CP773, CP774, CP775, CP803, CP813,
CP819, CP850, CP851, CP852, CP855, CP856, CP857, CP860, CP861, CP862, CP863,
CP864, CP865, CP866, CP866NAV, CP868, CP869, CP870, CP871, CP874, CP875,
CP880, CP891, CP901, CP902, CP903, CP904, CP905, CP912, CP915, CP916, CP918,
CP920, CP921, CP922, CP930, CP932, CP933, CP935, CP936, CP937, CP939, CP949,
CP950, CP1004, CP1008, CP1025, CP1026, CP1046, CP1047, CP1070, CP1079,
CP1081, CP1084, CP1089, CP1097, CP1112, CP1122, CP1123, CP1124, CP1125,
CP1129, CP1130, CP1132, CP1133, CP1137, CP1140, CP1141, CP1142, CP1143,
CP1144, CP1145, CP1146, CP1147, CP1148, CP1149, CP1153, CP1154, CP1155,
CP1156, CP1157, CP1158, CP1160, CP1161, CP1162, CP1163, CP1164, CP1166,
CP1167, CP1250, CP1251, CP1252, CP1253, CP1254, CP1255, CP1256, CP1257,
CP1258, CP1282, CP1361, CP1364, CP1371, CP1388, CP1390, CP1399, CP4517,
CP4899, CP4909, CP4971, CP5347, CP9030, CP9066, CP9448, CP10007, CP12712,
CP16804, CPIBM861, CSA7-1, CSA7-2, CSASCII, CSA_T500-1983, CSA_T500,
CSA_Z243.4-1985-1, CSA_Z243.4-1985-2, CSA_Z243.419851, CSA_Z243.419852,
CSDECMCS, CSEBCDICATDE, CSEBCDICATDEA, CSEBCDICCAFR, CSEBCDICDKNO,
CSEBCDICDKNOA, CSEBCDICES, CSEBCDICESA, CSEBCDICESS, CSEBCDICFISE,
CSEBCDICFISEA, CSEBCDICFR, CSEBCDICIT, CSEBCDICPT, CSEBCDICUK, CSEBCDICUS,
CSEUCKR, CSEUCPKDFMTJAPANESE, CSGB2312, CSHPROMAN8, CSIBM037, CSIBM038,
CSIBM273, CSIBM274, CSIBM275, CSIBM277, CSIBM278, CSIBM280, CSIBM281,
CSIBM284, CSIBM285, CSIBM290, CSIBM297, CSIBM420, CSIBM423, CSIBM424,
CSIBM500, CSIBM803, CSIBM851, CSIBM855, CSIBM856, CSIBM857, CSIBM860,
CSIBM863, CSIBM864, CSIBM865, CSIBM866, CSIBM868, CSIBM869, CSIBM870,
CSIBM871, CSIBM880, CSIBM891, CSIBM901, CSIBM902, CSIBM903, CSIBM904,
CSIBM905, CSIBM918, CSIBM921, CSIBM922, CSIBM930, CSIBM932, CSIBM933,
CSIBM935, CSIBM937, CSIBM939, CSIBM943, CSIBM1008, CSIBM1025, CSIBM1026,
CSIBM1097, CSIBM1112, CSIBM1122, CSIBM1123, CSIBM1124, CSIBM1129, CSIBM1130,
CSIBM1132, CSIBM1133, CSIBM1137, CSIBM1140, CSIBM1141, CSIBM1142, CSIBM1143,
CSIBM1144, CSIBM1145, CSIBM1146, CSIBM1147, CSIBM1148, CSIBM1149, CSIBM1153,
CSIBM1154, CSIBM1155, CSIBM1156, CSIBM1157, CSIBM1158, CSIBM1160, CSIBM1161,
CSIBM1163, CSIBM1164, CSIBM1166, CSIBM1167, CSIBM1364, CSIBM1371, CSIBM1388,
CSIBM1390, CSIBM1399, CSIBM4517, CSIBM4899, CSIBM4909, CSIBM4971, CSIBM5347,
CSIBM9030, CSIBM9066, CSIBM9448, CSIBM12712, CSIBM16804, CSIBM11621162,
CSISO4UNITEDKINGDOM, CSISO10SWEDISH, CSISO11SWEDISHFORNAMES,
CSISO14JISC6220RO, CSISO15ITALIAN, CSISO16PORTUGESE, CSISO17SPANISH,
CSISO18GREEK7OLD, CSISO19LATINGREEK, CSISO21GERMAN, CSISO25FRENCH,
CSISO27LATINGREEK1, CSISO49INIS, CSISO50INIS8, CSISO51INISCYRILLIC,
CSISO58GB1988, CSISO60DANISHNORWEGIAN, CSISO60NORWEGIAN1, CSISO61NORWEGIAN2,
CSISO69FRENCH, CSISO84PORTUGUESE2, CSISO85SPANISH2, CSISO86HUNGARIAN,
CSISO88GREEK7, CSISO89ASMO449, CSISO90, CSISO92JISC62991984B, CSISO99NAPLPS,
CSISO103T618BIT, CSISO111ECMACYRILLIC, CSISO121CANADIAN1, CSISO122CANADIAN2,
CSISO139CSN369103, CSISO141JUSIB1002, CSISO143IECP271, CSISO150,
CSISO150GREEKCCITT, CSISO151CUBA, CSISO153GOST1976874, CSISO646DANISH,
CSISO2022CN, CSISO2022JP, CSISO2022JP2, CSISO2022KR, CSISO2033,
CSISO5427CYRILLIC, CSISO5427CYRILLIC1981, CSISO5428GREEK, CSISO10367BOX,
CSISOLATIN1, CSISOLATIN2, CSISOLATIN3, CSISOLATIN4, CSISOLATIN5, CSISOLATIN6,
CSISOLATINARABIC, CSISOLATINCYRILLIC, CSISOLATINGREEK, CSISOLATINHEBREW,
CSKOI8R, CSKSC5636, CSMACINTOSH, CSNATSDANO, CSNATSSEFI, CSN_369103,
CSPC8CODEPAGE437, CSPC775BALTIC, CSPC850MULTILINGUAL, CSPC862LATINHEBREW,
CSPCP852, CSSHIFTJIS, CSUCS4, CSUNICODE, CSWINDOWS31J, CUBA, CWI-2, CWI,
CYRILLIC, DE, DEC-MCS, DEC, DECMCS, DIN_66003, DK, DS2089, DS_2089, E13B,
EBCDIC-AT-DE-A, EBCDIC-AT-DE, EBCDIC-BE, EBCDIC-BR, EBCDIC-CA-FR,
EBCDIC-CP-AR1, EBCDIC-CP-AR2, EBCDIC-CP-BE, EBCDIC-CP-CA, EBCDIC-CP-CH,
EBCDIC-CP-DK, EBCDIC-CP-ES, EBCDIC-CP-FI, EBCDIC-CP-FR, EBCDIC-CP-GB,
EBCDIC-CP-GR, EBCDIC-CP-HE, EBCDIC-CP-IS, EBCDIC-CP-IT, EBCDIC-CP-NL,
EBCDIC-CP-NO, EBCDIC-CP-ROECE, EBCDIC-CP-SE, EBCDIC-CP-TR, EBCDIC-CP-US,
EBCDIC-CP-WT, EBCDIC-CP-YU, EBCDIC-CYRILLIC, EBCDIC-DK-NO-A, EBCDIC-DK-NO,
EBCDIC-ES-A, EBCDIC-ES-S, EBCDIC-ES, EBCDIC-FI-SE-A, EBCDIC-FI-SE, EBCDIC-FR,
EBCDIC-GREEK, EBCDIC-INT, EBCDIC-INT1, EBCDIC-IS-FRISS, EBCDIC-IT,
EBCDIC-JP-E, EBCDIC-JP-KANA, EBCDIC-PT, EBCDIC-UK, EBCDIC-US, EBCDICATDE,
EBCDICATDEA, EBCDICCAFR, EBCDICDKNO, EBCDICDKNOA, EBCDICES, EBCDICESA,
EBCDICESS, EBCDICFISE, EBCDICFISEA, EBCDICFR, EBCDICISFRISS, EBCDICIT,
EBCDICPT, EBCDICUK, EBCDICUS, ECMA-114, ECMA-118, ECMA-128, ECMA-CYRILLIC,
ECMACYRILLIC, ELOT_928, ES, ES2, EUC-CN, EUC-JISX0213, EUC-JP-MS, EUC-JP,
EUC-KR, EUC-TW, EUCCN, EUCJP-MS, EUCJP-OPEN, EUCJP-WIN, EUCJP, EUCKR, EUCTW,
FI, FR, GB, GB2312, GB13000, GB18030, GBK, GB_1988-80, GB_198880,
GEORGIAN-ACADEMY, GEORGIAN-PS, GOST_19768-74, GOST_19768, GOST_1976874,
GREEK-CCITT, GREEK, GREEK7-OLD, GREEK7, GREEK7OLD, GREEK8, GREEKCCITT,
HEBREW, HP-GREEK8, HP-ROMAN8, HP-ROMAN9, HP-THAI8, HP-TURKISH8, HPGREEK8,
HPROMAN8, HPROMAN9, HPTHAI8, HPTURKISH8, HU, IBM-803, IBM-856, IBM-901,
IBM-902, IBM-921, IBM-922, IBM-930, IBM-932, IBM-933, IBM-935, IBM-937,
IBM-939, IBM-943, IBM-1008, IBM-1025, IBM-1046, IBM-1047, IBM-1097, IBM-1112,
IBM-1122, IBM-1123, IBM-1124, IBM-1129, IBM-1130, IBM-1132, IBM-1133,
IBM-1137, IBM-1140, IBM-1141, IBM-1142, IBM-1143, IBM-1144, IBM-1145,
IBM-1146, IBM-1147, IBM-1148, IBM-1149, IBM-1153, IBM-1154, IBM-1155,
IBM-1156, IBM-1157, IBM-1158, IBM-1160, IBM-1161, IBM-1162, IBM-1163,
IBM-1164, IBM-1166, IBM-1167, IBM-1364, IBM-1371, IBM-1388, IBM-1390,
IBM-1399, IBM-4517, IBM-4899, IBM-4909, IBM-4971, IBM-5347, IBM-9030,
IBM-9066, IBM-9448, IBM-12712, IBM-16804, IBM037, IBM038, IBM256, IBM273,
IBM274, IBM275, IBM277, IBM278, IBM280, IBM281, IBM284, IBM285, IBM290,
IBM297, IBM367, IBM420, IBM423, IBM424, IBM437, IBM500, IBM775, IBM803,
IBM813, IBM819, IBM848, IBM850, IBM851, IBM852, IBM855, IBM856, IBM857,
IBM860, IBM861, IBM862, IBM863, IBM864, IBM865, IBM866, IBM866NAV, IBM868,
IBM869, IBM870, IBM871, IBM874, IBM875, IBM880, IBM891, IBM901, IBM902,
IBM903, IBM904, IBM905, IBM912, IBM915, IBM916, IBM918, IBM920, IBM921,
IBM922, IBM930, IBM932, IBM933, IBM935, IBM937, IBM939, IBM943, IBM1004,
IBM1008, IBM1025, IBM1026, IBM1046, IBM1047, IBM1089, IBM1097, IBM1112,
IBM1122, IBM1123, IBM1124, IBM1129, IBM1130, IBM1132, IBM1133, IBM1137,
IBM1140, IBM1141, IBM1142, IBM1143, IBM1144, IBM1145, IBM1146, IBM1147,
IBM1148, IBM1149, IBM1153, IBM1154, IBM1155, IBM1156, IBM1157, IBM1158,
IBM1160, IBM1161, IBM1162, IBM1163, IBM1164, IBM1166, IBM1167, IBM1364,
IBM1371, IBM1388, IBM1390, IBM1399, IBM4517, IBM4899, IBM4909, IBM4971,
IBM5347, IBM9030, IBM9066, IBM9448, IBM12712, IBM16804, IEC_P27-1, IEC_P271,
INIS-8, INIS-CYRILLIC, INIS, INIS8, INISCYRILLIC, ISIRI-3342, ISIRI3342,
ISO-2022-CN-EXT, ISO-2022-CN, ISO-2022-JP-2, ISO-2022-JP-3, ISO-2022-JP,
ISO-2022-KR, ISO-8859-1, ISO-8859-2, ISO-8859-3, ISO-8859-4, ISO-8859-5,
ISO-8859-6, ISO-8859-7, ISO-8859-8, ISO-8859-9, ISO-8859-9E, ISO-8859-10,
ISO-8859-11, ISO-8859-13, ISO-8859-14, ISO-8859-15, ISO-8859-16, ISO-10646,
ISO-10646/UCS2, ISO-10646/UCS4, ISO-10646/UTF-8, ISO-10646/UTF8, ISO-CELTIC,
ISO-IR-4, ISO-IR-6, ISO-IR-8-1, ISO-IR-9-1, ISO-IR-10, ISO-IR-11, ISO-IR-14,
ISO-IR-15, ISO-IR-16, ISO-IR-17, ISO-IR-18, ISO-IR-19, ISO-IR-21, ISO-IR-25,
ISO-IR-27, ISO-IR-37, ISO-IR-49, ISO-IR-50, ISO-IR-51, ISO-IR-54, ISO-IR-55,
ISO-IR-57, ISO-IR-60, ISO-IR-61, ISO-IR-69, ISO-IR-84, ISO-IR-85, ISO-IR-86,
ISO-IR-88, ISO-IR-89, ISO-IR-90, ISO-IR-92, ISO-IR-98, ISO-IR-99, ISO-IR-100,
ISO-IR-101, ISO-IR-103, ISO-IR-109, ISO-IR-110, ISO-IR-111, ISO-IR-121,
ISO-IR-122, ISO-IR-126, ISO-IR-127, ISO-IR-138, ISO-IR-139, ISO-IR-141,
ISO-IR-143, ISO-IR-144, ISO-IR-148, ISO-IR-150, ISO-IR-151, ISO-IR-153,
ISO-IR-155, ISO-IR-156, ISO-IR-157, ISO-IR-166, ISO-IR-179, ISO-IR-193,
ISO-IR-197, ISO-IR-199, ISO-IR-203, ISO-IR-209, ISO-IR-226, ISO/TR_11548-1,
ISO646-CA, ISO646-CA2, ISO646-CN, ISO646-CU, ISO646-DE, ISO646-DK, ISO646-ES,
ISO646-ES2, ISO646-FI, ISO646-FR, ISO646-FR1, ISO646-GB, ISO646-HU,
ISO646-IT, ISO646-JP-OCR-B, ISO646-JP, ISO646-KR, ISO646-NO, ISO646-NO2,
ISO646-PT, ISO646-PT2, ISO646-SE, ISO646-SE2, ISO646-US, ISO646-YU,
ISO2022CN, ISO2022CNEXT, ISO2022JP, ISO2022JP2, ISO2022KR, ISO6937,
ISO8859-1, ISO8859-2, ISO8859-3, ISO8859-4, ISO8859-5, ISO8859-6, ISO8859-7,
ISO8859-8, ISO8859-9, ISO8859-9E, ISO8859-10, ISO8859-11, ISO8859-13,
ISO8859-14, ISO8859-15, ISO8859-16, ISO11548-1, ISO88591, ISO88592, ISO88593,
ISO88594, ISO88595, ISO88596, ISO88597, ISO88598, ISO88599, ISO88599E,
ISO885910, ISO885911, ISO885913, ISO885914, ISO885915, ISO885916,
ISO_646.IRV:1991, ISO_2033-1983, ISO_2033, ISO_5427-EXT, ISO_5427,
ISO_5427:1981, ISO_5427EXT, ISO_5428, ISO_5428:1980, ISO_6937-2,
ISO_6937-2:1983, ISO_6937, ISO_6937:1992, ISO_8859-1, ISO_8859-1:1987,
ISO_8859-2, ISO_8859-2:1987, ISO_8859-3, ISO_8859-3:1988, ISO_8859-4,
ISO_8859-4:1988, ISO_8859-5, ISO_8859-5:1988, ISO_8859-6, ISO_8859-6:1987,
ISO_8859-7, ISO_8859-7:1987, ISO_8859-7:2003, ISO_8859-8, ISO_8859-8:1988,
ISO_8859-9, ISO_8859-9:1989, ISO_8859-9E, ISO_8859-10, ISO_8859-10:1992,
ISO_8859-14, ISO_8859-14:1998, ISO_8859-15, ISO_8859-15:1998, ISO_8859-16,
ISO_8859-16:2001, ISO_9036, ISO_10367-BOX, ISO_10367BOX, ISO_11548-1,
ISO_69372, IT, JIS_C6220-1969-RO, JIS_C6229-1984-B, JIS_C62201969RO,
JIS_C62291984B, JOHAB, JP-OCR-B, JP, JS, JUS_I.B1.002, KOI-7, KOI-8, KOI8-R,
KOI8-RU, KOI8-T, KOI8-U, KOI8, KOI8R, KOI8U, KSC5636, L1, L2, L3, L4, L5, L6,
L7, L8, L10, LATIN-9, LATIN-GREEK-1, LATIN-GREEK, LATIN1, LATIN2, LATIN3,
LATIN4, LATIN5, LATIN6, LATIN7, LATIN8, LATIN9, LATIN10, LATINGREEK,
LATINGREEK1, MAC-CENTRALEUROPE, MAC-CYRILLIC, MAC-IS, MAC-SAMI, MAC-UK, MAC,
MACCYRILLIC, MACINTOSH, MACIS, MACUK, MACUKRAINIAN, MIK, MS-ANSI, MS-ARAB,
MS-CYRL, MS-EE, MS-GREEK, MS-HEBR, MS-MAC-CYRILLIC, MS-TURK, MS932, MS936,
MSCP949, MSCP1361, MSMACCYRILLIC, MSZ_7795.3, MS_KANJI, NAPLPS, NATS-DANO,
NATS-SEFI, NATSDANO, NATSSEFI, NC_NC0010, NC_NC00-10, NC_NC00-10:81,
NF_Z_62-010, NF_Z_62-010_(1973), NF_Z_62-010_1973, NF_Z_62010,
NF_Z_62010_1973, NO, NO2, NS_4551-1, NS_4551-2, NS_45511, NS_45512,
OS2LATIN1, OSF00010001, OSF00010002, OSF00010003, OSF00010004, OSF00010005,
OSF00010006, OSF00010007, OSF00010008, OSF00010009, OSF0001000A, OSF00010020,
OSF00010100, OSF00010101, OSF00010102, OSF00010104, OSF00010105, OSF00010106,
OSF00030010, OSF0004000A, OSF0005000A, OSF05010001, OSF100201A4, OSF100201A8,
OSF100201B5, OSF100201F4, OSF100203B5, OSF1002011C, OSF1002011D, OSF1002035D,
OSF1002035E, OSF1002035F, OSF1002036B, OSF1002037B, OSF10010001, OSF10010004,
OSF10010006, OSF10020025, OSF10020111, OSF10020115, OSF10020116, OSF10020118,
OSF10020122, OSF10020129, OSF10020352, OSF10020354, OSF10020357, OSF10020359,
OSF10020360, OSF10020364, OSF10020365, OSF10020366, OSF10020367, OSF10020370,
OSF10020387, OSF10020388, OSF10020396, OSF10020402, OSF10020417, PT, PT2,
PT154, R8, R9, RK1048, ROMAN8, ROMAN9, RUSCII, SE, SE2, SEN_850200_B,
SEN_850200_C, SHIFT-JIS, SHIFT_JIS, SHIFT_JISX0213, SJIS-OPEN, SJIS-WIN,
SJIS, SS636127, STRK1048-2002, ST_SEV_358-88, T.61-8BIT, T.61, T.618BIT,
TCVN-5712, TCVN, TCVN5712-1, TCVN5712-1:1993, THAI8, TIS-620, TIS620-0,
TIS620.2529-1, TIS620.2533-0, TIS620, TS-5881, TSCII, TURKISH8, UCS-2,
UCS-2BE, UCS-2LE, UCS-4, UCS-4BE, UCS-4LE, UCS2, UCS4, UHC, UJIS, UK,
UNICODE, UNICODEBIG, UNICODELITTLE, US-ASCII, US, UTF-7, UTF-8, UTF-16,
UTF-16BE, UTF-16LE, UTF-32, UTF-32BE, UTF-32LE, UTF7, UTF8, UTF16, UTF16BE,
UTF16LE, UTF32, UTF32BE, UTF32LE, VISCII, WCHAR_T, WIN-SAMI-2, WINBALTRIM,
WINDOWS-31J, WINDOWS-874, WINDOWS-936, WINDOWS-1250, WINDOWS-1251,
WINDOWS-1252, WINDOWS-1253, WINDOWS-1254, WINDOWS-1255, WINDOWS-1256,
WINDOWS-1257, WINDOWS-1258, WINSAMI2, WS2, YU
"""
|
coodoing/piconv
|
support_encodings.py
|
Python
|
apache-2.0
| 13,095
|
# Guillaume Valadon <guillaume@valadon.net>
"""
Scapy *BSD native support
"""
|
CodeNameGhost/shiva
|
thirdparty/scapy/arch/bpf/__init__.py
|
Python
|
mit
| 79
|
from glob import glob
import os
from os import path
import itertools
import re
from Bio import AlignIO
import pandas
import sys
import copy
import argparse
from slr import *
# Matches a leading run of uppercase letters in a sequence id
# (presumably the species prefix — TODO confirm against input ids).
species_RE = re.compile("([A-Z]+)")
def grouper(iterable, n, fillvalue=None):
    """Collect items from *iterable* into fixed-length chunks of *n*,
    padding the last chunk with *fillvalue* (Python 2: izip_longest)."""
    repeated = [iter(iterable)] * n
    return itertools.izip_longest(*repeated, fillvalue=fillvalue)
# NOTE: this is Python 2 code (print statements, izip_longest above).
# Command-line interface: clade name plus the SLR and alignment roots.
argparser = argparse.ArgumentParser()
argparser.add_argument('--clade', metavar='clade', type=str, required=True)
argparser.add_argument('--slrroot', metavar='slr_root', type=str, required=True)
argparser.add_argument('--alnroot', metavar='aln_root', type=str, required=True)
argparser.add_argument('--outfile', metavar='out_file', type=str, required=True)
args = argparser.parse_args()
clade = args.clade
alndir = args.alnroot
slrroot = args.slrroot
# Accumulators; all_data was apparently meant to become a DataFrame.
all_ids = []
all_data = [] # pandas.DataFrame(columns=colnames)
outfile = open(args.outfile, 'w')
# Walk every PRANK alignment under <alnroot>/<clade>/*/ and find the
# matching SLR result file under <slrroot>/<clade>/<2-char prefix>/.
for aln_fn in glob(path.join(alndir, clade, "*", "*_prank.best.fas")):
    basename = path.basename(aln_fn).rpartition('_')[0]
    prefix = basename.partition('_')[0][:2]
    slr_fn = path.join(slrroot, clade, prefix, basename+'_matched.res')
    if not path.exists(slr_fn):
        print slr_fn, "doesn't exist!"
        continue
    # NOTE(review): file handles are never closed; relies on GC.
    slr = open(slr_fn)
    # Skip the SLR header line, then write the first data line.
    slr.readline()
    print >>outfile, slr.readline()
|
jergosh/slr_pipeline
|
bin/scan_slr.py
|
Python
|
gpl-2.0
| 1,316
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Conditional statements related codes.
Branches, conditions, truth checks.
"""
from .CodeHelpers import decideConversionCheckNeeded, generateExpressionCode
from .Emission import SourceCodeCollector
from .ErrorCodes import (
getErrorExitBoolCode,
getReleaseCode,
getTakeReferenceCode,
)
from .LabelCodes import getBranchingCode, getGotoCode, getLabelCode
def generateConditionCode(condition, emit, context):
    """Generate code that evaluates *condition* and branches on its truth.

    The temporary holding the truth value is released once branching
    code has been emitted.
    """
    # A condition whose boolean conversion may raise needs the tri-state
    # "nuitka_bool" C type; otherwise a plain C bool suffices.
    result_c_type = (
        "nuitka_bool"
        if condition.mayRaiseExceptionBool(BaseException)
        else "bool"
    )
    compare_name = context.allocateTempName("condition_result", result_c_type)

    generateExpressionCode(
        to_name=compare_name, expression=condition, emit=emit, context=context
    )

    getBranchingCode(
        condition=compare_name.getCType().getTruthCheckCode(compare_name),
        emit=emit,
        context=context,
    )

    getReleaseCode(compare_name, emit, context)
def generateConditionalAndOrCode(to_name, expression, emit, context):
    """Generate code for short-circuiting "and"/"or" expressions.

    Evaluates the left side, branches on its truth value, and only then
    evaluates the right side; the result ends up in *to_name*. Reference
    ownership of the two temporaries is managed manually so that exactly
    one of them is transferred into *to_name*.
    """
    # This is a complex beast, handling both "or" and "and" expressions,
    # and it needs to micro manage details.
    # pylint: disable=too-many-locals
    if expression.isExpressionConditionalOr():
        prefix = "or_"
    else:
        prefix = "and_"
    true_target = context.allocateLabel(prefix + "left")
    false_target = context.allocateLabel(prefix + "right")
    end_target = context.allocateLabel(prefix + "end")
    # Save branch targets so they can be restored at the end.
    old_true_target = context.getTrueBranchTarget()
    old_false_target = context.getFalseBranchTarget()
    truth_name = context.allocateTempName(prefix + "left_truth", "int")
    left_name = context.allocateTempName(prefix + "left_value", to_name.c_type)
    right_name = context.allocateTempName(prefix + "right_value", to_name.c_type)
    left_value = expression.subnode_left
    generateExpressionCode(
        to_name=left_name, expression=left_value, emit=emit, context=context
    )
    # We need to treat this mostly manually here. We remember to release
    # this, and we better do this manually later.
    needs_ref1 = context.needsCleanup(left_name)
    # "or" short-circuits on truth, "and" short-circuits on falsehood,
    # so the targets are swapped between the two cases.
    if expression.isExpressionConditionalOr():
        context.setTrueBranchTarget(true_target)
        context.setFalseBranchTarget(false_target)
    else:
        context.setTrueBranchTarget(false_target)
        context.setFalseBranchTarget(true_target)
    left_name.getCType().emitTruthCheckCode(
        to_name=truth_name,
        value_name=left_name,
        emit=emit,
    )
    # Truth check yields -1 on error when the conversion can raise.
    needs_check = left_value.mayRaiseExceptionBool(BaseException)
    if needs_check:
        getErrorExitBoolCode(
            condition="%s == -1" % truth_name,
            needs_check=True,
            emit=emit,
            context=context,
        )
    getBranchingCode(condition="%s == 1" % truth_name, emit=emit, context=context)
    getLabelCode(false_target, emit)
    # So it's not the left value, then lets release that one right away, it
    # is not needed, but we remember if it should be added above.
    getReleaseCode(release_name=left_name, emit=emit, context=context)
    right_value = expression.subnode_right
    # Evaluate the "right" value then.
    generateExpressionCode(
        to_name=right_name, expression=right_value, emit=emit, context=context
    )
    # Again, remember the reference count to manage it manually.
    needs_ref2 = context.needsCleanup(right_name)
    if needs_ref2:
        context.removeCleanupTempName(right_name)
    # Equalize reference ownership between the two result paths.
    if not needs_ref2 and needs_ref1:
        getTakeReferenceCode(right_name, emit)
    to_name.getCType().emitAssignConversionCode(
        to_name=to_name,
        value_name=right_name,
        needs_check=decideConversionCheckNeeded(to_name, right_value),
        emit=emit,
        context=context,
    )
    getGotoCode(end_target, emit)
    getLabelCode(true_target, emit)
    if not needs_ref1 and needs_ref2:
        getTakeReferenceCode(left_name, emit)
    to_name.getCType().emitAssignConversionCode(
        to_name=to_name,
        value_name=left_name,
        needs_check=decideConversionCheckNeeded(to_name, left_value),
        emit=emit,
        context=context,
    )
    getLabelCode(end_target, emit)
    # If either path produced an owned reference, the result owns one too.
    if needs_ref1 or needs_ref2:
        context.addCleanupTempName(to_name)
    context.setTrueBranchTarget(old_true_target)
    context.setFalseBranchTarget(old_false_target)
def generateConditionalCode(to_name, expression, emit, context):
    """Generate code for a conditional expression ("x if c else y").

    The "no" branch is generated into a buffer first so that an extra
    reference can be inserted on whichever branch lacks one, keeping
    reference ownership of *to_name* consistent across both branches.
    """
    true_target = context.allocateLabel("condexpr_true")
    false_target = context.allocateLabel("condexpr_false")
    end_target = context.allocateLabel("condexpr_end")
    # Save branch targets so they can be restored at the end.
    old_true_target = context.getTrueBranchTarget()
    old_false_target = context.getFalseBranchTarget()
    context.setTrueBranchTarget(true_target)
    context.setFalseBranchTarget(false_target)
    generateConditionCode(
        condition=expression.subnode_condition, emit=emit, context=context
    )
    getLabelCode(true_target, emit)
    generateExpressionCode(
        to_name=to_name,
        expression=expression.subnode_expression_yes,
        emit=emit,
        context=context,
    )
    needs_ref1 = context.needsCleanup(to_name)
    # Must not clean this up in other expression.
    if needs_ref1:
        context.removeCleanupTempName(to_name)
    # Buffer the "no" branch so reference fixups can precede it if needed.
    real_emit = emit
    emit = SourceCodeCollector()
    generateExpressionCode(
        to_name=to_name,
        expression=expression.subnode_expression_no,
        emit=emit,
        context=context,
    )
    needs_ref2 = context.needsCleanup(to_name)
    # TODO: Need to buffer generated code, so we can emit extra reference if
    # not same.
    if needs_ref1 and not needs_ref2:
        # "yes" branch owns a reference, "no" branch does not: take one
        # after the buffered "no" branch code.
        getGotoCode(end_target, real_emit)
        getLabelCode(false_target, real_emit)
        for line in emit.codes:
            real_emit(line)
        emit = real_emit
        getTakeReferenceCode(to_name, emit)
        context.addCleanupTempName(to_name)
    elif not needs_ref1 and needs_ref2:
        # "no" branch owns a reference, "yes" branch does not: take one
        # on the "yes" path before jumping to the end.
        getTakeReferenceCode(to_name, real_emit)
        getGotoCode(end_target, real_emit)
        getLabelCode(false_target, real_emit)
        for line in emit.codes:
            real_emit(line)
        emit = real_emit
    else:
        # Both branches agree on ownership; just flush the buffer.
        getGotoCode(end_target, real_emit)
        getLabelCode(false_target, real_emit)
        for line in emit.codes:
            real_emit(line)
        emit = real_emit
    getLabelCode(end_target, emit)
    context.setTrueBranchTarget(old_true_target)
    context.setFalseBranchTarget(old_false_target)
|
kayhayen/Nuitka
|
nuitka/codegen/ConditionalCodes.py
|
Python
|
apache-2.0
| 7,335
|
from __future__ import print_function
import os
import warnings
from pytest import mark
from translate.convert import oo2po, po2oo, test_convert
from translate.misc import wStringIO
from translate.storage import po
class TestPO2OO(object):
    """Tests for converting Gettext PO translations back to OpenOffice
    SDF/GSI format, mostly via oo->po->oo round trips."""
    def setup_method(self, method):
        warnings.resetwarnings()
    def teardown_method(self, method):
        warnings.resetwarnings()
    def convertoo(self, posource, ootemplate, language="en-US"):
        """helper to exercise the command line function"""
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        templatefile = wStringIO.StringIO(ootemplate)
        assert po2oo.convertoo(inputfile, outputfile, templatefile, targetlanguage=language, timestamp=0)
        return outputfile.getvalue()
    def roundtripstring(self, entitystring):
        # Wrap the entity string in a minimal OO line, convert oo->po->oo,
        # and return the text that survives the round trip.
        oointro, oooutro = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 en-US ', ' 2002-02-02 02:02:02' + '\r\n'
        oosource = oointro + entitystring + oooutro
        ooinputfile = wStringIO.StringIO(oosource)
        ooinputfile2 = wStringIO.StringIO(oosource)
        pooutputfile = wStringIO.StringIO()
        oo2po.convertoo(ooinputfile, pooutputfile, ooinputfile2, targetlanguage='en-US')
        posource = pooutputfile.getvalue()
        poinputfile = wStringIO.StringIO(posource)
        ootemplatefile = wStringIO.StringIO(oosource)
        oooutputfile = wStringIO.StringIO()
        po2oo.convertoo(poinputfile, oooutputfile, ootemplatefile, targetlanguage="en-US")
        ooresult = oooutputfile.getvalue().decode('utf-8')
        print("original oo:\n", oosource, "po version:\n", posource, "output oo:\n", ooresult)
        assert ooresult.startswith(oointro) and ooresult.endswith(oooutro)
        return ooresult[len(oointro):-len(oooutro)]
    def check_roundtrip(self, oosource):
        """Checks that the round-tripped string is the same as the original"""
        assert self.roundtripstring(oosource) == oosource
    @mark.skipif(os.name == 'nt', reason="test or storage broken on Windows")
    def test_convertoo(self):
        """checks that the convertoo function is working"""
        oobase = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 %s %s 20050924 09:13:58' + '\r\n'
        posource = '''#: numpages.src#RID_SVXPAGE_NUM_OPTIONS.STR_BULLET.string.text\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n'''
        ootemplate = oobase % ('en-US', 'Simple String')
        ooexpected = oobase % ('zu', 'Dimpled Ring')
        newoo = self.convertoo(posource, ootemplate, language="zu")
        assert newoo.decode('utf-8') == ootemplate + ooexpected
    def test_pofilter(self):
        """Tests integration with pofilter"""
        #Some bad po with a few errors:
        posource = b'#: sourcefile.bla#ID_NUMBER.txet.gnirts\nmsgid "<tag cow=\\"3\\">Mistake."\nmsgstr " <etiket koei=\\"3\\">(fout) "'
        filter = po2oo.filter
        pofile = po.pofile()
        pofile.parse(posource)
        assert not filter.validelement(pofile.units[0], "dummyname.po", "exclude-all")
    def test_roundtrip_simple(self):
        """checks that simple strings make it through a oo->po->oo roundtrip"""
        self.check_roundtrip('Hello')
        self.check_roundtrip('"Hello"')
        self.check_roundtrip('"Hello Everybody"')
    def test_roundtrip_escape(self):
        """checks that escapes in strings make it through a oo->po->oo roundtrip"""
        self.check_roundtrip(r'"Simple Escape \ \n \\ \: \t \r "')
        self.check_roundtrip(r'"More escapes \\n \\t \\r \\: "')
        self.check_roundtrip(r'"More escapes \\\n \\\t \\\r \\\: "')
        self.check_roundtrip(r'"More escapes \\\\n \\\\t \\\\r \\\\: "')
        self.check_roundtrip(r'"End Line Escape \"')
        self.check_roundtrip(r'"\\rangle \\langle')
        self.check_roundtrip(r'\\\\<')
        self.check_roundtrip(r'\\\<')
        self.check_roundtrip(r'\\<')
        self.check_roundtrip(r'\<')
    def test_roundtrip_quotes(self):
        """checks that (escaped) quotes in strings make it through a oo->po->oo roundtrip"""
        self.check_roundtrip(r"""'Quote Escape "" '""")
        self.check_roundtrip(r'''"Single-Quote ' "''')
        self.check_roundtrip(r'''"Single-Quote Escape \' "''')
        self.check_roundtrip(r"""'Both Quotes "" '' '""")
    def test_roundtrip_spaces(self):
        # FIXME: this test fails because the resultant PO file returns as po.isempty since .isblank returns true
        # which is caused by isblankmsgtr returning True. Its a complete mess which would mean unravelling lots
        # of yuch in pypo. Until we have time to do that unravelling we're diabling this test. You can reenable
        # once we've fixed that.
        """checks that (escaped) quotes in strings make it through a oo->po->oo roundtrip"""
        self.check_roundtrip(" ")
        self.check_roundtrip(u"\u00a0")
    def test_default_timestamp(self):
        """test to ensure that we revert to the default timestamp"""
        oointro, oooutro = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 en-US Text ', '\r\n'
        posource = '''#: numpages.src#RID_SVXPAGE_NUM_OPTIONS.STR_BULLET.string.text\nmsgid "Text"\nmsgstr "Text"\n'''
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        templatefile = wStringIO.StringIO(oointro + '20050924 09:13:58' + oooutro)
        assert po2oo.convertoo(inputfile, outputfile, templatefile, targetlanguage="en-US")
        assert outputfile.getvalue().decode('utf-8') == oointro + '2002-02-02 02:02:02' + oooutro
    def test_escape_conversion(self):
        """test to ensure that we convert escapes correctly"""
        oosource = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 en-US Column1\tColumn2\r\n 2002-02-02 02:02:02' + '\r\n'
        posource = '''#: numpages.src#RID_SVXPAGE_NUM_OPTIONS.STR_BULLET.string.text\nmsgid "Column1\\tColumn2\\r\\n"\nmsgstr "Kolom1\\tKolom2\\r\\n"\n'''
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        templatefile = wStringIO.StringIO(oosource)
        assert po2oo.convertoo(inputfile, outputfile, templatefile, targetlanguage="af-ZA")
        assert b"\tKolom1\\tKolom2\\r\\n\t" in outputfile.getvalue()
    def test_helpcontent_escapes(self):
        """test to ensure that we convert helpcontent escapes correctly"""
        # Note how this test specifically uses incorrect spacing in the
        # translation. The extra space before 'hid' and an extra space before
        # the closing tag should not confuse us.
        oosource = r'helpcontent2 source\text\shared\3dsettings_toolbar.xhp 0 help par_idN1056A 0 en-US \<ahelp hid=\".\"\>The 3D-Settings toolbar controls properties of selected 3D objects.\</ahelp\> 2002-02-02 02:02:02' + '\r\n'
        posource = r'''#: 3dsettings_toolbar.xhp#par_idN1056A.help.text
msgid ""
"<ahelp hid=\".\">The 3D-Settings toolbar controls properties of selected 3D "
"ob jects.</ahelp>"
msgstr ""
"<ahelp hid=\".\" >Zeee 3DDDD-Settings toolbar controls properties of selected 3D "
"objects.</ahelp>"
'''
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        templatefile = wStringIO.StringIO(oosource)
        assert po2oo.convertoo(inputfile, outputfile, templatefile, targetlanguage="af-ZA")
        assert br"\<ahelp hid=\".\" \>Zeee 3DDDD-Settings toolbar controls properties of selected 3D objects.\</ahelp\>" in outputfile.getvalue()
    def test_helpcontent_escapes2(self):
        """test to ensure that we convert helpcontent escapes correctly"""
        oosource = r'helpcontent2 source\text\scalc\05\empty_cells.xhp 0 help par_id2629474 0 en-US A1: <empty> 2002-02-02 02:02:02' + '\r\n'
        posource = r'''#: empty_cells.xhp#par_id2629474.help.text
msgid "A1: <empty>"
msgstr "Aa1: <empty>"
'''
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        templatefile = wStringIO.StringIO(oosource)
        assert po2oo.convertoo(inputfile, outputfile, templatefile, targetlanguage="af-ZA")
        assert b"Aa1: <empty>" in outputfile.getvalue()
class TestPO2OOCommand(test_convert.TestConvertCommand, TestPO2OO):
    """Tests running actual po2oo commands on files"""
    convertmodule = po2oo
    def test_help(self, capsys):
        """tests getting help"""
        options = test_convert.TestConvertCommand.test_help(self, capsys)
        options = self.help_check(options, "--source-language=LANG")
        options = self.help_check(options, "--language=LANG")
        options = self.help_check(options, "-T, --keeptimestamp")
        options = self.help_check(options, "--nonrecursiveoutput")
        options = self.help_check(options, "--nonrecursivetemplate")
        options = self.help_check(options, "--filteraction")
        options = self.help_check(options, "--skipsource")
        options = self.help_check(options, "--threshold=PERCENT")
        options = self.help_check(options, "--fuzzy")
        options = self.help_check(options, "--nofuzzy")
        options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
        options = self.help_check(options, "--multifile=MULTIFILESTYLE", last=True)
    def merge2oo(self, oosource, posource):
        """helper that merges po translations to oo source through files"""
        # NOTE(review): 'convertor' and 'inputpo' are undefined here, so
        # this helper raises NameError if called — it appears to be dead
        # or unfinished code; verify before relying on it.
        outputoo = convertor.convertstore(inputpo)
        return outputoo
    def convertoo(self, posource, ootemplate, language="en-US"):
        """helper to exercise the command line function"""
        # Overrides TestPO2OO.convertoo to go through real files on disk.
        self.create_testfile(os.path.join("input", "svx", "source", "dialog.po"), posource)
        self.create_testfile("input.oo", ootemplate)
        self.run_command("input", "output.oo", template="input.oo", language=language, keeptimestamp=True)
        return self.read_testfile("output.oo")
|
diorcety/translate
|
translate/convert/test_po2oo.py
|
Python
|
gpl-2.0
| 10,029
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from shuup.admin.form_part import FormPart, TemplatedFormDef
from shuup.admin.modules.products.forms import (
SimpleVariationChildForm, SimpleVariationChildFormSet,
VariableVariationChildrenForm, VariationVariablesDataForm
)
from shuup.admin.toolbar import PostActionButton
from shuup.core.models import ProductMode, ProductVariationVariable
from shuup.utils.excs import Problem
from .edit_parent import (
ProductChildrenBaseFormPart, ProductParentBaseToolbar,
ProductParentBaseView
)
class VariationChildrenFormPart(ProductChildrenBaseFormPart):
    """Form part for listing/editing the variation children of a product."""
    # Product modes that may not act as a variation parent here.
    invalid_modes = [ProductMode.VARIATION_CHILD, ProductMode.PACKAGE_PARENT, ProductMode.SUBSCRIPTION]
    priority = 0
    def get_form_defs(self):
        """Yield the child-editing form defs appropriate to the product mode.

        Raises ValueError when the product mode cannot have variation
        children at all.
        """
        product = self.object
        if product.mode in self.invalid_modes:
            raise ValueError("Invalid mode")
        elif product.mode == ProductMode.VARIABLE_VARIATION_PARENT:
            # Variable variations: a single form over the variable matrix.
            form = VariableVariationChildrenForm
            template_name = "shuup/admin/products/variation/_variable_variation_children.jinja"
        else:
            # Simple variations: a formset of direct child links.
            form = formset_factory(SimpleVariationChildForm, SimpleVariationChildFormSet, extra=5, can_delete=True)
            template_name = "shuup/admin/products/variation/_simple_variation_children.jinja"
        form_defs = super(VariationChildrenFormPart, self).get_form_defs(form, template_name)
        for form_def in form_defs:
            yield form_def
class VariationVariablesFormPart(FormPart):
    """Form part that edits the variation variables of a parent product."""

    form_def_name = "variables"
    priority = 1

    def get_form_defs(self):
        """Yield the single, optional variation-variables form def."""
        form_kwargs = {"parent_product": self.object}
        yield TemplatedFormDef(
            "variables",
            VariationVariablesDataForm,
            template_name="shuup/admin/products/variation/_variation_variables.jinja",
            required=False,
            kwargs=form_kwargs
        )

    def form_valid(self, form):
        """Save the variables sub-form when it was actually rendered."""
        try:
            variables_form = form["variables"]
        except KeyError:
            # The form part is optional; nothing to save.
            return
        variables_form.save()
class ProductVariationViewToolbar(ProductParentBaseToolbar):
    """Toolbar for the variation edit view; adds destructive action buttons
    depending on the parent product's current variation state."""
    def __init__(self, view):
        super(ProductVariationViewToolbar, self).__init__(view)
        # Offer "Clear variation" only when children actually exist.
        if self.parent_product.variation_children.exists():
            self.append(PostActionButton(
                post_url=self.request.path,
                name="command",
                value="unvariate",
                confirm=_("Are you sure? This will unlink all children and remove all variation variables."),
                text=_("Clear variation"),
                extra_css_class="btn-danger",
                icon="fa fa-times"
            ))
        # Offer "Convert to simple variation" when variation variables are
        # in use (either by mode or by existing variable rows).
        if (
            self.parent_product.mode == ProductMode.VARIABLE_VARIATION_PARENT or
            ProductVariationVariable.objects.filter(product=self.parent_product).exists()
        ):
            self.append(PostActionButton(
                post_url=self.request.path,
                name="command",
                value="simplify",
                confirm=_("Are you sure? This will remove all variation variables, "
                          "converting children to direct links."),
                text=_("Convert to simple variation"),
                icon="fa fa-exchange",
                extra_css_class="btn-info"
            ))
class ProductVariationView(ProductParentBaseView):
    """Admin view for editing a product's variation children and variables."""
    template_name = "shuup/admin/products/variation/edit.jinja"
    form_part_classes = [VariationChildrenFormPart, VariationVariablesFormPart]
    toolbar_class = ProductVariationViewToolbar
    def dispatch(self, request, *args, **kwargs):
        # Variation children are not edited directly; bounce to the parent.
        self.object = self.get_object()
        if self.object.variation_parent_id:
            # Redirect to the parent instead.
            return HttpResponseRedirect(
                reverse("shuup_admin:shop_product.edit_variation", kwargs={"pk": self.object.variation_parent_id})
            )
        return super(ProductVariationView, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(ProductVariationView, self).get_context_data(**kwargs)
        context["title"] = _("Edit Variation: %s") % self.object
        context["is_variation"] = self.object.is_variation_parent()
        return context
    def dispatch_command(self, request, command):
        """Handle the toolbar's POSTed `command` value, then redirect.

        Raises Problem for unknown commands.
        """
        product = self.object
        if command == "unvariate":
            product.clear_variation()
            messages.success(self.request, _("Variation cleared."))
        elif command == "simplify":
            product.simplify_variation()
            messages.success(self.request, _("Variation simplified."))
        else:
            raise Problem("Unknown command: %s" % command)
        return HttpResponseRedirect(self.get_success_url())
|
suutari-ai/shoop
|
shuup/admin/modules/products/views/edit_variation.py
|
Python
|
agpl-3.0
| 5,246
|
import os
from unittest import mock
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zproject.email_backends import get_forward_address
class EmailLogTest(ZulipTestCase):
    """Tests for the development-only email log endpoints (/emails/...)."""
    def test_generate_and_clear_email_log(self) -> None:
        # Patch the real SMTP backend away so no email actually leaves,
        # and capture INFO logs emitted while generating the test emails.
        with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"), mock.patch(
            "zproject.email_backends.EmailBackend.send_messages"
        ), self.assertLogs(level="INFO") as m, self.settings(DEVELOPMENT_LOG_EMAILS=True):
            result = self.client_get("/emails/generate/")
            self.assertEqual(result.status_code, 302)
            self.assertIn("emails", result["Location"])
            result = self.client_get("/emails/")
            self.assert_in_success_response(["All the emails sent in the Zulip"], result)
            result = self.client_get("/emails/clear/")
            self.assertEqual(result.status_code, 302)
            result = self.client_get(result["Location"])
            self.assertIn("manually generate most of the emails by clicking", str(result.content))
            output_log = (
                "INFO:root:Emails sent in development are available at http://testserver/emails"
            )
            # One log line per generated email; the endpoint emits 15 here.
            self.assertEqual(m.output, [output_log for i in range(15)])
    def test_forward_address_details(self) -> None:
        # Setting a forward address should persist and then appear on the
        # email-log page after emails are (re)generated.
        forward_address = "forward-to@example.com"
        result = self.client_post("/emails/", {"forward_address": forward_address})
        self.assert_json_success(result)
        self.assertEqual(get_forward_address(), forward_address)
        with self.settings(EMAIL_BACKEND="zproject.email_backends.EmailLogBackEnd"):
            with mock.patch("zproject.email_backends.EmailBackend.send_messages"):
                result = self.client_get("/emails/generate/")
                self.assertEqual(result.status_code, 302)
                self.assertIn("emails", result["Location"])
                result = self.client_get(result["Location"])
                self.assert_in_success_response([forward_address], result)
        # Clean up the on-disk forward-address config written by the POST.
        os.remove(settings.FORWARD_ADDRESS_CONFIG_FILE)
|
rht/zulip
|
zerver/tests/test_email_log.py
|
Python
|
apache-2.0
| 2,144
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-09-18 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration; chained after the previous cerimonial
    # migration so Django applies them in order.
    dependencies = [
        ('cerimonial', '0016_auto_20180918_1149'),
    ]
|
interlegis/saap
|
saap/cerimonial/migrations/0017_auto_20180918_1152.py
|
Python
|
gpl-3.0
| 308
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Constants used for communicating with the Python devappserver2 runtime."""
# Software identifier for the development server (production reports a
# different value); presumably surfaced via the WSGI/CGI environ — verify
# at call sites.
SERVER_SOFTWARE = 'Development/2.0'
# Internal AppEngine prefix for Headers (Environment variables)
# used in production. See apphosting/base/http_proto.cc for the full list.
APPENGINE_HEADER_PREFIX = 'X-Appengine-'
APPENGINE_ENVIRON_PREFIX = 'HTTP_X_APPENGINE_'
# Prefix for Headers (Environment variables) used in Dev AppServer only.
APPENGINE_DEV_HEADER_PREFIX = APPENGINE_HEADER_PREFIX + 'Dev-'
APPENGINE_DEV_ENVIRON_PREFIX = APPENGINE_ENVIRON_PREFIX + 'DEV_'
# These values are passed as part of UPRequest proto in Prod.
# Propagation rule: Cut the prefix.
ENVIRONS_TO_PROPAGATE = set([
    'BACKEND_ID',
    'DEFAULT_VERSION_HOSTNAME',
    'USER_ID',
    'USER_IS_ADMIN',
    'USER_EMAIL',
    'USER_NICKNAME',
    'USER_ORGANIZATION',
    'REMOTE_ADDR',
    'REQUEST_ID_HASH',
    'REQUEST_LOG_ID',
    'SERVER_NAME',
    'SERVER_PORT',
    'SERVER_PROTOCOL',
])
# Header/environ names used to carry the per-request id between the dev
# appserver and the runtime.
REQUEST_ID_HEADER = APPENGINE_DEV_HEADER_PREFIX + 'Request-Id'
REQUEST_ID_ENVIRON = APPENGINE_DEV_ENVIRON_PREFIX + 'REQUEST_ID'
# TODO: rename to SCRIPT_ENVIRON
SCRIPT_HEADER = APPENGINE_DEV_ENVIRON_PREFIX + 'SCRIPT'
# TODO: rename to REQUEST_TYPE_ENVIRON
# A request header where the value is a string containing the request type, e.g.
# background.
REQUEST_TYPE_HEADER = APPENGINE_DEV_ENVIRON_PREFIX + 'REQUEST_TYPE'
# A response header used by the runtime to indicate that an uncaught error has
# ocurred and that a user-specified error handler should be used if available.
ERROR_CODE_HEADER = '%sError-Code' % APPENGINE_HEADER_PREFIX
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/http_runtime_constants.py
|
Python
|
bsd-3-clause
| 2,201
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
gtoonstra/airflow-hovercraft
|
hovercraft/operators/__init__.py
|
Python
|
apache-2.0
| 565
|
# This file is part of remsh
# Copyright 2009, 2010 Dustin J. Mitchell
# See COPYING for license information
import os
from remsh.xport.base import Error, Xport
class FDXport(Xport):
    """File-descriptor-backed transport base class. Not for use by users."""
    def __init__(self, fd):
        # Raw OS-level file descriptor; set to -1 once closed.
        self.fd = fd
    def read(self):
        # One chunk per call; an empty result signals EOF.
        return os.read(self.fd, 32768)
    def write(self, data):
        # os.write may accept fewer bytes than requested, so keep writing
        # until the whole buffer has been handed to the descriptor.
        remaining = data
        while remaining:
            written = os.write(self.fd, remaining)
            remaining = remaining[written:]
    def close(self):
        os.close(self.fd)
        self.fd = -1
    def __del__(self):
        # Best-effort cleanup in case the user never called close().
        if self.fd >= 0:
            os.close(self.fd)
            self.fd = -1
|
djmitche/remsh
|
py/remsh/xport/fd.py
|
Python
|
mit
| 769
|
# Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by NSN
# Copyright 2010-2012 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SocketServer
import atexit
import codecs
import os
import shutil
import socket
import subprocess
import tempfile
import threading
import signal
import sys
import robotide.utils as utils
from Queue import Empty, Queue
from robot.output.loggerhelper import LEVELS
from robot.utils.encoding import SYSTEM_ENCODING
from robotide.context import IS_WINDOWS
from robotide.contrib.testrunner import TestRunnerAgent
from robotide.controller.testexecutionresults import TestExecutionResults
# Serializes temporary-directory cleanup registered via atexit.
ATEXIT_LOCK = threading.RLock()
class TestRunner(object):
    """Runs Robot Framework tests for RIDE and relays results.

    Owns the listener socket server (which receives events from
    TestRunnerAgent inside the test process), a temporary output
    directory, and the spawned test ``Process``.
    """
    def __init__(self, project):
        self._output_dir = None
        self._process = None
        self._server = None
        self._server_thread = None
        self._pause_on_failure = False
        self._pid_to_kill = None
        self._results = TestExecutionResults()
        self.port = None
        self._project = project
        self.profiles = {}
    def enable(self, result_handler):
        """Start the listener server and create the temp output directory."""
        self._start_listener_server(result_handler)
        self._create_temporary_directory()
    def _create_temporary_directory(self):
        self._output_dir = tempfile.mkdtemp(".d", "RIDE")
        atexit.register(self._remove_temporary_directory)
        # this plugin creates a temporary directory which _should_
        # get reaped at exit. Sometimes things happen which might
        # cause it to not get deleted. Maybe this would be a good
        # place to check for temporary directories that match the
        # signature and delete them if they are more than a few
        # days old...
    def _remove_temporary_directory(self):
        with ATEXIT_LOCK:
            if os.path.exists(self._output_dir):
                shutil.rmtree(self._output_dir)
    def add_profile(self, name, item):
        self.profiles[name] = item
    def get_profile(self, name):
        return self.profiles[name]
    def get_profile_names(self):
        return sorted(self.profiles.keys())
    def _start_listener_server(self, result_handler):
        # Every event is fed both to our internal handler (state updates)
        # and to the caller-supplied handler (UI updates).
        def handle(*args):
            self._result_handler(*args)
            result_handler(*args)
        self._server = RideListenerServer(RideListenerHandler, handle)
        self._server_thread = threading.Thread(
            target=self._server.serve_forever)
        self._server_thread.setDaemon(True)
        self._server_thread.start()
        # The server binds to port 0, so read back the OS-assigned port.
        self.port = self._server.server_address[1]
    def _result_handler(self, event, *args):
        """Update bookkeeping from a single TestRunnerAgent event."""
        if event == 'pid':
            self._pid_to_kill = int(args[0])
        if event == 'port' and self._process:
            self._process.set_port(args[0])
        if event == 'start_test':
            longname = args[1]['longname']
            testname = args[0]
            self._results.set_running(self._get_test_controller(longname,
                                                                testname))
        if event == 'end_test':
            longname = args[1]['longname']
            testname = args[0]
            if args[1]['status'] == 'PASS':
                self._results.set_passed(self._get_test_controller(longname,
                                                                   testname))
            else:
                self._results.set_failed(self._get_test_controller(longname,
                                                                   testname))
    def _get_test_controller(self, longname, testname=None):
        return self._project.find_controller_by_longname(longname, testname)
    def clear_server(self):
        self._server = None
    def shutdown_server(self):
        if self._server:
            self._server.shutdown()
    def test_execution_started(self):
        self._results.test_execution_started()
    def kill_process(self):
        if self._process:
            self._process.kill(force=True)
    def set_pause_on_failure(self, pause):
        self._pause_on_failure = pause
        self._send_pause_on_failure_information()
    def _send_pause_on_failure_information(self):
        if self._process:
            self._process.pause_on_failure(self._pause_on_failure)
    def send_stop_signal(self):
        if self._process:
            self._process.kill(killer_pid=self._pid_to_kill)
    def send_pause_signal(self):
        if self._process:
            self._process.pause()
    def send_continue_signal(self):
        if self._process:
            self._process.resume()
    def send_step_next_signal(self):
        if self._process:
            self._process.step_next()
    def send_step_over_signal(self):
        if self._process:
            self._process.step_over()
    def run_command(self, command, cwd):
        self._pid_to_kill = None
        self._process = Process(cwd)
        self._process.run_command(command)
    def get_command(self, profile, pythonpath, monitor_width, names_to_run):
        '''Return the command (as a list) used to run the test'''
        command = profile.get_command_prefix()[:]
        argfile = os.path.join(self._output_dir, "argfile.txt")
        command.extend(["--argumentfile", argfile])
        command.extend(["--listener", self._get_listener_to_cmd()])
        command.append(self._get_suite_source_for_command())
        self._write_argfile(argfile, self._create_standard_args(
            command, profile, pythonpath, monitor_width, names_to_run))
        return command
    @staticmethod
    def get_message_log_level(command):
        """Return the numeric log level requested by -L/--loglevel.

        Falls back to INFO when no switch (or an unknown level) is given;
        returns None when the switch is the last token and has no value.
        """
        min_log_level_number = LEVELS['INFO']
        if '-L' in command:
            switch = '-L'
        elif '--loglevel' in command:
            switch = '--loglevel'
        else:
            return min_log_level_number
        i = command.index(switch)
        # BUGFIX: the old guard compared len(command) == i, which can never
        # be true (index() < len), so a trailing switch with no value
        # raised IndexError below. Guard the "no value follows" case.
        if i + 1 == len(command):
            return
        level = command[i+1].upper().split(':')[0]
        return LEVELS.get(level, min_log_level_number)
    def _get_listener_to_cmd(self):
        path = os.path.abspath(TestRunnerAgent.__file__)
        # Point at the .py source, not a compiled .pyc/.pyo file.
        if path[-1] in ['c', 'o']:
            path = path[:-1]
        return '%s:%s:%s' % (path, self.port, self._pause_on_failure)
    def _get_suite_source_for_command(self):
        # On Windows an absolute path on a different drive must be used
        # verbatim; otherwise return the suite's absolute path.
        cur = os.path.abspath(os.path.curdir)
        source = os.path.abspath(self._project.suite.source)
        return source
    def _create_standard_args(
            self, command, profile, pythonpath, monitor_width, names_to_run):
        standard_args = []
        standard_args.extend(profile.get_custom_args())
        self._add_tmp_outputdir_if_not_given_by_user(command, standard_args)
        self._add_pythonpath_if_in_settings_and_not_given_by_user(
            command, standard_args, pythonpath)
        standard_args.extend(["--monitorcolors", "off"])
        standard_args.extend(["--monitorwidth", monitor_width])
        for suite, test in names_to_run:
            standard_args += ['--suite', suite, '--test', test]
        return standard_args
    def _add_tmp_outputdir_if_not_given_by_user(self, command, standard_args):
        if "--outputdir" not in command and "-d" not in command:
            standard_args.extend(["--outputdir", self._output_dir])
    @staticmethod
    def _add_pythonpath_if_in_settings_and_not_given_by_user(
            command, standard_args, pythonpath):
        if '--pythonpath' in command:
            return
        if '-P' in command:
            return
        if not pythonpath:
            return
        standard_args.extend(['--pythonpath', ':'.join(pythonpath)])
    @staticmethod
    def _write_argfile(argfile, args):
        # Close the file even if the write fails, so the argfile handle
        # is never leaked.
        f = codecs.open(argfile, "w", "utf-8")
        try:
            f.write("\n".join(args))
        finally:
            f.close()
    def get_output_and_errors(self, profile):
        stdout, stderr, returncode = self._process.get_output(), \
            self._process.get_errors(), self._process.get_returncode()
        error, log_message = profile.format_error(stderr, returncode)
        return stdout, error, log_message
    def is_running(self):
        return self._process and self._process.is_alive()
    def command_ended(self):
        self._process = None
class Process(object):
    """Wraps the spawned Robot Framework subprocess.

    Output/error streams are drained by daemon reader threads; control
    commands (pause/resume/step/kill) are sent over the TestRunnerAgent's
    local TCP port once it has been reported back.
    """
    def __init__(self, cwd):
        self._process = None
        self._error_stream = None
        self._output_stream = None
        self._cwd = cwd
        self._port = None
        self._sock = None
        self._kill_called = False
    def run_command(self, command):
        """Start `command` (a shell string) as a subprocess."""
        # We need to supply stdin for subprocess, because otherways in pythonw
        # subprocess will try using sys.stdin which causes an error in windows
        subprocess_args = dict(bufsize=0,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               cwd=self._cwd.encode(SYSTEM_ENCODING))
        if IS_WINDOWS:
            # Hide the console window of the spawned process.
            startupinfo = subprocess.STARTUPINFO()
            try:
                import _subprocess
                startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
            except ImportError:
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess_args['startupinfo'] = startupinfo
        else:
            # New session so the whole process group can be signalled.
            subprocess_args['preexec_fn'] = os.setsid
            subprocess_args['shell'] = True
        self._process = subprocess.Popen(command.encode(SYSTEM_ENCODING),
                                         **subprocess_args)
        self._process.stdin.close()
        self._output_stream = StreamReaderThread(self._process.stdout)
        self._error_stream = StreamReaderThread(self._process.stderr)
        self._output_stream.run()
        self._error_stream.run()
        self._kill_called = False
    def set_port(self, port):
        self._port = port
    def get_output(self):
        return self._output_stream.pop()
    def get_errors(self):
        return self._error_stream.pop()
    def get_returncode(self):
        return self._process.returncode
    def is_alive(self):
        return self._process.poll() is None
    def wait(self):
        self._process.wait()
    def kill(self, force=False, killer_pid=None):
        if not self._process:
            return
        if force:
            self._process.kill()
        self.resume()  # Send so that RF is not blocked
        if IS_WINDOWS and not self._kill_called and self._port is not None:
            self._signal_kill_with_listener_server()
            self._kill_called = True
        else:
            self._kill(killer_pid or self._process.pid)
    def _signal_kill_with_listener_server(self):
        self._send_socket('kill')
    def _send_socket(self, data):
        if self._port is None:
            return  # Silent failure..
        sock = None
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(('localhost', self._port))
            sock.send(data)
        finally:
            # BUGFIX: if socket() itself raised, sock is still None and the
            # old unconditional close() raised AttributeError, masking the
            # original error.
            if sock is not None:
                sock.close()
    def pause(self):
        self._send_socket('pause')
    def pause_on_failure(self, pause):
        if pause:
            self._send_socket('pause_on_failure')
        else:
            self._send_socket('do_not_pause_on_failure')
    def resume(self):
        self._send_socket('resume')
    def step_next(self):
        self._send_socket('step_next')
    def step_over(self):
        self._send_socket('step_over')
    def _kill(self, pid):
        if pid:
            try:
                if os.name == 'nt' and sys.version_info < (2,7):
                    # Python 2.6 on Windows has no os.kill; terminate via
                    # the Win32 handle directly.
                    import ctypes
                    ctypes.windll.kernel32.TerminateProcess(
                        int(self._process._handle), -1)
                else:
                    os.kill(pid, signal.SIGINT)
            except OSError:
                pass
class StreamReaderThread(object):
    """Drains a subprocess output stream on a background daemon thread."""
    def __init__(self, stream):
        self._queue = Queue()
        self._thread = None
        self._stream = stream
    def run(self):
        """Start the reader thread."""
        reader = threading.Thread(target=self._enqueue_output,
                                  args=(self._stream,))
        reader.daemon = True
        reader.start()
        self._thread = reader
    def _enqueue_output(self, out):
        # Blocks on readline until the stream reaches EOF (b'').
        for line in iter(out.readline, b''):
            self._queue.put(line)
    def pop(self):
        """Return everything queued so far, decoded as UTF-8."""
        chunks = []
        for _ in xrange(self._queue.qsize()):
            try:
                chunks.append(self._queue.get_nowait())
            except Empty:
                pass
        return "".join(chunks).decode('UTF-8')
# The following two classes implement a small line-buffered socket
# server. It is designed to run in a separate thread, read data
# from the given port and update the UI -- hopefully all in a
# thread-safe manner.
class RideListenerServer(SocketServer.TCPServer):
    """Implements a simple line-buffered socket server"""
    allow_reuse_address = True
    def __init__(self, RequestHandlerClass, callback):
        # Bind to port 0 so the OS picks a free port; the caller reads it
        # back from server_address.
        SocketServer.TCPServer.__init__(self, ("",0), RequestHandlerClass)
        # Called once per decoded (event_name, *args) message.
        self.callback = callback
class RideListenerHandler(SocketServer.StreamRequestHandler):
    """Decodes TestRunnerAgent messages and forwards them to the callback."""
    def handle(self):
        decoder = TestRunnerAgent.StreamHandler(self.request.makefile('r'))
        # Pump messages until the client disconnects.
        while True:
            try:
                (name, args) = decoder.load()
                self.server.callback(name, *args)
            except (EOFError, IOError):
                # I should log this...
                break
|
caio2k/RIDE
|
src/robotide/contrib/testrunner/testrunner.py
|
Python
|
apache-2.0
| 14,659
|
"""
Illustrates how to attach events to all instrumented attributes
and listen for change events.
"""
from sqlalchemy import event, orm
def configure_listener(class_, key, inst):
    """Attach append/remove/set change listeners to one instrumented attribute."""
    def on_append(instance, value, initiator):
        instance.receive_change_event("append", key, value, None)
    def on_remove(instance, value, initiator):
        instance.receive_change_event("remove", key, value, None)
    def on_set(instance, value, oldvalue, initiator):
        instance.receive_change_event("set", key, value, oldvalue)
    event.listen(inst, 'append', on_append)
    event.listen(inst, 'remove', on_remove)
    event.listen(inst, 'set', on_set)
if __name__ == '__main__':
    # Demo script (Python 2 — note the `print` statement below).
    from sqlalchemy import Column, Integer, String, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base
    class Base(object):
        # Every mapped class inherits this hook, which the listeners
        # installed by configure_listener() call on each change.
        def receive_change_event(self, verb, key, value, oldvalue):
            s = "Value '%s' %s on attribute '%s', " % (value, verb, key)
            if oldvalue:
                s += "which replaced the value '%s', " % oldvalue
            s += "on object %s" % self
            print s
    Base = declarative_base(cls=Base)
    # Hook attribute instrumentation so listeners are attached to every
    # mapped attribute of every mapped class.
    event.listen(Base, 'attribute_instrument', configure_listener)
    class MyMappedClass(Base):
        __tablename__ = "mytable"
        id = Column(Integer, primary_key=True)
        data = Column(String(50))
        related_id = Column(Integer, ForeignKey("related.id"))
        related = relationship("Related", backref="mapped")
        def __str__(self):
            return "MyMappedClass(data=%r)" % self.data
    class Related(Base):
        __tablename__ = "related"
        id = Column(Integer, primary_key=True)
        data = Column(String(50))
        def __str__(self):
            return "Related(data=%r)" % self.data
    # classes are instrumented. Demonstrate the events !
    m1 = MyMappedClass(data='m1', related=Related(data='r1'))
    m1.data = 'm1mod'
    m1.related.mapped.append(MyMappedClass(data='m2'))
    del m1.data
|
ioram7/keystone-federado-pgid2013
|
build/sqlalchemy/examples/custom_attributes/listen_for_events.py
|
Python
|
apache-2.0
| 2,055
|
from __future__ import absolute_import
import six
import logging
logger = logging.getLogger(__name__)
from .. import py3_errmsg
# enaml is optional: on Python 3 a missing/broken enaml is reported via
# the shared py3_errmsg instead of aborting the import; on Python 2 the
# ImportError is re-raised unchanged.
try:
    import enaml
except ImportError:
    if six.PY3:
        logger.exception(py3_errmsg)
    else:
        raise
else:
    # enaml.imports() makes the .enaml view definitions importable.
    with enaml.imports():
        from .view import PlotView, PlotControls
        from .model import ScalarCollection
|
NSLS-II/replay
|
replay/scalar/__init__.py
|
Python
|
bsd-3-clause
| 370
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from miasm2.expression.expression import *
from miasm2.arch.msp430.regs import *
from miasm2.arch.msp430.arch import mn_msp430
from miasm2.ir.ir import ir
# Utils
def hex2bcd(val):
    "Return val as BCD"
    # Reinterpret the hex digits of val as decimal digits; any digit
    # above 9 (a-f) makes the value unrepresentable in BCD.
    digits = "%x" % val
    try:
        return int(digits, 10)
    except ValueError:
        raise NotImplementedError("Not defined behaviour")
def bcd2hex(val):
    "Return the hex value of a BCD"
    # Render the decimal digits of val and re-read them as hex digits.
    literal = "0x%d" % val
    try:
        return int(literal, 16)
    except ValueError:
        raise NotImplementedError("Not defined behaviour")
def reset_sr_res():
    # Clear the reserved bits of the status register.
    return [ExprAff(res, ExprInt_fromsize(7, 0))]
def update_flag_zf(a):
    # ZF := 1 iff a == 0.
    return [ExprAff(zf, ExprCond(a, ExprInt_from(zf, 0), ExprInt_from(zf, 1)))]
def update_flag_nf(a):
    # NF := sign bit of a.
    return [ExprAff(nf, a.msb())]
def update_flag_pf(a):
    return [ExprAff(pf, ExprOp('parity', a & ExprInt_from(a, 0xFF)))]
def update_flag_cf_inv_zf(a):
    # CF := 1 iff a != 0 (MSP430 logical ops set carry to NOT zero).
    return [ExprAff(cf, ExprCond(a, ExprInt_from(cf, 1), ExprInt_from(cf, 0)))]
def update_flag_zn_r(a):
    # Z and N from result, plus reserved-bit reset.
    e = []
    e += update_flag_zf(a)
    e += update_flag_nf(a)
    e += reset_sr_res()
    return e
def update_flag_sub_cf(a, b, c):
    # Borrow-free carry for c = a - b (inverted borrow convention).
    return [ExprAff(cf,
                    ((((a ^ b) ^ c) ^ ((a ^ c) & (a ^ b))).msb()) ^ ExprInt1(1))]
def update_flag_add_cf(a, b, c):
    # Carry out of c = a + b.
    return [ExprAff(cf, (((a ^ b) ^ c) ^ ((a ^ c) & (~(a ^ b)))).msb())]
def update_flag_add_of(a, b, c):
    # Signed overflow of c = a + b.
    return [ExprAff(of, (((a ^ c) & (~(a ^ b)))).msb())]
def update_flag_sub_of(a, b, c):
    # Signed overflow of c = a - b.
    return [ExprAff(of, (((a ^ c) & (a ^ b))).msb())]
def mng_autoinc(a, b, size):
    """Expand an @Rn+ (auto-increment) source operand.

    Returns (side_effects, a, b) where `a` is rewritten to a plain memory
    access, the register increment is emitted as a side effect, and `b` is
    adjusted if it reads memory through the same (now incremented) register.
    """
    e = []
    if not (isinstance(a, ExprOp) and a.op == "autoinc"):
        return e, a, b
    a_r = a.args[0]
    e.append(ExprAff(a_r, a_r + ExprInt_from(a_r, size / 8)))
    a = ExprMem(a_r, size)
    if isinstance(b, ExprMem) and a_r in b.arg:
        b = ExprMem(b.arg + ExprInt16(size / 8), b.size)
    return e, a, b
# Mnemonics
def mov_b(ir, instr, a, b):
    # Byte move; register destinations are written zero-extended to 16 bits,
    # memory destinations as an 8-bit store.
    e, a, b = mng_autoinc(a, b, 8)
    if isinstance(b, ExprMem):
        b = ExprMem(b.arg, 8)
        a = a[:8]
    else:
        a = a[:8].zeroExtend(16)
    e.append(ExprAff(b, a))
    return e, []
def mov_w(ir, instr, a, b):
    # Word move; a move into PC is also reflected into IRDst (branch).
    e, a, b = mng_autoinc(a, b, 16)
    e.append(ExprAff(b, a))
    if b == ir.pc:
        e.append(ExprAff(ir.IRDst, a))
    return e, []
def and_b(ir, instr, a, b):
    # Byte AND: result zero-extended; Z/N updated, C = NOT Z, OF cleared.
    e, a, b = mng_autoinc(a, b, 8)
    c = a[:8] & b[:8]
    e.append(ExprAff(b, c.zeroExtend(16)))
    e += update_flag_zn_r(c)
    e += update_flag_cf_inv_zf(c)
    e += [ExprAff(of, ExprInt1(0))]
    return e, []
def and_w(ir, instr, a, b):
    # Word AND with the same flag behaviour as and_b.
    e, a, b = mng_autoinc(a, b, 16)
    c = a & b
    e.append(ExprAff(b, c))
    e += update_flag_zn_r(c)
    e += update_flag_cf_inv_zf(c)
    e += [ExprAff(of, ExprInt1(0))]
    return e, []
def bic_b(ir, instr, a, b):
    # Bit clear (byte): b &= ~a; flags untouched.
    e, a, b = mng_autoinc(a, b, 8)
    c = (a[:8] ^ ExprInt8(0xff)) & b[:8]
    c = c.zeroExtend(b.size)
    e.append(ExprAff(b, c))
    return e, []
def bic_w(ir, instr, a, b):
    # Bit clear (word): b &= ~a; flags untouched.
    e, a, b = mng_autoinc(a, b, 16)
    c = (a ^ ExprInt16(0xffff)) & b
    e.append(ExprAff(b, c))
    return e, []
def bis_w(ir, instr, a, b):
    # Bit set: b |= a; flags untouched.
    e, a, b = mng_autoinc(a, b, 16)
    c = a | b
    e.append(ExprAff(b, c))
    return e, []
def bit_w(ir, instr, a, b):
    # Bit test: AND for flags only, destination unchanged.
    e, a, b = mng_autoinc(a, b, 16)
    c = a & b
    e += update_flag_zn_r(c)
    e += update_flag_cf_inv_zf(c)
    e.append(ExprAff(of, ExprInt1(0)))
    return e, []
"""
def sub_b(ir, instr, a, b):
e, a, b = mng_autoinc(a, b, 8)
c = b - a
e.append(ExprAff(b, c))
e += update_flag_zn_r(c)
e += update_flag_sub_cf(b, a, c)
return None, e, []
"""
def sub_w(ir, instr, a, b):
    # Word subtract: b -= a, updating Z/N/C (overflow handling was
    # deliberately disabled for microcorruption compatibility — see below).
    e, a, b = mng_autoinc(a, b, 16)
    c = b - a
    e.append(ExprAff(b, c))
    e += update_flag_zn_r(c)
    e += update_flag_sub_cf(b, a, c)
    # micrcorruption
    # e += update_flag_sub_of(a, b, c)
    # e += update_flag_sub_of(b, a, c)
    return e, []
def add_b(ir, instr, a, b):
    # Byte add: b += a with Z/N/C/V updated on the 8-bit result.
    e, a, b = mng_autoinc(a, b, 8)
    if isinstance(b, ExprMem):
        b = ExprMem(b.arg, 8)
    else:
        b = b[:8]
    a = a[:8]
    c = b + a
    e.append(ExprAff(b, c))
    e += update_flag_zn_r(c)
    e += update_flag_add_cf(a, b, c)
    e += update_flag_add_of(a, b, c)
    return e, []
def add_w(ir, instr, a, b):
    # Word add: b += a with Z/N/C/V updated.
    e, a, b = mng_autoinc(a, b, 16)
    c = b + a
    e.append(ExprAff(b, c))
    e += update_flag_zn_r(c)
    e += update_flag_add_cf(a, b, c)
    e += update_flag_add_of(a, b, c)
    return e, []
def dadd_w(ir, instr, a, b):
    # Decimal (BCD) add, modelled with the opaque "bcdadd" operator.
    e, a, b = mng_autoinc(a, b, 16)
    # TODO: microcorruption no carryflag
    c = ExprOp("bcdadd", b, a)  # +zeroExtend(cf, 16))
    e.append(ExprAff(b, c))
    # e += update_flag_zn_r(c)
    # micrcorruption
    e += update_flag_zf(a)
    # e += update_flag_nf(a)
    e += reset_sr_res()
    e.append(ExprAff(cf, ExprOp("bcdadd_cf", b, a)))  # +zeroExtend(cf, 16))))
    # of : undefined
    return e, []
def xor_w(ir, instr, a, b):
    # Word XOR: b ^= a; C = NOT Z, OF set when both operands are negative.
    e, a, b = mng_autoinc(a, b, 16)
    c = b ^ a
    e.append(ExprAff(b, c))
    e += update_flag_zn_r(c)
    e += update_flag_cf_inv_zf(c)
    e.append(ExprAff(of, b.msb() & a.msb()))
    return e, []
def push_w(ir, instr, a):
    # Push: store a at SP-2, then decrement SP by 2.
    e = []
    e.append(ExprAff(ExprMem(SP - ExprInt16(2), 16), a))
    e.append(ExprAff(SP, SP - ExprInt16(2)))
    return e, []
def call(ir, instr, a):
    # Call: push the return address (next instruction), then branch to a.
    e, a, dummy = mng_autoinc(a, None, 16)
    n = ExprId(ir.get_next_label(instr), 16)
    e.append(ExprAff(ExprMem(SP - ExprInt16(2), 16), n))
    e.append(ExprAff(SP, SP - ExprInt16(2)))
    e.append(ExprAff(PC, a))
    e.append(ExprAff(ir.IRDst, a))
    return e, []
def swpb(ir, instr, a):
    # Swap the low and high bytes of a.
    e = []
    x, y = a[:8], a[8:16]
    e.append(ExprAff(a, ExprCompose([(y, 0, 8),
                                     (x, 8, 16)])))
    return e, []
def cmp_w(ir, instr, a, b):
    # Word compare: b - a for flags only; b is not written.
    e, a, b = mng_autoinc(a, b, 16)
    c = b - a
    e += update_flag_zn_r(c)
    e += update_flag_sub_cf(b, a, c)
    e += update_flag_sub_of(b, a, c)
    return e, []
def cmp_b(ir, instr, a, b):
    # Byte compare: low-byte b - a for flags only.
    e, a, b = mng_autoinc(a, b, 8)
    c = b[:8] - a[:8]
    e += update_flag_zn_r(c)
    e += update_flag_sub_cf(b[:8], a[:8], c)
    e += update_flag_sub_of(b[:8], a[:8], c)
    return e, []
# Conditional/unconditional jumps: each assigns both PC and IRDst so the
# IR destination tracks the architectural program counter.
def jz(ir, instr, a):
    # Jump if ZF set.
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(zf, a, n)))
    e.append(ExprAff(ir.IRDst, ExprCond(zf, a, n)))
    return e, []
def jnz(ir, instr, a):
    # Jump if ZF clear.
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(zf, n, a)))
    e.append(ExprAff(ir.IRDst, ExprCond(zf, n, a)))
    return e, []
def jl(ir, instr, a):
    # Jump if less (NF xor OF).
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(nf ^ of, a, n)))
    e.append(ExprAff(ir.IRDst, ExprCond(nf ^ of, a, n)))
    return e, []
def jc(ir, instr, a):
    # Jump if CF set.
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(cf, a, n)))
    e.append(ExprAff(ir.IRDst, ExprCond(cf, a, n)))
    return e, []
def jnc(ir, instr, a):
    # Jump if CF clear.
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(cf, n, a)))
    e.append(ExprAff(ir.IRDst, ExprCond(cf, n, a)))
    return e, []
def jge(ir, instr, a):
    # Jump if greater-or-equal (NF xor OF clear).
    n = ExprId(ir.get_next_label(instr), 16)
    e = []
    e.append(ExprAff(PC, ExprCond(nf ^ of, n, a)))
    e.append(ExprAff(ir.IRDst, ExprCond(nf ^ of, n, a)))
    return e, []
def jmp(ir, instr, a):
    # Unconditional jump.
    e = []
    e.append(ExprAff(PC, a))
    e.append(ExprAff(ir.IRDst, a))
    return e, []
def rrc_w(ir, instr, a):
    # Rotate right through carry: old CF enters bit 15, old bit 0 -> CF.
    e = []
    c = ExprCompose([(a[1:16], 0, 15),
                     (cf, 15, 16)])
    e.append(ExprAff(a, c))
    e.append(ExprAff(cf, a[:1]))
    # e += update_flag_zn_r(c)
    # micrcorruption
    e += update_flag_zf(a)
    # e += update_flag_nf(a)
    e += reset_sr_res()
    e.append(ExprAff(of, ExprInt1(0)))
    return e, []
def rra_w(ir, instr, a):
    # Arithmetic shift right: sign bit is replicated into bit 15.
    e = []
    c = ExprCompose([(a[1:16], 0, 15),
                     (a[15:16], 15, 16)])
    e.append(ExprAff(a, c))
    # TODO: error in disasm microcorruption?
    # e.append(ExprAff(cf, a[:1]))
    # e += update_flag_zn_r(c)
    # micrcorruption
    e += update_flag_zf(a)
    # e += update_flag_nf(a)
    e += reset_sr_res()
    e.append(ExprAff(of, ExprInt1(0)))
    return e, []
def sxt(ir, instr, a):
    # Sign-extend the low byte of a to 16 bits; C = NOT Z, OF cleared.
    e = []
    c = a[:8].signExtend(16)
    e.append(ExprAff(a, c))
    e += update_flag_zn_r(c)
    e += update_flag_cf_inv_zf(c)
    e.append(ExprAff(of, ExprInt1(0)))
    return e, []
# Dispatch table: assembler mnemonic -> IR semantics builder.
mnemo_func = {
    "mov.b": mov_b,
    "mov.w": mov_w,
    "and.b": and_b,
    "and.w": and_w,
    "bic.b": bic_b,
    "bic.w": bic_w,
    "bis.w": bis_w,
    "bit.w": bit_w,
    "sub.w": sub_w,
    "add.b": add_b,
    "add.w": add_w,
    "push.w": push_w,
    "dadd.w": dadd_w,
    "xor.w": xor_w,
    "call": call,
    "swpb": swpb,
    "cmp.w": cmp_w,
    "cmp.b": cmp_b,
    "jz": jz,
    "jnz": jnz,
    "jl": jl,
    "jc": jc,
    "jnc": jnc,
    "jmp": jmp,
    "jge": jge,
    "rrc.w": rrc_w,
    "rra.w": rra_w,
    "sxt": sxt,
}
# The status register (SR) viewed as a composition of its individual flag
# bits; used to expand whole-SR reads/writes into per-flag assignments.
composed_sr = ExprCompose([
    (cf, 0, 1),
    (zf, 1, 2),
    (nf, 2, 3),
    (gie, 3, 4),
    (cpuoff, 4, 5),
    (osc, 5, 6),
    (scg0, 6, 7),
    (scg1, 7, 8),
    (of, 8, 9),
    (res, 9, 16),
])
def ComposeExprAff(dst, src):
    """Split one assignment to a compose into per-field slice assignments."""
    return [ExprAff(field, src[start:stop])
            for field, start, stop in dst.args]
class ir_msp430(ir):
    """Instruction -> IR translator for the MSP430 architecture."""
    def __init__(self, symbol_pool=None):
        ir.__init__(self, mn_msp430, None, symbol_pool)
        self.pc = PC
        self.sp = SP
        self.IRDst = ExprId('IRDst', 16)
    def mod_pc(self, instr, instr_ir, extra_ir):
        # PC substitution is done in mod_sr instead.
        pass
    def get_ir(self, instr):
        """Build the IR assignments for one decoded instruction."""
        # print instr#, args
        args = instr.args
        instr_ir, extra_ir = mnemo_func[instr.name](self, instr, *args)
        self.mod_sr(instr, instr_ir, extra_ir)
        return instr_ir, extra_ir
    def mod_sr(self, instr, instr_ir, extra_ir):
        """Expand SR accesses into per-flag form and pin PC to a constant."""
        for i, x in enumerate(instr_ir):
            # Replace reads of the whole SR with its flag composition.
            x = ExprAff(x.dst, x.src.replace_expr({SR: composed_sr}))
            instr_ir[i] = x
            if x.dst != SR:
                continue
            # A write to SR becomes one assignment per flag field.
            xx = ComposeExprAff(composed_sr, x.src)
            instr_ir[i:i+1] = xx
        for i, x in enumerate(instr_ir):
            # Reads of PC evaluate to the address of the next instruction.
            x = ExprAff(x.dst, x.src.replace_expr(
                {self.pc: ExprInt16(instr.offset + instr.l)}))
            instr_ir[i] = x
        if extra_ir:
            raise NotImplementedError('not fully functional')
|
kod3r/miasm
|
miasm2/arch/msp430/sem.py
|
Python
|
gpl-2.0
| 10,533
|
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb import impl_idl
from neutron.tests import base as test_base
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
# NOTE(twilson) functools.partial does not work for this
def trpatch(*args, **kwargs):
    """Decorator factory patching an attribute of NeutronOVSDBTransaction.

    Equivalent to ``mock.patch.object(impl_idl.NeutronOVSDBTransaction,
    *args, **kwargs)`` applied to the decorated test method; an explicit
    closure is required because functools.partial does not work here.
    """
    def decorator(func):
        patcher = mock.patch.object(
            impl_idl.NeutronOVSDBTransaction, *args, **kwargs)
        return patcher(func)
    return decorator
class ImplIdlTestCase(base.BaseSudoTestCase):
    """Functional tests for the native (idl) OVSDB transaction backend."""
    def setUp(self):
        super(ImplIdlTestCase, self).setUp()
        # Force the native OVSDB interface so impl_idl is exercised.
        self.config(group='OVS', ovsdb_interface='native')
        self.ovs = ovs_lib.BaseOVS()
        self.brname = test_base.get_rand_device_name(net_helpers.BR_PREFIX)
        # Make sure exceptions pass through by calling do_post_commit directly
        mock.patch.object(
            impl_idl.NeutronOVSDBTransaction, "post_commit",
            side_effect=impl_idl.NeutronOVSDBTransaction.do_post_commit,
            autospec=True).start()
    def _add_br(self):
        # NOTE(twilson) we will be raising exceptions with add_br, so schedule
        # cleanup before that.
        self.addCleanup(self.ovs.delete_bridge, self.brname)
        ovsdb = self.ovs.ovsdb
        with ovsdb.transaction(check_error=True) as tr:
            tr.add(ovsdb.add_br(self.brname))
        return tr
    def _add_br_and_test(self):
        # Create the bridge, then verify it got a valid OpenFlow port number.
        self._add_br()
        ofport = self.ovs.db_get_val("Interface", self.brname, "ofport")
        self.assertTrue(int(ofport))
        self.assertTrue(ofport > -1)
    def test_post_commit_vswitchd_completed_no_failures(self):
        self._add_br_and_test()
    @trpatch("vswitchd_has_completed", return_value=True)
    @trpatch("post_commit_failed_interfaces", return_value=["failed_if1"])
    @trpatch("timeout_exceeded", return_value=False)
    def test_post_commit_vswitchd_completed_failures(self, *args):
        # A failed interface reported by vswitchd must surface as an error.
        self.assertRaises(impl_idl.VswitchdInterfaceAddException, self._add_br)
    @trpatch("vswitchd_has_completed", return_value=False)
    def test_post_commit_vswitchd_incomplete_timeout(self, *args):
        # Due to timing issues we may rarely hit the global timeout, which
        # raises RuntimeError to match the vsctl implementation
        self.ovs.vsctl_timeout = 3
        self.assertRaises((api.TimeoutException, RuntimeError), self._add_br)
|
igor-toga/local-snat
|
neutron/tests/functional/agent/ovsdb/test_impl_idl.py
|
Python
|
apache-2.0
| 3,064
|
import sublime
import sublime_plugin
import os
import subprocess
# Define the "Subl" window command.
class SublCommand(sublime_plugin.WindowCommand):
    # Execute the "Subl" command.
    # NOTE(review): mutable default argument `args=[]`; safe only if callers
    # never mutate it — consider `args=None`.
    def run(self, args = []):
        # Expand window/environment variables (e.g. $file, $project) in args.
        env = self.window.extract_variables()
        args = [sublime.expand_variables(x, env) for x in args]
        # Locate the Sublime Text executable.
        executable_path = sublime.executable_path()
        # On macOS the CLI helper lives inside the application bundle.
        if sublime.platform() == 'osx':
            app_path = executable_path[:executable_path.rfind(".app/") + 5]
            executable_path = app_path + "Contents/SharedSupport/bin/subl"
        # Launch the "subl" command with the expanded arguments.
        subprocess.Popen([executable_path] + args)
        # Work around the window losing focus on Windows.
        if sublime.platform() == "windows":
            def fix_focus():
                window = sublime.active_window()
                view = window.active_view()
                window.run_command('focus_neighboring_group')
                window.focus_view(view)
            sublime.set_timeout(fix_focus, 300)
|
edonet/package
|
Erik/subl_command.py
|
Python
|
isc
| 1,145
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module (unless the environment
    # already overrides it), then dispatch the command-line arguments to
    # Django's management framework.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Proyecto_Merka.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
AngelValera/proyectoIV
|
manage.py
|
Python
|
mit
| 257
|
from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils.cmd import make, tar
class LibreSSL(bb.Project):
    """LibreSSL benchbuild project (module historically named openssl.py)."""
    # benchbuild registry metadata.
    NAME = 'libressl'
    DOMAIN = 'encryption'
    GROUP = 'benchbuild'
    # Test binaries produced by libressl's "tests" subdirectory; each is
    # wrapped and executed by run_tests().
    BINARIES = [
        "aeadtest", "aes_wrap", "asn1test", "base64test", "bftest", "bntest",
        "bytestringtest", "casttest", "chachatest", "cipherstest", "cts128test",
        "destest", "dhtest", "dsatest", "ecdhtest", "ecdsatest", "ectest",
        "enginetest", "evptest", "exptest", "gcm128test", "gost2814789t",
        "hmactest", "ideatest", "igetest", "md4test", "md5test", "mdc2test",
        "mont", "pbkdf2", "pkcs7test", "poly1305test", "pq_test", "randtest",
        "rc2test", "rc4test", "rmdtest", "sha1test", "sha256test", "sha512test",
        "shatest", "ssltest", "timingsafe", "utf8test"
    ]
    SOURCE = [
        HTTP(
            remote={
                '2.1.6.': (
                    'http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/'
                    'libressl-2.1.6.tar.gz'
                )
            },
            local='libressl.tar.gz'
        )
    ]
    CONTAINER = ContainerImage().from_('benchbuild:alpine')
    def compile(self):
        """Unpack, configure and build libressl plus its test binaries."""
        libressl_source = local.path(self.source_of('libressl.tar.gz'))
        libressl_version = self.version_of('libressl.tar.gz')
        # Static, PIC build so the benchbuild compiler wrapper can intercept.
        self.cflags += ["-fPIC"]
        clang = bb.compiler.cc(self)
        tar("xfz", libressl_source)
        unpack_dir = local.path(f'libressl-{libressl_version}')
        configure = local[unpack_dir / "configure"]
        _configure = bb.watch(configure)
        _make = bb.watch(make)
        with local.cwd(unpack_dir):
            with local.env(CC=str(clang)):
                _configure(
                    "--disable-asm", "--disable-shared", "--enable-static",
                    "--disable-dependency-tracking", "--with-pic=yes"
                )
            _make("-j8")
            # Build only the listed test binaries from the tests directory.
            make_tests = make["-Ctests", "-j8"]
            _make_tests = bb.watch(make_tests)
            _make_tests(LibreSSL.BINARIES)
    def run_tests(self):
        """Wrap every test binary, then run the libressl check target."""
        libressl_version = self.version_of('libressl.tar.gz')
        unpack_dir = local.path(f'libressl-{libressl_version}')
        with local.cwd(unpack_dir / "tests"):
            for binary in LibreSSL.BINARIES:
                bb.wrap(local.cwd / binary, self)
        with local.cwd(unpack_dir):
            _make = bb.watch(make)
            # "-i" keeps going past individual test failures.
            _make("V=1", "check", "-i")
|
PolyJIT/benchbuild
|
benchbuild/projects/benchbuild/openssl.py
|
Python
|
mit
| 2,575
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:13:26 2016
@author: eman
"""
from __future__ import division
from __future__ import absolute_import
import numpy as np
from scipy import linalg
from scipy.sparse import coo_matrix, csr_matrix, block_diag, vstack
from manifold_learning.se import sim_potential
from utils.graph_construction import create_feature_mat, compute_adjacency, \
create_laplacian
from utils.eigenvalue_decomposition import EigSolver
class ManifoldAlignment(object):
    """ Manifold Alignment for aligning two heterogenous datasets

    Parameters (TODO)
    ----------

    Returns (TODO)
    -------

    References (TODO)
    ----------
    * Dr. Chang Wang and his collaborators
        https://sites.google.com/site/changwangnk/home/ma-html
    * Dr. Devis Tuia and his collaborators
        http://isp.uv.es/code/ssma.htm
    * Dr. Czaja and his collaborators
        http://arxiv.org/pdf/1102.4086.pdf
        http://goo.gl/Gd5HxU
    """
    def __init__(self,
                 # eigenvalue solver initials
                 eig_solver = 'arpack',
                 norm_lap = False,
                 tol = 1E-12,
                 # eigenvalue tuner initials
                 feature = False,
                 alpha = 17.78,
                 mu = 1.0,
                 beta = 1.0,
                 # knn problem initials
                 n_neighbors = 2,
                 nn_algo = 'brute',
                 metric = 'euclidean',
                 n_jobs = 1,
                 affinity = 'nearest_neighbors',
                 weight = 'heat',
                 gamma = 1.0,
                 trees = 10,
                 # potential matrix initials
                 normalization = 'dis',
                 sp_neighbors = 4,
                 sp_affinity = 'heat',
                 eta = 1.0,
                 # manifold alignment problem parameters
                 ma_method = 'wang11',
                 n_components = 2,
                 # general problem parameters
                 sparse_mat = False,
                 lap_method = 'personal',
                 random_state=0):
        # eigenvalue solver initialization
        self.n_components = n_components
        self.eig_solver = eig_solver
        self.norm_lap = norm_lap
        self.tol = tol
        # eigenvalue tuner initialization
        self.feature = feature
        self.normalization = normalization
        self.alpha = alpha
        self.beta = beta
        self.mu = mu
        # knn problem initialization
        self.n_neighbors = n_neighbors
        self.nn_algo = nn_algo
        self.weight = weight
        self.metric = metric
        self.n_jobs = n_jobs
        self.affinity = affinity
        self.gamma = gamma
        self.trees = trees
        # potential matrix initials
        self.sp_neighbors = sp_neighbors
        self.sp_affinity = sp_affinity
        self.eta = eta
        # manifold alignment problem initialization
        self.ma_method = ma_method
        # general problem parameters
        self.sparse_mat = sparse_mat
        self.lap_method = lap_method
        self.random_state = random_state
    def fit(self, X, Y):
        """Fit function for the alignment scheme. This is where I
        construct all of the necessary matrices
        """
        # check inputs for specified MA class criteria
        # TODO: check X, Y input as dictionary class
        # TODO: check X, Y keys as list class
        # TODO: check X, Y keys lists are nonempty
        # TODO: check X, Y for specific keys and nothing else
        # extract the data from the x, y data inputs
        self.X_data_, X_datasets, \
        self.Y_data_, Y_datasets \
        = self._data(X, Y)
        # compute the geometric laplacian term
        # NOTE(review): the locals L_g/D_g/W_g and V_s/V_d below are unused;
        # _compute_weights/_compute_potential also store their results on
        # self, which _spectral_embedding() reads.
        L_g, D_g, W_g, W_datasets = self._compute_weights(X_datasets)
        # find the appropriate potential matrices
        V_s, V_d = self._compute_potential()
        _, self.embedding_ = self._spectral_embedding()
        return self
    def transform(self, X, n_components=None):
        # find the projection functions
        self.eigVecs_ = self._find_projections(X['label'],
                                               n_components=n_components)
        Xproj = {}
        # project the training samples w/ new embedding
        Xproj['train'] = self._project_data(X['label'])
        # project test samples with new embedding
        Xproj['test'] = self._project_data(X['test'])
        # project data
        return Xproj
    def _find_projections(self, X, n_components=None):
        # find the number of projected components
        if n_components:
            proj_components = n_components
        else:
            proj_components = self.n_components
        # get the eigenvectors associated with each dataset
        # NOTE(review): `end_idx += start_idx` makes end_idx cumulative, but
        # `start_idx += end_idx` then adds that cumulative value again — for
        # three or more datasets the slice start drifts past the intended
        # offset.  `start_idx = end_idx` looks like the intended update;
        # confirm against the original authors' code.
        E = []
        start_idx = 0
        for end_idx in self.d_dims_['dataset']:
            end_idx += start_idx
            E.append(self.embedding_[start_idx:end_idx,:proj_components+1])
            start_idx += end_idx
        # return a list of projection functions
        return E
    def _project_data(self, X):
        # apply each dataset's eigenvector block to its data matrix
        return [np.dot(XeigVec.T, Xdata.T).T for Xdata, XeigVec \
                in zip(X,self.eigVecs_)]
    def _spectral_embedding(self):
        # TODO: create feature-based implementation
        # tune the eigenvalue problem
        A, B = self._embedding_tuner()
        # Fit the parameters to solve the generalized eigenvalue problem
        eig_model = EigSolver(n_components=self.n_components,
                              eig_solver=self.eig_solver,
                              sparse=self.sparse_mat,
                              tol=self.tol,
                              norm_laplace=self.norm_lap)
        if not self.sparse_mat:
            return eig_model.find_eig(A=A.toarray(), B=B.toarray())
        else:
            return eig_model.find_eig(A=A, B=B)
    def _embedding_tuner(self):
        # Choose the (A, B) pencil of the generalized eigenproblem according
        # to the selected manifold-alignment formulation.
        if self.ma_method == 'wang11':
            A = self.L_g + self.mu * self.V_s
            B = self.V_d
        elif self.ma_method in ['wang']:
            A = self.L_g + self.mu * self.V_s
            B = self.D_g
        elif self.ma_method in ['ssma']:
            A = self.mu * self.L_g + (1- self.mu) * self.V_s
            B = self.V_d
        elif self.ma_method in ['sema']:
            A = self.L_g + self.mu * self.V_s
            B = self.D_g + self.V_d
        else:
            raise ValueError('Unrecognized Manifold Alignment problem.')
        return create_feature_mat(self.X_data_, A, sparse=True), \
               create_feature_mat(self.X_data_, B, sparse=True)
    def _data(self, X, Y):
        # get dimensions of the input variables
        self.n_samples_, self.d_dims_ = self._get_dimensions(X, Y)
        # create X['dataset'] key with stacks of labeled and
        # unlabeled data
        # vertically concatenate labeled and unlabeled data
        X_datasets = create_stacks(X['label'], X['unlabel'])
        self.X_train = X['label']
        # find the similarity and dissimilarity matrices
        #TODO: figure out how to stack a list of sparse objects
        # on a list of arrays. (eg label (2xmxn), unlabel (2))
        Y_unlabel = create_unlabeled(X['unlabel'], sparse=None)
        Y_datasets = create_stacks(Y['label'], Y_unlabel)
        # create block diagonal matrices from datasets
        # TODO: create feature-based option
        return block_diag(X_datasets), X_datasets, \
               coo_matrix(np.vstack(Y_datasets)), Y_datasets
    def _compute_potential(self,
                           X=None,
                           X_datasets=None,
                           X_datasets_imgs=None):
        # construct the similarity and dissimilarity potential matrix
        if self.ma_method in ['wang', 'ssma', 'wang11', 'kema', 'sema']:
            # compute the similarity potential
            self.V_s, _ = sim_potential(self.Y_data_,
                                        potential='sim',
                                        norm_lap=self.norm_lap,
                                        method=self.lap_method,
                                        sparse_mat=self.sparse_mat)
            # compute the dissimilarity potential
            self.V_d, _ = sim_potential(self.Y_data_,
                                        potential='dis',
                                        norm_lap=self.norm_lap,
                                        method=self.lap_method,
                                        sparse_mat=self.sparse_mat)
        # TODO: create spatial-spectral potential
        '''Any method w/ Spatial-Spectral Potential
        will have to loop through each of the laplacian
        datasets and construct spatial-spectral terms for each
        individually. Then block them all together.
        Parameters - X, X_datasets, X_images
        '''
        else:
            raise ValueError('Unrecognized manifold alignment method.')
        return self.V_s, self.V_d
    def _adjacency_blocks(self, X):
        """Create adjacency block matrices from adjacency matrices

        Parameters
        ----------
        X - a k list of (n x m) dense array entries

        Returns
        -------
        W - a sparse diagonal (n*k) x (n*k) matrix
        W_datasets - a list of sparse (n x m) adjacency matrix entries
        """
        # create adjacency matrices
        W_datasets = [compute_adjacency(dataset,
                                        n_neighbors=self.n_neighbors,
                                        affinity=self.affinity,
                                        weight=self.weight,
                                        sparse=self.sparse_mat,
                                        neighbors_algorithm=self.nn_algo,
                                        metric=self.metric,
                                        trees=self.trees,
                                        gamma=self.gamma) \
                      for dataset in X]
        ''' TODO: list comprehensions of list comprehensions for different
        values in the adjacency matrix. e.g. different weight, affinity,
        n_neighbors, metric, trees, gamma, algorithm for each matrix.
        '''
        # return
        return block_diag(W_datasets), W_datasets
    def _laplacian_blocks(self, Adjacency):
        """Creates block diagonal matrices from adjacency matrices

        Parameters:
        -----------
        W_datasets - a list of k (n x n) sparse matrices

        Returns
        -------
        L - a sparse (n*k x n*k) Laplacian matrix
        L_datasets - a list of k (n x n) sparse Laplacian matrices
        D - a sparse (n*k x n*k) Diagonal matrix
        D_datasets - a list of k (n x n) sparse Diagonal matrices

        References
        ----------
        1) Clever trick with the dual output list comprehension
            stackoverflow:
                http://goo.gl/ojpRQg
            It don't think it's faster to compute the lists separately
            because I'm doing some heavier calculations behind the scenes
            as compared to actual number creating.
        """
        L_datasets, D_datasets = zip(*[create_laplacian(dataset,
                                                        norm_lap=self.norm_lap,
                                                        method=self.lap_method,
                                                        sparse=self.sparse_mat) \
                                       for dataset in Adjacency])
        return block_diag(L_datasets), L_datasets, \
               block_diag(D_datasets), D_datasets
    def _compute_weights(self, X_datasets):
        """ Creates the Weighted adjacency matrix
        """
        # create adjacency block matrices
        # compute the geometric adjacency matrix from the data
        self.W_g, W_datasets = self._adjacency_blocks(X_datasets)
        # create Laplacian and Degree block matrices
        self.L_g, L_datasets, self.D_g, D_datasets \
        = self._laplacian_blocks(W_datasets)
        return self.L_g, self.D_g, self.W_g, W_datasets
    def _get_dimensions(self, X, Y):
        """This function will extract the number of samples and number of
        dimensions from the X and Y values.

        Parameters:
        ----------
        X - dictionary of values
            e.g. X['labeled'] -> a list of values
                 X['unlabeled'] -> a list of values

        Returns
        -------
        n_samples : dictionary, keys - 'label', 'unlabel', 'data', 'dataset'
        """
        # initialize data structures for saving the variables
        n_samples = {}; d_dims = {}
        n_samples['label'] = []; d_dims['label'] = []
        n_samples['unlabel'] = []; d_dims['unlabel'] = []
        n_samples['dataset'] = []; d_dims['dataset'] = []
        n_samples['test'] = []; d_dims['test'] = []
        n_samples['data'] = []; d_dims['data'] = []
        for labeled_data, unlabeled_data, test_data in zip(X['label'],
                                                           X['unlabel'],
                                                           X['test']):
            # grab dimensions from labeled data structures
            n_labeled, d_labeled = np.shape(labeled_data)
            # save values
            n_samples['label'].append(n_labeled)
            d_dims['label'].append(d_labeled)
            # grab dimensions from unlabeled data structures
            n_unlabeled, d_unlabeled = np.shape(unlabeled_data)
            # save values
            n_samples['unlabel'].append(n_unlabeled)
            d_dims['unlabel'].append(d_unlabeled)
            # save labeled and unlabeled values
            n_samples['dataset'].append(n_labeled+n_unlabeled)
            d_dims['dataset'].append(d_labeled)
            # grab dimensions from unlabeled data structures
            n_test, d_test = np.shape(test_data)
            n_samples['test'].append(n_test)
            d_dims['test'].append(d_test)
        n_samples['data'] = np.sum(n_samples['dataset'])
        d_dims['data'] = np.sum(d_dims['dataset'])
        return n_samples, d_dims
# create matrix stacks
def create_stacks(X1, X2, sparse=None):
    """Utility function - vertically stacks paired entries of two lists.

    Assumes the paired entries are (n x m) and (n' x m') where m = m',
    but not necessarily n = n'.

    Parameters:
    ----------
    X1 - a list of (n x m) entries
    X2 - a list of (n' x m) entries
    sparse - when truthy, stack as sparse COO matrices; otherwise as
        dense numpy arrays. (default=None)

    Returns:
    --------
    X - a list of ((n + n') x m) entries
    """
    pairs = zip(X1, X2)
    if sparse:
        return [vstack([top, bottom], format='coo') for top, bottom in pairs]
    return [np.vstack([top, bottom]) for top, bottom in pairs]
# create labeled data matrix
def create_unlabeled(X, sparse=None):
    """Utility function that creates zero "label" vectors that mimic
    the unlabeled samples, one per dataset.

    Parameters:
    ----------
    X - a list of (n x m) entries of data points
    sparse - when truthy, return all-zero (n x 1) sparse COO matrices;
        otherwise dense numpy zero arrays. (default=None)

    Returns:
    --------
    Y - a list of (n x 1) entries of zeros
    """
    # Bug fix: the original used np.float, a deprecated alias of the builtin
    # float that was removed in NumPy 1.24; np.float64 is the equivalent
    # concrete dtype.
    sizes = [np.shape(data)[0] for data in X]
    if sparse:  # sparse array creation
        return [coo_matrix((n_rows, 1), dtype=np.float64) for n_rows in sizes]
    return [np.zeros((n_rows, 1), dtype=np.float64) for n_rows in sizes]
|
jejjohnson/manifold_learning
|
src/python/manifold_alignment/ssma.py
|
Python
|
mit
| 15,928
|
"""
NAME:
beta_functions.py
Smaller b values means falloff is slower
PURPOSE:
Contruct Membership Beta Functions (MBFs) parameters for each
hydrometeor type and fuzzy set based on scattering simulations (Dolan and
Rutledge, 2009)
# Basic form of a Membership Beta Function is:
#
# 1
# ---------------------
# beta(x) = | |^b
# 1+ | ((x-m)/a)^2 |
# | |
#
# Where x = input data (Zh, Zdr, etc)
# m = center
# a = width
# b = slope
Anticipate having 6 input data fields and 10 fuzzy sets (HID types)
Input data fields: HID types: Fuzzy set:
------------------- ---------- ----------
Z_h Drizzle 1
Z_dr Rain 2
K_dp Ice Crystals 3
LDR Aggregates 4
rho_hv Wet Snow 5
Temperature Vertical Ice 6
Low Density Graupel 7
High Density Graupel 8
Hail 9
Big Drops 10
ORIGINAL AUTHOR:
Kyle C. Wiens
PRINCIPAL CONTACT:
Brenda Dolan
bdolan@atmos.colostate.edu
ORIGINAL DATE:
26 April 2002.
MODIFICATIONS:
28 April 2002: Changed the fuzzy sets to include vertical ice and
get rid of low/high density snow.
15 May 2002: Changed KDP MBF for vertical ice to be unity from -0.2
to -0.6. Used to be from -0.6 to 0.0. This didn't make much difference.
19 February 2003: Changed MBFs for wet/dry graupel and Wet Snow to
conform to Liu and Chandrasekar (2000). Changed MBF for vertical ice to
conform to Carey and Rutledge (1998).
29 January 2009: Changed the MBFS to the theory-based S-band values. NOTE:
LDR VALUES WERE NOT MODIFIED.
08 February 2012: Changed the categories and the MBF values based on scattering
simulations. NOTE: LDR values from simulations were added. BD
10 June 2012: Adjusted MBFs for performance. BD
25 January 2015: Pythonized (TJL)
05 August 2015: Python 3 compatible (TJL)
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# Beta function parameters stored in individual CSVs in separate directory
CSV_DIR = os.sep.join([os.path.dirname(__file__),
'beta_function_parameters'])+'/'
################################
# Helper Functions Below
################################
def _get_beta_example(var, mbf_sets, field, i):
"""Internal beta function calculator"""
return 1.0/(1.0 + (((var - mbf_sets[field]['m'][i]) /
mbf_sets[field]['a'][i])**2)**mbf_sets[field]['b'][i])
def plot_beta_functions(mbf_sets, band, temp_factor, pdir='./', n_types=10):
    """
    Optional plotting routine that produces basic beta function curves
    for every hydrometeor species.  Writes one PNG per species named
    '<species>_beta_<band>.png' into *pdir*.

    mbf_sets = Membership beta function sets
    band = 'X', 'C' or 'S'
    temp_factor = Factor to modify depth of T effects
    pdir = Path to plot locations
    n_types = Number of hydrometeor species
    """
    label = ['Drizzle', 'Rain', 'Ice_Crystals', 'Aggregates',
             'Wet_Snow', 'Vertical_Ice', 'Low-Density_Graupel',
             'High-Density_Graupel', 'Hail', 'Big_Drops']
    # 'Large_Hail','SmHail_Rain','LgHail_Rain']
    # DUMMY ARRAYS TO PLOT OVER (plausible physical ranges of each variable)
    dz = np.arange(701)/10.
    zdr = -2.0 + np.arange(801)/100.
    kdp = -2.0 + np.arange(901)/100.
    ldr = -60.0 + np.arange(5900)/100.
    rho_hv = 0.75 + np.arange(2501)/10000.
    T = -50. + np.arange(1001)/10.
    for i in range(n_types):
        # Evaluate every variable's membership curve for species i.
        dz_beta = _get_beta_example(dz, mbf_sets, 'Zh_set', i)
        kdp_beta = _get_beta_example(kdp, mbf_sets, 'Kdp_set', i)
        zdr_beta = _get_beta_example(zdr, mbf_sets, 'Zdr_set', i)
        ldr_beta = _get_beta_example(ldr, mbf_sets, 'LDR_set', i)
        rho_beta = _get_beta_example(rho_hv, mbf_sets, 'rho_set', i)
        T_beta = _get_beta_example(T, mbf_sets, 'T_set', i)
        # One 3x2 panel figure per hydrometeor species.
        fig, ax = plt.subplots(3, 2, figsize=(8, 5))
        fig.subplots_adjust(top=0.90, bottom=0.1, left=0.05, right=0.95,
                            hspace=0.45)
        ax = ax.flatten()
        fig.suptitle('%s band %s, temp_factor: %.1f' % (band,
                     label[i], temp_factor))
        ax[0].plot(dz, dz_beta, linewidth=2)
        ax[0].set_title('dBZ')
        ax[1].plot(kdp, kdp_beta, linewidth=2)
        ax[1].set_title('Kdp')
        ax[2].plot(zdr, zdr_beta, linewidth=2)
        ax[2].set_title('Zdr')
        ax[3].plot(ldr, ldr_beta, linewidth=2)
        ax[3].set_title('LDR')
        ax[3].set_ylim(0, 1)
        ax[4].plot(rho_hv, rho_beta, linewidth=2)
        ax[4].set_title(r'$\rho$$_{hv}$')
        ax[5].plot(T, T_beta, linewidth=2)
        ax[5].set_title('T')
        plt.savefig(pdir+'%s_beta_%s.png' % (label[i], band))
        plt.close()
def get_beta_set(filename, factor=1.0):
    """
    Read one beta-function parameter CSV into a dict of numpy arrays.

    filename = Name of file (its first column is used as the index)
    factor = Modifier to effect of variable (normally only used for T);
             every m/a/b value is divided by it.
    """
    table = pd.read_csv(filename, index_col=0)
    return {column: np.array(table.loc[:, column]) / factor
            for column in ('m', 'a', 'b')}
################################
def get_mbf_sets_summer(use_temp=True, plot_flag=False, n_types=10,
                        temp_factor=1, band='S', fdir=CSV_DIR, verbose=False,
                        pdir='./'):
    """
    Define structures to hold the beta function parameters for each input
    variable and fuzzy set. Valid for summer precip.

    HID types:           Fuzzy set:
    -------------------------------
    Drizzle                  1
    Rain                     2
    Ice Crystals             3
    Aggregates               4
    Wet Snow                 5
    Vertical Ice             6
    Low-Density Graupel      7
    High-Density Graupel     8
    Hail                     9
    Big Drops               10

    Arguments:
    use_temp = Flag to create T beta function sets
    plot_flag = Flag to turn on optional beta function plots
    band = 'C' or 'S'
    temp_factor = Factor to modify depth of T effects; > 1 will broaden the
                  slopes of T MBFs
    fdir = Path to CSV locations
    n_types = Number of hydrometeor species
    verbose = Set to True to get text updates
    pdir = Directory to stash beta function plots (if made)

    Returns:
    mbf_sets = Membership beta function sets
    """
    me = '-->get_mbf_sets_summer '
    if verbose:
        print(me + ' 10 Category Summer %s-band HID' % band)
    # Each variable's (m, a, b) parameters live in a band-specific CSV.
    # Horizontal Reflectivity (Zh)
    fname = band + '-band_Reflectivity.csv'
    Zh_set = get_beta_set(fdir+fname)
    # Differential Reflectivity (Zdr)
    fname = band + '-band_Differential_Reflectivity.csv'
    Zdr_set = get_beta_set(fdir+fname)
    # Specific Differential Phase (Kdp)
    fname = band + '-band_Specific_Differential_Phase.csv'
    Kdp_set = get_beta_set(fdir+fname)
    # Linear depolarization ratio (LDR)
    fname = band + '-band_Linear_Depolarization_Ratio.csv'
    LDR_set = get_beta_set(fdir+fname)
    # Correlation coefficient at zero lag (rho)
    fname = band + '-band_Correlation_Coefficient.csv'
    rho_set = get_beta_set(fdir+fname)
    # Temperature (T) — optional; temp_factor broadens the T MBFs by
    # dividing every parameter.
    if use_temp:
        fname = band + '-band_Temperature.csv'
        T_set = get_beta_set(fdir+fname, factor=temp_factor)
        if verbose:
            print(me+'Using Temperature in HID')
    else:
        T_set = None
        if verbose:
            print(me+'No Temperature used in HID')
    # Now populate the full mbf_sets dictionary
    mbf_sets = {'Zh_set': Zh_set, 'Zdr_set': Zdr_set, 'Kdp_set': Kdp_set,
                'LDR_set': LDR_set, 'rho_set': rho_set, 'T_set': T_set}
    if plot_flag:
        plot_beta_functions(mbf_sets, band, temp_factor, pdir=pdir,
                            n_types=n_types)
    return mbf_sets
################################
if __name__ == '__main__':
    # Bug fix: the original called cdf_betas_sum(), a stale name that does
    # not exist in this module and raised a NameError when the file was run
    # as a script; get_mbf_sets_summer() is the current entry point.
    a = get_mbf_sets_summer(plot_flag=True, use_temp=True, temp_factor=2,
                            band='C')
|
jjhelmus/CSU_RadarTools
|
csu_radartools/beta_functions.py
|
Python
|
gpl-2.0
| 8,691
|
def test_noop_one():
    """Placeholder test that always passes (no-op)."""
    return None
|
jctanner/issuetests
|
tests/test_noop.py
|
Python
|
mit
| 30
|
from django.core.mail import EmailMessage
class SMSMessage(EmailMessage):
    """An SMS modelled on Django's EmailMessage: *body* is the text,
    *from_number* the sender and *to* the recipient numbers.

    https://docs.djangoproject.com/en/1.8/topics/email/#the-emailmessage-class
    """
    def __init__(self, body, from_number, to, connection=None):
        # An SMS has no subject, bcc, cc, attachments, headers or reply-to,
        # so None is passed for every e-mail-only positional slot.
        super().__init__(
            None,         # subject
            body,
            from_number,  # from_email slot
            to,
            None,         # bcc
            connection,
            None,         # attachments
            None,         # headers
            None,         # cc
            None,         # reply_to
        )
    def get_connection(self, fail_silently=False):
        """Return the backend connection, lazily creating an SMS one."""
        from smsish.sms import get_sms_connection
        if not self.connection:
            self.connection = get_sms_connection(fail_silently=fail_silently)
        return self.connection
|
RyanBalfanz/django-smsish
|
smsish/sms/message.py
|
Python
|
mit
| 632
|
# coding: utf-8
# lib2to3 test fixture: deliberately Python 2 source (a bare "print"
# statement) used to exercise BOM/encoding handling; not valid Python 3.
print "BOM BOOM!"
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.7/Lib/lib2to3/tests/data/bom.py
|
Python
|
mit
| 38
|
#!/usr/bin/env python3
import numpy as np
import scipy
from .Exceptions import TriAllelicError,InfoKeyError
from ponytools.Allele import Allele
def Fst(alt_freq_i, alt_freq_j):
    '''
    Calculates the fixation index (Fst) between two populations from
    their alternate allele frequencies.

    Returns 0 when both populations are fixed for the same allele;
    otherwise (H_t - H_s) / H_t, where H_s is the mean within-population
    heterozygosity and H_t the expected pooled-population heterozygosity.
    '''
    # Both populations fixed for the same allele: no differentiation.
    if alt_freq_i == alt_freq_j and alt_freq_i in (0, 1):
        return 0
    ref_freq_i = 1 - alt_freq_i
    ref_freq_j = 1 - alt_freq_j
    # Mean within-population heterozygosity (H_s).
    h_s = (2 * alt_freq_i * ref_freq_i + 2 * alt_freq_j * ref_freq_j) / 2
    # Expected heterozygosity of the pooled population (H_t).
    mean_alt = (alt_freq_i + alt_freq_j) / 2
    mean_ref = (ref_freq_i + ref_freq_j) / 2
    h_t = 2 * mean_alt * mean_ref
    return (h_t - h_s) / h_t
class Variant(object):
genomap = { # shared genomap
'./.' : -1, '0/0' : 0, '0/1' : 1, '1/0' : 1, '1/1' : 2,
-1 : -1, 0 : 0, 1 : 1, 2 : 2,
'.|.' : -1, '0|0' : 0, '0|1' : 1, '1|0' : 1, '1|1' : 2,
}
def __init__(self, chrom, pos, id, ref, alt, qual='.', fltr='PASS', info='.', fmt='GT:GQ', genos=None):
'''
A fairly lightweight representation of a VCF compatible variant.
Parameters
----------
chrom, pos, id, ref, alt, qual, filter, info, format, *genotypes
Returns
-------
Variant instance
'''
self.chrom = str(chrom)
self.pos = int(pos)
self.id = str(id)
self.ref = str(ref)
self.alt = str(alt)
if qual != '.':
self.qual = float(qual)
else:
self.qual = qual
self.fltr = str(fltr)
self.info = str(info)
self.fmt = str(fmt)
self.genos = genos
@classmethod
def from_iter(cls,iter):
'''
Creates a variant from a iterable or arguments.
It assumes that the items are in order, e.g. from a VCF line
'''
chrom,pos,id,ref,alt,qual,fltr,info,fmt,*genotypes = iter
self = cls(chrom,pos,id,ref,alt,qual,fltr,info,fmt,genos=genotypes)
return self
@classmethod
def from_str(cls,string):
chrom,pos,id,ref,alt,qual,fltr,info,fmt,*genotypes = string.strip().split()
self = cls(chrom,pos,id,ref,alt,qual,fltr,info,fmt,genos=genotypes)
return self
@classmethod
def from_dict(cls,dict):
chrom = dict['chrom']
pos = int(dict['pos'])
id = dict['id']
ref = dict['ref']
alt = dict['alt']
qual = dict['qual']
fltr = dict['fltr']
info = dict['info']
fmt = dict['fmt']
genos = dict['genos']
self = cls(chrom,pos,id,ref,alt,qual,fltr,info,fmt,genos=genos)
return self
@property
def is_phased(self):
''' return bool on phase state '''
if '|' in self.genos[0]:
return True
else:
return False
@property
def is_biallelic(self):
''' return bool true if variant is biallelic '''
if ',' in self.alt or ',' in self.ref:
return False
else:
return True
@property
def coor(self):
return (self.chrom, self.pos)
def add_info(self,key,val):
'''
Add to the INFO field
Parameters
----------
key:str
The key of the info item
val:str
The value of the info item
Returns
-------
None
'''
if self.info == '.' or self.info is None:
self.info = "{}={}".format(key,val)
else:
self.info += ";{}={}".format(key,val)
def get_info(self,key):
'''
Returns the value of a key in the INFO field,
if key is not in the info field, an InfoKeyError
exception is raised.
Parameters
----------
key : str
The key of the item wanted in the INFO field
Returns
-------
the value for key from INFO field
'''
# Handle when INFO field is blank
if self.info == '.' or self.info is None:
raise InfoKeyError('Nothing in INFO field')
info_dict = dict()
for x in map(lambda x: x.split('='),self.info.split(';')):
if len(x) == 1:
info_dict[x[0]] = 1
else:
info_dict[x[0]] = x[1]
try:
return info_dict[key]
except KeyError as e:
raise InfoKeyError('{} not in INFO field for {}'.format(key,self.id))
def __getitem__(self,item):
try:
return self.__dict__[item]
except KeyError as e:
pass
try:
return self.genos[item]
except KeyError as e:
pass
raise Exception("Value not found")
def __cmp__(self,other):
if self.chrom < other.chrom:
return -1
if self.chrom > other.chrom:
return 1
if self.chrom == other.chrom:
if self.pos < other.pos:
return -1
if self.pos > other.pos:
return 1
else:
return 0
def __repr__(self):
return "<PonyTools Variant at: {} {}>".format(self.chrom,self.pos)
def __str__(self):
'''
returns a string of variant suitable for VCF printing
'''
return "\t".join(map(str,[self.chrom, self.pos, self.id, self.ref,
self.alt, self.qual, self.fltr, self.info,
self.fmt ]+ self.genos ))
def __eq__(self,var):
if (self.chrom == var.chrom
and self.pos == var.pos
and self.id == var.id
and self.ref == var.ref
and self.alt == var.alt
and self.qual == var.qual
and self.fltr == var.fltr
and self.info == var.info
and self.fmt == var.fmt
and self.genos == var.genos):
return True
else:
return False
def alt_freq(self,samples_i=None,max_missing=0.3):
'''
Computes the ALTERNATE allele frequency.
Parameters
----------
samples_i : str iterable (individual ids)
If not None, will compute MAF for subset of individuals.
'''
genos = self.alleles()
if samples_i is not None:
genos = [genos[x] for x in samples_i]
num_mis = sum([x.count('.') for x in genos])
if (num_mis / (2*len(genos))) > max_missing:
return None
num_ref = sum([x.count('0') for x in genos])
num_alt = sum([x.count('1') for x in genos])
n = (2*len(genos)) - num_mis
return num_alt/n
def call_rate(self,samples_i=None):
'''
Computes the SNP Call Rate.
Parameters
----------
samples_i : str iterable (individual ids)
If not None, will compute Call Rate for subset of individuals.
'''
genos = self.alleles()
if samples_i is not None:
genos = [genos[x] for x in samples_i]
num_mis = sum([x.count('.') for x in genos])
n = (2*len(genos))
return 1-(num_mis/n)
def heterozygosity(self,samples_i=None,max_missing=0.3):
'''
Compute the heterozygositey of the SNP
for indivduials indices in samples_i
parameters
----------
samples_i : int iterable (individual ids)
If not None, will computer heterozygosity for
subset of individuals
'''
alt_freq = self.alt_freq(samples_i=samples_i,max_missing=max_missing)
if alt_freq is None:
return None
ref_freq = 1 - alt_freq
return 2 * alt_freq * ref_freq
def alleles(self,samples_i=None,samples_id=None,format_field='GT',transform=None):
''' return alleles *in order* for samples '''
if not transform:
transform = lambda x:x
if transform.__name__ == 'vcf2allele':
transform = transform(self.ref,self.alt)
gt_index = self.fmt.split(":").index(format_field)
if samples_i:
return [ transform(self.genos[sample_i].split(':')[gt_index]) for sample_i in samples_i]
else:
return [transform(geno.split(':')[gt_index]) for geno in self.genos]
def r2(self,variant,samples_i,samples_j):
'''
Returns the r2 value with another variant
'''
# Conform variants
variant.conform(self.ref)
# find common individuals
geno1 = np.array(self.alleles(transform=Allele.vcf2geno,samples_i=samples_i))
geno2 = np.array(variant.alleles(transform=Allele.vcf2geno,samples_i=samples_j))
non_missing = (geno1!=-1)&(geno2!=-1)
return (scipy.stats.pearsonr(geno1[non_missing],geno2[non_missing]))[0]**2
def discordance(self,variant,samples_i,samples_j):
'''
Returns a tuple (x,y,[z]) containing:
x : the number of discordant genotypes
y : the number of conpared genotypes
z : (optional) a vector containing per sample measures
'''
variant.conform(self.ref)
geno1 = np.array(self.alleles(transform=Allele.vcf2geno,samples_i=samples_i))
geno2 = np.array(variant.alleles(transform=Allele.vcf2geno,samples_i=samples_j))
non_missing = (geno1!=-1)&(geno2!=-1)
discordant = sum(geno1[non_missing]!=geno2[non_missing])
return (discordant,sum(non_missing))
def __sub__(self,variant):
''' returns the distance between SNPs '''
if self.chrom != variant.chrom:
return np.inf
else:
return abs(int(self.pos) - int(variant.pos))
@property
def is_polymorphic(self):
if self.ref in ('A','C','G','T') and self.alt in ('A','C','G','T'):
return True
else:
return False
    def conform(self,reference_genotype,GT_index=0):
        '''
        Conforms a variant to a reference genotype.

        Parameters
        ----------
        reference_genotype: str
            must be current alt or ref genotype. This
            becomes the new reference.
        GT_index: int
            position of the GT field within the colon-separated
            genotype string (default 0).

        Returns
        -------
        self, to allow chaining.

        Notes
        -----
        This happens IN PLACE: self.ref/self.alt are swapped and every
        genotype string in self.genos is rewritten via a 0/1 translation
        table applied to its GT field.
        '''
        # Just return if the genotype is already conformed
        if self.ref == reference_genotype:
            return self
        elif self.alt == self.ref and self.ref != reference_genotype:
            # Sometimes PLINK will assign both the alt and reference
            # to the same thing, just make everyone alternate
            self.ref = reference_genotype
            # Make the translation table
            # ('0' -> '1'; '1' stays '1': every allele becomes the alternate)
            trans = str.maketrans(
                {'0':'1',
                 '1':'1'}
            )
            def conform_genotype(genotype):
                # split out the GT
                fields = genotype.split(':')
                fields[GT_index] = str.translate(fields[GT_index],trans)
                return ':'.join(fields)
            self.genos = [conform_genotype(g) for g in self.genos]
        elif reference_genotype not in (self.alt,self.ref):
            # NOTE(review): adjacent string literals below concatenate
            # without a space ("one of" + "the reference") -- the runtime
            # message reads "one ofthe reference"; confirm before changing.
            raise TriAllelicError((
                'Cannot conform to genotype that is not one of'
                'the reference or alt {} != ({},{})'.format(
                    reference_genotype,
                    self.ref,
                    self.alt
                )
            ))
        elif not self.is_biallelic:
            raise TriAllelicError('cannot conform triallelic SNP: {}'.format(self.id))
        else:
            # Conform: swap alleles and flip every 0 <-> 1 in the GT field.
            self.alt,self.ref = self.ref,self.alt
            # Make the translation table
            trans = str.maketrans(
                {'0':'1',
                 '1':'0'}
            )
            def conform_genotype(genotype):
                # split out the GT
                fields = genotype.split(':')
                fields[GT_index] = str.translate(fields[GT_index],trans)
                return ':'.join(fields)
            self.genos = [conform_genotype(g) for g in self.genos]
        return self
|
schae234/PonyTools
|
ponytools/Variant.py
|
Python
|
mit
| 12,432
|
import os, pickle
from multiprocessing import Pool # @UnresolvedImport
from pydmrs.core import RealPred
# Input corpus directory and its pickled vocabulary on the group filestore.
DATA = '/anfs/bigdisc/gete2/wikiwoods/core-5'
VOCAB = '/anfs/bigdisc/gete2/wikiwoods/core-5-vocab.pkl'
# Converted node files are written alongside the input, suffixed '-nodes'.
OUTPUT = DATA + '-nodes'
# Number of worker processes for the multiprocessing pool.
PROC = 80
if not os.path.exists(OUTPUT):
    os.mkdir(OUTPUT)
with open(VOCAB, 'rb') as f:
    vocab_list = pickle.load(f)
# Map each vocabulary predicate to its index; nodes store indices, not preds.
pred_index = {RealPred.from_string(p):i for i,p in enumerate(vocab_list)}
def convert_triple(triple, nid):
    """
    Convert an SVO triple of RealPreds to a form suitable for training
    :param triple: RealPred triple (verb, agent, patient); agent/patient may be falsy/absent
    :param nid: next free nodeid
    :return: list of nodes, new next free nodeid

    Each node is a tuple (nodeid, pred_index, out_labels, out_ids,
    in_labels, in_ids); label 0 marks the agent link, label 1 the patient
    link.  The verb node shares its out_labels/out_ids list objects with
    this function so they can be filled in as arguments are added.
    """
    verb, agent, patient = triple
    verb_i = pred_index[verb]
    # NOTE(review): .get(..., None) means an out-of-vocabulary agent or
    # patient still produces a node with pred index None -- confirm that
    # downstream training code tolerates None indices.
    agent_i = pred_index.get(agent, None)
    patient_i = pred_index.get(patient, None)
    output = []
    verb_labs = []
    verb_ids = []
    output.append((nid, verb_i, verb_labs, verb_ids, [], []))
    new_id = nid+1
    if agent:
        output.append((new_id, agent_i, [], [], [0], [nid]))
        verb_labs.append(0)
        verb_ids.append(new_id)
        new_id += 1
    if patient:
        output.append((new_id, patient_i, [], [], [1], [nid]))
        verb_labs.append(1)
        verb_ids.append(new_id)
        new_id += 1
    return output, new_id
def process_file(fname):
    """
    Process all the triples in a file: load the pickled triples from DATA,
    convert them to node tuples, and pickle the result under OUTPUT.
    :param fname: filename (relative to DATA/OUTPUT)
    """
    print(fname)
    with open(os.path.join(DATA, fname), 'rb') as infile:
        triples = pickle.load(infile)
    nodes = []
    next_id = 0
    for triple in triples:
        converted, next_id = convert_triple(triple, next_id)
        nodes.extend(converted)
    with open(os.path.join(OUTPUT, fname), 'wb') as outfile:
        pickle.dump(nodes, outfile)
# Process all files in DATA
# Each worker converts whole files independently, so no shared state is
# needed beyond the read-only module globals.
with Pool(PROC) as p:
    p.map(process_file, os.listdir(DATA))
|
guyemerson/sem-func
|
src/preprocess/wikiwoods_convertcore.py
|
Python
|
mit
| 1,821
|
# forms.py
from django import forms
from .models import Client
class ClientForm(forms.ModelForm):
    """Model form for creating/editing a Client, with Bootstrap widget
    styling and visit-duration validation."""
    startdate = forms.DateField(required=False)
    enddate = forms.DateField(required=False)

    def clean_duration(self):
        """Validate that the visit duration is greater than zero and less
        than nine hours; raises ValidationError otherwise."""
        data = self.cleaned_data['duration']
        if data <= 0:
            raise forms.ValidationError("Visit duration must be greater than zero")
        if data >= 9:
            # Fixed typo in the user-facing message ("greter" -> "greater").
            raise forms.ValidationError("Visit duration can't be greater than 8 hours")
        return data

    class Meta:
        model = Client
        fields = ['name', 'regularity', 'dayofweek', 'duration', 'note', 'startdate', 'enddate']
        widgets = {
            'note': forms.Textarea(attrs={'cols': 50, 'rows': 5, 'class': 'form-control'}),
        }

    def __init__(self, *args, **kwargs):
        super(ClientForm, self).__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to the regular inputs and a
        # datepicker hook class to the date fields (same attrs as before,
        # expressed as loops instead of repeated statements).
        for field_name in ('name', 'regularity', 'dayofweek', 'duration'):
            self.fields[field_name].widget.attrs.update({'class': 'form-control'})
        for field_name in ('startdate', 'enddate'):
            self.fields[field_name].widget.attrs.update({'class': 'datepicker'})
|
dwagon/Scheduler
|
scheduler/client/forms.py
|
Python
|
gpl-2.0
| 1,340
|
# Copyright 2010, 2011 Kalamazoo College Computer Science Club
# <kzoo-cs-board@googlegroups.com>
# This file is part of LitHub.
#
# LitHub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LitHub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LitHub. If not, see <http://www.gnu.org/licenses/>.
import urllib2
import isbn
import datetime
from django.conf import settings
from django.contrib import messages
from django.core.paginator import Paginator, InvalidPage, EmptyPage, Page
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
import json
def get_book_details(isbn_no):
    """Look up book metadata for an ISBN via the Google Books API.

    Returns a dict with keys 'title', 'author', 'copyrightYear',
    'publisher' and 'thumbnail_url', or None when the ISBN is invalid,
    the lookup fails, or there are no results.  (Python 2 module: uses
    urllib2.)
    """
    try:
        isbn_no = isbn.clean_isbn(isbn_no)
        lookup_url = "https://www.googleapis.com/books/v1/volumes?"
        opts = {'q':'isbn:%s'%isbn_no}
        if hasattr(settings, "GOOGLE_API_KEY"):
            opts['key'] = settings.GOOGLE_API_KEY
        google_resp = urllib2.urlopen(lookup_url + urlencode(opts))
        results = json.loads(google_resp.read())
        google_resp.close()
        if int(results['totalItems']) > 0:
            info = {}
            # Choose the first item, regardless of anything else
            item = results['items'][0]
            vol = item['volumeInfo']
            info['title'] = vol.get('title')
            authors = vol.get('authors')
            # NOTE(review): if 'authors' is absent, len(None) raises here
            # and is swallowed by the broad except below (returns None).
            if len(authors) > 1:
                # Oxford-free "A, B and C" author string.
                info['author'] = "%s and %s"%(", ".join(authors[:-1]),
                        authors[-1])
            else:
                info['author'] = authors[0]
            # publishedDate may be YYYY or YYYY-MM-DD; keep the year part.
            info['copyrightYear'] = vol.get('publishedDate', '').split("-")[0]
            info['publisher'] = vol.get('publisher', '')
            info['thumbnail_url'] = (vol.get('imageLinks') or
                    {'na':'na'}).get('thumbnail', '')
            return info
    except Exception:
        # Best-effort lookup: any failure (bad ISBN, network error,
        # unexpected response shape) yields None rather than an exception.
        return None
def opengraph_list_book(request, copy):
    """Publish a Facebook OpenGraph 'list book' story for *copy*.

    Returns True on success; False when the session holds no Facebook
    access token or when publishing raises ValueError.
    """
    # Imported here to avoid a hard dependency when Facebook is unused.
    from fbconnect import utils as fbutils
    fb_at = request.session.get('fb_at', '')
    if fb_at:
        fb = fbutils.FBConnect(access_token=fb_at)
        try:
            # The published story expires after three days.
            params = {'expires_in': 60*60*24*3}
            fb.publish_og('list', 'book', settings.FB_REDIRECT_URL +\
                    reverse('bookswap.views.book_details',
                        args=[copy.book.id]), params=params)
            return True
        except ValueError:
            return False
        # an access_token can sometimes expire and we cannot do anything to
        # prevent it from doing so if we don't have offline access
    else:
        return False
class PaginatorN(Paginator):
    """Paginator that reads its page number from a GET parameter and clamps
    out-of-range requests to the last page instead of raising."""
    GET_PAGE_VAR = 'p'

    def __init__(self, object_list, request, per_page=12, *args, **kwargs):
        """Remember the request so the page number can be pulled from GET."""
        self.request = request
        Paginator.__init__(self, object_list, per_page, *args, **kwargs)

    def page(self, number):
        """Return the requested page; invalid or overflowing numbers fall
        back to the last page.

        Note: the original defined an unused inner ``PageN`` wrapper class
        here; that dead code has been removed.
        """
        try:
            return Paginator.page(self, number)
        except (EmptyPage, InvalidPage):
            return Paginator.page(self, self.num_pages)

    def page_auto(self):
        """Return the page named by the request's GET parameter, defaulting
        to page 1 when it is missing or not an integer."""
        try:
            page = int(self.request.GET.get(self.GET_PAGE_VAR, '1'))
        except ValueError:
            page = 1
        return self.page(page)

    def other_get_vars(self):
        """Return a query-string prefix that preserves all GET parameters
        except the page variable, ending with '<GET_PAGE_VAR>=' so that a
        page number can be appended directly in templates."""
        opts = self.request.GET.copy()
        try:
            opts.pop(self.GET_PAGE_VAR)
        except KeyError:
            pass
        if opts:
            return "?%s&%s="%(opts.urlencode(), self.GET_PAGE_VAR)
        else:
            return "?%s="%self.GET_PAGE_VAR
|
umangv/LitHub
|
LitHub/bookswap/utils.py
|
Python
|
gpl-3.0
| 4,056
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of pydsl.
#
# pydsl is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
# pydsl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pydsl. If not, see <http://www.gnu.org/licenses/>.
from pydsl.grammar.parsley import ParsleyGrammar
__author__ = "Ptolom"
__copyright__ = "Copyright 2014, Ptolom"
__email__ = "ptolom@hexifact.co.uk"
def load_parsley_grammar_file(filepath, root_rule="expr", repository=None):
    """Read a Parsley grammar definition from *filepath* and return a
    ParsleyGrammar built from it, rooted at *root_rule*."""
    with open(filepath, 'r') as grammar_file:
        grammar_source = grammar_file.read()
    return ParsleyGrammar(grammar_source, root_rule, repository)
|
nesaro/pydsl
|
pydsl/file/parsley.py
|
Python
|
gpl-3.0
| 1,017
|
# -*- coding: utf-8 -*-
# flake8: noqa
from input_manager import InputManager
# GPIO support is only available on hardware with the GPIO stack installed,
# so import it opportunistically and fall back silently elsewhere.
# NOTE(review): the module name 'gpio_inpput_manager' looks misspelled
# ("inpput") -- confirm it matches the actual file name before changing.
try:
    from gpio_inpput_manager import GPIOManager
except ImportError:
    pass
|
kdbdallas/Mopidy-Touchclient
|
mopidy_nowplayingtouch/input/__init__.py
|
Python
|
apache-2.0
| 162
|
from datapackage_pipelines.wrapper import ingest, spew
def get_votes(resource, data, stats):
    """Consume the votes resource, collecting per-session voter MK ids.

    Fills ``data['session_voters']`` (session_id -> set of MK ids) and the
    ``num_votes`` / ``num_vote_mks`` counters in ``stats``.
    """
    data['session_voters'] = {}
    stats['num_votes'] = 0
    stats['num_vote_mks'] = 0
    vote_attrs = ('mk_ids_pro', 'mk_ids_against', 'mk_ids_abstain')
    for vote in resource:
        session_voters = data['session_voters'].setdefault(vote['session_id'], set())
        for attr in vote_attrs:
            ids = vote[attr]
            if not ids:
                continue
            for mk_id in ids:
                session_voters.add(mk_id)
                stats['num_vote_mks'] += 1
        stats['num_votes'] += 1
def get_plenum(resource, data, stats):
    """Yield plenum sessions annotated with their voter MK id list.

    Sessions without recorded voters (unknown session id or empty voter
    set) get ``voter_mk_ids = None``; counters go into ``stats``.
    """
    stats.update(known_sessions=0, unknown_sessions=0)
    voters_by_session = data['session_voters']
    for session in resource:
        session_id = session['PlenumSessionID']
        if session_id in voters_by_session:
            stats['known_sessions'] += 1
            session['voter_mk_ids'] = list(voters_by_session[session_id])
        else:
            stats['unknown_sessions'] += 1
            session['voter_mk_ids'] = None
        # Normalize an empty voter list to None as well.
        if not session['voter_mk_ids']:
            session['voter_mk_ids'] = None
        yield session
def get_resources(resources, stats, data):
    """Route each resource by index: the votes resource is fully consumed
    here (and not re-emitted; it is also removed from the datapackage in
    get_datapackage), the plenum resource is wrapped with get_plenum, and
    everything else passes through unchanged."""
    for i, resource in enumerate(resources):
        if i == data['votes_index']:
            get_votes(resource, data, stats)
        elif i == data['plenum_index']:
            yield get_plenum(resource, data, stats)
        else:
            yield resource
def get_datapackage(datapackage, data):
    """Record the votes/plenum resource indices in ``data``, extend the
    plenum schema with ``voter_mk_ids``, and drop the votes resource
    descriptor from the datapackage."""
    for index, descriptor in enumerate(datapackage['resources']):
        resource_name = descriptor['name']
        if resource_name == 'view_vote_rslts_hdr_approved':
            data['votes_index'] = index
        elif resource_name == 'kns_plenumsession':
            data['plenum_index'] = index
            descriptor['schema']['fields'] += [{'name': 'voter_mk_ids', 'type': 'array'}]
    del datapackage['resources'][data['votes_index']]
    return datapackage
def main():
    """Pipeline entry point.

    ``ingest()`` yields (parameters, datapackage, resources); the appended
    ``({}, {})`` adds fresh ``stats`` and ``data`` accumulators shared by
    the resource processors below.
    """
    parameters, datapackage, resources, stats, data = ingest() + ({}, {})
    spew(get_datapackage(datapackage, data),
         get_resources(resources, stats, data),
         stats)
if __name__ == '__main__':
    main()
|
hasadna/knesset-data-pipelines
|
people/plenum_session_voters.py
|
Python
|
mit
| 2,135
|
"""
This code is exclusively for use of testing read_radex. Don't use it in any
production anything until you're sure it works.
"""
# Fractional "bandwidth": free spectral range around a line's frequency,
# used to decide which output lines get printed/matched.
bw = 0.01
def read_radex_old(outfile,flow,fupp,bw=bw):
    """
    A hack-ey means of reading a radex.out file.
    Cycles through it based on fixed format.

    Parameters
    ----------
    outfile : open file object positioned at the start of a radex.out block
    flow, fupp : target frequencies of the lower and upper transitions
    bw : fractional bandwidth used to match line frequencies

    Returns
    -------
    (temp, dens, col, TexLow, TexUpp, TauLow, TauUpp,
     TrotLow, TrotUpp, FluxLow, FluxUpp)

    NOTE(review): temp/dens/col are only bound if the corresponding header
    lines appear before the "FLUX" row; otherwise the return raises
    NameError -- confirm the input format guarantees them.
    """
    line = outfile.readline()
    words = line.split()
    # Scan the header until the column-header row (last word "FLUX"),
    # picking up the run parameters along the way.
    while (words[-1] != "FLUX"):
        line = outfile.readline()
        words = line.split()
        if words[1] == "T(kin)":
            temp = float(words[-1])
        if words[1] == "Density" and words[3] == "H2":
            dens = float(words[-1])
        if words[1] == "Column":
            col = float(words[-1])
    # Skip the units row, then read the first transition row.
    line = outfile.readline()
    line = outfile.readline()
    words = line.split()
    ftmp = float(words[4])
    # Advance until the line frequency falls within the bandwidth of flow.
    while ((ftmp < flow*(1-bw)) or (ftmp > flow/(1-bw))):
        line = outfile.readline()
        words = line.split()
        ftmp = float(words[4])
    low = float(words[-2])
    TexLow = float(words[6])
    TauLow = float(words[7])
    TrotLow = float(words[8])
    FluxLow = float(words[11])
    line = outfile.readline()
    words = line.split()
    ftmp = float(words[4])
    # Then advance to the transition matching fupp.
    while ((ftmp < fupp*(1-bw)) or (ftmp > fupp/(1-bw))):
        line = outfile.readline()
        words = line.split()
        ftmp = float(words[4])
    upp = float(words[-2])
    TexUpp = float(words[6])
    TauUpp = float(words[7])
    TrotUpp = float(words[8])
    FluxUpp = float(words[11])
    return temp,dens,col,TexLow,TexUpp,TauLow,TauUpp,TrotLow,TrotUpp,FluxLow,FluxUpp
def tryfloat(x):
    """Parse *x* as a float, falling back to 0.0 on invalid input."""
    try:
        return float(x)
    except ValueError:
        return 0.0
def read_radex(file,flow,fupp,bw=bw,debug=False):
    """
    Less hack-ey way to read radex.out files (Python 2 module: uses
    print statements).

    Scans one radex.out block for the transitions matching ``flow`` and
    ``fupp`` (within fractional bandwidth ``bw``) and returns
    (tkin, dens, col, TexLow, TexUpp, TauLow, TauUpp,
     TrotLow, TrotUpp, FluxLow, FluxUpp); returns 0 at end of file.

    NOTE(review): ``file`` shadows the Python 2 builtin, and pdens/odens
    are collected but never returned -- confirm before relying on them.
    """
    linenum = 0
    line = file.readline(); linenum+=1
    if debug: print line
    if line == '':
        return 0
    words = line.split()
    # Transition rows have '--' as their second token; frequency is column 4.
    if words[1] == '--':
        freq = tryfloat(words[4])
    else:
        freq = 0
    # Collect header parameters until the frequency matches flow.
    while not(freq*(1-bw) < flow < freq*(1+bw)):
        if words[1] == 'T(kin)':
            tkin = tryfloat(words[3])
        elif line.find("Density of H2") != -1:
            dens = tryfloat(words[5])
        elif line.find("Density of pH2") != -1:
            pdens = tryfloat(words[5])
        elif line.find("Density of oH2") != -1:
            odens = tryfloat(words[5])
        elif line.find("Column density") != -1:
            col = tryfloat(words[4])
        line = file.readline(); linenum+=1
        words = line.split()
        if words[1] == '--':
            freq = tryfloat(words[4])
    # Matched the lower transition: record its quantities.
    TexLow = tryfloat(words[6])
    TauLow = tryfloat(words[7])
    TrotLow = tryfloat(words[8])
    FluxLow = tryfloat(words[11])
    line = file.readline(); linenum+=1
    words = line.split()
    if words[1] == '--':
        freq = tryfloat(words[4])
    # Continue scanning transition rows until the frequency matches fupp.
    while not(freq*(1-bw) < fupp < freq*(1+bw)):
        line = file.readline(); linenum+=1
        if debug: print freq,flow,line
        words = line.split()
        if words[1] == '--':
            freq = tryfloat(words[4])
    TexUpp = tryfloat(words[6])
    TauUpp = tryfloat(words[7])
    TrotUpp = tryfloat(words[8])
    FluxUpp = tryfloat(words[11])
    # Skip any remaining transition rows so the file is positioned at the
    # start of the next block.
    while len(words) > 0 and words[1] == '--':
        line = file.readline(); linenum+=1
        words = line.split()
    return tkin,dens,col,TexLow,TexUpp,TauLow,TauUpp,TrotLow,TrotUpp,FluxLow,FluxUpp
|
tectronics/agpy
|
radex/read_radex.py
|
Python
|
mit
| 3,545
|
"""Collection of common English words, one per line."""
COMMON_WORDS_EN = """
I
a
able
about
above
act
add
afraid
after
again
against
age
ago
agree
air
all
allow
also
always
am
among
an
and
anger
animal
answer
any
appear
apple
are
area
arm
arrange
arrive
art
as
ask
at
atom
baby
back
bad
ball
band
bank
bar
base
basic
bat
be
bear
beat
beauty
bed
been
before
began
begin
behind
believe
bell
best
better
between
big
bird
bit
black
block
blood
blow
blue
board
boat
body
bone
book
born
both
bottom
bought
box
boy
branch
bread
break
bright
bring
broad
broke
brother
brought
brown
build
burn
busy
but
buy
by
call
came
camp
can
capital
captain
car
card
care
carry
case
cat
catch
caught
cause
cell
cent
center
century
certain
chair
chance
change
character
charge
chart
check
chick
chief
child
children
choose
chord
circle
city
claim
class
clean
clear
climb
clock
close
clothe
cloud
coast
coat
cold
collect
colony
color
column
come
common
company
compare
complete
condition
connect
consider
consonant
contain
continent
continue
control
cook
cool
copy
corn
corner
correct
cost
cotton
could
count
country
course
cover
cow
crease
create
crop
cross
crowd
cry
current
cut
dad
dance
danger
dark
day
dead
deal
dear
death
decide
decimal
deep
degree
depend
describe
desert
design
determine
develop
dictionary
did
die
differ
difficult
direct
discuss
distant
divide
division
do
doctor
does
dog
dollar
don't
done
door
double
down
draw
dream
dress
drink
drive
drop
dry
duck
during
each
ear
early
earth
ease
east
eat
edge
effect
egg
eight
either
electric
element
else
end
enemy
energy
engine
enough
enter
equal
equate
especially
even
evening
event
ever
every
exact
example
except
excite
exercise
expect
experience
experiment
eye
face
fact
fair
fall
family
famous
far
farm
fast
fat
father
favor
fear
feed
feel
feet
fell
felt
few
field
fig
fight
figure
fill
final
find
fine
finger
finish
fire
first
fish
fit
five
flat
floor
flow
flower
fly
follow
food
foot
for
force
forest
form
forward
found
four
fraction
free
fresh
friend
from
front
fruit
full
fun
game
garden
gas
gather
gave
general
gentle
get
girl
give
glad
glass
go
gold
gone
good
got
govern
grand
grass
gray
great
green
grew
ground
group
grow
guess
guide
gun
had
hair
half
hand
happen
happy
hard
has
hat
have
he
head
hear
heard
heart
heat
heavy
held
help
her
here
high
hill
him
his
history
hit
hold
hole
home
hope
horse
hot
hour
house
how
huge
human
hundred
hunt
hurry
ice
idea
if
imagine
in
inch
include
indicate
industry
insect
instant
instrument
interest
invent
iron
is
island
it
job
join
joy
jump
just
keep
kept
key
kill
kind
king
knew
know
lady
lake
land
language
large
last
late
laugh
law
lay
lead
learn
least
leave
led
left
leg
length
less
let
letter
level
lie
life
lift
light
like
line
liquid
list
listen
little
live
locate
log
lone
long
look
lost
lot
loud
love
low
machine
made
magnet
main
major
make
man
many
map
mark
market
mass
master
match
material
matter
may
me
mean
meant
measure
meat
meet
melody
men
metal
method
middle
might
mile
milk
million
mind
mine
minute
miss
mix
modern
molecule
moment
money
month
moon
more
morning
most
mother
motion
mount
mountain
mouth
move
much
multiply
music
must
my
name
nation
natural
nature
near
necessary
neck
need
neighbor
never
new
next
night
nine
no
noise
noon
nor
north
nose
note
nothing
notice
noun
now
number
numeral
object
observe
occur
ocean
of
off
offer
office
often
oh
oil
old
on
once
one
only
open
operate
opposite
or
order
organ
original
other
our
out
over
own
oxygen
page
paint
pair
paper
paragraph
parent
part
particular
party
pass
past
path
pattern
pay
people
perhaps
period
person
phrase
pick
picture
piece
pitch
place
plain
plan
plane
planet
plant
play
please
plural
poem
point
poor
populate
port
pose
position
possible
post
pound
power
practice
prepare
present
press
pretty
print
probable
problem
process
produce
product
proper
property
protect
prove
provide
pull
push
put
quart
question
quick
quiet
quite
quotient
race
radio
rail
rain
raise
ran
range
rather
reach
read
ready
real
reason
receive
record
red
region
remember
repeat
reply
represent
require
rest
result
rich
ride
right
ring
rise
river
road
rock
roll
room
root
rope
rose
round
row
rub
rule
run
safe
said
sail
salt
same
sand
sat
save
saw
say
scale
school
science
score
sea
search
season
seat
second
section
see
seed
seem
segment
select
self
sell
send
sense
sent
sentence
separate
serve
set
settle
seven
several
shall
shape
share
sharp
she
sheet
shell
shine
ship
shoe
shop
shore
short
should
shoulder
shout
show
side
sight
sign
silent
silver
similar
simple
since
sing
single
sister
sit
six
size
skill
skin
sky
slave
sleep
slip
slow
small
smell
smile
snow
so
soft
soil
soldier
solution
solve
some
son
song
soon
sound
south
space
speak
special
speech
speed
spell
spend
spoke
spot
spread
spring
square
stand
star
start
state
station
stay
stead
steam
steel
step
stick
still
stone
stood
stop
store
story
straight
strange
stream
street
stretch
string
strong
student
study
subject
substance
subtract
success
such
sudden
suffix
sugar
suggest
suit
summer
sun
supply
support
sure
surface
surprise
swim
syllable
symbol
system
table
tail
take
talk
tall
teach
team
teeth
tell
temperature
ten
term
test
than
thank
that
the
their
them
then
there
these
they
thick
thin
thing
think
third
this
those
though
thought
thousand
three
through
throw
thus
tie
time
tiny
tire
to
together
told
tone
too
took
tool
top
total
touch
toward
town
track
trade
train
travel
tree
triangle
trip
trouble
truck
true
try
tube
turn
twenty
two
type
under
unit
until
up
us
use
usual
valley
value
vary
verb
very
view
village
visit
voice
vowel
wait
walk
wall
want
war
warm
was
wash
watch
water
wave
way
we
wear
weather
week
weight
well
went
were
west
what
wheel
when
where
whether
which
while
white
who
whole
whose
why
wide
wife
wild
will
win
wind
window
wing
winter
wire
wish
with
woman
women
won't
wonder
wood
word
work
world
would
write
written
wrong
wrote
yard
year
yellow
yes
yet
you
young
your
""".split()
|
patrickshuff/artofmemory
|
artofmemory/data/words.py
|
Python
|
mit
| 5,904
|
# -*- encoding: utf-8 -*-
"""Defines all models to admin for Django Admin UI."""
from django.contrib import admin
from models import *
# Register every vote-app model with the default ModelAdmin so each is
# browsable and editable in the Django admin site.
admin.site.register(Organization)
admin.site.register(Grade)
admin.site.register(Group)
admin.site.register(Time)
admin.site.register(Student)
admin.site.register(Teacher)
admin.site.register(Rate)
|
dairdr/voteapp
|
voteapp/apps/vote/admin.py
|
Python
|
mit
| 333
|
__author__ = 'roelvdberg@gmail.com'
import os
import lxml
import gzip
import zipfile
import json
working_directory = os.getcwd()
root = working_directory
def make_file(before, after, start, end, filenames, location,
        other_location, encoding=False):
    """Assemble a test fixture file from template fragments.

    Writes ``before + (one start/path/end entry per filename) + after`` to
    ``working_directory + location``.  When *encoding* is a codec name the
    filenames are prefixed with it and the content is written encoded in
    binary mode; otherwise the file is written as text.
    """
    if encoding:
        filenames = [encoding + '_' + f for f in filenames]
    between = ''.join(start + root + '/' + other_location + '/' + filename +
                      end for filename in filenames)
    content = before + between + after
    if encoding:
        # BUG FIX: str.encode returns a new bytes object -- the original
        # discarded the result and then wrote a str to a file opened in
        # binary mode, which raises TypeError on Python 3.
        content = content.encode(encoding)
    with open(working_directory + location, 'wb' if encoding else 'w') as f:
        f.write(content)
sitemapindex_before = '''
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
'''
sitemapindex_start = '''
<sitemap>
<loc>'''
sitemapindex_end = '''</loc>
</sitemap>
'''
sitemapindex_after = '''
</sitemapindex>
'''
sitemap1_before = '''
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
'''
sitemap1_start = '''
<url>
<loc>'''
sitemap1_end = '''</loc>
<lastmod>2015-07-25T18:10:32+00:00</lastmod>
</url>
'''
sitemap1_after = '''
</urlset>
'''
xml_filenames = ['1', '2.xml']
html_filenames = ['vk', 'nrc.htm', 'trouw.html', 'telegraaf', 'bndestem']
from test.webpage_testcases import *
# Create the fixture directories; ignore them already existing from a
# previous run.
try:
    os.mkdir(working_directory + '/sitemap')
    os.mkdir(working_directory + '/site')
except FileExistsError:
    pass
# Sitemap index pointing at the sitemap XML files.
make_file(sitemapindex_before, sitemapindex_after, sitemapindex_start,
          sitemapindex_end, xml_filenames, '/sitemap/sitemapindex', 'sitemap')
# Each sitemap lists every test HTML page.
for filename in xml_filenames:
    make_file(sitemap1_before, sitemap1_after, sitemap1_start, sitemap1_end,
              html_filenames, '/sitemap/' + filename, 'site')
# Write the HTML pages themselves; `papers` is provided by the
# `from test.webpage_testcases import *` star import above -- TODO confirm.
for i, filename in enumerate(html_filenames):
    make_file('', '', '', papers[i],
              [''], '/site/' + filename, '')
# Record where the sitemap index lives for the crawler under test.
with open('sitemapindexlocation.txt', 'w') as f:
    f.write(root + '/sitemap/sitemapindex')
|
RoelvandenBerg/nlnieuwscrawler
|
test/sitemap_testcases.py
|
Python
|
gpl-2.0
| 2,047
|
from datagrowth.resources import ShellResource
|
fako/datascope
|
src/core/models/resources/shell.py
|
Python
|
gpl-3.0
| 47
|
#!/usr/bin/python
# Copyright 2017 Dhvani Patel
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
from check_eclipse_syntax import checkEclipseSyntax
from compile_error import CompileError
import unittest
ERROR_TEST = """public class HelloWorld {
public static void main(String[] args)
// Prints "Hello, World" to the terminal window.
System.out.println("Hello, World)
}
}
"""
class TestStringMethods(unittest.TestCase):
    """Smoke tests for checkEclipseSyntax.

    NOTE(review): the class name looks copied from the unittest docs
    example; a clearer name would be TestEclipseSyntax.
    """
    def test_syntax_ok(self):
        # A well-formed class produces no error report (None).
        toTest = checkEclipseSyntax('public class Hello{ int a= 5;}')
        self.assertTrue(toTest is None)
    def test_syntax_error(self):
        # ERROR_TEST is malformed; the checker returns parallel lists --
        # presumably error numbers and their line numbers; verify against
        # check_eclipse_syntax.
        toTest = checkEclipseSyntax(ERROR_TEST)
        self.assertEqual(toTest[0], [1, 2, 3, 4, 5])
        self.assertEqual(toTest[1], [3, 5, 5, 5, 5])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
naturalness/unnaturalcode
|
unnaturalcode/test_eclipse.py
|
Python
|
agpl-3.0
| 1,489
|
import numpy
import subprocess
import math
from math import sqrt, exp
import matplotlib.pyplot as plt
import random
import scipy.signal
from pysrc import reader
from pysrc import caliberation
from pysrc import filters
import sys
import pdb
def FindTimeShiftA(a, b):
    """Return (as float) the lag at which the full cross-correlation of
    *a* against *b* peaks; positive means *a* lags *b*."""
    lags = numpy.arange(1 - len(a), len(a))
    correlation = scipy.signal.correlate(a, b, "full")
    return float(lags[numpy.argmax(correlation)])
def FindTimeShiftB(a, b):
    """Return the circular (FFT-based) time shift of *a* relative to *b*.

    The peak of the circular cross-correlation ifft(fft(a) * conj(fft(b)))
    gives the shift; it is wrapped into the signed range (-n/2, n/2].
    """
    # BUG FIX: scipy.fft / scipy.ifft / scipy.conj are no longer callables
    # at SciPy's top level (scipy.fft is a module in modern releases), so
    # use the equivalent numpy.fft routines -- identical math.
    af = numpy.fft.fft(a)
    bf = numpy.fft.fft(b)
    c = numpy.fft.ifft(af * numpy.conj(bf))
    time_shift = numpy.argmax(abs(c))
    n = len(a)
    # argmax is in [0, n-1], so the first branch is defensive only.
    if(time_shift < -n/2): time_shift = time_shift + n
    if(time_shift > n/2): time_shift = time_shift - n
    return time_shift
def FindTimeShiftMatrix(frameSamples):
    """Build pairwise time-shift matrices for every channel pair in
    *frameSamples* (a dict of channel -> samples) using both estimators,
    and print them (Python 2 print statements) when any shift is nonzero.

    NOTE(review): ``channelCount`` and ``timeStamp`` are not defined in
    this module -- presumably globals set by the caller; confirm.
    """
    corrMatrixA = numpy.matrix([[0.0]*channelCount] * channelCount)
    corrMatrixB = numpy.matrix([[0.0]*channelCount] * channelCount)
    for i, (ci, di) in enumerate(frameSamples.items()):
        for j, (cj, dj) in enumerate(frameSamples.items()):
            if(i <= j):
                valA = FindTimeShiftA(di, dj)
                valB = FindTimeShiftB(di, dj)
                # Shift matrices are antisymmetric: shift(j,i) == -shift(i,j).
                corrMatrixA[(i,j)] = valA
                corrMatrixA[(j,i)] = -valA
                corrMatrixB[(i,j)] = valB
                corrMatrixB[(j,i)] = -valB
    if(numpy.count_nonzero(corrMatrixA) != 0 or numpy.count_nonzero(corrMatrixB) != 0):
        print "Timestamp ", timeStamp
        print corrMatrixA
        print corrMatrixB
|
ap1/PixeeBel
|
code/pysrc/TimeShiftAnjul.py
|
Python
|
mit
| 1,374
|
import bisect
import json
import logging
import math
import numpy as np
import random
from learning.rlpower_controller import RLPowerController
from scipy.interpolate import splrep, splev
from hal.inputs.fitness_querier import FitnessQuerier
__author__ = 'matteo'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# This class needs hard pep8
class RLPowerAlgorithm:
    """RL PoWER policy-search learner for spline-based gait controllers.

    Keeps a fixed-size ranking of the best-scoring splines seen so far,
    proposes new candidate splines as the current spline plus Gaussian
    exploration noise plus a fitness-weighted pull towards the ranked
    elites, and grows the spline resolution over the course of the run.
    All learning state is checkpointed to a JSON file so an interrupted
    run can resume.
    """
    # Tiny constant added to the fitness total so the weighted average in
    # _generate_spline never divides by zero when the ranking is empty.
    epsilon = 10 ** -10
    def __init__(self, config_parameters):
        """Configure the learner from the *config_parameters* dict and
        restore any previously checkpointed state from the runtime file."""
        self.RANKING_SIZE = config_parameters['ranking_size']
        self.NUM_SERVOS = len(config_parameters['servo_pins'])
        # In the original algorithm they used variance and square-rooted it every time. We're using standard deviation
        # and decay parameter is also a square root of the parameter from original algorithm
        self._sigma = math.sqrt(config_parameters['variance'])
        self._sigma_decay = math.sqrt(config_parameters['sigma_decay_squared'])
        self._initial_spline_size = config_parameters['initial_spline_size']
        self._end_spline_size = config_parameters['end_spline_size']
        self._number_of_fitness_evaluations = config_parameters['number_of_fitness_evaluations']
        self._fitness_evaluation = config_parameters['fitness_evaluation_method']
        self._runtime_data_file = config_parameters['runtime_data_file']
        self._light_fitness_weight = config_parameters['light_fitness_weight']
        self._current_spline_size = self._initial_spline_size
        self._current_evaluation = 0
        # Create an instance of fitness querier
        if self._fitness_evaluation == 'auto':
            self._fitness_querier = FitnessQuerier(config_parameters)
            self._fitness_querier.start()
        # Recover evaluation data from tmp file
        self._runtime_data = self._load_runtime_data_from_file(self._runtime_data_file)
        if 'last_spline' in self._runtime_data:
            # Resume from checkpoint.
            self.ranking = self._runtime_data['ranking']
            self._current_spline = self._runtime_data['last_spline']
            self._sigma = self._runtime_data['sigma']
            self._current_spline_size = len(self._current_spline[0])
            self._current_evaluation = self._runtime_data['evaluation']
        else:
            self.ranking = []
            # Spline initialisation
            self._current_spline = np.array(
                [[0.5 + random.normalvariate(0, self._sigma) for x in range(self._initial_spline_size)]
                 for y in range(self.NUM_SERVOS)])
        self.controller = RLPowerController(self._current_spline)
    def _generate_spline(self):
        """Propose the next candidate spline: current spline plus Gaussian
        exploration noise plus a fitness-weighted average pull towards the
        ranked elite splines."""
        # Add a weighted average of the best splines seen so far
        total = self.epsilon  # something similar to 0, but not 0 ( division by 0 is evil )
        modifier = np.zeros(self._current_spline.shape)
        for (fitness, spline) in self.ranking:
            total += fitness
            modifier += (spline - self._current_spline) * fitness
        # random noise for the spline
        noise = np.array(
            [[random.normalvariate(0, self._sigma) for x in range(self._current_spline_size)]
             for y in range(self.NUM_SERVOS)])
        return self._current_spline + noise + modifier / total
    def skip_evaluation(self):
        """Discard the running evaluation (no fitness is recorded) and
        immediately start a new one with a fresh candidate spline."""
        logger.info("Skipping evaluation, starting new one")
        self._current_spline = self._generate_spline()
        self.controller.set_spline(self._current_spline)
        self._fitness_querier.start()
    def next_evaluation(self, light_sensor_value=0):
        """Finish the current evaluation and start the next one.

        Combines movement fitness with a weighted light-sensor bonus,
        stores the result in the ranking, grows the spline resolution when
        the schedule requires it, then installs a freshly generated spline,
        decays sigma and checkpoints.
        """
        self._current_evaluation += 1
        logger.info("current spline size: {}".format(self._current_spline_size))
        # generate fitness
        movement_fitness = self.get_current_fitness()
        light_fitness = self._light_fitness_weight * light_sensor_value
        current_fitness = movement_fitness + light_fitness
        logger.info("Last evaluation fitness: {} (movement: {} + light: {})".format(current_fitness, movement_fitness, light_fitness))
        logger.info("Current position: {}".format(self._fitness_querier.get_position()))
        # save old evaluation
        self.save_in_ranking(current_fitness, self._current_spline)
        # check if is time to increase the number of evaluations
        if math.floor((self._end_spline_size - self._initial_spline_size)/self._number_of_fitness_evaluations *
                      self._current_evaluation) + 3 > self._current_spline_size:
            self._current_spline_size += 1
            self._current_spline = self.recalculate_spline(self._current_spline, self._current_spline_size)
            # Re-interpolate every ranked spline so all splines stay the
            # same size and remain comparable.
            for number, (fitness, rspline) in enumerate(self.ranking):
                self.ranking[number] = _RankingEntry((fitness, self.recalculate_spline(rspline, self._current_spline_size)))
        # update values
        self._current_spline = self._generate_spline()
        self.controller.set_spline(self._current_spline)
        self._sigma *= self._sigma_decay
        self._save_runtime_data_to_file(self._runtime_data_file)
        self._fitness_querier.start()
    def recalculate_spline(self, spline, spline_size):
        """Resample *spline* row-wise (one row per servo, periodic
        interpolation) to *spline_size* control points."""
        return np.apply_along_axis(self._interpolate, 1, spline, spline_size + 1)
    def _interpolate(self, spline, spline_size):
        """Periodic cubic-spline resampling of one servo's control row."""
        # Close the loop by repeating the first point, fit a periodic
        # spline, sample at the new resolution, then drop the duplicated
        # endpoint again.
        spline = np.append(spline, spline[0])
        x = np.linspace(0, 1, len(spline))
        tck = splrep(x, spline, per=True)
        x2 = np.linspace(0, 1, spline_size)
        return splev(x2, tck)[:-1]
    def get_current_fitness(self):
        """Return the fitness of the evaluation that just finished, using
        the configured method ('manual', 'random' or 'auto'); raises
        NameError for an unknown method."""
        # Manual fitness evaluation
        if self._fitness_evaluation == 'manual':
            fitness = float(input("Enter fitness of current gait: "))
        # Random fitness (for testing purposes)
        elif self._fitness_evaluation == 'random':
            fitness = 5 + random.normalvariate(0, 2)
        elif self._fitness_evaluation == 'auto':
            fitness = self._fitness_querier.get_fitness()
        else:
            logger.error("Unknown fitness evaluation method")
            raise NameError("Unknown fitness evaluation method")
        return fitness
    def save_in_ranking(self, current_fitness, current_spline):
        """Insert the (fitness, spline) pair into the ascending-sorted
        ranking, evicting the worst entry (index 0) when full, and
        checkpoint the learner state."""
        if len(self.ranking) < self.RANKING_SIZE:
            bisect.insort(self.ranking, _RankingEntry((current_fitness, current_spline)))
        elif current_fitness > self.ranking[0][0]:
            bisect.insort(self.ranking, _RankingEntry((current_fitness, current_spline)))
            self.ranking.pop(0)
        self._save_runtime_data_to_file(self._runtime_data_file)
    def _load_runtime_data_from_file(self, filename):
        """Load checkpointed state from *filename*, rebuilding numpy arrays
        and _RankingEntry objects; return {} when the file is missing
        (fresh run)."""
        try:
            with open(filename) as json_data:
                d = json.load(json_data)
                ranking_serialized = d['ranking']
                ranking = [_RankingEntry((elem['fitness'], np.array(elem['spline']))) for elem in ranking_serialized]
                d['ranking'] = ranking
                d['last_spline'] = np.array(d['last_spline'])
                return d
        except IOError:
            return {}
    def _save_runtime_data_to_file(self, filename):
        """Checkpoint ranking, current spline, sigma and the evaluation
        counter as JSON (numpy arrays are serialized as nested lists)."""
        ranking_serialized = [{'fitness': f, 'spline': s.tolist()} for (f, s) in self.ranking]
        data = {'ranking': ranking_serialized,
                'last_spline': self._current_spline.tolist(),
                'sigma': self._sigma,
                'evaluation': self._current_evaluation
                }
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)
class _RankingEntry(tuple):
def __lt__(self, other):
return other[0] > self[0]
def __gt__(self, other):
return not self.__lt__(other)
|
ci-group/revolve-brain
|
brain/learning/rlpower_algorithm.py
|
Python
|
lgpl-3.0
| 7,640
|
import sys
import heapq
import uvint
from pathlib import Path
from status import Status
def main():
    """Compress the file at sys.argv[1] into sys.argv[2].

    Uses greedy repeated-substring replacement with word lengths in [4, 10].
    """
    path = Path(sys.argv[1])
    out = Path(sys.argv[2])
    # Renamed from `bytes`, which shadowed the builtin type.
    data = path.read_bytes()
    min_length = 4
    max_length = 10
    compressor = Compressor(data, min_length, max_length)
    with open(out, 'wb') as f:
        # compress_to_file() writes to `f` and returns None; the previous
        # `data = ...` binding of its result was dead code.
        compressor.compress_to_file(f)
class Compressor:
    """Greedy dictionary compressor.

    Repeatedly picks the most profitable repeated substring ("word"),
    replaces its free occurrences with a symbol id, then serialises the
    symbol table followed by the command stream (raw bytes / symbol refs).
    """

    def __init__(self, input, min_length, max_length):
        # `input` is the raw bytes to compress; word lengths considered are
        # in [min_length, max_length].
        self.input = input
        self.image = Image(len(input))
        self.min_length = min_length
        self.max_length = max_length

    def compress_to_file(self, file):
        """Run the greedy replacement loop and write the result to `file`."""
        self.histogram = self.__build_histogram()
        symbols = []
        status = Status()
        total = len(self.histogram)
        while True:
            proc = 100 * (total - len(self.histogram)) / total
            status(f"Compressing {proc:.2f}%")
            best = self.get_best()
            if best is None:
                break
            n = len(best.word)
            symbol = len(symbols)
            image = self.image
            # Claim every occurrence that does not overlap earlier claims.
            for idx in best.indices:
                if image.is_free(idx, n):
                    self.image.put_symbol(symbol, idx, n)
            # Bump the image version so cached Word entries know to refresh.
            image.version += 1
            symbols.append(best.word)
        status.clear()
        commands = self.__input_for_encoding()
        #self.__check(symbols, commands)
        self.__compress(file, symbols, commands)

    def __input_for_encoding(self):
        """Walk the image and build the command list: raw byte runs
        interleaved with int symbol ids."""
        result = []
        raw = b''
        for c, status in zip(self.input, self.image.img):
            if status is used:
                continue
            if status is free:
                raw += c.to_bytes(1, "little")
            else:
                if raw:
                    result.append(raw)
                    raw = b''
                result.append(status)  # a symbol
        if raw:
            result.append(raw)
        return result

    def __compress(self, file, symbols, commands):
        """Serialise the symbol table and command stream with uvint framing."""
        # 1a. compress symbols
        for symbol in symbols:
            file.write(uvint.encode(len(symbol)))
            file.write(symbol)
        # 1b. mark the last entry with length 0
        file.write(uvint.encode(0))
        # 2. compress either 1) raw substrings or 2) symbol references
        for item in commands:
            if type(item) is bytes:
                # raw string is encoded as k=[0..127] + k*byte
                while item:
                    k = min(len(item), 127)
                    file.write(uvint.encode(k))
                    file.write(item[:k])
                    item = item[k:]
            else:
                # symbol is encoded as 128 + symbol
                file.write(uvint.encode(item + 128))

    def __check(self, symbols, commands):
        # Debug helper: re-expand the commands and verify round-trip equality.
        tmp = b''
        for item in commands:
            if type(item) is bytes:
                tmp += item
            else:
                tmp += symbols[item]
        assert tmp == self.input

    def get_best(self):
        """Pop the heaviest still-valid word, refreshing stale entries."""
        histogram = self.histogram
        while len(histogram) > 0:
            best = heapq.heappop(histogram)
            if not self.update_needed(best):
                return best
            self.update_word(best)
            # Only re-queue words that still occur more than once.
            if len(best.indices) > 1:
                heapq.heappush(histogram, best)
        return None

    def update_needed(self, word):
        """True when some occurrence of `word` has been overwritten since the
        word's cached image version."""
        if word.version == self.image.version:
            return False
        length = len(word.word)
        for idx in word.indices:
            if not self.image.is_free(idx, length):
                return True
        return False

    def update_word(self, word):
        # Drop occurrences that overlap already-claimed bytes and re-stamp.
        length = len(word.word)
        word.indices = [idx for idx in word.indices if self.image.is_free(idx, length)]
        word.version = self.image.version

    def __build_histogram(self):
        """Collect every substring of the configured lengths that occurs at
        least twice, sorted heaviest-first (Word.__lt__ is inverted)."""
        freq = {}
        input = self.input
        n = len(input)
        status = Status()
        for i in range(0, n):
            status("Building histogram %d/%d (%0.2f%%)" % (i, n, 100 * i / n))
            for length in range(self.min_length, self.max_length + 1):
                substr = input[i:i+length]
                if len(substr) != length:
                    break
                if substr not in freq:
                    freq[substr] = []
                freq[substr].append(i)
        histogram = [Word(word, indices) for word, indices in freq.items() if len(indices) > 1]
        histogram.sort()
        status.clear()
        return histogram
# Cell markers for Image: `free` means the byte is still unclaimed; `used`
# marks a byte consumed by a replacement (identity-compared sentinel).
free = None
used = object()


class Image:
    """Records which bytes of the input have already been replaced.

    Each cell of `img` holds `free`, `used`, or an int symbol id stored at
    the first byte of a replaced range.  `version` is bumped externally so
    cached histogram entries can detect staleness.
    """

    def __init__(self, length):
        self.img = [free] * length
        self.version = 0

    def is_free(self, idx, length):
        """Return True iff every byte in [idx, idx + length) is still free."""
        return all(self.img[pos] is free for pos in range(idx, idx + length))

    def put_symbol(self, symbol, idx, length):
        """Reserve [idx, idx + length); the first cell keeps the symbol id."""
        for pos in range(idx, idx + length):
            self.img[pos] = used
        self.img[idx] = symbol
class Word:
    """A candidate dictionary word plus the input offsets where it occurs."""

    def __init__(self, word, indices):
        self.word = word
        self.indices = indices
        self.version = 0

    @property
    def weight(self):
        """
        How profitable is to replace this word
        """
        return len(self.word) * len(self.indices)

    def __repr__(self):
        return '<%s: %d [weight=%d]>' % (self.word, len(self.indices), self.weight)

    def __lt__(self, other):
        # Deliberately inverted: heapq/sort then yield the heaviest word first.
        return self.weight > other.weight
# Script entry point: compress argv[1] into argv[2].
if __name__ == '__main__':
    main()
|
WojciechMula/toys
|
fpc-compression/compress.py
|
Python
|
bsd-2-clause
| 5,796
|
#!/usr/bin/env python
#
# user.py - Autobot for Nikki UP2U World Traveller New 2
#
# Copyright (C) 2014 Haoliang Wang
#
# 09/09/2014
#
from config import *
from base import *
from utils import *
class User(object):
    """Game-account state and actions for the Nikki UP2U bot.

    All server communication goes through the module-level `action` helper
    (imported from base/utils via star imports — not visible here).
    NOTE(review): this is Python 2 code (print statements).
    """
    # Class-level defaults.  NOTE(review): the mutable class attributes
    # below are shared across instances — confirm only one User is created.
    name = ""
    gold = 0
    jewel = 0
    max_p = 0
    power = 0
    delay = 0
    session_id = ""
    did = ""
    task_dresses = {}
    task_list = []
    backpack = []
    bp_dict = {}
    lottery_info = {}

    def __init__(self):
        pass

    def check_login_state(self):
        pass

    def query_power(self):
        # Refresh current power and the delay until the next power tick.
        ret = action('power')
        if ret != {}:
            self.power = ret['data']['power']
            self.delay = int(ret['data']['delay'])

    def update_user(self):
        # Refresh power plus name / gold / jewel / max power from the server.
        self.query_power()
        ret = action('user')
        if ret != {}:
            data = ret['data']
            self.name, self.max_p = data['name'], data['maxpower']
            self.gold, self.jewel = data['gold'], data['jewel']

    def show_user(self):
        # Log and print a one-line account summary.
        fmt = em("Name %s Gold %s Jewel %s Power %s/%s")
        msg = fmt % (self.name, self.gold, self.jewel, self.power, self.max_p)
        log.info(msg)
        print msg

    def wait_power(self, power_threshold):
        # Block (polling the server) until power suffices for one more task.
        while True:
            self.query_power()
            demand = power_threshold - self.power + POWER_PER_TASK
            if demand <= 0:
                break
            else:
                msg = "Wait power %d => %d " % (self.power, self.power + demand)
                log.info(msg)
                wait(self.delay + (demand - 1) * POWER_INTERVAL, 0, msg)

    def do_task(self, task_id, map_id):
        # NOTE(review): reads the module-level `task_dresses` table, not
        # self.task_dresses — confirm that is intentional.
        dress = task_dresses[map_id][task_id]
        ret = action('task_set', task_id = task_id, map_id = map_id)
        ret = action('task', task_id = task_id, dress = dress)
        if ret != {}:
            data = ret['data']
            fmt = "Map %s Task %s Grade %s Score %d"
            msg = fmt % (map_id, task_id, data['grade'], data['score'])
            log.info(msg)
            print msg

    def repeat_task(self, task_list, power_threshold, loop_count = 0):
        # Cycle through `task_list` forever, or `loop_count` times if given.
        count = 0
        while True:
            self.wait_power(power_threshold)
            task = task_list[count % len(task_list)]
            self.do_task(task['task_id'], task['map_id'])
            count = count + 1
            if count == loop_count:
                break

    def play_lottery(self, lot, count, repeat = False, cnt = 0, th = 15500):
        # Play the lottery while gold stays above `th`; collect newly seen
        # clothing items (type 3 prizes) and save them at the end.
        self.get_backpack()
        new_clothes = []
        n = 0
        while True:
            self.update_user()
            if self.gold > th:
                cnt = cnt - 1
                ret = action('lottery', lot = lot, count = count)
                n += 1
                if ret != {}:
                    for item in ret['data']['prize']:
                        if item['type'] == 3 and not item['content'] in self.bp_dict:
                            print "New item %s obtained" % item['content']
                            new_clothes.append(get_cloth_info(item['content']))
                if not repeat or cnt == 0:
                    break
            else:
                break
        print "%d Lottery played %d newly obtained items saved" % (n, save_clothes(new_clothes, "Lottery"))

    def get_backpack(self, mtype = 0):
        # Fetch the backpack and rebuild the itemid -> quantity lookup.
        ret = action('backpack', mtype = mtype)
        if ret != {}:
            self.backpack = ret['data']
            self.bp_dict = {}
            for item in self.backpack:
                self.bp_dict[item['itemid']] = item['quantity']

    def sell_backpack(self, item_id, count, mtype):
        ret = action('sell', mtype = mtype, item_id = item_id, count = count)
        if ret != {}:
            print "Items Id %d Sold %d" % (item_id, count)

    def sweep_backpack(self, mtype = 0):
        # Sell all duplicates, keeping exactly one of each item.
        self.get_backpack(mtype)
        for item in self.backpack:
            if item['quantity'] > 1:
                self.sell_backpack(item['itemid'], item['quantity'] - 1, mtype)

    def find_best_cloth(self, task_list):
        # For each task, look up the dresses that earn an "S" grade in the
        # cracked game DB, keep the ones we own, and save them to disk.
        import sqlite3
        conn = sqlite3.connect('db/Introduction_cracked.db')
        c = conn.cursor()
        tl = []
        self.get_backpack()
        for task in task_list:
            tl.append(task['task_id'])
        for task in tl:
            c.execute('SELECT dress_id FROM T_Grading WHERE task_id=? AND grade="S"', (task,))
            ret = c.fetchall()
            good_clothes = []
            for cloth_id in [int(item[0]) for item in ret]:
                if cloth_id in self.bp_dict:
                    good_clothes.append(cloth_id)
            save_clothes([get_cloth_info(i) for i in good_clothes], "Task-"+str(task)+"-")
        conn.close()
|
haoliangx/Autobot-Nikki-UP2U
|
user.py
|
Python
|
gpl-2.0
| 3,843
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import json
import yaml
from kubernetes_py.utils import is_valid_string
class DeleteOptions(object):
    """
    http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_deleteoptions
    """

    def __init__(self):
        super(DeleteOptions, self).__init__()
        # TODO(froch): add support for the below.
        # self._preconditions = None
        self._kind = "DeleteOptions"
        self._api_version = "v1"
        self._grace_period_seconds = 0
        self._orphan_dependents = False

    # ------------------------------------------------------------------------------------- kind

    @property
    def kind(self):
        return self._kind

    @kind.setter
    def kind(self, k=0):
        if not is_valid_string(k):
            raise SyntaxError("DeleteOptions: kind: [ {0} ] is invalid.".format(k))
        self._kind = k

    # ------------------------------------------------------------------------------------- apiVersion

    @property
    def api_version(self):
        return self._api_version

    @api_version.setter
    def api_version(self, v=0):
        if not is_valid_string(v):
            raise SyntaxError("DeleteOptions: api_version: [ {0} ] is invalid.".format(v))
        # Bug fix: previously assigned to self._kind, which clobbered `kind`
        # and silently left `api_version` unchanged.
        self._api_version = v

    # ------------------------------------------------------------------------------------- grace period seconds

    @property
    def grace_period_seconds(self):
        return self._grace_period_seconds

    @grace_period_seconds.setter
    def grace_period_seconds(self, secs=0):
        if not isinstance(secs, int):
            raise SyntaxError("DeleteOptions: grace_period_seconds: [ {0} ] is invalid.".format(secs))
        self._grace_period_seconds = secs

    # ------------------------------------------------------------------------------------- orphanDependents

    @property
    def orphan_dependents(self):
        return self._orphan_dependents

    @orphan_dependents.setter
    def orphan_dependents(self, orphan=False):
        if not isinstance(orphan, bool):
            raise SyntaxError("DeleteOptions: orphan_dependents: [ {0} ] is invalid.".format(orphan))
        self._orphan_dependents = orphan

    # ------------------------------------------------------------------------------------- serialize

    def serialize(self):
        """Return the API-wire dict representation of these options."""
        data = {}
        if self.kind is not None:
            data["kind"] = self.kind
        if self.api_version is not None:
            data["apiVersion"] = self.api_version
        if self.grace_period_seconds is not None:
            data["gracePeriodSeconds"] = self.grace_period_seconds
        if self.orphan_dependents is not None:
            data["orphanDependents"] = self.orphan_dependents
        return data

    def as_json(self):
        data = self.serialize()
        j = json.dumps(data, indent=4)
        return j

    def as_yaml(self):
        data = self.serialize()
        y = yaml.dump(data, default_flow_style=False)
        return y
|
mnubo/kubernetes-py
|
kubernetes_py/models/v1/DeleteOptions.py
|
Python
|
apache-2.0
| 3,140
|
from PyQt5 import QtWidgets, QtCore, QtGui
import html, json
from datetime import datetime
class MainWindow(QtWidgets.QMainWindow):
    """Chat display window: a read-only QTextEdit plus a CLEAR button."""

    def __init__(self, parent=None):
        """
        MainWindow uses a QTextEdit to display chat
        """
        # initialize parent class. Req'd for PyQt subclasses
        super(MainWindow, self).__init__(parent)
        # set title window to `CHATIMUSMAXIMUS`
        self.setWindowTitle("CHATIMUSMAXIMUS")
        # create the text edit used to display the text
        self.text_edit = QtWidgets.QTextEdit(parent=self)
        self.text_edit.setReadOnly(True)
        vertical_layout = QtWidgets.QVBoxLayout()
        button = QtWidgets.QPushButton("CLEAR")
        button.clicked.connect(self.text_edit.clear)
        vertical_layout.addWidget(self.text_edit)
        vertical_layout.addWidget(button)
        central_widget = QtWidgets.QWidget()
        central_widget.setLayout(vertical_layout)
        self.setCentralWidget(central_widget)
        # sets name and text formats
        self.name_format = None
        self.text_format = None
        self._set_up_text_formats()

    @QtCore.pyqtSlot(str, str, str)
    def chat_string_slot(self, sender, message, platform):
        # Qt slot: display an already-decoded chat message.
        self._chat_formater(sender, message, platform)

    @QtCore.pyqtSlot(QtCore.QByteArray, str)
    def chat_byte_slot(self, qbyte_array, platform):
        # Qt slot: decode a raw payload, then display it.
        # NOTE(review): parsing assumes a fixed payload layout (field on the
        # 13th '\r\n'-separated line, 'host=...&name=...&message=...') —
        # confirm against the actual sender.
        text = str(qbyte_array).split('\\r\\n')[12]
        text = text.split('&', maxsplit=2)
        host = text[0][5:]
        user = html.unescape(text[1][7:])
        message = html.unescape(text[2][8:][:-1])
        self._chat_formater(user, message, platform)

    # FIXME: poor method name, not descriptive of what it does
    def _chat_formater(self, sender, message, platform):
        """
        Helper method to handle the text display logic
        """
        # get cursor from text edit
        cursor = self.text_edit.textCursor()
        # set the format to the name format
        cursor.setCharFormat(self.name_format)
        # get the timestamp
        formatted_datetime = datetime.now().strftime("%H:%M:%S")
        # the platform name and timestamp are in a bracket. Example: `[YT@12:54:00]:`
        bracket_string = ' [{}@{}]: '.format(platform, formatted_datetime)
        # inserts the sender name next to the platform & timestamp
        cursor.insertText(sender + bracket_string)
        # sets format to text format
        cursor.setCharFormat(self.text_format)
        # inserts message
        cursor.insertText(message)
        # inserts newline
        cursor.insertBlock()

    def _set_up_text_formats(self):
        """
        Helper method to create the name formater helpers
        `self.name_format` is used for names
        `self.test_formater` is used for the messages
        """
        # name format is Demibold, blue, with size 13
        self.name_format = QtGui.QTextCharFormat()
        self.name_format.setFontWeight(QtGui.QFont.DemiBold)
        self.name_format.setForeground(QtCore.Qt.blue)
        self.name_format.setFontPointSize(13)
        #text format is black, normal fontweight, and size 13
        self.text_format = QtGui.QTextCharFormat()
        self.text_format.setFontWeight(self.text_edit.fontWeight())
        self.text_format.setForeground(QtCore.Qt.black)
        self.text_format.setFontPointSize(13)
|
benhoff/chrome-stream-chat
|
CHATIMUSMAXIMUS/gui/main_window.py
|
Python
|
mit
| 3,388
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The `calendar` module provides standardised date management functionality
based on a calendar subgraph::
from py2neo import Graph
from py2neo.ext.calendar import GregorianCalendar
graph = Graph()
time_index = graph.legacy.get_or_create_index(neo4j.Node, "TIME")
calendar = GregorianCalendar(time_index)
graph.create(
{"name": "Alice"},
(0, "BORN", calendar.day(1800, 1, 1)),
(0, "DIED", calendar.day(1900, 12, 31)),
)
The root calendar node is held within a dedicated node index which needs
to be supplied to the calendar constructor.
All dates managed by the :py:class:`GregorianCalendar` class adhere to a
hierarchy such as::
(CALENDAR)-[:YEAR]->(2000)-[:MONTH]->(12)-[:DAY]->(25)
"""
from __future__ import unicode_literals
from datetime import date as _date
class GregorianCalendar(object):
    # Wraps a py2neo node index with a YEAR -> MONTH -> DAY node hierarchy.

    class Date(object):
        # A year / year-month / year-month-day value; _resolution records
        # how many components were supplied (1, 2 or 3).

        def __init__(self, year, month=None, day=None):
            # NOTE(review): assumes components are positive ints; a falsy
            # component (e.g. 0) falls through all branches and leaves the
            # attributes unset — confirm callers never pass 0.
            if year and month and day:
                self._date = _date(year, month, day)
                self._resolution = 3
                self.year = year
                self.month = month
                self.day = day
            elif year and month:
                self._date = _date(year, month, 1)
                self._resolution = 2
                self.year = year
                self.month = month
                self.day = None
            elif year:
                self._date = _date(year, 1, 1)
                self._resolution = 1
                self.year = year
                self.month = None
                self.day = None

        def __str__(self):
            # Zero-padded ISO-style rendering at this date's resolution.
            if self.year and self.month and self.day:
                return "{y:04d}-{m:02d}-{d:02d}".format(
                    y=self.year, m=self.month, d=self.day
                )
            elif self.year and self.month:
                return "{y:04d}-{m:02d}".format(
                    y=self.year, m=self.month
                )
            elif self.year:
                return "{y:04d}".format(
                    y=self.year
                )

        def get_node(self, calendar):
            # Resolve this date to its node in the given calendar.
            if self.year and self.month and self.day:
                return calendar.day(self.year, self.month, self.day)
            elif self.year and self.month:
                return calendar.month(self.year, self.month)
            elif self.year:
                return calendar.year(self.year)
            else:
                raise ValueError()

    class DateRange(object):
        # A (possibly open-ended) range between two Dates of equal resolution.

        def __init__(self, start_date=None, end_date=None):
            if start_date and end_date:
                self.start_date = GregorianCalendar.Date(*start_date)
                self.end_date = GregorianCalendar.Date(*end_date)
                # check both dates are compatible
                if self.start_date._resolution != self.end_date._resolution:
                    raise ValueError("Range start and end dates are specified "
                                     "at different resolutions")
                # ensure dates are correctly ordered
                if self.start_date._date > self.end_date._date:
                    self.start_date, self.end_date = self.end_date, self.start_date
            elif start_date:
                self.start_date = GregorianCalendar.Date(*start_date)
                self.end_date = None
            elif end_date:
                self.start_date = None
                self.end_date = GregorianCalendar.Date(*end_date)
            else:
                raise ValueError("Either start date or end date must "
                                 "be specified for a date range")

    def __init__(self, index):
        """ Create a new calendar instance pointed to by the index provided.
        """
        self._index = index
        self._graph = self._index.service_root.graph
        # Root node of the calendar subgraph, created on first use.
        self._calendar = self._index.get_or_create("calendar", "Gregorian", {})

    def calendar(self):
        # Accessor for the root calendar node.
        return self._calendar

    def day(self, year, month, day):
        """ Fetch the calendar node representing the day specified by `year`,
        `month` and `day`.
        """
        d = GregorianCalendar.Date(year, month, day)
        date_path = self._calendar.get_or_create_path(
            "YEAR", {"year": d.year},
            "MONTH", {"year": d.year, "month": d.month},
            "DAY", {"year": d.year, "month": d.month, "day": d.day},
        )
        return date_path.nodes[-1]

    def month(self, year, month):
        """ Fetch the calendar node representing the month specified by `year`
        and `month`.
        """
        d = GregorianCalendar.Date(year, month)
        date_path = self._calendar.get_or_create_path(
            "YEAR", {"year": d.year},
            "MONTH", {"year": d.year, "month": d.month},
        )
        return date_path.nodes[-1]

    def year(self, year):
        """ Fetch the calendar node representing the year specified by `year`.
        """
        d = GregorianCalendar.Date(year)
        date_path = self._calendar.get_or_create_path(
            "YEAR", {"year": d.year},
        )
        return date_path.nodes[-1]

    def date(self, date):
        # Convenience: resolve a (year[, month[, day]]) tuple to its node.
        return GregorianCalendar.Date(*date).get_node(self)

    def date_range(self, start_date=None, end_date=None):
        """ Fetch the calendar node representing the date range defined by
        `start_date` and `end_date`. If either are unspecified, this defines an
        open-ended range. Either `start_date` or `end_date` must be specified.
        """
        #                         (CAL)
        #                           |
        #                       [:RANGE]
        #                           |
        #                           v
        # (START)<-[:START_DATE]-(RANGE)-[:END_DATE]->(END)
        range_ = GregorianCalendar.DateRange(start_date, end_date)
        start, end = range_.start_date, range_.end_date
        if start and end:
            # if start and end are equal, return the day node instead
            if (start.year, start.month, start.day) == (end.year, end.month, end.day):
                return start.get_node(self)
            # Anchor the range node as deep in the hierarchy as possible.
            if (start.year, start.month) == (end.year, end.month):
                root = self.month(start.year, start.month)
            elif start.year == end.year:
                root = self.year(start.year)
            else:
                root = self._calendar
            query = """\
            START z=node({z}), s=node({s}), e=node({e})
            CREATE UNIQUE (s)<-[:START_DATE]-(r {r})-[:END_DATE]->(e),
                          (z)-[:DATE_RANGE]->(r {r})
            RETURN r
            """
            params = {
                "z": root._id,
                "s": start.get_node(self)._id,
                "e": end.get_node(self)._id,
                "r": {
                    "start_date": str(start),
                    "end_date": str(end),
                },
            }
        elif start:
            query = """\
            START z=node({z}), s=node({s})
            CREATE UNIQUE (s)<-[:START_DATE]-(r {r}),
                          (z)-[:DATE_RANGE]->(r {r})
            RETURN r
            """
            params = {
                "z": self._calendar._id,
                "s": start.get_node(self)._id,
                "r": {
                    "start_date": str(start),
                },
            }
        elif end:
            query = """\
            START z=node({z}), e=node({e})
            CREATE UNIQUE (r {r})-[:END_DATE]->(e),
                          (z)-[:DATE_RANGE]->(r {r})
            RETURN r
            """
            params = {
                "z": self._calendar._id,
                "e": end.get_node(self)._id,
                "r": {
                    "end_date": str(end),
                },
            }
        else:
            raise ValueError("Either start or end date must be supplied "
                             "for a date range")
        return self._graph.cypher.execute_one(query, params)

    def quarter(self, year, quarter):
        # Quarters are materialised as date ranges over whole months.
        if quarter == 1:
            return self.date_range((year, 1, 1), (year, 3, 31))
        elif quarter == 2:
            return self.date_range((year, 4, 1), (year, 6, 30))
        elif quarter == 3:
            return self.date_range((year, 7, 1), (year, 9, 30))
        elif quarter == 4:
            return self.date_range((year, 10, 1), (year, 12, 31))
        else:
            raise ValueError("quarter must be in 1..4")
|
pombreda/py2neo
|
py2neo/ext/calendar/__init__.py
|
Python
|
apache-2.0
| 9,179
|
#!/usr/bin/python
#
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <bfox@redhat.com>
# Mike Fulbright <msf@redhat.com>
# Jakub Jelinek <jakub@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
from tempfile import mkstemp
from pyanaconda.bootloader import get_bootloader
from pyanaconda import iutil
from pyanaconda.constants import ADDON_PATHS
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
    """Top-level installer state container.

    Lazily constructs the bootloader, install class, payload, storage and
    user-interface objects on first access.
    NOTE(review): this is Python 2 code (octal literals, iteritems()).
    """

    def __init__(self):
        from pyanaconda import desktop

        self._bootloader = None
        self.canReIPL = False
        self.desktop = desktop.Desktop()
        self.dir = None
        self.displayMode = None
        self.extraModules = []
        self.id = None
        self._instClass = None
        self._intf = None
        self.isHeadless = False
        self.ksdata = None
        self.mediaDevice = None
        self.methodstr = None
        self._network = None
        self.opts = None
        self._payload = None
        self.proxy = None
        self.proxyUsername = None
        self.proxyPassword = None
        self.reIPLMessage = None
        self.rescue_mount = True
        self.rootParts = None
        self.stage2 = None
        self._storage = None
        self.updateSrc = None
        self.mehConfig = None

        # *sigh* we still need to be able to write this out
        self.xdriver = None

    @property
    def bootloader(self):
        # Lazily constructed from the platform-appropriate bootloader class.
        if not self._bootloader:
            self._bootloader = get_bootloader()
        return self._bootloader

    @property
    def instClass(self):
        # Falls back to DefaultInstall when no install class was set.
        if not self._instClass:
            from pyanaconda.installclass import DefaultInstall
            self._instClass = DefaultInstall()

        return self._instClass

    def _getInterface(self):
        return self._intf

    def _setInterface(self, v):
        # "lambda cannot contain assignment"
        self._intf = v

    def _delInterface(self):
        del self._intf

    intf = property(_getInterface, _setInterface, _delInterface)

    @property
    def payload(self):
        # Try to find the packaging payload class.  First try the install
        # class.  If it doesn't give us one, fall back to the default.
        if not self._payload:
            klass = self.instClass.getBackend()

            if not klass:
                from pyanaconda.flags import flags

                if self.ksdata.ostreesetup.seen:
                    from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
                    klass = RPMOSTreePayload
                elif flags.livecdInstall:
                    from pyanaconda.packaging.livepayload import LiveImagePayload
                    klass = LiveImagePayload
                elif self.ksdata.method.method == "liveimg":
                    from pyanaconda.packaging.livepayload import LiveImageKSPayload
                    klass = LiveImageKSPayload
                else:
                    from pyanaconda.packaging.yumpayload import YumPayload
                    klass = YumPayload

            self._payload = klass(self.ksdata)
        return self._payload

    @property
    def protected(self):
        """Device specs that must not be wiped (live device, install source)."""
        import stat
        specs = []
        if os.path.exists("/run/initramfs/livedev") and \
           stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
            specs.append(os.readlink("/run/initramfs/livedev"))

        if self.methodstr and self.methodstr.startswith("hd:"):
            specs.append(self.methodstr[3:].split(":", 3)[0])

        if self.stage2 and self.stage2.startswith("hd:"):
            specs.append(self.stage2[3:].split(":", 3)[0])

        return specs

    @property
    def storage(self):
        # Lazily constructed blivet storage model, seeded from kickstart data.
        if not self._storage:
            import blivet
            self._storage = blivet.Blivet(ksdata=self.ksdata)

            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)

        return self._storage

    def dumpState(self):
        """Write a crash-style state dump (plus per-thread stacks) to /tmp."""
        from meh import ExceptionInfo
        from meh.dump import ReverseExceptionDump
        from inspect import stack as _stack
        from traceback import format_stack

        # Skip the frames for dumpState and the signal handler.
        stack = _stack()[2:]
        stack.reverse()
        exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
                                   self.mehConfig)

        # gather up info on the running threads
        threads = "\nThreads\n-------\n"
        # NOTE(review): dict.iteritems() is Python 2 only.
        for thread_id, frame in sys._current_frames().iteritems():
            threads += "\nThread %s\n" % (thread_id,)
            threads += "".join(format_stack(frame))

        # dump to a unique file
        (fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
        dump_text = exn.traceback_and_object_dump(self)
        dump_text += threads
        dump_text = dump_text.encode("utf-8")
        os.write(fd, dump_text)
        os.close(fd)

        # append to a given file
        with open("/tmp/anaconda-tb-all.log", "a+") as f:
            f.write("--- traceback: %s ---\n" % filename)
            f.write(dump_text + "\n")

    def initInterface(self, addon_paths=None):
        """Create the graphical or text UI according to displayMode."""
        if self._intf:
            raise RuntimeError("Second attempt to initialize the InstallInterface")

        if self.displayMode == 'g':
            from pyanaconda.ui.gui import GraphicalUserInterface
            self._intf = GraphicalUserInterface(self.storage, self.payload,
                                                self.instClass)

            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(ADDON_PATHS,
                                                     ui_subdir="gui")
        elif self.displayMode in ['t', 'c']: # text and command line are the same
            from pyanaconda.ui.tui import TextUserInterface
            self._intf = TextUserInterface(self.storage, self.payload,
                                           self.instClass)

            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(ADDON_PATHS,
                                                     ui_subdir="tui")
        else:
            raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)

        if addon_paths:
            self._intf.update_paths(addon_paths)

    def writeXdriver(self, root = None):
        # this should go away at some point, but until it does, we
        # need to keep it around.
        if self.xdriver is None:
            return
        if root is None:
            root = iutil.getSysroot()
        if not os.path.isdir("%s/etc/X11" %(root,)):
            # NOTE(review): 0755 is a Python 2 octal literal.
            os.makedirs("%s/etc/X11" %(root,), mode=0755)
        f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
        f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
        f.close()
|
gautamMalu/XenInBox
|
pyanaconda/anaconda.py
|
Python
|
gpl-2.0
| 8,011
|
#!/usr/bin/env python3
import subprocess
import time
def start(engine_path="chess/stockfish-dd-64-modern.exe"):
    """Launch a UCI chess engine and return its Popen handle.

    `engine_path` generalizes the previously hard-coded Stockfish binary;
    the default preserves the old behavior for existing callers.
    """
    engine = subprocess.Popen(
        engine_path,
        universal_newlines = True,
        stdin = subprocess.PIPE,
        stdout = subprocess.PIPE,
    )
    return engine
def put(engine, command):
    """Send one UCI command line to the engine."""
    engine.stdin.write(command + "\n")

def get(engine):
    """Drain engine output until 'readyok'; return the concatenated lines.

    Sends 'isready' first so the engine eventually emits 'readyok'.
    Non-empty lines are concatenated without separators (callers locate
    tokens with substring search) and echoed to stdout.
    """
    data = ""
    engine.stdin.write('isready\n')
    while True:
        text = engine.stdout.readline().strip()
        if text == 'readyok':
            break
        if text !='':
            data += text
    if data:
        print(data)
    return data

def checkLegal(engine, move):
    """Return True iff the engine finds `move` playable in the current position.

    Bug fix: the old code read `results[2]` (the ponder move) before checking
    the bestmove; on an illegal move the engine replies 'bestmove (none)'
    with no ponder field, which raised IndexError. The unused ponder lookup
    is removed.
    """
    put(engine, "go depth 5 searchmoves " + move)
    while True:
        r = get(engine)
        if "bestmove" in r:
            results = r[r.index("bestmove") + 9:].split(" ")
            testmove = results[0]
            print(r)
            break
    return testmove != "(none)"

def setPosition(engine, moves):
    """Set the board to the start position plus the given move list."""
    put(engine, "position startpos moves " + ' '.join(moves))
    get(engine)
if __name__ == "__main__":
print("Let's play CHESS!")
engine = start()
#get(engine)
put(engine, "uci")
#get(engine)
put(engine, "ucinewgame")
#get(engine)
moves = []
while True:
setPosition(engine, moves)
print("")
print("-----------------------------")
while True:
print("What is your move? ")
whitemove = input(" :: ")
if checkLegal(engine, whitemove):
break
else:
print("That is not a legal move!")
print("-----------------------------")
print("")
moves.append(whitemove)
setPosition(engine, moves)
put(engine, "go depth 12")
while True:
r = get(engine)
if "bestmove" in r:
results = r[r.index("bestmove") + 9:].split(" ")
blackmove = results[0]
moves.append(blackmove)
print(r)
pondermove = results[2]
break
print("")
print("==========================")
print("BLACK PLAYS: ")
print(blackmove)
print("==========================")
print("")
|
omegachysis/arche-chess
|
chess test 2.py
|
Python
|
gpl-2.0
| 2,319
|
#!/usr/bin/python
__author__ = 'mlissner'
import argparse
from datetime import datetime
# Textual date formats accepted by make_date(), tried in listed order.
INPUT_FORMATS = [
    '%Y-%m-%d',  # '2006-10-25'
    '%m-%d-%Y',  # '10-25-2006'
    '%m-%d-%y',  # '10-25-06'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y',  # '10/25/06'
    '%Y/%m/%d',  # '2006/10/26'
]


def make_date(date_string):
    """Parse `date_string` with the first matching INPUT_FORMATS entry.

    Raises argparse.ArgumentTypeError when no format matches, so it can be
    used directly as an argparse `type=` callable.
    """
    for fmt in INPUT_FORMATS:
        try:
            return datetime.strptime(date_string, fmt).date()
        except ValueError:
            continue
    # If we made it this far, we can't handle the date.
    raise argparse.ArgumentTypeError("Unable to parse date, %s" % date_string)
def main():
    """Build and run the demo argument parser."""
    parser = argparse.ArgumentParser(description='Describe me')
    parser.add_argument('-m', '--my-foo', default=True, required=False,
                        action='store_true',
                        help='Do we foo it?')
    # Fix: `type=file` only worked on Python 2 (the `file` builtin no longer
    # exists); argparse.FileType is the documented, portable equivalent and
    # likewise yields an opened read-mode file object.
    parser.add_argument('-f', '--file', type=argparse.FileType('r'),
                        help='The path to the file.')
    parser.add_argument('-d', '--date', type=make_date,
                        help='How far back should we eat pie?')
    args = parser.parse_args()


if __name__ == '__main__':
    main()
|
Andr3iC/courtlistener
|
cl/recap/download/__init__.py
|
Python
|
agpl-3.0
| 1,168
|
#
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is provided to you under the terms of
# the GNU LGPL. Please see LICENSE in the top level directory for full
# details.
#
"""Regarding indices, a few conventions:
1) Broadly there are two types of index:
- Three-dimensional indices into a 3D array, these are typically a
numpy.ndarray (with shape == (3,)) and are named with a suffix of
'Idx'.
- One-dimensional indices into a flattened array. These are just
integers and have the suffix 'Ijk'.
2) Indices can refer to a number of things and have additional naming:
- b : Index of a block
- sg : Index of a site in the whole domain (site global)
- sl : Index of a site in the block (site local)
"""
import numpy as np
# Magic constant identifying HemeLB geometry files.
GeometryMagicNumber = 0x676d7904

# The 26 displacement vectors of the 3D Moore neighbourhood, listed in
# lexicographic order over {-1, 0, +1}^3 with the null vector omitted.
MooreNeighbourhoodDirections = np.array(
    [[-1,-1,-1],
     [-1,-1, 0],
     [-1,-1,+1],
     [-1, 0,-1],
     [-1 , 0, 0],
     [-1, 0,+1],
     [-1,+1,-1],
     [-1,+1, 0],
     [-1,+1,+1],

     [ 0,-1,-1],
     [ 0,-1, 0],
     [ 0,-1,+1],
     [ 0, 0,-1],
     #[ 0, 0, 0], <= the null displacement is not part of the Moore N'hood
     [ 0, 0,+1],
     [ 0,+1,-1],
     [ 0,+1, 0],
     [ 0,+1,+1],

     [+1,-1,-1],
     [+1,-1, 0],
     [+1,-1,+1],
     [+1, 0,-1],
     [+1, 0, 0],
     [+1, 0,+1],
     [+1,+1,-1],
     [+1,+1, 0],
     [+1,+1,+1]]
    )
|
jenshnielsen/hemelb
|
Tools/hemeTools/parsers/geometry/__init__.py
|
Python
|
lgpl-3.0
| 1,408
|
""" Crawl http://abcnotation.com to get dataset of traditional musics. """
import requests
import bs4
import time
import pickle
import os.path
import argparse
import re
# Crawl entry points: the tune search index, filtered to traditional tunes.
root_url = 'http://abcnotation.com'
index_url = root_url + '/searchTunes?q=C:trad&f=c&o=a&s=0'

# Command-line options (parsed at import time).
parser = argparse.ArgumentParser()
parser.add_argument('--num-pages', '-n', default=500, type=int,
                    help='Number of pages to be searched for musics.')
args = parser.parse_args()
def get_links(url):
    """Scrape one search-results page.

    Returns (tune-page links found on `url`, absolute URL of the next
    results page or None when on the last page).
    """
    page = requests.get(url)
    soup = bs4.BeautifulSoup(page.text, "lxml")
    anchors = (item.find('a', href=True, text='tune page')
               for item in soup.findAll('li'))
    links = [root_url + anchor['href'] for anchor in anchors if anchor]
    next_url = soup.find('a', href=True, text='next')
    print(next_url)
    if next_url:
        next_url = root_url + next_url['href']
    return links, next_url
def crawl_links(index_url):
    """Follow 'next' links from `index_url`, collecting tune-page links.

    Visits at most args.num_pages result pages, sleeping between requests.
    """
    all_links, next_url = get_links(index_url)
    pages = 1
    while next_url is not None and pages < args.num_pages:
        time.sleep(0.5)  # Be nice
        links, next_url = get_links(next_url)
        all_links += links
        pages += 1
    return all_links
def get_musics(links):
    """Yield batches (lists of up to 10) of ABC tune texts downloaded from *links*.

    Only tunes hosted on one specific contributor's dropbox are fetched, and
    a tune is kept only when it has at least one line that is neither an
    'X:'-style header line nor a '%' comment.

    Fixes over the previous version:
    - the final, possibly short, batch is now yielded instead of being
      silently dropped;
    - a blank line inside a tune no longer raises IndexError (which the
      broad except used to swallow, silently discarding the tune);
    - dead accumulation of the unused `res` string removed;
    - Python 2-only `print`/`except` syntax replaced with forms valid on
      both Python 2 and 3.
    """
    header = re.compile('^([A-Za-z]):')
    musics = []
    for url in links:
        # Get only musics from this guy. Let's make things easier for the network
        if 'dl.dropboxusercontent.com/u/4496965' not in url:
            continue
        time.sleep(0.5)  # Be nice
        print(url)
        try:
            response = requests.get(url, timeout=5)
            soup = bs4.BeautifulSoup(response.text, "lxml")
            text = soup.select('textarea')[0].get_text().lstrip(' \n').rstrip('\n')
            # "Good" means there is at least one body line: not a header
            # field (e.g. "K:...") and not a "%" comment line.
            good = any(not header.match(line) and not line.startswith('%')
                       for line in text.split('\n'))
            if good:
                musics.append(text.encode('ascii', errors='ignore'))
            else:
                print('Bad:\n%s' % text.encode('ascii', errors='ignore'))
        except Exception as e:
            print(e)
        if len(musics) == 10:
            print('Writing!')
            yield musics
            musics = []
    # Flush the final partial batch; the old code lost it.
    if musics:
        yield musics
# Crawl (or reuse a cached crawl of) the tune-page links, then download the
# tunes and append them to input.txt.
# Fixes: pickle file handles are now closed (with-blocks) and the bare
# `except:` around os.remove is narrowed to OSError (only a missing file is
# expected there).
if not os.path.exists('links.pkl'):
    links = crawl_links(index_url)
    with open('links.pkl', 'wb') as cache:
        pickle.dump(links, cache)
else:
    with open('links.pkl', 'rb') as cache:
        links = pickle.load(cache)

musics = get_musics(links)

# Start from a clean output file.
try:
    os.remove("input.txt")
except OSError:
    pass

with open("input.txt", "a") as f:
    for chunk in musics:
        f.write('\n\n'.join(chunk))
|
allanino/rnn-composer-helper
|
get_u_4496965_data.py
|
Python
|
mit
| 2,996
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <lmacken@redhat.com>
from tg import config
from paste.deploy.converters import asbool
import tw.jquery.flot
import tw2.jqplugins.flot
import tw2.excanvas
from moksha.api.widgets import TW1LiveWidget, TW2LiveWidget
class TW1LiveFlotWidget(TW1LiveWidget):
    """A live graphing widget (ToscaWidgets 1 variant).

    On each message received on ``topic``, ``onmessage`` re-plots the flot
    chart from the first element of the JSON payload.
    """
    # Message topic to subscribe to; None means not subscribed by default.
    topic = None
    params = ['id', 'data', 'options', 'height', 'width', 'onmessage']
    # JavaScript run per message: feeds payload data/options to $.plot.
    onmessage = '$.plot($("#${id}"),json[0]["data"],json[0]["options"])'
    template = "mako:moksha.api.widgets.flot.templates.flot"
    javascript = [tw.jquery.flot.flot_js, tw.jquery.flot.excanvas_js]
    css = [tw.jquery.flot.flot_css]
    height = '250px'
    width = '390px'
    # NOTE(review): mutable class attributes shared at class level;
    # presumably the widget framework copies params per render — confirm.
    options = {}
    data = [{}]
class TW2LiveFlotWidget(TW2LiveWidget):
    """A live graphing widget (ToscaWidgets 2 variant).

    Same behavior as :class:`TW1LiveFlotWidget`, but pulls its JavaScript
    from the tw2 packages via ``resources``.
    """
    # Message topic to subscribe to; None means not subscribed by default.
    topic = None
    params = ['id', 'data', 'options', 'height', 'width', 'onmessage']
    # JavaScript run per message: feeds payload data/options to $.plot.
    onmessage = '$.plot($("#${id}"),json[0]["data"],json[0]["options"])'
    template = "mako:moksha.api.widgets.flot.templates.flot"
    resources = [
        tw2.jqplugins.flot.flot_js,
        tw2.excanvas.excanvas_js
    ]
    height = '250px'
    width = '390px'
    # NOTE(review): mutable class attributes shared at class level;
    # presumably the widget framework copies params per render — confirm.
    options = {}
    data = [{}]
# Select the ToscaWidgets implementation once, at import time, based on the
# 'moksha.use_tw2' config flag (defaults to the TW1 variant).
if asbool(config.get('moksha.use_tw2', False)):
    LiveFlotWidget = TW2LiveFlotWidget
else:
    LiveFlotWidget = TW1LiveFlotWidget
|
ralphbean/moksha
|
moksha/api/widgets/flot/flot.py
|
Python
|
apache-2.0
| 1,938
|
# This file is part of aoc2016.
#
# aoc2016 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aoc2016 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aoc2016. If not, see <http://www.gnu.org/licenses/>.
# Tile glyphs as they appear in the puzzle input and generated rows.
SAFE_TILE = '.'
TRAP_TILE = '^'
def part_one(puzzle_input):
    """Count safe tiles in a 40-row room grown from the puzzle input."""
    return count_safe_tiles_in_room(puzzle_input, 40)
def part_two(puzzle_input):
    """Count safe tiles in a 400000-row room grown from the puzzle input."""
    return count_safe_tiles_in_room(puzzle_input, 400000)
def count_safe_tiles_in_room(first_row_of_tiles, n_rows):
    """Total the safe tiles over *n_rows* rows, starting from the given row."""
    row = list(first_row_of_tiles)
    total = count_safe_tiles(row)
    for _ in range(n_rows - 1):
        row = decode_next_row_of_tiles(row)
        total += count_safe_tiles(row)
    return total
def count_safe_tiles(row_of_tiles):
    """Return the number of safe tiles in a single row (list or str)."""
    # The previous loop counted SAFE_TILE matches into a variable
    # misleadingly named 'n_traps'; the count itself was correct.
    # sequence.count() does the same scan without the naming trap.
    return row_of_tiles.count(SAFE_TILE)
def decode_next_row_of_tiles(input_row):
    """Build the row of tiles below *input_row*.

    Each new tile is determined by the three tiles above it (left, center,
    right); positions beyond the row edges count as safe.

    Robustness fix: the previous version indexed input_row[1] and
    input_row[-2] unconditionally and raised IndexError on rows of
    length 1; this version handles any non-empty row.
    """
    width = len(input_row)
    next_row = []
    for i in range(width):
        left = input_row[i - 1] if i > 0 else SAFE_TILE
        right = input_row[i + 1] if i < width - 1 else SAFE_TILE
        next_row.append(determine_tile(left, input_row[i], right))
    return next_row
def determine_tile(left, center, right):
    """Decide the tile below the (left, center, right) parents.

    The four trap rules previously spelled out —
    (^..), (..^), (^^.), (.^^) — are exactly the cases where the left and
    right parents differ; the center parent never affects the outcome.
    """
    if left != right:
        return TRAP_TILE
    return SAFE_TILE
|
T-R0D/JustForFun
|
aoc2016/aoc2016/day18/solution.py
|
Python
|
gpl-2.0
| 2,158
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import django.forms as forms
from django.db.models import Q
from pdc.apps.release.models import Release
class ComposeSearchForm(forms.Form):
    """Free-text search over composes by compose_id substring."""
    search = forms.CharField(required=False)

    def get_query(self, request):
        """Return a Q filtering composes; empty Q when the form is invalid.

        Fix: the result of is_valid() was previously ignored, so an invalid
        form would read cleaned_data anyway; now this guards like every
        other search form in this module.
        """
        query = Q()
        if not self.is_valid():
            return query
        search = self.cleaned_data["search"]
        if search:
            query |= Q(compose_id__icontains=search)
        return query
class ComposeRPMSearchForm(forms.Form):
    """Free-text search over compose RPMs by name/arch/version/release."""
    search = forms.CharField(required=False)

    def get_query(self, request):
        """Return a Q ORing the search term over the rpm fields."""
        query = Q()
        if not self.is_valid():
            return query
        term = self.cleaned_data["search"]
        if term:
            for field in ("rpm__name", "rpm__arch", "rpm__version", "rpm__release"):
                query |= Q(**{field + "__icontains": term})
        return query
class ComposeImageSearchForm(forms.Form):
    """Free-text search over compose images by name, format, type and checksums."""
    search = forms.CharField(required=False)

    # Image fields matched case-insensitively against the search term.
    _SEARCH_FIELDS = (
        "image__file_name",
        "image__image_format__name",
        "image__image_type__name",
        "image__arch",
        "image__implant_md5",
        "image__volume_id",
        "image__md5",
        "image__sha1",
        "image__sha256",
    )

    def get_query(self, request):
        """Return a Q ORing the search term over all image fields."""
        query = Q()
        if not self.is_valid():
            return query
        term = self.cleaned_data["search"]
        if term:
            for field in self._SEARCH_FIELDS:
                query |= Q(**{field + "__icontains": term})
        return query
class ComposeRpmMappingSearchForm(forms.Form):
    """Search form for the RPM-to-release mapping view.

    NOTE(review): get_query is currently a stub — it validates and reads
    both fields but always returns an empty Q (see the commented-out
    filtering below). Presumably filtering happens elsewhere; confirm
    before "fixing" this.
    """
    package = forms.CharField(required=False)
    release = forms.ChoiceField()
    def __init__(self, *args, **kwargs):
        super(ComposeRpmMappingSearchForm, self).__init__(*args, **kwargs)
        # Choices must be built per instantiation so newly created releases
        # show up without a process restart.
        self.fields["release"].choices = [(i.pk, str(i)) for i in Release.objects.all()]
    def get_query(self, request):
        query = Q()
        if not self.is_valid():
            return query
        package = self.cleaned_data["package"]  # noqa
        release = self.cleaned_data["release"]  # noqa
        # if package:
        #     package |= Q(compose_id__icontains=search)
        return query
class ComposeRPMDisableForm(forms.Form):
    """Per-row form toggling whether a ComposeRPM is included.

    All identifying fields are hidden; only the 'included' checkbox is
    visible to the user.
    """
    # Row identity: compose variant/arch plus the rpm name/arch.
    variant = forms.CharField(widget=forms.HiddenInput())
    arch = forms.CharField(widget=forms.HiddenInput())
    rpm_name = forms.CharField(widget=forms.HiddenInput())
    rpm_arch = forms.CharField(widget=forms.HiddenInput())
    included = forms.BooleanField(
        required=False,
        initial=True,
        widget=forms.CheckboxInput(attrs={"class": "form-control input-small"})
    )
    override = forms.CharField(required=False, widget=forms.HiddenInput())
class OverrideRPMForm(forms.Form):
    """Form for adding an RPM override, either to an existing variant.arch
    (via ``variant``/``arch``) or to a newly added one (via ``new_variant``).
    """
    variant = forms.CharField(widget=forms.HiddenInput(), required=False)
    arch = forms.CharField(widget=forms.HiddenInput(), required=False)
    rpm_name = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control input-sm'}),
        required=False
    )
    rpm_arch = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control input-sm'}),
        required=False
    )
    # Index of a newly added variant row; -1 (or absent) means none.
    new_variant = forms.IntegerField(required=False,
                                     initial=-1,
                                     widget=forms.HiddenInput())

    def clean(self):
        """Cross-field validation.

        Fixes: uses .get() so a field-level validation error does not cause
        KeyError here, and guards new_variant against None — the old
        ``cleaned_data['new_variant'] >= 0`` raises TypeError on Python 3
        when the optional field was not submitted.
        """
        cleaned_data = super(OverrideRPMForm, self).clean()
        rpm_name = cleaned_data.get('rpm_name')
        rpm_arch = cleaned_data.get('rpm_arch')
        # RPM name and arch only make sense together (exclusive-or is bad).
        if bool(rpm_name) != bool(rpm_arch):
            raise forms.ValidationError("Both RPM name and arch must be filled in.")
        new_variant = cleaned_data.get('new_variant')
        if (new_variant is not None and new_variant >= 0 and
                (cleaned_data.get('variant') or cleaned_data.get('arch'))):
            raise forms.ValidationError("Can not reference both old and new variant.arch.")
        return cleaned_data
class VariantArchForm(forms.Form):
    """Form naming a compose variant and architecture pair."""
    variant = forms.CharField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control input-sm'})
    )
    arch = forms.CharField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control input-sm'})
    )

    def clean(self):
        """Require that variant and arch are given together or not at all."""
        data = super(VariantArchForm, self).clean()
        # Exactly one of the two being filled in is invalid.
        if bool(data['variant']) != bool(data['arch']):
            raise forms.ValidationError("Both variant and arch must be filled in.")
        return data
class OverrideRPMActionForm(forms.Form):
    """Confirmation form for one pending override action (create/delete).

    Mostly hidden fields carrying the action identity; the user can supply
    a comment and tick 'do_not_delete'.
    """
    # Action identity (all hidden).
    action = forms.CharField(widget=forms.HiddenInput())
    # release_id = forms.CharField(widget=forms.HiddenInput())
    variant = forms.CharField(widget=forms.HiddenInput())
    arch = forms.CharField(widget=forms.HiddenInput())
    # srpm_name = forms.CharField(widget=forms.HiddenInput())
    rpm_name = forms.CharField(widget=forms.HiddenInput())
    rpm_arch = forms.CharField(widget=forms.HiddenInput())
    include = forms.BooleanField(required=False, widget=forms.HiddenInput())
    comment = forms.CharField(required=False,
                              widget=forms.TextInput(attrs={'class': 'form-control input-sm'}))
    do_not_delete = forms.BooleanField(required=False,
                                       widget=forms.CheckboxInput(attrs={'class': 'form-control'}))
    warning = forms.CharField(required=False)
    def clean(self):
        # A comment is only meaningful for records kept via do_not_delete.
        cleaned_data = super(OverrideRPMActionForm, self).clean()
        if not cleaned_data.get('do_not_delete', False) and cleaned_data['comment']:
            raise forms.ValidationError('Comment needs do_not_delete checked.')
        return cleaned_data
|
release-engineering/product-definition-center
|
pdc/apps/compose/forms.py
|
Python
|
mit
| 6,020
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Produces contiguous completed ranges of recurring tasks.
See ``RangeDaily`` and ``RangeHourly`` for basic usage.
Caveat - if gaps accumulate, their causes (e.g. missing dependencies) going
unmonitored/unmitigated, then this will eventually keep retrying the same gaps
over and over and make no progress to more recent times. (See ``task_limit``
and ``reverse`` parameters.)
TODO foolproof against that kind of misuse?
"""
import itertools
import functools
import logging
import warnings
import operator
import re
import time
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from luigi import six
import luigi
from luigi.parameter import ParameterException
from luigi.target import FileSystemTarget
from luigi.task import Register, flatten_output
logger = logging.getLogger('luigi-interface')
class RangeEvent(luigi.Event):  # Not sure if subclassing currently serves a purpose. Stringly typed, events are.
    """
    Events communicating useful metrics.
    ``COMPLETE_COUNT`` would normally be nondecreasing, and its derivative
    would describe performance (how many instances complete
    invocation-over-invocation).
    ``COMPLETE_FRACTION`` reaching 1 would be a telling event in case of a
    backfill with defined start and stop. Would not be strikingly useful for a
    typical recurring task without stop defined, fluctuating close to 1.
    ``DELAY`` is measured from the first found missing datehour till (current
    time + hours_forward), or till stop if it is defined. In hours for Hourly.
    TBD different units for other frequencies?
    TODO any different for reverse mode? From first missing till last missing?
    From last gap till stop?
    """
    # Event names; handlers receive (task_family, value) — see _emit_metrics.
    COMPLETE_COUNT = "event.tools.range.complete.count"
    COMPLETE_FRACTION = "event.tools.range.complete.fraction"
    DELAY = "event.tools.range.delay"
class RangeBase(luigi.WrapperTask):
    """
    Produces a contiguous completed range of a recurring task.
    Made for the common use case where a task is parameterized by e.g.
    ``DateParameter``, and assurance is needed that any gaps arising from
    downtime are eventually filled.
    Emits events that one can use to monitor gaps and delays.
    At least one of start and stop needs to be specified.
    (This is quite an abstract base class for subclasses with different
    datetime parameter classes, e.g. ``DateParameter``, ``DateHourParameter``,
    ..., and different parameter naming, e.g. days_back/forward,
    hours_back/forward, ..., as well as different documentation wording,
    to improve user experience.)
    Subclasses will need to use the ``of`` parameter when overriding methods.
    """
    # TODO lift the single parameter constraint by passing unknown parameters through WrapperTask?
    of = luigi.TaskParameter(
        description="task name to be completed. The task must take a single datetime parameter")
    of_params = luigi.DictParameter(default=dict(), description="Arguments to be provided to the 'of' class when instantiating")
    # The common parameters 'start' and 'stop' have type (e.g. DateParameter,
    # DateHourParameter) dependent on the concrete subclass, cumbersome to
    # define here generically without dark magic. Refer to the overrides.
    start = luigi.Parameter()
    stop = luigi.Parameter()
    reverse = luigi.BoolParameter(
        default=False,
        description="specifies the preferred order for catching up. False - work from the oldest missing outputs onward; True - from the newest backward")
    task_limit = luigi.IntParameter(
        default=50,
        description="how many of 'of' tasks to require. Guards against scheduling insane amounts of tasks in one go")
    # TODO overridable exclude_datetimes or something...
    now = luigi.IntParameter(
        default=None,
        description="set to override current time. In seconds since epoch")
    param_name = luigi.Parameter(
        default=None,
        description="parameter name used to pass in parameterized value. Defaults to None, meaning use first positional parameter",
        positional=False)
    @property
    def of_cls(self):
        """
        DONT USE. Will be deleted soon. Use ``self.of``!
        """
        if isinstance(self.of, six.string_types):
            warnings.warn('When using Range programatically, dont pass "of" param as string!')
            return Register.get_task_cls(self.of)
        return self.of
    # a bunch of datetime arithmetic building blocks that need to be provided in subclasses
    def datetime_to_parameter(self, dt):
        raise NotImplementedError
    def parameter_to_datetime(self, p):
        raise NotImplementedError
    def datetime_to_parameters(self, dt):
        """
        Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
        """
        raise NotImplementedError
    def parameters_to_datetime(self, p):
        """
        Given a dictionary of parameters, will extract the ranged task parameter value
        """
        raise NotImplementedError
    def moving_start(self, now):
        """
        Returns a datetime from which to ensure contiguousness in the case when
        start is None or unfeasibly far back.
        """
        raise NotImplementedError
    def moving_stop(self, now):
        """
        Returns a datetime till which to ensure contiguousness in the case when
        stop is None or unfeasibly far forward.
        """
        raise NotImplementedError
    def finite_datetimes(self, finite_start, finite_stop):
        """
        Returns the individual datetimes in interval [finite_start, finite_stop)
        for which task completeness should be required, as a sorted list.
        """
        raise NotImplementedError
    def _emit_metrics(self, missing_datetimes, finite_start, finite_stop):
        """
        For consistent metrics one should consider the entire range, but
        it is open (infinite) if stop or start is None.
        Hence make do with metrics respective to the finite simplification.
        """
        # Widen the considered window to cover the configured start/stop when
        # those extend beyond the finite window, so metrics stay comparable.
        datetimes = self.finite_datetimes(
            finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)),
            finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop)))
        # DELAY: number of instances from the first missing one to the end of the window.
        delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0
        self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs)
        expected_count = len(datetimes)
        complete_count = expected_count - len(missing_datetimes)
        self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count)
        self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family, float(complete_count) / expected_count if expected_count else 1)
    def _format_datetime(self, dt):
        # Overridden in subclasses to use the matching parameter's serialization.
        return self.datetime_to_parameter(dt)
    def _format_range(self, datetimes):
        # Human-readable '[first, last]' range for log messages.
        param_first = self._format_datetime(datetimes[0])
        param_last = self._format_datetime(datetimes[-1])
        return '[%s, %s]' % (param_first, param_last)
    def _instantiate_task_cls(self, param):
        return self.of(**self._task_parameters(param))
    @property
    def _param_name(self):
        # Name of the 'of' task's first positional parameter, unless overridden.
        if self.param_name is None:
            return next(x[0] for x in self.of.get_params() if x[1].positional)
        else:
            return self.param_name
    def _task_parameters(self, param):
        # of_params plus the ranged (datetime) parameter under its proper name.
        kwargs = dict(**self.of_params)
        kwargs[self._param_name] = param
        return kwargs
    def requires(self):
        # cache because we anticipate a fair amount of computation
        if hasattr(self, '_cached_requires'):
            return self._cached_requires
        # Validate the start/stop/reverse combination up front.
        if not self.start and not self.stop:
            raise ParameterException("At least one of start and stop needs to be specified")
        if not self.start and not self.reverse:
            raise ParameterException("Either start needs to be specified or reverse needs to be True")
        if self.start and self.stop and self.start > self.stop:
            raise ParameterException("Can't have start > stop")
        # TODO check overridden complete() and exists()
        now = datetime.utcfromtimestamp(time.time() if self.now is None else self.now)
        # Clamp the (possibly open-ended) configured range to the moving
        # window around 'now' so it stays finite.
        moving_start = self.moving_start(now)
        finite_start = moving_start if self.start is None else max(self.parameter_to_datetime(self.start), moving_start)
        moving_stop = self.moving_stop(now)
        finite_stop = moving_stop if self.stop is None else min(self.parameter_to_datetime(self.stop), moving_stop)
        datetimes = self.finite_datetimes(finite_start, finite_stop) if finite_start <= finite_stop else []
        if datetimes:
            logger.debug('Actually checking if range %s of %s is complete',
                         self._format_range(datetimes), self.of_cls.task_family)
            missing_datetimes = sorted(self._missing_datetimes(datetimes))
            logger.debug('Range %s lacked %d of expected %d %s instances',
                         self._format_range(datetimes), len(missing_datetimes), len(datetimes), self.of_cls.task_family)
        else:
            missing_datetimes = []
            logger.debug('Empty range. No %s instances expected', self.of_cls.task_family)
        self._emit_metrics(missing_datetimes, finite_start, finite_stop)
        # Require at most task_limit instances, newest-first when reverse.
        if self.reverse:
            required_datetimes = missing_datetimes[-self.task_limit:]
        else:
            required_datetimes = missing_datetimes[:self.task_limit]
        if required_datetimes:
            logger.debug('Requiring %d missing %s instances in range %s',
                         len(required_datetimes), self.of_cls.task_family, self._format_range(required_datetimes))
        if self.reverse:
            required_datetimes.reverse()  # TODO priorities, so that within the batch tasks are ordered too
        self._cached_requires = [self._instantiate_task_cls(self.datetime_to_parameter(d)) for d in required_datetimes]
        return self._cached_requires
    def missing_datetimes(self, finite_datetimes):
        """
        Override in subclasses to do bulk checks.
        Returns a sorted list.
        This is a conservative base implementation that brutally checks completeness, instance by instance.
        Inadvisable as it may be slow.
        """
        return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
    def _missing_datetimes(self, finite_datetimes):
        """
        Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
        """
        try:
            return self.missing_datetimes(finite_datetimes)
        except TypeError as ex:
            # Old subclasses implemented missing_datetimes(cls, finite_datetimes);
            # detect the arity mismatch and retry with the legacy signature.
            if 'missing_datetimes()' in repr(ex):
                warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
                return self.missing_datetimes(self.of_cls, finite_datetimes)
            else:
                raise
class RangeDailyBase(RangeBase):
    """
    Produces a contiguous completed range of a daily recurring task.
    """
    start = luigi.DateParameter(
        default=None,
        description="beginning date, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.DateParameter(
        default=None,
        description="ending date, exclusive. Default: None - work forward forever")
    days_back = luigi.IntParameter(
        default=100,  # slightly more than three months
        description=("extent to which contiguousness is to be assured into "
                     "past, in days from current time. Prevents infinite loop "
                     "when start is none. If the dataset has limited retention"
                     " (i.e. old outputs get removed), this should be set "
                     "shorter to that, too, to prevent the oldest outputs "
                     "flapping. Increase freely if you intend to process old "
                     "dates - worker's memory is the limit"))
    days_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, in days from current time. Prevents infinite loop when stop is none")
    def datetime_to_parameter(self, dt):
        # The daily task is parameterized by a date, not a datetime.
        return dt.date()
    def parameter_to_datetime(self, p):
        return datetime(p.year, p.month, p.day)
    def datetime_to_parameters(self, dt):
        """
        Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
        """
        return self._task_parameters(dt.date())
    def parameters_to_datetime(self, p):
        """
        Given a dictionary of parameters, will extract the ranged task parameter value
        """
        dt = p[self._param_name]
        return datetime(dt.year, dt.month, dt.day)
    def moving_start(self, now):
        return now - timedelta(days=self.days_back)
    def moving_stop(self, now):
        return now + timedelta(days=self.days_forward)
    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to turn of day.
        """
        # Round down to midnight, then step a day at a time until finite_stop.
        date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
        dates = []
        for i in itertools.count():
            t = date_start + timedelta(days=i)
            if t >= finite_stop:
                return dates
            if t >= finite_start:
                dates.append(t)
class RangeHourlyBase(RangeBase):
    """
    Produces a contiguous completed range of an hourly recurring task.
    """
    start = luigi.DateHourParameter(
        default=None,
        description="beginning datehour, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.DateHourParameter(
        default=None,
        description="ending datehour, exclusive. Default: None - work forward forever")
    hours_back = luigi.IntParameter(
        default=100 * 24,  # slightly more than three months
        description=("extent to which contiguousness is to be assured into "
                     "past, in hours from current time. Prevents infinite "
                     "loop when start is none. If the dataset has limited "
                     "retention (i.e. old outputs get removed), this should "
                     "be set shorter to that, too, to prevent the oldest "
                     "outputs flapping. Increase freely if you intend to "
                     "process old dates - worker's memory is the limit"))
    # TODO always entire interval for reprocessings (fixed start and stop)?
    hours_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, in hours from current time. Prevents infinite loop when stop is none")
    def datetime_to_parameter(self, dt):
        # The hourly task takes the datetime itself as parameter.
        return dt
    def parameter_to_datetime(self, p):
        return p
    def datetime_to_parameters(self, dt):
        """
        Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
        """
        return self._task_parameters(dt)
    def parameters_to_datetime(self, p):
        """
        Given a dictionary of parameters, will extract the ranged task parameter value
        """
        return p[self._param_name]
    def moving_start(self, now):
        return now - timedelta(hours=self.hours_back)
    def moving_stop(self, now):
        return now + timedelta(hours=self.hours_forward)
    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to whole hours.
        """
        # Round down to the whole hour, then step an hour at a time.
        datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
        datehours = []
        for i in itertools.count():
            t = datehour_start + timedelta(hours=i)
            if t >= finite_stop:
                return datehours
            if t >= finite_start:
                datehours.append(t)
    def _format_datetime(self, dt):
        # Log datehours the way the parameter serializes them.
        return luigi.DateHourParameter().serialize(dt)
class RangeByMinutesBase(RangeBase):
    """
    Produces a contiguous completed range of a recurring task separated by a
    specified number of minutes.
    """
    start = luigi.DateMinuteParameter(
        default=None,
        description="beginning date-hour-minute, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.DateMinuteParameter(
        default=None,
        description="ending date-hour-minute, exclusive. Default: None - work forward forever")
    minutes_back = luigi.IntParameter(
        default=60*24,  # one day
        description=("extent to which contiguousness is to be assured into "
                     "past, in minutes from current time. Prevents infinite "
                     "loop when start is none. If the dataset has limited "
                     "retention (i.e. old outputs get removed), this should "
                     "be set shorter to that, too, to prevent the oldest "
                     "outputs flapping. Increase freely if you intend to "
                     "process old dates - worker's memory is the limit"))
    minutes_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, "
                    "in minutes from current time. Prevents infinite loop when stop is none")
    minutes_interval = luigi.IntParameter(
        default=1,
        description="separation between events in minutes. It must evenly divide 60"
    )
    def datetime_to_parameter(self, dt):
        # The task takes the datetime itself as parameter.
        return dt
    def parameter_to_datetime(self, p):
        return p
    def datetime_to_parameters(self, dt):
        """
        Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
        """
        return self._task_parameters(dt)
    def parameters_to_datetime(self, p):
        """
        Given a dictionary of parameters, will extract the ranged task parameter value
        """
        dt = p[self._param_name]
        return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)
    def moving_start(self, now):
        return now - timedelta(minutes=self.minutes_back)
    def moving_stop(self, now):
        return now + timedelta(minutes=self.minutes_forward)
    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to a whole number of minutes intervals.
        """
        # Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
        if not (0 < self.minutes_interval < 60):
            raise ParameterException('minutes-interval must be within 0..60')
        if (60 / self.minutes_interval) * self.minutes_interval != 60:
            raise ParameterException('minutes-interval does not evenly divide 60')
        # start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
        start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
        datehour_start = datetime(
            year=finite_start.year,
            month=finite_start.month,
            day=finite_start.day,
            hour=finite_start.hour,
            minute=start_minute)
        datehours = []
        for i in itertools.count():
            t = datehour_start + timedelta(minutes=i*self.minutes_interval)
            if t >= finite_stop:
                return datehours
            if t >= finite_start:
                datehours.append(t)
    def _format_datetime(self, dt):
        # Log datetimes the way the parameter serializes them.
        return luigi.DateMinuteParameter().serialize(dt)
def _constrain_glob(glob, paths, limit=5):
    """
    Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
    Saves us minutes long listings for long dataset histories.
    Specifically, in this implementation the leftmost occurrences of "[0-9]"
    give rise to a few separate globs that each specialize the expression to
    digits that actually occur in paths.
    """
    def digit_set_wildcard(chars):
        """
        Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
        """
        chars = sorted(chars)
        # Contiguous digit runs collapse to a range; otherwise enumerate them.
        if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
            return '[%s-%s]' % (chars[0], chars[-1])
        else:
            return '[%s]' % ''.join(chars)
    # Map from (partially specialized) glob to the subset of paths it covers.
    current = {glob: paths}
    while True:
        # All globs in `current` share the same prefix length up to the next
        # wildcard, so inspecting any one key finds the common position.
        pos = list(current.keys())[0].find('[0-9]')
        if pos == -1:
            # no wildcard expressions left to specialize in the glob
            return list(current.keys())
        char_sets = {}
        for g, p in six.iteritems(current):
            char_sets[g] = sorted({path[pos] for path in p})
        # Stop specializing once the glob count would exceed `limit`; shrink
        # the remaining wildcard to the digits actually seen instead.
        if sum(len(s) for s in char_sets.values()) > limit:
            return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
        for g, s in six.iteritems(char_sets):
            for c in s:
                new_glob = g.replace('[0-9]', c, 1)
                new_paths = list(filter(lambda p: p[pos] == c, current[g]))
                current[new_glob] = new_paths
            del current[g]
def most_common(items):
    """
    Wanted functionality from Counters (new in Python 2.7).

    Returns an ``(item, count)`` pair for the most frequent item. Ties are
    broken by dict iteration order (first maximal entry encountered wins).
    """
    counts = {}
    for i in items:
        counts.setdefault(i, 0)
        counts[i] += 1
    # dict.items() yields the same pairs on both Python 2 and 3, so the
    # previous six.iteritems indirection was unnecessary here.
    return max(counts.items(), key=operator.itemgetter(1))
def _get_per_location_glob(tasks, outputs, regexes):
    """
    Builds a glob listing existing output paths.
    Esoteric reverse engineering, but worth it given that (compared to an
    equivalent contiguousness guarantee by naive complete() checks)
    requests to the filesystem are cut by orders of magnitude, and users
    don't even have to retrofit existing tasks anyhow.
    """
    paths = [o.path for o in outputs]
    # naive, because some matches could be confused by numbers earlier
    # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
    matches = [r.search(p) for r, p in zip(regexes, paths)]
    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
    n_groups = len(matches[0].groups())
    # the most common position of every group is likely
    # to be conclusive hit or miss
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]
    glob = list(paths[0])  # FIXME sanity check that it's the same for all paths
    # Replace every datetime component's span with '[0-9]' wildcards.
    for start, end in positions:
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # chop off the last path item
    # (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
    return ''.join(glob).rsplit('/', 1)[0]
def _get_filesystems_and_globs(datetime_to_task, datetime_to_re):
    """
    Yields a (filesystem, glob) tuple per every output location of task.
    The task can have one or several FileSystemTarget outputs.
    For convenience, the task can be a luigi.WrapperTask,
    in which case outputs of all its dependencies are considered.
    """
    # probe some scattered datetimes unlikely to all occur in paths, other than by being sincere datetime parameter's representations
    # TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness
    sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)]
    regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes]
    sample_tasks = [datetime_to_task(d) for d in sample_datetimes]
    sample_outputs = [flatten_output(t) for t in sample_tasks]
    # Every sampled instance must expose the same number of FileSystemTarget
    # outputs, otherwise the per-location glob construction is unsound.
    for o, t in zip(sample_outputs, sample_tasks):
        if len(o) != len(sample_outputs[0]):
            raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0]))
            # TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed
            # erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME?
        for target in o:
            if not isinstance(target, FileSystemTarget):
                raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t))
    for o in zip(*sample_outputs):  # transposed, so here we're iterating over logical outputs, not datetimes
        glob = _get_per_location_glob(sample_tasks, o, regexes)
        yield o[0].fs, glob
def _list_existing(filesystem, glob, paths):
    """
    Get all the paths that do in fact exist. Returns a set of all existing paths.

    Takes a luigi.target.FileSystem object, a str which represents a glob and
    a list of strings representing paths.
    """
    constrained_globs = _constrain_glob(glob, paths)
    started_at = time.time()
    found = []
    # list each narrowed glob in a stable order; skip ones with no matches
    for pattern in sorted(constrained_globs):
        logger.debug('Listing %s', pattern)
        if filesystem.exists(pattern):
            found += filesystem.listdir(pattern)
    logger.debug('%d %s listings took %f s to return %d items',
                 len(constrained_globs), filesystem.__class__.__name__,
                 time.time() - started_at, len(found))
    return set(found)
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
    """
    Efficiently determines missing datetimes by filesystem listing.

    The current implementation works for the common case of a task writing
    output to a ``FileSystemTarget`` whose path is built using strftime with
    format like '...%Y...%m...%d...%H...', without custom ``complete()`` or
    ``exists()``.

    (Eventually Luigi could have ranges of completion as first-class citizens.
    Then this listing business could be factored away/be provided for
    explicitly in target API or some kind of a history server.)
    """
    fs_and_globs = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
    paths_per_datetime = [
        [target.path for target in flatten_output(datetime_to_task(d))]
        for d in datetimes
    ]

    existing = set()
    # transposed, so here we're iterating over logical outputs, not datetimes
    for (fs, glob), location_paths in zip(fs_and_globs, zip(*paths_per_datetime)):
        existing |= _list_existing(fs, glob, location_paths)

    # quickly learn everything that's missing: a datetime is missing unless
    # every one of its output paths was found in the listings
    return [d for d, paths in zip(datetimes, paths_per_datetime)
            if not set(paths) <= existing]
class RangeMonthly(RangeBase):
    """
    Produces a contiguous completed range of a monthly recurring task.

    Unlike the Range* classes with shorter intervals, this class does not perform bulk optimisation.
    It is assumed that the number of months is low enough not to motivate the increased complexity.
    Hence, there is no class RangeMonthlyBase.
    """
    start = luigi.MonthParameter(
        default=None,
        description="beginning month, inclusive. Default: None - work backward forever (requires reverse=True)")
    stop = luigi.MonthParameter(
        default=None,
        description="ending month, exclusive. Default: None - work forward forever")
    months_back = luigi.IntParameter(
        default=13,  # Little over a year
        description=("extent to which contiguousness is to be assured into "
                     "past, in months from current time. Prevents infinite loop "
                     "when start is none. If the dataset has limited retention"
                     " (i.e. old outputs get removed), this should be set "
                     "shorter to that, too, to prevent the oldest outputs "
                     "flapping. Increase freely if you intend to process old "
                     "dates - worker's memory is the limit"))
    months_forward = luigi.IntParameter(
        default=0,
        description="extent to which contiguousness is to be assured into future, in months from current time. "
                    "Prevents infinite loop when stop is none")

    def datetime_to_parameter(self, dt):
        # Collapse any datetime to the first day of its month, as a date.
        return date(dt.year, dt.month, 1)

    def parameter_to_datetime(self, p):
        # Inverse of datetime_to_parameter: month parameter -> datetime at month start.
        return datetime(p.year, p.month, 1)

    def datetime_to_parameters(self, dt):
        """
        Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
        """
        return self._task_parameters(dt.date())

    def parameters_to_datetime(self, p):
        """
        Given a dictionary of parameters, will extract the ranged task parameter value
        """
        dt = p[self._param_name]
        return datetime(dt.year, dt.month, 1)

    def _format_datetime(self, dt):
        # Render e.g. '2014-01' for progress/error messages.
        return dt.strftime('%Y-%m')

    def moving_start(self, now):
        # Earliest point contiguousness is assured for (see months_back).
        return now - relativedelta(months=self.months_back)

    def moving_stop(self, now):
        # Latest point contiguousness is assured for (see months_forward).
        return now + relativedelta(months=self.months_forward)

    def finite_datetimes(self, finite_start, finite_stop):
        """
        Simply returns the points in time that correspond to turn of month.
        """
        # Align to the first of the start month; candidates before finite_start
        # are filtered out below.
        start_date = datetime(finite_start.year, finite_start.month, 1)
        dates = []
        for m in itertools.count():
            t = start_date + relativedelta(months=m)
            if t >= finite_stop:
                return dates
            if t >= finite_start:
                dates.append(t)
class RangeDaily(RangeDailyBase):
    """Efficiently produces a contiguous completed range of a daily recurring
    task that takes a single ``DateParameter``.

    Falls back to infer it from output filesystem listing to facilitate the
    common case usage.

    Convenient to use even from command line, like:

    .. code-block:: console

        luigi --module your.module RangeDaily --of YourActualTask --start 2014-01-01
    """

    def missing_datetimes(self, finite_datetimes):
        """Return the subset of ``finite_datetimes`` whose task is incomplete.

        Prefers the task's ``bulk_complete`` classmethod to cover gaps
        efficiently; falls back to filesystem listing when it raises
        ``NotImplementedError``.
        """
        try:
            cls_with_params = functools.partial(self.of, **self.of_params)
            # Materialize the map() iterator so bulk_complete implementations
            # can iterate the parameters more than once or take len(); this
            # also makes the call consistent with RangeHourly.
            complete_parameters = self.of.bulk_complete.__func__(
                cls_with_params, list(map(self.datetime_to_parameter, finite_datetimes)))
            return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
        except NotImplementedError:
            return infer_bulk_complete_from_fs(
                finite_datetimes,
                lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
                lambda d: d.strftime('(%Y).*(%m).*(%d)'))
class RangeHourly(RangeHourlyBase):
    """Efficiently produces a contiguous completed range of an hourly recurring
    task that takes a single ``DateHourParameter``.

    Benefits from ``bulk_complete`` information to efficiently cover gaps.

    Falls back to infer it from output filesystem listing to facilitate the
    common case usage.

    Convenient to use even from command line, like:

    .. code-block:: console

        luigi --module your.module RangeHourly --of YourActualTask --start 2014-01-01T00
    """

    def missing_datetimes(self, finite_datetimes):
        """Return the subset of ``finite_datetimes`` whose task is incomplete.

        Prefers the task's ``bulk_complete`` classmethod; falls back to
        filesystem listing when it raises ``NotImplementedError``.
        """
        try:
            partial_cls = functools.partial(self.of, **self.of_params)
            # bulk_complete gets a concrete list (not a lazy map iterator) so
            # implementations may iterate it more than once or take len()
            hour_parameters = list(map(self.datetime_to_parameter, finite_datetimes))
            completed = self.of.bulk_complete.__func__(partial_cls, hour_parameters)
            return set(finite_datetimes) - set(map(self.parameter_to_datetime, completed))
        except NotImplementedError:
            return infer_bulk_complete_from_fs(
                finite_datetimes,
                lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
                lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))
class RangeByMinutes(RangeByMinutesBase):
    """Efficiently produces a contiguous completed range of an recurring
    task every interval minutes that takes a single ``DateMinuteParameter``.

    Benefits from ``bulk_complete`` information to efficiently cover gaps.

    Falls back to infer it from output filesystem listing to facilitate the
    common case usage.

    Convenient to use even from command line, like:

    .. code-block:: console

        luigi --module your.module RangeByMinutes --of YourActualTask --start 2014-01-01T0123
    """

    def missing_datetimes(self, finite_datetimes):
        """Return the subset of ``finite_datetimes`` whose task is incomplete.

        Prefers the task's ``bulk_complete`` classmethod to cover gaps
        efficiently; falls back to filesystem listing when it raises
        ``NotImplementedError``.
        """
        try:
            cls_with_params = functools.partial(self.of, **self.of_params)
            # Materialize the map() iterator so bulk_complete implementations
            # can iterate the parameters more than once or take len(); this
            # also makes the call consistent with RangeHourly.
            complete_parameters = self.of.bulk_complete.__func__(
                cls_with_params, list(map(self.datetime_to_parameter, finite_datetimes)))
            return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
        except NotImplementedError:
            return infer_bulk_complete_from_fs(
                finite_datetimes,
                lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
                lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H).*(%M)'))
|
Magnetic/luigi
|
luigi/tools/range.py
|
Python
|
apache-2.0
| 34,017
|
# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import sys
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
import six
from neutron.common import constants
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db.qos import models as qos_db_model
from neutron.objects import base
from neutron.objects import common_types
from neutron.services.qos import qos_consts
DSCP_MARK = 'dscp_mark'


def get_rules(context, qos_policy_id):
    """Fetch every QoS rule object attached to the given policy.

    Resolves the Qos<Type>Rule class in this module for each valid rule
    type and collects all matching objects inside one nested transaction.
    """
    collected = []
    this_module = sys.modules[__name__]
    with db_api.autonested_transaction(context.session):
        for rule_type in qos_consts.VALID_RULE_TYPES:
            rule_cls = getattr(this_module, 'Qos%sRule' % utils.camelize(rule_type))
            collected.extend(
                rule_cls.get_objects(context, qos_policy_id=qos_policy_id))
    return collected
@six.add_metaclass(abc.ABCMeta)
class QosRule(base.NeutronDbObject):
    # Version 1.0: Initial version, only BandwidthLimitRule
    #         1.1: Added DscpMarkingRule
    #
    # NOTE(mangelajo): versions need to be handled from the top QosRule object
    #                  because it's the only reference QosPolicy can make
    #                  to them via obj_relationships version map
    VERSION = '1.1'

    fields = {
        'id': obj_fields.UUIDField(),
        'qos_policy_id': obj_fields.UUIDField()
    }

    fields_no_update = ['id', 'qos_policy_id']

    # should be redefined in subclasses
    rule_type = None

    def to_dict(self):
        """Serialize the rule, tagging the dict with this rule's type."""
        serialized = super(QosRule, self).to_dict()
        serialized['type'] = self.rule_type
        return serialized

    def should_apply_to_port(self, port):
        """Check whether a rule can be applied to a specific port.

        This function has the logic to decide whether a rule should
        be applied to a port or not, depending on the source of the
        policy (network, or port). Eventually rules could override
        this method, or we could make it abstract to allow different
        rule behaviour.
        """
        # A port-scoped rule always applies; a network-scoped rule applies
        # unless the port belongs to a network device.
        rule_is_port_scoped = self.qos_policy_id == port[qos_consts.QOS_POLICY_ID]
        port_is_network_device = any(
            port['device_owner'].startswith(prefix)
            for prefix in constants.DEVICE_OWNER_PREFIXES)
        return rule_is_port_scoped or not port_is_network_device
@obj_base.VersionedObjectRegistry.register
class QosBandwidthLimitRule(QosRule):
    # Bandwidth-limit QoS rule: caps rate and burst.

    # backing ORM model for this versioned object
    db_model = qos_db_model.QosBandwidthLimitRule

    fields = {
        # nullable: a rule may be created without either value set
        # (units are presumably kilobits per the field names - TODO confirm)
        'max_kbps': obj_fields.IntegerField(nullable=True),
        'max_burst_kbps': obj_fields.IntegerField(nullable=True)
    }

    rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
@obj_base.VersionedObjectRegistry.register
class QosDscpMarkingRule(QosRule):
    # DSCP-marking QoS rule (added in QosRule VERSION 1.1).

    # backing ORM model for this versioned object
    db_model = qos_db_model.QosDscpMarkingRule

    fields = {
        # field name shared via the module-level DSCP_MARK constant
        DSCP_MARK: common_types.DscpMarkField(),
    }

    rule_type = qos_consts.RULE_TYPE_DSCP_MARK
|
wolverineav/neutron
|
neutron/objects/qos/rule.py
|
Python
|
apache-2.0
| 3,646
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.