| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
"""
incisive: Tiny library for handling csv
=======================================
"""
from .core import read_csv, write_csv, format_to_csv
__version__ = '0.1.0'
__title__ = 'incisive'
__author__ = 'Taurus Olson'
__license__ = 'MIT'
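# Editor's sketch (not part of the original package): a hypothetical round
# trip using the re-exported helpers. The signatures below are assumptions
# based only on the names, not on the actual incisive API, so this is left
# commented out.
#
# from incisive import read_csv, write_csv
# rows = read_csv("data.csv")    # "data.csv" is a placeholder path
# write_csv("copy.csv", rows)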
| TaurusOlson/incisive | incisive/__init__.py | Python | mit | 238 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTraceback2(PythonPackage):
"""Backports of the traceback module"""
homepage = "https://github.com/testing-cabal/traceback2"
url = "https://pypi.io/packages/source/t/traceback2/traceback2-1.4.0.tar.gz"
version('1.4.0', sha256='05acc67a09980c2ecfedd3423f7ae0104839eccb55fc645773e1caa0951c3030')
depends_on('py-setuptools', type='build')
depends_on('py-pbr', type='build')
# test-requirements.txt
depends_on('py-contextlib2', type='test')
depends_on('py-fixtures', type='test')
depends_on('py-testtools', type='test')
depends_on('py-unittest2', type='test')
| iulian787/spack | var/spack/repos/builtin/packages/py-traceback2/package.py | Python | lgpl-2.1 | 836 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
from nototools import summary
from nototools.py23 import unichr
class HbInputGenerator(object):
"""Provides functions to generate harbuzz input.
The input is returned as a list of strings, suitable for passing into
subprocess.call or something similar.
"""
def __init__(self, font):
self.font = font
self.memo = {}
self.reverse_cmap = build_reverse_cmap(self.font)
self.widths = {}
glyph_set = font.getGlyphSet()
for name in glyph_set.keys():
glyph = glyph_set[name]
if glyph.width:
width = glyph.width
elif hasattr(glyph._glyph, "xMax"):
width = abs(glyph._glyph.xMax - glyph._glyph.xMin)
else:
width = 0
self.widths[name] = width
# some stripped fonts don't have space
try:
space_name = font["cmap"].tables[0].cmap[0x20]
self.space_width = self.widths[space_name]
        except (KeyError, IndexError):
self.space_width = -1
def all_inputs(self, warn=False):
"""Generate harfbuzz inputs for all glyphs in a given font."""
inputs = []
glyph_set = self.font.getGlyphSet()
for name in self.font.getGlyphOrder():
is_zero_width = glyph_set[name].width == 0
cur_input = self.input_from_name(name, pad=is_zero_width)
if cur_input is not None:
inputs.append(cur_input)
elif warn:
print("not tested (unreachable?): %s" % name)
return inputs
def input_from_name(self, name, seen=None, pad=False):
"""Given glyph name, return input to harbuzz to render this glyph.
Returns input in the form of a (features, text) tuple, where `features`
is a list of feature tags to activate and `text` is an input string.
Argument `seen` is used by the method to avoid following cycles when
recursively looking for possible input. `pad` can be used to add
whitespace to text output, for non-spacing glyphs.
Can return None in two situations: if no possible input is found (no
simple unicode mapping or substitution rule exists to generate the
glyph), or if the requested glyph already exists in `seen` (in which
case this path of generating input should not be followed further).
"""
if name in self.memo:
return self.memo[name]
inputs = []
# avoid following cyclic paths through features
if seen is None:
seen = set()
if name in seen:
return None
seen.add(name)
# see if this glyph has a simple unicode mapping
if name in self.reverse_cmap:
text = unichr(self.reverse_cmap[name])
inputs.append(((), text))
# check the substitution features
inputs.extend(self._inputs_from_gsub(name, seen))
seen.remove(name)
# since this method sometimes returns None to avoid cycles, the
# recursive calls that it makes might have themselves returned None,
# but we should avoid returning None here if there are other options
inputs = [i for i in inputs if i is not None]
if not inputs:
return None
features, text = min(inputs)
# can't pad if we don't support space
if pad and self.space_width > 0:
width, space = self.widths[name], self.space_width
padding = " " * (width // space + (1 if width % space else 0))
text = padding + text
self.memo[name] = features, text
return self.memo[name]
def _inputs_from_gsub(self, name, seen):
"""Check GSUB for possible input yielding glyph with given name.
The `seen` argument is passed in from the original call to
input_from_name().
"""
inputs = []
if "GSUB" not in self.font:
return inputs
gsub = self.font["GSUB"].table
if gsub.LookupList is None:
return inputs
for lookup_index, lookup in enumerate(gsub.LookupList.Lookup):
for st in lookup.SubTable:
# see if this glyph can be a single-glyph substitution
if lookup.LookupType == 1:
for glyph, subst in st.mapping.items():
if subst == name:
inputs.append(
self._input_with_context(
gsub, [glyph], lookup_index, seen
)
)
# see if this glyph is a ligature
elif lookup.LookupType == 4:
for prefix, ligatures in st.ligatures.items():
for ligature in ligatures:
if ligature.LigGlyph == name:
glyphs = [prefix] + list(ligature.Component)
inputs.append(
self._input_with_context(
gsub, glyphs, lookup_index, seen
)
)
return inputs
def _input_with_context(self, gsub, glyphs, target_i, seen):
"""Given GSUB, input glyphs, and target lookup index, return input to
harfbuzz to render the input glyphs with the target lookup activated.
"""
inputs = []
# try to get a feature tag to activate this lookup
for feature in gsub.FeatureList.FeatureRecord:
if target_i in feature.Feature.LookupListIndex:
inputs.append(
self._sequence_from_glyph_names(glyphs, (feature.FeatureTag,), seen)
)
for cur_i, lookup in enumerate(gsub.LookupList.Lookup):
# try contextual substitutions
if lookup.LookupType == 5:
for st in lookup.SubTable:
# TODO handle format 3
if st.Format == 1:
inputs.extend(
self._input_from_5_1(
gsub, st, glyphs, target_i, cur_i, seen
)
)
if st.Format == 2:
inputs.extend(
self._input_from_5_2(
gsub, st, glyphs, target_i, cur_i, seen
)
)
# try chaining substitutions
if lookup.LookupType == 6:
for st in lookup.SubTable:
# TODO handle format 2
if st.Format == 1:
inputs.extend(
self._input_from_6_1(
gsub, st, glyphs, target_i, cur_i, seen
)
)
if st.Format == 3:
inputs.extend(
self._input_from_6_3(
gsub, st, glyphs, target_i, cur_i, seen
)
)
inputs = [i for i in inputs if i is not None]
return min(inputs) if inputs else None
def _input_from_5_1(self, gsub, st, glyphs, target_i, cur_i, seen):
"""Return inputs from GSUB type 5.1 (simple context) rules."""
inputs = []
for ruleset in st.SubRuleSet:
for rule in ruleset.SubRule:
if not any(
subst_lookup.LookupListIndex == target_i
for subst_lookup in rule.SubstLookupRecord
):
continue
for prefix in st.Coverage.glyphs:
input_glyphs = [prefix] + rule.Input
if not self._is_sublist(input_glyphs, glyphs):
continue
inputs.append(
self._input_with_context(gsub, input_glyphs, cur_i, seen)
)
return inputs
def _input_from_5_2(self, gsub, st, glyphs, target_i, cur_i, seen):
"""Return inputs from GSUB type 5.2 (class-based context) rules."""
inputs = []
prefixes = st.Coverage.glyphs
class_defs = st.ClassDef.classDefs.items()
for ruleset in st.SubClassSet:
if ruleset is None:
continue
for rule in ruleset.SubClassRule:
classes = [[n for n, c in class_defs if c == cls] for cls in rule.Class]
input_lists = [prefixes] + classes
input_glyphs = self._min_permutation(input_lists, glyphs)
if not (
any(
subst_lookup.LookupListIndex == target_i
for subst_lookup in rule.SubstLookupRecord
)
and self._is_sublist(input_glyphs, glyphs)
):
continue
inputs.append(self._input_with_context(gsub, input_glyphs, cur_i, seen))
return inputs
def _input_from_6_1(self, gsub, st, glyphs, target_i, cur_i, seen):
"""Return inputs from GSUB type 6.1 (simple chaining) rules."""
inputs = []
for ruleset in st.ChainSubRuleSet:
for rule in ruleset.ChainSubRule:
if not any(
subst_lookup.LookupListIndex == target_i
for subst_lookup in rule.SubstLookupRecord
):
continue
for prefix in st.Coverage.glyphs:
input_glyphs = [prefix] + rule.Input
if not self._is_sublist(input_glyphs, glyphs):
continue
if rule.LookAhead:
input_glyphs = input_glyphs + rule.LookAhead
if rule.Backtrack:
bt = list(reversed(rule.Backtrack))
input_glyphs = bt + input_glyphs
inputs.append(
self._input_with_context(gsub, input_glyphs, cur_i, seen)
)
return inputs
def _input_from_6_3(self, gsub, st, glyphs, target_i, cur_i, seen):
"""Return inputs from GSUB type 6.3 (coverage-based chaining) rules."""
input_lists = [c.glyphs for c in st.InputCoverage]
input_glyphs = self._min_permutation(input_lists, glyphs)
if not (
any(
subst_lookup.LookupListIndex == target_i
for subst_lookup in st.SubstLookupRecord
)
and self._is_sublist(input_glyphs, glyphs)
):
return []
if st.LookAheadCoverage:
la = [min(c.glyphs) for c in st.LookAheadCoverage]
input_glyphs = input_glyphs + la
if st.BacktrackCoverage:
bt = list(reversed([min(c.glyphs) for c in st.BacktrackCoverage]))
input_glyphs = bt + input_glyphs
return [self._input_with_context(gsub, input_glyphs, cur_i, seen)]
def _sequence_from_glyph_names(self, glyphs, features, seen):
"""Return a sequence of glyphs from glyph names."""
text = []
for glyph in glyphs:
cur_input = self.input_from_name(glyph, seen)
if cur_input is None:
return None
cur_features, cur_text = cur_input
features += cur_features
text.append(cur_text)
return features, "".join(text)
def _min_permutation(self, lists, target):
"""Deterministically select a permutation, containing target list as a
sublist, of items picked one from each input list.
"""
if not all(lists):
return []
i = 0
j = 0
res = [None for _ in range(len(lists))]
for cur_list in lists:
if j < len(target) and target[j] in cur_list:
res[i] = target[j]
j += 1
else:
res[i] = min(cur_list)
i += 1
if j < len(target):
return []
return res
def _is_sublist(self, lst, sub):
"""Return whether sub is a sub-list of lst."""
return any(lst[i : i + len(sub)] == sub for i in range(1 + len(lst) - len(sub)))
def build_reverse_cmap(font):
"""Build a dictionary mapping glyph names to unicode values.
Maps each name to its smallest unicode value.
"""
cmap_items = summary.get_largest_cmap(font).items()
return {n: v for v, n in reversed(sorted(cmap_items))}
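# Editor's usage sketch (not part of the original module), assuming a
# TrueType font loaded with fontTools; "MyFont.ttf" is a hypothetical path.
if __name__ == "__main__":
    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")
    gen = HbInputGenerator(font)
    # Each input is a (features, text) pair: OpenType feature tags to
    # activate, and the text that should produce the glyph when shaped.
    for features, text in gen.all_inputs(warn=True):
        print(features, repr(text))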
| googlefonts/nototools | nototools/hb_input.py | Python | apache-2.0 | 13,368 |
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from itertools import chain
class BayesianEncoder(object):
def __init__(self, config):
self.inputs = [ev.placeholder(config) for ev in config.evidence]
exists = [ev.exists(i) for ev, i in zip(config.evidence, self.inputs)]
zeros = tf.zeros([config.batch_size, config.latent_size], dtype=tf.float32)
# Compute the denominator used for mean and covariance
for ev in config.evidence:
ev.init_sigma(config)
d = [tf.where(exist, tf.tile([1. / tf.square(ev.sigma)], [config.batch_size]),
tf.zeros(config.batch_size)) for ev, exist in zip(config.evidence, exists)]
d = 1. + tf.reduce_sum(tf.stack(d), axis=0)
denom = tf.tile(tf.reshape(d, [-1, 1]), [1, config.latent_size])
# Compute the mean of Psi
with tf.variable_scope('mean'):
# 1. compute encoding
self.encodings = [ev.encode(i, config) for ev, i in zip(config.evidence, self.inputs)]
encodings = [encoding / tf.square(ev.sigma) for ev, encoding in
zip(config.evidence, self.encodings)]
# 2. pick only encodings from valid inputs that exist, otherwise pick zero encoding
encodings = [tf.where(exist, enc, zeros) for exist, enc in zip(exists, encodings)]
# 3. tile the encodings according to each evidence type
encodings = [[enc] * ev.tile for ev, enc in zip(config.evidence, encodings)]
encodings = tf.stack(list(chain.from_iterable(encodings)))
# 4. compute the mean of non-zero encodings
self.psi_mean = tf.reduce_sum(encodings, axis=0) / denom
# Compute the covariance of Psi
with tf.variable_scope('covariance'):
I = tf.ones([config.batch_size, config.latent_size], dtype=tf.float32)
self.psi_covariance = I / denom
class BayesianDecoder(object):
def __init__(self, config, initial_state, infer=False):
cells1, cells2 = [], []
for _ in range(config.decoder.num_layers):
cells1.append(tf.nn.rnn_cell.GRUCell(config.decoder.units))
cells2.append(tf.nn.rnn_cell.GRUCell(config.decoder.units))
self.cell1 = tf.nn.rnn_cell.MultiRNNCell(cells1)
self.cell2 = tf.nn.rnn_cell.MultiRNNCell(cells2)
# placeholders
self.initial_state = [initial_state] * config.decoder.num_layers
self.nodes = [tf.placeholder(tf.int32, [config.batch_size], name='node{0}'.format(i))
for i in range(config.decoder.max_ast_depth)]
self.edges = [tf.placeholder(tf.bool, [config.batch_size], name='edge{0}'.format(i))
for i in range(config.decoder.max_ast_depth)]
# projection matrices for output
self.projection_w = tf.get_variable('projection_w', [self.cell1.output_size,
config.decoder.vocab_size])
self.projection_b = tf.get_variable('projection_b', [config.decoder.vocab_size])
# setup embedding
with tf.variable_scope('decoder'):
emb = tf.get_variable('emb', [config.decoder.vocab_size, config.decoder.units])
def loop_fn(prev, _):
prev = tf.nn.xw_plus_b(prev, self.projection_w, self.projection_b)
prev_symbol = tf.argmax(prev, 1)
return tf.nn.embedding_lookup(emb, prev_symbol)
loop_function = loop_fn if infer else None
emb_inp = (tf.nn.embedding_lookup(emb, i) for i in self.nodes)
# the decoder (modified from tensorflow's seq2seq library to fit tree RNNs)
# TODO: update with dynamic decoder (being implemented in tf) once it is released
with tf.variable_scope('rnn'):
self.state = self.initial_state
self.outputs = []
prev = None
for i, inp in enumerate(emb_inp):
if loop_function is not None and prev is not None:
with tf.variable_scope('loop_function', reuse=True):
inp = loop_function(prev, i)
if i > 0:
tf.get_variable_scope().reuse_variables()
with tf.variable_scope('cell1'): # handles CHILD_EDGE
output1, state1 = self.cell1(inp, self.state)
with tf.variable_scope('cell2'): # handles SIBLING_EDGE
output2, state2 = self.cell2(inp, self.state)
output = tf.where(self.edges[i], output1, output2)
self.state = [tf.where(self.edges[i], state1[j], state2[j])
for j in range(config.decoder.num_layers)]
self.outputs.append(output)
if loop_function is not None:
prev = output
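# Editor's sketch (not part of the original file): BayesianEncoder combines
# per-evidence encodings by precision weighting,
#     psi_mean = sum_i(enc_i / sigma_i^2) / (1 + sum_i(1 / sigma_i^2))
#     psi_cov  = I / (1 + sum_i(1 / sigma_i^2)),
# mirroring the `d`/`denom` computation above. A tiny NumPy check of that
# arithmetic with made-up numbers:
if __name__ == '__main__':
    import numpy as np
    encs = [np.array([1., 2.]), np.array([3., 4.])]  # hypothetical encodings
    sigmas = [0.5, 1.0]                              # hypothetical noise scales
    denom = 1. + sum(1. / s ** 2 for s in sigmas)
    psi_mean = sum(e / s ** 2 for e, s in zip(encs, sigmas)) / denom
    psi_cov = np.ones(2) / denom
    print(psi_mean, psi_cov)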
| capergroup/bayou | src/main/python/bayou/models/low_level_evidences/architecture.py | Python | apache-2.0 | 5,511 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from midonetclient import url_provider
from midonetclient import util
from midonetclient import vendor_media_type as mt
LOG = logging.getLogger(__name__)
class ChainRuleUrlProviderMixin(url_provider.UrlProviderMixin):
"""ChainRule URL provider mixin
This mixin provides URLs for chain rules.
"""
def chain_url(self, chain_id):
return self.template_url("chain_template", chain_id)
def chains_url(self):
return self.resource_url("chains")
def rule_url(self, rule_id):
return self.template_url("rule_template", rule_id)
def rules_url(self, chain_id):
return self.chain_url(chain_id) + "/rules"
class ChainRuleClientMixin(ChainRuleUrlProviderMixin):
"""ChainRule mixin
Mixin that defines all the Neutron chain rule operations in MidoNet API.
"""
@util.convert_case
def create_chain(self, chain):
LOG.info("create_chain %r", chain)
return self.client.post(self.chains_url(),
mt.APPLICATION_CHAIN_JSON, body=chain)
def delete_chain(self, chain_id):
LOG.info("delete_chain %r", chain_id)
self.client.delete(self.chain_url(chain_id))
@util.convert_case
def get_chain(self, chain_id, fields=None):
LOG.info("get_chain %r", chain_id)
return self.client.get(self.chain_url(chain_id),
mt.APPLICATION_CHAIN_JSON)
@util.convert_case
def get_chains(self, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
LOG.info("get_chains")
return self.client.get(self.chains_url(),
mt.APPLICATION_CHAIN_COLLECTION_JSON)
@util.convert_case
def update_chain(self, chain):
LOG.info("update_chain %r", chain)
return self.client.put(self.chain_url(chain["id"]),
mt.APPLICATION_CHAIN_JSON, chain)
@util.convert_case
def create_chain_rule(self, rule):
LOG.info("create_chain_rule %r", rule)
# convert_case converted to camel
return self.client.post(self.rules_url(rule["chainId"]),
mt.APPLICATION_RULE_JSON, body=rule)
def delete_chain_rule(self, rule_id):
LOG.info("delete_chain_rule %r", rule_id)
self.client.delete(self.rule_url(rule_id))
@util.convert_case
def get_chain_rule(self, rule_id):
LOG.info("get_chain_rule %r", rule_id)
return self.client.get(self.rule_url(rule_id),
mt.APPLICATION_RULE_JSON)
@util.convert_case
def get_chain_rules(self, chain_id):
LOG.info("get_chain_rules %r", chain_id)
return self.client.get(self.rules_url(chain_id),
mt.APPLICATION_RULE_COLLECTION_JSON)
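# Editor's sketch (not part of the original module): these mixins expect the
# concrete class to supply `self.client`, an HTTP client exposing
# get/post/put/delete, plus the URL templates resolved by UrlProviderMixin.
# A hypothetical composition:
class ExampleChainRuleClient(ChainRuleClientMixin):
    def __init__(self, http_client):
        # `http_client` is assumed to implement the midonetclient HTTP
        # client interface; illustrative only.
        self.client = http_client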
| celebdor/python-midonetclient | src/midonetclient/neutron/chain_rule.py | Python | apache-2.0 | 3,528 |
from growler_guys import scrape_growler_guys
| ryanpitts/growlerbot | scrapers/__init__.py | Python | mit | 45 |
import matplotlib.pyplot as plt
import numpy as np
import pdb
if __name__ == "__main__":
fig, ax = plt.subplots(figsize=(10,5))
for clients in (10, 50, 100, 200):
median_data = np.zeros(5)
for k in (1, 2, 3, 4, 5):
data = np.loadtxt("loss_" + str(clients) + "_" + str(k) + ".csv", delimiter=',')
median_data[k-1] = data.shape[0]
print str(clients) + " median is " + str(np.median(median_data))
print str(clients) + " stddev is " + str(np.std(median_data))
data1 = np.loadtxt("loss_10_2.csv", delimiter=',')
data2 = np.loadtxt("loss_50_2.csv", delimiter=',')
data3 = np.loadtxt("loss_100_2.csv", delimiter=',')
data4 = np.loadtxt("loss_200_2.csv", delimiter=',')
plt.plot(data1, color="black", label="10 clients", lw=5)
plt.plot(data2, color="red", label="50 clients", lw=5)
plt.plot(data3, color="orange", label="100 clients", lw=5)
plt.plot(data4, color="green", label="200 clients", lw=5)
plt.legend(loc='best', ncol=1, fontsize=18)
plt.xlabel("Time (s)", fontsize=22)
plt.ylabel("Training Error", fontsize=22)
axes = plt.gca()
axes.set_ylim([0, 0.5])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.setp(ax.get_xticklabels(), fontsize=18)
plt.setp(ax.get_yticklabels(), fontsize=18)
plt.tight_layout()
plt.show()
| DistributedML/TorML | eurosys-eval/results_tor_no_tor/makeplot.py | Python | mit | 1,404 |
from django.db import models
class Report(models.Model):
id = models.IntegerField(primary_key = True)
name = models.CharField(max_length = 200)
url = models.CharField(max_length = 10)
def __unicode__(self):
return self.url
| fedorahungary/fedinv | fedinv/swag_reports/models.py | Python | gpl-2.0 | 233 |
###
# Copyright (c) 2013, Frumious Bandersnatch
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Goo', True)
Goo = conf.registerPlugin('Goo')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Goo, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| kg-bot/SupyBot | plugins/Goo/config.py | Python | gpl-3.0 | 2,348 |
#!/usr/bin/env python
""":mod:`Redirection <testbed.resources._redirect>` tests."""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import json as _json
import unittest as _unittest
from urllib import quote as _percent_encode
import napper as _napper
import spruce.logging as _logging
import testbed.testing as _testbedtest
class TestRedirections(_testbedtest.TestTestbed):
@property
def webservice_path(self):
return '/redirect'
@property
def webservice_probe_path(self):
return self._redirect_path
def _create_requests_session(self):
return _napper.WebRequestSession(follow_redirects=False)
class TestResponseRedirection(TestRedirections):
def test_get_response_redirect(self):
response = self.request('get', self._redirect_path)
self.assert_response_redirect_response(response,
loc=self._redirect_loc)
def test_get_response_redirect_as_html(self):
response = self.request('get',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response,
loc=self._redirect_loc,
contenttype='text/html')
def test_post_response_redirect(self):
response = self.request('post', self._redirect_path)
self.assert_response_redirect_response(response,
loc=self._redirect_loc)
def test_post_response_redirect_as_html(self):
response = self.request('post',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response,
loc=self._redirect_loc,
contenttype='text/html')
def test_postget_response_redirect(self):
response = self.request('postget', self._redirect_path)
self.assert_response_redirect_response(response,
loc=self._redirect_loc)
def test_postget_response_redirect_as_html(self):
response = self.request('postget',
self._redirect_path,
accept_mediaranges=('text/html',
'*/*; q=0.01'))
self.assert_response_redirect_response(response,
loc=self._redirect_loc,
contenttype='text/html')
@property
def _redirect_loc(self):
return 'aoeu'
@property
def _redirect_path(self):
return 'response;loc={}'\
.format(_percent_encode(_json.dumps(self._redirect_loc),
safe=''))
class _TestRedirectionsCorsWithUntrustedOriginMixin(object):
def assert_response_redirect_response(self, response, **kwargs):
kwargs_ = {}
try:
kwargs_['contenttype'] = kwargs['contenttype']
except KeyError:
pass
self.assert_cors_rejected_response(response,
exc_name='CorsOriginForbidden',
**kwargs_)
class TestRedirectionsCorsActualWithTrustedOrigin\
(_testbedtest.TestCorsWithTrustedOrigin, _testbedtest.TestCorsActual,
TestRedirections):
pass
class TestRedirectionsCorsActualWithUntrustedOrigin\
(_TestRedirectionsCorsWithUntrustedOriginMixin,
_testbedtest.TestCorsWithUntrustedOrigin,
_testbedtest.TestCorsActual, TestRedirections):
pass
class TestRedirectionsCorsPreflightWithTrustedOrigin\
(_testbedtest.TestCorsWithTrustedOrigin,
_testbedtest.TestCorsPreflight, TestRedirections):
def assert_response_redirect_response(self, response, **kwargs):
kwargs_ = {}
try:
kwargs_['contenttype'] = kwargs['contenttype']
except KeyError:
pass
self.assert_cors_preflight_accepted_response(response, **kwargs_)
class TestRedirectionsCorsPreflightWithUntrustedOrigin\
(_TestRedirectionsCorsWithUntrustedOriginMixin,
_testbedtest.TestCorsWithUntrustedOrigin,
_testbedtest.TestCorsPreflight, TestRedirections):
pass
if __name__ == '__main__':
_logging.basicConfig()
_unittest.main()
| nisavid/testbed | testbed/tests/redirect.py | Python | lgpl-3.0 | 4,763 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise "can't find toplevel directory!"
sys.path.append(os.path.join(path[0]))
from scripts import *
print "tests with regular server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0")
print "tests with AMD server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="serveramd")
print "tests with TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="servertie")
print "tests with AMD TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="serveramdtie")
print "tests with collocated server."
TestUtil.collocatedTest()
| joshmoore/zeroc-ice | cs/test/Ice/operations/run.py | Python | gpl-2.0 | 1,339 |
from direct.directnotify import DirectNotifyGlobal
from toontown.classicchars.DistributedDaisyAI import DistributedDaisyAI
class DistributedSockHopDaisyAI(DistributedDaisyAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedSockHopDaisyAI")
| silly-wacky-3-town-toon/SOURCE-COD | toontown/classicchars/DistributedSockHopDaisyAI.py | Python | apache-2.0 | 263 |
"""
"""
from neoteric.util.compat import unittest
from neoteric.resource.tracker import ResourceTracker, DuplicateResourceError
class ResourceTrackerTests(unittest.TestCase):
def test_all(self):
r = ResourceTracker()
r['test'] = 'abc123'
self.assertEqual(r['test'], 'abc123')
with self.assertRaises(DuplicateResourceError):
r['test'] = 'def456'
r.pop('test')
with self.assertRaises(KeyError):
r['test']
| j3ffhubb/neoteric | tests/neoteric/resource/test_tracker.py | Python | gpl-3.0 | 482 |
from __future__ import annotations
import copy
from typing import Dict, List
from bson import ObjectId
from mongoengine import DoesNotExist
from monkey_island.cc.models.edge import Edge
RIGHT_ARROW = "\u2192"
class EdgeService(Edge):
@staticmethod
def get_all_edges() -> List[EdgeService]:
return EdgeService.objects()
@staticmethod
def get_or_create_edge(src_node_id, dst_node_id, src_label, dst_label) -> EdgeService:
edge = None
try:
edge = EdgeService.objects.get(src_node_id=src_node_id, dst_node_id=dst_node_id)
except DoesNotExist:
edge = EdgeService(src_node_id=src_node_id, dst_node_id=dst_node_id)
finally:
if edge:
edge.update_label(node_id=src_node_id, label=src_label)
edge.update_label(node_id=dst_node_id, label=dst_label)
return edge
@staticmethod
def get_by_dst_node(dst_node_id: ObjectId) -> List[EdgeService]:
return EdgeService.objects(dst_node_id=dst_node_id)
@staticmethod
def get_edge_by_id(edge_id: ObjectId) -> EdgeService:
return EdgeService.objects.get(id=edge_id)
def update_label(self, node_id: ObjectId, label: str):
if self.src_node_id == node_id:
self.src_label = label
elif self.dst_node_id == node_id:
self.dst_label = label
else:
raise DoesNotExist(
"Node id provided does not match with any endpoint of an self provided."
)
self.save()
@staticmethod
def update_all_dst_nodes(old_dst_node_id: ObjectId, new_dst_node_id: ObjectId):
for edge in EdgeService.objects(dst_node_id=old_dst_node_id):
edge.dst_node_id = new_dst_node_id
edge.save()
@staticmethod
def get_tunnel_edges_by_src(src_node_id) -> List[EdgeService]:
try:
return EdgeService.objects(src_node_id=src_node_id, tunnel=True)
except DoesNotExist:
return []
def disable_tunnel(self):
self.tunnel = False
self.save()
def update_based_on_scan_telemetry(self, telemetry: Dict):
machine_info = copy.deepcopy(telemetry["data"]["machine"])
new_scan = {"timestamp": telemetry["timestamp"], "data": machine_info}
ip_address = machine_info.pop("ip_addr")
domain_name = machine_info.pop("domain_name")
self.scans.append(new_scan)
self.ip_address = ip_address
self.domain_name = domain_name
self.save()
def update_based_on_exploit(self, exploit: Dict):
self.exploits.append(exploit)
self.save()
if exploit["result"]:
self.set_exploited()
def set_exploited(self):
self.exploited = True
self.save()
def get_group(self) -> str:
if self.exploited:
return "exploited"
if self.tunnel:
return "tunnel"
if self.scans or self.exploits:
return "scan"
return "empty"
def get_label(self) -> str:
return f"{self.src_label} {RIGHT_ARROW} {self.dst_label}"
| guardicore/monkey | monkey/monkey_island/cc/services/edge/edge.py | Python | gpl-3.0 | 3,128 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Author: Aldo Sotolongo
# @Date: 2017-01-13 15:17:37
# @Last Modified by: Aldo Sotolongo
# @Last Modified time: 2017-01-15 21:04:43
# Description: Small program to get some info about shares in ZFS Storage Appliance.
from __future__ import print_function
import json
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# to disable warning
#InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate
#verification is strongly advised. See:
#https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
URL = "https://192.168.100.150:215/api"
ZAUTH = ("useradm", "password")
HEADER = {"Content-Type": "application/json"}
def getpools():
"""Get all Pools"""
pools = []
req = requests.get(URL + "/storage/v1/pools", auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
for item in j.values():
for pool in item:
pools.append(pool["name"])
pools = [str(x) for x in pools]
return sorted(pools)
def getprojects(poolname):
"""Get all projects in a pool"""
projects = []
req = requests.get(URL + "/storage/v1/pools/{}/projects".format(poolname), auth=ZAUTH,
verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
for item in j.values():
for project in item:
projects.append(project["name"])
projects = [str(x) for x in projects]
return sorted(projects)
def printlunsinproject(poolname, project):
"""Print LUNS info for a project"""
req = requests.get(URL + "/storage/v1/pools/{}/projects/{}/luns".format(poolname, project),
auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
for item in j.values():
if item:
for lun in sorted(item):
initiatorgroup = [str(i) for i in lun["initiatorgroup"]]
print("{:15} {:8.2f} {:15} {:20} {:10} {:34} {:8} {:25} {:8} {:4}"
.format(lun["name"], (lun["volsize"]/(1024*1024*1024)), lun["targetgroup"],
initiatorgroup, lun["status"], lun["lunguid"], lun["pool"],
lun["project"], lun["logbias"], lun["assignednumber"]))
def printfsinproject(poolname, project):
"""Print Filesystems info for a project"""
req = requests.get(URL + "/storage/v1/pools/{}/projects/{}/filesystems"
.format(poolname, project), auth=ZAUTH, verify=False, headers=HEADER)
j = json.loads(req.text)
req.close()
for item in j.values():
if item:
for fs in sorted(item):
print("{:20} {:8.2f} {:40} {:8} {:22} {:8.2f} {:8.2f} {:10} {:10} {:5} {:40} {:50}"
.format(fs["name"], (fs["space_total"]/(1024*1024*1024)), fs["mountpoint"],
fs["pool"], fs["project"], (fs["quota"]/(1024*1024*1024)),
(fs["reservation"]/(1024*1024*1024)), fs["root_user"],
fs["root_group"], fs["root_permissions"], fs["sharesmb"],
fs["sharenfs"]))
def main():
"""Main function for script"""
pools = getpools()
projects4pool = {}
# Get LUN Info
print("="*230)
print("{:15} {:8} {:15} {:20} {:10} {:34} {:8} {:25} {:8} {:4}"
.format("LUN", "Size(GB)", "TargetGroup", "InitiatorGroup", "status", "lunguid", "Pool",
"Project", "logbias", "assignednumber"))
print("="*230)
for pool in pools:
projects4pool[pool] = getprojects(pool)
for keypool, values in projects4pool.iteritems():
for projvalue in values:
printlunsinproject(keypool, projvalue)
# Get Filesystems Info
print("="*230)
print("{:20} {:8} {:40} {:8} {:22} {:8} {:8} {:10} {:10} {:5} {:40} {:50}"
.format("FSNAME", "space(GB)", "mountpoint", "pool", "project", "quota",
"reserv", "root_user", "root_group", "perm", "sharesmb", "sharenfs"))
print("="*230)
    for keypool, values in projects4pool.iteritems():
        for projvalue in values:
            printfsinproject(keypool, projvalue)
if __name__ == "__main__":
main()
| aldenso/tools | ZFS/zfssainfo.py | Python | gpl-2.0 | 4,422 |
from flask import Blueprint
from flask import session
second = Blueprint('schedule', __name__)
@second.route('/')
def home():
names = session['names']
return str(names)
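# Editor's sketch (not part of the original file): the blueprint is meant to
# be registered on a Flask app elsewhere, e.g.:
#
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(second, url_prefix='/schedule')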
| tacksoo/excel2pdf | schedule.py | Python | mit | 178 |
#!/usr/bin/env python
# Copyright (C) 2005, 2018 by INRIA
import numpy as np
# import siconos.numerics * fails with py.test!
import siconos.numerics as SN
def mcp_function(n, z, F):
M = np.array([[2., 1.],
[1., 2.]])
q = np.array([-5., -6.])
    F[:] = np.dot(M, z) + q
def mcp_Nablafunction(n, z, nabla_F):
    M = np.array([[2., 1.],
                  [1., 2.]])
    nabla_F[:] = M
# solution
zsol = np.array([4./3., 7./3.])
wsol = np.array([0. , 0.])
# problem
#mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
ztol = 1e-8
def test_new():
mcp=SN.MixedComplementarityProblem2(1, 1, mcp_function, mcp_Nablafunction)
def test_mcp_newton_FBLSA():
mcp = SN.MixedComplementarityProblem2(0, 2, mcp_function, mcp_Nablafunction)
z = np.array([0., 0.])
w = np.array([0., 0.])
SO = SN.SolverOptions(mcp, SN.SICONOS_MCP_NEWTON_FBLSA)
info = SN.mcp_newton_FBLSA(mcp, z, w, SO)
#print("z = ", z)
#print("w = ", w)
assert (np.linalg.norm(z-zsol) <= ztol)
assert not info
def test_mcp_newton_minFBLSA():
mcp = SN.MixedComplementarityProblem2(0, 2, mcp_function, mcp_Nablafunction)
z = np.array([0., 0.])
w = np.array([0., 0.])
SO = SN.SolverOptions(mcp, SN.SICONOS_MCP_NEWTON_MINFBLSA)
info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
#print("z = ", z)
#print("w = ", w)
assert (np.linalg.norm(z-zsol) <= ztol)
assert not info
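# Editor's addition (not in the original test): the reference solution can be
# verified by hand, since M.zsol + q = [2*(4/3) + 7/3 - 5, 4/3 + 2*(7/3) - 6]
# = [15/3 - 5, 18/3 - 6] = [0, 0] = wsol.
def test_solution_consistency():
    M = np.array([[2., 1.],
                  [1., 2.]])
    q = np.array([-5., -6.])
    assert np.linalg.norm(np.dot(M, zsol) + q - wsol) <= ztol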
| fperignon/siconos | numerics/swig/tests/test_mcp2.py | Python | apache-2.0 | 1,437 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = os.environ['DEBUG'] == 'True'  # env values are strings; parse explicitly
ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(',')
ROOT_URLCONF = 'mywebsite.urls'
WSGI_APPLICATION = 'mywebsite.wsgi.application'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'portfolio',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
SITE_ID = 1
| tpeek/Personal-Website | mywebsite/mywebsite/settings.py | Python | mit | 1,864 |
# Django settings for demo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@n$c$o_7^imv1u$m^@5*b8z!d!(o@ar98nvdm-*vsuz_=q2@*5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'demo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"templates/",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'ajax',
)
| qingfeng/django-ajax-tag | demo/settings.py | Python | bsd-2-clause | 2,844 |
import test
s = """<entry>
<id>tag:search.twitter.com,2005,1142881099</id>
<published>2009-01-23T20:04:53Z</published>
</entry> """
print s.find(">")
print len(s.split(':'))
print len(s.split('>')[2].split(':'))
def h(x=1, y=3, z=4):
    return [x, y, z]
print h(1,2)
print h(1,z=5)
# test.testEqual(h(1, 2), [1, 2, 4])
# test.testEqual(h(1, z = 5), [1, 3, 5])
def enum(L):
res = []
n = 1
for item in L:
res.append((n, item))
n = n + 1
return res
#
# test.testEqual(enum(["a", "b", "c"]), [(1, "a"), (2, "b"), (3, "c")])
print enum(["a", "b", "c"])
students = [("Jamal", 98, "A+"),
("Eloise", 87, "B+"),
("Madeline", 99, "A+")]
outfile = open("grades.csv","w")
# output the header row
outfile.write("Name, score, grade\n")
# output each of the rows:
for student in students:
outfile.write("%s, %d, %s\n" % student)
outfile.close()
s = """ Michelle
loves
to
play
ball"""
print s
s = """<entry>
<id>tag:search.twitter.com,2005,1142881099</id>
<published>2009-01-23T20:04:53Z</published>
</entry> """
print (s.split(':'))
print len(s.split(':'))
print len(s.split('>')[2].split(':'))
# def f(list):
# newlist= []
# for x in list:
# if "z" in x:
# newlist=newlist.append(x.split[0])
# return newlist
# words= ['Amazing', 'corny', 'zany']
# print f(words)
#
#
# def f(list):
# newlist= []
# for x in list:
# s=x.split()
# first=s[0]
# newlist=newlist.append(first)
# return newlist
#
# words= ['Amazing', 'corny', 'zany']
# print f(words)
def interp(x, y):
mystr = 'That is %d in a row, %s. Congratulations!' %(x,y)
return mystr
print interp(5,'sir')
test.testEqual(interp(5, "sir"), "That is 5 in a row, sir. Congratulations!")
test.testEqual(interp(6, "your highness"), "That is 6 in a row, your highness. Congratulations!")
def h(x=1, y=3, z=4):
    return [x, y, z]
print h()
def enum(L):
res = []
n = 1
for item in L:
res.append((n, item))
n = n + 1
return res
print enum(["a", "b", "c"])
def f(list):
newlist= []
for x in list:
if 'z' in x:
newlist.append(x[0])
return newlist
words= ['Amazing', 'corny', 'zany']
print f(words)
# def function(sentence):
# #print input
# answer = []
# for word in sentence.split():
# if word not in answer:
# answer.append(word)
def f(list):
newlist= []
for x in list:
if 'z' in x:
newlist.append(x[0])
# return newlist
return [x[0] for x in list if 'z' in x]
words=['amazing', 'corny', 'zany']
print f(words)
print map ((lambda value: 5*value), [1,2,3])
def g(list):
return sorted(list, key= lambda x: x[-1])
# PROBLEM SET 8
fall_list = ["leaves","apples","autumn","bicycles","pumpkin","squash","excellent"]
# Write code to sort the list fall_list in reverse alphabetical order.
# Assign the sorted list to the variable sorted_fall_list.
sorted_fall_list= sorted(fall_list, reverse=True)
print sorted_fall_list
# Now write code to sort the list fall_list by length of the word, longest to shortest.
# Assign this sorted list to the variable length_fall_list.
length_fall_list= sorted(fall_list, key=lambda x: len(x), reverse=True)
print length_fall_list
food_amounts = [{"sugar_grams":245,"carbohydrate":83,"fiber":67},{"carbohydrate":74,"sugar_grams":52,"fiber":26},{"fiber":47,"carbohydrate":93,"sugar_grams":6}]
# Write code to sort the list food_amounts by the key "sugar_grams", from lowest to highest.
# Assign the sorted list to the variable sorted_sugar.
sorted_sugar= sorted(food_amounts, key = lambda x: x["sugar_grams"])
print sorted_sugar
# Now write code to sort the list food_amounts by
# the value of the key "carbohydrate" minus the value of the key "fiber" in each one, from lowest to highest.
# Assign this sorted list to the variable raw_carb_sort.
raw_carb_sort = sorted(food_amounts, key= lambda x: x["carbohydrate"]- x["fiber"])
print raw_carb_sort
# Convert this string concatenation to one using string interpolation.
# Assign the result to the variable t.
x = 12
fname = "Joe"
our_email = "scammer@dontfallforthis.com"
s = "Hello, " + fname + ", you may have won $" + str(x) + " million dollars. Please send your bank account number to " + our_email + " and we will deposit your winnings."
t = ""
print "hello, %s, you may have won $%d million dollars. Please send your bank account number to %s and we will deposit your winnings." %(fname, x, our_email)
# Write code, using string interpolation and the variables nm, min_mt, and mile_amt, to produce the string
# "Albert walked 0.67 miles today in 50 minutes." Assign it to albert_str.
nm = "Albert"
min_amt = 50
mile_amt = 0.673892
albert_str= "%s walked %0.2f miles today in %d minutes." %(nm, mile_amt, min_amt)
print albert_str
# Define a function called walk_reporter, which takes as input:
# - a string that represents someone's name,
# - a float that represents the number of miles they walked,
# - and an integer that represents the number of minutes they spent walking.
#
# The function should RETURN a string in the format:
# "[NAME STR] walked [MILE FLOAT with TWO digits after the decimal] miles in
# [MINUTES INT] minutes."
# You MUST use string interpolation in the function.
# You should NOT use raw_input to get the inputs; they are passed in as parameters.
def walk_reporter(name, miles, minutes):
return "%s walked %0.2f miles in %d minutes." %(name, miles, minutes)
print walk_reporter("michelle", 2.345, 10)
test.testEqual(walk_reporter("Jamie",5.233679,202), "Jamie walked 5.23 miles in 202 minutes.", "walk_reporter test 1")
def f(list):
newlist= []
for x in list:
if 'z' in x:
newlist.append(x[0])
return newlist
words= ['Amazing', 'corny', 'zany']
print f(words)
def f(list):
return [x[0] for x in list if 'z' in x]
test.testEqual(f(['Amazing', 'corny', 'zany']), ['A', 'z'])
# While studying I went through the textbook and redid some of the manual accumulation problems. See if you can go back over my work; I think it will help you study!
x=raw_input("enter word")
things4=map(lambda x: x + "-" + x + "-", x)
print "".join(things4)
| mgooel/106-Extra-Credit | practice.py | Python | mit | 6,103 |
from pygame.locals import *
import pygame
from util import save_yaml, gamedir
from dialog import FloatDialog
import json
import os
from tempsprites import Tempsprites
from button import render_text
class Journal(list):
"""
>>> from journal import Journal
>>> j = Journal()
>>> j.autosave = False
>>> j.write('Hello world')
>>> j.write('Love me')
>>> j.read()
['', 'Hello world', '', 'Love me']
>>> j.bufferlen = 2
>>> j.read()
['', 'Hello world']
>>> j.prev()
>>> j.read()
['', 'Hello world']
>>> j.next()
>>> j.read()
['', 'Love me']
"""
bufferlen = 30
bookmark = 0
autosave = True
def write(self, s):
self.append('')
self += s.split('\n')
if self.autosave:
self.savetoslot()
def read(self):
if len(self) >= self.bufferlen:
return self.__getitem__(slice(self.bookmark, self.bookmark+self.bufferlen))
else:
return self
def next(self):
self.bookmark += self.bufferlen
if self.bookmark >= len(self):
self.bookmark = len(self) - self.bufferlen
def scrollup(self):
self.prev()
def scrolldown(self):
self.next()
def prev(self):
self.bookmark -= self.bufferlen
if self.bookmark <= 0:
self.bookmark = 0
def savetoslot(self):
savedir = gamedir[0]
filename = os.path.join(savedir,'journal','journal.yaml')
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
        with open(filename, 'w') as f:
            f.write(json.dumps(self, indent=4))
class JournalView(FloatDialog, Tempsprites):
def __init__(self, rect, frontend, journal, layer=5, title=''):
self._layer = layer
self.journal = journal
self.frontend = frontend
FloatDialog.__init__(self, rect, frontend, layer)
if self.journal is None:
self.journal = self.frontend.game.journal
self.rect = rect
book = self.frontend.imagecache['spellbookForFlare.png'].copy()
book = pygame.transform.smoothscale(book, (self.rect.w, self.rect.h))
self.image.blit(book, (0,0))
self.mb_rect = pygame.Rect(self.rect.x +200, self.rect.y + 10, self.rect.w - 400, self.rect.h - 200)
self.bg = self.image.copy()
self.up = self.frontend.eventstack.register_event("wheelup", self, self.journal.prev)
self.down = self.frontend.eventstack.register_event("wheeldown", self, self.journal.next)
def delete(self):
self.frontend.eventstack.unregister_event(self.up)
self.frontend.eventstack.unregister_event(self.down)
FloatDialog.delete(self)
def update(self):
self.image.blit(self.bg, (0,0))
x = self.mb_rect.x
y = self.mb_rect.y
for line in self.journal.read():
lineimg=render_text(line, 16, (0,0,0))
y += lineimg.get_rect().h
if y >= self.mb_rect.y + self.mb_rect.h:
x = self.mb_rect.w/2 + 300
y = self.mb_rect.y
self.image.blit(lineimg,(x,y))
| ajventer/mirthless | src/lib/journal.py | Python | mit | 3,165 |
# if k is even, return True
# else return False
def even(k):
return str(k)[-1] in ('0', '2', '4', '6', '8')
if __name__ == '__main__':
#test1
print('k = 4')
print(even(4))
#test2
print('k = 5')
print(even(5))
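# Editor's addition (not part of the original exercise): a bitwise
# alternative that also avoids multiplication, modulo, and division:
def even_bitwise(k):
    return k & 1 == 0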
| maxiee/DataStructuresAlgorithmsPythonExercises | chapter1/r_1_2.py | Python | gpl-2.0 | 239 |
# Copyright (c) 2012 Adi Roiban.
# See LICENSE for details.
from __future__ import (
absolute_import,
print_function,
with_statement,
unicode_literals,
)
import os
class ProjectPaths(object):
"""
Container for common path used by the build system.
"""
def __init__(self, os_name, build_folder_name, folders, filesystem):
build_folder_name = build_folder_name.rstrip(b'/')
build_folder_name = build_folder_name.rstrip(b'\\')
self.fs = filesystem
self._os_name = os_name
self.product = os.path.abspath(b'.')
self.build = self.fs.join([self.product, build_folder_name])
self.dist = self.fs.join([self.product, folders['dist']])
self.publish = self.fs.join([self.product, folders['publish']])
self.python_executable = self.getPythonExecutable(os_name=os_name)
self.python_scripts = self.getPythonScripts()
self.brink_package = os.path.dirname(__file__)
def getPythonExecutable(self, os_name=None):
"""
Return the path to the Python executable for an OS.
"""
if os_name is None:
os_name = os.name
if os_name == 'win':
return self.fs.join(['lib', 'python.exe'])
else:
return self.fs.join(['bin', 'python'])
def getPythonScripts(self, os_name=None):
"""
Return the path to the Python scripts folder for an OS.
"""
if os_name is None:
os_name = os.name
if os_name in ['nt', 'win']:
return self.fs.join(['lib', 'Scripts'])
else:
return self.fs.join(['bin'])
| chevah/brink | brink/paths.py | Python | bsd-3-clause | 1,656 |
# this program implements merge sort
# note: written for Python 3.5
def arraycopy(to, start, end, source):
"copy the array"
for index in range(start, end):
to[index] = source[index]
def mergeparts(v, start, middle, end, work):
"merge the two part into one in array v"
    ipart1 = start
    ipart2 = middle
for index in range(start, end):
if ipart1 < middle and (ipart2 >= end or v[ipart1] <= v[ipart2]):
work[index] = v[ipart1]
ipart1 += 1
else:
work[index] = v[ipart2]
ipart2 += 1
def top2bottom(v, start, end, work):
"top to bottom mode recursive"
    if end - start >= 2:
        imiddle = (start + end) // 2
# split the array
top2bottom(v, start, imiddle, work)
top2bottom(v, imiddle, end, work)
# merge two part
mergeparts(v, start, imiddle, end, work)
# copy the sort array
arraycopy(v, start, end, work)
def mergesort(v, n, work):
"merge sort"
top2bottom(v, 0, n, work)
print("""
Merge sort
implement using python 3.5
top-bottom mode.
""")
# sort the list
array = [1, 4, 89, 67, 90, 34, 56, 23, 15, 48]
work = list(range(len(array)))
print("souce array: ", array)
# use the merge sort
mergesort(array, len(array), work)
print("after using merge sort: ", array)
input("\n\nPress enter to exit.")
| smileboywtu/Algorithms-Python3 | sort/merge-sort/merge-sort.py | Python | gpl-2.0 | 1,401 |
#!/usr/bin/env python
# encoding: utf-8
"""
generateSequences.py
Created by Brant Faircloth on 2009-03-28.
Copyright (c) 2009 Brant Faircloth. All rights reserved.
"""
import pdb
import motif
import numpy
def generateSequence(motifs, outfile, max_rep = 14):
for m in motifs:
motif_length = m[0]
for seq in m[1:]:
prime5_seq = 'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN' #len=36
prime3_seq = 'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN' #len=36
#pdb.set_trace()
rep_number = numpy.random.randint(6,max_rep)
prime5_end = numpy.random.randint(16,36)
prime3_end = numpy.random.randint(16,36)
insert = seq*rep_number
start = len(prime5_seq[:prime5_end])
end = start + len(insert)
seq_id = '>%s_%s_%s_(%s,%s)' % (motif_length, seq, rep_number, start, end)
test_seq = prime5_seq[:prime5_end] + insert + prime3_seq[:prime3_end]
outfile.write('%s\n%s\n' % (seq_id, test_seq))
def main():
motifs = (motif.mononucleotide, motif.dinucleotide, motif.trinucleotide, motif.tetranucleotide, motif.pentanucleotide, motif.hexanucleotide)
outfile = open('testRepeats.fa', 'w')
generateSequence(motifs, outfile)
outfile.close()
if __name__ == '__main__':
main()
| brantfaircloth/msatcommander | msat/helper/generateSequences.py | Python | gpl-2.0 | 1,336 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the 4n6time SQLite database output module CLI arguments helper."""
import argparse
import unittest
from plaso.cli.helpers import sqlite_4n6time_output
from plaso.lib import errors
from plaso.output import sqlite_4n6time
from tests.cli import test_lib as cli_test_lib
from tests.cli.helpers import test_lib
class SQLite4n6TimeOutputArgumentsHelperTest(
test_lib.OutputModuleArgumentsHelperTest):
"""Tests the 4n6time SQLite database output module CLI arguments helper."""
_EXPECTED_OUTPUT = u'\n'.join([
(u'usage: cli_helper.py [--append] [--evidence EVIDENCE] '
u'[--fields FIELDS]'),
u' [--additional_fields ADDITIONAL_FIELDS]',
u'',
u'Test argument parser.',
u'',
u'optional arguments:',
u' --additional_fields ADDITIONAL_FIELDS',
(u' Defines extra fields to be included in the '
u'output, in'),
u' addition to the default fields, which are',
u' datetime,host,source,sourcetype,user,type.',
(u' --append Defines whether the intention is to append '
u'to an'),
(u' already existing database or overwrite it. '
u'Defaults to'),
u' overwrite.',
(u' --evidence EVIDENCE Set the evidence field to a specific value, '
u'defaults'),
u' to empty.',
(u' --fields FIELDS Defines which fields should be indexed in '
u'the'), u' database.',
u''])
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog=u'cli_helper.py',
description=u'Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
sqlite_4n6time_output.SQLite4n6TimeOutputArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
output_mediator = self._CreateOutputMediator()
output_module = sqlite_4n6time.SQLite4n6TimeOutputModule(output_mediator)
with self.assertRaises(errors.BadConfigOption):
sqlite_4n6time_output.SQLite4n6TimeOutputArgumentsHelper.ParseOptions(
options, output_module)
options.write = u'4n6time.sqlite'
sqlite_4n6time_output.SQLite4n6TimeOutputArgumentsHelper.ParseOptions(
options, output_module)
with self.assertRaises(errors.BadConfigObject):
sqlite_4n6time_output.SQLite4n6TimeOutputArgumentsHelper.ParseOptions(
options, None)
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/cli/helpers/sqlite_4n6time_output.py | Python | apache-2.0 | 2,905 |
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=redefined-builtin
"""Common math functions used by multiple brax modules."""
from typing import Tuple
from brax import jumpy as jp
Vector3 = jp.ndarray
Quaternion = jp.ndarray
def rotate(vec: Vector3, quat: Quaternion):
"""Rotates a vector vec by a unit quaternion quat.
Args:
vec: (3,) a vector
quat: (4,) a quaternion
Returns:
ndarray(3) containing vec rotated by quat.
"""
s, u = quat[0], quat[1:]
r = 2 * (jp.dot(u, vec) * u) + (s * s - jp.dot(u, u)) * vec
r = r + 2 * s * jp.cross(u, vec)
return r
def inv_rotate(vec: Vector3, quat: Quaternion):
"""Rotates a vector by the inverse of a unit quaternion.
Args:
vec: (3,) a vector
quat: (4,) a quaternion
Returns:
A vector rotated by quat^{-1}
"""
return rotate(vec, quat_inv(quat))
def ang_to_quat(ang: Vector3):
"""Converts angular velocity to a quaternion.
Args:
ang: (3,) angular velocity
Returns:
    A pure quaternion [0, ang[0], ang[1], ang[2]] encoding ang.
"""
return jp.array([0, ang[0], ang[1], ang[2]])
def euler_to_quat(v: Vector3) -> Quaternion:
"""Converts euler rotations in degrees to quaternion."""
# this follows the Tait-Bryan intrinsic rotation formalism: x-y'-z''
c1, c2, c3 = jp.cos(v * jp.pi / 360)
s1, s2, s3 = jp.sin(v * jp.pi / 360)
w = c1 * c2 * c3 - s1 * s2 * s3
x = s1 * c2 * c3 + c1 * s2 * s3
y = c1 * s2 * c3 - s1 * c2 * s3
z = c1 * c2 * s3 + s1 * s2 * c3
return jp.array([w, x, y, z])
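# Quick sanity check (editor's sketch): under the x-y'-z'' convention above,
# a 90-degree rotation about x gives
#   euler_to_quat(jp.array([90., 0., 0.]))  ->  ~[0.7071, 0.7071, 0., 0.]
# i.e. [cos(45 deg), sin(45 deg), 0, 0].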
def quat_to_axis_angle(q: Quaternion) -> Tuple[Vector3, jp.ndarray]:
"""Returns the axis-angle representation of a quaternion.
Args:
q: (4,) a quaternion
Returns:
    A tuple (axis, angle); the angle is in the range [-pi, pi].
"""
# TODO: replace with more accurate safe function
# avoid the singularity at 0:
epsilon = 0.00001
  # safety epsilon jitter added because both sqrt and arctan2 have bad gradients
denom = jp.sqrt(epsilon + 1 - q[0] * q[0])
angle = 2. * jp.arctan2(
jp.sqrt(epsilon + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]), q[0])
angle += jp.where(angle > jp.pi, x=-2 * jp.pi, y=0)
angle += jp.where(angle < -jp.pi, x=2 * jp.pi, y=0)
return q[1:] / denom, angle
def signed_angle(axis: Vector3, ref_p: Vector3, ref_c: Vector3) -> jp.ndarray:
"""Calculates the signed angle between two vectors along an axis.
Args:
axis: (3,) common axis around which to calculate change in angle
ref_p: (3,) vector pointing at 0-degrees offset in the parent's frame
ref_c: (3,) vector pointing at 0-degrees offset in the child's frame
Returns:
The signed angle between two parts.
"""
return jp.arctan2(jp.dot(jp.cross(ref_p, ref_c), axis), jp.dot(ref_p, ref_c))
def quat_mul(u: Quaternion, v: Quaternion) -> Quaternion:
"""Multiplies two quaternions.
Args:
u: (4,) quaternion (w,x,y,z)
v: (4,) quaternion (w,x,y,z)
Returns:
A quaternion u * v.
"""
return jp.array([
u[0] * v[0] - u[1] * v[1] - u[2] * v[2] - u[3] * v[3],
u[0] * v[1] + u[1] * v[0] + u[2] * v[3] - u[3] * v[2],
u[0] * v[2] - u[1] * v[3] + u[2] * v[0] + u[3] * v[1],
u[0] * v[3] + u[1] * v[2] - u[2] * v[1] + u[3] * v[0],
])
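# Sketch: composing two 90-degree rotations about x yields the 180-degree
# rotation about x, e.g. with h = jp.array([jp.cos(jp.pi / 4),
# jp.sin(jp.pi / 4), 0., 0.]), quat_mul(h, h) is approximately
# [0., 1., 0., 0.].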
def quat_rot_axis(axis: Vector3, angle: jp.ndarray) -> Quaternion:
  """Provides a quaternion that describes rotating around axis by angle.
Args:
axis: (3,) axis (x,y,z)
angle: () float angle to rotate by
Returns:
    A quaternion that rotates around axis by angle
"""
qx = axis[0] * jp.sin(angle / 2)
qy = axis[1] * jp.sin(angle / 2)
qz = axis[2] * jp.sin(angle / 2)
qw = jp.cos(angle / 2)
return jp.array([qw, qx, qy, qz])
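# Sketch: quat_rot_axis(jp.array([0., 0., 1.]), jp.pi) is approximately
# [0., 0., 0., 1.], the 180-degree rotation about z.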
def quat_inv(q: Quaternion) -> Quaternion:
"""Calculates the inverse of quaternion q.
Args:
q: (4,) quaternion [w, x, y, z]
Returns:
    The inverse of q, where quat_mul(q, quat_inv(q)) = [1, 0, 0, 0].
"""
return q * jp.array([1, -1, -1, -1])
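# Sketch: for a unit quaternion the conjugate computed above equals the
# inverse, so quat_mul(q, quat_inv(q)) is approximately [1., 0., 0., 0.].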
def normalize(v: Vector3, epsilon=1e-6) -> Vector3:
"""Normalizes a vector."""
return v / (epsilon + jp.safe_norm(v))
|
google/brax
|
brax/math.py
|
Python
|
apache-2.0
| 4,570
|
"""
Infrastructure code for testing Gabble by pretending to be a Jabber server.
"""
import base64
import os
import hashlib
import sys
import random
import re
import traceback
import ns
import constants as cs
import servicetest
from servicetest import (
assertEquals, assertLength, assertContains, wrap_channel,
EventPattern, call_async, unwrap, Event)
import twisted
from twisted.words.xish import domish, xpath
from twisted.words.protocols.jabber.client import IQ
from twisted.words.protocols.jabber import xmlstream
from twisted.internet import reactor, ssl
import dbus
def make_result_iq(stream, iq, add_query_node=True):
result = IQ(stream, "result")
result["id"] = iq["id"]
to = iq.getAttribute('to')
if to is not None:
result["from"] = to
query = iq.firstChildElement()
if query and add_query_node:
result.addElement((query.uri, query.name))
return result
def acknowledge_iq(stream, iq):
stream.send(make_result_iq(stream, iq))
def send_error_reply(stream, iq, error_stanza=None):
result = IQ(stream, "error")
result["id"] = iq["id"]
query = iq.firstChildElement()
to = iq.getAttribute('to')
if to is not None:
result["from"] = to
if query:
result.addElement((query.uri, query.name))
if error_stanza:
result.addChild(error_stanza)
stream.send(result)
def request_muc_handle(q, conn, stream, muc_jid):
servicetest.call_async(q, conn, 'RequestHandles', 2, [muc_jid])
event = q.expect('dbus-return', method='RequestHandles')
return event.value[0][0]
def make_muc_presence(affiliation, role, muc_jid, alias, jid=None, photo=None):
presence = domish.Element((None, 'presence'))
presence['from'] = '%s/%s' % (muc_jid, alias)
x = presence.addElement((ns.MUC_USER, 'x'))
item = x.addElement('item')
item['affiliation'] = affiliation
item['role'] = role
if jid is not None:
item['jid'] = jid
if photo is not None:
presence.addChild(
elem(ns.VCARD_TEMP_UPDATE, 'x')(
elem('photo')(unicode(photo))
))
return presence
def sync_stream(q, stream):
"""Used to ensure that Gabble has processed all stanzas sent to it."""
iq = IQ(stream, "get")
id = iq['id']
iq.addElement(('http://jabber.org/protocol/disco#info', 'query'))
stream.send(iq)
q.expect('stream-iq', query_ns='http://jabber.org/protocol/disco#info',
predicate=(lambda event:
event.stanza['id'] == id and event.iq_type == 'result'))
class GabbleAuthenticator(xmlstream.Authenticator):
def __init__(self, username, password, resource=None):
self.username = username
self.password = password
self.resource = resource
self.bare_jid = None
self.full_jid = None
self._event_func = lambda e: None
xmlstream.Authenticator.__init__(self)
def set_event_func(self, event_func):
self._event_func = event_func
class JabberAuthenticator(GabbleAuthenticator):
"Trivial XML stream authenticator that accepts one username/digest pair."
# Patch in fix from http://twistedmatrix.com/trac/changeset/23418.
# This monkeypatch taken from Gadget source code
from twisted.words.xish.utility import EventDispatcher
def _addObserver(self, onetime, event, observerfn, priority, *args,
**kwargs):
if self._dispatchDepth > 0:
self._updateQueue.append(lambda: self._addObserver(onetime, event,
observerfn, priority, *args, **kwargs))
return self._oldAddObserver(onetime, event, observerfn, priority,
*args, **kwargs)
EventDispatcher._oldAddObserver = EventDispatcher._addObserver
EventDispatcher._addObserver = _addObserver
def __init__(self, username, password, resource=None, emit_events=False):
GabbleAuthenticator.__init__(self, username, password, resource)
self.emit_events = emit_events
def streamStarted(self, root=None):
if root:
self.xmlstream.sid = '%x' % random.randint(1, sys.maxint)
self.xmlstream.sendHeader()
self.xmlstream.addOnetimeObserver(
"/iq/query[@xmlns='jabber:iq:auth']", self.initialIq)
def initialIq(self, iq):
if self.emit_events:
self._event_func(Event('auth-initial-iq', authenticator=self,
iq=iq, id=iq["id"]))
else:
self.respondToInitialIq(iq)
self.xmlstream.addOnetimeObserver('/iq/query/username', self.secondIq)
def respondToInitialIq(self, iq):
result = IQ(self.xmlstream, "result")
result["id"] = iq["id"]
query = result.addElement('query')
query["xmlns"] = "jabber:iq:auth"
query.addElement('username', content='test')
query.addElement('password')
query.addElement('digest')
query.addElement('resource')
self.xmlstream.send(result)
def secondIq(self, iq):
if self.emit_events:
self._event_func(Event('auth-second-iq', authenticator=self,
iq=iq, id=iq["id"]))
else:
            self.respondToSecondIq(iq)
def respondToSecondIq(self, iq):
username = xpath.queryForNodes('/iq/query/username', iq)
assert map(str, username) == [self.username]
digest = xpath.queryForNodes('/iq/query/digest', iq)
expect = hashlib.sha1(self.xmlstream.sid + self.password).hexdigest()
assert map(str, digest) == [expect]
resource = xpath.queryForNodes('/iq/query/resource', iq)
assertLength(1, resource)
if self.resource is not None:
assertEquals(self.resource, str(resource[0]))
self.bare_jid = '%s@localhost' % self.username
self.full_jid = '%s/%s' % (self.bare_jid, resource)
result = IQ(self.xmlstream, "result")
result["id"] = iq["id"]
self.xmlstream.send(result)
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
class XmppAuthenticator(GabbleAuthenticator):
def __init__(self, username, password, resource=None):
GabbleAuthenticator.__init__(self, username, password, resource)
self.authenticated = False
def streamInitialize(self, root):
if root:
self.xmlstream.sid = root.getAttribute('id')
if self.xmlstream.sid is None:
self.xmlstream.sid = '%x' % random.randint(1, sys.maxint)
self.xmlstream.sendHeader()
def streamIQ(self):
features = elem(xmlstream.NS_STREAMS, 'features')(
elem(ns.NS_XMPP_BIND, 'bind'),
elem(ns.NS_XMPP_SESSION, 'session'),
)
self.xmlstream.send(features)
self.xmlstream.addOnetimeObserver(
"/iq/bind[@xmlns='%s']" % ns.NS_XMPP_BIND, self.bindIq)
self.xmlstream.addOnetimeObserver(
"/iq/session[@xmlns='%s']" % ns.NS_XMPP_SESSION, self.sessionIq)
def streamSASL(self):
features = domish.Element((xmlstream.NS_STREAMS, 'features'))
mechanisms = features.addElement((ns.NS_XMPP_SASL, 'mechanisms'))
mechanism = mechanisms.addElement('mechanism', content='PLAIN')
self.xmlstream.send(features)
self.xmlstream.addOnetimeObserver("/auth", self.auth)
def streamStarted(self, root=None):
self.streamInitialize(root)
if self.authenticated:
# Initiator authenticated itself, and has started a new stream.
self.streamIQ()
else:
self.streamSASL()
def auth(self, auth):
assert (base64.b64decode(str(auth)) ==
'\x00%s\x00%s' % (self.username, self.password))
success = domish.Element((ns.NS_XMPP_SASL, 'success'))
self.xmlstream.send(success)
self.xmlstream.reset()
self.authenticated = True
def bindIq(self, iq):
resource = xpath.queryForString('/iq/bind/resource', iq)
if self.resource is not None:
assertEquals(self.resource, resource)
else:
assert resource is not None
result = IQ(self.xmlstream, "result")
result["id"] = iq["id"]
bind = result.addElement((ns.NS_XMPP_BIND, 'bind'))
self.bare_jid = '%s@localhost' % self.username
self.full_jid = '%s/%s' % (self.bare_jid, resource)
jid = bind.addElement('jid', content=self.full_jid)
self.xmlstream.send(result)
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
def sessionIq(self, iq):
self.xmlstream.send(make_result_iq(self.xmlstream, iq))
class StreamEvent(servicetest.Event):
def __init__(self, type_, stanza, stream):
servicetest.Event.__init__(self, type_, stanza=stanza)
self.stream = stream
self.to = stanza.getAttribute("to")
class IQEvent(StreamEvent):
def __init__(self, stream, iq):
StreamEvent.__init__(self, 'stream-iq', iq, stream)
self.iq_type = iq.getAttribute("type")
self.iq_id = iq.getAttribute("id")
query = iq.firstChildElement()
if query:
self.query = query
self.query_ns = query.uri
self.query_name = query.name
if query.getAttribute("node"):
self.query_node = query.getAttribute("node")
else:
self.query = None
class PresenceEvent(StreamEvent):
def __init__(self, stream, stanza):
StreamEvent.__init__(self, 'stream-presence', stanza, stream)
self.presence_type = stanza.getAttribute('type')
statuses = xpath.queryForNodes('/presence/status', stanza)
if statuses:
self.presence_status = str(statuses[0])
class MessageEvent(StreamEvent):
def __init__(self, stream, stanza):
StreamEvent.__init__(self, 'stream-message', stanza, stream)
self.message_type = stanza.getAttribute('type')
class StreamFactory(twisted.internet.protocol.Factory):
def __init__(self, streams, jids):
self.streams = streams
self.jids = jids
self.presences = {}
        self.mappings = dict(zip(jids, streams))
# Make a copy of the streams
self.factory_streams = list(streams)
self.factory_streams.reverse()
        # Do not add observers for single instances: it is unnecessary, and
        # some unit tests need to respond to the roster request themselves,
        # so answering it for them would break compatibility.
if len(streams) > 1:
            # We need a real function here: a lambda created directly in the
            # for loop would capture the loop variables 'stream' and 'jid'
            # by reference rather than their per-iteration values.
def addObservers(stream, jid):
stream.addObserver('/iq', lambda x: \
self.forward_iq(stream, jid, x))
stream.addObserver('/presence', lambda x: \
self.got_presence(stream, jid, x))
for (jid, stream) in self.mappings.items():
addObservers(stream, jid)
def protocol(self, *args):
return self.factory_streams.pop()
def got_presence (self, stream, jid, stanza):
stanza.attributes['from'] = jid
self.presences[jid] = stanza
for dest_jid in self.presences.keys():
# Dispatch the new presence to other clients
stanza.attributes['to'] = dest_jid
self.mappings[dest_jid].send(stanza)
# Don't echo the presence twice
if dest_jid != jid:
# Dispatch other client's presence to this stream
presence = self.presences[dest_jid]
presence.attributes['to'] = jid
stream.send(presence)
def lost_presence(self, stream, jid):
if self.presences.has_key(jid):
del self.presences[jid]
for dest_jid in self.presences.keys():
presence = domish.Element(('jabber:client', 'presence'))
presence['from'] = jid
presence['to'] = dest_jid
presence['type'] = 'unavailable'
self.mappings[dest_jid].send(presence)
def forward_iq(self, stream, jid, stanza):
stanza.attributes['from'] = jid
query = stanza.firstChildElement()
# Fake other accounts as being part of our roster
if query and query.uri == ns.ROSTER:
roster = make_result_iq(stream, stanza)
query = roster.firstChildElement()
for roster_jid in self.mappings.keys():
if jid != roster_jid:
item = query.addElement('item')
item['jid'] = roster_jid
item['subscription'] = 'both'
stream.send(roster)
return
to = stanza.getAttribute('to')
dest = None
if to is not None:
dest = self.mappings.get(to)
if dest is not None:
dest.send(stanza)
class BaseXmlStream(xmlstream.XmlStream):
initiating = False
namespace = 'jabber:client'
pep_support = True
disco_features = []
handle_privacy_lists = True
def __init__(self, event_func, authenticator):
xmlstream.XmlStream.__init__(self, authenticator)
self.event_func = event_func
self.addObserver('//iq', lambda x: event_func(
IQEvent(self, x)))
self.addObserver('//message', lambda x: event_func(
MessageEvent(self, x)))
self.addObserver('//presence', lambda x: event_func(
PresenceEvent(self, x)))
self.addObserver('//event/stream/authd', self._cb_authd)
if self.handle_privacy_lists:
self.addObserver("/iq/query[@xmlns='%s']" % ns.PRIVACY,
self._cb_priv_list)
def _cb_priv_list(self, iq):
send_error_reply(self, iq)
def _cb_authd(self, _):
# called when stream is authenticated
assert self.authenticator.full_jid is not None
assert self.authenticator.bare_jid is not None
self.addObserver(
"/iq[@to='localhost']/query[@xmlns='http://jabber.org/protocol/disco#info']",
self._cb_disco_iq)
self.addObserver(
"/iq[@to='%s']/query[@xmlns='http://jabber.org/protocol/disco#info']"
% self.authenticator.bare_jid,
self._cb_bare_jid_disco_iq)
self.event_func(servicetest.Event('stream-authenticated'))
def _cb_disco_iq(self, iq):
nodes = xpath.queryForNodes(
"/iq/query[@xmlns='http://jabber.org/protocol/disco#info']", iq)
query = nodes[0]
for feature in self.disco_features:
query.addChild(elem('feature', var=feature))
iq['type'] = 'result'
iq['from'] = iq['to']
self.send(iq)
def _cb_bare_jid_disco_iq(self, iq):
# advertise PEP support
nodes = xpath.queryForNodes(
"/iq/query[@xmlns='http://jabber.org/protocol/disco#info']",
iq)
query = nodes[0]
identity = query.addElement('identity')
identity['category'] = 'pubsub'
identity['type'] = 'pep'
iq['type'] = 'result'
iq['from'] = iq['to']
self.send(iq)
def onDocumentEnd(self):
self.event_func(servicetest.Event('stream-closed'))
        # We don't chain up to XmlStream.onDocumentEnd() because it would
        # disconnect the TCP connection, making tests such as
        # connect/disconnect-timeout.py fail.
def send_stream_error(self, error='system-shutdown'):
# Yes, there are meant to be two different STREAMS namespaces.
go_away = \
elem(xmlstream.NS_STREAMS, 'error')(
elem(ns.STREAMS, error)
)
self.send(go_away)
class JabberXmlStream(BaseXmlStream):
version = (0, 9)
class XmppXmlStream(BaseXmlStream):
version = (1, 0)
class GoogleXmlStream(BaseXmlStream):
version = (1, 0)
pep_support = False
disco_features = [ns.GOOGLE_ROSTER,
ns.GOOGLE_JINGLE_INFO,
ns.GOOGLE_MAIL_NOTIFY,
ns.GOOGLE_QUEUE,
]
def _cb_bare_jid_disco_iq(self, iq):
# Google talk doesn't support PEP :(
iq['type'] = 'result'
iq['from'] = iq['to']
self.send(iq)
def make_connection(bus, event_func, params=None, suffix=''):
# Gabble accepts a resource in 'account', but the value of 'resource'
# overrides it if there is one.
test_name = re.sub('(.*tests/twisted/|\./)', '', sys.argv[0])
account = 'test%s@localhost/%s' % (suffix, test_name)
default_params = {
'account': account,
'password': 'pass',
'resource': 'Resource',
'server': 'localhost',
'port': dbus.UInt32(4242),
'fallback-socks5-proxies': dbus.Array([], signature='s'),
'require-encryption': False,
}
if params:
default_params.update(params)
# Allow omitting the 'password' param
if default_params['password'] is None:
del default_params['password']
# Allow omitting the 'account' param
if default_params['account'] is None:
del default_params['account']
jid = default_params.get('account', None)
conn = servicetest.make_connection(bus, event_func, 'gabble', 'jabber',
default_params)
return (conn, jid)
def make_stream(event_func, authenticator=None, protocol=None,
resource=None, suffix=''):
# set up Jabber server
if authenticator is None:
authenticator = XmppAuthenticator('test%s' % suffix, 'pass', resource=resource)
authenticator.set_event_func(event_func)
if protocol is None:
protocol = XmppXmlStream
stream = protocol(event_func, authenticator)
return stream
def disconnect_conn(q, conn, stream, expected_before=[], expected_after=[]):
call_async(q, conn, 'Disconnect')
tmp = expected_before + [
EventPattern('dbus-signal', signal='StatusChanged', args=[cs.CONN_STATUS_DISCONNECTED, cs.CSR_REQUESTED]),
EventPattern('stream-closed')]
before_events = q.expect_many(*tmp)
stream.sendFooter()
tmp = expected_after + [EventPattern('dbus-return', method='Disconnect')]
after_events = q.expect_many(*tmp)
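    # Strip the two standard patterns appended to expected_before and the
    # single pattern appended to expected_after, so callers get back only
    # the events they asked for.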
return before_events[:-2], after_events[:-1]
def exec_test_deferred(fun, params, protocol=None, timeout=None,
authenticator=None, num_instances=1,
do_connect=True):
# hack to ease debugging
domish.Element.__repr__ = domish.Element.toXml
colourer = None
if sys.stdout.isatty() or 'CHECK_FORCE_COLOR' in os.environ:
colourer = servicetest.install_colourer()
bus = dbus.SessionBus()
queue = servicetest.IteratingEventQueue(timeout)
queue.verbose = (
os.environ.get('CHECK_TWISTED_VERBOSE', '') != ''
or '-v' in sys.argv)
conns = []
jids = []
streams = []
resource = params.get('resource') if params is not None else None
for i in range(0, num_instances):
if i == 0:
suffix = ''
else:
suffix = str(i)
try:
(conn, jid) = make_connection(bus, queue.append, params, suffix)
except Exception, e:
# Crap. This is normally because the connection's still kicking
# around on the bus. Let's bin any connections we *did* manage to
# get going and then bail out unceremoniously.
print e
for conn in conns:
conn.Disconnect()
os._exit(1)
conns.append(conn)
jids.append(jid)
streams.append(make_stream(queue.append, protocol=protocol,
authenticator=authenticator,
resource=resource, suffix=suffix))
factory = StreamFactory(streams, jids)
port = reactor.listenTCP(4242, factory, interface='localhost')
def signal_receiver(*args, **kw):
if kw['path'] == '/org/freedesktop/DBus' and \
kw['member'] == 'NameOwnerChanged':
bus_name, old_name, new_name = args
if new_name == '':
for i, conn in enumerate(conns):
stream = streams[i]
jid = jids[i]
if conn._requested_bus_name == bus_name:
factory.lost_presence(stream, jid)
break
queue.append(Event('dbus-signal',
path=unwrap(kw['path']),
signal=kw['member'], args=map(unwrap, args),
interface=kw['interface']))
match_all_signals = bus.add_signal_receiver(
signal_receiver,
None, # signal name
None, # interface
None,
path_keyword='path',
member_keyword='member',
interface_keyword='interface',
byte_arrays=True
)
error = None
try:
if do_connect:
for conn in conns:
conn.Connect()
queue.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTING, cs.CSR_REQUESTED])
queue.expect('stream-authenticated')
queue.expect('dbus-signal', signal='PresencesChanged',
args=[{1L: (cs.PRESENCE_AVAILABLE, u'available', '')}])
queue.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
if len(conns) == 1:
fun(queue, bus, conns[0], streams[0])
else:
fun(queue, bus, conns, streams)
except Exception, e:
traceback.print_exc()
error = e
queue.verbose = False
if colourer:
sys.stdout = colourer.fh
d = port.stopListening()
# Does the Connection object still exist?
for i, conn in enumerate(conns):
if not bus.name_has_owner(conn.object.bus_name):
# Connection has already been disconnected and destroyed
continue
try:
if conn.GetStatus() == cs.CONN_STATUS_CONNECTED:
# Connection is connected, properly disconnect it
disconnect_conn(queue, conn, streams[i])
else:
# Connection is not connected, call Disconnect() to destroy it
conn.Disconnect()
except dbus.DBusException, e:
pass
except Exception, e:
traceback.print_exc()
error = e
try:
conn.Disconnect()
raise AssertionError("Connection didn't disappear; "
"all subsequent tests will probably fail")
except dbus.DBusException, e:
pass
except Exception, e:
traceback.print_exc()
error = e
match_all_signals.remove()
if error is None:
d.addBoth((lambda *args: reactor.crash()))
else:
# please ignore the POSIX behind the curtain
d.addBoth((lambda *args: os._exit(1)))
def exec_test(fun, params=None, protocol=None, timeout=None,
authenticator=None, num_instances=1, do_connect=True):
reactor.callWhenRunning(
exec_test_deferred, fun, params, protocol, timeout, authenticator, num_instances,
do_connect)
reactor.run()
# Useful routines for server-side vCard handling
current_vcard = domish.Element(('vcard-temp', 'vCard'))
def expect_and_handle_get_vcard(q, stream):
get_vcard_event = q.expect('stream-iq', query_ns=ns.VCARD_TEMP,
query_name='vCard', iq_type='get')
iq = get_vcard_event.stanza
vcard = iq.firstChildElement()
assert vcard.name == 'vCard', vcard.toXml()
# Send back current vCard
result = make_result_iq(stream, iq, add_query_node=False)
result.addChild(current_vcard)
stream.send(result)
def expect_and_handle_set_vcard(q, stream, check=None):
global current_vcard
set_vcard_event = q.expect('stream-iq', query_ns=ns.VCARD_TEMP,
query_name='vCard', iq_type='set')
iq = set_vcard_event.stanza
vcard = iq.firstChildElement()
assert vcard.name == 'vCard', vcard.toXml()
if check is not None:
check(vcard)
# Update current vCard
current_vcard = vcard
stream.send(make_result_iq(stream, iq))
def _elem_add(elem, *children):
for child in children:
if isinstance(child, domish.Element):
elem.addChild(child)
elif isinstance(child, unicode):
elem.addContent(child)
else:
raise ValueError(
'invalid child object %r (must be element or unicode)', child)
def elem(a, b=None, attrs={}, **kw):
r"""
>>> elem('foo')().toXml()
u'<foo/>'
>>> elem('foo', x='1')().toXml()
u"<foo x='1'/>"
>>> elem('foo', x='1')(u'hello').toXml()
u"<foo x='1'>hello</foo>"
>>> elem('foo', x='1')(u'hello',
... elem('http://foo.org', 'bar', y='2')(u'bye')).toXml()
u"<foo x='1'>hello<bar xmlns='http://foo.org' y='2'>bye</bar></foo>"
>>> elem('foo', attrs={'xmlns:bar': 'urn:bar', 'bar:cake': 'yum'})(
... elem('bar:e')(u'i')
... ).toXml()
u"<foo xmlns:bar='urn:bar' bar:cake='yum'><bar:e>i</bar:e></foo>"
"""
class _elem(domish.Element):
def __call__(self, *children):
_elem_add(self, *children)
return self
if b is not None:
elem = _elem((a, b))
else:
elem = _elem((None, a))
# Can't just update kw into attrs, because that *modifies the parameter's
# default*. Thanks python.
allattrs = {}
allattrs.update(kw)
allattrs.update(attrs)
# First, let's pull namespaces out
realattrs = {}
for k, v in allattrs.iteritems():
if k.startswith('xmlns:'):
abbr = k[len('xmlns:'):]
elem.localPrefixes[abbr] = v
else:
realattrs[k] = v
for k, v in realattrs.iteritems():
if k == 'from_':
elem['from'] = v
else:
elem[k] = v
return elem
def elem_iq(server, type, **kw):
class _iq(IQ):
def __call__(self, *children):
_elem_add(self, *children)
return self
iq = _iq(server, type)
for k, v in kw.iteritems():
if k == 'from_':
iq['from'] = v
else:
iq[k] = v
return iq
def make_presence(_from, to='test@localhost', type=None, show=None,
status=None, caps=None, photo=None):
presence = domish.Element((None, 'presence'))
presence['from'] = _from
presence['to'] = to
if type is not None:
presence['type'] = type
if show is not None:
presence.addElement('show', content=show)
if status is not None:
presence.addElement('status', content=status)
if caps is not None:
cel = presence.addElement(('http://jabber.org/protocol/caps', 'c'))
for key,value in caps.items():
cel[key] = value
# <x xmlns="vcard-temp:x:update"><photo>4a1...</photo></x>
if photo is not None:
x = presence.addElement((ns.VCARD_TEMP_UPDATE, 'x'))
x.addElement('photo').addContent(photo)
return presence
|
mlundblad/telepathy-gabble
|
tests/twisted/gabbletest.py
|
Python
|
lgpl-2.1
| 27,467
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Workflow for articles."""
from __future__ import absolute_import, print_function
from datetime import date
from invenio.base.i18n import _, language_list_long
from invenio.modules.deposit import fields
from invenio.modules.deposit.field_widgets import CKEditorWidget, \
ColumnInput, ExtendedListWidget, ItemWidget, date_widget, plupload_widget
from invenio.modules.deposit.filter_utils import sanitize_html, \
strip_prefixes, strip_string
from invenio.modules.deposit.form import WebDepositForm
from invenio.modules.deposit.types import SimpleRecordDeposition
from invenio.modules.deposit.validation_utils import doi_syntax_validator, \
list_length, required_if
from werkzeug.local import LocalProxy
from wtforms import validators
def keywords_autocomplete(form, field, term, limit=50):
"""Return keywords for autocomplete."""
return [{'value': "Keyword 1"}, {'value': "Keyword 2"}]
def missing_doi_warning(dummy_form, field, submit=False, fields=None):
"""Field processor.
Checking for existence of a DOI, and otherwise asking people to provide it.
"""
if not field.errors and not field.data:
field.add_message("Please provide a DOI if possible.", state="warning")
raise StopIteration()
#
# Helpers
#
def filter_empty_helper(keys=None):
"""Remove empty elements from a list."""
def _inner(elem):
if isinstance(elem, dict):
for k, v in elem.items():
if (keys is None or k in keys) and v:
return True
return False
else:
return bool(elem)
return _inner
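# For example, filter(filter_empty_helper(), [{'name': 'X'}, {}, '']) keeps
# only the first element: empty dicts and falsy scalars are dropped.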
#
# Forms
#
class AuthorInlineForm(WebDepositForm):
"""Author inline form."""
name = fields.TextField(
placeholder=_("Family name, First name"),
widget_classes='form-control',
# autocomplete=map_result(
# dummy_autocomplete,
# authorform_mapper
# ),
widget=ColumnInput(class_="col-xs-6"),
validators=[
required_if(
'affiliation',
[lambda x: bool(x.strip()), ], # non-empty
message=_(
"Creator name is required if you specify affiliation.")
),
],
)
affiliation = fields.TextField(
placeholder=_("Affiliation"),
widget_classes='form-control',
widget=ColumnInput(class_="col-xs-4 col-pad-0"),
)
class ArticleForm(WebDepositForm):
"""Article form."""
#
# Fields
#
doi = fields.TextField(
label=_("Digital Object Identifier"),
placeholder=_("e.g. 10.1234/foo.bar..."),
widget_classes="form-control",
icon='fa fa-barcode fa-fw',
validators=[
doi_syntax_validator,
],
filters=[
strip_string,
strip_prefixes("doi:", "http://dx.doi.org/"),
],
processors=[
missing_doi_warning,
],
)
publication_date = fields.Date(
label=_('Publication date'),
icon='fa fa-calendar fa-fw',
description=_('Required. Format: YYYY-MM-DD.'),
default=date.today(),
validators=[validators.DataRequired()],
widget=date_widget,
widget_classes='input-sm',
export_key='imprint.date',
)
title = fields.TextField(
label=_('Title'),
export_key='title.title',
icon='fa fa-book fa-fw',
widget_classes="form-control",
validators=[validators.DataRequired()],
)
authors = fields.DynamicFieldList(
fields.FormField(
AuthorInlineForm,
widget=ExtendedListWidget(
item_widget=ItemWidget(),
html_tag='div',
),
),
label=_('Authors'),
add_label=_('Add another author'),
icon='fa fa-user fa-fw',
min_entries=1,
widget_classes='',
export_key='authors',
validators=[validators.DataRequired(), list_length(
min_num=1, element_filter=filter_empty_helper(),
)],
)
abstract = fields.TextAreaField(
label=_("Description"),
description=_('Required.'),
default='',
icon='fa fa-pencil fa-fw',
validators=[validators.DataRequired(), ],
widget=CKEditorWidget(
toolbar=[
['PasteText', 'PasteFromWord'],
['Bold', 'Italic', 'Strike', '-',
'Subscript', 'Superscript', ],
['NumberedList', 'BulletedList'],
['Undo', 'Redo', '-', 'Find', 'Replace', '-', 'RemoveFormat'],
['SpecialChar', 'ScientificChar'], ['Source'], ['Maximize'],
],
disableNativeSpellChecker=False,
extraPlugins='scientificchar',
removePlugins='elementspath',
),
filters=[
sanitize_html(),
strip_string,
],
export_key='abstract.summary',
)
journal_title = fields.TextField(
label=_("Journal title"),
description=_("Optional."),
validators=[
required_if(
'journal_volume', [lambda x: bool(x.strip()), ], # non-empty
message=_("Journal title is required if you specify either "
"volume, issue or pages.")
),
required_if(
'journal_issue', [lambda x: bool(x.strip()), ], # non-empty
message=_("Journal title is required if you specify either "
"volume, issue or pages.")
),
required_if(
'journal_pages', [lambda x: bool(x.strip()), ], # non-empty
message=_("Journal title is required if you specify either "
"volume, issue or pages.")
),
],
export_key='journal_info.title',
widget_classes='form-control',
)
journal_volume = fields.TextField(
label=_("Volume"),
description=_("Optional."),
export_key='journal_info.volume',
widget_classes='form-control',
)
journal_issue = fields.TextField(
label=_("Issue"),
description=_("Optional."),
export_key='journal_info.issue',
widget_classes='form-control',
)
journal_pages = fields.TextField(
label=_("Pages"),
description=_("Optional."),
export_key='journal_info.pagination',
widget_classes='form-control',
)
language = fields.SelectField(
choices=LocalProxy(lambda: language_list_long(
enabled_langs_only=False)),
default='english',
icon='fa fa-globe fa-fw',
widget_classes='form-control',
)
keywords = fields.DynamicFieldList(
fields.TextField(
widget_classes='form-control',
autocomplete=keywords_autocomplete,
widget=ColumnInput(class_="col-xs-10"),
),
label=_('Keywords'),
add_label=_('Add another keyword'),
icon='fa fa-tags fa-fw',
widget_classes='',
min_entries=1,
)
notes = fields.TextAreaField(
label=_("Notes"),
description=_('Optional.'),
default='',
validators=[validators.optional()],
filters=[
strip_string,
],
widget_classes='form-control',
icon='fa fa-pencil fa-fw',
export_key='comment',
)
plupload_file = fields.FileUploadField(
label="",
widget=plupload_widget,
export_key=False
)
#
# Form configuration
#
_title = _('New article')
_subtitle = _('Instructions: (i) Press "Save" to save your upload for '
                 'editing later, as many times as you like. (ii) Upload or '
'remove extra files in the bottom of the form. (iii) When '
'ready, press "Submit" to finalize your upload.')
groups = [
('Basic Information',
['doi', 'publication_date', 'title', 'authors', 'abstract', ],
{
'indication': 'required',
}),
('Journal',
['journal_title', 'journal_volume', 'journal_issue',
'journal_pages'],
{
             'indication': 'optional'
}),
('Additional information',
['language', 'keywords', 'notes'],
{
'indication': 'optional',
})
]
field_sizes = {
'plupload_file': 'col-md-12',
}
#
# Workflow
#
class article(SimpleRecordDeposition):
"""Article."""
name = _("Article")
name_plural = _("Articles")
group = _("Articles & Preprints")
draft_definitions = {
'default': ArticleForm,
}
@classmethod
def process_sip_metadata(cls, deposition, metadata):
"""Map keywords to match jsonalchemy configuration."""
metadata['keywords'] = map(
lambda x: {'term': x},
metadata['keywords']
)
|
hachreak/invenio-demosite
|
invenio_demosite/modules/deposit/workflows/article.py
|
Python
|
gpl-2.0
| 9,880
|
from attest import Tests, assert_hook
from attest.hook import ExpressionEvaluator
suite = Tests()
@suite.test
def eval():
value = 1 + 1
samples = {
'isinstance(value, int)': 'True',
'value == int("2")': "(2 == 2)",
'type(value).__name__': "'int'",
'value == 5 - 3': '(2 == 2)',
'{"value": value}': "{'value': 2}",
'[v for v in [value]]': '[2]',
}
for expr, result in samples.iteritems():
expr = ExpressionEvaluator(expr, globals(), locals())
expr.late_visit()
ev = repr(expr)
assert ev == result
assert bool(ev) is True
@suite.test
def initpy_with_relative_import():
# Ensure that packages with an __init__.py file that use both assert_hook
# and relative imports are hooked properly.
from . import dummy
|
dag/attest
|
attest/tests/hook.py
|
Python
|
bsd-2-clause
| 829
|
# Copyright 2012 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyfftw import FFTW, n_byte_align, n_byte_align_empty
import numpy
import struct
from timeit import Timer
import unittest
class FFTWBaseTest(unittest.TestCase):
def reference_fftn(self, a, axes):
return numpy.fft.fftn(a, axes=axes)
def __init__(self, *args, **kwargs):
super(FFTWBaseTest, self).__init__(*args, **kwargs)
self.make_shapes()
if not hasattr(self, 'assertRaisesRegex'):
self.assertRaisesRegex = self.assertRaisesRegexp
def setUp(self):
self.input_dtype = numpy.complex64
self.output_dtype = numpy.complex64
self.np_fft_comparison = numpy.fft.fft
self.direction = 'FFTW_FORWARD'
return
def tearDown(self):
return
def get_input_dtype_alignment(self):
return self.input_dtype([]).real.dtype.alignment
def get_output_dtype_alignment(self):
        return self.output_dtype([]).real.dtype.alignment
def make_shapes(self):
self.input_shapes = {
'small_1d': (16,),
'1d': (2048,),
'2d': (256, 2048),
'3d': (5, 256, 2048)}
self.output_shapes = {
'small_1d': (16,),
'1d': (2048,),
'2d': (256, 2048),
'3d': (5, 256, 2048)}
def create_test_arrays(self, input_shape, output_shape, axes=None):
a = self.input_dtype(numpy.random.randn(*input_shape)
+1j*numpy.random.randn(*input_shape))
b = self.output_dtype(numpy.random.randn(*output_shape)
+1j*numpy.random.randn(*output_shape))
return a, b
def timer_routine(self, pyfftw_callable, numpy_fft_callable,
comparison_string='numpy.fft'):
N = 100
t = Timer(stmt=pyfftw_callable)
t_numpy_fft = Timer(stmt=numpy_fft_callable)
t_str = ("%.2f" % (1000.0/N*t.timeit(N)))+' ms'
t_numpy_str = ("%.2f" % (1000.0/N*t_numpy_fft.timeit(N)))+' ms'
print('One run: '+ t_str + \
' (versus ' + t_numpy_str + ' for ' + comparison_string + \
')')
def run_validate_fft(self, a, b, axes, fft=None, ifft=None,
force_unaligned_data=False, create_array_copies=True,
threads=1, flags=('FFTW_ESTIMATE',)):
''' Run a validation of the FFTW routines for the passed pair
of arrays, a and b, and the axes argument.
a and b are assumed to be the same shape (but not necessarily
the same layout in memory).
fft and ifft, if passed, should be instantiated FFTW objects.
If force_unaligned_data is True, the flag FFTW_UNALIGNED
will be passed to the fftw routines.
The threads argument runs the validation with multiple threads.
flags is passed to the creation of the FFTW object.
'''
if create_array_copies:
# Don't corrupt the original mutable arrays
a = a.copy()
b = b.copy()
a_orig = a.copy()
flags = list(flags)
if force_unaligned_data:
flags.append('FFTW_UNALIGNED')
        if fft is None:
fft = FFTW(a,b,axes=axes, direction='FFTW_FORWARD',
flags=flags, threads=threads)
else:
fft.update_arrays(a,b)
        if ifft is None:
ifft = FFTW(b, a, axes=axes, direction='FFTW_BACKWARD',
flags=flags, threads=threads)
else:
ifft.update_arrays(b,a)
a[:] = a_orig
# Test the forward FFT by comparing it to the result from numpy.fft
fft.execute()
ref_b = self.reference_fftn(a, axes=axes)
        # This is actually quite a loose relative tolerance, yet the check
        # still fails occasionally; presumably numpy.fft and FFTW differ
        # internally.
self.assertTrue(numpy.allclose(b, ref_b, rtol=1e-2, atol=1e-3))
# Test the inverse FFT by comparing the result to the starting
# value (which is scaled as per FFTW being unnormalised).
ifft.execute()
# The scaling is the product of the lengths of the fft along
# the axes along which the fft is taken.
scaling = numpy.prod(numpy.array(a.shape)[list(axes)])
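        # e.g. for the '2d' test shape (256, 2048) with axes=(0, 1) this is
        # 256 * 2048 = 524288.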
self.assertEqual(ifft.N, scaling)
self.assertEqual(fft.N, scaling)
self.assertTrue(numpy.allclose(a/scaling, a_orig, rtol=1e-2, atol=1e-3))
return fft, ifft
def run_test_suites(test_suites, run_tests=None):
    '''For each test class (derived from TestCase) in test_suites,
    load and run all the test methods within.
    If run_tests is not None, then it should be a dictionary whose
    keys are test suite class names and whose values are lists of
    test methods to run. Alternatively, the key can be 'all', in
    which case every test suite will be run with the provided list
    of test methods.
'''
suite = unittest.TestSuite()
for test_class in test_suites:
tests = unittest.TestLoader().loadTestsFromTestCase(test_class)
if run_tests is not None:
if test_class.__name__ in run_tests:
this_suite_run = set(run_tests[test_class.__name__])
else:
this_suite_run = set()
if 'all' in run_tests:
this_suite_run = this_suite_run.union(run_tests['all'])
_tests = []
for each_test in tests:
if (each_test.id().split('.')[-1] in this_suite_run):
_tests.append(each_test)
tests = _tests
suite.addTests(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
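# Minimal usage sketch (hypothetical method name):
#   run_test_suites([FFTWBaseTest],
#                   run_tests={'FFTWBaseTest': ['test_example']})
# runs only FFTWBaseTest.test_example, while a key of 'all' applies its
# method list to every suite in test_suites.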
|
fredRos/pyFFTW
|
test/test_pyfftw_base.py
|
Python
|
gpl-3.0
| 6,478
|
#! /usr/bin/env python3
"""Iterator for BLAST M8 (BLAST+ output format 6) files
Copyright:
    b6.py: iterate over and return entries of a B6/M8 file
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import OrderedDict
import os
import sys
__author__ = 'Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '5.0.1'
class FormatError(Exception):
"""A simple exception that is raised when an input file is formatted
incorrectly
"""
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
class B6Entry:
"""A simple class to store data from B6/M8 entries and write them
Attributes:
fs_order (dict): dictionary storing the original order of the format
specifiers with names as keys and indices as values
query (str): query ID (sequence aligned with)
subject (str): subject ID (sequence aligned to)
identity (float): percentage of aligned bases between the subject
and query that are identical
length (int): length of alignment (number of aligned bases)
mismatches (int): number of mismatches in the alignment
gaps (int): number of gaps in the alignment
query_start (int): alignment start position in query sequence
query_end (int): alignment end position in query sequence
subject_start (int): alignment start position in subject sequence
subject_end (int): alignment end position in subject sequence
evalue (float): E-value of alignment
bitscore (float): Bit score of alignment
custom_fs (dict): OrderedDict of non-default format specifiers
"""
def __init__(self):
"""Initialize variables to store B6/M8 entry data"""
self.fs_order = None
self.query = None
self.subject = None
self.identity = None
self.length = None
self.mismatches = None
self.gaps = None
self.query_start = None
self.query_end = None
self.subject_start = None
self.subject_end = None
self.evalue = None
self.bitscore = None
self.custom_fs = None # Store additional format specifiers
def write(self, default: bool=False):
"""Restore B6/M8 entry to original format
Args:
default (bool): output entry in default BLAST+ B6 format
Returns:
str: properly formatted string containing the B6/M8 entry
"""
none_type = type(None)
if default: # Default order of format specifiers
ordered_vals = [getattr(self, i) for i in ['query', 'subject',
'identity', 'length', 'mismatches', 'gaps',
'query_start', 'query_end', 'subject_start',
'subject_end', 'evalue', 'bitscore']]
else: # Original order of B6 entry format specifiers
try:
ordered_vals = [self.custom_fs[i] if i in self.custom_fs
else getattr(self, i) for i in self.fs_order]
except TypeError:
ordered_vals = [getattr(self, i) for i in self.fs_order]
# Format entry for writing
fstr = "\t".join(['-' if type(i) == none_type else str(i) for i in
ordered_vals])
return '{}{}'.format(fstr, os.linesep)
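# Editor's note: write(default=True) always emits the twelve BLAST+ columns
# in their canonical order, while write() preserves the order recorded in
# fs_order; missing values are rendered as '-'.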
class B6Reader():
"""Class to read from B6/M8 files and store lines as B6Entry objects
Attributes:
handle (file): B6/M8 file handle, can be any iterator so long as it
it returns subsequent "lines" of a B6/M8 entry
filename (str): name of the B6 file
current_line (int): current line in file [default: 0]
"""
def __init__(self, handle):
"""Initialize variables to store B6/M8 file information"""
self.handle = handle
self.filename = handle.name
self.current_line = 0
def iterate(self, start_line=None, header: list=['qaccver', 'saccver',
'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart',
'send', 'evalue', 'bitscore'], comments: bool=False):
"""Iterate over B6/M8 file and return B6/M8 entries
Args:
start_line (str): Next B6/M8 entry. If 'handle' has been partially
read and you want to start iterating at the next entry, read
the next B6/M8 entry and pass it to this variable when calling
                iterate(). See 'Examples' for proper usage.
header (list): List of custom format specifiers if B6 file not in
default Blast+ 6 format
comments (bool): Yields comments if True, else skips lines starting
with "#"
Yields:
B6Entry: class containing all B6/M8 data
Examples:
            The following two examples demonstrate how to use iterate().
            Note: These doctests will not pass; the examples are only in
            doctest format as per convention. bio_utils uses pytest for
            testing.
            >>> for entry in B6Reader(open('test.b6out')).iterate():
... print(entry.query) # Query ID
... print(entry.subject) # Subject ID
... print(entry.identity) # Percent identity between seqs
... print(entry.mismatches) # Number mismatches in alignment
... print(entry.gaps) # Number gaps in alignment
... print(entry.query_start) # Start of alignment on query
... print(entry.query_end) # End of alignment on query
... print(entry.subject_start) # Start of align on subject
... print(entry.subject_end) # End of alignment on subject
... print(entry.evalue) # E-value of alignment
... print(entry.bitscore) # Bitscore of alignment
            ... print(entry.write()) # Reconstituted B6 entry
>>> b6_handle = open('test.b6out')
>>> next(b6_handle) # Skip first line/entry
>>> next_line = next(b6_handle) # Store next entry
            >>> for entry in B6Reader(b6_handle).iterate(start_line=next_line):
... print(entry.query) # Query ID
... print(entry.subject) # Subject ID
... print(entry.identity) # Percent identity between seqs
... print(entry.mismatches) # Number mismatches in alignment
... print(entry.gaps) # Number gaps in alignment
... print(entry.query_start) # Start of alignment on query
... print(entry.query_end) # End of alignment on query
... print(entry.subject_start) # Start of align on subject
... print(entry.subject_end) # End of alignment on subject
... print(entry.evalue) # E-value of alignment
... print(entry.bitscore) # Bitscore of alignment
... print(entry.write()) # Reconstituted B6 entry
"""
handle = self.handle
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
# Map attribute names to default format specifier names
def_map = {'query_end': ('qend', str),
'mismatches': ('mismatch', int),
'identity': ('pident', float),
'query': ('qaccver', str),
'query_start': ('qstart', int),
'subject_start': ('sstart', int),
'bitscore': ('bitscore', float),
'evalue': ('evalue', float),
'gaps': ('gapopen', int),
'subject_end': ('send', int),
'length': ('length', int),
'subject': ('saccver', str)
}
def_map_rev = {j[0]: k for k, j in def_map.items()}
def_specs = list(def_map_rev.keys())
uheader = list(OrderedDict.fromkeys(header))
spec_order = [def_map_rev[i] if i in def_map_rev else i \
for i in uheader]
# Store order of format specifiers
h = {}
for index, specifier in enumerate(header):
            if specifier not in h:  # Ignore duplicate columns
h[specifier] = index
# Begin reading text
if start_line is None:
line = next(handle) # Read first B6/M8 entry
else:
            line = start_line  # Start from the entry supplied by the caller
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
next_line = next
line = strip(line)
        # A manual 'for' loop isn't needed to read the file properly and
        # quickly, unlike fasta_iter and fastq_iter, but it is necessary to
        # begin iterating partway through a file when the user gives a
        # starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
self.current_line += 1
data = B6Entry()
data.fs_order = spec_order # All entries store original order
if line.startswith('#') and not comments:
line = strip(next_line(handle))
continue
elif line.startswith('#') and comments:
yield line
line = strip(next_line(handle))
continue
split_line = split(line, '\t')
# Replace empty values with None
spec_values = [None if i == '-' else i for i in split_line]
# Add default specifiers
def_attrs = data.__dict__.keys()
for attr in def_attrs:
try:
def_spec, spec_type = def_map[attr]
except KeyError:
continue
try:
value = spec_values[h[def_spec]]
except KeyError: # Custom format, no value
continue
except IndexError:
current_line = self.current_line
raise FormatError("line {!s}: the number of columns "
"is less than the number of specifiers"\
.format(current_line))
                if value is not None:
try:
value = spec_type(value)
except ValueError:
current_line = self.current_line
raise FormatError("line {!s}: {} is of wrong type"\
.format(current_line, def_spec))
setattr(data, attr, value)
# Add non-default specifiers to custom_fs attribute
custom_specs = [i for i in sorted(h, key=h.get, reverse=False) \
if i not in def_specs]
if custom_specs:
data.custom_fs = OrderedDict()
for key in custom_specs:
try:
value = spec_values[h[key]]
except IndexError:
current_line = self.current_line
raise FormatError("line {!s}: the number of "
"columns is less than the number of "
"specifiers".format(current_line))
data.custom_fs[key] = value
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last B6/M8 entry
yield data
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/b6.py
|
Python
|
gpl-3.0
| 12,735
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.ospace.Const import *
from ige.IDataHolder import makeIDataHolder
## General
turnsPerDay = 24
rotationMod = 384.0
playerTimeout = 60 * 60 * 24 * 21 # 21 days
novicePlayerTimeout = 60 * 60 * 24 * 7 # 7 days
messageTimeout = 60 * 60 * 24 * 14 # 14 days
## New player
startingPopulation = 9000
startingBio = 1000
startingMin = 1000
startingEn = 1000
startingScannerPwr = 100
## Production
maxProdQueueLen = 10
buildOnSamePlanetMod = 1
buildOnAnotherPlanetMod = 2
unusedProdMod = 0.75
## Environment
envInterval = 1000
envAutoMod = 10.0
envMax = 200
envSelfUpgradeChance = {"H": 5, "C": 1, "B": 500} # in ten thousandths (10 000)
planetSpec = {}
planetSpec[u'A'] = makeIDataHolder(
minBio = 0,
maxBio = 0,
upgradeTo = None,
downgradeTo = None,
)
planetSpec[u'G'] = makeIDataHolder(
minBio = 0,
maxBio = 0,
upgradeTo = None,
downgradeTo = None,
)
planetSpec[u'C'] = makeIDataHolder(
minBio = 0,
maxBio = 6,
upgradeTo = u'D',
upgradeEnReqs = (5, 180),
downgradeTo = None,
)
planetSpec[u'R'] = makeIDataHolder(
minBio = 0,
maxBio = 6,
upgradeTo = u'D',
upgradeEnReqs = (5, 180),
downgradeTo = None,
)
planetSpec[u'D'] = makeIDataHolder(
minBio = 6,
maxBio = 12,
upgradeTo = u'H',
upgradeEnReqs = (25, 150),
downgradeTo = u'R',
)
planetSpec[u'H'] = makeIDataHolder(
minBio = 12,
maxBio = 25,
upgradeTo = u'M',
upgradeEnReqs = (50, 125),
downgradeTo = u'D',
)
planetSpec[u'M'] = makeIDataHolder(
minBio = 25,
maxBio = 75,
upgradeTo = u'E',
upgradeEnReqs = (50, 100),
downgradeTo = u'H',
)
planetSpec[u'E'] = makeIDataHolder(
minBio = 75,
maxBio = 125,
upgradeTo = u"I",
upgradeEnReqs = (50, 100),
downgradeTo = u'M',
)
planetSpec[u"I"] = makeIDataHolder( # gaia
minBio = 125,
maxBio = 200,
upgradeTo = None,
downgradeTo = u"E",
)
## New colony settings
colonyMinBio = 600
colonyMinMin = 600
colonyMinEn = 600
## Storage
popPerSlot = 0
bioPerSlot = 0
minPerSlot = 0
enPerSlot = 0
popBaseStor = 4800
bioBaseStor = 4800
minBaseStor = 4800
enBaseStor = 4800
autoMinStorTurns = 6
autoReqStorTurns = 1
maxPopReserve = 1.125
tlPopReserve = 250
## Resources
stratResRate = turnsPerDay * 6
## Population
popGrowthRate = 0.02
popMinGrowthRate = int(1000 * popGrowthRate)
popDieRate = 0.1
popMinDieRate = 100
popKillMod = 0.25
popSlotKillMod = 5 # how many people per 1 DMG get killed when slot is hit
popSlotHP = 100 # HP of habitable structures on slot (where people live)
## Research
maxRsrchQueueLen = 10
techBaseImprovement = 1
techMaxImprovement = 5
techImprCostMod = {1:480, 2:480, 3:720, 4:960, 5:1200, 6: 1440, 7: 1680} #per level
sciPtsPerCitizen = {1: 0, 2: 0.00075, 3: 0.00150, 4: 0.00175, 5: 0.00200, 6: 0.002125, 7: 0.00225, 99: 0} #per level
techImprEff = {1:0.750, 2:0.875, 3:1.000, 4:1.125, 5:1.250} #per sublevel
#maxSciPtsTL = {1:100, 2:200, 3:300, 4:400, 5:500, 6:600, 7:700}
#sciPtsStepFraction = 0.25
## Scanner
maxSignature = 100
scannerMinPwr = 1
level1InfoScanPwr = 1000
level2InfoScanPwr = 1200
level3InfoScanPwr = 1400
level4InfoScanPwr = 1600
maxScanPwr = 200000
mapForgetScanPwr = 0.94
partnerScanPwr = 300000
## Fleets
maxCmdQueueLen = 10
signatureBase = 1.10
operProdRatio = 0.001
combatRetreatWait = 3
starGateDamage = 0.2 # damage for 100% speed boost (double for 200%, etc...)
shipDecayRatio = 0.04
maxDamageAbsorb = 5 # max absorbed damage for tech "damageAbsorb" property.
# max seq_mod equipments of equipType; anything not in list is unlimited
maxEquipType = {
'ECM' : 1, # +Missile DEF
'Combat Bonuses' : 1, # +%ATT, +%DEF
'Combat Modifiers' : 1, # +ATT, +DEF
'Shields' : 1, # not hardshields
'Stealth' : 1,
'Auto Repair' : 1,
}
## Buildings
repairRatio = 0.02
repairRunningRatio = 0.01
decayRatio = 0.02
storCapacityOfOfflineStruct = 1.0
plShieldRegen = 0.05 #regen rate of planetary shield
## Diplomacy
baseRelationChange = -5
relLostWhenAttacked = -1000000
defaultRelation = REL_NEUTRAL
contactTimeout = 6 * turnsPerDay
voteForImpPeriod = 6 * turnsPerDay
ratioNeededForImp = 0.6666
pactDescrs = {}
pactDescrs[PACT_ALLOW_CIVILIAN_SHIPS] = makeIDataHolder(
targetRel = 500,
relChng = 10,
validityInterval = (0, 10000),
)
pactDescrs[PACT_ALLOW_MILITARY_SHIPS] = makeIDataHolder(
targetRel = 750,
relChng = 8,
validityInterval = (0, 10000),
)
pactDescrs[PACT_ALLOW_TANKING] = makeIDataHolder(
targetRel = 750,
relChng = 7,
validityInterval = (0, 10000),
)
pactDescrs[PACT_MINOR_CP_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 6,
effectivity = 0.05,
validityInterval = (625, 10000),
)
pactDescrs[PACT_MAJOR_CP_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
effectivity = 0.05,
validityInterval = (875, 10000),
)
pactDescrs[PACT_SHARE_SCANNER] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
validityInterval = (625, 10000),
)
pactDescrs[PACT_MINOR_SCI_COOP] = makeIDataHolder(
targetRel = 750,
relChng = 1,
effectivity = 0.05,
validityInterval = (625, 10000),
)
pactDescrs[PACT_MAJOR_SCI_COOP] = makeIDataHolder(
targetRel = 1000,
relChng = 1,
effectivity = 0.05,
validityInterval = (875, 10000),
)
## Morale
baseGovPwr = 50000
maxMorale = 100.0
minMoraleTrgt = 30.0
revoltThr = 25.0
moraleChngPerc = 0.03
moraleHighPopPenalty = 2.0
moraleBasePop = 10000
moraleLowPop = 5000
moraleLowPopBonus = 40.0
moraleLostWhenSurrender = 0.0
moraleLostNoFood = 1.0
moraleModPlHit = 96.0 # how many morale points per 1 per cent of damage
moralePerPointChance = 5.0 # for every point below revoltThr % chance for revolt
moraleProdStep = 10
moraleProdBonus = [-0.875, -0.75, -0.625, -0.50, -0.375, -0.25, -0.125, 0.0, 0.0, 0.125, 0.25]
## Revolt
revoltDestrBio = 0.05
revoltDestrMin = 0.05
revoltDestrEn = 0.05
revoltPenalty = 0.75
## Messages
messageMaxAge = turnsPerDay * 3
## Asteroid
asteroidPerHPBioMod = - 0.01
asteroidPerHPMinMod = + 0.001
asteroidGenerPerc = 0.001
asteroidMinPlMinAbund = 10
asteroidModPwr = 2.0
asteroidTargetInSystem = 0.2
asteroidMinHP = 100
asteroidMaxHP = 1000
asteroidMinSpeed = 2.0
asteroidMaxSpeed = 4.0
asteroidMisDef = 1
asteroidDef = 4
asteroidAttack = 4
asteroidImpactDelay = 6
## Projects
projECOINIT3PlBio = 1
## Ships
shipImprovementMod = 1.05
shipMaxImprovements = 5
shipMaxDesigns = 40
shipExpToLevel = {0:1, 1:2, 2:2, 3:3, 4:3, 5:3, 6:3, 7:4, 8:4, 9:4, 10:4, 11:4,
12:4, 13:4, 15:5}
shipDefLevel = 5
shipLevelEff = {1:0.50, 2:0.75, 3:1.00, 4:1.25, 5:1.50}
shipBaseExpMod = 20
shipBaseExp = {0:10, 1:20, 2:40, 3:80, 4:160}
shipTargetPerc = [25, 50, 90, 100]
shipMinUpgrade = 120
shipUpgradeMod = 1.375
shipUpgradePts = [1, 3, 10]
weaponDmgDegrade = [1.0, 0.5, 0.25, 0.125]
## EMR
emrMinDuration = 36
emrMaxDuration = 60
emrPeriod = 576
emrSeasons = [None, None, None, None]
emrSeasons[0] = makeIDataHolder(
name = "spring",
startTime = 0,
endTime = 143,
emrLevelMin = 0.75,
emrLevelMax = 1.25,
)
emrSeasons[1] = makeIDataHolder(
name = "summer",
startTime = 144,
endTime = 287,
emrLevelMin = 0.50,
emrLevelMax = 1.00,
)
emrSeasons[2] = makeIDataHolder(
name = "fall",
startTime = 287,
endTime = 431,
emrLevelMin = 0.50,
emrLevelMax = 1.50,
)
emrSeasons[3] = makeIDataHolder(
name = "winter",
startTime = 432,
endTime = 575,
emrLevelMin = 1.00,
emrLevelMax = 1.50,
)
## General
pirateInfluenceRange = 7.5 # in parsecs
pirateGovPwr = int(500000 * 1.25)
## Fame
pirateGainFamePropability = lambda d: 2 - d * 0.2
pirateLoseFameProbability = lambda d: 1 - (15 - d) * 0.2
pirateCaptureInRangeFame = 1
pirateSurvivalFame = 1
pirateCaptureOutOfRangeFame = -1
## Colonization
pirateColonyCostMod = 1.5 # base multiplier - all other multipliers are multiplied by this
pirateTL3StratResColonyCostMod = 0.25
piratePlayerZoneCostMod = 1.25
pirateColonyFameZoneCost = lambda d: min(d * 0.1 + pirateTL3StratResColonyCostMod,1)
pirateColonyPlayerZoneCost = lambda d: piratePlayerZoneCostMod + (d - 15) * 0.01 * piratePlayerZoneCostMod
## Techs
pirateCanStealImprovements = 3
pirateGrantHSE = 60*24*3600 #60 days; AI only
pirateGrantASSEM = 105*24*3600 #105 days; AI only
pirateGrantCOND = 105*24*3600 #105 days; AI only
## Timed events (not implemented)
pirateTimerMod = 3*24*3600 # +/- up to 3 days for each grant
pirateTimerRum = 20*24*3600 #20 days; grant Brewery, Rum strategic resource, and Drunken Factory (110% Pirate Prison; requires Rum)
pirateTimerEnslavement = 60*24*3600 #60 days; grant Prison
pirateTimerEDENStructure = 120*24*3600 #120 days; grant EDEN Factory (you have discovered a prototype factory...; 135% Pirate Prison; requires Rum)
pirateTimerBerserk = 150*24*3600 #150 days; grant "Berserk" ship module (major defense penalty; major ATT bonus; requires Rum)
pirateTimerSlaveMine = 180*24*3600 #180 days; grant Slave Mine (mining facility with hamster wheel for power; 160% Pirate Prison; requires Rum)
## Bonuses
galLeaderBonus = 0.05
galImperatorBonus = 0.10
## Combat
combatStructureHitMod = 0.75
combatShipHitMod = 0.75
combatHitXferMod = 3.00
combatStructDefense = 1
|
mozts2005/OuterSpace
|
server/res/rules/standard/rules.py
|
Python
|
gpl-2.0
| 10,205
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
video decoding for the AR.Drone.
parts of code from Bastian Venthur, Jean-Baptiste Passot, Florian Lacrampe.
"""
# < imports >--------------------------------------------------------------------------------------
import array
import cProfile
import datetime
import logging
import struct
import sys
try:
import psyco
except ImportError:
pass
# print "Please install psyco for better video decoding performance."
import bitReader as bitReader
# < global variables >------------------------------------------------------------------------------
# logging level
w_logLvl = logging.ERROR
# from zig-zag back to normal
wa_ZIG_ZAG_POSITIONS = array.array ( 'B',
( 0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63 ))
# inverse quantization
wa_IQUANT_TAB = array.array ( 'B',
( 3, 5, 7, 9, 11, 13, 15, 17,
5, 7, 9, 11, 13, 15, 17, 19,
7, 9, 11, 13, 15, 17, 19, 21,
9, 11, 13, 15, 17, 19, 21, 23,
11, 13, 15, 17, 19, 21, 23, 25,
13, 15, 17, 19, 21, 23, 25, 27,
15, 17, 19, 21, 23, 25, 27, 29,
17, 19, 21, 23, 25, 27, 29, 31 ))
# upscaling the 8x8 b- and r-blocks to 16x16
wa_SCALE_TAB = array.array ( 'B',
( 0, 0, 1, 1, 2, 2, 3, 3,
0, 0, 1, 1, 2, 2, 3, 3,
8, 8, 9, 9, 10, 10, 11, 11,
8, 8, 9, 9, 10, 10, 11, 11,
16, 16, 17, 17, 18, 18, 19, 19,
16, 16, 17, 17, 18, 18, 19, 19,
24, 24, 25, 25, 26, 26, 27, 27,
24, 24, 25, 25, 26, 26, 27, 27,
4, 4, 5, 5, 6, 6, 7, 7,
4, 4, 5, 5, 6, 6, 7, 7,
12, 12, 13, 13, 14, 14, 15, 15,
12, 12, 13, 13, 14, 14, 15, 15,
20, 20, 21, 21, 22, 22, 23, 23,
20, 20, 21, 21, 22, 22, 23, 23,
28, 28, 29, 29, 30, 30, 31, 31,
28, 28, 29, 29, 30, 30, 31, 31,
32, 32, 33, 33, 34, 34, 35, 35,
32, 32, 33, 33, 34, 34, 35, 35,
40, 40, 41, 41, 42, 42, 43, 43,
40, 40, 41, 41, 42, 42, 43, 43,
48, 48, 49, 49, 50, 50, 51, 51,
48, 48, 49, 49, 50, 50, 51, 51,
56, 56, 57, 57, 58, 58, 59, 59,
56, 56, 57, 57, 58, 58, 59, 59,
36, 36, 37, 37, 38, 38, 39, 39,
36, 36, 37, 37, 38, 38, 39, 39,
44, 44, 45, 45, 46, 46, 47, 47,
44, 44, 45, 45, 46, 46, 47, 47,
52, 52, 53, 53, 54, 54, 55, 55,
52, 52, 53, 53, 54, 54, 55, 55,
60, 60, 61, 61, 62, 62, 63, 63,
60, 60, 61, 61, 62, 62, 63, 63 ))
# count leading zeros look up table
wa_CLZLUT = array.array ( 'B',
( 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
# map pixels from four 8x8 blocks to one 16x16
wa_MB_TO_GOB_MAP = array.array ( 'B',
[ 0, 1, 2, 3, 4, 5, 6, 7,
16, 17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39,
48, 49, 50, 51, 52, 53, 54, 55,
64, 65, 66, 67, 68, 69, 70, 71,
80, 81, 82, 83, 84, 85, 86, 87,
96, 97, 98, 99, 100, 101, 102, 103,
112, 113, 114, 115, 116, 117, 118, 119,
8, 9, 10, 11, 12, 13, 14, 15,
24, 25, 26, 27, 28, 29, 30, 31,
40, 41, 42, 43, 44, 45, 46, 47,
56, 57, 58, 59, 60, 61, 62, 63,
72, 73, 74, 75, 76, 77, 78, 79,
88, 89, 90, 91, 92, 93, 94, 95,
104, 105, 106, 107, 108, 109, 110, 111,
120, 121, 122, 123, 124, 125, 126, 127,
128, 129, 130, 131, 132, 133, 134, 135,
144, 145, 146, 147, 148, 149, 150, 151,
160, 161, 162, 163, 164, 165, 166, 167,
176, 177, 178, 179, 180, 181, 182, 183,
192, 193, 194, 195, 196, 197, 198, 199,
208, 209, 210, 211, 212, 213, 214, 215,
224, 225, 226, 227, 228, 229, 230, 231,
240, 241, 242, 243, 244, 245, 246, 247,
136, 137, 138, 139, 140, 141, 142, 143,
152, 153, 154, 155, 156, 157, 158, 159,
168, 169, 170, 171, 172, 173, 174, 175,
184, 185, 186, 187, 188, 189, 190, 191,
200, 201, 202, 203, 204, 205, 206, 207,
216, 217, 218, 219, 220, 221, 222, 223,
232, 233, 234, 235, 236, 237, 238, 239,
248, 249, 250, 251, 252, 253, 254, 255 ] )
wa_MB_ROW_MAP = array.array ( 'B', [ i / 16 for i in wa_MB_TO_GOB_MAP ] )
wa_MB_COL_MAP = array.array ( 'B', [ i % 16 for i in wa_MB_TO_GOB_MAP ] )
# an array of zeros. It is much faster to take the zeros from here than to
# generate a new list when needed.
wa_ZEROS = array.array ( 'i', [ 0 for i in xrange ( 256 ) ] )
# constants needed for the inverse discrete cosine transform.
wi_FIX_0_298631336 = 2446
wi_FIX_0_390180644 = 3196
wi_FIX_0_541196100 = 4433
wi_FIX_0_765366865 = 6270
wi_FIX_0_899976223 = 7373
wi_FIX_1_175875602 = 9633
wi_FIX_1_501321110 = 12299
wi_FIX_1_847759065 = 15137
wi_FIX_1_961570560 = 16069
wi_FIX_2_053119869 = 16819
wi_FIX_2_562915447 = 20995
wi_FIX_3_072711026 = 25172
wi_CONST_BITS = 13
wi_PASS1_BITS = 1
wi_F1 = wi_CONST_BITS - wi_PASS1_BITS - 1
wi_F2 = wi_CONST_BITS - wi_PASS1_BITS
wi_F3 = wi_CONST_BITS + wi_PASS1_BITS + 3
# tuning parameter for get_block
wi_TRIES = 16
wi_MASK = 2 ** ( wi_TRIES * 32 ) - 1
wi_SHIFT = 32 * ( wi_TRIES - 1 )
# -------------------------------------------------------------------------------------------------
# _first_half
# -------------------------------------------------------------------------------------------------
def _first_half ( f_data ):
"""
helper function used to precompute the zero values in a 12 bit datum.
"""
# sherlock logger
# l_log = logging.getLogger ( "_first_half" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
# f_data has to be 12 bits wide
li_stream_len = 0
# count the zeros
li_zero_count = wa_CLZLUT [ f_data >> 4 ] ;
f_data = ( f_data << ( li_zero_count + 1 )) & 0b111111111111 # 0xfff
li_stream_len += li_zero_count + 1
# get number of remaining bits to read
li_to_read = 0 if li_zero_count <= 1 else li_zero_count - 1
li_additional = f_data >> ( 12 - li_to_read )
f_data = ( f_data << li_to_read ) & 0b111111111111 # 0xfff
li_stream_len += li_to_read
# add as many zeros to out_list as indicated by li_additional bits
# if li_zero_count is 0, tmp = 0 else the 1 merged with li_additional bits
li_tmp = 0 if ( 0 == li_zero_count ) else (( 1 << li_to_read ) | li_additional )
# sherlock logger
# l_log.debug ( "<<" )
return [ li_stream_len, li_tmp ]
# -------------------------------------------------------------------------------------------------
# _second_half
# -------------------------------------------------------------------------------------------------
def _second_half ( f_data ):
"""
    helper function to precompute the nonzero values in a 15 bit datum.
"""
# sherlock logger
# l_log = logging.getLogger ( "_second_half" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
# f_data has to be 15 bits wide
li_stream_len = 0
li_zero_count = wa_CLZLUT [ f_data >> 7 ]
f_data = ( f_data << ( li_zero_count + 1 )) & 0b111111111111111 # 0x7fff
li_stream_len += li_zero_count + 1
# 01 == EOB
lv_eob = False
if li_zero_count == 1:
lv_eob = True
# sherlock logger
# l_log.debug ( "<< (E01)" )
return [ li_stream_len, None, lv_eob ]
# get number of remaining bits to read
li_to_read = 0 if li_zero_count == 0 else li_zero_count - 1
li_additional = f_data >> ( 15 - li_to_read )
f_data = ( f_data << li_to_read ) & 0b111111111111111 # 0x7fff
li_stream_len += li_to_read
li_tmp = ( 1 << li_to_read ) | li_additional
# get one more bit for the sign
li_tmp = -li_tmp if f_data >> ( 15 - 1 ) else li_tmp
li_tmp = int ( li_tmp )
li_stream_len += 1
# sherlock logger
# l_log.debug ( "<<" )
return [ li_stream_len, li_tmp, lv_eob ]
# precompute all 12 and 15 bit values for the entropy decoding process
wai_FH = [ _first_half ( li_i ) for li_i in xrange ( 2 ** 12 ) ]
wai_SH = [ _second_half ( li_i ) for li_i in xrange ( 2 ** 15 ) ]
# -------------------------------------------------------------------------------------------------
# get_block
# -------------------------------------------------------------------------------------------------
def get_block ( f_bitreader, fv_has_coeff ):
"""
    read an 8x8 block from the data stream.
    handles the Huffman, RLE, zig-zag and IDCT steps and returns a list of 64 ints.
"""
# sherlock logger
# l_log = logging.getLogger ( "get_block" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
# read the first 10 bits in a 16 bit datum
la_out_list = wa_ZEROS [ 0:64 ]
la_out_list [ 0 ] = int ( f_bitreader.read ( 10 )) * wa_IQUANT_TAB [ 0 ]
if ( not fv_has_coeff ):
# sherlock logger
# l_log.debug ( "<< (E01)" )
return inverse_dct ( la_out_list )
li_i = 1
while ( True ):
_ = f_bitreader.read ( 32 * wi_TRIES, False )
li_stream_len = 0
#######################################################################
for li_j in xrange ( wi_TRIES ):
l_data = ( _ << li_stream_len ) & wi_MASK
l_data >>= wi_SHIFT
li_l, li_tmp = wai_FH [ l_data >> 20 ]
li_stream_len += li_l
l_data = ( l_data << li_l ) & 0xffffffff
li_i += li_tmp
li_l, li_tmp, lv_eob = wai_SH [ l_data >> 17 ]
li_stream_len += li_l
if ( lv_eob ):
f_bitreader.read ( li_stream_len )
# sherlock logger
# l_log.debug ( "<< (E02)" )
return inverse_dct ( la_out_list )
li_j = wa_ZIG_ZAG_POSITIONS [ li_i ]
la_out_list [ li_j ] = li_tmp * wa_IQUANT_TAB [ li_j ]
li_i += 1
#######################################################################
f_bitreader.read ( li_stream_len )
# sherlock logger
# l_log.debug ( "<<" )
return inverse_dct ( la_out_list )
# -------------------------------------------------------------------------------------------------
# get_gob
# -------------------------------------------------------------------------------------------------
def get_gob ( f_bitreader, f_picture, f_slicenr, f_width ):
"""
read a group of blocks.
does not return data, the picture parameter is modified in place instead.
"""
# sherlock logger
# l_log = logging.getLogger ( "get_gob" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
# the first gob has a special header
if ( f_slicenr > 0 ):
f_bitreader.align ()
li_gobsc = f_bitreader.read ( 22 )
if ( 0b0000000000000000111111 == li_gobsc ):
print "weeeee"
# sherlock logger
# l_log.debug ( "<< (E01)" )
return False
elif ( not ( li_gobsc & 0b0000000000000000100000 ) or # 0x20
( li_gobsc & 0b1111111111111111000000 )): # 0x3fffc0
print "Got wrong GOBSC, aborting.", bin ( li_gobsc )
# sherlock logger
# l_log.debug ( "<< (E02)" )
return False
_ = f_bitreader.read ( 5 )
li_offset = f_slicenr * 16 * f_width
for li_i in xrange ( f_width / 16 ):
get_mb ( f_bitreader, f_picture, f_width, li_offset + 16 * li_i )
# sherlock logger
# l_log.debug ( "<<" )
# -------------------------------------------------------------------------------------------------
# get_mb
# -------------------------------------------------------------------------------------------------
def get_mb ( f_bitreader, f_picture, f_width, f_offset ):
"""
get macro block.
does not return data but modifies the picture parameter in place.
"""
# sherlock logger
# l_log = logging.getLogger ( "get_mb" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
li_mbc = f_bitreader.read ( 1 )
if ( 0 == li_mbc ):
li_mbdesc = f_bitreader.read ( 8 )
# assert ( li_mbdesc >> 7 & 1 )
if ( li_mbdesc >> 6 & 1 ):
li_mbdiff = f_bitreader.read ( 2 )
li_y = get_block ( f_bitreader, li_mbdesc & 1 )
li_y.extend ( get_block ( f_bitreader, li_mbdesc >> 1 & 1 ))
li_y.extend ( get_block ( f_bitreader, li_mbdesc >> 2 & 1 ))
li_y.extend ( get_block ( f_bitreader, li_mbdesc >> 3 & 1 ))
la_cb = get_block ( f_bitreader, li_mbdesc >> 4 & 1 )
la_cr = get_block ( f_bitreader, li_mbdesc >> 5 & 1 )
# ycbcr to rgb
for li_i in xrange ( 256 ):
li_j = wa_SCALE_TAB [ li_i ]
li_Y = li_y [ li_i ] - 16
li_B = la_cb [ li_j ] - 128
li_R = la_cr [ li_j ] - 128
li_r = ( 298 * li_Y + 409 * li_R + 128 ) >> 8
li_g = ( 298 * li_Y - 100 * li_B - 208 * li_R + 128 ) >> 8
li_b = ( 298 * li_Y + 516 * li_B + 128 ) >> 8
li_r = 0 if ( li_r < 0 ) else li_r
li_r = 255 if ( li_r > 255 ) else li_r
li_g = 0 if ( li_g < 0 ) else li_g
li_g = 255 if ( li_g > 255 ) else li_g
li_b = 0 if ( li_b < 0 ) else li_b
li_b = 255 if ( li_b > 255 ) else li_b
# re-order the pixels
li_row = wa_MB_ROW_MAP [ li_i ]
li_col = wa_MB_COL_MAP [ li_i ]
f_picture [ f_offset + li_row * f_width + li_col ] = ''.join ((chr ( li_r ), chr ( li_g ), chr ( li_b )))
else:
print "mbc was not zero"
# sherlock logger
# l_log.debug ( "<<" )
# -------------------------------------------------------------------------------------------------
# get_pheader
# -------------------------------------------------------------------------------------------------
def get_pheader ( f_bitreader ):
"""
read the picture header.
returns the width and height of the image.
"""
# sherlock logger
# l_log = logging.getLogger ( "get_pheader" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
f_bitreader.align ()
li_psc = f_bitreader.read ( 22 )
# assert ( 0b0000000000000000100000 == li_psc )
li_pformat = f_bitreader.read ( 2 )
# assert ( 0b00 != li_pformat )
if ( 1 == li_pformat ):
# CIF
li_width, li_height = 88, 72
else:
# VGA
li_width, li_height = 160, 120
li_presolution = f_bitreader.read ( 3 )
# assert ( 0b000 != li_presolution )
    # double the resolution (presolution - 1) times
li_width = li_width << ( li_presolution - 1 )
li_height = li_height << ( li_presolution - 1 )
# l_log.debug ( "width: %d / height: %d" % ( li_width, li_height ))
li_ptype = f_bitreader.read ( 3 )
li_pquant = f_bitreader.read ( 5 )
li_pframe = f_bitreader.read ( 32 )
# sherlock logger
# l_log.debug ( "<<" )
return li_width, li_height
# -------------------------------------------------------------------------------------------------
# inverse_dct
# -------------------------------------------------------------------------------------------------
def inverse_dct ( f_block ):
"""
inverse discrete cosine transform.
"""
# sherlock logger
# l_log = logging.getLogger ( "inverse_dct" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
la_workspace = wa_ZEROS [ 0:64 ]
la_data = wa_ZEROS [ 0:64 ]
for li_pointer in xrange ( 8 ):
if (( 0 == f_block [ li_pointer + 8 ] ) and ( 0 == f_block [ li_pointer + 16 ] ) and
( 0 == f_block [ li_pointer + 24 ] ) and ( 0 == f_block [ li_pointer + 32 ] ) and
( 0 == f_block [ li_pointer + 40 ] ) and ( 0 == f_block [ li_pointer + 48 ] ) and
( 0 == f_block [ li_pointer + 56 ] )):
li_dcval = f_block [ li_pointer ] << wi_PASS1_BITS
for li_i in xrange ( 8 ):
la_workspace [ li_pointer + li_i * 8 ] = li_dcval
continue
li_z2 = f_block [ li_pointer + 16 ]
li_z3 = f_block [ li_pointer + 48 ]
li_z1 = ( li_z2 + li_z3 ) * wi_FIX_0_541196100
li_tmp2 = li_z1 + li_z3 * -wi_FIX_1_847759065
li_tmp3 = li_z1 + li_z2 * wi_FIX_0_765366865
li_z2 = f_block [ li_pointer ]
li_z3 = f_block [ li_pointer + 32 ]
li_tmp0 = ( li_z2 + li_z3 ) << wi_CONST_BITS
li_tmp1 = ( li_z2 - li_z3 ) << wi_CONST_BITS
li_tmp10 = li_tmp0 + li_tmp3
li_tmp13 = li_tmp0 - li_tmp3
li_tmp11 = li_tmp1 + li_tmp2
li_tmp12 = li_tmp1 - li_tmp2
li_tmp0 = f_block [ li_pointer + 56 ]
li_tmp1 = f_block [ li_pointer + 40 ]
li_tmp2 = f_block [ li_pointer + 24 ]
li_tmp3 = f_block [ li_pointer + 8 ]
li_z1 = li_tmp0 + li_tmp3
li_z2 = li_tmp1 + li_tmp2
li_z3 = li_tmp0 + li_tmp2
li_z4 = li_tmp1 + li_tmp3
li_z5 = ( li_z3 + li_z4 ) * wi_FIX_1_175875602
li_tmp0 *= wi_FIX_0_298631336
li_tmp1 *= wi_FIX_2_053119869
li_tmp2 *= wi_FIX_3_072711026
li_tmp3 *= wi_FIX_1_501321110
li_z1 *= -wi_FIX_0_899976223
li_z2 *= -wi_FIX_2_562915447
li_z3 *= -wi_FIX_1_961570560
li_z4 *= -wi_FIX_0_390180644
li_z3 += li_z5
li_z4 += li_z5
li_tmp0 += li_z1 + li_z3
li_tmp1 += li_z2 + li_z4
li_tmp2 += li_z2 + li_z3
li_tmp3 += li_z1 + li_z4
la_workspace [ li_pointer + 0 ] = (( li_tmp10 + li_tmp3 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 56 ] = (( li_tmp10 - li_tmp3 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 8 ] = (( li_tmp11 + li_tmp2 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 48 ] = (( li_tmp11 - li_tmp2 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 16 ] = (( li_tmp12 + li_tmp1 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 40 ] = (( li_tmp12 - li_tmp1 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 24 ] = (( li_tmp13 + li_tmp0 + ( 1 << wi_F1 )) >> wi_F2 )
la_workspace [ li_pointer + 32 ] = (( li_tmp13 - li_tmp0 + ( 1 << wi_F1 )) >> wi_F2 )
for li_pointer in xrange ( 0, 64, 8 ):
li_z2 = la_workspace [ li_pointer + 2 ]
li_z3 = la_workspace [ li_pointer + 6 ]
li_z1 = ( li_z2 + li_z3 ) * wi_FIX_0_541196100
li_tmp2 = li_z1 + li_z3 * -wi_FIX_1_847759065
li_tmp3 = li_z1 + li_z2 * wi_FIX_0_765366865
li_tmp0 = ( la_workspace [ li_pointer ] + la_workspace [ li_pointer + 4 ] ) << wi_CONST_BITS
li_tmp1 = ( la_workspace [ li_pointer ] - la_workspace [ li_pointer + 4 ] ) << wi_CONST_BITS
li_tmp10 = li_tmp0 + li_tmp3
li_tmp13 = li_tmp0 - li_tmp3
li_tmp11 = li_tmp1 + li_tmp2
li_tmp12 = li_tmp1 - li_tmp2
li_tmp0 = la_workspace [ li_pointer + 7 ]
li_tmp1 = la_workspace [ li_pointer + 5 ]
li_tmp2 = la_workspace [ li_pointer + 3 ]
li_tmp3 = la_workspace [ li_pointer + 1 ]
li_z1 = li_tmp0 + li_tmp3
li_z2 = li_tmp1 + li_tmp2
li_z3 = li_tmp0 + li_tmp2
li_z4 = li_tmp1 + li_tmp3
li_z5 = ( li_z3 + li_z4 ) * wi_FIX_1_175875602
li_tmp0 *= wi_FIX_0_298631336
li_tmp1 *= wi_FIX_2_053119869
li_tmp2 *= wi_FIX_3_072711026
li_tmp3 *= wi_FIX_1_501321110
li_z1 *= -wi_FIX_0_899976223
li_z2 *= -wi_FIX_2_562915447
li_z3 *= -wi_FIX_1_961570560
li_z4 *= -wi_FIX_0_390180644
li_z3 += li_z5
li_z4 += li_z5
li_tmp0 += li_z1 + li_z3
li_tmp1 += li_z2 + li_z4
li_tmp2 += li_z2 + li_z3
li_tmp3 += li_z1 + li_z4
la_data [ li_pointer + 0 ] = ( li_tmp10 + li_tmp3 ) >> wi_F3
la_data [ li_pointer + 7 ] = ( li_tmp10 - li_tmp3 ) >> wi_F3
la_data [ li_pointer + 1 ] = ( li_tmp11 + li_tmp2 ) >> wi_F3
la_data [ li_pointer + 6 ] = ( li_tmp11 - li_tmp2 ) >> wi_F3
la_data [ li_pointer + 2 ] = ( li_tmp12 + li_tmp1 ) >> wi_F3
la_data [ li_pointer + 5 ] = ( li_tmp12 - li_tmp1 ) >> wi_F3
la_data [ li_pointer + 3 ] = ( li_tmp13 + li_tmp0 ) >> wi_F3
la_data [ li_pointer + 4 ] = ( li_tmp13 - li_tmp0 ) >> wi_F3
# sherlock logger
# l_log.debug ( "<<" )
return la_data
# -------------------------------------------------------------------------------------------------
# read_picture
# -------------------------------------------------------------------------------------------------
def read_picture ( f_data ):
"""
convert an AR.Drone image packet to rgb-string.
@param f_data : image packet.
@return width, height, image and time to decode the image.
"""
# sherlock logger
# l_log = logging.getLogger ( "read_picture" )
# l_log.setLevel ( w_logLvl )
# l_log.debug ( ">>" )
f_bitreader = bitReader.bitReader ( f_data )
ll_ti = datetime.datetime.now ()
li_width, li_height = get_pheader ( f_bitreader )
li_slices = li_height / 16
li_blocks = li_width / 16
lai_image = [ 0 for li_i in xrange ( li_width * li_height ) ]
for li_i in xrange ( 0, li_slices ):
get_gob ( f_bitreader, lai_image, li_i, li_width )
f_bitreader.align ()
li_eos = f_bitreader.read ( 22 )
# assert ( 0b0000000000000000111111 == li_eos )
ll_tf = datetime.datetime.now ()
# sherlock logger
# l_log.debug ( "<<" )
return li_width, li_height, ''.join ( lai_image ), ( ll_tf - ll_ti ).microseconds / 1000000.
# -------------------------------------------------------------------------------------------------
#
# -------------------------------------------------------------------------------------------------
try:
psyco.bind ( bitReader )
psyco.bind ( get_block )
psyco.bind ( get_gob )
psyco.bind ( get_mb )
psyco.bind ( inverse_dct )
psyco.bind ( read_picture )
except NameError:
pass
# print "Unable to bind video decoding methods with psyco. Proceeding anyways, but video decoding will be slow!"
# -------------------------------------------------------------------------------------------------
# main
# -------------------------------------------------------------------------------------------------
def main ():
l_fh = open ( "framewireshark.raw", 'r' )
# l_fh = open ( "videoframe.raw", 'r' )
l_data = l_fh.read ()
l_fh.close ()
li_runs = 20
li_t = 0
for li_i in xrange ( li_runs ):
li_width, li_height, lai_image, li_ti = read_picture ( l_data )
#show_image ( lai_image, li_width, li_height )
li_t += li_ti
print
print "avg time:\t", li_t / li_runs, "sec"
print "avg fps.:\t", 1 / ( li_t / li_runs ), "fps"
if ( "image" in sys.argv ):
import pygame
pygame.init ()
li_W, li_H = 320, 240
l_screen = pygame.display.set_mode (( li_W, li_H ))
l_surface = pygame.image.fromstring ( lai_image, ( li_width, li_height ), "RGB" )
        l_screen.blit ( l_surface, ( 0, 0 ))
pygame.display.flip ()
raw_input ()
# -------------------------------------------------------------------------------------------------
# the bootstrap process
# -------------------------------------------------------------------------------------------------
if ( "__main__" == __name__ ):
if ( "profile" in sys.argv ):
cProfile.run ( "main ()" )
else:
main ()
# < the end >-------------------------------------------------------------------------------------- #
|
projeto-si-lansab/si-lansab
|
ARDrone/arVideo.py
|
Python
|
gpl-2.0
| 27,307
|
import logging
from abc import (ABCMeta,
abstractmethod)
from treeherder.model.models import MatcherManager
logger = logging.getLogger(__name__)
class Detector(object):
__metaclass__ = ABCMeta
name = None
"""Class that is called with a list of lines that correspond to
    unmatched, intermittent failures from a specific job and that
    returns the indices of the subset of that list that should be
added as new targets for failure classification."""
def __init__(self, db_object):
self.db_object = db_object
@abstractmethod
def __call__(self, failure_lines):
pass
class TestFailureDetector(Detector):
def __call__(self, text_log_errors):
rv = []
with_failure_lines = [(i, item) for (i, item) in enumerate(text_log_errors)
if item.metadata and item.metadata.failure_line]
for i, text_log_error in with_failure_lines:
failure = text_log_error.metadata.failure_line
if (failure.action == "test_result" and failure.test and failure.status and
failure.expected):
rv.append(i)
return rv
class ManualDetector(Detector):
"""Small hack; this ensures that there's a matcher object indicating that a match
was by manual association, but which never automatically matches any lines"""
def __call__(self, text_log_errors):
return []
def register():
for obj in [ManualDetector, TestFailureDetector]:
MatcherManager.register_detector(obj)
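# A hedged sketch of the Detector contract described in the docstring above
# (hypothetical class, not part of treeherder, and deliberately not passed to
# register()): flag every error line whose metadata carries a failure_line,
# regardless of the failure's action or status.
class EveryFailureLineDetector(Detector):
    def __call__(self, text_log_errors):
        return [i for (i, item) in enumerate(text_log_errors)
                if item.metadata and item.metadata.failure_line]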
|
MikeLing/treeherder
|
treeherder/autoclassify/detectors.py
|
Python
|
mpl-2.0
| 1,557
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = "python-gitlab",
version = "0.1",
packages = find_packages(),
install_requires = ['requests', 'markdown'],
# metadata for upload to PyPI
author = "Itxaka Serrano Garcia",
author_email = "itxakaserrano@gmail.com",
description = "See the README.md file for more information",
license = "GPL3",
keywords = "gitlab git wrapper",
url = "http://github.com/itxaka/python-gitlab/",
)
|
erikjwaxx/python-gitlab-1
|
setup.py
|
Python
|
gpl-3.0
| 502
|
#!/usr/bin/python3
from gi.repository import Gtk, GObject
from util import trackers
import singletons
import constants as c
import status
class PowerWidget(Gtk.Frame):
"""
PowerWidget is a child of InfoPanel, and is only shown if we're on
a system that can run on battery power. It is usually only visible
if the system is actually currently running on battery power.
"""
__gsignals__ = {
'power-state-changed': (GObject.SignalFlags.RUN_LAST, None, ()),
}
def __init__(self):
super(PowerWidget, self).__init__()
self.set_shadow_type(Gtk.ShadowType.NONE)
self.get_style_context().add_class("powerwidget")
self.path_widget_pairs = []
self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self.add(self.box)
self.box.show_all()
self.power_client = singletons.UPowerClient
self.battery_critical = False
trackers.con_tracker_get().connect(self.power_client,
"power-state-changed",
self.on_power_state_changed)
trackers.con_tracker_get().connect(self.power_client,
"percentage-changed",
self.on_percentage_changed)
self.power_client.rescan_devices()
self.on_power_state_changed(self.power_client)
def refresh(self):
self.on_power_state_changed(self.power_client)
def on_power_state_changed(self, client):
for widget in self.box.get_children():
widget.destroy()
self.path_widget_pairs = []
self.battery_critical = False
self.construct_icons()
self.emit("power-state-changed")
def on_percentage_changed(self, client, battery):
battery_path = battery.get_object_path()
for path, widget in self.path_widget_pairs:
if path == battery_path:
self.update_battery_tooltip(widget, battery)
break
def construct_icons(self):
"""
The upower dbus interface actually tells us what icon name to use.
"""
batteries = self.power_client.get_batteries()
for path, battery in batteries:
if status.Debug:
print("powerWidget: Updating battery info: %s - icon: %s - percentage: %s" %
(path, battery.get_property("icon-name"), battery.get_property("percentage")))
image = Gtk.Image.new_from_icon_name(battery.get_property("icon-name"), Gtk.IconSize.LARGE_TOOLBAR)
self.update_battery_tooltip(image, battery)
self.box.pack_start(image, False, False, 4)
self.path_widget_pairs.append((path, image))
self._should_show = True
self.box.show_all()
def update_battery_tooltip(self, widget, battery):
text = ""
try:
pct = int(battery.get_property("percentage"))
if pct > 0:
text = _("%d%%" % pct)
if pct < c.BATTERY_CRITICAL_PERCENT:
self.battery_critical = True
except Exception as e:
pass
widget.set_tooltip_text(text)
def should_show(self):
return not self.power_client.full_and_on_ac_or_no_batteries()
|
leigh123linux/cinnamon-screensaver
|
src/widgets/powerWidget.py
|
Python
|
gpl-2.0
| 3,343
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Executes a command."""
import argparse
import os
import subprocess
import sys
def check_with_log(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
print >>sys.stderr, subprocess.check_call(cmd)
def check_no_log(*cmd):
"""Run the command, raising on errors, no logs"""
try:
subprocess.check_call(cmd)
except:
raise subprocess.CalledProcessError(cmd='subprocess.check_call', returncode=1)
def check_output(*cmd):
"""Log and run the command, return output, raising on errors."""
print >>sys.stderr, 'Run:', cmd
return subprocess.check_output(cmd)
def main(target, buildfile):
"""Build & push to canary."""
check_with_log(
'docker', 'build', '-t', target, '--no-cache=true',
'--pull=true', '--file=%s' % buildfile, '.'
)
check_with_log('docker', 'inspect', target)
email = os.environ.get('DOCKER_EMAIL')
user = os.environ.get('DOCKER_USER')
pwd = os.environ.get('DOCKER_PASSWORD')
if check_output('docker', 'version', '--format=\'{{.Client.Version}}\'').startswith('1.9'):
print >>sys.stderr, 'Docker 1.9, use --email'
email = '--email=not@val.id'
print >>sys.stderr, 'Logging in as %r' % user
check_no_log('docker', 'login', email or '', '--username=%s' % user, '--password=%s' % pwd)
os.environ.pop('DOCKER_EMAIL', None)
os.environ.pop('DOCKER_USER', None)
os.environ.pop('DOCKER_PASSWORD', None)
check_with_log('docker', 'push', target)
check_with_log('docker', 'logout')
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--owner', help='Owner of the job')
PARSER.add_argument(
'--target', help='Build target')
PARSER.add_argument(
'--file', help='Build files')
ARGS = PARSER.parse_args()
if not ARGS.target or not ARGS.file:
raise ValueError('--target and --file must be set!')
if ARGS.owner:
os.environ['OWNER'] = ARGS.owner
main(ARGS.target, ARGS.file)
|
mikedanese/test-infra
|
scenarios/canarypush.py
|
Python
|
apache-2.0
| 2,752
|
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main process for training classifiers."""
# Preconfigure before starting main worker process.
# This step should be performed before importing any of the
# third party libraries.
import logging
import sys
import time
import vm_config
vm_config.configure()
# pylint: disable=wrong-import-position
from core.domain import job_services
import vmconf
def main():
"""Main process of VM."""
try:
job_data = job_services.get_next_job()
if not job_data:
logging.info('No pending job requests.')
return
classifier_data = job_services.train_classifier(
job_data['algorithm_id'], job_data['training_data'])
status = job_services.store_job_result(
job_data['job_id'], classifier_data)
if status != 200:
logging.warning(
'Failed to store result of the job with \'%s\' job_id',
job_data['job_id'])
return
except KeyboardInterrupt:
logging.info('Exiting')
sys.exit(0)
except Exception as e: # pylint: disable=broad-except
# Log any exceptions that arises during processing of job.
logging.error(e.message)
finally:
if vmconf.DEFAULT_WAITING_METHOD == vmconf.FIXED_TIME_WAITING:
time.sleep(vmconf.FIXED_TIME_WAITING_PERIOD)
if __name__ == '__main__':
while True:
main()
|
prasanna08/oppia-ml
|
main.py
|
Python
|
apache-2.0
| 1,996
|
#!/usr/bin/env python
"""Spout tests"""
import time
import pytest
import python_pachyderm
from python_pachyderm.service import pps_proto, pfs_proto
def test_create_spout():
client = python_pachyderm.Client()
client.delete_all()
client.create_pipeline(
pipeline_name="pipeline-create-spout",
transform=pps_proto.Transform(
cmd=["sh"],
image="alpine",
),
spout=pps_proto.Spout(),
)
assert len(list(client.list_pipeline())) == 1
@pytest.mark.timeout(45)
def test_spout_commit():
client = python_pachyderm.Client()
client.delete_all()
client.create_pipeline(
pipeline_name="pipeline-spout-commit",
transform=pps_proto.Transform(
cmd=["bash"],
stdin=[
"echo 'commit time' >> file.txt",
"pachctl put file pipeline-spout-commit@master:/file.txt -f file.txt",
],
),
spout=pps_proto.Spout(),
)
c = client.subscribe_commit(
repo_name="pipeline-spout-commit",
branch="master",
state=pfs_proto.FINISHED,
origin_kind=pfs_proto.USER,
)
next(c)
commit_infos = list(client.list_commit("pipeline-spout-commit"))
assert len(commit_infos) == 1
|
kalugny/pypachy
|
tests/test_spout.py
|
Python
|
mit
| 1,279
|
from django.db import models
from django.core.urlresolvers import reverse
class TextBit(models.Model):
name = models.CharField(max_length=100, primary_key=True)
content = models.TextField()
def get_update_url(self):
return reverse('textbits:update', args=[self.pk])
def __unicode__(self):
return self.name
class Title(models.Model):
field = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
@classmethod
def load(cls):
try:
return cls.objects.get()
except cls.DoesNotExist:
return cls()
def __unicode__(self):
return self.name
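# Usage sketch (assumed caller code, not part of this module): Title.load()
# returns the single stored row, or a fresh unsaved instance when none exists.
# title = Title.load()
# title.name = "Tee Times"
# title.save()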
|
DArtagan/teetimer
|
extra/models.py
|
Python
|
mit
| 674
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
================================================================================
mingus - Music theory Python package, midi package
Copyright (C) 2008-2009, Bart Spaans
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================================================
"""
from Sequencer import Sequencer
from SequencerObserver import SequencerObserver
__all__ = [
'Sequencer',
'SequencerObserver',
'MidiFileIn',
'MidiFileOut',
'MidiTrack',
'fluidsynth',
]
|
spiderbit/canta-ng
|
mingus/midi/__init__.py
|
Python
|
gpl-3.0
| 1,177
|
# Copyright (C) 2012 David Morton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from scipy.cluster.vq import kmeans, vq
import numpy
from spikepy.developer.methods import ClusteringMethod
from spikepy.common.valid_types import ValidInteger, ValidOption
class ClusteringKMeans(ClusteringMethod):
'''
This class implements a k-means clustering method.
'''
name = 'K-means'
description = 'K-means clustering algorithm with random initial centroids.'
is_stochastic = True
restarts = ValidInteger(1, 10000, default=10)
choices = ['Use BIC'] + map(str, range(1, 21))
number_of_clusters = ValidOption(*choices, default='3', description=
        'The number of clusters, or estimate the number of clusters via minimizing the Bayesian Information Criterion.')
def run(self, features, number_of_clusters='3', restarts=10):
if number_of_clusters != 'Use BIC':
k = int(number_of_clusters)
if k == 1:
result = numpy.zeros(len(features), dtype=numpy.int32)
return [result]
return [vq(features, kmeans(features, k, iter=restarts)[0])[0]]
else:
return [vq(features, bic_kmeans(features, iter=restarts)[0])[0]]
def bic_kmeans(features, **kwargs):
'''
Run kmeans on features with **kwargs given to scipy.cluster.vq.kmeans for
    different numbers of clusters, k. Finally, choose the clustering that
    minimizes the Bayesian Information Criterion (BIC).
'''
max_k = int(2*numpy.log(len(features)))
base_distances = vq(features,
numpy.array([numpy.average(features, axis=0)]))[1]
base_std = numpy.std(base_distances)
centers_list = []
bic_list = []
distances_list = []
for k in range(1, max_k+1):
centers = kmeans(features, k, **kwargs)[0]
clusters, distances = vq(features, centers)
bic = calculate_bic(clusters, distances, base_std)
centers_list.append(centers)
distances_list.append(distances)
bic_list.append(bic)
best_index = numpy.argmin(bic_list)
return centers_list[best_index], distances_list[best_index]
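# Hedged usage sketch (random data; not part of spikepy's public API):
# let the BIC choose the number of clusters for a 100x4 feature matrix.
# features = numpy.random.randn(100, 4)
# centers, distances = bic_kmeans(features, iter=10)
# clusters = vq(features, centers)[0]  # cluster id per feature row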
def calculate_bic(clusters, distances, base_std):
'''
Calculates the most naive form of the BIC given the clusters (codebook) and
the distances from the cluster centroids.
'''
cluster_ids = numpy.unique(clusters)
variance = numpy.average(distances)
first_term = len(clusters)*numpy.log(variance/base_std)
k = len(cluster_ids)
second_term = k*numpy.log(len(clusters))
print 'k', k
print '1', first_term
print '2', second_term
return first_term + second_term
def get_clustered_data(clusters, data, cluster_id):
'''
Return the data which is part of cluster with cluster_id provided.
Inputs:
clusters: 1-d numpy array of integers
        data: n-d numpy array whose first dimension (len) is equal to
              len(clusters)
cluster_id: The particular cluster_id you want data for.
'''
return numpy.take(data, numpy.nonzero(clusters==cluster_id))
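# Worked example (hypothetical arrays): with clusters = numpy.array([0, 1, 0])
# and data = numpy.array([10, 20, 30]), get_clustered_data(clusters, data, 0)
# selects the points in cluster 0, i.e. the values 10 and 30.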
|
davidlmorton/spikepy
|
spikepy/builtins/methods/clustering_k_means/__init__.py
|
Python
|
gpl-3.0
| 3,719
|
from __future__ import unicode_literals
import sys
import io
from contextlib import contextmanager
import unittest
from mcinfo import cli
@contextmanager
def redirect_stdout_stdin(new_stdout, new_stdin):
old_stdout, sys.stdout = sys.stdout, new_stdout
old_stdin, sys.stdin = sys.stdin, new_stdin
try:
yield new_stdout, new_stdin
finally:
sys.stdout = old_stdout
sys.stdin = old_stdin
@contextmanager
def redirect_stdout(new_stdout):
old_stdout, sys.stdout = sys.stdout, new_stdout
try:
yield new_stdout
finally:
sys.stdout = old_stdout
class TestCLI(unittest.TestCase):
def test_handle_req_normal(self):
req = "test"
expected_out = 'Warning: This is test data! It is not at all useful ' \
'for you.\nRecipes:\n'
self.assertEqual(cli.handle_req(req), expected_out)
def test_handle_req_nbt(self):
req = "nbt:test"
expected_out = '{ } example nbt structure\n [All tags from 3]\n '\
' [All tags from other]\n [All tags from ' \
'things]\n a_byte: B A byte. This one has 256 ' \
'possible values.\n a_string: txt A string! what ' \
'did you expect, the spanish inquisition?\n list: ' \
'[ ] example list\n txt the string.'
self.assertEqual(cli.handle_req(req), expected_out)
def test_main_with_args(self):
expected_out = 'Warning: This is test data! It is not at all useful ' \
'for you.\nRecipes:\n'
args = ["test"]
new_stdout = io.StringIO()
with redirect_stdout(new_stdout):
cli.main(args)
new_stdout.seek(0)
out = new_stdout.read()
self.assertEqual(out, expected_out)
def test_main_interactive(self):
new_stdin = io.StringIO("test\nexit\n")
new_stdout = io.StringIO()
with redirect_stdout_stdin(new_stdout, new_stdin):
cli.main([])
new_stdout.seek(0)
out = new_stdout.read()
expected_out = "> Warning: This is test data! It is not at all " \
"useful for you.\nRecipes:\n\n> "
self.assertEqual(out, expected_out)
|
randomdude999/mcinfo
|
tests/test_cli.py
|
Python
|
mit
| 2,303
|
import soundcloud
class Player(object):
def __init__(self):
self.client = soundcloud.Client(client_id="REMOVED FOR SECURITY PURPOSES")
self.user_id = "REMOVED"
def get_url(self, resource):
"""Grabs the permalink for whatever SoundCloud resource you'd like.
For example, for the latest track, choose the '/tracks' resource.
See developers.soundcloud.com/docs/api/reference for more info."""
info = self.client.get(resource, user_id=self.user_id)
url = info[0].obj['permalink_url']
return url
def player(self, track_url, width="325px", height=None):
"""The actual Soundcloud player"""
embed_info = self.client.get('/oembed',
url=track_url,
maxwidth=width,
maxheight=height)
return embed_info.obj['html']
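# Hedged usage sketch (requires the real client id and user id above; the
# '/tracks' resource is the one named in the get_url docstring):
# player = Player()
# track_url = player.get_url('/tracks')   # permalink of the latest track
# embed_html = player.player(track_url)   # embeddable player markup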
|
Acour83/ashflashtheorig
|
media.py
|
Python
|
mit
| 964
|
# -*- coding: utf-8 -*-
try:
import BaseHTTPServer as server
except ImportError:
import http.server as server
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
import json
import pyfttt
# https://wiki.python.org/moin/BaseHttpServer
# https://docs.python.org/2/library/basehttpserver.html
# http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
# make a key with hashlib.sha1(b"hello" + b"world").hexdigest() (should have something unique in there)
# https://docs.python.org/2/library/basehttpserver.html
# TODO: get charset from browser instead of assuming utf-8?
# TODO: handle HEAD (get response, but no body)
# Parse out the url: z=urllib.parse.urlparse('http://blah.com/api/1.0/somekey/blah?woop=21&blah=12')
# parse the query part urllib.parse.parse_qs(z.query)
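# worked example of the key recipe above:
# hashlib.sha1(b"hello" + b"world").hexdigest() yields
# '6adfb183a4a2c94a2f92dab5ade762a47889a5a1', the first entry in VALID_KEYS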
VALID_KEYS = ['6adfb183a4a2c94a2f92dab5ade762a47889a5a1', 'KEY']
class basic_handler(server.BaseHTTPRequestHandler):
def parse_path(self):
""" parse the path, setting api_key, api_version, etc."""
print("PATH IS [{}]".format(self.path))
path = self.path[1:].split('/')
self.valid = True
if len(path) < 4:
self.send_error(400, message='Malformed path')
self.valid = False
            return
if path[0] != 'api':
self.send_error(404)
self.valid = False
return
try:
self.api_version = float(path[1])
except ValueError:
self.send_error(404, message='Unsupported API version')
self.api_version = None
self.valid = False
return
if self.api_version not in [1.0]:
self.send_error(404, message='Unsupported API version')
self.valid = False
return
self.api_key = path[2]
if self.api_key not in VALID_KEYS:
self.send_error(403)
self.valid = False
return
self.api_command = path[3]
if self.api_command not in ['test', 'blah']:
self.send_error(400, message="Unknown command '{}'".format(self.api_command))
self.valid = False
return
self.api_arguments = path[4:]
def do_GET(self):
return self.do_GETPOST()
def do_POST(self):
return self.do_GETPOST()
def do_GETPOST(self):
self.server_version = 'pyfttt/{}'.format(pyfttt.__version__)
self.parse_path()
if not self.valid:
return
#print("COMMAND: {}".format(self.api_command))
#print("OPTIONS: {}".format(self.api_arguments))
if self.command == 'POST':
data_length = int(self.headers['Content-Length'])
# Payload must be under 1k
if data_length > 1000:
self.send_error(413)
return
data_raw = self.rfile.read(data_length)
if self.headers['Content-Type'] == 'application/json':
data_parsed = json.loads(bytes.decode(data_raw))
elif self.headers['Content-Type'] == 'application/x-www-form-urlencoded':
data_parsed = parse_qs(data_raw)
else:
self.send_error(400, message="Unsupported Content Type '{}'".format(self.headers['Content-Type']))
return
#print("\tGot Data: [{}]".format(data_parsed))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
stuff = {'brian': 35, 'lori': 35, 'address': {'house': 2351, 'street': 'fairview ave e'}}
self.wfile.write(json.dumps(stuff, indent=4).encode('UTF-8'))
def run_server(host='', port=7777, handler=basic_handler):
httpd = server.HTTPServer(server_address=(host,port),
RequestHandlerClass=handler)
httpd.serve_forever()
run_server(port=7777, handler=basic_handler)
# print("Got a POST request")
# print("\tClient Address: {}".format(self.client_address))
# print("\tCommand: {}".format(self.command))
# print("\tPath: {}".format(self.path.split('/')))
# print("\tRequest Version: {}".format(self.request_version))
# print("\tHeaders: {}".format(self.headers))
# print("\t\tContent-Length: {}".format(int(self.headers['Content-Length'])))
# print("\tServer Version: {}".format(self.server_version))
|
Haynie-Research-and-Development/jarvis
|
deps/lib/python3.4/site-packages/pyfttt/server.py
|
Python
|
gpl-2.0
| 4,646
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
import numpy as np
import h5py as h5
import pkg_resources
from yaff import *
from yaff.conversion.gaussian import _scan_g09_forces, _scan_g09_time, \
_scan_g09_pos_vel, _scan_to_line
def test_scan_forces():
fn_log = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
with open(fn_log) as f:
numbers, frc = _scan_g09_forces(f)
assert numbers[0] == 14
assert numbers[1] == 8
assert numbers[-1] == 1
assert len(numbers) == 9
assert frc[0,0] == 0.000014646
assert frc[1,-1] == 0.005043566
assert frc[-1,1] == 0.002557226
assert frc.shape == (9, 3)
def test_scan_time():
fn_log = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
with open(fn_log) as f:
time, step, ekin, epot, etot = _scan_g09_time(f)
assert time == 0.0
assert step == 2
assert ekin == 0.0306188
assert epot == -592.9048374
assert etot == -592.8742186
time, step, ekin, epot, etot = _scan_g09_time(f)
assert time == 1.125278*femtosecond
assert step == 3
assert ekin == 0.0244215
assert epot == -592.8986401
assert etot == -592.8742186
def test_scan_pos_vel():
vel_unit = np.sqrt(amu)/second
fn_log = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
with open(fn_log) as f:
_scan_to_line(f, " Cartesian coordinates: (bohr)") # skip first one, has different format
pos, vel = _scan_g09_pos_vel(f)
assert pos[0,0] == -1.287811626725E-02
assert pos[-1,-1] == 2.710579145562E+00
assert pos.shape == (9, 3)
assert vel[1, 0] == 5.750552889614E+13*vel_unit
assert vel[-2, 2] == 1.741570818851E+13*vel_unit
assert vel.shape == (9, 3)
def test_to_hdf():
vel_unit = np.sqrt(amu)/second
fn_xyz = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.xyz')
fn_log = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
with h5.File('yaff.conversion.test.test_gaussian.test_to_hdf5.h5', driver='core', backing_store=False) as f:
system = System.from_file(fn_xyz)
system.to_hdf5(f)
# Actual trajectory conversion, twice
for i in range(2):
offset = 2*i
g09log_to_hdf5(f, fn_log)
assert 'trajectory' in f
assert get_last_trajectory_row(f['trajectory']) == 2+offset
assert 'pos' in f['trajectory']
assert f['trajectory/pos'].shape == (2+offset, 9, 3)
assert f['trajectory/pos'][offset,0,0] == -1.287811626725E-02
assert f['trajectory/pos'][-1,-1,-1] == 2.710239686065E+00
assert 'vel' in f['trajectory']
assert f['trajectory/vel'].shape == (2+offset, 9, 3)
assert f['trajectory/vel'][offset,0,0] == -6.493457131863E+13*vel_unit
assert f['trajectory/vel'][-1,-1,-1] == 4.186482857132E+12*vel_unit
assert 'frc' in f['trajectory']
assert f['trajectory/frc'].shape == (2+offset, 9, 3)
assert f['trajectory/frc'][offset,0,0] == 0.002725302
assert f['trajectory/frc'][-1,-1,-1] == 0.008263482
assert 'time' in f['trajectory']
assert f['trajectory/time'].shape == (2+offset, 1)
assert f['trajectory/time'][offset] == 0.0
assert f['trajectory/time'][-1] == 1.125278*femtosecond
assert 'step' in f['trajectory']
assert f['trajectory/step'].shape == (2+offset, 1)
assert f['trajectory/step'][offset] == 2
assert f['trajectory/step'][-1] == 3
assert 'epot' in f['trajectory']
assert f['trajectory/epot'].shape == (2+offset, 1)
assert f['trajectory/epot'][offset] == -592.9048374
assert f['trajectory/epot'][-1] == -592.8986401
assert 'ekin' in f['trajectory']
assert f['trajectory/ekin'].shape == (2+offset, 1)
assert f['trajectory/ekin'][offset] == 0.0306188
assert f['trajectory/ekin'][-1] == 0.0244215
assert 'etot' in f['trajectory']
assert f['trajectory/etot'].shape == (2+offset, 1)
assert f['trajectory/etot'][offset] == -592.8742186
assert f['trajectory/etot'][-1] == -592.8742186
f.close()
|
molmod/yaff
|
yaff/conversion/test/test_gaussian.py
|
Python
|
gpl-3.0
| 5,408
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'This is a DMARC report parser that accepts either an XML or zipped file as input at an attachment via email',
'author': 'Elmer Thomas',
'url': '',
'download_url': '',
'author_email': 'elmer.thomas@sendgrid.com',
'version': '0.1',
'install_requires': ['nose', 'Flask', 'Flask-SQLAlchemy', 'Jinja2', 'Werkzeug', 'distribute', 'wsgiref', 'mysql-python', 'requests', 'simplejson', 'configobj'],
'packages': ['dmarc_parser'],
'scripts': [],
'name': 'DMARC Parser'
}
setup(**config)
|
thinkingserious/sendgrid-python-dmarc-parser
|
setup.py
|
Python
|
mit
| 608
|
__all__ = ('ForwardRefPolicy', 'TypeHintWarning', 'typechecked', 'check_return_type',
'check_argument_types', 'check_type', 'TypeWarning', 'TypeChecker',
'typeguard_ignore')
import collections.abc
import gc
import inspect
import sys
import threading
from collections import OrderedDict
from enum import Enum
from functools import wraps, partial
from inspect import Parameter, isclass, isfunction, isgeneratorfunction
from io import TextIOBase, RawIOBase, IOBase, BufferedIOBase
from traceback import extract_stack, print_stack
from types import CodeType, FunctionType
from typing import (
Callable, Any, Union, Dict, List, TypeVar, Tuple, Set, Sequence, get_type_hints, TextIO,
Optional, IO, BinaryIO, Type, Generator, overload, Iterable, AsyncIterable, Iterator,
AsyncIterator, AbstractSet, TYPE_CHECKING)
from unittest.mock import Mock
from warnings import warn
from weakref import WeakKeyDictionary, WeakValueDictionary
# Python 3.8+
try:
from typing_extensions import Literal
except ImportError:
try:
from typing import Literal
except ImportError:
Literal = None
# Python 3.5.4+ / 3.6.2+
try:
from typing_extensions import NoReturn
except ImportError:
try:
from typing import NoReturn
except ImportError:
NoReturn = None
# Python 3.6+
try:
from inspect import isasyncgenfunction, isasyncgen
from typing import AsyncGenerator
except ImportError:
AsyncGenerator = None
def isasyncgen(obj):
return False
def isasyncgenfunction(func):
return False
# Python 3.8+
try:
from typing import ForwardRef
evaluate_forwardref = ForwardRef._evaluate
except ImportError:
from typing import _ForwardRef as ForwardRef
evaluate_forwardref = ForwardRef._eval_type
if TYPE_CHECKING:
_F = TypeVar("_F")
def typeguard_ignore(f: _F) -> _F:
"""This decorator is a noop during static type-checking."""
return f
else:
from typing import no_type_check as typeguard_ignore
_type_hints_map = WeakKeyDictionary() # type: Dict[FunctionType, Dict[str, Any]]
_functions_map = WeakValueDictionary() # type: Dict[CodeType, FunctionType]
_missing = object()
T_CallableOrType = TypeVar('T_CallableOrType', bound=Callable[..., Any])
class ForwardRefPolicy(Enum):
"""Defines how unresolved forward references are handled."""
ERROR = 1 #: propagate the :exc:`NameError` from :func:`~typing.get_type_hints`
WARN = 2 #: remove the annotation and emit a TypeHintWarning
#: replace the annotation with the argument's class if the qualified name matches, else remove
#: the annotation
GUESS = 3
class TypeHintWarning(UserWarning):
"""
A warning that is emitted when a type hint in string form could not be resolved to an actual
type.
"""
class _TypeCheckMemo:
__slots__ = 'globals', 'locals', 'typevars'
def __init__(self, globals: Dict[str, Any], locals: Dict[str, Any]):
self.globals = globals
self.locals = locals
self.typevars = {} # type: Dict[Any, type]
def _strip_annotation(annotation):
if isinstance(annotation, str):
return annotation.strip("'")
else:
return annotation
class _CallMemo(_TypeCheckMemo):
__slots__ = 'func', 'func_name', 'arguments', 'is_generator', 'type_hints'
def __init__(self, func: Callable, frame_locals: Optional[Dict[str, Any]] = None,
args: tuple = None, kwargs: Dict[str, Any] = None,
forward_refs_policy=ForwardRefPolicy.ERROR):
super().__init__(func.__globals__, frame_locals)
self.func = func
self.func_name = function_name(func)
self.is_generator = isgeneratorfunction(func)
signature = inspect.signature(func)
if args is not None and kwargs is not None:
self.arguments = signature.bind(*args, **kwargs).arguments
else:
assert frame_locals is not None, 'frame must be specified if args or kwargs is None'
self.arguments = frame_locals
self.type_hints = _type_hints_map.get(func)
if self.type_hints is None:
while True:
if sys.version_info < (3, 5, 3):
frame_locals = dict(frame_locals)
try:
hints = get_type_hints(func, localns=frame_locals)
except NameError as exc:
if forward_refs_policy is ForwardRefPolicy.ERROR:
raise
typename = str(exc).split("'", 2)[1]
for param in signature.parameters.values():
if _strip_annotation(param.annotation) == typename:
break
else:
raise
func_name = function_name(func)
if forward_refs_policy is ForwardRefPolicy.GUESS:
if param.name in self.arguments:
argtype = self.arguments[param.name].__class__
stripped = _strip_annotation(param.annotation)
if stripped == argtype.__qualname__:
func.__annotations__[param.name] = argtype
msg = ('Replaced forward declaration {!r} in {} with {!r}'
.format(stripped, func_name, argtype))
warn(TypeHintWarning(msg))
continue
msg = 'Could not resolve type hint {!r} on {}: {}'.format(
param.annotation, function_name(func), exc)
warn(TypeHintWarning(msg))
del func.__annotations__[param.name]
else:
break
self.type_hints = OrderedDict()
for name, parameter in signature.parameters.items():
if name in hints:
annotated_type = hints[name]
                    # PEP 484 discourages it but MyPy does not complain
if parameter.default is None:
annotated_type = Optional[annotated_type]
if parameter.kind == Parameter.VAR_POSITIONAL:
self.type_hints[name] = Tuple[annotated_type, ...]
elif parameter.kind == Parameter.VAR_KEYWORD:
self.type_hints[name] = Dict[str, annotated_type]
else:
self.type_hints[name] = annotated_type
if 'return' in hints:
self.type_hints['return'] = hints['return']
_type_hints_map[func] = self.type_hints
def resolve_forwardref(maybe_ref, memo: _TypeCheckMemo):
if isinstance(maybe_ref, ForwardRef):
if sys.version_info < (3, 9, 0):
return evaluate_forwardref(maybe_ref, memo.globals, memo.locals)
else:
return evaluate_forwardref(maybe_ref, memo.globals, memo.locals, frozenset())
else:
return maybe_ref
def get_type_name(type_):
# typing.* types don't have a __name__ on Python 3.7+
return getattr(type_, '__name__', None) or getattr(type_, '_name', None) or str(type_)
def find_function(frame) -> Optional[Callable]:
"""
Return a function object from the garbage collector that matches the frame's code object.
    This process is unreliable, as several function objects can share the same
    code object. Fortunately, the chance of this happening while those
    functions also carry different type annotations is vanishingly small.
:param frame: a frame object
:return: a function object if one was found, ``None`` if not
"""
func = _functions_map.get(frame.f_code)
if func is None:
for obj in gc.get_referrers(frame.f_code):
if inspect.isfunction(obj):
if func is None:
# The first match was found
func = obj
else:
# A second match was found
return None
# Cache the result for future lookups
if func is not None:
_functions_map[frame.f_code] = func
else:
raise LookupError('target function not found')
return func
def qualified_name(obj) -> str:
"""
Return the qualified name (e.g. package.module.Type) for the given object.
Builtins and types from the :mod:`typing` package get special treatment by having the module
name stripped from the generated name.
"""
type_ = obj if inspect.isclass(obj) else type(obj)
module = type_.__module__
qualname = type_.__qualname__
return qualname if module in ('typing', 'builtins') else '{}.{}'.format(module, qualname)
def function_name(func: Callable) -> str:
"""
Return the qualified name of the given function.
Builtins and types from the :mod:`typing` package get special treatment by having the module
name stripped from the generated name.
"""
# For partial functions and objects with __call__ defined, __qualname__ does not exist
module = func.__module__
qualname = getattr(func, '__qualname__', repr(func))
return qualname if module == 'builtins' else '{}.{}'.format(module, qualname)
def check_callable(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not callable(value):
raise TypeError('{} must be a callable'.format(argname))
if getattr(expected_type, "__args__", None):
try:
signature = inspect.signature(value)
except (TypeError, ValueError):
return
if hasattr(expected_type, '__result__'):
# Python 3.5
argument_types = expected_type.__args__
check_args = argument_types is not Ellipsis
else:
# Python 3.6
argument_types = expected_type.__args__[:-1]
check_args = argument_types != (Ellipsis,)
if check_args:
# The callable must not have keyword-only arguments without defaults
unfulfilled_kwonlyargs = [
param.name for param in signature.parameters.values() if
param.kind == Parameter.KEYWORD_ONLY and param.default == Parameter.empty]
if unfulfilled_kwonlyargs:
raise TypeError(
'callable passed as {} has mandatory keyword-only arguments in its '
'declaration: {}'.format(argname, ', '.join(unfulfilled_kwonlyargs)))
num_mandatory_args = len([
param.name for param in signature.parameters.values()
if param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD) and
param.default is Parameter.empty])
has_varargs = any(param for param in signature.parameters.values()
if param.kind == Parameter.VAR_POSITIONAL)
if num_mandatory_args > len(argument_types):
raise TypeError(
'callable passed as {} has too many arguments in its declaration; expected {} '
'but {} argument(s) declared'.format(argname, len(argument_types),
num_mandatory_args))
elif not has_varargs and num_mandatory_args < len(argument_types):
raise TypeError(
'callable passed as {} has too few arguments in its declaration; expected {} '
'but {} argument(s) declared'.format(argname, len(argument_types),
num_mandatory_args))
def check_dict(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not isinstance(value, dict):
raise TypeError('type of {} must be a dict; got {} instead'.
format(argname, qualified_name(value)))
if expected_type is not dict:
if (hasattr(expected_type, "__args__") and
expected_type.__args__ not in (None, expected_type.__parameters__)):
key_type, value_type = expected_type.__args__
if key_type is not Any or value_type is not Any:
for k, v in value.items():
check_type('keys of {}'.format(argname), k, key_type, memo)
check_type('{}[{!r}]'.format(argname, k), v, value_type, memo)
def check_typed_dict(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
declared_keys = frozenset(expected_type.__annotations__)
if hasattr(expected_type, '__required_keys__'):
required_keys = expected_type.__required_keys__
else: # py3.8 and lower
required_keys = declared_keys if expected_type.__total__ else frozenset()
existing_keys = frozenset(value)
extra_keys = existing_keys - declared_keys
if extra_keys:
keys_formatted = ', '.join('"{}"'.format(key) for key in sorted(extra_keys))
raise TypeError('extra key(s) ({}) in {}'.format(keys_formatted, argname))
missing_keys = required_keys - existing_keys
if missing_keys:
keys_formatted = ', '.join('"{}"'.format(key) for key in sorted(missing_keys))
raise TypeError('required key(s) ({}) missing from {}'.format(keys_formatted, argname))
for key, argtype in get_type_hints(expected_type).items():
argvalue = value.get(key, _missing)
if argvalue is not _missing:
check_type('dict item "{}" for {}'.format(key, argname), argvalue, argtype, memo)
def check_list(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not isinstance(value, list):
raise TypeError('type of {} must be a list; got {} instead'.
format(argname, qualified_name(value)))
if expected_type is not list:
if hasattr(expected_type, "__args__") and expected_type.__args__ not in \
(None, expected_type.__parameters__):
value_type = expected_type.__args__[0]
if value_type is not Any:
for i, v in enumerate(value):
check_type('{}[{}]'.format(argname, i), v, value_type, memo)
def check_sequence(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not isinstance(value, collections.abc.Sequence):
raise TypeError('type of {} must be a sequence; got {} instead'.
format(argname, qualified_name(value)))
if hasattr(expected_type, "__args__") and expected_type.__args__ not in \
(None, expected_type.__parameters__):
value_type = expected_type.__args__[0]
if value_type is not Any:
for i, v in enumerate(value):
check_type('{}[{}]'.format(argname, i), v, value_type, memo)
def check_set(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not isinstance(value, AbstractSet):
raise TypeError('type of {} must be a set; got {} instead'.
format(argname, qualified_name(value)))
if expected_type is not set:
if hasattr(expected_type, "__args__") and expected_type.__args__ not in \
(None, expected_type.__parameters__):
value_type = expected_type.__args__[0]
if value_type is not Any:
for v in value:
check_type('elements of {}'.format(argname), v, value_type, memo)
def check_tuple(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
# Specialized check for NamedTuples
is_named_tuple = False
if sys.version_info < (3, 8, 0):
is_named_tuple = hasattr(expected_type, '_field_types') # deprecated since python 3.8
else:
is_named_tuple = hasattr(expected_type, '__annotations__')
if is_named_tuple:
if not isinstance(value, expected_type):
raise TypeError('type of {} must be a named tuple of type {}; got {} instead'.
format(argname, qualified_name(expected_type), qualified_name(value)))
if sys.version_info < (3, 8, 0):
field_types = expected_type._field_types
else:
field_types = expected_type.__annotations__
for name, field_type in field_types.items():
check_type('{}.{}'.format(argname, name), getattr(value, name), field_type, memo)
return
elif not isinstance(value, tuple):
raise TypeError('type of {} must be a tuple; got {} instead'.
format(argname, qualified_name(value)))
if getattr(expected_type, '__tuple_params__', None):
# Python 3.5
use_ellipsis = expected_type.__tuple_use_ellipsis__
tuple_params = expected_type.__tuple_params__
elif getattr(expected_type, '__args__', None):
# Python 3.6+
use_ellipsis = expected_type.__args__[-1] is Ellipsis
tuple_params = expected_type.__args__[:-1 if use_ellipsis else None]
else:
# Unparametrized Tuple or plain tuple
return
if use_ellipsis:
element_type = tuple_params[0]
for i, element in enumerate(value):
check_type('{}[{}]'.format(argname, i), element, element_type, memo)
elif tuple_params == ((),):
if value != ():
raise TypeError('{} is not an empty tuple but one was expected'.format(argname))
else:
if len(value) != len(tuple_params):
raise TypeError('{} has wrong number of elements (expected {}, got {} instead)'
.format(argname, len(tuple_params), len(value)))
for i, (element, element_type) in enumerate(zip(value, tuple_params)):
check_type('{}[{}]'.format(argname, i), element, element_type, memo)
def check_union(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if hasattr(expected_type, '__union_params__'):
# Python 3.5
union_params = expected_type.__union_params__
else:
# Python 3.6+
union_params = expected_type.__args__
for type_ in union_params:
try:
check_type(argname, value, type_, memo)
return
except TypeError:
pass
typelist = ', '.join(get_type_name(t) for t in union_params)
raise TypeError('type of {} must be one of ({}); got {} instead'.
format(argname, typelist, qualified_name(value)))
def check_class(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
if not isclass(value):
raise TypeError('type of {} must be a type; got {} instead'.format(
argname, qualified_name(value)))
# Needed on Python 3.7+
if expected_type is Type:
return
expected_class = None
if hasattr(expected_type, "__args__") and expected_type.__args__:
expected_class = expected_type.__args__[0]
if expected_class:
if expected_class is Any:
return
elif isinstance(expected_class, TypeVar):
check_typevar(argname, value, expected_class, memo, True)
elif not issubclass(value, expected_class):
raise TypeError('{} must be a subclass of {}; got {} instead'.format(
argname, qualified_name(expected_class), qualified_name(value)))
def check_typevar(argname: str, value, typevar: TypeVar, memo: _TypeCheckMemo,
subclass_check: bool = False) -> None:
bound_type = resolve_forwardref(memo.typevars.get(typevar, typevar.__bound__), memo)
value_type = value if subclass_check else type(value)
subject = argname if subclass_check else 'type of ' + argname
if bound_type is None:
# The type variable hasn't been bound yet -- check that the given value matches the
# constraints of the type variable, if any
if typevar.__constraints__:
constraints = [resolve_forwardref(c, memo) for c in typevar.__constraints__]
if value_type not in constraints:
typelist = ', '.join(get_type_name(t) for t in constraints if t is not object)
raise TypeError('{} must be one of ({}); got {} instead'.
format(subject, typelist, qualified_name(value_type)))
elif typevar.__covariant__ or typevar.__bound__:
if not issubclass(value_type, bound_type):
raise TypeError(
'{} must be {} or one of its subclasses; got {} instead'.
format(subject, qualified_name(bound_type), qualified_name(value_type)))
elif typevar.__contravariant__:
if not issubclass(bound_type, value_type):
raise TypeError(
'{} must be {} or one of its superclasses; got {} instead'.
format(subject, qualified_name(bound_type), qualified_name(value_type)))
else: # invariant
if value_type is not bound_type:
raise TypeError(
'{} must be exactly {}; got {} instead'.
format(subject, qualified_name(bound_type), qualified_name(value_type)))
if typevar not in memo.typevars:
# Bind the type variable to a concrete type
memo.typevars[typevar] = value_type
def check_literal(argname: str, value, expected_type, memo: _TypeCheckMemo):
def get_args(literal):
try:
args = literal.__args__
except AttributeError:
# Instance of Literal from typing_extensions
args = literal.__values__
retval = []
for arg in args:
if isinstance(arg, Literal.__class__) or getattr(arg, '__origin__', None) is Literal:
# The first check works on py3.6 and lower, the second one on py3.7+
retval.extend(get_args(arg))
elif isinstance(arg, (int, str, bytes, bool, type(None), Enum)):
retval.append(arg)
else:
raise TypeError('Illegal literal value: {}'.format(arg))
return retval
final_args = tuple(get_args(expected_type))
if value not in final_args:
raise TypeError('the value of {} must be one of {}; got {} instead'.
format(argname, final_args, value))
def check_number(argname: str, value, expected_type):
if expected_type is complex and not isinstance(value, (complex, float, int)):
raise TypeError('type of {} must be either complex, float or int; got {} instead'.
format(argname, qualified_name(value.__class__)))
elif expected_type is float and not isinstance(value, (float, int)):
raise TypeError('type of {} must be either float or int; got {} instead'.
format(argname, qualified_name(value.__class__)))
def check_io(argname: str, value, expected_type):
if expected_type is TextIO:
if not isinstance(value, TextIOBase):
raise TypeError('type of {} must be a text based I/O object; got {} instead'.
format(argname, qualified_name(value.__class__)))
elif expected_type is BinaryIO:
if not isinstance(value, (RawIOBase, BufferedIOBase)):
raise TypeError('type of {} must be a binary I/O object; got {} instead'.
format(argname, qualified_name(value.__class__)))
elif not isinstance(value, IOBase):
raise TypeError('type of {} must be an I/O object; got {} instead'.
format(argname, qualified_name(value.__class__)))
def check_protocol(argname: str, value, expected_type):
# TODO: implement proper compatibility checking and support non-runtime protocols
if getattr(expected_type, '_is_runtime_protocol', False):
if not isinstance(value, expected_type):
raise TypeError('type of {} ({}) is not compatible with the {} protocol'.
format(argname, type(value).__qualname__, expected_type.__qualname__))
# Equality checks are applied to these
origin_type_checkers = {
AbstractSet: check_set,
Callable: check_callable,
collections.abc.Callable: check_callable,
dict: check_dict,
Dict: check_dict,
list: check_list,
List: check_list,
Sequence: check_sequence,
collections.abc.Sequence: check_sequence,
collections.abc.Set: check_set,
set: check_set,
Set: check_set,
tuple: check_tuple,
Tuple: check_tuple,
type: check_class,
Type: check_class,
Union: check_union
}
_subclass_check_unions = hasattr(Union, '__union_set_params__')
if Literal is not None:
origin_type_checkers[Literal] = check_literal
generator_origin_types = (Generator, collections.abc.Generator,
Iterator, collections.abc.Iterator,
Iterable, collections.abc.Iterable)
asyncgen_origin_types = (AsyncIterator, collections.abc.AsyncIterator,
AsyncIterable, collections.abc.AsyncIterable)
if AsyncGenerator is not None:
asyncgen_origin_types += (AsyncGenerator,)
if hasattr(collections.abc, 'AsyncGenerator'):
asyncgen_origin_types += (collections.abc.AsyncGenerator,)
def check_type(argname: str, value, expected_type, memo: Optional[_TypeCheckMemo] = None, *,
globals: Optional[Dict[str, Any]] = None,
locals: Optional[Dict[str, Any]] = None) -> None:
"""
Ensure that ``value`` matches ``expected_type``.
The types from the :mod:`typing` module do not support :func:`isinstance` or :func:`issubclass`
so a number of type specific checks are required. This function knows which checker to call
for which type.
:param argname: name of the argument to check; used for error messages
:param value: value to be checked against ``expected_type``
:param expected_type: a class or generic type instance
:param globals: dictionary of global variables to use for resolving forward references
(defaults to the calling frame's globals)
:param locals: dictionary of local variables to use for resolving forward references
(defaults to the calling frame's locals)
"""
if expected_type is Any or isinstance(value, Mock):
return
if expected_type is None:
# Only happens on < 3.6
expected_type = type(None)
if memo is None:
frame = sys._getframe(1)
if globals is None:
globals = frame.f_globals
if locals is None:
locals = frame.f_locals
memo = _TypeCheckMemo(globals, locals)
expected_type = resolve_forwardref(expected_type, memo)
origin_type = getattr(expected_type, '__origin__', None)
if origin_type is not None:
checker_func = origin_type_checkers.get(origin_type)
if checker_func:
checker_func(argname, value, expected_type, memo)
else:
check_type(argname, value, origin_type, memo)
elif isclass(expected_type):
if issubclass(expected_type, Tuple):
check_tuple(argname, value, expected_type, memo)
elif issubclass(expected_type, (float, complex)):
check_number(argname, value, expected_type)
elif _subclass_check_unions and issubclass(expected_type, Union):
check_union(argname, value, expected_type, memo)
elif isinstance(expected_type, TypeVar):
check_typevar(argname, value, expected_type, memo)
elif issubclass(expected_type, IO):
check_io(argname, value, expected_type)
elif issubclass(expected_type, dict) and hasattr(expected_type, '__annotations__'):
check_typed_dict(argname, value, expected_type, memo)
elif getattr(expected_type, '_is_protocol', False):
check_protocol(argname, value, expected_type)
else:
expected_type = (getattr(expected_type, '__extra__', None) or origin_type or
expected_type)
if expected_type is bytes:
# As per https://github.com/python/typing/issues/552
if not isinstance(value, (bytearray, bytes, memoryview)):
raise TypeError('type of {} must be bytes-like; got {} instead'
.format(argname, qualified_name(value)))
elif not isinstance(value, expected_type):
raise TypeError(
'type of {} must be {}; got {} instead'.
format(argname, qualified_name(expected_type), qualified_name(value)))
elif isinstance(expected_type, TypeVar):
# Only happens on < 3.6
check_typevar(argname, value, expected_type, memo)
elif isinstance(expected_type, Literal.__class__):
# Only happens on < 3.7 when using Literal from typing_extensions
check_literal(argname, value, expected_type, memo)
elif (isfunction(expected_type) and
getattr(expected_type, "__module__", None) == "typing" and
getattr(expected_type, "__qualname__", None).startswith("NewType.") and
hasattr(expected_type, "__supertype__")):
# typing.NewType, should check against supertype (recursively)
return check_type(argname, value, expected_type.__supertype__, memo)
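# A minimal usage sketch of check_type() (illustrative only; the values shown are
# hypothetical and exercise just the public signature defined above):
#
#     from typing import Dict, List
#     check_type('payload', {'a': [1, 2]}, Dict[str, List[int]])   # passes silently
#     check_type('payload', {'a': 'oops'}, Dict[str, List[int]])   # raises TypeError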
def check_return_type(retval, memo: Optional[_CallMemo] = None) -> bool:
"""
Check that the return value is compatible with the return value annotation in the function.
:param retval: the value about to be returned from the call
:return: ``True``
:raises TypeError: if there is a type mismatch
"""
if memo is None:
# faster than inspect.currentframe(), but not officially
# supported in all python implementations
frame = sys._getframe(1)
try:
func = find_function(frame)
except LookupError:
return True # This can happen with the Pydev/PyCharm debugger extension installed
memo = _CallMemo(func, frame.f_locals)
if 'return' in memo.type_hints:
if memo.type_hints['return'] is NoReturn:
raise TypeError('{}() was declared never to return but it did'.format(memo.func_name))
try:
check_type('the return value', retval, memo.type_hints['return'], memo)
except TypeError as exc: # suppress unnecessarily long tracebacks
raise TypeError(*exc.args) from None
return True
def check_argument_types(memo: Optional[_CallMemo] = None) -> bool:
"""
Check that the argument values match the annotated types.
    Unless a ``memo`` is provided, the information will be retrieved from the previous stack
    frame (i.e. from the function that called this).
:return: ``True``
:raises TypeError: if there is an argument type mismatch
"""
if memo is None:
# faster than inspect.currentframe(), but not officially
# supported in all python implementations
frame = sys._getframe(1)
try:
func = find_function(frame)
except LookupError:
return True # This can happen with the Pydev/PyCharm debugger extension installed
memo = _CallMemo(func, frame.f_locals)
for argname, expected_type in memo.type_hints.items():
if argname != 'return' and argname in memo.arguments:
value = memo.arguments[argname]
description = 'argument "{}"'.format(argname)
try:
check_type(description, value, expected_type, memo)
except TypeError as exc: # suppress unnecessarily long tracebacks
raise TypeError(*exc.args) from None
return True
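# A minimal sketch of how check_argument_types() is meant to be called from inside
# an annotated function (the function below is hypothetical):
#
#     def greet(name: str) -> str:
#         assert check_argument_types()
#         return 'hello ' + name
#
#     greet('world')   # passes
#     greet(42)        # raises TypeError for argument "name"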
class TypeCheckedGenerator:
def __init__(self, wrapped: Generator, memo: _CallMemo):
rtype_args = []
if hasattr(memo.type_hints['return'], "__args__"):
rtype_args = memo.type_hints['return'].__args__
self.__wrapped = wrapped
self.__memo = memo
self.__yield_type = rtype_args[0] if rtype_args else Any
self.__send_type = rtype_args[1] if len(rtype_args) > 1 else Any
self.__return_type = rtype_args[2] if len(rtype_args) > 2 else Any
self.__initialized = False
def __iter__(self):
return self
def __next__(self):
return self.send(None)
def __getattr__(self, name: str) -> Any:
return getattr(self.__wrapped, name)
def throw(self, *args):
return self.__wrapped.throw(*args)
def close(self):
self.__wrapped.close()
def send(self, obj):
if self.__initialized:
check_type('value sent to generator', obj, self.__send_type, memo=self.__memo)
else:
self.__initialized = True
try:
value = self.__wrapped.send(obj)
except StopIteration as exc:
check_type('return value', exc.value, self.__return_type, memo=self.__memo)
raise
check_type('value yielded from generator', value, self.__yield_type, memo=self.__memo)
return value
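# Behavior sketch (hypothetical annotation): for a function annotated
# ``-> Generator[int, str, bool]`` the wrapper above checks every yielded value
# against int, every value passed to send() against str, and the value carried by
# the final StopIteration against bool.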
class TypeCheckedAsyncGenerator:
def __init__(self, wrapped: AsyncGenerator, memo: _CallMemo):
rtype_args = memo.type_hints['return'].__args__
self.__wrapped = wrapped
self.__memo = memo
self.__yield_type = rtype_args[0]
self.__send_type = rtype_args[1] if len(rtype_args) > 1 else Any
self.__initialized = False
    def __aiter__(self):
return self
def __anext__(self):
return self.asend(None)
def __getattr__(self, name: str) -> Any:
return getattr(self.__wrapped, name)
def athrow(self, *args):
return self.__wrapped.athrow(*args)
def aclose(self):
return self.__wrapped.aclose()
async def asend(self, obj):
if self.__initialized:
check_type('value sent to generator', obj, self.__send_type, memo=self.__memo)
else:
self.__initialized = True
value = await self.__wrapped.asend(obj)
check_type('value yielded from generator', value, self.__yield_type, memo=self.__memo)
return value
@overload
def typechecked(*, always: bool = False) -> Callable[[T_CallableOrType], T_CallableOrType]:
...
@overload
def typechecked(func: T_CallableOrType, *, always: bool = False) -> T_CallableOrType:
...
def typechecked(func=None, *, always=False, _localns: Optional[Dict[str, Any]] = None):
"""
Perform runtime type checking on the arguments that are passed to the wrapped function.
The return value is also checked against the return annotation if any.
If the ``__debug__`` global variable is set to ``False``, no wrapping and therefore no type
checking is done, unless ``always`` is ``True``.
This can also be used as a class decorator. This will wrap all type annotated methods,
including ``@classmethod``, ``@staticmethod``, and ``@property`` decorated methods,
in the class with the ``@typechecked`` decorator.
:param func: the function or class to enable type checking for
:param always: ``True`` to enable type checks even in optimized mode
"""
if func is None:
return partial(typechecked, always=always, _localns=_localns)
if not __debug__ and not always: # pragma: no cover
return func
if isclass(func):
prefix = func.__qualname__ + '.'
for key, attr in func.__dict__.items():
if inspect.isfunction(attr) or inspect.ismethod(attr) or inspect.isclass(attr):
if attr.__qualname__.startswith(prefix) and getattr(attr, '__annotations__', None):
setattr(func, key, typechecked(attr, always=always, _localns=func.__dict__))
elif isinstance(attr, (classmethod, staticmethod)):
if getattr(attr.__func__, '__annotations__', None):
wrapped = typechecked(attr.__func__, always=always, _localns=func.__dict__)
setattr(func, key, type(attr)(wrapped))
elif isinstance(attr, property):
kwargs = dict(doc=attr.__doc__)
for name in ("fset", "fget", "fdel"):
property_func = getattr(attr, name)
if property_func is None:
continue
kwargs[name] = typechecked(
property_func, always=always, _localns=func.__dict__
)
setattr(func, key, property(**kwargs))
return func
# Find the frame in which the function was declared, for resolving forward references later
if _localns is None:
_localns = sys._getframe(1).f_locals
# Find either the first Python wrapper or the actual function
python_func = inspect.unwrap(func, stop=lambda f: hasattr(f, '__code__'))
if not getattr(func, '__annotations__', None):
warn('no type annotations present -- not typechecking {}'.format(function_name(func)))
return func
def wrapper(*args, **kwargs):
memo = _CallMemo(python_func, _localns, args=args, kwargs=kwargs)
check_argument_types(memo)
retval = func(*args, **kwargs)
try:
check_return_type(retval, memo)
except TypeError as exc:
raise TypeError(*exc.args) from None
# If a generator is returned, wrap it if its yield/send/return types can be checked
if inspect.isgenerator(retval) or isasyncgen(retval):
return_type = memo.type_hints.get('return')
if return_type:
origin = getattr(return_type, '__origin__', None)
if origin in generator_origin_types:
return TypeCheckedGenerator(retval, memo)
elif origin is not None and origin in asyncgen_origin_types:
return TypeCheckedAsyncGenerator(retval, memo)
return retval
async def async_wrapper(*args, **kwargs):
memo = _CallMemo(python_func, _localns, args=args, kwargs=kwargs)
check_argument_types(memo)
retval = await func(*args, **kwargs)
check_return_type(retval, memo)
return retval
if inspect.iscoroutinefunction(func):
if python_func.__code__ is not async_wrapper.__code__:
return wraps(func)(async_wrapper)
else:
if python_func.__code__ is not wrapper.__code__:
return wraps(func)(wrapper)
# the target callable was already wrapped
return func
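# A minimal sketch of the decorator in use (the function below is hypothetical):
#
#     @typechecked
#     def repeat(word: str, times: int) -> str:
#         return word * times
#
#     repeat('ab', 3)     # returns 'ababab'
#     repeat('ab', 'x')   # raises TypeError for argument "times"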
class TypeWarning(UserWarning):
"""
A warning that is emitted when a type check fails.
:ivar str event: ``call`` or ``return``
:ivar Callable func: the function in which the violation occurred (the called function if event
is ``call``, or the function where a value of the wrong type was returned from if event is
``return``)
:ivar str error: the error message contained by the caught :class:`TypeError`
:ivar frame: the frame in which the violation occurred
"""
    __slots__ = ('func', 'event', 'error', 'frame')
def __init__(self, memo: Optional[_CallMemo], event: str, frame,
exception: Union[str, TypeError]): # pragma: no cover
self.func = memo.func
self.event = event
self.error = str(exception)
self.frame = frame
if self.event == 'call':
caller_frame = self.frame.f_back
event = 'call to {}() from {}:{}'.format(
function_name(self.func), caller_frame.f_code.co_filename, caller_frame.f_lineno)
else:
event = 'return from {}() at {}:{}'.format(
function_name(self.func), self.frame.f_code.co_filename, self.frame.f_lineno)
super().__init__('[{thread_name}] {event}: {self.error}'.format(
thread_name=threading.current_thread().name, event=event, self=self))
@property
def stack(self):
"""Return the stack where the last frame is from the target function."""
return extract_stack(self.frame)
def print_stack(self, file: TextIO = None, limit: int = None) -> None:
"""
Print the traceback from the stack frame where the target function was run.
:param file: an open file to print to (prints to stdout if omitted)
:param limit: the maximum number of stack frames to print
"""
print_stack(self.frame, limit, file)
class TypeChecker:
"""
A type checker that collects type violations by hooking into :func:`sys.setprofile`.
:param packages: list of top level modules and packages or modules to include for type checking
:param all_threads: ``True`` to check types in all threads created while the checker is
running, ``False`` to only check in the current one
:param forward_refs_policy: how to handle unresolvable forward references in annotations
.. deprecated:: 2.6
Use :func:`~.importhook.install_import_hook` instead. This class will be removed in v3.0.
"""
def __init__(self, packages: Union[str, Sequence[str]], *, all_threads: bool = True,
forward_refs_policy: ForwardRefPolicy = ForwardRefPolicy.ERROR):
assert check_argument_types()
warn('TypeChecker has been deprecated and will be removed in v3.0. '
'Use install_import_hook() or the pytest plugin instead.', DeprecationWarning)
self.all_threads = all_threads
self.annotation_policy = forward_refs_policy
self._call_memos = {} # type: Dict[Any, _CallMemo]
self._previous_profiler = None
self._previous_thread_profiler = None
self._active = False
if isinstance(packages, str):
self._packages = (packages,)
else:
self._packages = tuple(packages)
@property
def active(self) -> bool:
"""Return ``True`` if currently collecting type violations."""
return self._active
def should_check_type(self, func: Callable) -> bool:
if not func.__annotations__:
# No point in checking if there are no type hints
return False
elif isasyncgenfunction(func):
# Async generators cannot be supported because the return arg is of an opaque builtin
# type (async_generator_wrapped_value)
return False
else:
# Check types if the module matches any of the package prefixes
return any(func.__module__ == package or func.__module__.startswith(package + '.')
for package in self._packages)
def start(self):
if self._active:
raise RuntimeError('type checker already running')
self._active = True
# Install this instance as the current profiler
self._previous_profiler = sys.getprofile()
sys.setprofile(self)
# If requested, set this instance as the default profiler for all future threads
# (does not affect existing threads)
if self.all_threads:
self._previous_thread_profiler = threading._profile_hook
threading.setprofile(self)
def stop(self):
if self._active:
if sys.getprofile() is self:
sys.setprofile(self._previous_profiler)
else: # pragma: no cover
warn('the system profiling hook has changed unexpectedly')
if self.all_threads:
if threading._profile_hook is self:
threading.setprofile(self._previous_thread_profiler)
else: # pragma: no cover
warn('the threading profiling hook has changed unexpectedly')
self._active = False
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def __call__(self, frame, event: str, arg) -> None: # pragma: no cover
if not self._active:
# This happens if all_threads was enabled and a thread was created when the checker was
# running but was then stopped. The thread's profiler callback can't be reset any other
# way but this.
sys.setprofile(self._previous_thread_profiler)
return
# If an actual profiler is running, don't include the type checking times in its results
if event == 'call':
try:
func = find_function(frame)
except Exception:
func = None
if func is not None and self.should_check_type(func):
memo = self._call_memos[frame] = _CallMemo(
func, frame.f_locals, forward_refs_policy=self.annotation_policy)
if memo.is_generator:
                    return_type_hint = memo.type_hints.get('return')
if return_type_hint is not None:
origin = getattr(return_type_hint, '__origin__', None)
if origin in generator_origin_types:
# Check the types of the yielded values
memo.type_hints['return'] = return_type_hint.__args__[0]
else:
try:
check_argument_types(memo)
except TypeError as exc:
warn(TypeWarning(memo, event, frame, exc))
if self._previous_profiler is not None:
self._previous_profiler(frame, event, arg)
elif event == 'return':
if self._previous_profiler is not None:
self._previous_profiler(frame, event, arg)
if arg is None:
# a None return value might mean an exception is being raised but we have no way of
# checking
return
memo = self._call_memos.get(frame)
if memo is not None:
try:
if memo.is_generator:
                        check_type('yielded value', arg, memo.type_hints.get('return', Any), memo)
else:
check_return_type(arg, memo)
except TypeError as exc:
warn(TypeWarning(memo, event, frame, exc))
if not memo.is_generator:
del self._call_memos[frame]
elif self._previous_profiler is not None:
self._previous_profiler(frame, event, arg)
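# A minimal sketch of the (deprecated) profiler-based checker in use; the package
# name 'mypackage' and run_application() are hypothetical:
#
#     with TypeChecker('mypackage'):
#         run_application()   # mismatches are reported as TypeWarning warnings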
|
glenngillen/dotfiles
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/typeguard/__init__.py
|
Python
|
mit
| 46,364
|
#!/usr/bin/env python
# Calculate a table of dihedral angle interactions used in the alpha-helix
# and beta-sheet regions of the frustrated protein model described in
# figure 8 of the supplemental materials section of:
# AI Jewett, A Baumketner and J-E Shea, PNAS, 101 (36), 13192-13197, (2004)
# Note that the "A" and "B" parameters were incorrectly reported to be
# 5.4*epsilon and 6.0*epsilon. The values used were 5.6 and 6.0 epsilon.
# The phiA and phiB values were 57.29577951308232 degrees (1 rad)
# and 180 degrees, respectively. Both expA and expB were 6.0.
#
# To generate the table used for the alpha-helix (1 degree resolution) use this:
# ./calc_dihedral_table.py 6.0 57.29577951308232 6 5.6 180 6 0.0 359 360
# To generate the table used for the beta-sheets (1 degree resolution) use this:
# ./calc_dihedral_table.py 5.6 57.29577951308232 6 6.0 180 6 0.0 359 360
#
# (If you're curious as to why I set the location of the minima at phi_alpha
# to 1.0 radians (57.2957795 degrees), there was no particularly good reason.
# I think the correct value turns out to be something closer to 50 degrees.)
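#
# In closed form, the energy and force tabulated below are
#   U(phi) = -A*cos((phi-phiA)/2)^expA - B*cos((phi-phiB)/2)^expB
#   F(phi) = -dU/dphi
# with phi measured in degrees and converted to radians internally unless
# use_radians=True.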
from math import *
import sys
# The previous version included the repulsive core term
def U(phi, A, phiA, expA, B, phiB, expB, use_radians=False):
conv_units = pi/180.0
if use_radians:
conv_units = 1.0
termA = pow(cos(0.5*(phi-phiA)*conv_units), expA)
termB = pow(cos(0.5*(phi-phiB)*conv_units), expB)
return -A*termA - B*termB
# The previous version included the repulsive core term
def F(phi, A, phiA, expA, B, phiB, expB, use_radians=False):
conv_units = pi/180.0
if use_radians:
conv_units = 1.0
termA = (0.5*sin(0.5*(phi-phiA)*conv_units) *
expA * pow(cos(0.5*(phi-phiA)*conv_units), expA-1.0))
termB = (0.5*sin(0.5*(phi-phiB)*conv_units) *
expB * pow(cos(0.5*(phi-phiB)*conv_units), expB-1.0))
return -conv_units*(A*termA + B*termB)
if len(sys.argv) != 10:
sys.stderr.write("Error: expected 9 arguments:\n"
"\n"
"Usage: "+sys.argv[0]+" A phiA expA B phiB expB phiMin phiMax N\n\n")
sys.exit(-1)
A = float(sys.argv[1])
phiA = float(sys.argv[2])
expA = float(sys.argv[3])
B = float(sys.argv[4])
phiB = float(sys.argv[5])
expB = float(sys.argv[6])
phi_min = float(sys.argv[7])
phi_max = float(sys.argv[8])
N = int(sys.argv[9])
for i in range(0,N):
phi = phi_min + i*(phi_max - phi_min)/(N-1)
U_phi = U(phi, A, phiA, expA, B, phiB, expB, use_radians=False)
F_phi = F(phi, A, phiA, expA, B, phiB, expB, use_radians=False)
print(str(i+1)+' '+str(phi)+' '+str(U_phi)+' '+str(F_phi))
|
crtrott/lammps
|
tools/moltemplate/examples/coarse_grained_examples/protein_folding_examples/1bead+chaperone/frustrated/moltemplate_files/generate_tables/calc_dihedral_table.py
|
Python
|
gpl-2.0
| 2,713
|
# -*- coding: utf-8 -*-
# 213. House Robber II
# Note: This is an extension of House Robber.
#
# After robbing those houses on that street, the thief has found himself a new place for his thievery
# so that he will not get too much attention. This time, all houses at this place are arranged in a circle.
# That means the first house is the neighbor of the last one.
# Meanwhile, the security system for these houses remain the same as for those in the previous street.
#
# Given a list of non-negative integers representing the amount of money of each house,
# determine the maximum amount of money you can rob tonight without alerting the police.
# http://bookshadow.com/weblog/2015/05/20/leetcode-house-robber-ii/
#
# Case split on whether the first house is robbed: if it is, the last house
# cannot be robbed; otherwise the last house may be robbed.
#
# This turns the circular DP problem into two passes of linear DP, so the
# original House Robber code can be reused.
#
# The single-house case needs special handling.
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 1:
return nums[0]
return max(self.robLinear(nums[1:]), self.robLinear(nums[:-1]))
def robLinear(self, nums):
dp = [0] * len(nums)
if not nums:
return 0
elif len(nums) == 1:
return nums[0]
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i in range(2, len(nums)):
dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])
return dp[-1]
if __name__ == '__main__':
    print(Solution().rob([1, 2, 2, 1]))
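    # expected output: 3 (rob two non-adjacent houses summing to 3, e.g. indices 1 and 3)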
|
gengwg/leetcode
|
213_house_robber_ii.py
|
Python
|
apache-2.0
| 1,670
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
import simplejson as json
except ImportError:
import json
from rest_framework.views import APIView
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from django.db import IntegrityError
from reviews_manager.models import ROIsAnnotationStep, ClinicalAnnotationStep
from reviews_manager.serializers import ClinicalAnnotationStepROIsTreeSerializer
from clinical_annotations_manager.models import SliceAnnotation, CoreAnnotation, FocusRegionAnnotation
from clinical_annotations_manager.serializers import SliceAnnotationSerializer, SliceAnnotationDetailsSerializer,\
CoreAnnotationSerializer, CoreAnnotationDetailsSerializer, FocusRegionAnnotationSerializer, \
FocusRegionAnnotationDetailsSerializer
import logging
logger = logging.getLogger('promort')
class AnnotatedROIsTreeList(APIView):
permission_classes = (permissions.IsAuthenticated,)
def _get_clinical_annotation_step_id(self, clinical_annotation_step_label):
try:
obj = ClinicalAnnotationStep.objects.get(label=clinical_annotation_step_label)
return obj.id
except ClinicalAnnotationStep.DoesNotExist:
raise NotFound('There is no Clinical Annotation step with label \'%s\'' % clinical_annotation_step_label)
def _update_annotation(self, roi_data, clinical_annotation_step_label):
clinical_annotation_id = self._get_clinical_annotation_step_id(clinical_annotation_step_label)
annotation_status = {'annotated': False}
annotations = roi_data.pop('clinical_annotations')
for annotation in annotations:
if annotation['annotation_step'] == int(clinical_annotation_id):
annotation_status['annotated'] = True
roi_data.update(annotation_status)
def get(self, request, rois_annotation_step, clinical_annotation_step, format=None):
try:
obj = ROIsAnnotationStep.objects.get(label=rois_annotation_step)
except ROIsAnnotationStep.DoesNotExist:
raise NotFound('There is no ROIsAnnotationStep with ID %s' % rois_annotation_step)
serializer = ClinicalAnnotationStepROIsTreeSerializer(obj)
rois_tree = serializer.data
for slice in rois_tree['slices']:
self._update_annotation(slice, clinical_annotation_step)
for core in slice['cores']:
self._update_annotation(core, clinical_annotation_step)
for focus_region in core['focus_regions']:
self._update_annotation(focus_region, clinical_annotation_step)
return Response(rois_tree, status=status.HTTP_200_OK)
class ClinicalAnnotationStepAnnotationsList(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, clinical_annotation_step):
annotations = []
slice_annotations = SliceAnnotation.objects.filter(annotation_step__label=clinical_annotation_step)
annotations.extend(SliceAnnotationSerializer(slice_annotations, many=True).data)
core_annotations = CoreAnnotation.objects.filter(annotation_step__label=clinical_annotation_step)
annotations.extend(CoreAnnotationSerializer(core_annotations, many=True).data)
focus_region_annotations = FocusRegionAnnotation.objects.filter(annotation_step__label=clinical_annotation_step)
annotations.extend(FocusRegionAnnotationSerializer(focus_region_annotations, many=True).data)
return Response(annotations, status=status.HTTP_200_OK)
def delete(self, request, clinical_annotation_step):
SliceAnnotation.objects.filter(annotation_step__label=clinical_annotation_step).delete()
CoreAnnotation.objects.filter(annotation_step__label=clinical_annotation_step).delete()
FocusRegionAnnotation.objects.filter(annotation_step__label=clinical_annotation_step).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SliceAnnotationList(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, slice_id, format=None):
slice_annotations = SliceAnnotation.objects.filter(slice=slice_id)
serializer = SliceAnnotationSerializer(slice_annotations, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class ClinicalAnnotationStepObject(APIView):
def _get_clinical_annotation_step_id(self, clinical_annotation_step_label):
try:
obj = ClinicalAnnotationStep.objects.get(label=clinical_annotation_step_label)
return obj.id
except ClinicalAnnotationStep.DoesNotExist:
raise NotFound('There is no Clinical Annotation step with label \'%s\'' % clinical_annotation_step_label)
class SliceAnnotationDetail(ClinicalAnnotationStepObject):
permission_classes = (permissions.IsAuthenticated,)
def _get_annotation(self, slice_id, annotation_step_label):
annotation_step_id = self._get_clinical_annotation_step_id(annotation_step_label)
try:
return SliceAnnotation.objects.get(slice=slice_id, annotation_step=annotation_step_id)
except SliceAnnotation.DoesNotExist:
raise NotFound('There is no annotation for slice %r related to annotation step %r' %
(slice_id, annotation_step_label))
def get(self, request, slice_id, label, format=None):
slice_annotation = self._get_annotation(slice_id, label)
serializer = SliceAnnotationDetailsSerializer(slice_annotation)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, slice_id, label, format=None):
slice_annotation_data = request.data
slice_annotation_data['slice'] = slice_id
slice_annotation_data['annotation_step'] = self._get_clinical_annotation_step_id(label)
slice_annotation_data['author'] = request.user.username
serializer = SliceAnnotationSerializer(data=slice_annotation_data)
if serializer.is_valid():
try:
serializer.save()
except IntegrityError:
return Response({
'status': 'ERROR',
'message': 'duplicated annotation for slice %d of annotation step %s' %
(slice_id, label)
}, status=status.HTTP_409_CONFLICT)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, slice_id, label, format=None):
slice_annotation = self._get_annotation(slice_id, label)
try:
slice_annotation.delete()
except IntegrityError:
return Response({
'status': 'ERROR',
'message': 'unable to complete delete operation, there are still references to this object'
}, status=status.HTTP_409_CONFLICT)
return Response(status=status.HTTP_204_NO_CONTENT)
class CoreAnnotationList(APIView):
    permission_classes = (permissions.IsAuthenticated,)
def get(self, request, core_id, format=None):
core_annotations = CoreAnnotation.objects.filter(core=core_id)
serializer = CoreAnnotationSerializer(core_annotations, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class CoreAnnotationDetail(ClinicalAnnotationStepObject):
permission_classes = (permissions.IsAuthenticated,)
def _get_annotation(self, core_id, annotation_step_label):
annotation_step_id = self._get_clinical_annotation_step_id(annotation_step_label)
try:
return CoreAnnotation.objects.get(core=core_id, annotation_step=annotation_step_id)
except CoreAnnotation.DoesNotExist:
raise NotFound('There is no annotation for core %r related to annotation step %r' %
                           (core_id, annotation_step_label))
def get(self, request, core_id, label, format=None):
core_annotation = self._get_annotation(core_id, label)
serializer = CoreAnnotationDetailsSerializer(core_annotation)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, core_id, label, format=None):
core_annotation_data = request.data
core_annotation_data['core'] = core_id
core_annotation_data['annotation_step'] = self._get_clinical_annotation_step_id(label)
core_annotation_data['author'] = request.user.username
serializer = CoreAnnotationSerializer(data=core_annotation_data)
if serializer.is_valid():
try:
serializer.save()
except IntegrityError:
return Response({
'status': 'ERROR',
'message': 'duplicated annotation for core %d of annotation step %s' %
(core_id, label)
}, status=status.HTTP_409_CONFLICT)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, core_id, label, format=None):
core_annotation = self._get_annotation(core_id, label)
try:
core_annotation.delete()
except IntegrityError:
return Response({
'status': 'ERROR',
'message': 'unable to complete delete operation, there are still references to this object'
}, status=status.HTTP_409_CONFLICT)
return Response(status=status.HTTP_204_NO_CONTENT)
class FocusRegionAnnotationList(APIView):
    permission_classes = (permissions.IsAuthenticated,)
def get(self, request, focus_region_id, format=None):
focus_region_annotations = FocusRegionAnnotation.objects.filter(focus_region=focus_region_id)
serializer = FocusRegionAnnotationSerializer(focus_region_annotations, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class FocusRegionAnnotationDetail(ClinicalAnnotationStepObject):
permission_classes = (permissions.IsAuthenticated,)
def _get_annotation(self, focus_region_id, annotation_step_label):
annotation_step_id = self._get_clinical_annotation_step_id(annotation_step_label)
try:
return FocusRegionAnnotation.objects.get(focus_region=focus_region_id,
annotation_step=annotation_step_id)
except FocusRegionAnnotation.DoesNotExist:
raise NotFound('There is no annotation for focus_region %r related to annotation step %r' %
                           (focus_region_id, annotation_step_label))
def get(self, request, focus_region_id, label, format=None):
focus_region_annotation = self._get_annotation(focus_region_id, label)
serializer = FocusRegionAnnotationDetailsSerializer(focus_region_annotation)
return Response(serializer.data, status=status.HTTP_200_OK)
def _prepare_gleason_elements(self, gleason_elements):
for element in gleason_elements:
element['json_path'] = json.dumps(element['json_path'])
try:
element['cellular_density_helper_json'] = json.dumps(element['cellular_density_helper_json'])
except KeyError:
element['cellular_density_helper_json'] = None
return gleason_elements
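    # For illustration (hypothetical payload): an element arriving as
    #     {'json_path': {...}, 'cellular_density_helper_json': {...}}
    # leaves _prepare_gleason_elements() with both values serialized to JSON
    # strings; when 'cellular_density_helper_json' is absent it is set to None.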
def post(self, request, focus_region_id, label, format=None):
focus_region_annotation_data = request.data
focus_region_annotation_data['focus_region'] = focus_region_id
focus_region_annotation_data['annotation_step'] = self._get_clinical_annotation_step_id(label)
focus_region_annotation_data['author'] = request.user.username
if focus_region_annotation_data.get('gleason_elements'):
focus_region_annotation_data['gleason_elements'] = \
self._prepare_gleason_elements(focus_region_annotation_data['gleason_elements'])
if focus_region_annotation_data.get('cellular_density_helper_json'):
focus_region_annotation_data['cellular_density_helper_json'] = \
json.dumps(focus_region_annotation_data['cellular_density_helper_json'])
serializer = FocusRegionAnnotationSerializer(data=focus_region_annotation_data)
if serializer.is_valid():
try:
serializer.save()
except IntegrityError:
return Response({
'status': 'ERROR',
                    'message': 'duplicated annotation for focus region %d of annotation step %s' %
(focus_region_id, label)
}, status=status.HTTP_409_CONFLICT)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, focus_region_id, label, format=None):
focus_region_annotation = self._get_annotation(focus_region_id, label)
try:
focus_region_annotation.delete()
except IntegrityError:
return Response({
'status': 'ERROR',
'message': 'unable to complete delete operation, there are still references to this object'
}, status=status.HTTP_409_CONFLICT)
return Response(status=status.HTTP_204_NO_CONTENT)
|
lucalianas/ProMort
|
promort/clinical_annotations_manager/views.py
|
Python
|
mit
| 14,550
|
from changes.models import User
from changes.testutils import APITestCase
class UserDetailsTest(APITestCase):
def test_simple(self):
user = self.create_user(email='foobar@example.com')
path = '/api/0/users/{0}/'.format(user.id)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == user.id.hex
class UpdateUserTest(APITestCase):
def test_simple(self):
user = self.create_user(
email='foobar@example.com',
is_admin=False,
)
path = '/api/0/users/{0}/'.format(user.id)
# ensure endpoint requires authentication
resp = self.client.post(path, data={
'is_admin': '1'
})
assert resp.status_code == 401
self.login_default()
# ensure endpoint requires admin
resp = self.client.post(path, data={
'is_admin': '1'
})
assert resp.status_code == 403
self.login_default_admin()
# test setting is_admin
resp = self.client.post(path, data={
'is_admin': '1'
})
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['isAdmin'] is True
user = User.query.get(user.id)
assert user.is_admin is True
|
alex/changes
|
tests/changes/api/test_user_details.py
|
Python
|
apache-2.0
| 1,350
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2016-01-18
git sha : $Format:%H$
copyright : (C) 2016 by Luiz Andrade - Cartographic Engineer @ Brazilian Army
email : luiz.claudio@dsg.eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
##DSG=group
##Inventario=vector
##Override_CRS=boolean False
##CRS=crs
##VRT=output raster
import processing
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from qgis.core import QgsVectorLayer, QgsRasterLayer, QgsSpatialIndex, QgsFeatureRequest, QgsCoordinateTransform, QgsFeature, QgsCoordinateReferenceSystem
from qgis.PyQt.QtCore import QSettings
import os
#script methods
def createVrt(inventario, vrt):
    # Inventory layer
    layer = processing.getObject(inventario)
count = 0
size = layer.featureCount()
p = 0
progress.setPercentage(p)
rasterList = []
for feature in layer.getFeatures():
filename = feature['fileName']
raster = QgsRasterLayer(filename, filename)
if Override_CRS:
raster.setCrs( QgsCoordinateReferenceSystem(int(CRS.split(':')[-1]), QgsCoordinateReferenceSystem.EpsgCrsId) )
rasterList.append(raster)
ovr = filename+'.ovr'
if not os.path.isfile(ovr):
            progress.setText('Building pyramids...')
#('gdalogr:overviews', input, levels=8, clean=False, resampling_method=0(nearest), format=1(Gtiff .ovr))
processing.runalg('gdalogr:overviews', raster, '4 8 32 128', True, 0, 1)
if int(float(count)/size*100) != p:
p = int(float(count)/size*100)
progress.setPercentage(p)
count += 1
    progress.setText('Building virtual raster...')
    processing.runalg('gdalogr:buildvirtualraster', rasterList, 0, False, False, vrt)
#end of script methods
#Making the actual work
s = QSettings()
oldValidation = s.value( "/Projections/defaultBehaviour")
s.setValue( "/Projections/defaultBehaviour", "useGlobal" )
createVrt(Inventario, VRT)
s.setValue( "/Projections/defaultBehaviour", oldValidation )
#ending the actual work
|
lcoandrade/DsgTools
|
core/Misc/QGIS_Scripts/virtual_raster_inloco.py
|
Python
|
gpl-2.0
| 3,081
|
"""
Django settings for chatbot_website project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['CHATBOT_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'chatbot_interface',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatbot_website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatbot_website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
},
"ROUTING": "chatbot_interface.routing.channel_routing",
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file_django': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_django.log',
},
'file_chatbot': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'logs/debug_chatbot.log',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file_django'],
'level': 'INFO',
'propagate': True,
},
'chatbot_interface': {
'handlers': ['console', 'file_chatbot'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
Catherine-Chu/DeepQA
|
chatbot_website/chatbot_website/settings.py
|
Python
|
apache-2.0
| 4,300
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import fs_uae_launcher.fsui as fsui
from .IRCPanel import IRCPanel
from .Netplay import Netplay
class LobbyPanel(IRCPanel):
def __init__(self, parent):
IRCPanel.__init__(self, parent)
self.channel = Netplay.lobby
#self.layout = fsui.VerticalLayout()
"""
label = fsui.BoldLabel(self, "Lobby (and Server Messages)")
self.layout.add(label, fill=True)
self.layout.add_spacer(6)
self.main = fsui.Panel(self)
self.main.layout = fsui.HorizontalLayout()
self.output = fsui.TextArea(self.main)
self.main.layout.add(self.output, expand=4, fill=True)
self.main.layout.add_spacer(6)
self.users = fsui.TextArea(self.main)
self.main.layout.add(self.users, expand=1, fill=True)
self.layout.add(self.main, expand=True, fill=True)
self.layout.add_spacer(6)
self.input = fsui.TextField(self)
self.input.on_activate = self.on_input
self.layout.add(self.input, fill=True)
#self.browse_button = fsui.Button(self, "Browse")
#self.layout.add(self.browse_button)
#self.browse_button = fsui.Button(self, "Browse")
#self.layout.add(self.browse_button)
#self.channel = "#FS-UAE"
"""
#def on_destroy(self):
# print("on_destroy")
# self.irc.stop()
"""
def append_text(self, message):
self.output.append_text(message + "\n")
def on_input(self):
message = self.input.get_text()
self.input.set_text("")
if message.startswith("/"):
#message = message[1:]
#parts = message.split(" ")
#command = parts[0].lower()
#args = parts[1:]
#name = "command_" + command
#try:
# method = getattr(self, name)
#except AttributeError:
# self.append_text("Unknown command: " + command)
#else:
# method(args)
if not self.irc.handle_command_string(message):
self.append_text("Unknown command: " + message)
else:
self.append_text(u"<{0}> {1}".format(IRC.my_nick, message))
self.irc.privmsg(Netplay.lobby, message)
"""
|
lunixbochs/fs-uae-gles
|
launcher/fs_uae_launcher/LobbyPanel.py
|
Python
|
gpl-2.0
| 2,483
|
# txt2sif.py -- Convert .txt into a .sif format
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description='Convert .txt to .sif')
parser.add_argument('txt', help="""txt file to convert. Fields are :
<gene 1 name in ACSN> <name of relationship> <gene 2 name in ACSN> <semi-colon separated list of PubMedID >. Columns are TAB separated
""")
parser.add_argument('sif', help='SIF file to create.')
args = parser.parse_args()
# read the txt file and convert it into a sif file
    with open(args.txt, 'r') as fdr, open(args.sif, 'w') as fdw:
        for line in fdr:
            fields = line.rstrip('\n').split('\t')
            # keep only <gene 1> <relationship> <gene 2>; the PubMed ID list is dropped
            fdw.write("%s\t%s\t%s\n" % (fields[0], fields[1], fields[2]))
        # no explicit close() needed: the with-blocks close both files
if __name__ == "__main__":
main()
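# Example run (file names are illustrative):
#   $ python txt2sif.py acsn_edges.txt acsn_edges.sif
# An input line such as
#   MYC<TAB>activates<TAB>TP53<TAB>12345;67890
# becomes the three-column SIF line
#   MYC<TAB>activates<TAB>TP53
# i.e. the trailing PubMed ID list is dropped.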
|
chagaz/sfan
|
code/txt2sif.py
|
Python
|
mit
| 857
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Flower"
import pyglet
import summa
from summa.director import director
from summa.actions import *
from summa.layer import *
from summa.particle_systems import *
class L(Layer):
def __init__(self):
super( L, self).__init__()
# p = Fireworks()
# p = Explosion()
# p = Fire()
p = Flower()
# p = Sun()
# p = Spiral()
# p = Meteor()
# p = Galaxy()
p.position = (320,240)
self.add( p )
def main():
director.init( resizable=True )
main_scene = summa.scene.Scene()
main_scene.add( L() )
director.run( main_scene )
if __name__ == '__main__':
main()
|
shackra/thomas-aquinas
|
tests/test_particle_flower.py
|
Python
|
bsd-3-clause
| 911
|
import wx
import sys
import os
for item in ("libsrvr", "../server/lib"):
if os.path.exists(item):
sys.path.insert(0, item)
break
import sequip
from FinishConstrHandlers import FinishConstrHandlers
from DependencyPanel import DependencyPanel
from config import Config
from ige.ospace import Rules
from ige.ospace.Rules import Techs
from ConstructionDlg import ConstructionDlg
from AttributesListCtrl import AttributesListCtrl
stratRes = {
0: "None",
1: "Uranium",
2: "Titanium",
3: "Chromium",
4: "Silicium",
5: "Carboneum",
6: "Antimatter",
7: "Plutonium",
8: "Wolframium",
100: "Mutagen",
1000: "Unnilseptium"
}
MENU_EXIT = 101
MENU_SHOW_B = 102
MENU_SHOW_C = 103
MENU_SHOW_H = 104
MENU_IMPROVEMENT_1 = 105
MENU_IMPROVEMENT_2 = 106
MENU_IMPROVEMENT_3 = 107
MENU_IMPROVEMENT_4 = 108
MENU_IMPROVEMENT_5 = 109
MENU_LEVEL_1 = 110
MENU_LEVEL_2 = 111
MENU_LEVEL_3 = 112
MENU_LEVEL_4 = 113
MENU_LEVEL_5 = 114
MENU_LEVEL_6 = 120
MENU_LEVEL_99 = 115
MENU_VIEW_LEVEL = 116
MENU_VIEW_RACE = 117
MENU_VIEW_REQ_LEVEL = 118
MENU_CONSTRUCTION_DLG = 119
def bool2Text(value):
    return "Yes" if value else "No"
def structWeapons2Text(array):
    # only the first four weapon size classes are ever displayed
    parts = []
    for i, weaponNum in enumerate(array[:4]):
        if weaponNum > 0:
            parts.append('%d%s' % (weaponNum, "SMLP"[i]))
    return ', '.join(parts)
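# For example, structWeapons2Text([2, 0, 1, 0]) returns "2S, 1L": counts per
# weapon size class, with empty slots omitted.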
def perc2Text(value):
string = '%d%%' % (value * 100)
return string
def perc100_2Text(value):
string = '%d%%' % (value)
return string
V_NONE = 0x00
V_STRUCT = 0x01
V_HULL = 0x02
V_SEQUIP = 0x04
V_PROJECT = 0x08
V_EFF = 0x10
V_ALL = V_STRUCT|V_HULL|V_SEQUIP|V_PROJECT
techAttrs = {}
defaultAttr = ('Not specified', V_NONE, True, None, int)
def addAttr(attr, descr, props, showIfDefault, default=0, convertor=str):
    # "global" is unnecessary here: the dict is mutated, not rebound
    techAttrs[attr] = (descr, props, showIfDefault, default, convertor)
addAttr('buildProd', 'Constr. reqs - construction points', V_ALL, 0)
addAttr('operBio', 'Operational reqs - biomatter', V_ALL, 0)
addAttr('operMin', 'Operational reqs - minerals', V_ALL, 0)
addAttr('operEn', 'Operational reqs - energy', V_ALL, 0)
addAttr('operWorkers', 'Operational reqs - workers', V_ALL, 0)
addAttr('prodBio', 'Production - biomatter', V_STRUCT|V_EFF, 0)
addAttr('prodMin', 'Production - minerals', V_STRUCT|V_EFF, 0)
addAttr('prodEn', 'Production - energy', V_STRUCT|V_EFF, 0)
addAttr('prodPop', 'Production - population', V_STRUCT|V_EFF, 0)
addAttr('prodProd', 'Production - constr. points', V_STRUCT|V_PROJECT|V_EFF, 0)
addAttr('prodSci', 'Production - research points', V_STRUCT|V_PROJECT|V_EFF, 0)
addAttr('prodEnv', 'Production - env. effect', V_STRUCT|V_EFF, 0)
addAttr('storBio', 'Storage - biomatter', V_STRUCT|V_EFF, 0)
addAttr('storMin', 'Storage - minerals', V_STRUCT|V_EFF, 0)
addAttr('storEn', 'Storage - energy', V_ALL|V_EFF, 0)
addAttr('storPop', 'Accommodate population', V_STRUCT|V_EFF, 0)
addAttr('revoltThr', 'Lowers revolt threshold by', V_STRUCT|V_PROJECT|V_EFF, 0)
addAttr('moraleTrgt', 'Increases max morale by', V_STRUCT|V_PROJECT|V_EFF, 0)
addAttr('govPwr', 'Government power', V_STRUCT|V_EFF, 0)
addAttr('maxHP', 'Hit points', V_STRUCT|V_HULL|V_SEQUIP|V_EFF, 0)
addAttr('scannerPwr', 'Scanner power', V_STRUCT|V_SEQUIP|V_EFF, 0)
addAttr('planetShield', 'Planetary shield', V_STRUCT|V_EFF, 0)
addAttr('systemAtt', 'Fleet attack (bonus)', V_STRUCT|V_EFF, 0)
addAttr('systemDef', 'Fleet defense (bonus)', V_STRUCT|V_EFF, 0)
addAttr('refuelMax', 'Maximum refuel percent', V_STRUCT|V_EFF, 0, convertor = perc100_2Text)
addAttr('refuelInc', 'Refuel increase percent', V_STRUCT|V_EFF, 0, convertor = perc100_2Text)
addAttr('trainShipInc', 'Exp. points per turn', V_STRUCT|V_EFF, 0, convertor = float)
addAttr('trainShipMax', 'Exp. cap (base exp multiple)', V_STRUCT|V_EFF, 0)
addAttr('fleetSpeedBoost', 'Boost speed of fleets', V_STRUCT|V_EFF, 0, convertor = float)
addAttr('structWeapons', 'Weapons', V_STRUCT, 0, convertor = structWeapons2Text)
addAttr('weaponClass', 'Target class', V_SEQUIP, True, convertor = sequip.cclass2Text)
addAttr('weaponDmgMin', 'Weapon minimum damage', V_SEQUIP|V_EFF, 0)
addAttr('weaponDmgMax', 'Weapon maximum damage', V_SEQUIP|V_EFF, 0)
addAttr('weaponIsMissile', 'Missile weapon (ECM counts)', V_SEQUIP|V_HULL, 0, convertor = bool2Text)
addAttr('weaponIgnoreShield', 'Weapon ignore shield', V_SEQUIP|V_HULL, 0, convertor = bool2Text)
addAttr('weaponAtt', 'Weapon attack', V_SEQUIP|V_EFF, 0)
addAttr('weaponROF', 'Weapon Rate Of Fire', V_SEQUIP, 0, convertor = float)
addAttr('minHull', 'Minimum required hull', V_SEQUIP|V_HULL, 0, convertor = sequip.cclass2Text)
addAttr('weight', 'Weight', V_SEQUIP|V_HULL, 0)
addAttr('slots', 'Slots', V_SEQUIP|V_HULL, 0)
addAttr('maxWeight', 'Maximum payload', V_HULL, 0)
addAttr('engPwr', 'Engine power', V_SEQUIP|V_EFF, 0)
addAttr('signature', 'Scan signature', V_SEQUIP|V_HULL, 0)
addAttr('signatureCloak', 'Signature visibility', V_SEQUIP|V_HULL, 0)
addAttr('signatureDecloak', 'Signature visibility', V_SEQUIP|V_HULL, 0)
addAttr('minSignature', 'Min. signature', V_SEQUIP|V_HULL, 0)
addAttr('combatDef', 'Combat defence', V_SEQUIP|V_HULL|V_EFF, 0)
addAttr('combatAtt', 'Combat attack', V_SEQUIP|V_HULL|V_EFF, 0)
addAttr('missileDef', 'Missile defence', V_SEQUIP|V_EFF, 0)
addAttr('combatAttPerc', 'Combat attack (extra)', V_SEQUIP|V_HULL|V_EFF, 0, convertor = perc2Text)
addAttr('combatDefPerc', 'Combat defence (extra)', V_SEQUIP|V_HULL|V_EFF, 0, convertor = perc2Text)
addAttr('missileDefPerc', 'Missile defence (extra)', V_SEQUIP|V_EFF, 0, convertor = perc2Text)
addAttr('shieldPerc', 'Shield strength', V_SEQUIP|V_HULL|V_EFF, 0, convertor = perc2Text)
addAttr('shieldRechargeFix', 'Shield recharge fixed', V_SEQUIP|V_HULL|V_EFF, 0)
addAttr('shieldRechargePerc', 'Shield recharge percent', V_SEQUIP|V_HULL|V_EFF, 0, convertor = perc2Text)
addAttr('damageAbsorb', 'Armor damage absorption', V_SEQUIP|V_HULL, 0)
addAttr('addMP', 'Device MP', V_SEQUIP|V_HULL, 0)
def getChildren(tech):
return tech.researchEnables[1] + tech.researchEnables[2] + tech.researchEnables[3] + \
tech.researchEnables[4] + tech.researchEnables[5]
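# researchEnables appears to map required improvement level (1-5) to the list
# of techs unlocked at that level; getChildren flattens all five lists.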
def getParent(tech):
if tech.researchRequires:
return Rules.techs[tech.researchRequires[0][0]]
else:
return None
class App(wx.App):
"""Application class."""
def OnInit(self):
self.frame = TechViewer(None, -1, "OuterSpace Technology viewer")
self.frame.Show()
self.SetTopWindow(self.frame)
return True
class TechViewer(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, pos = wx.DefaultPosition, size = (800, 600))
wx.EVT_CLOSE(self, self.OnClose)
self.selected = None
global config
        # fill in defaults for any option missing from config.ini
        # :note: View.ReqLevel intentionally gets no default and starts unchecked
        defaults = [
            ("Races", "Bionic", 1), ("Races", "Human", 1), ("Races", "Cyborg", 1),
            ("View", "Improvement", 3), ("View", "Level", 1), ("View", "Race", 1),
        ] + [("Levels", "Level%d" % i, 1) for i in (1, 2, 3, 4, 5, 6, 99)]
        for section, option, value in defaults:
            if getattr(getattr(config, section), option) is None:
                setattr(getattr(config, section), option, value)
self.showBionic = config.Races.Bionic == "1"
self.showHuman = config.Races.Human == "1"
self.showCyborg = config.Races.Cyborg == "1"
self.improvement = int(config.View.Improvement)
self.levels = { 1: config.Levels.Level1 == "1",
2: config.Levels.Level2 == "1",
3: config.Levels.Level3 == "1",
4: config.Levels.Level4 == "1",
5: config.Levels.Level5 == "1",
6: config.Levels.Level6 == "1",
99: config.Levels.Level99 == "1"
}
self.viewLevel = config.View.Level == "1"
self.viewRace = config.View.Race == "1"
self.viewReqLevel = config.View.ReqLevel == "1"
self.currentTech = None
self.Centre(wx.BOTH)
        splitter = wx.SplitterWindow(self, -1, style = wx.SP_3D)  # alternative styles: wx.SP_NOBORDER | wx.NO_3D
def EmptyHandler(evt): pass
wx.EVT_ERASE_BACKGROUND(splitter, EmptyHandler)
tID = wx.NewId()
self.tree = wx.TreeCtrl(splitter, tID, style = wx.TR_HAS_BUTTONS)
wx.EVT_TREE_SEL_CHANGED(self, tID, self.OnSelChanged)
self.rightPanel = wx.Panel(splitter, -1, style = wx.SP_NOBORDER | wx.NO_3D)
box = wx.BoxSizer(wx.VERTICAL)
tID = wx.NewId()
self.list = AttributesListCtrl(self.rightPanel, tID, style = wx.LC_REPORT | wx.SUNKEN_BORDER)
self.list.InsertColumn(0, "Name")
self.list.InsertColumn(1, "Value")
self.list.SetColumnWidth(0, 350)
self.list.SetColumnWidth(1, wx.LIST_AUTOSIZE)
self.finishHandlers = FinishConstrHandlers(self.list, self.improvement)
self.prodBio = DependencyPanel(self.rightPanel, "Bio production depends on:", wx.GREEN, wx.BLACK)
self.prodEn = DependencyPanel(self.rightPanel, "Energy production depends on:", wx.BLUE, wx.WHITE)
self.prodCon = DependencyPanel(self.rightPanel, "Construction points production depends on:", wx.BLACK, wx.WHITE)
box.Add(self.list, 1, wx.EXPAND)
box.Add(wx.StaticLine(self.rightPanel, -1), 0, wx.EXPAND | wx.ALL, 4)
box.Add(self.prodBio, 0, wx.EXPAND | wx.WEST, 4)
box.Add(wx.StaticLine(self.rightPanel, -1), 0, wx.EXPAND | wx.ALL, 4)
box.Add(self.prodEn, 0, wx.EXPAND | wx.WEST, 4)
box.Add(wx.StaticLine(self.rightPanel, -1), 0, wx.EXPAND | wx.ALL, 4)
box.Add(self.prodCon, 0, wx.EXPAND | wx.WEST, 4)
box.Fit(self.rightPanel)
self.rightPanel.SetSizer(box)
splitter.SplitVertically(self.tree, self.rightPanel, 360)
splitter.SetMinimumPaneSize(20)
menuBar = wx.MenuBar()
menu = wx.Menu()
menu.AppendCheckItem(MENU_SHOW_B, "Show &Bionic")
menu.Check(MENU_SHOW_B, self.showBionic)
menu.AppendCheckItem(MENU_SHOW_C, "Show &Cyborg")
menu.Check(MENU_SHOW_C, self.showCyborg)
menu.AppendCheckItem(MENU_SHOW_H, "Show &Human")
menu.Check(MENU_SHOW_H, self.showHuman)
menu.AppendSeparator()
menu.Append(MENU_EXIT, "E&xit")
menuBar.Append(menu, "&Race")
menu = wx.Menu()
menu.AppendRadioItem(MENU_IMPROVEMENT_1, "Improvement 1")
menu.AppendRadioItem(MENU_IMPROVEMENT_2, "Improvement 2")
menu.AppendRadioItem(MENU_IMPROVEMENT_3, "Improvement 3")
menu.AppendRadioItem(MENU_IMPROVEMENT_4, "Improvement 4")
menu.AppendRadioItem(MENU_IMPROVEMENT_5, "Improvement 5")
menuBar.Append(menu, "&Improvement")
menu = wx.Menu()
menu.AppendCheckItem(MENU_LEVEL_1, "Level 1")
menu.AppendCheckItem(MENU_LEVEL_2, "Level 2")
menu.AppendCheckItem(MENU_LEVEL_3, "Level 3")
menu.AppendCheckItem(MENU_LEVEL_4, "Level 4")
menu.AppendCheckItem(MENU_LEVEL_5, "Level 5")
menu.AppendCheckItem(MENU_LEVEL_6, "Level 6")
menu.AppendSeparator()
menu.AppendCheckItem(MENU_LEVEL_99, "Additional")
menuBar.Append(menu, "&Level")
menu = wx.Menu()
menu.AppendCheckItem(MENU_VIEW_LEVEL, "Level")
menu.AppendCheckItem(MENU_VIEW_RACE, "Race")
menu.AppendCheckItem(MENU_VIEW_REQ_LEVEL, "Required parent tech level")
menuBar.Append(menu, "&View")
menu = wx.Menu()
menu.Append(MENU_CONSTRUCTION_DLG, "Show construction dialog")
menuBar.Append(menu, "Ship &construction")
self.SetMenuBar(menuBar)
wx.EVT_MENU(self, MENU_EXIT, self.OnQuitMenu)
wx.EVT_MENU(self, MENU_SHOW_B, self.OnShowBionic)
wx.EVT_MENU(self, MENU_SHOW_C, self.OnShowCyborg)
wx.EVT_MENU(self, MENU_SHOW_H, self.OnShowHuman)
wx.EVT_MENU(self, MENU_IMPROVEMENT_1, self.OnImprovement1)
wx.EVT_MENU(self, MENU_IMPROVEMENT_2, self.OnImprovement2)
wx.EVT_MENU(self, MENU_IMPROVEMENT_3, self.OnImprovement3)
wx.EVT_MENU(self, MENU_IMPROVEMENT_4, self.OnImprovement4)
wx.EVT_MENU(self, MENU_IMPROVEMENT_5, self.OnImprovement5)
wx.EVT_UPDATE_UI(self, MENU_IMPROVEMENT_1, self.OnImprovement1Update)
wx.EVT_UPDATE_UI(self, MENU_IMPROVEMENT_2, self.OnImprovement2Update)
wx.EVT_UPDATE_UI(self, MENU_IMPROVEMENT_3, self.OnImprovement3Update)
wx.EVT_UPDATE_UI(self, MENU_IMPROVEMENT_4, self.OnImprovement4Update)
wx.EVT_UPDATE_UI(self, MENU_IMPROVEMENT_5, self.OnImprovement5Update)
wx.EVT_MENU(self, MENU_LEVEL_1, self.OnLevel1)
wx.EVT_MENU(self, MENU_LEVEL_2, self.OnLevel2)
wx.EVT_MENU(self, MENU_LEVEL_3, self.OnLevel3)
wx.EVT_MENU(self, MENU_LEVEL_4, self.OnLevel4)
wx.EVT_MENU(self, MENU_LEVEL_5, self.OnLevel5)
wx.EVT_MENU(self, MENU_LEVEL_6, self.OnLevel6)
wx.EVT_MENU(self, MENU_LEVEL_99, self.OnLevel99)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_1, self.OnLevel1Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_2, self.OnLevel2Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_3, self.OnLevel3Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_4, self.OnLevel4Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_5, self.OnLevel5Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_6, self.OnLevel6Update)
wx.EVT_UPDATE_UI(self, MENU_LEVEL_99, self.OnLevel99Update)
wx.EVT_MENU(self, MENU_VIEW_LEVEL, self.OnViewLevel)
wx.EVT_MENU(self, MENU_VIEW_RACE, self.OnViewRace)
wx.EVT_MENU(self, MENU_VIEW_REQ_LEVEL, self.OnViewReqLevel)
wx.EVT_UPDATE_UI(self, MENU_VIEW_LEVEL, self.OnViewLevelUpdate)
wx.EVT_UPDATE_UI(self, MENU_VIEW_RACE, self.OnViewRaceUpdate)
wx.EVT_UPDATE_UI(self, MENU_VIEW_REQ_LEVEL, self.OnViewReqLevelUpdate)
wx.EVT_MENU(self, MENU_CONSTRUCTION_DLG, self.OnConstruction)
self.PopulateTree()
return None
def OnClose(self, event):
global config
config.Races.Bionic = int(self.showBionic)
config.Races.Human = int(self.showHuman)
config.Races.Cyborg = int(self.showCyborg)
config.View.Improvement = self.improvement
config.View.Level = int(self.viewLevel)
config.View.Race = int(self.viewRace)
config.View.ReqLevel = int(self.viewReqLevel)
config.Levels.Level1 = int(self.levels[1])
config.Levels.Level2 = int(self.levels[2])
config.Levels.Level3 = int(self.levels[3])
config.Levels.Level4 = int(self.levels[4])
config.Levels.Level5 = int(self.levels[5])
config.Levels.Level6 = int(self.levels[6])
config.Levels.Level99 = int(self.levels[99])
self.Destroy()
def OnSelChanged(self, event):
item = event.GetItem()
tech = self.tree.GetItemData(item).GetData()
self.DisplayTech(tech)
self.currentTech = tech
def OnQuitMenu(self, event):
self.Close(True)
def OnConstruction(self, event):
global config
constrDlg = ConstructionDlg(None, -1, config, self.improvement)
constrDlg.ShowModal()
constrDlg.Destroy()
def OnImprovement1(self, event):
self.SetImprovement(1)
def OnImprovement2(self, event):
self.SetImprovement(2)
def OnImprovement3(self, event):
self.SetImprovement(3)
def OnImprovement4(self, event):
self.SetImprovement(4)
def OnImprovement5(self, event):
self.SetImprovement(5)
def SetImprovement(self, number):
self.improvement = number
self.finishHandlers.improvement = number
self.DisplayTech(self.currentTech)
def OnImprovement1Update(self, event):
event.Check(self.improvement == 1)
def OnImprovement2Update(self, event):
event.Check(self.improvement == 2)
def OnImprovement3Update(self, event):
event.Check(self.improvement == 3)
def OnImprovement4Update(self, event):
event.Check(self.improvement == 4)
def OnImprovement5Update(self, event):
event.Check(self.improvement == 5)
    def _toggleLevel(self, number):
        # unchecking level N also unchecks every level above it;
        # checking level N also checks every level below it
        if self.levels[number]:
            for lvl in range(number, 7):
                self.levels[lvl] = False
        else:
            for lvl in range(1, number + 1):
                self.levels[lvl] = True
        self.PopulateTree()
    def OnLevel1(self, event):
        self._toggleLevel(1)
    def OnLevel2(self, event):
        self._toggleLevel(2)
    def OnLevel3(self, event):
        self._toggleLevel(3)
    def OnLevel4(self, event):
        self._toggleLevel(4)
    def OnLevel5(self, event):
        self._toggleLevel(5)
    def OnLevel6(self, event):
        self._toggleLevel(6)
def OnLevel99(self, event):
self.levels[99] = not self.levels[99]
self.PopulateTree()
def OnLevel1Update(self, event):
event.Check(self.levels[1])
def OnLevel2Update(self, event):
event.Check(self.levels[2])
def OnLevel3Update(self, event):
event.Check(self.levels[3])
def OnLevel4Update(self, event):
event.Check(self.levels[4])
def OnLevel5Update(self, event):
event.Check(self.levels[5])
def OnLevel6Update(self, event):
event.Check(self.levels[6])
def OnLevel99Update(self, event):
event.Check(self.levels[99])
def OnViewLevel(self, event):
self.viewLevel = not self.viewLevel
self.PopulateTree()
def OnViewLevelUpdate(self, event):
event.Check(self.viewLevel)
def OnViewRace(self, event):
self.viewRace = not self.viewRace
self.PopulateTree()
def OnViewRaceUpdate(self, event):
event.Check(self.viewRace)
def OnViewReqLevel(self, event):
self.viewReqLevel = not self.viewReqLevel
self.PopulateTree()
def OnViewReqLevelUpdate(self, event):
event.Check(self.viewReqLevel)
def OnShowBionic(self, event):
self.showBionic = event.IsChecked()
self.PopulateTree()
def OnShowCyborg(self, event):
self.showCyborg = event.IsChecked()
self.PopulateTree()
def OnShowHuman(self, event):
self.showHuman = event.IsChecked()
self.PopulateTree()
def PopulateTree(self):
self.tree.Freeze()
self.tree.DeleteAllItems()
root = self.tree.AddRoot("Technologies")
self.techs = {}
self.AppendRootTechs(root)
self.AppendChildTechs(root)
self.tree.Expand(root)
self.tree.SetScrollPos(wx.VERTICAL, 0)
self.tree.Thaw()
def DisplayTech(self, tech):
self.list.DeleteAllItems()
self.prodBio.Clear()
self.prodEn.Clear()
self.prodCon.Clear()
        if tech is None:
return
i = 0
        # :note: the original AND-ed the flags with V_NONE (0x00), which always
        # yields V_NONE and left the real work to a fallback elif chain; OR-ing
        # the flags together is what was evidently intended.
        techType = (
            (V_STRUCT if getattr(tech, 'isStructure', 0) else 0) |
            (V_HULL if getattr(tech, 'isShipHull', 0) else 0) |
            (V_SEQUIP if getattr(tech, 'isShipEquip', 0) else 0) |
            (V_PROJECT if getattr(tech, 'isProject', 0) else 0)
        )
for attribute in dir(tech):
value = getattr(tech, attribute)
descr, props, showIfDef, default, convertor = techAttrs.get(attribute, defaultAttr)
if techType & props and (value != default or showIfDef):
self.list.InsertStringItem(i, descr)
if V_EFF & props:
item = self.list.GetItem(i)
itemFont = item.GetFont()
itemFont.SetWeight(wx.BOLD)
item.SetFont(itemFont)
self.list.SetItem(item)
if convertor != str:
value = Rules.techImprEff[self.improvement] * value
else:
value = int(round(Rules.techImprEff[self.improvement] * value))
self.list.SetStringItem(i, 1, str(convertor(value)))
i = i + 1
text = ""
for res in getattr(tech, "researchReqSRes", [0]):
text += stratRes[res]
text += ", "
text = text[:-2]
if len(text) > 0:
self.list.InsertStringItem(i, "Strategic resource")
self.list.SetStringItem(i, 1, text)
if getattr(tech, "finishConstrHandler", None) != None or getattr(tech, "deployHandlerFunction", None) != None:
fceF = None
fceD = None
if getattr(tech, "finishConstrHandler", None) != None:
fceF = tech.finishConstrHandler.__name__
if getattr(tech, "deployHandlerFunction", None) != None:
fceD = tech.deployHandlerFunction.__name__
fce = None
if fceF != "noop":
fce = fceF
if fce == None and fceD != "noop":
fce = fceD
if fce != None and fce in dir(self.finishHandlers):
i = getattr(self.finishHandlers, fce)(i, tech)
if getattr(tech, "prodProdMod", None) != None:
b, m, e, d = tech.prodProdMod
self.prodCon.SetEnv(b)
self.prodCon.SetMineral(m)
self.prodCon.SetEnergy(e)
self.prodCon.SetNothing(d)
if getattr(tech, "prodBioMod", None) != None:
b, m, e, d = tech.prodBioMod
self.prodBio.SetEnv(b)
self.prodBio.SetMineral(m)
self.prodBio.SetEnergy(e)
self.prodBio.SetNothing(d)
if getattr(tech, "prodEnMod", None) != None:
b, m, e, d = tech.prodEnMod
self.prodEn.SetEnv(b)
self.prodEn.SetMineral(m)
self.prodEn.SetEnergy(e)
self.prodEn.SetNothing(d)
def addNode(self, parentNode, tech):
raceColours = {
"C": wx.RED,
"B": wx.NamedColour("MEDIUM FOREST GREEN"),
"H": wx.BLUE,
"HC": wx.NamedColour("BROWN"),
"CH": wx.NamedColour("BROWN"),
"BH": wx.NamedColour("GOLD"),
"HB": wx.NamedColour("GOLD"),
"BC": wx.NamedColour("LIGHT MAGENTA"),
"CB": wx.NamedColour("LIGHT MAGENTA")
}
dataItem = wx.TreeItemData(tech)
title = tech.name
colour = wx.BLACK
if len(tech.researchRaces) < 3 and self.viewRace:
title += " - " + tech.researchRaces
        appendByRace = ((self.showBionic and "B" in tech.researchRaces) or
                        (self.showHuman and "H" in tech.researchRaces) or
                        (self.showCyborg and "C" in tech.researchRaces))
        appendByLevel = self.levels[tech.level]
treeItem = None
        parent = getParent(tech)
        reqlvl = 0
        if parent is not None:
            for lvl in range(1, 6):
                for current in parent.researchEnables[lvl]:
                    if 1000 in Rules.techs[current].researchReqSRes:
                        continue
                    if tech.id == Rules.techs[current].id:
                        reqlvl = lvl
                        break
                if reqlvl == lvl:
                    break
        if appendByRace and appendByLevel and parentNode is not None:
if self.viewReqLevel:
itemText = "[%d]--" % reqlvl
else:
itemText = ""
if self.viewLevel:
itemText = "%s%s (%d)" % (itemText, title, tech.level)
else:
itemText = "%s%s" % (itemText, title)
treeItem = self.tree.AppendItem(parentNode, itemText, data = dataItem)
if tech.researchRaces in raceColours:
self.tree.SetItemTextColour(treeItem, raceColours[tech.researchRaces])
return treeItem
def AppendRootTechs(self, root):
for tl in range(1, 10):
for tech in Rules.techs.itervalues():
if tech.researchRequires or tl != tech.level or 1000 in tech.researchReqSRes:
continue
self.techs[tech] = self.addNode(root, tech)
if self.levels[99]:
self.level99Root = self.tree.AppendItem(root, "Level 99", data = None)
for tech in Rules.techs.itervalues():
if tech.researchRequires or 99 != tech.level or 1000 in tech.researchReqSRes:
continue
self.techs[tech] = self.addNode(self.level99Root, tech)
self.tree.SortChildren(self.level99Root)
self.tree.Expand(self.level99Root)
def AppendChildTechs(self, root):
for tech in Rules.techs.itervalues():
if tech.researchRequires:
continue
todo = getChildren(tech)
while todo:
current = Rules.techs[todo.pop(0)]
if 1000 in current.researchReqSRes:
continue
self.AppendChild(root, current)
todo.extend(getChildren(current))
def AppendChild(self, root, child):
parent = getParent(child)
self.techs[child] = self.addNode(self.techs[parent], child)
        if self.techs[parent] is not None:
self.tree.Expand(self.techs[parent])
self.tree.SortChildren(self.techs[parent])
config = Config("config.ini")
app = App(False)
app.MainLoop()
config.save("config.ini")
|
Lukc/ospace-lukc
|
client-TechViewer/TechViewer.py
|
Python
|
gpl-2.0
| 24,696
|
from xssnotifier.main import main
if __name__ == "__main__":
    main()
|
mattiaslundberg/xssnotifier-server
|
run.py
|
Python
|
bsd-2-clause
| 42
|
import os
import sys
import unittest
sys.path.append(os.getcwd())
from parsing.file_parser import *
class FileParserTest(unittest.TestCase):
def test_parse_tape(self):
self.assertEqual(parse_tape_from_file('tape: (asd,a,asd)', 0),
['asd', 'a', 'asd'])
self.assertEqual(parse_tape_from_file('tape: (,a,asd)', 0),
['', 'a', 'asd'])
self.assertEqual(parse_tape_from_file('tape: (,,)', 0),
['', '', ''])
self.assertEqual(parse_tape_from_file('tape: (asd,a,)', 0),
['asd', 'a', ''])
self.assertEqual(parse_tape_from_file('tape: (ada#dsd,a,afdas)d)', 0),
['ada#dsd', 'a', 'afdas)d'])
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tpe: (asd,a,asd)', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tape: (asda,asd)', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tape: (asdaasd)', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tape: (asd,a,asd', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tape: asd,a,asd)', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('tape (asd,a,asd)', 0), '')
with self.assertRaises(SyntacticError):
self.assertEqual(parse_tape_from_file('(tape asd,a,asd)', 0), '')
def test_parse_states(self):
self.assertEqual(parse_states_from_file('states: {1,2,3,4,5}', 0),
set([1, 2, 3, 4, 5]))
self.assertEqual(parse_states_from_file('states: {1}', 0), set([1]))
self.assertEqual(parse_states_from_file('states: {}', 0), set())
with self.assertRaises(SyntacticError):
self.assertEqual(parse_states_from_file('stes: {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_states_from_file('states {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_states_from_file('states: {1,2,3', 0), 0)
def test_parse_initial(self):
self.assertEqual(parse_initial_from_file('initial: 0', 0), 0)
self.assertEqual(parse_initial_from_file('initial: 123', 0), 123)
self.assertEqual(parse_initial_from_file('initial: 10', 0), 10)
self.assertEqual(parse_initial_from_file('initial: 00', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_initial_from_file('initial 0', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_initial_from_file('inial: 0', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_initial_from_file('inial:', 0), 0)
def test_parse_rule(self):
self.assertEqual(parse_rule_from_file('(0,b,a,b,0,Right)', 0),
[0, 'b', 'a', 'b', 0, 'Right'])
self.assertEqual(parse_rule_from_file('(0,_,_,_,1,None)', 0),
[0, '_', '_', '_', 1, 'None'])
self.assertEqual(parse_rule_from_file('(0,_,_,_,1,Left)', 0),
[0, '_', '_', '_', 1, 'Left'])
with self.assertRaises(SyntacticError):
self.assertEqual(parse_rule_from_file('(0,_,_,_,1,No)', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_rule_from_file('(0,_,_,_,None)', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_rule_from_file('(a,_,_,_,1,None)', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_rule_from_file('(0,_,_,_,b,None)', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_rule_from_file('(0,_,_,_,1,None', 0), 0)
def test_parse_accept_states(self):
self.assertEqual(parse_accept_states_from_file('accept: {1}', 0),
set([1]))
self.assertEqual(parse_accept_states_from_file('accept: {1}', 0),
set([1]))
self.assertEqual(parse_accept_states_from_file('accept: {}', 0),
set())
with self.assertRaises(SyntacticError):
self.assertEqual(parse_accept_states_from_file('acce: {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_accept_states_from_file('accept {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_accept_states_from_file('accept: {1,2,3',
0), 0)
def test_parse_reject_states(self):
self.assertEqual(parse_reject_states_from_file('reject: {1}', 0),
set([1]))
self.assertEqual(parse_reject_states_from_file('reject: {1}', 0),
set([1]))
self.assertEqual(parse_reject_states_from_file('reject: {}', 0), set())
with self.assertRaises(SyntacticError):
self.assertEqual(parse_reject_states_from_file('acce: {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_reject_states_from_file('reject {}', 0), 0)
with self.assertRaises(SyntacticError):
self.assertEqual(parse_reject_states_from_file('reject: {1,2,3',
0), 0)
if __name__ == '__main__':
unittest.main()
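# Taken together, the tests above imply a machine description file along these
# lines (illustrative, not taken from the project):
#   tape: (a,b,_)
#   states: {0,1}
#   initial: 0
#   (0,a,a,b,1,Right)
#   accept: {1}
#   reject: {}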
|
ytsvetkov/TuringMachine
|
unittests/test_file_parser.py
|
Python
|
gpl-3.0
| 5,658
|
from django.contrib import admin
from admin_views.admin import AdminViews
from django.shortcuts import redirect
from example_project.example_app.models import TestModel
class TestAdmin(AdminViews):
admin_views = (
('Process This', 'process'), # Admin view
('Go to LJW', 'http://www.ljworld.com'), # Direct URL
)
def process(self, *args, **kwargs):
return redirect('http://www.cnn.com')
admin.site.register(TestModel, TestAdmin)
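# As this example suggests, each (label, target) pair in admin_views becomes
# an extra link in the model's admin: a method name ("process") is routed
# through the ModelAdmin and called as a view, while a full URL is linked to
# directly.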
|
frankwiles/django-admin-views
|
example_project/example_project/example_app/admin.py
|
Python
|
bsd-3-clause
| 490
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013- Yan Shoshitaishvili aka. zardus
# Ruoyu Wang aka. fish
# Andrew Dutcher aka. rhelmot
# Kevin Borgolte aka. cao
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Standard library imports
import logging
import os
import random
import shlex
import socket
import subprocess
import time
import warnings
LOG = logging.getLogger("idalink")
# Local imports
from .rpyc import classic as rpyc_classic
from .memory import CachedIDAMemory, CachedIDAPermissions
# Constants
MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
IDA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "support/")
LOGFILE = "/tmp/idalink-{port}.log"
def which(filename):
if '/' in filename:
if os.path.exists(filename) and \
os.access(filename, os.X_OK):
return filename
return None
path_entries = os.getenv('PATH').split(os.path.pathsep)
for entry in path_entries:
filepath = os.path.join(entry, filename)
if os.path.exists(filepath) and \
os.access(filepath, os.X_OK):
return filepath
return None
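# Roughly the behaviour of shutil.which() (added in Python 3.3), kept local so
# the module also runs on older interpreters; e.g. which("idal64") returns the
# resolved path, or None if the binary is not on PATH.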
# Helper functions
def _ida_spawn(filename, ida_path, port, mode="oneshot",
processor_type="metapc"):
"""Internal helper function to open IDA on the the file we want to
analyse.
"""
ida_realpath = os.path.expanduser(ida_path)
file_realpath = os.path.realpath(os.path.expanduser(filename))
logfile = LOGFILE.format(port=port)
LOG.info("Launching IDA (%s) on %s, listening on port %d, logging to %s",
ida_realpath, file_realpath, port, logfile)
# :note: We run IDA through screen because otherwise its UI will hang.
# We also setup the environment for IDA.
# The other parameters are:
# -A Automatic mode
# -S Run a script (our server script)
# -L Log all output to our logfile
# -p Set the processor type
command_tpl = "screen -S idalink-{server_port} -d -m " \
"'{module_dir}/support/ida_env.sh' '{ida_path}' " \
"-M -A "\
"-S'{module_dir}/server.py {server_port} {server_mode}' " \
"-L'{logfile}' -p{processor} '{file}'"
command = shlex.split(command_tpl.format(module_dir=MODULE_DIR,
ida_path=ida_realpath,
server_port=port,
server_mode=mode,
logfile=logfile,
processor=processor_type,
file=file_realpath))
# :note: The above is a bit more Pythonic, if it breaks, use this:
# screen_name = "idalink-{}".format(port)
# screen = ["screen", "-S", screen_name, "-d", "-m", "-L", "--"]
#
# ida_env = "{}/support/ida_env.sh".format(MODULE_DIR)
# ida_options = ["-M", "-A",
# "-p{}".format(processor_type),
# "-S{}/server.py {}".format(MODULE_DIR, port), # idalink
# "-L{}".format(logfile)] # logfile
#
# ida = [ida_env, ida_realpath]
# command = screen + ida + ida_options + [file_realpath]
LOG.debug("IDA command is %s", " ".join(command))
subprocess.call(command)
def _ida_connect(port):
link = rpyc_classic.connect("localhost", port)
LOG.debug("Connected to port %d", port)
idc = link.root.getmodule("idc")
idaapi = link.root.getmodule("idaapi")
idautils = link.root.getmodule("idautils")
return link, idc, idaapi, idautils
class IDALinkError(Exception):
pass
class RemoteIDALink(object):
def __init__(self, filename):
self.filename = filename
self.link = None
self.idc = __import__('idc')
self.idaapi = __import__('idaapi')
self.idautils = __import__('idautils')
self.memory = CachedIDAMemory(self)
self.permissions = CachedIDAPermissions(self)
class IDALink(object):
def __init__(self, link, idc, idaapi, idautils, filename=None,
pull_memory=True):
self.filename = filename
self.link = link
self.idc = idc
self.idaapi = idaapi
self.idautils = idautils
self.remote_idalink_module = link.root.getmodule("idalink")
self.remote_link = self.remote_idalink_module.RemoteIDALink(filename)
self._memory = None
self.pull_memory = pull_memory
self._permissions = None
@property
def memory(self):
if self._memory is None:
self._memory = CachedIDAMemory(self)
if self.pull_memory:
self._memory.pull_defined()
return self._memory
@memory.deleter
def memory(self):
self._memory = None
@property
def permissions(self):
if self._permissions is None:
self._permissions = CachedIDAPermissions(self)
return self._permissions
@permissions.deleter
def permissions(self):
self._permissions = None
class idalink(object):
def __init__(self, filename, ida_prog, retries=60, port=None,
spawn=True, pull_memory=True, processor_type="metapc"):
if port is None:
port = random.randint(40000, 49999)
# TODO: check if port is in use
self._link = None
self._filename = os.path.realpath(os.path.join(os.getcwd(), filename))
self._retries = retries
self._port = port
self._pull_memory = pull_memory
progname = which(ida_prog)
if progname is None:
raise IDALinkError("Could not find executable %s" % ida_prog)
        if spawn:
            # pass processor_type by keyword: positionally it would land in
            # _ida_spawn's "mode" parameter
            _ida_spawn(self._filename, progname, port,
                       processor_type=processor_type)
def __enter__(self):
for _ in range(self._retries):
# TODO: detect IDA failure intelligently
try:
time.sleep(1)
LOG.debug("Trying to connect to IDA on port %d", self._port)
self._link = IDALink(*_ida_connect(self._port),
filename=self._filename,
pull_memory=self._pull_memory)
break
except socket.error:
LOG.debug("... failed. Retrying.")
if self._link is None:
raise IDALinkError(("Failed to connect to IDA on port {} for "
"file {}").format(self._port, self._filename))
return self._link
def __exit__(self, type_, value, traceback):
try:
if self._link:
self._link.idc.Exit(0)
except EOFError:
LOG.warning("EOF on link socket, IDA might still be running!")
@property
def link(self):
"""Helper property to support the use of idalink without having to
use a with statement. This property will likely be deprecated and
might be removed at any point in the future.
"""
warnings.warn("link property is pending deprecation",
PendingDeprecationWarning)
if self._link is None:
self.__enter__()
return self._link
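# Typical use, with an illustrative path and IDA binary name:
#
#     with idalink("/tmp/target.bin", "idal64") as ida:
#         print(ida.idc.GetInputFile())
#
# The context manager spawns IDA under screen, retries the RPyC connection
# once per second for up to `retries` attempts, and asks IDA to exit on the
# way out.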
|
chubbymaggie/idalink
|
idalink/idalink.py
|
Python
|
gpl-3.0
| 8,058
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# <REGION compute_custom_machine_type_extra_mem_no_helper>
# <IMPORTS/>
# <INGREDIENT get_image_from_family />
# <INGREDIENT disk_from_image />
# <INGREDIENT create_instance />
# <INGREDIENT create_custom_instances_extra_mem />
# </REGION compute_custom_machine_type_extra_mem_no_helper>
|
googleapis/python-compute
|
samples/recipes/instances/custom_machine_types/extra_mem_no_helper.py
|
Python
|
apache-2.0
| 895
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from copy import copy
import os
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import uuid
from pkg_resources import resource_filename
from mock import Mock, patch
from xml.parsers.expat import ExpatError
# ParseError not present in xml module for python2.6
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
import nupic
import nupic.support.configuration_custom as configuration
import configuration_test
class ConfigurationCustomTest(unittest.TestCase):
def setUp(self):
if "NTA_DYNAMIC_CONF_DIR" in os.environ:
# Remove it to make sure our in-proc tests won't accidentally
# mess with actual files
oldNtaDynamicConfDir = os.environ["NTA_DYNAMIC_CONF_DIR"]
del os.environ["NTA_DYNAMIC_CONF_DIR"]
self.addCleanup(os.environ.update,
dict(NTA_DYNAMIC_CONF_DIR=oldNtaDynamicConfDir))
self.files = dict()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml-unittest'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-default.xml')) as inp:
fp.write(inp.read())
self.files['nupic-default.xml'] = fp.name
with open(os.path.join(tmpDir, 'nupic-site.xml-unittest'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-site.xml')) as inp:
fp.write(inp.read())
self.files['nupic-site.xml'] = fp.name
with open(os.path.join(tmpDir, 'nupic-custom.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-custom.xml')) as inp:
fp.write(inp.read())
self.files['nupic-custom.xml'] = fp.name
self.customParam = 'nupic.custom.hello'
self.customValue = 'world'
configuration.Configuration.clear()
####################################################################
# Custom Configuration Tests
# Todo: Share tests between two configuration_test files
####################################################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomFileCreated(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.setCustomProperty('param', 'val')
self.assertTrue(os.path.exists(self.files['nupic-custom.xml']))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGet(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>'+self.customParam+'</name>',
' <value>'+self.customValue+'</value>',
' </property>',
'</configuration>')))
self.assertEqual(
configuration.Configuration.get(self.customParam),
self.customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetCustomProperty(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
configuration.Configuration.clear()
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>' + self.customParam + '</name>',
' <value>' + self.customValue + '</value>',
' </property>',
'</configuration>')))
configuration.Configuration.setCustomProperty('PersistProp', 'PersistVal')
self.assertEqual(
configuration.Configuration.get('PersistProp'),'PersistVal')
configuration.Configuration.clear()
self.assertEqual(
configuration.Configuration.get('PersistProp'),'PersistVal')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetCustomProperties(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>' + self.customParam + '</name>',
' <value>' + self.customValue + '</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
originalProps = copy(configuration.Configuration.dict())
configuration.Configuration.setCustomProperties(
{'PersistProp' : 'PersistVal', 'apple' : 'pear'})
expectedProps = {'PersistProp' : 'PersistVal', 'apple' : 'pear'}
expectedProps.update(originalProps)
self.assertEqual(configuration.Configuration.dict(), expectedProps)
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.dict(), expectedProps)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictWithTemp(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>param</name>',
' <value>value</value>',
' </property>',
' <property>',
' <name>param2</name>',
' <value>value2</value>',
' </property>',
'</configuration>')))
customDict = configuration.Configuration.dict()
self.assertTrue('param' in customDict)
self.assertTrue('param2' in customDict)
self.assertEqual(customDict['param'], 'value')
self.assertEqual(customDict['param2'], 'value2')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomConfigOverrides(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
paramNames = configuration.Configuration.dict().keys()
customValue = 'NewValue'
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>'+paramNames[0]+'</name>',
' <value>'+customValue+'</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.get(paramNames[0]), \
customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomConfigDict(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>CustomParam</name>',
' <value>CustomValue</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.get('CustomParam'), \
'CustomValue')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testClearInvalidFile(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
configuration.Configuration.clear()
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetInvalidFile(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
with patch('sys.stderr', new_callable=StringIO):
with self.assertRaises(RuntimeError) as cm:
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertIn("Expected top-level element to be 'configuration'",
cm.exception.args[0])
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join(('')))
with patch('sys.stderr', new_callable=StringIO):
with self.assertRaises(RuntimeError) as cm:
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertIn("File contents of custom configuration is corrupt.",
cm.exception.args[0])
# NTA_CONF_PATH is not being mocked out in this test, so we have to mock out
# findConfigFile to return the right path to the config file.
findConfigFile.return_value = self.files['nupic-custom.xml']
configuration.Configuration.resetCustomConfig()
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertEqual(configuration.Configuration.getCustomDict(), {'foo': 'value'})
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetCustomDict(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>CustomParam</name>',
' <value>CustomValue</value>',
' </property>',
'</configuration>')))
self.assertEqual(configuration.Configuration.getCustomDict(),
dict(CustomParam='CustomValue'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetCustomDictNoFile(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.resetCustomConfig()
self.assertEqual(configuration.Configuration.getCustomDict(), dict())
del self.files['nupic-custom.xml']
###############################################
# Replicated Tests From configuration_test.py
###############################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetStringMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getString(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetString(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
result = configuration.Configuration.getString('foo')
self.assertEqual(result, 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBoolMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getBool(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBoolOutOfRangeRaisesValueError(self, findConfigFileMock,
environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foobool2', '2')
with self.assertRaises(ValueError):
configuration.Configuration.getBool('foobool2')
configuration.Configuration.set('fooboolneg1', '-1')
with self.assertRaises(ValueError):
configuration.Configuration.getBool('fooboolneg1')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBool(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foobool0', '0')
result = configuration.Configuration.getBool('foobool0')
self.assertEqual(result, False)
configuration.Configuration.set('foobool1', '1')
result = configuration.Configuration.getBool('foobool1')
self.assertEqual(result, True)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetIntMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getInt(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetInt(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('fooint', '-127')
result = configuration.Configuration.getInt('fooint')
self.assertEqual(result, -127)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFloatMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getFloat(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFloat(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foofloat', '-127.65')
result = configuration.Configuration.getFloat('foofloat')
self.assertEqual(result, -127.65)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetMissingReturnsNone(self, findConfigFile, environ):
findConfigFile.side_effect = self.files.get
environ.get.return_value = None
configuration.Configuration.clear()
result = configuration.Configuration.get(str(uuid.uuid4()))
self.assertTrue(result is None)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetAndGet(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
result = configuration.Configuration.get('foo')
self.assertTrue(result == 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDict(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
configuration.Configuration.set('apple', 'banana')
result = configuration.Configuration.dict()
self.assertTrue(isinstance(result, dict))
self.assertTrue('foo' in result)
self.assertTrue(result['foo'] == 'bar')
self.assertTrue('apple' in result)
self.assertTrue(result['apple'] == 'banana')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictReadsFilesFirstTime(self, findConfigFile,
environ): # pylint: disable=W0613
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
result = configuration.Configuration.dict()
self.assertTrue(isinstance(result, dict))
self.assertTrue(len(result) == 1, result)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictReplacesKeysFromEnvironment(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
key = str(uuid.uuid4())
env = {'NTA_CONF_PROP_' + key: 'foo'}
environ.keys.side_effect = env.keys
environ.__getitem__.side_effect = env.__getitem__
result = configuration.Configuration.dict()
self.assertTrue(key in result)
self.assertTrue(result[key] == 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testClear(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
configuration.Configuration.set('apple', 'banana')
self.assertTrue(configuration.Configuration.get('foo') == 'bar')
self.assertTrue(configuration.Configuration.get('apple') == 'banana')
configuration.Configuration.clear()
self.assertTrue(configuration.Configuration.get('foo') is None)
self.assertTrue(configuration.Configuration.get('apple') is None)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFromEnvironment(self, findConfigFile, environ):
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
key = str(uuid.uuid4())
environ.get.side_effect = {'NTA_CONF_PROP_' + key: 'foo'}.get
self.assertTrue(configuration.Configuration.get(key) == 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileFromPath(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
configuration.Configuration.readConfigFile(filename, prefix)
self.assertTrue(configuration.Configuration.get('dummy') == 'dummy value')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileUnexpectedElementAtRoot(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingDocumentRoot(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises((ExpatError, ParseError), configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingNonPropertyConfigurationChildren(
self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <foo>bar<baz/></foo>',
'</configuration>')))
self.assertEqual(configuration.Configuration.dict(),
dict(dummy='dummy value'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyValue(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(Exception, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyNameAndValue(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name></name>',
' <value></value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingEnvVars(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.foo}</value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMalformedEnvReference(self, findConfigFile,
environ): # pylint: disable=W0613
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.foo</value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEnvironmentOverride(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.NTA_CONF_PROP_foo}</value>',
' </property>',
'</configuration>')))
env = {'NTA_CONF_PROP_foo': 'bar'}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
result = configuration.Configuration.get('foo')
self.assertEqual(result, 'bar')
@patch.object(configuration.Configuration, 'getConfigPaths',
spec=configuration.Configuration.getConfigPaths)
def testFindConfigFile(self, getConfigPaths):
prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
def replacePaths(**_):
return [prefix]
getConfigPaths.side_effect = replacePaths
configuration.Configuration.clear()
result = configuration.Configuration.findConfigFile(filename)
self.assertTrue(result == self.files['nupic-default.xml'])
getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, 'getConfigPaths',
spec=configuration.Configuration.getConfigPaths)
def testFindConfigFileReturnsNoneForMissingFile(self, getConfigPaths):
prefix, _, _ = self.files['nupic-default.xml'].rpartition(os.sep)
def replacePaths(**_):
return [prefix]
getConfigPaths.side_effect = replacePaths
configuration.Configuration.clear()
result = configuration.Configuration.findConfigFile(str(uuid.uuid4()))
self.assertTrue(result is None)
getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPaths(
self, environ, configPaths): # pylint: disable=W0613
result = configuration.Configuration.getConfigPaths()
self.assertEqual(result, configPaths)
@unittest.skip('NUP-2081')
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNone(
self, environ, configPaths): # pylint: disable=W0613
configuration.Configuration._configPaths = None # pylint: disable=W0212
result = configuration.Configuration.getConfigPaths()
self.assertTrue(isinstance(result, list))
self.assertListEqual(result, [resource_filename("nupic",
os.path.join("config", "default"))])
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNoneWithNTA_CONF_PATHInEnv(
self, environ, configPaths): # pylint: disable=W0613
configuration.Configuration._configPaths = None # pylint: disable=W0212
env = {'NTA_CONF_PATH': ''}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
result = configuration.Configuration.getConfigPaths()
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
self.assertEqual(result[0], env['NTA_CONF_PATH'])
def testSetConfigPathsForNoneWithNTA_CONF_PATHInEnv(self):
paths = [Mock()]
configuration.Configuration.setConfigPaths(paths)
self.assertEqual(
paths,
configuration.Configuration._configPaths) # pylint: disable=W0212
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testEmptyGetCustomDict(self, findConfigFile, environMock):
findConfigFile.side_effect = self.files.get
environMock.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
configuration.Configuration.resetCustomConfig()
self.assertEqual(configuration.Configuration.getCustomDict(), dict())
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testConfiguration(self, findConfigFile, environ):
configuration.Configuration.clear()
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-default.xml'], 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(self.files['nupic-site.xml'], 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
env = {'USER': 'foo', 'HOME': 'bar'}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
environ.keys.side_effect = env.keys
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(configuration.Configuration.get('database.emptypassword'),
'')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'), 'root')
expectedValue = 'foo'
actualValue = configuration.Configuration.get(
'var.environment.standalone.user')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
expectedValue = "The user " + os.environ['USER'] + " rocks!"
actualValue = configuration.Configuration.get(
'var.environment.user.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
expectedValue = ("User " + os.environ['USER'] + " and home " +
os.environ['HOME'] + " in the middle")
actualValue = configuration.Configuration.get(
'var.environment.user.and.home.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
env['NTA_CONF_PROP_database_host'] = 'FooBar'
self.assertEqual(configuration.Configuration.get('database.host'), 'FooBar')
allProps = configuration.Configuration.dict()
self.assertTrue(allProps['database.host'] == 'FooBar')
del env['NTA_CONF_PROP_database_host']
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'), 'matrix')
@patch.object(configuration.os, 'environ', spec=dict)
def testConfiguration2(self, environ):
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
env = {
'USER': 'foo',
'HOME': 'bar',
'NTA_CONF_PATH': tmpDir
}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
environ.keys.side_effect = env.keys
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'), 'root')
expectedValue = 'foo'
actualValue = configuration.Configuration.get(
'var.environment.standalone.user')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
expectedValue = "The user " + os.environ['USER'] + " rocks!"
actualValue = configuration.Configuration.get(
'var.environment.user.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
expectedValue = ("User " + os.environ['USER'] + " and home " +
os.environ['HOME'] + " in the middle")
actualValue = configuration.Configuration.get(
'var.environment.user.and.home.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected {0!r}, but got {1!r}".format(expectedValue, actualValue))
env['NTA_CONF_PROP_database_host'] = 'FooBar'
self.assertEqual(configuration.Configuration.get('database.host'),
'FooBar')
allProps = configuration.Configuration.dict()
self.assertEqual(allProps['database.host'], 'FooBar')
del env['NTA_CONF_PROP_database_host']
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
tmpDir2 = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir2)
with open(os.path.join(tmpDir2, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile3.xml')) as inp:
fp.write(inp.read())
env['NTA_CONF_PATH'] = os.pathsep.join([tmpDir, tmpDir2])
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'),
'root')
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
if __name__ == '__main__':
unittest.main(argv=[sys.argv[0], "--verbose"] + sys.argv[1:])
|
runt18/nupic
|
tests/unit/nupic/support/custom_configuration_test.py
|
Python
|
agpl-3.0
| 42,200
|
from .version import __version__
|
ksomemo/ciserviceex
|
ciserviceex/__init__.py
|
Python
|
mit
| 35
|
# coding: utf-8
from typing import Union, Callable
from .path import Path
class Protection(Path):
"""Useful to protect samples from being pulled apart when setting up a test.
"""
def __init__(self,
time: float,
cmd: float,
condition1: Union[str, bool, Callable],
condition2: Union[str, bool, Callable],
value1: float,
value2: float,
value0: float = 0,
verbose: bool = False) -> None:
"""Sets the args and initializes parent class.
Args:
time:
cmd:
condition1 (:obj:`str`): Representing the first condition. See
:ref:`generator path` for more info.
condition2 (:obj:`str`): Representing the second condition. See
:ref:`generator path` for more info.
value1: Value to send when ``condition1`` is met.
value2: Value to send when ``condition2`` is met.
value0: Value to send when no condition is reached.
verbose:
Note:
By default will send ``value0``.
While ``condition1`` is met, will return ``value1``.
While ``condition2`` is met, will return ``value2``.
If ``condition1`` and ``condition2`` are met simultaneously, the first
one met will prevail. If met at the same time, ``condition1`` will
prevail.
"""
Path.__init__(self, time, cmd)
self.value = (value0, value1, value2)
self.condition1 = self.parse_condition(condition1)
self.condition2 = self.parse_condition(condition2)
s = '<' if '<' in condition1 else '>'
self.lbl1 = condition1.split(s)[0]
s = '<' if '<' in condition2 else '>'
self.lbl2 = condition2.split(s)[0]
self.verbose = verbose
self.status = 0
def get_cmd(self, data: dict) -> float:
if self.status == 0:
if self.condition1(data):
self.status = 1
elif self.condition2(data):
self.status = 2
return self.value[self.status]
if self.status == 1 and data[self.lbl1] and not self.condition1(data):
self.status = 0
elif self.status == 2 and data[self.lbl2] and not self.condition2(data):
self.status = 0
return self.value[self.status]
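# A minimal usage sketch (hypothetical labels and thresholds; in practice a
# Crappy Generator block builds paths and feeds get_cmd with acquired data):
# protection = Protection(time=0, cmd=0,
#                         condition1='F(N)>500', condition2='F(N)<-500',
#                         value1=-1, value2=1)
# Sends value0 (0) by default, -1 while 'F(N)>500' holds and 1 while
# 'F(N)<-500' holds. Note that the label extraction in __init__ assumes
# string conditions containing '<' or '>'.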
|
LaboratoireMecaniqueLille/crappy
|
crappy/blocks/generator_path/protection.py
|
Python
|
gpl-2.0
| 2,204
|
import argparse
import pandas as pd
from data_utils import clean_tweet
def main():
from data_utils import RUBTSOVA_HEADER
parser = argparse.ArgumentParser()
parser.add_argument('files', metavar='F',
nargs='+',
help='CSV files to process',
type=argparse.FileType('r'))
parser.add_argument('-o', '--output',
help='Output file',
type=argparse.FileType('w+'),
required=True)
args = parser.parse_args()
for f in args.files:
df = pd.read_csv(f, header=None, sep=';', index_col=False, names=RUBTSOVA_HEADER)
for tweet in df['ttext']:
text = clean_tweet(tweet)
args.output.write(text + ' ')
if __name__ == "__main__":
main()
|
tech-team/sentiment
|
preprocess/rubtsova_csv_to_corpus.py
|
Python
|
mit
| 839
|
from __future__ import print_function
from gocd_cli.command import BaseCommand
from gocd_cli.utils import get_settings
__all__ = ['Decrypt', 'Encrypt']
class BaseEncryptionCommand(object):
_encryption_module = None
_settings = None
@property
def settings(self):
if self._settings is None:
self._settings = get_settings()
return self._settings
@property
def encryption_module(self):
if self._encryption_module is None:
mod = self.settings.get('encryption_module') or 'gocd_cli.encryption.caesar'
self._encryption_module = __import__(mod, fromlist=('',))
return self._encryption_module
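# Note: __import__(mod, fromlist=('',)) returns the submodule itself rather
# than the top-level package, i.e. for the default it is equivalent to
# importlib.import_module('gocd_cli.encryption.caesar').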
class Encrypt(BaseCommand, BaseEncryptionCommand):
usage = """One of either plaintext or key can be passed in
Flags:
plaintext: A string to encrypt
key: A configuration key from the settings file
"""
usage_summary = 'Encrypts the passed in plaintext or key to ciphertext'
def __init__(self, server, plaintext=None, key=None):
self.server = server
self._plaintext = plaintext
self._key = key
@property
def plaintext(self):
if self._key:
return self.settings.get(self._key)
else:
return self._plaintext
def label(self):
if self._key:
return '{0}_encrypted'.format(self._key.replace('_encrypted', ''))
else:
return 'Ciphertext'
def run(self):
ciphertext = self.encryption_module.encrypt(self.plaintext)
return self._return_value('{0}\n{1}'.format(
'encryption_module = {0}'.format(self.encryption_module.__name__),
'{0} = {1}'.format(self.label(), ciphertext)
), exit_code=0)
class Decrypt(BaseCommand, BaseEncryptionCommand):
usage = """One of either ciphertext or key can be passed in
Flags:
ciphertext: A string to decrypt
key: A configuration key from the settings file
"""
usage_summary = 'Decrypts the passed in ciphertext or key to plaintext'
def __init__(self, server, ciphertext=None, key=None):
self.server = server
self._ciphertext = ciphertext
self._key = key
@property
def ciphertext(self):
if self._key:
return self.settings.get(self._key)
else:
return self._ciphertext
def label(self):
if self._key:
return self._key.replace('_encrypted', '')
else:
return 'Plaintext'
def run(self):
plaintext = self.encryption_module.decrypt(self.ciphertext)
return self._return_value('{0}\n{1}'.format(
'encryption_module = {0}'.format(self.encryption_module.__name__),
'{0} = {1}'.format(self.label(), plaintext),
), exit_code=0)
|
gaqzi/py-gocd-cli
|
gocd_cli/commands/settings.py
|
Python
|
mit
| 2,834
|
#!/usr/bin/env python
# Standard packages
import sys
import re
import cyvcf2
import argparse
import geneimpacts
from cyvcf2 import VCF
def get_effects(variant, annotation_keys):
effects = []
effects += [geneimpacts.SnpEff(e, annotation_keys) for e in variant.INFO.get("ANN").split(",")]
return effects
def get_top_impact(effects):
top_impact = geneimpacts.Effect.top_severity(effects)
if isinstance(top_impact, list):
top_impact = top_impact[0]
return top_impact
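# Note: geneimpacts.Effect.top_severity may return a list when several
# effects tie at the highest severity, so the first tied effect is kept
# arbitrarily.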
def get_genes(effects):
genes_list = []
for effect in effects:
if effect.gene not in genes_list:
genes_list.append(effect.gene)
return genes_list
def get_transcript_effects(effects):
transcript_effects = dict()
for effect in effects:
if effect.transcript is not None:
transcript_effects[effect.transcript] = "{biotype}|{effect}".format(biotype=effect.biotype,
effect=effect.impact_severity)
return transcript_effects
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--annotated_vcf', help="snpEff annotated VCF file to scan")
parser.add_argument('-o', '--output', help="File for output information")
args = parser.parse_args()
sys.stdout.write("Parsing VCFAnno VCF with CyVCF2\n")
reader = cyvcf2.VCFReader(args.annotated_vcf)
desc = reader["ANN"]["Description"]
annotation_keys = [x.strip("\"'") for x in re.split(r"\s*\|\s*", desc.split(":", 1)[1].strip('" '))]
sys.stdout.write("Parsing VCFAnno VCF\n")
vcf = VCF(args.annotated_vcf)
for variant in vcf:
effects = get_effects(variant, annotation_keys)
top_impact = get_top_impact(effects)
gene_effects = dict()
for effect in effects:
if effect.gene not in gene_effects.keys():
if effect.transcript is not None:
|
GastonLab/ddb-scripts
|
specialist/scan_multi-gene_annotated_snpEff.py
|
Python
|
mit
| 1,956
|
from moksha.api.streams import PollingDataStream
from jqplotdemo.controllers.plots import get_plot_data, make_data, get_pie_data
class JQPlotDemoStream(PollingDataStream):
frequency = 1.0
def poll(self):
self.send_message('jqplot.demo.plot', get_plot_data())
self.send_message('jqplot.demo.pie', get_pie_data())
|
ralphbean/moksha
|
moksha/apps/demo/MokshaJQPlotDemo/jqplotdemo/streams.py
|
Python
|
apache-2.0
| 338
|
#!/usr/bin/env python
"""Web authentication classes for the GUI."""
import collections
from django import http
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import log
from grr.lib import registry
class BaseWebAuthManager(object):
"""A class managing web authentication.
This class is responsible for deciding if the user will have access to the web
interface and for generating the token that will be passed to the functions
that deal with data.
Checks are done using a decorator function.
"""
__metaclass__ = registry.MetaclassRegistry
def SecurityCheck(self, func, request, *args, **kwargs):
"""A decorator applied to protected web handlers.
Args:
func: The wrapped function to call.
request: The web request.
Returns:
A django http response object.
This will get called for all requests that get passed through one of our
handlers that is wrapped in @SecurityCheck.
"""
def RedirectBase(self):
"""Return a redirect to the main GRR page."""
return http.HttpResponsePermanentRedirect(config_lib.CONFIG["AdminUI.url"])
class BasicWebAuthManager(BaseWebAuthManager):
"""Manager using basic auth using the config file."""
def __init__(self):
"""Constructor."""
# Reuse the basic ACL manager functions for accessing the config.
self._aclmanager = access_control.BasicAccessControlManager()
super(BasicWebAuthManager, self).__init__()
def SecurityCheck(self, func, request, *args, **kwargs):
"""Wrapping function."""
event_id = log.LOGGER.GetNewEventId()
# Modify request adding an event_id attribute to track the event
request.event_id = event_id
request.user = ""
authorized = False
try:
auth_type, authorization = request.META.get(
"HTTP_AUTHORIZATION", " ").split(" ", 1)
if auth_type == "Basic":
user, password = authorization.decode("base64").split(":", 1)
# Check the hash is ok
auth_obj = collections.namedtuple("AuthObj", "user_provided_hash")
auth_obj.user_provided_hash = password
if self._aclmanager.user_manager.CheckUserAuth(user, auth_obj):
authorized = True
# The password is ok - update the user
request.user = user
except (IndexError, KeyError):
pass
if not authorized:
result = http.HttpResponse("Unauthorized", status=401)
result["WWW-Authenticate"] = "Basic realm='Secure Area'"
return result
# Modify this to implement additional checking (e.g. enforce SSL).
response = func(request, *args, **kwargs)
return response
class NullWebAuthManager(BaseWebAuthManager):
"""Null web auth manager always returns test user unless set."""
def __init__(self, *args, **kwargs):
super(NullWebAuthManager, self).__init__(*args, **kwargs)
self.username = "test"
def SetUserName(self, username):
self.username = username
def SecurityCheck(self, func, request, *args, **kwargs):
"""A decorator applied to protected web handlers."""
request.event_id = "1"
request.user = self.username
request.token = access_control.ACLToken("Testing", "Just a test")
return func(request, *args, **kwargs)
# Global to store the configured web auth manager.
WEBAUTH_MANAGER = None
def SecurityCheck(func):
"""A decorator applied to protected web handlers."""
def Wrapper(request, *args, **kwargs):
"""Wrapping function."""
if WEBAUTH_MANAGER is None:
raise RuntimeError("Attempt to initialize before WEBAUTH_MANAGER set.")
return WEBAUTH_MANAGER.SecurityCheck(func, request, *args, **kwargs)
return Wrapper
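# A minimal sketch of the intended use (hypothetical handler; the real GRR
# handlers are registered by the GUI plugins):
#
# @SecurityCheck
# def RenderSomePage(request):
#   return http.HttpResponse("OK")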
class WebAuthInit(registry.InitHook):
pre = ["GuiPluginsInit"]
def RunOnce(self):
"""Run this once on init."""
global WEBAUTH_MANAGER # pylint: disable=global-statement
# pylint: disable=g-bad-name
WEBAUTH_MANAGER = BaseWebAuthManager.NewPlugin(
config_lib.CONFIG["AdminUI.webauth_manager"])()
# pylint: enable=g-bad-name
logging.info("Using webauth manager %s", WEBAUTH_MANAGER)
|
MiniSEC/GRR_clone
|
gui/webauth.py
|
Python
|
apache-2.0
| 4,111
|
#! /usr/bin/env python
"""
Created on Thu Jun 29 11:02:28 2017
@author: njlyons
"""
from matplotlib.backend_bases import Event
from matplotlib.pyplot import gcf
from numpy.testing import assert_equal
from landlab import RasterModelGrid, imshow_grid
from landlab.plot.event_handler import query_grid_on_button_press
def test_query_grid_on_button_press():
rmg = RasterModelGrid((5, 5))
imshow_grid(rmg, rmg.nodes, cmap="RdYlBu")
# Programmatically create an event near the grid center.
event = Event("simulated_event", gcf().canvas)
event.xdata = int(rmg.number_of_node_columns * 0.5)
event.ydata = int(rmg.number_of_node_rows * 0.5)
results = query_grid_on_button_press(event, rmg)
x_coord = results["grid location"]["x_coord"]
y_coord = results["grid location"]["y_coord"]
msg = "Items: Simulated matplotlib event and query results."
assert_equal(x_coord, event.xdata, msg)
assert_equal(y_coord, event.ydata, msg)
msg = "Items: Node ID and grid coordinates of simulated matplotlib event."
node = rmg.grid_coords_to_node_id(event.xdata, event.ydata)
assert_equal(results["node"]["ID"], node, msg)
|
landlab/landlab
|
tests/plot/test_event_handler.py
|
Python
|
mit
| 1,165
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WikiPage.rating_sum'
db.add_column(u'mezzanine_wiki_wikipage', u'rating_sum',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'WikiPage.created'
db.add_column(u'mezzanine_wiki_wikipage', 'created',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Adding field 'WikiPage.updated'
db.add_column(u'mezzanine_wiki_wikipage', 'updated',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Adding field 'WikiPage.in_sitemap'
db.add_column(u'mezzanine_wiki_wikipage', 'in_sitemap',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'WikiPage.rating_sum'
db.delete_column(u'mezzanine_wiki_wikipage', u'rating_sum')
# Deleting field 'WikiPage.created'
db.delete_column(u'mezzanine_wiki_wikipage', 'created')
# Deleting field 'WikiPage.updated'
db.delete_column(u'mezzanine_wiki_wikipage', 'updated')
# Deleting field 'WikiPage.in_sitemap'
db.delete_column(u'mezzanine_wiki_wikipage', 'in_sitemap')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mezzanine_wiki.wikicategory': {
'Meta': {'object_name': 'WikiCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'mezzanine_wiki.wikipage': {
'Meta': {'ordering': "('title',)", 'object_name': 'WikiPage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'wikipages'", 'blank': 'True', 'to': u"orm['mezzanine_wiki.WikiCategory']"}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine_wiki.fields.WikiTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'wikipages'", 'to': u"orm['auth.User']"})
},
u'mezzanine_wiki.wikipagerevision': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'WikiPageRevision'},
'content': ('mezzanine_wiki.fields.WikiTextField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mezzanine_wiki.WikiPage']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'wikipagerevisions'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['mezzanine_wiki']
|
dfalk/mezzanine-wiki
|
mezzanine_wiki/migrations/0006_auto__add_field_wikipage_rating_sum__add_field_wikipage_created__add_f.py
|
Python
|
bsd-2-clause
| 9,474
|
# Lesson 1
def iterative_factorial(n):
result = 1
for i in range(1, n + 1):
result *= i
return result
print(iterative_factorial(5))
# "Recursion" is the act of calling a function from within itself. Some functions naturally work better recursively.
# Recursion is easier to work out if you write it down on paper.
def factorial(n):
if n > 1:
return n * factorial(n - 1)
else: # Recursive functions generally have a "base case", from which they stop recursing.
return 1
# NOTE: It's helpful to draw out the "frames" of each function call
print(factorial(5))
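# For example, factorial(3) expands one frame per call before collapsing:
# factorial(3) -> 3 * factorial(2)
#              -> 3 * (2 * factorial(1))
#              -> 3 * (2 * 1) = 6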
|
JonTheBurger/python_class
|
chapter 4/lessons/recursion.py
|
Python
|
mit
| 609
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from winsys._compat import unittest
import uuid
import winerror
import win32api
import win32con
import win32evtlog
import win32security
import pywintypes
from winsys.tests import utils as testutils
from winsys import event_logs, registry, utils
LOG_NAME = event_logs.DEFAULT_LOG_NAME
GUID = "_winsys-%s" % uuid.uuid1()
#
# Utility functions
#
def yield_logs(computer=None, log_name=LOG_NAME):
hLog = win32evtlog.OpenEventLog(computer, log_name)
try:
while True:
entries = win32evtlog.ReadEventLog(
hLog,
win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ,
0
)
if entries:
for entry in entries:
yield entry
else:
break
finally:
win32evtlog.CloseEventLog(hLog)
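# Note: EVENTLOG_BACKWARDS_READ makes ReadEventLog return records newest
# first, and EVENTLOG_SEQUENTIAL_READ continues from where the previous read
# on this handle stopped, so the loop walks the whole log exactly once.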
#
# TESTS
#
@unittest.skipUnless(testutils.i_am_admin(), "These tests must be run as Administrator")
class TestEventLogs(unittest.TestCase):
#
# Fixtures
#
def setUp(self):
event_logs.EventSource.create(GUID, LOG_NAME)
self.registry_root = registry.registry(r"HKLM\SYSTEM\CurrentControlSet\Services\Eventlog")
def tearDown(self):
event_logs.event_source(r"%s\%s" %(LOG_NAME, GUID)).delete()
#
# Event Source
#
def test_create_source(self):
log_name = "System"
guid = "_winsys-test_create_source-%s" % uuid.uuid1()
try:
source = event_logs.EventSource.create(guid, log_name)
self.assertTrue(self.registry_root + log_name + guid)
except:
raise
else:
source.delete()
self.assertFalse(bool(self.registry_root + log_name + guid))
def test_create_source_at_default(self):
guid = "_winsys-test_create_source_at_default-%s" % uuid.uuid1()
try:
source = event_logs.EventSource.create(guid)
self.assertTrue(self.registry_root + event_logs.DEFAULT_LOG_NAME + guid)
except:
raise
else:
source.delete()
self.assertFalse(bool(self.registry_root + event_logs.DEFAULT_LOG_NAME + guid))
def test_event_sources(self):
log_name = "System"
self.assertEqual(
set(s.name for s in event_logs.event_sources(log_name)),
set(r.name for r in self.registry_root + log_name)
)
self.assertTrue(all(isinstance(s, event_logs.EventSource) for s in event_logs.event_sources(log_name)))
def test_event_source_from_event_source(self):
for s in event_logs.event_sources():
self.assertTrue(isinstance(s, event_logs.EventSource))
self.assertTrue(event_logs.event_source(s) is s)
break
def test_event_source_from_none(self):
self.assertTrue(event_logs.event_source(None) is None)
def test_event_source_from_bad_string(self):
with self.assertRaises(event_logs.x_event_logs):
event_logs.event_source("")
def test_event_source_from_good_string(self):
self.assertTrue(
isinstance(
event_logs.event_source(r"%s\%s" %(LOG_NAME, GUID)),
event_logs.EventSource
)
)
def test_event_source_from_good_string_default_log(self):
self.assertTrue(
isinstance(
event_logs.event_source(GUID),
event_logs.EventSource
)
)
def test_event_source_as_string(self):
self.assertTrue(event_logs.event_source(GUID).as_string())
def test_event_source_log_event(self):
data = str(GUID).encode("utf8")
event_logs.event_source(GUID).log_event(data=data)
for event in yield_logs():
if event.SourceName == GUID and event.Data == data:
self.assertTrue(True)
break
else:
self.assertTrue(False)
#
# Event logs
#
def test_event_logs(self):
self.assertEqual(
set(s.name for s in event_logs.event_logs()),
set(r.name for r in self.registry_root.keys())
)
self.assertTrue(all(isinstance(s, event_logs.EventLog) for s in event_logs.event_logs()))
def test_event_log_from_event_log(self):
for l in event_logs.event_logs():
self.assertTrue(isinstance(l, event_logs.EventLog))
self.assertTrue(event_logs.event_log(l) is l)
break
def test_event_log_from_none(self):
self.assertTrue(event_logs.event_log(None) is None)
def test_event_log_from_bad_string(self):
with self.assertRaises(event_logs.x_event_logs):
event_logs.event_log("")
def test_event_log_from_good_string(self):
self.assertTrue(
isinstance(
event_logs.event_log(LOG_NAME),
event_logs.EventLog
)
)
def test_event_log_clear_no_save(self):
log_name = "Internet Explorer"
source_name = "_winsys-%s" % uuid.uuid1()
source = event_logs.EventSource.create(source_name, log_name)
log = event_logs.event_log(log_name)
hLog = win32evtlog.OpenEventLog(None, log_name)
try:
log.log_event(source, message="hello")
self.assertNotEqual(win32evtlog.GetNumberOfEventLogRecords(hLog), 0)
log.clear()
self.assertEqual(win32evtlog.GetNumberOfEventLogRecords(hLog), 0)
finally:
win32evtlog.CloseEventLog(hLog)
source.delete()
def test_event_log_clear_with_save(self):
log_name = "Internet Explorer"
source_name = "_winsys-%s" % uuid.uuid1()
source = event_logs.EventSource.create(source_name, log_name)
log = event_logs.event_log(log_name)
hLog = win32evtlog.OpenEventLog(None, log_name)
try:
log.log_event(source, message="hello")
self.assertNotEqual(win32evtlog.GetNumberOfEventLogRecords(hLog), 0)
log.clear()
self.assertEqual(win32evtlog.GetNumberOfEventLogRecords(hLog), 0)
finally:
win32evtlog.CloseEventLog(hLog)
source.delete()
#
# Module-level functions
#
def test_log_event(self):
data = str(GUID).encode("utf8")
event_logs.log_event("%s\\%s" %(LOG_NAME, GUID), data=data)
for event in yield_logs():
if event.SourceName == GUID and event.Data == data:
self.assertTrue(True)
break
else:
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
if sys.stdout.isatty(): raw_input("Press enter...")
|
operepo/ope
|
laptop_credential/winsys/tests/test_event_logs.py
|
Python
|
mit
| 6,794
|
DEBUG = True
TOKEN_SECRET = 'keyboard cat'
FACEBOOK_SECRET = 'Facebook Client Secret'
FOURSQUARE_SECRET = 'Foursquare Client Secret'
GOOGLE_SECRET = 'Google Client Secret'
LINKEDIN_SECRET = 'LinkedIn Client Secret'
TWITTER_CONSUMER_KEY = 'Twitter Consumer Key'
TWITTER_CONSUMER_SECRET = 'Twitter Consumer Secret'
TWITTER_CALLBACK_URL = 'http://localhost:3000'
SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db'
|
timsvoice/satellizer
|
examples/server/python/config.py
|
Python
|
mit
| 407
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
policy_data = """
{
"admin_api": "is_admin:True",
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
"context_is_admin": "role:admin or role:administrator",
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
"compute:get": "",
"compute:get_all": "",
"compute:get_all_tenants": "",
"compute:update": "",
"compute:get_instance_metadata": "",
"compute:get_all_instance_metadata": "",
"compute:get_all_instance_system_metadata": "",
"compute:update_instance_metadata": "",
"compute:delete_instance_metadata": "",
"compute:get_instance_faults": "",
"compute:get_diagnostics": "",
"compute:get_instance_diagnostics": "",
"compute:get_lock": "",
"compute:lock": "",
"compute:unlock": "",
"compute:unlock_override": "is_admin:True",
"compute:get_vnc_console": "",
"compute:get_spice_console": "",
"compute:get_rdp_console": "",
"compute:get_serial_console": "",
"compute:get_console_output": "",
"compute:associate_floating_ip": "",
"compute:reset_network": "",
"compute:inject_network_info": "",
"compute:add_fixed_ip": "",
"compute:remove_fixed_ip": "",
"compute:attach_volume": "",
"compute:detach_volume": "",
"compute:attach_interface": "",
"compute:detach_interface": "",
"compute:set_admin_password": "",
"compute:rescue": "",
"compute:unrescue": "",
"compute:suspend": "",
"compute:resume": "",
"compute:pause": "",
"compute:unpause": "",
"compute:start": "",
"compute:stop": "",
"compute:resize": "",
"compute:confirm_resize": "",
"compute:revert_resize": "",
"compute:rebuild": "",
"compute:reboot": "",
"compute:snapshot": "",
"compute:backup": "",
"compute:shelve": "",
"compute:shelve_offload": "",
"compute:unshelve": "",
"compute:security_groups:add_to_instance": "",
"compute:security_groups:remove_from_instance": "",
"compute:delete": "",
"compute:soft_delete": "",
"compute:force_delete": "",
"compute:restore": "",
"compute:swap_volume": "",
"compute:volume_snapshot_create": "",
"compute:volume_snapshot_delete": "",
"compute:v3:servers:start": "",
"compute:v3:servers:stop": "",
"compute_extension:v3:os-access-ips": "",
"compute_extension:accounts": "",
"compute_extension:admin_actions:pause": "",
"compute_extension:admin_actions:unpause": "",
"compute_extension:admin_actions:suspend": "",
"compute_extension:admin_actions:resume": "",
"compute_extension:admin_actions:lock": "",
"compute_extension:admin_actions:unlock": "",
"compute_extension:admin_actions:resetNetwork": "",
"compute_extension:admin_actions:injectNetworkInfo": "",
"compute_extension:admin_actions:createBackup": "",
"compute_extension:admin_actions:migrateLive": "",
"compute_extension:admin_actions:resetState": "",
"compute_extension:admin_actions:migrate": "",
"compute_extension:v3:os-admin-actions:reset_network": "",
"compute_extension:v3:os-admin-actions:inject_network_info": "",
"compute_extension:v3:os-admin-actions:reset_state": "",
"compute_extension:v3:os-admin-password": "",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:v3:os-aggregates:index": "rule:admin_api",
"compute_extension:v3:os-aggregates:create": "rule:admin_api",
"compute_extension:v3:os-aggregates:show": "rule:admin_api",
"compute_extension:v3:os-aggregates:update": "rule:admin_api",
"compute_extension:v3:os-aggregates:delete": "rule:admin_api",
"compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
"compute_extension:agents": "",
"compute_extension:v3:os-agents": "",
"compute_extension:attach_interfaces": "",
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
"compute_extension:cells:create": "rule:admin_api",
"compute_extension:cells:delete": "rule:admin_api",
"compute_extension:cells:update": "rule:admin_api",
"compute_extension:cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells": "",
"compute_extension:v3:os-cells:create": "rule:admin_api",
"compute_extension:v3:os-cells:delete": "rule:admin_api",
"compute_extension:v3:os-cells:update": "rule:admin_api",
"compute_extension:v3:os-cells:sync_instances": "rule:admin_api",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates:create": "",
"compute_extension:v3:os-certificates:show": "",
"compute_extension:cloudpipe": "",
"compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
"compute_extension:v3:os-config-drive": "",
"compute_extension:console_output": "",
"compute_extension:v3:os-console-output": "",
"compute_extension:consoles": "",
"compute_extension:v3:os-remote-consoles": "",
"compute_extension:createserverext": "",
"compute_extension:v3:os-create-backup": "",
"compute_extension:deferred_delete": "",
"compute_extension:v3:os-deferred-delete": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "is_admin:True",
"compute_extension:v3:os-evacuate": "is_admin:True",
"compute_extension:extended_server_attributes": "",
"compute_extension:v3:os-extended-server-attributes": "",
"compute_extension:extended_status": "",
"compute_extension:v3:os-extended-status": "",
"compute_extension:extended_availability_zone": "",
"compute_extension:v3:os-extended-availability-zone": "",
"compute_extension:extended_ips": "",
"compute_extension:extended_ips_mac": "",
"compute_extension:extended_vif_net": "",
"compute_extension:extended_volumes": "",
"compute_extension:v3:os-extended-volumes": "",
"compute_extension:v3:os-extended-volumes:swap": "",
"compute_extension:v3:os-extended-volumes:attach": "",
"compute_extension:v3:os-extended-volumes:detach": "",
"compute_extension:v3:extensions:discoverable": "",
"compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
"compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
"compute_extension:v3:os-flavor-access": "",
"compute_extension:v3:os-flavor-access:remove_tenant_access":
"rule:admin_api",
"compute_extension:v3:os-flavor-access:add_tenant_access":
"rule:admin_api",
"compute_extension:flavor_disabled": "",
"compute_extension:v3:os-flavor-disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:v3:os-flavor-rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
"compute_extension:flavorextraspecs:index": "",
"compute_extension:flavorextraspecs:show": "",
"compute_extension:flavorextraspecs:create": "is_admin:True",
"compute_extension:flavorextraspecs:update": "is_admin:True",
"compute_extension:flavorextraspecs:delete": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:index": "",
"compute_extension:v3:flavor-extra-specs:show": "",
"compute_extension:v3:flavor-extra-specs:create": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:update": "is_admin:True",
"compute_extension:v3:flavor-extra-specs:delete": "is_admin:True",
"compute_extension:flavormanage": "",
"compute_extension:v3:flavor-manage": "",
"compute_extension:v3:flavors:discoverable": "",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
"compute_extension:floating_ips_bulk": "",
"compute_extension:fping": "",
"compute_extension:fping:all_tenants": "is_admin:True",
"compute_extension:hide_server_addresses": "",
"compute_extension:v3:os-hide-server-addresses": "",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:v3:os-hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
"compute_extension:v3:os-hypervisors": "rule:admin_api",
"compute_extension:image_size": "",
"compute_extension:v3:image-size": "",
"compute_extension:instance_actions": "",
"compute_extension:v3:os-instance-actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:v3:os-instance-actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:keypairs:index": "",
"compute_extension:keypairs:show": "",
"compute_extension:keypairs:create": "",
"compute_extension:keypairs:delete": "",
"compute_extension:v3:os-keypairs": "",
"compute_extension:v3:os-keypairs:index": "",
"compute_extension:v3:os-keypairs:show": "",
"compute_extension:v3:os-keypairs:create": "",
"compute_extension:v3:os-keypairs:delete": "",
"compute_extension:v3:os-lock-server:lock": "",
"compute_extension:v3:os-lock-server:unlock": "",
"compute_extension:v3:os-migrate-server:migrate": "",
"compute_extension:v3:os-migrate-server:migrate_live": "",
"compute_extension:multinic": "",
"compute_extension:v3:os-multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
"compute_extension:os-tenant-networks": "",
"compute_extension:v3:os-pause-server:pause": "",
"compute_extension:v3:os-pause-server:unpause": "",
"compute_extension:v3:os-pci:pci_servers": "",
"compute_extension:v3:os-pci:index": "",
"compute_extension:v3:os-pci:detail": "",
"compute_extension:v3:os-pci:show": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quotas:delete": "",
"compute_extension:v3:os-quota-sets:show": "",
"compute_extension:v3:os-quota-sets:update": "",
"compute_extension:v3:os-quota-sets:delete": "",
"compute_extension:v3:os-quota-sets:detail": "",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
"compute_extension:v3:os-rescue": "",
"compute_extension:security_group_default_rules": "",
"compute_extension:security_groups": "",
"compute_extension:v3:os-security-groups": "",
"compute_extension:server_diagnostics": "",
"compute_extension:v3:os-server-diagnostics": "",
"compute_extension:server_groups": "",
"compute_extension:server_password": "",
"compute_extension:v3:os-server-password": "",
"compute_extension:server_usage": "",
"compute_extension:v3:os-server-usage": "",
"compute_extension:v3:os-server-groups": "",
"compute_extension:services": "",
"compute_extension:v3:os-services": "",
"compute_extension:shelve": "",
"compute_extension:shelveOffload": "",
"compute_extension:v3:os-shelve:shelve": "",
"compute_extension:v3:os-shelve:shelve_offload": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"compute_extension:v3:os-simple-tenant-usage:show": "",
"compute_extension:v3:os-simple-tenant-usage:list": "",
"compute_extension:unshelve": "",
"compute_extension:v3:os-shelve:unshelve": "",
"compute_extension:v3:os-suspend-server:suspend": "",
"compute_extension:v3:os-suspend-server:resume": "",
"compute_extension:users": "",
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
"compute_extension:volume_attachments:index": "",
"compute_extension:volume_attachments:show": "",
"compute_extension:volume_attachments:create": "",
"compute_extension:volume_attachments:update": "",
"compute_extension:volume_attachments:delete": "",
"compute_extension:v3:os-volumes": "",
"compute_extension:volumetypes": "",
"compute_extension:zones": "",
"compute_extension:availability_zone:list": "",
"compute_extension:v3:os-availability-zone:list": "",
"compute_extension:availability_zone:detail": "is_admin:True",
"compute_extension:v3:os-availability-zone:detail": "is_admin:True",
"compute_extension:used_limits_for_admin": "is_admin:True",
"compute_extension:v3:os-used-limits": "is_admin:True",
"compute_extension:migrations:index": "is_admin:True",
"compute_extension:v3:os-migrations:index": "is_admin:True",
"compute_extension:os-assisted-volume-snapshots:create": "",
"compute_extension:os-assisted-volume-snapshots:delete": "",
"compute_extension:console_auth_tokens": "is_admin:True",
"compute_extension:v3:os-console-auth-tokens": "is_admin:True",
"compute_extension:os-server-external-events:create": "rule:admin_api",
"compute_extension:v3:os-server-external-events:create": "rule:admin_api",
"volume:create": "",
"volume:get": "",
"volume:get_all": "",
"volume:get_volume_metadata": "",
"volume:delete": "",
"volume:update": "",
"volume:delete_volume_metadata": "",
"volume:update_volume_metadata": "",
"volume:attach": "",
"volume:detach": "",
"volume:reserve_volume": "",
"volume:unreserve_volume": "",
"volume:begin_detaching": "",
"volume:roll_detaching": "",
"volume:check_attach": "",
"volume:check_detach": "",
"volume:initialize_connection": "",
"volume:terminate_connection": "",
"volume:create_snapshot": "",
"volume:delete_snapshot": "",
"volume:get_snapshot": "",
"volume:get_all_snapshots": "",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "",
"volume_extension:types_manage": "",
"volume_extension:types_extra_specs": "",
"network:get_all": "",
"network:get": "",
"network:create": "",
"network:delete": "",
"network:associate": "",
"network:disassociate": "",
"network:get_vifs_by_instance": "",
"network:get_vif_by_mac_address": "",
"network:allocate_for_instance": "",
"network:deallocate_for_instance": "",
"network:validate_networks": "",
"network:get_instance_uuids_by_ip_filter": "",
"network:get_instance_id_by_floating_address": "",
"network:setup_networks_on_host": "",
"network:get_floating_ip": "",
"network:get_floating_ip_pools": "",
"network:get_floating_ip_by_address": "",
"network:get_floating_ips_by_project": "",
"network:get_floating_ips_by_fixed_address": "",
"network:allocate_floating_ip": "",
"network:deallocate_floating_ip": "",
"network:associate_floating_ip": "",
"network:disassociate_floating_ip": "",
"network:release_floating_ip": "",
"network:migrate_instance_start": "",
"network:migrate_instance_finish": "",
"network:get_fixed_ip": "",
"network:get_fixed_ip_by_address": "",
"network:add_fixed_ip_to_instance": "",
"network:remove_fixed_ip_from_instance": "",
"network:add_network_to_project": "",
"network:get_instance_nw_info": "",
"network:get_dns_domains": "",
"network:add_dns_entry": "",
"network:modify_dns_entry": "",
"network:delete_dns_entry": "",
"network:get_dns_entries_by_address": "",
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
"network:delete_dns_domain": "",
"network:attach_external_network": "rule:admin_api"
}
"""
|
tianweizhang/nova
|
nova/tests/fake_policy.py
|
Python
|
apache-2.0
| 16,571
|
"""
This is the test suite for space.py.
"""
from unittest import TestCase, main, skip
from indra.agent import Agent, X, Y
from indra.space import DEF_HEIGHT, DEF_WIDTH
from indra.space import Space, distance
from indra.tests.test_agent import create_newton, create_hardy, create_leibniz
from indra.tests.test_agent import create_ramanujan
REP_RAND_TESTS = 20
def create_space():
space = Space("test space")
newton = create_newton()
space += newton
space += create_hardy()
space += create_leibniz()
return space, newton
def create_teeny_space():
"""
This space should be full!
"""
space = Space("test space", 2, 2)
space += create_newton()
space += create_hardy()
space += create_leibniz()
space += create_ramanujan()
return space
class SpaceTestCase(TestCase):
def setUp(self):
(self.space, self.newton) = create_space()
self.teeny_space = create_teeny_space()
self.test_agent = Agent("test agent")
self.test_agent2 = Agent("test agent 2")
self.test_agent3 = Agent("test agent 3")
self.test_agent4 = Agent("test agent 4")
def tearDown(self):
self.space = None
self.teeny_space = None
self.test_agent = None
self.test_agent2 = None
self.test_agent3 = None
self.test_agent4 = None
def test_get_closest_agent(self):
closest = self.space.get_closest_agent(self.newton)
self.assertTrue(distance(self.newton, closest) <=
self.space.get_max_distance())
def test_constrain_x(self):
"""
Test keeping x in bounds.
"""
self.assertEqual(self.space.constrain_x(-10), 0)
self.assertEqual(self.space.constrain_x(DEF_WIDTH * 2), DEF_WIDTH - 1)
def test_constrain_y(self):
"""
Test keeping y in bounds.
"""
self.assertEqual(self.space.constrain_y(-10), 0)
self.assertEqual(self.space.constrain_x(DEF_HEIGHT * 2),
DEF_HEIGHT - 1)
def test_grid_size(self):
"""
Make sure we calc grid size properly.
"""
self.assertEqual(self.space.grid_size(), DEF_HEIGHT * DEF_WIDTH)
def test_is_full(self):
"""
See if the grid is full.
"""
self.assertFalse(self.space.is_full())
self.assertTrue(self.teeny_space.is_full())
def test_rand_place_members(self):
"""
Test rand_place_members() by making sure all agents have a pos
when done.
"""
for agent in self.space:
self.assertTrue(self.space[agent].is_located())
def test_place_member_xy(self):
"""
Test placing an agent at a particular x, y spot.
We will run this DEF_HEIGHT times, to test multiple
possible placements.
"""
space = Space("test space")
for i in range(DEF_HEIGHT):
spot = space.place_member(mbr=self.test_agent, xy=(i, i))
if spot is not None:
                # place_member() returns the spot on success; verify the
                # agent actually landed at the requested coordinates.
(x, y) = (self.test_agent.get_x(),
self.test_agent.get_y())
self.assertEqual((x, y), (i, i))
def test_get_agent_at(self):
"""
Test getting an agent from some locale.
"""
space = Space("test space")
# before adding agent, all cells are empty:
self.assertEqual(space.get_agent_at(1, 1), None)
for i in range(DEF_HEIGHT):
spot = space.place_member(mbr=self.test_agent, xy=(i, i))
whos_there = space.get_agent_at(i, i)
self.assertEqual(whos_there, self.test_agent)
def test_rand_x(self):
"""
Make sure randomly generated X pos is within grid.
If constrained, make sure it is within constraints.
"""
x = self.space.rand_x()
self.assertTrue(x >= 0)
self.assertTrue(x <= self.space.width)
x2 = self.space.rand_x(low=4, high=8)
self.assertTrue(x2 >= 4)
self.assertTrue(x2 <= 8)
def test_rand_y(self):
"""
Make sure randomly generated Y pos is within grid.
"""
y = self.space.rand_y()
self.assertTrue(y >= 0)
self.assertTrue(y <= self.space.height)
y2 = self.space.rand_y(low=4, high=8)
self.assertTrue(y2 >= 4)
self.assertTrue(y2 <= 8)
def test_gen_new_pos(self):
"""
Making sure new pos is within max_move of old pos.
Since this test relies on random numbers, let's repeat it.
"""
for i in range(REP_RAND_TESTS):
# test with different max moves:
max_move = (i // 2) + 1
(old_x, old_y) = self.newton.get_pos()
(new_x, new_y) = self.space.gen_new_pos(self.newton, max_move)
if not abs(old_x - new_x) <= max_move:
print("Failed test with ", old_x, " ", new_x)
if not abs(old_y - new_y) <= max_move:
print("Failed test with ", old_y, " ", new_y)
self.assertTrue(abs(old_x - new_x) <= max_move)
self.assertTrue(abs(old_y - new_y) <= max_move)
def test_location(self):
"""
Test that an added agent has a location.
"""
n = create_newton()
self.space += n
self.assertTrue(self.space.get_agent_at(n.pos[X], n.pos[Y]) == n)
def test_add_location(self):
"""
Can we add an agent to a location?
"""
for i in range(REP_RAND_TESTS):
# test with different random positions
x, y = self.space.rand_x(), self.space.rand_y()
if (x, y) not in self.space.locations:
self.newton.set_pos(self.space, x, y)
self.space.add_location(x, y, self.newton)
self.assertTrue(self.space.get_agent_at(self.newton.pos[X],
self.newton.pos[Y])
== self.newton)
# def test_move_location(self):
# """
# Can we move agent from one location to another?
# This test sometimes fails: we need to explore!
# """
# for i in range(REP_RAND_TESTS):
# test with different random positions
# print("Trying a new location: ", i, "th iteration")
# print("Previous newton is at ", self.newton.get_x(), self.newton.get_y())
# x, y = self.space.rand_x(), self.space.rand_y()
# print("new x, y = ", x, y)
# self.space.move_location(x, y, self.newton.get_x(), self.newton.get_y())
# print("Now newton is at ", self.newton.get_x(), self.newton.get_y())
# self.assertTrue(self.space.locations[(x, y)] == self.newton)
def test_remove_location(self):
"""
Test removing location from locations.
"""
(x, y) = (self.newton.get_x(), self.newton.get_y())
self.space.remove_location(x, y)
self.assertTrue((x, y) not in self.space.locations)
def test_move(self):
"""
Test whether moving an agent stays within its max move.
"""
for i in range(REP_RAND_TESTS):
# test with different max moves:
max_move = (i // 2) + 1
(old_x, old_y) = (self.newton.get_x(), self.newton.get_y())
self.newton.move(max_move)
(new_x, new_y) = (self.newton.get_x(), self.newton.get_y())
self.assertTrue(abs(new_x - old_x) <= max_move)
self.assertTrue(abs(new_y - old_y) <= max_move)
def test_is_empty(self):
"""
Is cell empty?
"""
(x, y) = (self.newton.get_x(), self.newton.get_y())
self.assertFalse(self.space.is_empty(x, y))
@skip("Skipping von neumann test")
def test_get_vonneumann_hood(self):
"""
Get von Neumann neighborhood.
"""
space = Space("test space")
space += self.test_agent
space += self.test_agent2
space += self.test_agent3
space += self.test_agent4
for i in range(REP_RAND_TESTS):
print("Looping in von Neumann")
space.place_member(mbr=self.test_agent, xy=(0, 0))
space.place_member(mbr=self.test_agent2, xy=(0, 1))
hood = space.get_vonneumann_hood(self.test_agent)
self.assertTrue(self.test_agent2.name in hood)
space.place_member(mbr=self.test_agent3, xy=(1, 0))
hood = space.get_vonneumann_hood(self.test_agent)
self.assertTrue(self.test_agent3.name in hood)
space.place_member(mbr=self.test_agent4, xy=(0, DEF_HEIGHT))
hood = space.get_vonneumann_hood(self.test_agent)
self.assertTrue(self.test_agent4.name not in hood)
if __name__ == '__main__':
main()
|
gcallah/Indra
|
indra/tests/test_space.py
|
Python
|
gpl-3.0
| 8,912
|
"""
===================
Label image regions
===================
This example shows how to segment an image with image labelling. The following
steps are applied:
1. Thresholding with automatic Otsu method
2. Close small holes with binary closing
3. Remove artifacts touching image border
4. Measure image regions to filter small objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.measure import regionprops
from skimage.color import label2rgb
image = data.coins()[50:-50, 50:-50]
# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))
# remove artifacts connected to image border
cleared = bw.copy()
clear_border(cleared)
# label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
# skip small images
if region.area < 100:
continue
# draw rectangle around segmented coins
minr, minc, maxr, maxc = region.bbox
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
plt.show()
|
Hiyorimi/scikit-image
|
doc/examples/segmentation/plot_label.py
|
Python
|
bsd-3-clause
| 1,493
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_utils import importutils
from nova.scheduler import utils
class LazyLoader(object):
def __init__(self, klass, *args, **kwargs):
self.klass = klass
self.args = args
self.kwargs = kwargs
self.instance = None
def __getattr__(self, name):
return functools.partial(self.__run_method, name)
def __run_method(self, __name, *args, **kwargs):
if self.instance is None:
self.instance = self.klass(*self.args, **self.kwargs)
return getattr(self.instance, __name)(*args, **kwargs)
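# A minimal usage sketch (not part of nova): LazyLoader defers construction
# until the first method call, so the wrapped class is only instantiated
# when one of its methods is actually needed. `Expensive` and `ping` are
# hypothetical names used purely for illustration:
#
#     class Expensive(object):
#         def __init__(self):
#             print('constructed')        # runs on first call, not here
#         def ping(self):
#             return 'pong'
#
#     loader = LazyLoader(Expensive)      # nothing constructed yet
#     loader.ping()                       # prints 'constructed', -> 'pong'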
class SchedulerClient(object):
"""Client library for placing calls to the scheduler."""
def __init__(self):
self.queryclient = LazyLoader(importutils.import_class(
'nova.scheduler.client.query.SchedulerQueryClient'))
self.reportclient = LazyLoader(importutils.import_class(
'nova.scheduler.client.report.SchedulerReportClient'))
@utils.retry_select_destinations
def select_destinations(self, context, request_spec, filter_properties):
return self.queryclient.select_destinations(
context, request_spec, filter_properties)
def update_aggregates(self, context, aggregates):
self.queryclient.update_aggregates(context, aggregates)
def delete_aggregate(self, context, aggregate):
self.queryclient.delete_aggregate(context, aggregate)
def update_resource_stats(self, context, name, stats):
self.reportclient.update_resource_stats(context, name, stats)
def update_instance_info(self, context, host_name, instance_info):
self.queryclient.update_instance_info(context, host_name,
instance_info)
def delete_instance_info(self, context, host_name, instance_uuid):
self.queryclient.delete_instance_info(context, host_name,
instance_uuid)
def sync_instance_info(self, context, host_name, instance_uuids):
self.queryclient.sync_instance_info(context, host_name, instance_uuids)
|
thomasem/nova
|
nova/scheduler/client/__init__.py
|
Python
|
apache-2.0
| 2,731
|
my_file = open("/tmp/my_file", "w")
my_file.write("Test string")
my_file.close()
my_file = open("/tmp/my_file", "r")
content = my_file.read()
my_file.close()
if (content == "Test string"):
print("OK")
else:
print("KO")
|
Nakrez/RePy
|
tests/parser/good/file.py
|
Python
|
mit
| 229
|
# xlreg_py/xlreg/regCred.py
""" Registry credentials as seen by client. """
import binascii
from xlutil import parse_decimal_version
SHA1_BYTES = 20
SHA2_BYTES = 32
class RegCredError(RuntimeError):
""" xlReg-related exception. """
class RegCred(object):
""" Registry credentials as seen by client. """
__slots__ = ['_name', '_id', '_comms_pub_key', '_sig_pub_key',
'_end_points', '_version',
]
def __init__(self, name, id_, ck_, sk_, end_points, version):
""" Initialize the instance. """
if name is None or name == '':
raise RegCredError('nil or empty xlReg name')
self._name = name
if id_ is None or id_ == '':
raise RegCredError('nil or empty xlReg id')
id_len = len(id_)
if id_len != SHA1_BYTES and id_len != SHA2_BYTES:
            raise RegCredError('id length is neither 20 nor 32')
        self._id = id_
        if ck_ is None or ck_ == '':
            raise RegCredError('nil or empty xlReg commsPubKey')
        # NOTE need better check(s)
        self._comms_pub_key = ck_
        if sk_ is None or sk_ == '':
            raise RegCredError('nil or empty xlReg sigPubKey')
        # NOTE need better check(s)
self._sig_pub_key = sk_
if not end_points:
raise RegCredError('nil or empty end_points list')
self._end_points = []
for ep_ in end_points:
# NOTE currently should begin with 'TcpEndPoint: '
self._end_points.append(ep_)
if version is None:
raise RegCredError('nil regCred version')
# NOTE should check it's a 32-bit value
self._version = version
# properties
def get_name(self):
""" Return the name. """
return self._name
def get_id(self):
""" Return the regCred ID. """
return self._id
def get_comms_pub_key(self):
""" Return the communications public key. """
return self._comms_pub_key
def get_sig_pub_key(self):
""" Return the public key for digital signatures. """
return self._sig_pub_key
def get_end_points(self):
""" Return the associated EndPoint. """
return self._end_points
def get_version(self):
""" Return the RegCred version number. """
return self._version
def __str__(self):
""" Stringify the object. """
strings = []
strings.append('regCred {')
strings.append(' Name: %s' % self._name)
# uncommenting this yields 'odd length string' error
# strings.append(" ID: %s" % binascii.b2a_hex(self._id))
strings.append(" ID: %s" % dump_byte_array(self._id))
strings.append(
' CommsPubKey: ' +
dump_byte_array(
self._comms_pub_key))
strings.append(' SigPubKey: ' + dump_byte_array(self._sig_pub_key))
strings.append(' EndPoints {')
for ep_ in self._end_points:
strings.append(' ' + ep_)
strings.append(' }')
strings.append(' Version: ' + self._version.__str__())
strings.append('}')
string = "\r\n".join(strings) + "\r\n"
return string
def dump_byte_array(aaa):
""" Stringify a byte array as hex. """
out = ''
for bbb in aaa:
pair = "%02x" % bbb
out += pair
return out
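# For a bytes value, dump_byte_array is equivalent to
# binascii.hexlify(aaa).decode('ascii'); the manual loop is more general in
# that it accepts any iterable of ints.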
def parse_reg_cred(line):
"""
Expect rather loosely formatted registry credentials but require
a space after colon (:) or left brace ({) delimiters.
"""
if line is None or line == "":
raise RegCredError("nil or empty regCred string")
lines = line.split("\r\n")
line_count = len(lines)
def skip_line_and_trim(ndx):
"""
        Advance past blank lines; return the new index and the next
        non-blank line, stripped of surrounding whitespace.
"""
if ndx >= line_count:
raise RegCredError('no next line')
while ndx < line_count:
line = lines[ndx].strip()
ndx += 1
if line:
break
return ndx, line
ndx = 0
ndx, line = skip_line_and_trim(ndx)
if line != 'regCred {':
raise RegCredError(
"expected 'regCred {' but found '%s': not well-formed" % line)
ndx, line = skip_line_and_trim(ndx)
parts = line.split(': ')
if len(parts) != 2 or parts[0] != 'Name':
raise RegCredError('not a well-formed regCred')
name = parts[1].strip()
if len(name) < 1:
raise RegCredError('not a well-formed regCred: empty name')
ndx, line = skip_line_and_trim(ndx)
parts = line.split(': ')
if len(parts) != 2 or parts[0] != 'ID':
raise RegCredError('not a well-formed regCred')
hex_ = parts[1].strip()
id_ = binascii.a2b_hex(hex_)
# NOTE could require length of 20 or 32
ndx, line = skip_line_and_trim(ndx)
parts = line.split(': ')
if len(parts) != 2 or parts[0] != 'CommsPubKey':
raise RegCredError('not a well-formed regCred')
ck_ = binascii.a2b_hex(parts[1])
ndx, line = skip_line_and_trim(ndx)
parts = line.split(': ')
if len(parts) != 2 or parts[0] != 'SigPubKey':
raise RegCredError('not a well-formed regCred')
sk_ = binascii.a2b_hex(parts[1])
ndx, line = skip_line_and_trim(ndx)
if line != 'EndPoints {':
raise RegCredError('not a well-formed regCred')
# collect end_points
end_points = []
while True:
ndx, line = skip_line_and_trim(ndx)
if line == '}':
break
parts = line.split(': ')
if len(parts) != 2:
raise RegCredError('not a well-formed regCred end_point')
protocol = parts[0].strip()
address = parts[1].strip()
end_points.append('%s: %s' % (protocol, address))
ndx, line = skip_line_and_trim(ndx)
parts = line.split(': ')
if len(parts) != 2 or parts[0] != 'Version':
raise RegCredError('not a well-formed regCred')
vers = parts[1].strip()
version = parse_decimal_version(vers)
return RegCred(name, id_, ck_, sk_, end_points, version)
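# A minimal round-trip sketch (not part of the library): serialize a RegCred
# with __str__() and recover it with parse_reg_cred(). The id/key bytes are
# made up, and we assume xlutil's version object round-trips through str().
if __name__ == '__main__':
    _cred = RegCred(
        name='testReg',
        id_=bytes(range(20)),               # 20-byte (SHA1-length) id
        ck_=b'\x01' * 16,                   # stand-in comms public key
        sk_=b'\x02' * 16,                   # stand-in signature public key
        end_points=['TcpEndPoint: 127.0.0.1:8080'],
        version=parse_decimal_version('1.2.3.4'))
    _copy = parse_reg_cred(_cred.__str__())
    assert _copy.get_name() == _cred.get_name()
    assert _copy.get_id() == _cred.get_id()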
|
jddixon/xlreg_py
|
src/xlreg/reg_cred.py
|
Python
|
mit
| 6,142
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from cloudkittydashboard.api import cloudkitty as cloudkitty_api
from cloudkittydashboard.dashboards.admin.pricing.pricing \
import tables as pricing_tables
from cloudkittydashboard.dashboards.admin.pricing.pricing_versions \
import tables as pricing_versions_tables
class PricingTab(tabs.TableTab):
table_classes = (pricing_tables.PricingTable,)
name = _("Pricing")
slug = "pricing_tab"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_pricing_data(self):
try:
            pricing_infos = cloudkitty_api.cloudkittyclient(
                self.request).common.pricing_infos.list(pricing_version='all')
        except Exception:
            pricing_infos = []
            exceptions.handle(self.request,
                              _('Unable to retrieve pricing list.'))
pricing = []
for p in pricing_infos:
if p.time_based:
p.time_based = _("Duration")
else:
p.time_based = _("Usage")
if p.charging_key != "network.bw.total":
pricing.append(p)
return pricing
class PricingVersionsTab(tabs.TableTab):
table_classes = (pricing_versions_tables.PricingVersionsTable,)
name = _("Pricing Versions")
slug = "pricing_versions_tab"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_pricing_versions_data(self):
try:
            pricing_versions = cloudkitty_api.cloudkittyclient(
                self.request).common.pricing_vers.list()
        except Exception:
            pricing_versions = []
            exceptions.handle(self.request,
                              _('Unable to retrieve pricing versions list.'))
return pricing_versions
class PricingGroupTabs(tabs.TabGroup):
slug = "pricing_group_tabs"
tabs = (PricingTab, PricingVersionsTab)
sticky = True
|
FNST-OpenStack/cloudkitty-dashboard
|
cloudkittydashboard/dashboards/admin/pricing/tabs.py
|
Python
|
apache-2.0
| 2,562
|
class BaseDabbaException(Exception):
@property
def message(self):
return self.args[0]
class ProcessingException(BaseDabbaException):
def __init__(self, message, slug=None, info=None):
super(ProcessingException, self).__init__(message)
self.slug = slug
self.info = info
def to_dict(self):
return {
'message': self.message,
'slug': self.slug,
'info': self.info,
}
class EarlyReturn(BaseDabbaException):
def __init__(self, message, job):
super(EarlyReturn, self).__init__(message)
self.job = job
class BadRequest(BaseDabbaException):
def __init__(self, message, status_code=500):
super(BadRequest, self).__init__(message)
self.status_code = status_code
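# A minimal usage sketch (not part of the module): to_dict() yields a
# JSON-ready error payload; the message/slug/info values below are made up.
if __name__ == '__main__':
    exc = ProcessingException('bad frame', slug='bad-frame', info={'n': 3})
    print(exc.to_dict())
    # -> {'message': 'bad frame', 'slug': 'bad-frame', 'info': {'n': 3}}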
|
voidfiles/dabba
|
dabba/exceptions.py
|
Python
|
mit
| 799
|
#!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
    Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=short',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-f']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
try:
print >> self.pipe.stdin, offset
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
except Exception:
function_name = ''
file_name = ''
file_name = fix_filename(file_name)
return ['%s in %s %s' % (addr, function_name, file_name)]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
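# A minimal usage sketch (not in the original file): wrap `cat`, which
# answers each input line with the same line, to exercise the one-line-in,
# one-line-out protocol. POSIX-only, since the class depends on pty.
#
#     converter = UnbufferedLineConverter(['cat'])
#     print converter.convert('hello')   # -> 'hello'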
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = guess_arch(addr)
self.open_atos()
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary)
elif system == 'Linux':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
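# For reference, a tiny illustrative Breakpad symbol file of the kind
# parse_lines() consumes (module id, addresses and sizes are made up):
#
#     MODULE Linux x86_64 A7001116478B33F18FF9BEDE9F615F190 a.out
#     FILE 0 /src/foo.cc
#     FUNC 1000 30 0 main
#     1000 10 12 0
#     PUBLIC 2000 0 _start
#
# where a bare hex line reads: address size line_number file_number.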
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, guess_arch(addr), self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
if result is None:
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset)
if not symbolized_line:
            if original_binary != binary:
                # Retry with the original (unfiltered) binary name.
                symbolized_line = self.symbolize_address(addr,
                                                         original_binary,
                                                         offset)
return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
|
hujiajie/chromium-crosswalk
|
tools/valgrind/asan/third_party/asan_symbolize.py
|
Python
|
bsd-3-clause
| 16,609
|
#!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# String Names
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2007-2016 Rick Graves
#
from six import print_ as print3
from Dict.Get import getReverseDictCarefully
class Finished( Exception ): pass
dNickProper = dict( (
('ab', ('abner', 'abigail')),
('abbie', ('abner', 'abigail')),
('abby', ('abigail',)),
('abe', ('abel', 'abraham', 'abram')),
('abertina', ('alberta',)),
('abram', ('abraham',)),
('ada', ('adaline',)),
('addie', ('adelaide',)),
('addy', ('agatha',)),
('adela', ('della', 'dell', 'adaline')),
('adelaide', ('dell', 'della')),
('adeline', ('aline',)),
('ag', ('agatha',)),
('aggie', ('agatha',)),
('aggy', ('augustina', 'augusta', 'agatha', 'agnes')),
('agnes', ('inez', 'agatha', 'nancy')),
('aileen', ('helena', 'helen')),
('al', ('albert', 'allan', 'alan', 'alonzo', 'alanson', 'alfred', 'allen', 'alexander', 'alfonse')),
('albert', ('adelbert',)),
('albertine', ('alberta',)),
('alec', ('alexander',)),
('alex', ('alexandra', 'alexander')),
('alexandra', ('sandra',)),
('alf', ('alfred',)),
('alfie', ('alfred',)),
('alfy', ('alfreda',)),
('algy', ('algernon',)),
('alice', ('elsie',)),
('alicia', ('alice',)),
('aline', ('adaline',)),
('alla', ('alexandria',)),
('allie', ('alberta', 'alicia', 'almena', 'alice')),
('alphonzo', ('alonzo',)),
('ana', ('anastasia',)),
('andy', ('andrew',)),
('angelica', ('angela',)),
('angelina', ('angela',)),
('angeline', ('angela',)),
    ('ann', ('antonia', 'roseanna', 'roxanne', 'roxanna', 'roseann', 'nancy', 'agnes', 'antoinette')),
('anna', ('annette',)),
('annie', ('anne', 'ann')),
('anselm', ('ansel',)),
('antonia', ('antoinette',)),
('ara', ('arabella',)),
('archie', ('archibald',)),
('arnie', ('arnold',)),
('art', ('arthur',)),
('assene', ('asenath',)),
('babs', ('barbara',)),
('baldie', ('archibald',)),
('barbie', ('barbery', 'barbara')),
('barby', ('barbara',)),
('barney', ('bernard', 'barnard', 'barnabas')),
('bart', ('bartholomew',)),
('bartel', ('bartholomew',)),
('barth', ('bartholomew',)),
('bat', ('bartholomew',)),
('bea', ('beatrice',)),
('becca', ('rebecca',)),
('becky', ('rebecca',)),
('bell', ('belinda',)),
('bella', ('arabella', 'isabella', 'rosabella', 'isabelle')),
('belle', ('rosabel', 'arabella', 'isabella', 'isabelle', 'isabel', 'belinda')),
('ben', ('benedict', 'benjamin')),
('bennett', ('benedict',)),
('bennie', ('benedict',)),
('berny', ('bernard',)),
('berry', ('greenberry', 'littleberry')),
('bert', ('roberta', 'norbert', 'elbert', 'egbert', 'albert', 'herbert', 'delbert', 'hubert', 'alberta', 'gilbert')),
('bertie', ('alberta', 'bertha', 'roberta')),
('bess', ('elizabeth',)),
('beth', ('elizabeth',)),
('betsy', ('elizabeth',)),
('betty', ('elizabeth',)),
('bette', ('elizabeth',)),
('bias', ('tobias',)),
('biddie', ('obedience',)),
('biddy', ('bridget',)),
('bill', ('william',)),
('bird', ('albert',)),
('birdie', ('bertha', 'roberta')),
('bob', ('robert',)),
('bobbie', ('roberta', 'barbara')),
('brad', ('bradford',)),
('bridie', ('bridget',)),
('brie', ('bridget',)),
('bryan', ('brian',)),
('bryant', ('brian',)),
('burt', ('egbert',)),
('cal', ('caleb', 'calvin')),
('cam', ('campbell',)),
('cammy', ('camille',)),
('candy', ('candace',)),
('carl', ('charles',)),
('carlotta', ('charlotte',)),
('carol', ('caroline', 'carolyn')),
('carrie', ('caroline', 'carolyn')),
('casper', ('jasper',)),
('cass', ('caswell', 'cassandra')),
('cassie', ('cathleen', 'caroline', 'catherine', 'carolyn', 'cassandra')),
('cathy', ('katherina', 'kathleen', 'catherine', 'cathleen', 'katherine', 'katharine')),
('ceall', ('lucille',)),
('cecilia', ('sheila',)),
('celia', ('celeste', 'cecilia')),
('chad', ('charles',)),
('charles', ('carl',)),
('charlie', ('charles',)),
('charlotta', ('lotta', 'lotty')),
('charlotte', ('lotta', 'lotty')),
('chaz', ('charles',)),
('chet', ('chester',)),
('chick', ('charles',)),
('chris', ('christina', 'christian', 'christine', 'christiana', 'kristin', 'christopher', 'kristen')),
('christian', ('christopher',)),
('christopher', ('christian',)),
('christy', ('christiana', 'christine')),
('chuck', ('charles',)),
('cilla', ('priscilla', 'cicely')),
('cille', ('lucille',)),
('cindy', ('cynthia', 'lucinda')),
('cissy', ('clarissa', 'cecilia')),
('claire', ('clara',)),
('clara', ('clarissa', 'clarinda')),
('clarice', ('clara',)),
('clarissa', ('clara',)),
('clem', ('clementine',)),
('cliff', ('clifton', 'clifford')),
('clo', ('chloe',)),
('connie', ('constance',)),
('cora', ('corinne',)),
('cordelia', ('delia',)),
('cordy', ('cordelia',)),
('corny', ('cornelia',)),
('court', ('courtney',)),
    ('crissy', ('christina', 'christiana', 'christine')),
('curg', ('lecurgus',)),
('curt', ('curtis', 'courtney')),
('cy', ('cyrus',)),
('daisy', ('margaret',)),
('dan', ('sheridan', 'daniel')),
('danny', ('sheridan', 'daniel')),
('daph', ('daphne',)),
('daphie', ('daphne',)),
('dave', ('david',)),
('davey', ('david',)),
('davy', ('david',)),
('deannie', ('geraldine',)),
('deb', ('deborah', 'debra')),
('debbie', ('deborah', 'debra')),
('debby', ('deborah',)),
('dee', ('audrey', 'dorothy', 'delores')),
('deedee', ('deidre',)),
('del', ('delbert',)),
('delia', ('fidelia', 'cordelia')),
('delilah', ('dell', 'della')),
('dell', ('delilah',)),
('della', ('adela', 'delilah')),
('delphia', ('philadelphia',)),
('di', ('diana', 'diane')),
('diah', ('obadiah', 'jedediah')),
('dicey', ('edith',)),
('dick', ('zadock', 'melchizedek', 'richard')),
('dina', ('geraldine',)),
('dolly', ('dorothy',)),
('dolph', ('randolph', 'rudolph', 'adolph')),
('don', ('donald',)),
('dora', ('dorothea', 'eudora', 'theodora', 'dorothy', 'eldora', 'isadora', 'medora', 'doris')),
('dorothea', ('dorothy',)),
('dosia', ('theodosia',)),
('dot', ('dorothy',)),
('dotha', ('dorothy',)),
('dottie', ('dorothy',)),
('dotty', ('dorothy',)),
('drew', ('andrew',)),
('dyer', ('obadiah', 'jedediah')),
('eb', ('ebenezer',)),
('eben', ('ebenezer',)),
('ed', ('eduardo', 'edwin', 'edgar', 'edmond', 'edward', 'edmund')),
('edie', ('edith',)),
('edith', ('adaline',)),
('edny', ('edna',)),
('eileen', ('helena', 'helen')),
('elaine', ('eleanor', 'helena', 'helen')),
('eleanor', ('helena', 'ellie', 'ella', 'helen')),
('elenor', ('leonore', 'leonora')),
('eli', ('elijah', 'elisha', 'elias')),
('eliza', ('louise',)),
('ella', ('eleanor', 'luella', 'gabrielle')),
('ellen', ('eleanor', 'helena', 'helen')),
('ellis', ('elisha',)),
('eloise', ('heloise', 'louise')),
('elsie', ('elizabeth', 'alice')),
('emanuel', ('manuel',)),
('emily', ('amelia', 'emeline')),
('emma', ('emeline', 'emily')),
('emmanuel', ('immanuel',)),
('emmy', ('emeline',)),
('eph', ('ephraim',)),
('erin', ('aaron',)),
('erma', ('emma',)),
('ernie', ('ernest', 'earnest')),
('erwin', ('irwin',)),
('essy', ('estella',)),
('esther', ('hester',)),
('etta', ('henrietta', 'loretta')),
('eve', ('evelyn', 'genevieve')),
('ez', ('ezekiel',)),
('fanny', ('frances',)),
('fay', ('faith',)),
('ferdie', ('ferdinand',)),
('fidelia', ('delia',)),
('field', ('winfield',)),
('fina', ('josephine',)),
('flo', ('florence',)),
('flora', ('florence',)),
('floss', ('florence',)),
('flossie', ('florence',)),
('ford', ('clifford',)),
('fran', ('francis', 'francine', 'frances')),
('francie', ('francis', 'francine', 'frances')),
('frank', ('francis', 'franklin')),
('frankie', ('frances',)),
('franky', ('veronica',)),
('fred', ('frederick', 'winnifred', 'wilfred', 'alfred')),
('freddie', ('ferdinand', 'frederick', 'frieda')),
('fredric', ('frederick',)),
('frieda', ('alfreda',)),
('frish', ('frederick',)),
('fritz', ('frederick',)),
('frona', ('sophronia',)),
('fronia', ('sophronia',)),
('gabbie', ('gabrielle',)),
('gabby', ('gabriella', 'gabrielle')),
('gabe', ('gabriel',)),
('gabriella', ('ellie', 'ella')),
('gail', ('abigail',)),
('gatty', ('gertrude',)),
('gency', ('genevieve',)),
('gene', ('eugene',)),
('geoff', ('geoffrey', 'jeffrey')),
('georgiana', ('georgia',)),
('gerrie', ('geraldine',)),
('gerry', ('gerald', 'geraldine')),
('gertie', ('gertrude',)),
('gil', ('gilbert',)),
('gilbert', ('wilber',)),
('ginger', ('virginia',)),
('ginny', ('virginia',)),
('glory', ('gloria',)),
('green', ('greenberry',)),
('greg', ('gregory',)),
('greta', ('margaret',)),
('gum', ('montgomery',)),
('gus', ('augustine', 'augustus')),
('gusie', ('augustina', 'augusta')),
('gwen', ('gwendolyn',)),
('hal', ('harold', 'howard')),
('hank', ('henrietta', 'henry')),
('hannah', ('ann', 'susannah', 'anna')),
('hans', ('john',)),
('harold', ('harry',)),
('harry', ('harold', 'henry')),
('hattie', ('harriet',)),
('hatty', ('harriet',)),
('helen', ('eileen', 'ella', 'eleanor', 'ellie', 'elaine', 'elena', 'aileen')),
('heloise', ('eloise', 'lois')),
('henry', ('harry',)),
('herb', ('herbert',)),
('hessy', ('hester',)),
('hester', ('esther',)),
('hetty', ('mehitabel', 'hester')),
('hez', ('hezekiah',)),
('honey', ('honora',)),
('horatio', ('horace',)),
('howie', ('howard',)),
('hugh', ('hubert',)),
('hugo', ('hubert',)),
('humey', ('posthuma',)),
('ian', ('john',)),
('ib', ('isabella', 'isabelle')),
('iggy', ('ignatius',)),
('ike', ('isaac',)),
('irving', ('irvin',)),
('irwin', ('erwin',)),
('issy', ('isadora', 'isabella', 'isabelle')),
('ivan', ('john',)),
('izzy', ('isidore',)),
('jack', ('john',)),
('jackie', ('jacqueline',)),
('jake', ('jacob',)),
('jamie', ('james',)),
('jane', ('jessie', 'joanna', 'virginia')),
('janet', ('jane', 'jessie')),
('janie', ('jane',)),
('jasper', ('casper',)),
('jayhugh', ('john',)),
('jean', ('john', 'joanna')),
('jeanne', ('jeanette',)),
('jed', ('jedediah',)),
('jeff', ('geoffrey', 'jeffrey')),
('jeffrey', ('geoffrey',)),
('jehu', ('john',)),
('jemma', ('jemima',)),
('jennie', ('jean', 'virginia', 'jennifer')),
('jenny', ('jane', 'genevieve')),
('jeremy', ('jeremiah',)),
('jerry', ('gerald', 'geraldine', 'jeremiah')),
('jesse', ('jessica',)),
('jessica', ('jessie',)),
('jessie', ('jane', 'jessica')),
('jill', ('julia',)),
('jim', ('james',)),
('jimmie', ('james',)),
('jinsey', ('genevieve',)),
('jo', ('josephine',)),
('joan', ('joanna',)),
('joanna', ('jane', 'jean')),
('jody', ('joanna',)),
('joe', ('joshua', 'joseph')),
('joey', ('josephine', 'josophine', 'joseph')),
('johanna', ('joanna',)),
('john', ('ivan', 'ian', 'jonathan', 'johannes')),
('johnny', ('johannes', 'john', 'jonathon')),
('jon', ('jonathan',)),
('jonathan', ('johannes', 'nathaniel')),
('jorge', ('george',)),
('josephine', ('pheney',)),
('josey', ('josephine', 'josophine')),
('josh', ('joshua',)),
('joy', ('joyce',)),
('judie', ('judith',)),
('judy', ('judith',)),
('julie', ('julia',)),
('juliet', ('julia',)),
('justus', ('justin',)),
('karl', ('carl',)),
('kate', ('katherina', 'kathleen', 'catherine', 'katelyn', 'cathleen', 'katherine', 'katharine', 'katelin')),
('katharine', ('kathleen', 'cathleen', 'catherine', 'katherina')),
('kathleen', ('katherina', 'catherine', 'cathleen', 'katherine', 'katharine')),
('kathy', ('kathleen', 'kathryn', 'katherine')),
('katie', ('katherina', 'kathleen', 'catherine', 'cathleen', 'katherine', 'katharine')),
('katy', ('kathleen', 'katherine')),
('kay', ('katherina', 'kathleen', 'catherine', 'katelyn', 'cathleen', 'katherine', 'katharine', 'katelin')),
('ken', ('kenneth',)),
('kim', ('kimberly', 'kimberley')),
('kit', ('katherina', 'christian', 'kathleen', 'catherine', 'cathleen', 'christopher', 'katharine', 'katherine')),
('kittie', ('katherina', 'kathleen', 'catherine', 'cathleen', 'katherine', 'katharine')),
('kristi', ('kristine',)),
('ky', ('hezekiah',)),
('l.b.', ('littleberry',)),
('lanna', ('eleanor',)),
('lanson', ('alanson',)),
('larry', ('laurence', 'lawrence')),
('lars', ('lawrence',)),
('laura', ('laurinda', 'loretta', 'lorinda')),
('laurie', ('laura','laurinda', 'loretta', 'lorinda')),
('laurence', ('lawrence',)),
('lee', ('leroy',)),
('lem', ('lemuel',)),
('lena', ('helena', 'helen', 'madeline', 'arlene')),
('lenny', ('leonard',)),
('leo', ('leonard',)),
('leon', ('napoleon', 'leonidas', 'leonard', 'lionel')),
('leonora', ('eleanor',)),
('les', ('leslie', 'lester')),
('lester', ('leslie',)),
('lettice', ('letitia',)),
('lettie', ('letitia',)),
('letty', ('charlotte',)),
('lewis', ('louis',)),
('libby', ('elizabeth',)),
('lige', ('elijah',)),
('lil', ('lillian', 'delilah')),
('lila', ('delilah',)),
('lilly', ('lillian',)),
('linda', ('melinda', 'belinda')),
('lindy', ('melinda',)),
('link', ('lincoln',)),
('lisa', ('melissa', 'alice')),
('lish', ('elisha',)),
('little', ('littleberry',)),
('livia', ('olivia',)),
('liz', ('elizabeth',)),
('liza', ('elizabeth',)),
('lizabeth', ('elizabeth',)),
('lizzie', ('eliza', 'elizabeth')),
('lloyd', ('floyd',)),
('lodi', ('melody',)),
('lois', ('heloise', 'louise')),
('lola', ('delores',)),
('lolly', ('lillian',)),
('lon', ('alonzo', 'zebulon', 'lawrence')),
('lonzo', ('alonzo',)),
('lorie', ('loretta', 'lorraine')),
('lorrie', ('loretta', 'lorraine')),
('lorry', ('lawrence',)),
('lotta', ('charlotte',)),
('lottie', ('charlotte', 'carlotta')),
('lou', ('louis', 'louise')),
('louie', ('louis',)),
('louise', ('eloise', 'lois')),
('lucas', ('lucias',)),
('lucy', ('lucille', 'lucia', 'lucinda')),
('luella', ('ellie', 'ella')),
('luke', ('lucias', 'lucas')),
('lulu', ('louise',)),
('lum', ('columbus',)),
('lura', ('lurana',)),
('lynn', ('caroline', 'carolyn')),
('mabel', ('mehitabel',)),
('maddie', ('madeline',)),
('maddy', ('madeline', 'madelyn')),
('madge', ('margaret', 'madeline', 'margaretta', 'magdelina')),
('madie', ('madeline', 'madelyn')),
('mae', ('may', 'mary')),
('maggie', ('margaret', 'madeline')),
('maggy', ('margaret',)),
('maisie', ('margaret',)),
('mamie', ('mary',)),
('manda', ('amanda',)),
('mandy', ('amanda', 'miranda')),
('manny', ('emanuel', 'manuel')),
('manuel', ('immanuel', 'emanuel')),
('margaret', ('gretchen', 'daisy')),
('marge', ('margaret', 'marjorie', 'margaretta')),
('margie', ('margaret', 'marjorie')),
('margo', ('margaret',)),
('margy', ('margaret', 'marjorie')),
('maria', ('mariah', 'mary')),
('mariah', ('mary',)),
('marianna', ('marian',)),
('marie', ('mary',)),
('marietta', ('mary',)),
('marion', ('mary',)),
('mark', ('marcus',)),
('marty', ('martha', 'martin')),
('marv', ('marvin',)),
('mary', ('mariah', 'mitzi', 'miriam', 'marilyn', 'maureen')),
('mat', ('martha', 'matilda')),
('matt', ('mathew', 'matthew')),
('matthias', ('matthew',)),
('mattie', ('martha',)),
('matty', ('matilda',)),
('maud', ('madeline', 'matilda')),
('maureen', ('mary',)),
('maury', ('maurice',)),
('may', ('mary',)),
('meg', ('margaret', 'megan')),
('megan', ('margaret',)),
('mehitabel', ('mabel',)),
('mel', ('amelia', 'melissa', 'melinda')),
('melchizedek', ('zadock',)),
('melia', ('amelia',)),
('mena', ('almena',)),
('merci', ('mercedes',)),
('mercy', ('mercedes',)),
('mert', ('myrtle',)),
('merv', ('marvin', 'mervin')),
('mervyn', ('marvin',)),
('michael', ('mitchell',)),
('mickey', ('michelle', 'michael')),
('middy', ('madeline',)),
('midge', ('margaret',)),
('mike', ('michael',)),
('millie', ('camille', 'amelia', 'emeline')),
('milly', ('millicent', 'mildred', 'melissa', 'armilda')),
('mimi', ('jemima',)),
('mina', ('wilhelmina', 'minerva')),
('mindy', ('melinda',)),
('minerva', ('manerva',)),
('minnie', ('wilhelmina', 'minerva')),
('mira', ('elmira', 'miranda')),
('missy', ('melissa',)),
('mitch', ('mitchell',)),
('mitchell', ('michael',)),
('mitty', ('mehitabel', 'submit')),
('mitzi', ('mary', 'miriam')),
('molly', ('mary',)),
('mona', ('ramona',)),
('monty', ('lamont', 'montgomery')),
('morris', ('maurice',)),
('mort', ('mortimer',)),
('mose', ('moses',)),
('moss', ('moses',)),
('mur', ('muriel',)),
('myra', ('almira',)),
('myrt', ('myrtle',)),
('nabby', ('abigail',)),
('nace', ('ignatius',)),
('nada', ('nadine',)),
('nan', ('hannah', 'ann', 'anna', 'nancy')),
('nana', ('ann', 'anna')),
('nancy', ('ann', 'anna')),
('nannie', ('nancy',)),
('nanny', ('hannah', 'ann', 'anna')),
('nap', ('napoleon',)),
('nat', ('jonathan', 'nathaniel')),
('nate', ('nathan', 'nathaniel')),
('nathan', ('jonathan', 'nathaniel')),
('natty', ('nathaniel',)),
('ned', ('edward', 'edmund')),
('neil', ('cornelius',)),
('nell', ('eleanor', 'helena', 'helen')),
('nelle', ('cornelia',)),
('nellie', ('eleanor', 'helena', 'helen')),
('nelly', ('cornelia',)),
('nels', ('nelson',)),
('neppie', ('penelope',)),
('nerva', ('manerva', 'minerva')),
('nervie', ('manerva', 'minerva')),
('nessie', ('agnes',)),
('nettie', ('jeanette', 'henrietta', 'antonia', 'natalie', 'antoinette')),
('newt', ('newton',)),
('nib', ('isabella', 'isabelle')),
('nicey', ('vernisee',)),
('nick', ('dominic', 'nicholas', 'nicodemus')),
('nickie', ('nicholas',)),
('nicky', ('nicholas',)),
('nita', ('juanita',)),
('noel', ('nowell',)),
('nora', ('leonore', 'leonora', 'eleanor')),
('obe', ('obadiah',)),
('obed', ('obadiah', 'obedience')),
('obie', ('obadiah', 'obediah')),
('odo', ('odell',)),
('olive', ('olivia',)),
('ollie', ('oliver',)),
('ophi', ('theophilus',)),
('ora', ('aurelia',)),
('orlando', ('roland',)),
('ossy', ('oswald',)),
('oswald', ('waldo',)),
('ote', ('otis',)),
('ozzy', ('oswald',)),
('paddy', ('patrick',)),
('pam', ('pamela',)),
('pat', ('patience', 'patricia', 'patrick')),
('patsy', ('patrick', 'martha', 'patricia')),
('patty', ('patience', 'martha', 'patricia')),
('peggie', ('margaret',)),
('peggy', ('margaret',)),
('penny', ('penelope',)),
('perce', ('percival',)),
('percy', ('percival',)),
('pete', ('peter',)),
('peter', ('patrick',)),
('phelia', ('orphelia',)),
('phil', ('phillip', 'philip')),
('philadelphia', ('delpha',)),
('pokey', ('pocahontas',)),
('polly', ('paulina', 'mary')),
('pres', ('prescott',)),
('prissy', ('priscilla',)),
('prudy', ('prudence',)),
('rae', ('rachel',)),
('rafe', ('ralph',)),
('raff', ('raphael',)),
('randall', ('randolph',)),
('randy', ('randolph', 'miranda')),
('ray', ('rachel', 'raymond')),
('reba', ('rebecca',)),
('reg', ('reginald',)),
('reggie', ('reginald',)),
('reginald', ('reynold',)),
('rena', ('irene',)),
('rennie', ('irene',)),
('retta', ('henrietta', 'loretta')),
('reynold', ('reginald',)),
('rich', ('aldrich', 'richard')),
('richie', ('aldrich', 'richard')),
('rick', ('derick', 'eric', 'richard', 'ricardo', 'frederick')),
('ricky', ('derick', 'eric', 'broderick', 'richard')),
('rilla', ('avarilla',)),
('rita', ('margarita',)),
('rob', ('robert', 'roberto')),
('robbie', ('roberta',)),
('robby', ('robert',)),
('robin', ('robert',)),
('rod', ('roderick', 'rodney', 'broderick')),
('rodie', ('rhoda',)),
('roland', ('orlando',)),
('rolf', ('rudolph',)),
('rollo', ('rudolph',)),
('ron', ('ronald', 'veronica', 'aaron')),
('ronna', ('veronica',)),
('ronnie', ('ronald', 'veronica', 'aaron')),
('ronny', ('ronald', 'veronica')),
('rose', ('rosabel', 'roseanna', 'rosabella', 'roseann', 'roxanna', 'roxanne', 'rosalyn')),
('rowland', ('roland',)),
('rox', ('roxane',)),
('roxie', ('roxane',)),
('roy', ('leroy',)),
('roz', ('rosabel', 'rosabella', 'rosalyn')),
('rube', ('reuben',)),
('rudy', ('rudolph',)),
('rupert', ('robert',)),
('russ', ('russell',)),
('rusty', ('russell',)),
('sabe', ('isabella', 'isabelle')),
('sabra', ('isabella', 'isabelle')),
('sadie', ('sarah',)),
('sal', ('sarah', 'solomon', 'salvador')),
('sallie', ('sarah',)),
('salmon', ('solomon',)),
('sam', ('samson', 'samuel', 'sampson')),
('sammy', ('samuel',)),
('sandra', ('cassandra',)),
('sandy', ('alexander', 'cassandra', 'sandra')),
('sara', ('sarah',)),
('scott', ('prescott',)),
('sene', ('asenath',)),
('shelly', ('michelle', 'rachel', 'shelton')),
('si', ('silas', 'sylvester')),
('sid', ('sidney',)),
('sig', ('sigismund',)),
('silla', ('drusilla',)),
('sim', ('simeon',)),
('simon', ('simeon',)),
('sis', ('frances',)),
('sly', ('sylvester',)),
('sol', ('solomon',)),
('solly', ('solomon',)),
('sophia', ('sophronia',)),
('stacia', ('eustacia',)),
('stacy', ('eustacia',)),
('stella', ('estella',)),
('steve', ('stephen', 'steven')),
('steven', ('stephen',)),
('sue', ('susannah', 'suzanne', 'susan')),
('sukey', ('susannah',)),
('sully', ('sullivan',)),
('surry', ('sarah',)),
('susan', ('susannah',)),
('susanna', ('ann', 'anna')),
('susannah', ('hannah',)),
('susie', ('susannah', 'suzanne', 'susan')),
('suzanne', ('susannah',)),
('syl', ('sylvester',)),
('tabby', ('tabitha',)),
('tad', ('thaddeus',)),
('tavia', ('octavia',)),
('ted', ('theodore', 'edward', 'edmund')),
('teddy', ('theodore',)),
('terry', ('teresa', 'theresa', 'terence')),
('tess', ('teresa', 'theresa')),
('tessa', ('teresa', 'theresa')),
('tessie', ('teresa', 'theresa')),
('thad', ('thaddeus',)),
('theo', ('theodore', 'theodosia')),
('thom', ('thomas',)),
('thursa', ('theresa',)),
('tibbie', ('isabella', 'isabelle')),
('ticy', ('theresa',)),
('tilda', ('matilda',)),
('tillie', ('matilda',)),
('tim', ('timothy',)),
('timmy', ('timothy',)),
('tina', ('augustina', 'christine', 'christiana', 'christina', 'augusta', 'martina', 'ernestine')),
('tish', ('letitia',)),
('titia', ('letitia',)),
('tobe', ('tobias',)),
('toby', ('tobias',)),
('tom', ('thomas',)),
('tommy', ('thom', 'thomas')),
('tony', ('anthony',)),
('tori', ('victoria',)),
('torie', ('victoria',)),
('torri', ('victoria',)),
('torrie', ('victoria',)),
('tory', ('victoria',)),
('tracy', ('theresa',)),
('tricia', ('patricia',)),
('trina', ('katherina', 'kathleen', 'catherine', 'cathleen', 'katherine', 'katharine')),
('trisha', ('beatrice', 'patricia')),
('trixie', ('beatrice', 'patricia')),
('trudy', ('gertrude',)),
('val', ('valentina', 'valeri', 'valerie')),
('vallie', ('valentina',)),
('van', ('vanessa',)),
('vannie', ('vanessa',)),
('verna', ('laverne',)),
('vester', ('sylvester',)),
('vi', ('vivian',)),
('vic', ('victor',)),
('vick', ('victor',)),
('vicki', ('victoria',)),
('vin', ('vincent',)),
('vina', ('melvina',)),
('vince', ('vincent',)),
('viney', ('lavinia',)),
('virgie', ('virginia',)),
('virginia', ('jane',)),
('vonnie', ('veronica',)),
('waldo', ('oswald',)),
('wallie', ('wallace',)),
('wally', ('wallace','walter')),
('wat', ('walter',)),
('webb', ('webster',)),
('wendy', ('gwendolyn',)),
('wilber', ('gilbert',)),
('will', ('william', 'wilson', 'wilbur')),
('willie', ('wilhelmina', 'william', 'wilson', 'wilfred', 'wilbur')),
('wilma', ('wilhelmina',)),
('win', ('winfield',)),
('winnet', ('winifred',)),
('winnie', ('winifred', 'winnifred')),
('winny', ('winnifred', 'winfield')),
('zach', ('zachariah',)),
('zacharias', ('zachariah',)),
('zachary', ('zachariah',)),
('zadock', ('melchizedek',)),
('zeb', ('zebulon',)),
('zed', ('zedediah',)),
('zeke', ('isaac', 'ezekiel', 'zachariah')),
('zeph', ('zepaniah',)),
('zolly', ('solomon',)) ) )
# ('karen', ('katherina', 'kathleen', 'catherine', 'cathleen', 'katherine', 'katharine')),
for sNick in dNickProper:
#
dNickProper[ sNick ] = frozenset( dNickProper[ sNick ] )
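# freezing the tuples lets isNickName() below do cheap membership tests and
# ContainsAny() comparisons against immutable sets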
#
setMenNames = \
frozenset( (
'aaron', 'abraham', 'adam', 'adrian', 'ahmed', 'al', 'alan', 'albert',
'alberto', 'alec', 'alejandro', 'alex', 'alexander', 'alfonso',
'alfred', 'alfredo', 'allan', 'allen', 'alvin', 'anders', 'andre',
'andreas', 'andres', 'andrew', 'andy', 'anthony', 'antonio', 'arnold',
'arthur', 'austin', 'avery', 'barry', 'bart', 'ben', 'benjamin',
'bennett', 'bernard', 'bert', 'bill', 'billy', 'blair', 'blake', 'bob',
'bobby', 'brad', 'bradford', 'bradley', 'brandon', 'brendan', 'brent',
'bret', 'brett', 'brian', 'bruce', 'bryan', 'byron', 'calvin',
'cameron', 'campbell', 'carl', 'carlos', 'carlton', 'casey', 'cecil',
'cesar', 'chad', 'charles', 'charlie', 'chester', 'chris',
'christian', 'christopher', 'clarence', 'clark', 'claude', 'clay',
'clayton', 'clifford', 'clyde', 'cody', 'colin', 'conrad', 'corey',
'cory', 'craig', 'curtis', 'dale', 'dan', 'daniel', 'danny', 'darrell',
'darren', 'darryl', 'dave', 'david', 'davis', 'dean', 'denis',
'dennis', 'derek', 'derrick', 'diego', 'dimitri', 'dirk', 'dominic',
'don', 'donald', 'doug', 'douglas', 'duane', 'duncan', 'dustin',
'dwayne', 'dwight', 'dylan', 'earl', 'ed', 'eddie', 'edgar', 'edmund',
'eduardo', 'edward', 'edwin', 'eli', 'eliot', 'elliot', 'elliott',
'elmer', 'emanuel', 'emmanuel', 'enrique', 'eric', 'erik', 'ernest',
'ernesto', 'ethan', 'eugene', 'evan', 'everett', 'fernando',
'fitzgerald', 'floyd', 'forrest', 'francis', 'francisco', 'frank',
'franklin', 'fred', 'frederic', 'frederick', 'fredrick', 'gabriel',
'garrett', 'gary', 'gene', 'geoffrey', 'george', 'gerald', 'gerard',
'gilbert', 'giovanni', 'glen', 'glenn', 'gordon', 'graham', 'grant',
'greg', 'gregg', 'gregory', 'guido', 'guy', 'hamilton', 'hans',
'harold', 'harris', 'harrison', 'harry', 'harvey', 'henry', 'herbert',
'herman', 'howard', 'hugh', 'hunter', 'ian', 'ira', 'isaac', 'ivan',
'jack', 'jackson', 'jacob', 'jacques', 'james', 'jamie', 'jared',
'jason', 'jay', 'jeff', 'jeffery', 'jeffrey', 'jeremiah', 'jeremy',
'jerome', 'jerry', 'jesse', 'jesus', 'jim', 'jimmie', 'jimmy', 'joe',
'joel', 'johan', 'johannes', 'john', 'johnny', 'johnson', 'jon',
'jonah', 'jonathan', 'jonathon', 'jordan', 'jorge', 'jose', 'josef',
'joseph', 'josh', 'joshua', 'juan', 'julian', 'julio', 'julius',
'justin', 'karl', 'keith', 'ken', 'kendall', 'kenneth', 'kent',
'kerry', 'kevin', 'kirk', 'kurt', 'kyle', 'lance', 'larry', 'lars',
'laurence', 'lawrence', 'lee', 'leland', 'leo', 'leon', 'leonard',
'leroy', 'lester', 'lewis', 'liam', 'lloyd', 'lou', 'louis', 'lucas',
'luis', 'luke', 'mackenzie', 'malcolm', 'manuel', 'marc', 'marco',
'marcus', 'mario', 'mark', 'markus', 'marshall', 'martin', 'marvin',
'mathew', 'matt', 'matthew', 'maurice', 'max', 'maxwell', 'melvin',
'michael', 'micheal', 'miguel', 'mike', 'miles', 'milton', 'mitchell',
'mohamed', 'mohammad', 'mohammed', 'morgan', 'morris', 'murray',
'nathan', 'nathaniel', 'neal', 'neil', 'nelson', 'nicholas', 'nick',
'nicolas', 'noah', 'noel', 'norman', 'omar', 'oscar', 'owen', 'paolo',
'patrick', 'paul', 'pedro', 'perry', 'peter', 'phil', 'philip',
'philippe', 'phillip', 'pierce', 'pierre', 'rafael', 'ralph', 'ramon',
'randall', 'randolph', 'randy', 'raul', 'ray', 'raymond', 'reed',
'reginald', 'ricardo', 'richard', 'richardo', 'rick', 'ricky', 'rob',
'robert', 'roberto', 'roderick', 'rodney', 'roger', 'roland', 'rolf',
'ron', 'ronald', 'ronnie', 'ross', 'roy', 'russ', 'russell', 'ryan',
'sam', 'samuel', 'scot', 'scott', 'sean', 'sebastian', 'sergio',
'seth', 'shane', 'shawn', 'sheldon', 'sherman', 'sidney', 'simon',
'simone', 'spencer', 'stan', 'stanley', 'stefan', 'stephan', 'stephen',
'sterling', 'steve', 'steven', 'stewart', 'stuart', 'sydney', 'taylor',
'ted', 'terence', 'terrence', 'terry', 'theodore', 'thomas', 'tim',
'timothy', 'toby', 'todd', 'tom', 'tommy', 'tony', 'travis', 'trevor',
'troy', 'tyler', 'tyrone', 'vernon', 'victor', 'vincent', 'wade',
'walker', 'wallace', 'walter', 'ward', 'warren', 'wayne', 'wesley',
'will', 'willard', 'william', 'willie', 'wilson', 'winston',
'wolfgang', 'zachary') )
setWomenNames = \
frozenset( (
'abby', 'abigail', 'adele', 'adriana', 'adrienne', 'agnes', 'aileen',
'aimee', 'alana', 'alberta', 'alejandra', 'alessandra', 'alexa',
'alexandra', 'alexandre', 'alexandria', 'alexis', 'ali', 'alice',
'alicia', 'alisa', 'alison', 'alissa', 'allison', 'allyson', 'alma',
'alyce', 'alyssa', 'amanda', 'amber', 'amelia', 'ami', 'amy', 'ana',
'anastasia', 'andrea', 'angel', 'angela', 'angelica', 'angelina',
'angelique', 'angie', 'anita', 'ann', 'anna', 'annabel', 'annabelle',
'anne', 'annemarie', 'annette', 'annie', 'annika', 'antoinette',
'antonia', 'april', 'ariane', 'ariel', 'arielle', 'arlene', 'ashleigh',
'ashley', 'athena', 'audrey', 'autumn', 'barbara', 'beatrice', 'becky',
'belinda', 'bernadette', 'bernice', 'bertha', 'bessie', 'beth',
'bethany', 'betsy', 'betty', 'beverly', 'bianca', 'bonnie', 'brandi',
'brandy', 'brenda', 'bridget', 'brigitte', 'brittany', 'brittney',
'brooke', 'caitlin', 'camille', 'candace', 'candice', 'cara', 'carla',
'carmela', 'carmen', 'carol', 'carole', 'carolina', 'caroline',
'carolyn', 'carrie', 'carroll', 'cassandra', 'caterina', 'catharine',
'catherine', 'cathleen', 'cathy', 'cecelia', 'cecile', 'cecilia',
'celeste', 'celia', 'charlene', 'charlotte', 'chelsea', 'cher',
'cherie', 'cheryl', 'chloe', 'christa', 'christiane', 'christie',
'christina', 'christine', 'christy', 'cindy', 'claire', 'clara',
'clare', 'clarissa', 'claudia', 'colette', 'colleen', 'connie',
'constance', 'corinne', 'courtney', 'cristina', 'crystal', 'cynthia',
'daisy', 'dana', 'daniela', 'daniele', 'danielle', 'daphne', 'dara',
'darcy', 'darlene', 'dawn', 'deanna', 'debbie', 'deborah', 'debra',
'dee', 'deirdre', 'delores', 'dena', 'denise', 'desiree', 'diana',
'diane', 'dianne', 'dina', 'dolores', 'dominique', 'donna', 'doreen',
'doris', 'dorothy', 'edith', 'edna', 'eileen', 'elaine', 'eleanor',
'elena', 'elisa', 'elisabeth', 'elise', 'elissa', 'eliza',
'elizabeth', 'ella', 'ellen', 'ellyn', 'eloise', 'elsa', 'elsie',
'elyse', 'emilia', 'emilie', 'emily', 'emma', 'erica', 'erika',
'erin', 'estelle', 'esther', 'ethel', 'eugenia', 'eva', 'eve',
'evelyn', 'faith', 'fanny', 'fatima', 'fay', 'faye', 'felicia', 'fern',
'fiona', 'florence', 'fran', 'frances', 'francesca', 'francine',
'francois', 'francoise', 'gabriela', 'gabriele', 'gabriella',
'gabrielle', 'gail', 'gale', 'gayle', 'genevieve', 'georgia',
'georgina', 'geraldine', 'gertrude', 'gillian', 'gina', 'ginger',
'giovanna', 'gladys', 'glenda', 'gloria', 'grace', 'greta', 'gretchen',
'gwen', 'gwendolyn', 'hanna', 'hannah', 'harriet', 'hazel', 'heather',
'heidi', 'helen', 'helena', 'helene', 'hilary', 'hilda', 'hillary',
'holly', 'hope', 'ida', 'ilana', 'ilene', 'ines', 'ingrid', 'irene',
'iris', 'isabel', 'isabella', 'isabelle', 'jaclyn', 'jacqueline',
'jacquelyn', 'jaime', 'jamie', 'jana', 'jane', 'janelle', 'janet',
'janice', 'janine', 'janis', 'jasmine', 'jayne', 'jean', 'jeanette',
'jeanne', 'jeannette', 'jenna', 'jennie', 'jennifer', 'jenny',
'jessica', 'jessie', 'jill', 'jillian', 'jo', 'joan', 'joann',
'joanna', 'joanne', 'jocelyn', 'jodi', 'jody', 'joelle', 'johanna',
'josephine', 'joy', 'joyce', 'juanita', 'judith', 'judy', 'julia',
'juliana', 'julianne', 'julie', 'juliet', 'juliette', 'june',
'justine', 'kara', 'karen', 'karin', 'karla', 'katarina', 'kate',
'katharina', 'katharine', 'katherine', 'kathleen', 'kathryn', 'kathy',
'katia', 'katie', 'katrina', 'katy', 'kay', 'kayla', 'kelli',
'kellie', 'kelly', 'kelsey', 'kim', 'kimberley', 'kimberly',
'kirsten', 'kitty', 'krista', 'kristen', 'kristi', 'kristin',
'kristina', 'kristine', 'kristy', 'krystal', 'lara', 'larissa',
'latoya', 'laura', 'laurel', 'lauren', 'laurie', 'lea', 'leah',
'leanne', 'leigh', 'leila', 'leilani', 'lena', 'leslie', 'liana',
'lila', 'lillian', 'lillie', 'lily', 'linda', 'lindsay', 'lindsey',
'lisa', 'lise', 'liz', 'liza', 'lois', 'lora', 'loraine', 'loren',
'loretta', 'lori', 'lorraine', 'louisa', 'louise', 'lucia', 'lucie',
'lucille', 'lucy', 'luisa', 'lydia', 'lyn', 'lynda', 'lynette',
'lynn', 'lynne', 'madeleine', 'madeline', 'mae', 'magdalena',
'maggie', 'mara', 'marcia', 'margaret', 'margarita', 'margo',
'margot', 'marguerite', 'mari', 'maria', 'mariah', 'marian',
'mariana', 'marianna', 'marianne', 'marie', 'marilyn', 'marina',
'marion', 'marisa', 'marissa', 'marjorie', 'marla', 'marlene',
'marsha', 'marta', 'martha', 'martina', 'mary', 'maryam', 'maryann',
'maura', 'maureen', 'maxine', 'may', 'maya', 'meemie', 'meg', 'megan',
'meghan', 'mei', 'melanie', 'melinda', 'melissa', 'melody', 'mercedes',
'meredith', 'meryl', 'mia', 'michaela', 'michele', 'michelle',
'mildred', 'mindy', 'miranda', 'mireille', 'miriam', 'misty', 'moira',
'molly', 'mona', 'monica', 'monika', 'monique', 'muriel', 'myra',
'myrtle', 'nadia', 'nadine', 'nancy', 'nanette', 'naomi', 'natalia',
'natalie', 'natasha', 'nathalie', 'nell', 'nellie', 'new', 'nichole',
'nicola', 'nicole', 'nikki', 'nina', 'noelle', 'nora', 'norma', 'olga',
'oliver', 'olivia', 'paige', 'pam', 'pamela', 'patrice', 'patricia',
'patsy', 'patti', 'paula', 'paulette', 'pauline', 'pearl', 'peggy',
'penelope', 'penny', 'phyllis', 'polly', 'priscilla', 'rachael',
'rachel', 'rachelle', 'ramona', 'raquel', 'rebecca', 'rebekah',
'regina', 'rene', 'renee', 'rhonda', 'rita', 'roberta', 'robin',
'robyn', 'rochelle', 'rosa', 'rosalie', 'rose', 'rosemarie',
'rosemary', 'roxanne', 'ruby', 'ruth', 'sabina', 'sabine', 'sabrina',
'sally', 'samantha', 'sandra', 'sara', 'sarah', 'sasha', 'shana',
'shannon', 'shari', 'sharon', 'shauna', 'sheila', 'shelley', 'shelly',
'sheri', 'sherri', 'sherry', 'sheryl', 'shirley', 'silvia', 'sofia',
'sonia', 'sonja', 'sonya', 'sophia', 'sophie', 'stacey', 'stacy',
'stefanie', 'stella', 'stephanie', 'sue', 'susan', 'susana', 'susanna',
'susannah', 'susanne', 'suzan', 'suzanne', 'sybil', 'sylvia', 'tamara',
'tammy', 'tania', 'tanya', 'tara', 'tatiana', 'teresa', 'terese',
'teri', 'terri', 'terry', 'tessa', 'thelma', 'theresa', 'therese',
'tiffany', 'tina', 'toni', 'tonya', 'tracey', 'tracy', 'tricia',
'trisha', 'ursula', 'valerie', 'vanessa', 'vera', 'veronica', 'vicki',
'vickie', 'victoria', 'viola', 'violet', 'virginia', 'vivian', 'wanda',
'wendy', 'whitney', 'willie', 'wilma', 'winifred', 'xavier', 'yolanda',
'yvette', 'yvonne', 'zoe', 'zulema' ) )
dBegsNicks = \
dict(
ty = 'ty',
ed = 'ed',
jo = 'jo',
rod = 'rod',
wil = 'willie',
es = 'essy',
mac = ( 'mac', 'mack' ),
mc = ( 'mac', 'mack' ) )
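# 'is' is a Python keyword and cannot appear as a dict() keyword argument;
# it and 'max' are therefore added by subscript just below: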
# is = 'issy',
# max = ( 'maxine', 'max' )
dBegsNicks[ 'is' ] = 'issy'
dBegsNicks[ 'max' ] = ( 'maxine', 'max' )
dNicksBegs = getReverseDictCarefully( dBegsNicks )
dEndsNicks = \
dict(
field = 'field',
leen = ( 'lena', 'lynn' ),
lina = ( 'lena', 'lina', 'lynn' ),
lena = 'lina',
lene = 'lynn',
lyn = 'lynn',
rita = 'rita',
tina = 'tina',
tine = 'tina' )
dNicksEnds = getReverseDictCarefully( dEndsNicks )
dWithinsNicks = \
dict(
wood = ( 'woody', 'woodrow', 'elwood' ),
scott = 'scott' )
dNicksWithins = getReverseDictCarefully( dWithinsNicks )
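# Illustration only: getReverseDictCarefully (defined elsewhere in this
# package) is assumed to invert a one-to-many mapping such as dBegsNicks so
# that each fragment maps back to the keys that produce it. A minimal
# sketch of that inversion under that assumption -- the real helper may
# differ, and this function is not used below:
def _sketchReverseDict( dOrig ):
    dRev = {}
    for sKey, uVals in dOrig.items():
        tVals = uVals if isinstance( uVals, tuple ) else ( uVals, )
        for sVal in tVals:
            dRev.setdefault( sVal, [] ).append( sKey )
    return dRev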
def _testNames( sName1, sName2, dOthersNicks, dNicksOthers, fGetSlice ):
#
from Collect.Test import ContainsAny
from Iter.AllVers import iRange
#
if sName1 in dNicksOthers and \
sName2 in dNicksOthers and \
ContainsAny(
dNicksOthers[ sName1 ], dNicksOthers[ sName2 ] ):
#
raise Finished
#
#
for i in iRange( 2, 6 ):
#
if len( sName1 ) < i and len( sName2 ) < i: break
#
sName1Part = fGetSlice( sName1, i )
sName2Part = fGetSlice( sName2, i )
#
if sName1Part == sName2Part and \
sName1Part in dOthersNicks and \
( sName1 == sName1Part or sName2 == sName1Part ):
#
raise Finished
#
#
if sName1Part in dOthersNicks and \
sName2 in dNicksOthers and \
sName1Part in dNicksOthers[ sName2 ]:
#
raise Finished
#
if sName2Part in dOthersNicks and \
sName1 in dNicksOthers and \
sName2Part in dNicksOthers[ sName1 ]:
#
raise Finished
#
if sName1Part in dOthersNicks and \
sName2Part in dOthersNicks and \
( sName1 == sName1Part or sName2 == sName1Part ) and \
ContainsAny(
dOthersNicks[ sName1Part ], dOthersNicks[ sName2Part ] ):
#
raise Finished
#
def _getPartBeg( s, i ): return s[ : i ]
def _getPartEnd( s, i ): return s[ -i : ]
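# e.g. _getPartBeg( 'kathleen', 4 ) == 'kath' and
# _getPartEnd( 'kathleen', 4 ) == 'leen'; these feed the prefix/suffix
# probes in _testNames() above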
def isNickName( sName1, sName2 ):
#
from Collect.Query import get1stThatMeets
from Collect.Test import ContainsAny
from Dict.Get import getKeyIter
from String.Test import getItemFoundInString
#
sName1, sName2 = sName1.lower(), sName2.lower()
#
bNickNames = False
#
tHaveNicks = ( sName1 in dNickProper, sName2 in dNickProper )
#
try:
#
if tHaveNicks == ( False, False ):
#
pass # most common result, bypass other tests of tHaveNicks
#
elif tHaveNicks == ( True, True ):
#
setPropers1 = dNickProper[ sName1 ]
setPropers2 = dNickProper[ sName2 ]
#
if setPropers1 == setPropers2 or \
ContainsAny( setPropers1, setPropers2 ) or \
sName2 in dNickProper[ sName1 ] or \
sName1 in dNickProper[ sName2 ]:
#
raise Finished
#
#
elif tHaveNicks == ( True, False ):
#
if sName2 in dNickProper[ sName1 ]:
#
raise Finished
#
#
elif tHaveNicks == ( False, True ):
#
if sName1 in dNickProper[ sName2 ]:
#
raise Finished
#
#
#
        # _testNames raises Finished when the two names match as nicknames
        # (the except handler below then sets bNickNames to True)
        _testNames( sName1, sName2, dBegsNicks,    dNicksBegs, _getPartBeg )
        #
        _testNames( sName1, sName2, dEndsNicks,    dNicksEnds, _getPartEnd )
#
if sName1 in dNicksWithins:
#
def name2HasSubstring( sub ): return sub in sName2
#
if get1stThatMeets(
getKeyIter( dWithinsNicks ), name2HasSubstring ):
raise Finished
#
if sName2 in dNicksWithins:
#
def name1HasSubstring( sub ): return sub in sName1
#
if get1stThatMeets(
getKeyIter( dWithinsNicks ), name1HasSubstring ):
raise Finished
#
#
#
except Finished:
#
bNickNames = True
#
#
return bNickNames
def isWomensName( sName ):
#
return sName.lower() in setWomenNames
def isMensName( sName ):
#
return sName.lower() in setMenNames
def isNameMisSpelled( sName1, sName2 ):
#
from Iter.AllVers import lZip
from Collect.Query import getBegAndEndIfInOrder
#
bNameMisSpelled = True
#
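    # heuristic: both names are non-empty, not a male/female pair, at least
    # two letters long, within two letters of each other in length, share
    # their first or last half, and differ from their in-order common
    # characters (getBegAndEndIfInOrder) by at most one letter on the
    # shorter name -- and on the longer one too, unless the shorter is a
    # prefix of it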
# bPrint= ( sName1, sName2 ) == ( 'John', 'Joel' )
#
try:
#
if not ( sName1 and sName2 ):
# if bPrint: print3( 'got blank' )
raise Finished
#
#
if ( isWomensName( sName1 ) and isMensName( sName2 ) ) or \
( isWomensName( sName2 ) and isMensName( sName1 ) ):
#
raise Finished
#
#
iLen1, iLen2 = len( sName1 ), len( sName2 )
#
# if bPrint: print3( 'sName1, sName2:', sName1, sName2 )
#
# if bPrint: print3( 'iLen1, iLen2:', iLen1, iLen2 )
#
if min( iLen1, iLen2 ) < 2:
# if bPrint: print3( 'too short' )
raise Finished
#
#
if abs( iLen1 - iLen2 ) > 2:
# if bPrint: print3( 'different lengths' )
raise Finished
#
#
iHalfLen = min( iLen1, iLen2 ) // 2
#
# if bPrint: print3( 'iHalfLen:', iHalfLen )
#
sName1, sName2 = sName1.lower(), sName2.lower()
#
#print3( 'iLen1, iLen2, iHalfLen:', iLen1, iLen2, iHalfLen )
#print3( 'sName1, sName2:', sName1, sName2 )
#
if not ( sName1[ : iHalfLen ] == sName2[ : iHalfLen ] or
sName1[ -iHalfLen : ] == sName2[ -iHalfLen : ] ):
#
# if bPrint: print3( 'halfs do not match' )
raise Finished
#
#
sCommon = getBegAndEndIfInOrder( sName1, sName2 )
#
# if bPrint: print3( 'sCommon:', sCommon )
#
    # if bPrint: print3( 'min( iLen1, iLen2 ):', min( iLen1, iLen2 ) )
#
# if bPrint: print3( 'len( sCommon ):', len( sCommon ) )
#
if min( iLen1, iLen2 ) - len( sCommon ) > 1:
# if bPrint: print3( 'not enough in common with shorter' )
raise Finished
#
#
# if bPrint: print3( 'min( iLen1, iLen2 ) - len( sCommon ):', min( iLen1, iLen2 ) - len( sCommon ) )
#
# if bPrint: print3( 'max( iLen1, iLen2 ):', max( iLen1, iLen2 ) )
#
lLenNames = lZip( ( iLen1, iLen2 ), ( sName1, sName2 ) )
#
lLenNames.sort()
#
sShort = lLenNames[0][1]
sLong = lLenNames[1][1]
#
# if bPrint: print3( 'sLong, sShort:', sLong, sShort )
#
if max( iLen1, iLen2 ) - len( sCommon ) > 1 and \
not sLong.startswith( sShort ):
# if bPrint: print3( 'not enough in common with longer' )
raise Finished
#
# if bPrint: print3( 'max( iLen1, iLen2 ) - len( sCommon ):', max( iLen1, iLen2 ) - len( sCommon ) )
#
except Finished:
#
bNameMisSpelled = False
#
#
return bNameMisSpelled
def getFirstMiddleLast( s ):
#
lParts = s.split()
#
sFirst, sMiddle, sLast = '', '', ''
#
if lParts:
#
sFirst = lParts[ 0 ]
sLast = lParts[ -1 ]
#
if len( lParts ) > 2: sMiddle = ' '.join( lParts[ 1 : -1 ] )
#
#
return sFirst, sMiddle, sLast
def _getInitialOffFront( s ):
#
while s and s.strip()[ 1 : 3 ] == '. ':
#
s = s.strip()[ 2 : ]
#
return s.strip()
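# e.g. _getInitialOffFront( 'W. Bill Young' ) == 'Bill Young' -- leading
# single-letter initials ('X. ') are peeled off one at a time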
def getLastGoBySkipNick( s ):
#
sFirst, sMiddle, sLast = getFirstMiddleLast( s )
#
sGoBy = sFirst
#
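    # a five-character value with dots at offsets 1 and 4 is a pair of
    # initials such as 'G. K.' and is kept as the go-by unchanged
    # (getLastFirstTitle below applies the same guard)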
if len( sFirst ) == 5 and sFirst[1] == '.' and sFirst[4] == '.':
#
pass
#
elif len( sFirst ) == 2 and sFirst.endswith( '.' ):
#
if sMiddle:
sGoBy = _getInitialOffFront( sMiddle )
else:
sGoBy = _getInitialOffFront( sLast )
#
#
return sLast, sGoBy
def getGoBySkipNick( s ):
#
sLast, sGoBy = getLastGoBySkipNick( s )
#
return sGoBy
def getGoByNickOK( s ):
#
from String.Get import getTextWithin
#
sGoBy = getGoBySkipNick( s )
#
if '"' in s:
#
sGoByMaybe = getTextWithin( s, '"','"' )
#
if sGoByMaybe: sGoBy = sGoByMaybe
#
return sGoBy
def getLastFirstTitle( s ):
#
from Iter.AllVers import iFilter, tMap
from String.Split import getWhiteCleaned
from String.Get import getStripped
from String.Names import getGoBySkipNick
#
s = getWhiteCleaned( s.replace( '.', '. ' ) )
#
# print3( s )
tParts = tMap( getStripped, s.split( ',' ) )
#
#print3( tParts )
sFirst = ' '.join( tParts[ 1 : ] )
sLast = tParts[ 0 ]
sTitle = ''
#
if len( tParts ) == 3:
sTitle = tParts[ 1 ]
sFirst = ' '.join( tParts[ 2 : ] )
#
if sLast.endswith( ' Jr.' ):
#
sTitle = ', '.join( iFilter( bool, ( sTitle, 'Jr.' ) ) )
sLast = sLast[ : -4 ].strip()
#
#
if len( sFirst ) == 5 and sFirst[1] == '.' and sFirst[4] == '.':
#
pass
#
else:
#
sFirst = getGoBySkipNick( sFirst )
#
#
return sLast, sFirst, sTitle
if __name__ == "__main__":
#
lProblems = []
#
from Utils.Result import sayTestResult
#
    #print3( isNickName( 'mike', 'Michael' ) )
tLastGoBy = getLastGoBySkipNick( 'Stephanie Herseth Sandlin' )
#
if tLastGoBy != ('Sandlin', 'Stephanie'):
#
#
lProblems.append( 'getLastGoBySkipNick()' )
#
#
if ( not isNickName( 'bill', 'william' ) or
not isNickName( 'jill', 'julie' ) or
not isNickName( 'susan', 'sue' ) or
not isNickName( 'sue', 'susan' ) or
not isNickName( 'rick', 'dick' ) or
not isNickName( 'rick', 'richard' ) or
not isNickName( 'ed', 'edward' ) or
not isNickName( 'tina', 'valentina' ) or
not isNickName( 'woody','woodrow' ) or
not isNickName( 'lena', 'lynn' ) or
not isNickName( 'lynn', 'kathleen' ) or
not isNickName( 'steven', 'stephen' ) or
not isNickName( 'carol', 'carolyn' ) or
not isNickName( 'julie', 'julia' ) or
not isNickName( 'kristi', 'Kristine') or
not isNickName( 'mike', 'Michael' ) or
isNickName( 'bill', 'ralph' ) or
isNickName( 'chrstine', 'christine' ) ):
#
lProblems.append( 'isNickName()' )
#
#
if not isNameMisSpelled( 'kevin', 'kevn' ):
#
lProblems.append( 'isNameMisSpelled() kevin/kevn' )
#
#
if not isNameMisSpelled( 'Kimberley', 'Kimberly' ):
#
lProblems.append( 'isNameMisSpelled() Kimberley/Kimberly' )
#
if not isNameMisSpelled( 'Natalie', 'Nathalie' ):
#
lProblems.append( 'isNameMisSpelled() Natalie, Nathalie' )
#
if not isNameMisSpelled( 'Panayiotis', 'Panayotis' ):
#
lProblems.append( 'isNameMisSpelled() Panayiotis, Panayotis' )
#
if not isNameMisSpelled( 'Chrstine', 'Christine' ):
#
lProblems.append( 'isNameMisSpelled() Chrstine, Christine' )
#
if not isNameMisSpelled( 'Tomas', 'Tom' ):
#
lProblems.append( 'isNameMisSpelled() Tomas/Tom' )
#
if not isNameMisSpelled( 'Alison', 'Allison' ):
#
lProblems.append( 'isNameMisSpelled() Alison/Allison' )
#
if not isNameMisSpelled( 'Nanay', 'Nancy' ):
#
lProblems.append( 'isNameMisSpelled() Nanay/Nancy' )
#
if not isNameMisSpelled( 'Chistopher', 'Christopher' ):
#
lProblems.append( 'isNameMisSpelled() Chistopher/Christopher' )
#
if not isNameMisSpelled( 'Ann', 'Anne' ):
#
lProblems.append( 'isNameMisSpelled() Ann/Anne' )
#
if not isNameMisSpelled( 'Carel', 'Carol' ):
#
lProblems.append( 'isNameMisSpelled() Carol/Carel' )
#
if not isNameMisSpelled( 'Eric', 'Erik' ):
#
lProblems.append( 'isNameMisSpelled() Eric/Erik' )
#
if not isNameMisSpelled( 'Jeegar', 'Jeeger' ):
#
lProblems.append( 'isNameMisSpelled() Jeegar/Jeeger' )
#
if not isNameMisSpelled( 'Georgianna', 'Georgiana' ):
#
lProblems.append( 'isNameMisSpelled() Georgianna/Georgiana' )
#
#
if not isNameMisSpelled( 'Jacqueline', 'Jaqueline' ):
#
lProblems.append( 'isNameMisSpelled() Jacqueline/Jaqueline' )
#
if not isNameMisSpelled( 'Josep', 'Joseph' ):
#
lProblems.append( 'isNameMisSpelled() Josep/Joseph' )
#
if not isNameMisSpelled( 'Beverley', 'Beverly' ):
#
lProblems.append( 'isNameMisSpelled() Beverley/Beverly' )
#
if not isNameMisSpelled( 'Heid', 'Heidi' ):
#
lProblems.append( 'isNameMisSpelled() Heid/Heidi' )
#
if not isNameMisSpelled( 'Christine', 'Christina' ):
#
lProblems.append( 'isNameMisSpelled() Christine/Christina' )
#
if not isNameMisSpelled( 'Kimberley', 'Kimberly' ):
#
lProblems.append( 'isNameMisSpelled() Kimberley/Kimberly' )
#
if not isNameMisSpelled( 'Paul', 'Paulo' ):
#
lProblems.append( 'isNameMisSpelled() Paul/Paulo' )
#
if not isNameMisSpelled( 'Dawn', 'Daun' ):
#
lProblems.append( 'isNameMisSpelled() Dawn/Daun' )
#
if not isNameMisSpelled( 'Takao', 'Taka' ):
#
lProblems.append( 'isNameMisSpelled() Takao/Taka' )
#
if not isNameMisSpelled( 'Paul', 'Paulk' ):
#
lProblems.append( 'isNameMisSpelled() Paul/Paulk' )
#
#
#
#
if isNameMisSpelled( 'John', 'Joel' ):
#
lProblems.append( 'isNameMisSpelled() John/Joel' )
#
if isNameMisSpelled( 'Kelsey', 'Kelly' ):
#
lProblems.append( 'isNameMisSpelled() Kelsey/Kelly' )
#
if isNameMisSpelled( 'Mark', 'Mary' ):
#
lProblems.append( 'isNameMisSpelled() Mark/Mary' )
#
if isNameMisSpelled( 'Maria', 'Maya' ):
#
lProblems.append( 'isNameMisSpelled() Maria/Maya' )
#
if isNameMisSpelled( 'John', 'Joan' ):
#
lProblems.append( 'isNameMisSpelled() John/Joan' )
#
if isNameMisSpelled( 'Jan', 'Ryan' ):
#
lProblems.append( 'isNameMisSpelled() Jan/Ryan' )
#
#
if getFirstMiddleLast(
'William Jefferson Clinton' ) != \
( 'William', 'Jefferson', 'Clinton' ):
#
lProblems.append( 'getFirstMiddleLast()' )
#
#
if getGoBySkipNick( 'L. Ron Hubbard' ) != 'Ron':
#
lProblems.append( 'getGoBySkipNick()' )
#
#
s = 'Paul "Tex" Yearout'
#
if getGoBySkipNick( s ) != 'Paul':
#
lProblems.append( 'getGoBySkipNick() got nick' )
#
#
if getGoByNickOK( s ) != 'Tex':
#
lProblems.append( 'getGoByNickOK() got nick' )
#
#
s = 'C. W. Bill Young'
#
if getGoBySkipNick( s ) != 'Bill':
#
lProblems.append( 'getGoBySkipNick() two initials' )
#
#
if getLastGoBySkipNick( s ) != ( 'Young', 'Bill' ):
#
lProblems.append( 'getLastGoBySkipNick() two initials' )
#
#
if getLastFirstTitle(
'Kratovil, Jr., Frank M.' ) != ( 'Kratovil', 'Frank', 'Jr.' ):
#
print3( getLastFirstTitle( 'Kratovil, Jr., Frank M.' ) )
lProblems.append( 'getLastFirstTitle() Kratovil, Jr., Frank M.' )
#
#
if getLastFirstTitle( 'Kagen, Steve' ) != ( 'Kagen', 'Steve', '' ):
#
lProblems.append( 'getLastFirstTitle() Kagen, Steve' )
#
#
if getLastFirstTitle( 'Pascrell Jr., Bill' ) != ( 'Pascrell', 'Bill', 'Jr.' ):
#
print3( getLastFirstTitle( 'Pascrell Jr., Bill' ) )
lProblems.append( 'getLastFirstTitle() Pascrell Jr., Bill' )
#
#
if getLastFirstTitle( 'Barrett, J.Gresham' ) != ( 'Barrett', 'Gresham', '' ):
#
print3( getLastFirstTitle( 'Barrett, J.Gresham' ) )
lProblems.append( 'getLastFirstTitle() Barrett, J.Gresham' )
#
#
if getLastFirstTitle( 'Butterfield, G.K.' ) != ( 'Butterfield', 'G. K.', '' ):
#
print3( getLastFirstTitle( 'Butterfield, G.K.' ) )
lProblems.append( 'getLastFirstTitle() Butterfield, G.K.' )
#
    # Patricia and Patrick Forster
#
if isMensName( "Patricia" ):
#
lProblems.append( 'isMensName( "Patricia" )' )
#
#
if isWomensName( "Patrick" ):
#
lProblems.append( 'isWomensName( "Patrick" )' )
#
#
if isNameMisSpelled( "Patricia", "Patrick" ):
#
lProblems.append( 'isNameMisSpelled( "Patricia", "Patrick" )' )
#
#
if isNickName( "Patricia", "Patrick" ):
#
lProblems.append( 'isNickName( "Patricia", "Patrick" )' )
#
#
#
#
sayTestResult( lProblems )
|
netvigator/myPyPacks
|
pyPacks/String/Names.py
|
Python
|
gpl-2.0
| 56,111
|
from model_mommy import mommy
from django.test import TestCase
from ..models import Department, Employee
class DepartmentTestMommy(TestCase):
"""Department's modle test case."""
def test_department_creation_mommy(self):
"""Test create department's model."""
new_department = mommy.make('employees.Department')
self.assertTrue(isinstance(new_department, Department))
self.assertEqual(new_department.__str__(), new_department.name)
class EmployeeTestMommy(TestCase):
"""Employee's model test cazse."""
def test_employee_creation_mommy(self):
"""Test create department's model."""
new_employee = mommy.make('employees.Employee')
self.assertTrue(isinstance(new_employee, Employee))
self.assertEqual(new_employee.__str__(), '%s %s' % (new_employee.first_name, new_employee.last_name))
|
maurobaraldi/ll_interview_application
|
luizalabs/employees/tests/tests_models.py
|
Python
|
gpl-3.0
| 866
|
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='django-cms-bootstrap-templates',
version='0.0.1',
author=u'Arne Schauf',
author_email='python.asmaps.de',
packages=['cms_bootstrap_templates'],
url='https://github.com/asmaps/django-cms-bootstrap-templates',
license='MIT licence, see LICENCE file',
description='A set of bootstrap3 templates for use with django-cms',
long_description=open('README.md').read(),
zip_safe=False,
include_package_data=True
)
|
asmaps/django-cms-bootstrap-templates
|
setup.py
|
Python
|
mit
| 520
|
import os
from setuptools import setup, find_packages
__VERSION__ = "18.01.0"
with open('README.md') as f:
long_description = f.read()
setup(
name='dice_tools',
version=__VERSION__,
author='DICEhub',
author_email='info@dicehub.com',
description='DICE application tools',
long_description=long_description,
url='http://dicehub.com',
packages = find_packages(),
install_requires=[
'PyYAML',
'py-lz4framed',
'msgpack-python'],
)
|
dicehub/dice_tools
|
setup.py
|
Python
|
mit
| 493
|
# encoding: utf-8
from django.urls import reverse
from rest_framework import serializers
from entity.serializers import DetailSerializerV2
from externaltools.models import ExternalTool
from mainsite.serializers import StripTagsCharField
from mainsite.utils import OriginSetting
class ExternalToolSerializerV2(DetailSerializerV2):
name = StripTagsCharField(max_length=254)
clientId = StripTagsCharField(max_length=254, source='client_id')
class Meta(DetailSerializerV2.Meta):
model = ExternalTool
# apispec_definition = ('ExternalTool', {})
def to_representation(self, instance):
representation = super(ExternalToolSerializerV2, self).to_representation(instance)
representation['launchpoints'] = {
lp.launchpoint: {
"url": "{}{}".format(OriginSetting.HTTP, reverse("v2_api_externaltools_launch", kwargs=dict(
launchpoint=lp.launchpoint,
entity_id=lp.cached_externaltool.entity_id
))),
"launchUrl": lp.launch_url,
"label": lp.label,
"iconUrl": lp.icon_url
} for lp in instance.cached_launchpoints()
}
return representation
class ExternalToolLaunchSerializerV2(DetailSerializerV2):
launchUrl = serializers.URLField(source='launch_url')
launchData = serializers.DictField(source='generate_launch_data')
|
concentricsky/badgr-server
|
apps/externaltools/serializers_v2.py
|
Python
|
agpl-3.0
| 1,423
|
'''
Created on 21.07.2013
@author: bronikkk
'''
import re
from ti.sema import *
def findReModule():
import config
importer = config.data.importer
return importer.importedFiles['re']
def findReName(name):
module = findReModule()
var = module.getScope().findName(name)
assert len(var.nodeType) == 1
return list(var.nodeType)[0]
def quasiCompile(params, **kwargs):
try:
pattern = re.compile(params[0].value)
cls = findReName(getPatternClassName())
if not cls:
return set()
res = cls.getClassInstance()
res.data = pattern
return {res}
except:
return set()
def quasiMatch(params, **kwargs):
try:
match = params[0].data.match(params[1].value)
cls = findReName(getMatchClassName())
if not cls:
return set()
res = cls.getClassInstance()
res.data = match
return {res}
except:
return set()
def quasiGroups(params, **kwargs):
try:
groups = params[0].data.groups()
res = TupleSema()
res.elems = [set()]
for elem in groups:
res.elems.append({LiteralValueSema(elem)})
return {res}
except:
return set()
functions = [
['compile', quasiCompile, 1],
]
variables = [
]
modules = [
]
def getPatternClassName():
return 'SRE_Pattern'
patternClass = (
getPatternClassName(),
[
['match', quasiMatch, 2],
],
[
]
)
def getMatchClassName():
return 'SRE_Match'
matchClass = (
getMatchClassName(),
[
['groups', quasiGroups, 1],
],
[
]
)
classes = [
patternClass,
matchClass
]
def getAll():
return (functions, variables, modules, classes)
|
bronikkk/tirpan
|
std/re_.py
|
Python
|
gpl-3.0
| 2,038
|
__RCSID__ = "$Id$"
import socket
import select
import time
import os
from DIRAC.Core.DISET.private.Transports.BaseTransport import BaseTransport
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
class PlainTransport( BaseTransport ):
def initAsClient( self ):
timeout = None
if 'timeout' in self.extraArgsDict:
timeout = self.extraArgsDict[ 'timeout' ]
try:
self.oSocket = socket.create_connection(self.stServerAddress, timeout)
except socket.error as e:
if e.args[0] != 115:
return S_ERROR( "Can't connect: %s" % str( e ) )
    # connect in progress (errno 115 == EINPROGRESS): wait for writability
oL = select.select( [], [ self.oSocket ], [], self.extraArgsDict[ 'timeout' ] )[1]
if len( oL ) == 0:
self.oSocket.close()
return S_ERROR( "Connection timeout" )
errno = self.oSocket.getsockopt( socket.SOL_SOCKET, socket.SO_ERROR )
if errno != 0:
return S_ERROR( "Can't connect: %s" % str( ( errno, os.strerror( errno ) ) ) )
self.remoteAddress = self.oSocket.getpeername()
return S_OK( self.oSocket )
def initAsServer( self ):
if not self.serverMode():
raise RuntimeError( "Must be initialized as server mode" )
try:
self.oSocket = socket.socket( socket.AF_INET6, socket.SOCK_STREAM )
except socket.error:
# IPv6 is probably disabled on this node, try IPv4 only instead
self.oSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
if self.bAllowReuseAddress:
self.oSocket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
self.oSocket.bind( self.stServerAddress )
self.oSocket.listen( self.iListenQueueSize )
return S_OK( self.oSocket )
def close( self ):
gLogger.debug( "Closing socket" )
try:
self.oSocket.shutdown( socket.SHUT_RDWR )
except:
pass
self.oSocket.close()
def setClientSocket( self, oSocket ):
if self.serverMode():
raise RuntimeError( "Mustbe initialized as client mode" )
self.oSocket = oSocket
if 'timeout' in self.extraArgsDict:
self.oSocket.settimeout( self.extraArgsDict[ 'timeout' ] )
self.remoteAddress = self.oSocket.getpeername()
def acceptConnection( self ):
#HACK: Was = PlainTransport( self )
oClientTransport = PlainTransport( self.stServerAddress )
oClientSocket, stClientAddress = self.oSocket.accept()
oClientTransport.setClientSocket( oClientSocket )
return S_OK( oClientTransport )
def _read( self, bufSize = 4096, skipReadyCheck = False ):
start = time.time()
timeout = False
if 'timeout' in self.extraArgsDict:
timeout = self.extraArgsDict[ 'timeout' ]
while True:
if timeout:
if time.time() - start > timeout:
return S_ERROR( "Socket read timeout exceeded" )
try:
data = self.oSocket.recv( bufSize )
return S_OK( data )
except socket.error as e:
        if e.args[0] == 11:  # errno 11 == EAGAIN: socket not ready, retry
time.sleep( 0.001 )
else:
return S_ERROR( "Exception while reading from peer: %s" % str( e ) )
except Exception as e:
return S_ERROR( "Exception while reading from peer: %s" % str( e ) )
def _write( self, buffer ):
sentBytes = 0
timeout = False
if 'timeout' in self.extraArgsDict:
timeout = self.extraArgsDict[ 'timeout' ]
if timeout:
start = time.time()
while sentBytes < len( buffer ):
try:
if timeout:
if time.time() - start > timeout:
return S_ERROR( "Socket write timeout exceeded" )
sent = self.oSocket.send( buffer[ sentBytes: ] )
if sent == 0:
return S_ERROR( "Connection closed by peer" )
if sent > 0:
sentBytes += sent
except socket.error as e:
        if e.args[0] == 11:  # errno 11 == EAGAIN: socket not ready, retry
time.sleep( 0.001 )
else:
return S_ERROR( "Exception while sending to peer: %s" % str( e ) )
except Exception as e:
return S_ERROR( "Error while sending: %s" % str( e ) )
return S_OK( sentBytes )
def checkSanity( *args, **kwargs ):
return S_OK( {} )
def delegate( delegationRequest, kwargs ):
"""
Check delegate!
"""
return S_OK()
|
arrabito/DIRAC
|
Core/DISET/private/Transports/PlainTransport.py
|
Python
|
gpl-3.0
| 4,193
|
""" configuration module for awsu, contains two objects """
import boto3
import sqlite3
import logging
import getpass
import datetime
import configparser
import uuid
import requests
import json
from dateutil.tz import tzutc
from urllib.parse import urlencode, quote_plus
from os import environ
from bs4 import BeautifulSoup
import base64
from lxml import etree
class Credential(object):
""" credential class """
def __init__(self):
self.conn = sqlite3.connect(environ.get('HOME') + '/.aws/config.db')
self.initialize_database('credentials')
def initialize_database(self, table):
cur = self.conn.cursor()
tables = cur.execute(
"SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        if table not in [t[0] for t in tables]:  # also handles a fresh database with no tables yet
stmt = '''CREATE TABLE %s(
profile text,
access_key text,
secret_key text,
session_token text,
expiration text)
            ''' % table
cur.execute(stmt)
self.conn.commit()
def get_session(self, profile="default"):
if profile is None:
profile = "default"
cur = self.conn.cursor()
self.session = cur.execute(
"SELECT * FROM credentials WHERE profile=? LIMIT 1", (profile,))
self.session = self.session.fetchone()
if self.session is None or self.is_expired():
if self.is_expired():
cur.execute("DELETE FROM credentials WHERE profile=?", (profile,))
self.conn.commit()
creds = self.get_credentials(profile)
cur.execute("INSERT INTO credentials VALUES(?,?,?,?,?)", creds)
self.conn.commit()
return {
'AWS_ACCESS_KEY_ID': creds[1],
'AWS_SECRET_ACCESS_KEY': creds[2],
'AWS_SESSION_TOKEN': creds[3],
'AWS_SECURITY_TOKEN': creds[3]
}
else:
return {
'AWS_ACCESS_KEY_ID': self.session[1],
'AWS_SECRET_ACCESS_KEY': self.session[2],
'AWS_SESSION_TOKEN': self.session[3],
'AWS_SECURITY_TOKEN': self.session[3]
}
def get_credentials(self, profile="default"):
""" return aws profile environment variables """
if profile is None:
profile = 'default'
# get session token
if profile != 'saml':
session = boto3.Session(profile_name=profile)
sts = boto3.client('sts')
user = User()
token = getpass.getpass("Enter MFA Code : ")
if profile == "default":
res = sts.get_session_token(
DurationSeconds=3600,
SerialNumber=user.mfa,
TokenCode=token
)
elif profile == "saml":
config_file = configparser.RawConfigParser()
config_file.read(environ.get('HOME') + '/.aws/config')
if not config_file.has_section(profile):
config_file.add_section(profile)
username = str(input("Google Email : "))
idp_id = str(input('IDP ID : '))
sp_id = str(input('SP ID : '))
else:
username = config_file.get(profile, 'username')
idp_id = config_file.get(profile, 'idpid')
sp_id = config_file.get(profile, 'spid')
passwd = getpass.getpass('Password : ')
google = GoogleSAML(username, passwd, idp_id, sp_id)
google.auth()
saml_res = google.get_saml_response()
doc = etree.fromstring(base64.b64decode(saml_res))
roles = google.parse_roles(doc)
role_arn, provider = google.pick_one(roles)
config_file.set(profile, 'username', google.username)
config_file.set(profile, 'idpid', google.idp_id)
config_file.set(profile, 'spid', google.sp_id)
config_file.set(profile, 'role_arn', role_arn)
config_file.set(profile, 'provider', provider)
            config_file.set(profile, 'durations', str(google.duration_seconds))
with open(environ.get('HOME') + '/.aws/config', 'w+') as f:
try:
config_file.write(f)
finally:
f.close()
print("Assuming " + config_file.get(profile, 'role_arn'))
sts = boto3.client('sts')
res = sts.assume_role_with_saml(
RoleArn=config_file.get(profile, 'role_arn'),
PrincipalArn=config_file.get(profile, 'provider'),
SAMLAssertion=saml_res,
                DurationSeconds=int(config_file.get(profile, 'durations')))
else:
config_file = configparser.RawConfigParser()
config_file.read(environ.get('HOME') + '/.aws/credentials')
role_arn = config_file.get(profile, 'role_arn')
role_name = role_arn.split('/')[-1]
random_identifier = str(uuid.uuid4())[4:]
role_session = ''.join(
[user.username, role_name, random_identifier])
res = sts.assume_role(
RoleArn=role_arn,
RoleSessionName=role_session,
DurationSeconds=3600,
SerialNumber=user.mfa,
TokenCode=token
)
return (
profile,
res['Credentials']['AccessKeyId'],
res['Credentials']['SecretAccessKey'],
res['Credentials']['SessionToken'],
res['Credentials']['Expiration']
)
def clean_environment(self):
""" remove aws environment variables """
for var in list(environ.keys()):
if var.startswith('AWS_'):
del environ[var]
def is_expired(self):
try:
stored_date = self.session[4]
except:
return False
now = datetime.datetime.utcnow()
session_time = datetime.datetime.strptime(
stored_date,
'%Y-%m-%d %H:%M:%S+00:00')
return now > session_time
class User(object):
def __init__(self):
sts = boto3.client('sts')
caller = sts.get_caller_identity()
self.arn = caller['Arn']
self.account_id = caller['Account']
self.username = self.get_username()
self.mfa = self.get_mfa()
def get_username(self):
username = str(self.arn).split('/')[-1]
return username
def get_mfa(self):
mfa = "arn:aws:iam::" + self.account_id + ":mfa/" + self.username
return mfa
class GoogleSAML(object):
def __init__(self, username, passwd, idp_id, sp_id):
""" method for google saml auth init"""
self.username = username
self.password = passwd
self.idp_id = idp_id
self.sp_id = sp_id
self.duration_seconds = 3600
payload = {
'idpid': str(self.idp_id),
'spid': str(self.sp_id),
'forceauthn': 'false'
}
params = urlencode(payload, quote_via=quote_plus)
self.url = "https://accounts.google.com/o/saml2/initsso?" + params
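        # resulting URL has the form
        # https://accounts.google.com/o/saml2/initsso?idpid=...&spid=...&forceauthn=false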
def auth(self):
self.request = requests.Session()
res = self.request.get(self.url)
res.raise_for_status()
page = BeautifulSoup(res.text, 'html.parser')
gaia_loginform = page.find(
'form', {'id': 'gaia_loginform'}).get('action')
payload = {}
payload['gxf'] = page.find('input', {'name': 'gxf'}).get('value')
payload['continue'] = page.find(
'input', {'name': 'continue'}).get('value')
payload['ltmpl'] = page.find('input', {'name': 'ltmpl'}).get('value')
payload['sarp'] = 1
payload['scc'] = 1
payload['oauth'] = page.find('input', {'name': 'oauth'}).get('value')
payload['_utf8'] = page.find('input', {'name': '_utf8'}).get('value')
payload['bgresponse'] = page.find(
'input', {'name': 'bgresponse'}).get('value')
payload['Email'] = self.username
payload['Passwd'] = self.password
res = self.request.post(gaia_loginform, data=payload)
res.raise_for_status()
self.request.headers['Referer'] = res.url
page = BeautifulSoup(res.text, 'html.parser')
payload['ProfileInformation'] = page.find(
'input', {'name': 'ProfileInformation'}).get('value')
payload['SessionState'] = page.find(
'input', {'name': 'SessionState'}).get('value')
payload['Passwd'] = self.password
passwd_challenge_url = page.find(
'form', {'id': 'gaia_loginform'}).get('action')
res = self.request.post(passwd_challenge_url, data=payload)
res.raise_for_status()
self.request.headers['Referer'] = res.url
if "challenge/az" in res.url:
res = self.auth_prompt(res, payload)
self.session_state = res
def auth_prompt(self, session, payload):
res = BeautifulSoup(session.text, 'html.parser')
auth_url = session.url.split('?')[0]
data_key = res.find('div', {'data-api-key': True}).get('data-api-key')
data_tx_id = res.find('div', {'data-tx-id': True}).get('data-tx-id')
params = {
'alt': 'json',
'key': data_key
}
params = urlencode(params, quote_via=quote_plus)
prompt_url = "https://content.googleapis.com/cryptauth/v1/authzen/awaittx?" + params
prompt_body = {'txId': data_tx_id}
print("Open the Google App, and tap 'Yes' on the prompt to sign in ...")
self.request.headers['Referer'] = session.url
res_prompt = self.request.post(prompt_url, json=prompt_body)
parsed = json.loads(res_prompt.text)
payload = {
'challengeId': res.find('input', {'name': 'challengeId'}).get('value'),
'challengeType': res.find('input', {'name': 'challengeType'}).get('value'),
'continue': res.find('input', {'name': 'continue'}).get('value'),
'scc': res.find('input', {'name': 'scc'}).get('value'),
'sarp': res.find('input', {'name': 'sarp'}).get('value'),
'TL': res.find('input', {'name': 'TL'}).get('value'),
'gxf': res.find('input', {'name': 'gxf'}).get('value'),
'token': parsed['txToken'],
'action': res.find('input', {'name': 'action'}).get('value'),
'TrustDevice': 'on',
}
res = self.request.post(auth_url, data=payload)
res.raise_for_status()
return res
def get_saml_response(self):
res = BeautifulSoup(self.session_state.text, 'html.parser')
saml_response = res.find(
'input', {'name': 'SAMLResponse'}).get('value')
return saml_response
def parse_roles(self, doc):
roles = {}
for x in doc.xpath('//*[@Name = "https://aws.amazon.com/SAML/Attributes/Role"]//text()'):
if "arn:aws:iam:" not in x:
continue
res = x.split(',')
roles[res[0]] = res[1]
return roles
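    # each matching attribute value is a comma-separated ARN pair, e.g.
    # "arn:aws:iam::<acct>:role/<name>,arn:aws:iam::<acct>:saml-provider/<name>";
    # the code assumes the role ARN comes first, so the mapping built above
    # is role ARN -> provider ARN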
def pick_one(self, roles):
while True:
for i, role in enumerate(roles):
print("[{:>3d}] {}".format(i + 1, role))
prompt = 'Type the number (1 - {:d}) of the role to assume: '.format(
len(roles))
choice = input(prompt)
try:
num = int(choice)
return list(roles.items())[num - 1]
except:
print("Invalid choice, try again")
|
rizkidoank/awsu
|
awsu/config.py
|
Python
|
gpl-3.0
| 11,653
|
import unittest
from test import support
from _testcapi import getargs_keywords
import warnings
warnings.filterwarnings("ignore",
category=DeprecationWarning,
message=".*integer argument expected, got float",
module=__name__)
warnings.filterwarnings("ignore",
category=DeprecationWarning,
message=".*integer argument expected, got float",
module="unittest")
"""
> How about the following counterproposal. This also changes some of
> the other format codes to be a little more regular.
>
> Code C type Range check
>
> b unsigned char 0..UCHAR_MAX
> h signed short SHRT_MIN..SHRT_MAX
> B unsigned char none **
> H unsigned short none **
> k * unsigned long none
> I * unsigned int 0..UINT_MAX
> i int INT_MIN..INT_MAX
> l long LONG_MIN..LONG_MAX
> K * unsigned long long none
> L long long LLONG_MIN..LLONG_MAX
> Notes:
>
> * New format codes.
>
> ** Changed from previous "range-and-a-half" to "none"; the
> range-and-a-half checking wasn't particularly useful.
Plus a C API or two, e.g. PyInt_AsLongMask() ->
unsigned long and PyInt_AsLongLongMask() -> unsigned
long long (if that exists).
"""
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
class Long:
def __int__(self):
return 99
class Int:
def __int__(self):
return 99
class Unsigned_TestCase(unittest.TestCase):
def test_b(self):
from _testcapi import getargs_b
# b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
self.assertRaises(TypeError, getargs_b, 3.14)
self.assertEqual(99, getargs_b(Long()))
self.assertEqual(99, getargs_b(Int()))
self.assertRaises(OverflowError, getargs_b, -1)
self.assertEqual(0, getargs_b(0))
self.assertEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
self.assertEqual(42, getargs_b(42))
self.assertEqual(42, getargs_b(42))
self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
def test_B(self):
from _testcapi import getargs_B
# B returns 'unsigned char', no range checking
self.assertRaises(TypeError, getargs_B, 3.14)
self.assertEqual(99, getargs_B(Long()))
self.assertEqual(99, getargs_B(Int()))
self.assertEqual(UCHAR_MAX, getargs_B(-1))
self.assertEqual(UCHAR_MAX, getargs_B(-1))
self.assertEqual(0, getargs_B(0))
self.assertEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
self.assertEqual(0, getargs_B(UCHAR_MAX+1))
self.assertEqual(42, getargs_B(42))
self.assertEqual(42, getargs_B(42))
self.assertEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
def test_H(self):
from _testcapi import getargs_H
# H returns 'unsigned short', no range checking
self.assertRaises(TypeError, getargs_H, 3.14)
self.assertEqual(99, getargs_H(Long()))
self.assertEqual(99, getargs_H(Int()))
self.assertEqual(USHRT_MAX, getargs_H(-1))
self.assertEqual(0, getargs_H(0))
self.assertEqual(USHRT_MAX, getargs_H(USHRT_MAX))
self.assertEqual(0, getargs_H(USHRT_MAX+1))
self.assertEqual(42, getargs_H(42))
self.assertEqual(42, getargs_H(42))
self.assertEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
def test_I(self):
from _testcapi import getargs_I
# I returns 'unsigned int', no range checking
self.assertRaises(TypeError, getargs_I, 3.14)
self.assertEqual(99, getargs_I(Long()))
self.assertEqual(99, getargs_I(Int()))
self.assertEqual(UINT_MAX, getargs_I(-1))
self.assertEqual(0, getargs_I(0))
self.assertEqual(UINT_MAX, getargs_I(UINT_MAX))
self.assertEqual(0, getargs_I(UINT_MAX+1))
self.assertEqual(42, getargs_I(42))
self.assertEqual(42, getargs_I(42))
self.assertEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
def test_k(self):
from _testcapi import getargs_k
# k returns 'unsigned long', no range checking
# it does not accept float, or instances with __int__
self.assertRaises(TypeError, getargs_k, 3.14)
self.assertRaises(TypeError, getargs_k, Long())
self.assertRaises(TypeError, getargs_k, Int())
self.assertEqual(ULONG_MAX, getargs_k(-1))
self.assertEqual(0, getargs_k(0))
self.assertEqual(ULONG_MAX, getargs_k(ULONG_MAX))
self.assertEqual(0, getargs_k(ULONG_MAX+1))
self.assertEqual(42, getargs_k(42))
self.assertEqual(42, getargs_k(42))
self.assertEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
def test_i(self):
from _testcapi import getargs_i
# i returns 'int', and does range checking (INT_MIN ... INT_MAX)
self.assertRaises(TypeError, getargs_i, 3.14)
self.assertEqual(99, getargs_i(Long()))
self.assertEqual(99, getargs_i(Int()))
self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
self.assertEqual(INT_MIN, getargs_i(INT_MIN))
self.assertEqual(INT_MAX, getargs_i(INT_MAX))
self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
self.assertEqual(42, getargs_i(42))
self.assertEqual(42, getargs_i(42))
self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
def test_l(self):
from _testcapi import getargs_l
# l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
self.assertRaises(TypeError, getargs_l, 3.14)
self.assertEqual(99, getargs_l(Long()))
self.assertEqual(99, getargs_l(Int()))
self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
self.assertEqual(LONG_MIN, getargs_l(LONG_MIN))
self.assertEqual(LONG_MAX, getargs_l(LONG_MAX))
self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
self.assertEqual(42, getargs_l(42))
self.assertEqual(42, getargs_l(42))
self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
def test_n(self):
from _testcapi import getargs_n
# n returns 'Py_ssize_t', and does range checking
# (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
self.assertRaises(TypeError, getargs_n, 3.14)
self.assertRaises(TypeError, getargs_n, Long())
self.assertRaises(TypeError, getargs_n, Int())
self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
self.assertEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
self.assertEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
self.assertEqual(42, getargs_n(42))
self.assertEqual(42, getargs_n(42))
self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
class LongLong_TestCase(unittest.TestCase):
def test_L(self):
from _testcapi import getargs_L
# L returns 'long long', and does range checking (LLONG_MIN ... LLONG_MAX)
self.assertRaises(TypeError, getargs_L, "Hello")
self.assertEqual(3, getargs_L(3.14))
self.assertEqual(99, getargs_L(Long()))
self.assertEqual(99, getargs_L(Int()))
self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
self.assertEqual(LLONG_MIN, getargs_L(LLONG_MIN))
self.assertEqual(LLONG_MAX, getargs_L(LLONG_MAX))
self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
self.assertEqual(42, getargs_L(42))
self.assertEqual(42, getargs_L(42))
self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
def test_K(self):
from _testcapi import getargs_K
        # K returns 'unsigned long long', no range checking
self.assertRaises(TypeError, getargs_K, 3.14)
self.assertRaises(TypeError, getargs_K, Long())
self.assertRaises(TypeError, getargs_K, Int())
self.assertEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
self.assertEqual(0, getargs_K(0))
self.assertEqual(0, getargs_K(ULLONG_MAX+1))
self.assertEqual(42, getargs_K(42))
self.assertEqual(42, getargs_K(42))
self.assertEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Tuple_TestCase(unittest.TestCase):
def test_tuple(self):
from _testcapi import getargs_tuple
ret = getargs_tuple(1, (2, 3))
self.assertEqual(ret, (1,2,3))
# make sure invalid tuple arguments are handled correctly
class seq:
def __len__(self):
return 2
def __getitem__(self, n):
raise ValueError
self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
def test_positional_args(self):
# using all positional args
self.assertEqual(
getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
)
def test_mixed_args(self):
# positional and keyword args
self.assertEqual(
getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
)
def test_keyword_args(self):
# all keywords
self.assertEqual(
getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
)
def test_optional_args(self):
# missing optional keyword args, skipping tuples
self.assertEqual(
getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
(1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
)
def test_required_args(self):
# required arg missing
try:
getargs_keywords(arg1=(1,2))
except TypeError as err:
self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found")
else:
self.fail('TypeError should have been raised')
def test_too_many_args(self):
try:
getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
except TypeError as err:
self.assertEqual(str(err), "function takes at most 5 arguments (6 given)")
else:
self.fail('TypeError should have been raised')
def test_invalid_keyword(self):
# extraneous keyword arg
try:
getargs_keywords((1,2),3,arg5=10,arg666=666)
except TypeError as err:
self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function")
else:
self.fail('TypeError should have been raised')
def test_main():
tests = [Signed_TestCase, Unsigned_TestCase, Tuple_TestCase, Keywords_TestCase]
try:
from _testcapi import getargs_L, getargs_K
except ImportError:
pass # PY_LONG_LONG not available
else:
tests.append(LongLong_TestCase)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
fail/314_test_getargs2.py
|
Python
|
gpl-3.0
| 11,329
|
from datetime import datetime, timedelta
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
DATE_FORMAT = '%Y-%m-%d %H:%M:%S GMT'
schema = {
'name': {
'type': 'string',
'minlength': 3,
'maxlength': 50,
'required': True,
},
'occurred_on': {
'type': 'datetime',
'default': datetime.utcnow(),
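        # note: utcnow() is evaluated once, at import time, so every document
        # validated by this schema shares the same default timestamp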
},
'source': {
'type': 'string',
'minlength': 2,
'maxlength': 50,
'required': True,
},
'reporter': {
'type': 'string',
'minlength': 3,
'maxlength': 20,
'required': True,
},
'details': {
'type': 'string',
'minlength': 0,
'maxlength': 300,
'required': False
},
}
event = {
'item_title': 'event',
'additional_lookup': {
        'url': r'regex("[\w]+")',
'field': 'name',
},
'cache_control': 'max-age=10, must-revalidate',
'cache_expires': 10,
'resource_methods': ['GET', 'POST'],
'schema': schema
}
DOMAIN = {
'event': event,
}
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_USERNAME = ''
MONGO_PASSWORD = ''
MONGO_DBNAME = 'historia'
|
waoliveros/historia
|
settings.py
|
Python
|
mit
| 1,196
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from pathlib import Path
import datetime as dt
import pytest
from rnacentral_pipeline.databases.psicquic import parser
from rnacentral_pipeline.databases.data import (
Entry,
Interaction,
InteractionIdentifier,
Interactor,
)
from rnacentral_pipeline.databases.helpers import publications as pub
@pytest.fixture(scope="module")
def data():
path = Path("data/psicquic/data.tsv")
return list(parser.parse(path, os.environ["PGDATABASE"]))
def test_can_parse_all_entries(data):
assert len(data) == 9
def test_can_parse_correctly(data):
assert data[0] == Entry(
primary_id="PSICQUIC:URS000000B1C9_9606",
accession="PSICQUIC:URS000000B1C9_9606",
ncbi_tax_id=9606,
database="PSICQUIC",
sequence="TGAGGTAGGAGGTTGTATAGTT",
regions=[],
rna_type="miRNA",
url="http://www.ebi.ac.uk/Tools/webservices/psicquic/view/main.xhtml",
seq_version="1",
description="Homo sapiens (human) hsa-let-7e-5p",
references=[
pub.reference("PMID:23671334"),
pub.reference("PMID:30670152"),
],
species="Homo sapiens",
common_name="human",
lineage="Eukaryota; Metazoa; Chordata; Craniata; Vertebrata; Euteleostomi; Mammalia; Eutheria; Euarchontoglires; Primates; Haplorrhini; Catarrhini; Hominidae; Homo; Homo sapiens",
interactions=[
Interaction(
ids=(
InteractionIdentifier(
key="psicquic", value="URS000000B1C9_9606-0", name=""
),
),
interactor1=Interactor(
id=InteractionIdentifier(
key="RNAcentral", value="URS000000B1C9_9606", name=None
),
alt_ids=(),
aliases=(
InteractionIdentifier(
key="RNAcentral",
value="Homo sapiens (human) hsa-let-7e-5p",
name="recommended name",
),
),
taxid=9606,
biological_role=(
InteractionIdentifier(
key="psi-mi", value="MI:0499", name="unspecified role"
),
),
experimental_role=(
InteractionIdentifier(
key="psi-mi", value="MI:0499", name="unspecified role"
),
),
interactor_type=(
InteractionIdentifier(
key="psi-mi", value="MI:0320", name="ribonucleic acid"
),
),
xrefs=(
InteractionIdentifier(
key="go",
value="GO:0035278",
name="miRNA mediated inhibition of translation",
),
),
annotations="-",
features=(),
stoichiometry=None,
participant_identification=(),
),
interactor2=Interactor(
id=InteractionIdentifier(
key="uniprotkb", value="P40763", name=None
),
alt_ids=(),
aliases=(),
taxid=-3,
biological_role=(
InteractionIdentifier(
key="psi-mi", value="MI:0499", name="unspecified role"
),
),
experimental_role=(
InteractionIdentifier(
key="psi-mi", value="MI:0499", name="unspecified role"
),
),
interactor_type=(
InteractionIdentifier(
key="psi-mi", value="MI:0329", name="unknown participant"
),
),
xrefs=(),
annotations="-",
features=(),
stoichiometry=None,
participant_identification=(),
),
methods=(),
types=(
InteractionIdentifier(
key="psi-mi", value="MI:0915", name="physical association"
),
),
xrefs=(),
annotations=(),
confidence=(),
source_database=(
InteractionIdentifier(
key="psi-mi", value="MI:2320", name="aruk-ucl"
),
),
is_negative=True,
publications=(pub.reference("PMID:30670152"),),
create_date=None,
update_date=dt.date(2020, 1, 27),
host_organisms=None,
)
],
)
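# These tests assume data/psicquic/data.tsv exists and that PGDATABASE points
# at a reachable Postgres instance, e.g. (hypothetical invocation):
#   PGDATABASE=rnacentral pytest tests/databases/psicquic/parser_test.py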
|
RNAcentral/rnacentral-import-pipeline
|
tests/databases/psicquic/parser_test.py
|
Python
|
apache-2.0
| 5,789
|
#!/usr/bin/env python
#
# restrict_long_contigs.py
#
# USAGE: restrict_long_contigs.py [options] <input_directory> \
# <output_directory>
#
# Options:
# -h, --help show this help message and exit
# -l MINLEN, --minlen=MINLEN
# Minimum length of sequence
# -s SUFFIX, --filesuffix=SUFFIX
# Suffix to indicate the file was processed
# -v, --verbose Give verbose output
#
# Non-PSL dependencies: Biopython (www.biopython.org)
#
# A short script that takes as input a directory containing (many) FASTA files
# describing biological sequences, and writes to a new, named directory
# multiple FASTA files containing the same sequences, but restricted only to
# those sequences whose length is greater than a passed value.
#
# Example usage: You have a directory with many sets of contigs from different
# assemblies. This script will produce a new directory of the same data where
# the contig lengths are restricted to being greater than a specified length.
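# For instance (hypothetical paths), to keep only contigs of 2 kbp or more:
#
#     restrict_long_contigs.py -l 2000 -v assemblies/ assemblies_2kbp/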
#
# Copyright (C) 2013 The James Hutton Institute
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
# IMPORTS
from Bio import SeqIO
from optparse import OptionParser
import logging
import logging.handlers
import os
import re
import sys
###
# GLOBALS
# File extensions that indicate FASTA content
fasta_ext = ['.fa', '.fas', '.fasta']
###
# FUNCTIONS
# Parse cmd-line
def parse_cmdline(args):
""" Parse command-line arguments. Note that the input and output
directories are positional arguments
"""
usage = "usage: %prog [options] <input_directory> <output_directory>"
parser = OptionParser(usage)
parser.add_option("-l", "--minlen", dest="minlen",
action="store", default=1000,
help="Minimum length of sequence")
parser.add_option("-s", "--filesuffix", dest="suffix",
action="store", default="_restricted",
help="Suffix to indicate the file was processed")
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="Give verbose output")
return parser.parse_args()
# Get list of FASTA files from a directory
def get_fasta_filenames(indir, extensions=fasta_ext):
""" Identifies files in the passed directory whose extensions indicate
that they may be FASTA files. Returns the path to the file,
including the parent directory.
"""
filelist = [f for f in os.listdir(indir) if
os.path.splitext(f)[-1].lower() in extensions]
logger.info("Identified %d FASTA files in %s:" % (len(filelist),
indir))
if not len(filelist): # We want there to be at least one file
logger.error("No FASTA files found in %s" % indir)
sys.exit(1)
return filelist
# Restrict sequence length in a named FASTA file, writing it to
# the named location
def restrict_seq_length(infile, outfile, minlen):
""" Takes an input FASTA file as infile, and writes out a corresponding
file to outfile, where sequences shorter than minlen are not included
"""
logger.info("Restricting lengths of %s to >=%d;" % (infile, minlen) +
" writing to %s" % outfile)
SeqIO.write([s for s in SeqIO.parse(infile, 'fasta')
if not len(s) < minlen],
outfile, 'fasta')
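# For example (hypothetical filenames), the call below would copy only the
# sequences of at least 1 kbp from contigs.fa into contigs_long.fa:
#     restrict_seq_length('contigs.fa', 'contigs_long.fa', 1000)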
# Process FASTA files in the directory
def process_files(indir, outdir, minlen, suffix):
""" Takes an input directory that contains FASTA files, and writes
to the output directory corresponding files (with the suffix appended)
that contain only sequences of length greater than minlen.
"""
for filename in get_fasta_filenames(indir):
filestem, ext = os.path.splitext(filename)
infilename = os.path.join(indir, filename)
outfilename = os.path.join(outdir, ''.join([filestem, suffix, ext]))
restrict_seq_length(infilename, outfilename, minlen)
###
# SCRIPT
if __name__ == '__main__':
# Parse command-line
# options are options, arguments are the .sff files
options, args = parse_cmdline(sys.argv)
# We set up logging, and modify loglevel according to whether we need
# verbosity or not
logger = logging.getLogger('restrict_long_contigs.py')
logger.setLevel(logging.DEBUG)
err_handler = logging.StreamHandler(sys.stderr)
err_formatter = logging.Formatter('%(levelname)s: %(message)s')
err_handler.setFormatter(err_formatter)
if options.verbose:
err_handler.setLevel(logging.INFO)
else:
err_handler.setLevel(logging.WARNING)
logger.addHandler(err_handler)
# Report arguments, if verbose
logger.info(options)
logger.info(args)
# If there are not two positional arguments, throw an error
if len(args) != 2:
logger.error("Not enough arguments: script requires input and " +
"output directory")
sys.exit(1)
indir, outdir = tuple(args)
# Make sure that the input directory exists
if not os.path.isdir(indir):
logger.error("Input directory %s does not exist" % indir)
sys.exit(1)
# If output directory does not exist, create it. If it does exist,
# issue a warning that contents may be overwritten
if os.path.isdir(outdir):
logger.warning("Contents of %s may be overwritten" % outdir)
else:
logger.warning("Output directory %s does not exist: creating it" %
outdir)
os.mkdir(outdir)
# Check that the passed suffix is a valid string: escape dodgy characters
#try:
# suffix = re.escape(options.suffix)
#except:
# logger.error("Could not escape suffix string: %s" % options.suffix)
# sys.exit(1)
    # Make sure that the minimum length is an integer, and positive;
    # a non-numeric -l value gets the friendly error instead of a traceback
    try:
        minlen = int(options.minlen)
    except ValueError:
        minlen = 0
    if not minlen > 0:
        logger.error("Minimum length must be a positive integer, got %s" %
                     options.minlen)
        sys.exit(1)
    # Restrict sequence lengths
    process_files(indir, outdir, minlen, options.suffix)
|
widdowquinn/scripts
|
bioinformatics/restrict_long_contigs.py
|
Python
|
mit
| 7,563
|
../../../share/pyshared/FSM.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/FSM.py
|
Python
|
gpl-3.0
| 30
|
#!/usr/bin/env python
"""Toy Parser Generator is a lexical and syntactic parser generator
for Python. This generator was born from a simple statement: YACC
is too complex to use in simple cases (calculators, configuration
files, small programming languages, ...).
TPG can very simply write parsers that are useful for most everyday
needs (even if it can't make your coffee). With a very clear
and simple syntax, you can write an attributed grammar that is
translated into a recursive descent parser. TPG generated code
is very close to the original grammar. This means that the parser
works "like" the grammar. A grammar rule can be seen as a method
of the parser class, symbols as method calls, attributes as method
parameters and semantic values as return values. You can also add
Python code directly into grammar rules and build abstract syntax
trees while parsing.
"""
# Toy Parser Generator: A Python parser generator
# Copyright (C) 2001-2013 Christophe Delord
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For further information about TPG you can visit
# http://cdsoft.fr/tpg
# TODO:
# - indent and dedent preprocessor
#
__tpgname__ = 'TPG'
__version__ = '3.2.2'
__date__ = '2013-12-29'
__description__ = "A Python parser generator"
__long_description__ = __doc__
__license__ = 'LGPL'
__author__ = 'Christophe Delord'
__email__ = 'cdsoft.fr'
__url__ = 'http://cdsoft.fr/tpg/'
import parser
import re
import sre_parse
import sys
# Python 2/3 compatibility
__python__ = sys.version_info[0]
if __python__ == 3:
    import collections.abc
    callable = lambda value: isinstance(value, collections.abc.Callable)
exc = lambda: sys.exc_info()[1]
if __python__ == 2:
exc = lambda: sys.exc_value
_id = lambda x: x
tab = " "*4
class Error(Exception):
""" Error((line, column), msg)
Error is the base class for TPG exceptions.
Attributes:
line : line number from where the error has been raised
column : column number from where the error has been raised
msg : message associated to the error
"""
def __init__(self, line_column, msg):
self.line, self.column = line_column
self.msg = msg
def __str__(self):
return "%s at line %s, column %s: %s"%(self.__class__.__name__, self.line, self.column, self.msg)
class WrongToken(Error):
""" WrongToken()
WrongToken is raised when the parser can not continue in order to backtrack.
"""
def __init__(self):
Exception.__init__(self)
class LexicalError(Error):
""" LexicalError((line, column), msg)
LexicalError is raised by lexers when a lexical error is encountered.
Attributes:
line : line number from where the error has been raised
column : column number from where the error has been raised
msg : message associated to the error
"""
pass
class SyntacticError(Error):
""" SyntacticError((line, column), msg)
SyntacticError is raised by parsers when they fail.
Attributes:
line : line number from where the error has been raised
column : column number from where the error has been raised
msg : message associated to the error
"""
pass
class SemanticError(Error):
""" SemanticError(msg)
SemanticError is raised by user actions when an error is detected.
Attributes:
msg : message associated to the error
"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return "%s: %s"%(self.__class__.__name__, self.msg)
class LexerOptions:
""" LexerOptions(word_bounded, compile_options)
LexerOptions is a base class for lexers holding lexers' options.
Parameters:
        word_bounded : if True, identifier-like regular expressions are wrapped in word boundaries
compile_options : options given to re.compile to compile regular expressions
"""
word_re = re.compile(r"^\w+$")
def __init__(self, wb, compile_options):
if not wb:
self.word_bounded = self.not_word_bounded
self.compile_options = compile_options
def re_compile(self, expr):
""" compile expr using self.compile_options as re.compile options
"""
return re.compile(expr, self.compile_options)
def word_bounded(self, expr):
""" add word boundaries (\\b) to expr if it looks like an identifier
"""
if self.word_re.match(expr):
return r"\b%s\b"%expr
else:
return expr
def not_word_bounded(self, expr):
""" return expr without change. Used to replace word_bounded when wb is False
"""
return expr
class NamedGroupLexer(LexerOptions):
r""" NamedGroupLexer(word_bounded, compile_options)
NamedGroupLexer is a TPG lexer:
    - uses named group regular expressions (faster but limited to 100 tokens)
Attributes:
token_re : regular expression containing the whole lexer
        tokens : dictionary name -> (value, is_real_token)
            name is a token name
            value is a function that computes the value of a token from its text
            is_real_token is a boolean. True for tokens, False for separators
Once the lexer is started more attributes are defined:
input : input string being parsed
max_pos : maximum position reached in the input string
last_token : last token reached in the input string
pos : position in the input string of the current token
line : line of the current token
column : column of the current token
cur_token : current token
"""
def __init__(self, wb, compile_options):
LexerOptions.__init__(self, wb, compile_options)
self.token_re = [] # [named_regexp] and then regexp
self.tokens = {} # name -> value, is_real_token
def def_token(self, name, expr, value=_id):
""" add a new token to the lexer
Parameters:
name : name of the token
expr : regular expression of the token
value : function to compute the token value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the token.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens:
self.token_re.append("(?P<%s>%s)"%(name, self.word_bounded(expr)))
self.tokens[name] = value, True
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def def_separator(self, name, expr, value=_id):
""" add a new separator to the lexer
Parameters:
name : name of the separator
expr : regular expression of the separator
value : function to compute the separator value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the separator. Note that separator
values are ignored.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens:
self.token_re.append("(?P<%s>%s)"%(name, self.word_bounded(expr)))
self.tokens[name] = value, False
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def build(self):
""" build the token_re attribute from the tokens and separators
"""
if isinstance(self.token_re, list):
self.token_re = self.re_compile("|".join(self.token_re))
def start(self, input):
""" start a lexical analysis
Parameters:
input : input string to be parsed
"""
self.input = input
self.max_pos = 0
self.last_token = None
self.build()
self.back(None)
self.next_token()
def eof(self):
""" True if the current position of the lexer is the end of the input string
"""
return self.pos >= len(self.input) and isinstance(self.cur_token, EOFToken)
def back(self, token):
""" change the current token to token (used for backtracking)
"""
if token is None:
self.pos = 0
self.line, self.column = 1, 1
self.cur_token = None
else:
self.pos = token.stop
self.line, self.column = token.end_line, token.end_column
self.cur_token = token
def next_token(self):
""" return the next token
Tokens are Token instances. Separators are ignored.
"""
if self.cur_token is None:
prev_stop = 0
else:
prev_stop = self.cur_token.stop
while True:
if self.pos >= len(self.input):
self.cur_token = EOFToken(self.line, self.column, self.pos, prev_stop)
return self.cur_token
tok = self.token_re.match(self.input, self.pos)
if tok:
name = tok.lastgroup
text = tok.group()
value, real_token = self.tokens[name]
try:
value = value(text)
except WrongToken:
raise LexicalError((self.line, self.column), "Lexical error in %s"%text)
start, stop = tok.span()
self.pos = stop
tok_line, tok_column = self.line, self.column
if '\n' in text:
self.line += text.count('\n')
self.column = len(text) - text.rfind('\n')
else:
self.column += len(text)
if real_token:
self.cur_token = Token(name, text, value, tok_line, tok_column, self.line, self.column, start, stop, prev_stop)
if self.pos > self.max_pos:
self.max_pos = self.pos
self.last_token = self.cur_token
return self.cur_token
else:
w = 20
nl = self.input.find('\n', self.pos, self.pos+w)
if nl > -1:
err = self.input[self.pos:nl]
else:
err = self.input[self.pos:self.pos+w]
raise LexicalError((self.line, self.column), "Lexical error near %s"%err)
def token(self):
""" return the current token
"""
return self.cur_token
def extract(self, start, stop):
""" extract text from the input string
Parameters:
start : token from which the extraction starts
stop : token where the extraction stops
"""
return self.input[start.start:stop.prev_stop]
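# Usage sketch (illustrative, not part of TPG): a NamedGroupLexer can also be
# driven by hand, outside any generated parser.
#
#   lexer = NamedGroupLexer(True, 0)           # word-bounded, no re options
#   lexer.def_separator('spaces', r'\s+')
#   lexer.def_token('num', r'\d+', int)
#   lexer.start('12 34')
#   assert lexer.token().value == 12           # value computed by int()
#   assert lexer.next_token().value == 34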
class Lexer(NamedGroupLexer):
r""" Lexer(word_bounded, compile_options)
Lexer is a TPG lexer:
- based on NamedGroupLexer
- doesn't use named group regular expressions (slower but not limited to 100 tokens)
    - selects the longest match so the order of token definitions doesn't matter
Attributes:
tokens : list (name, regexp, value, is_real_token)
name is a token name
regexp is the regular expression of the token
value is a function that computes the value of a token from its text
            is_real_token is a boolean. True for tokens, False for separators
Once the lexer is started more attributes are defined:
input : input string being parsed
max_pos : maximum position reached in the input string
last_token : last token reached in the input string
pos : position in the input string of the current token
line : line of the current token
column : column of the current token
cur_token : current token
"""
def __init__(self, wb, compile_options):
LexerOptions.__init__(self, wb, compile_options)
self.tokens = [] # [(name, regexp, value, is_real_token)]
def def_token(self, name, expr, value=_id):
""" adds a new token to the lexer
Parameters:
name : name of the token
expr : regular expression of the token
value : function to compute the token value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the token.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens:
self.tokens.append((name, self.re_compile(self.word_bounded(expr)), value, True))
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def def_separator(self, name, expr, value=_id):
""" add a new separator to the lexer
Parameters:
name : name of the separator
expr : regular expression of the separator
value : function to compute the separator value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the separator. Note that separator
values are ignored.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens:
self.tokens.append((name, self.re_compile(self.word_bounded(expr)), value, False))
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def start(self, input):
""" start a lexical analysis
Parameters:
input : input string to be parsed
"""
self.input = input
self.max_pos = 0
self.last_token = None
self.back(None)
self.next_token()
def next_token(self):
""" return the next token
Tokens are Token instances. Separators are ignored.
"""
if self.cur_token is None:
prev_stop = 0
else:
prev_stop = self.cur_token.stop
while True:
if self.pos >= len(self.input):
self.cur_token = EOFToken(self.line, self.column, self.pos, prev_stop)
return self.cur_token
tok = None
text = ""
for _name, _regexp, _value, _is_real_token in self.tokens:
_tok = _regexp.match(self.input, self.pos)
if _tok:
_text = _tok.group()
if len(_text) > len(text):
tok = _tok
name = _name
text = _text
value = _value
real_token = _is_real_token
if tok:
try:
value = value(text)
except WrongToken:
raise LexicalError((self.line, self.column), "Lexical error in %s"%text)
start, stop = tok.span()
self.pos = stop
tok_line, tok_column = self.line, self.column
if '\n' in text:
self.line += text.count('\n')
self.column = len(text) - text.rfind('\n')
else:
self.column += len(text)
if real_token:
self.cur_token = Token(name, text, value, tok_line, tok_column, self.line, self.column, start, stop, prev_stop)
if self.pos > self.max_pos:
self.max_pos = self.pos
self.last_token = self.cur_token
return self.cur_token
else:
w = 20
nl = self.input.find('\n', self.pos, self.pos+w)
if nl > -1:
err = self.input[self.pos:nl]
else:
err = self.input[self.pos:self.pos+w]
raise LexicalError((self.line, self.column), "Lexical error near %s"%err)
class CacheNamedGroupLexer(NamedGroupLexer):
r""" CacheNamedGroupLexer(word_bounded, compile_options)
CacheNamedGroupLexer is a TPG lexer:
- based on NamedGroupLexer
- the complete token list is built before parsing
            (faster with very ambiguous grammars but needs more memory)
        Attributes:
            token_re : regular expression containing the whole lexer
            tokens : dictionary name -> (value, is_real_token)
                name is a token name
                value is a function that computes the value of a token from its text
                is_real_token is a boolean. True for tokens, False for separators
cache : token list
Once the lexer is started more attributes are defined:
input : input string being parsed
max_pos : maximum position reached in the input string
last_token : last token reached in the input string
pos : position in the input string of the current token
line : line of the current token
column : column of the current token
cur_token : current token
"""
def __init__(self, wb, compile_options):
NamedGroupLexer.__init__(self, wb, compile_options)
def start(self, input):
""" start a lexical analysis
Parameters:
input : input string to be parsed
"""
self.cache = []
self.input = input
self.max_pos = 0
self.last_token = None
self.build()
self.back(None)
while True:
token = NamedGroupLexer.next_token(self)
token.index = len(self.cache)
self.cache.append(token)
if isinstance(token, EOFToken):
break
self.max_pos = 0
self.last_token = None
self.back(None)
self.next_token()
def next_token(self):
""" return the next token
Tokens are Token instances. Separators are ignored.
"""
if self.cur_token is None:
index = 0
else:
index = self.cur_token.index+1
token = self.cache[index]
self.pos = token.stop
self.line, self.column = token.line, token.column
self.cur_token = token
if self.pos > self.max_pos:
self.max_pos = self.pos
self.last_token = self.cur_token
return self.cur_token
class CacheLexer(Lexer):
r""" CacheLexer(word_bounded, compile_options)
CacheLexer is a TPG lexer:
- based on Lexer
- doesn't use named group regular expressions (slower but not limited to 100 tokens)
    - selects the longest match so the order of token definitions doesn't matter
    - the complete token list is built before parsing
        (faster with very ambiguous grammars but needs more memory)
    Attributes:
        tokens : list (name, regexp, value, is_real_token)
            name is a token name
            regexp is the regular expression of the token
            value is a function that computes the value of a token from its text
            is_real_token is a boolean. True for tokens, False for separators
cache : token list
Once the lexer is started more attributes are defined:
input : input string being parsed
max_pos : maximum position reached in the input string
last_token : last token reached in the input string
pos : position in the input string of the current token
line : line of the current token
column : column of the current token
cur_token : current token
"""
def __init__(self, wb, compile_options):
Lexer.__init__(self, wb, compile_options)
def start(self, input):
""" start a lexical analysis
Parameters:
input : input string to be parsed
"""
self.cache = []
self.input = input
self.max_pos = 0
self.last_token = None
self.back(None)
while True:
token = Lexer.next_token(self)
token.index = len(self.cache)
self.cache.append(token)
if isinstance(token, EOFToken):
break
self.max_pos = 0
self.last_token = None
self.back(None)
self.next_token()
def next_token(self):
""" return the next token
Tokens are Token instances. Separators are ignored.
"""
if self.cur_token is None:
index = 0
else:
index = self.cur_token.index+1
token = self.cache[index]
self.pos = token.stop
self.line, self.column = token.line, token.column
self.cur_token = token
if self.pos > self.max_pos:
self.max_pos = self.pos
self.last_token = self.cur_token
return self.cur_token
class ContextSensitiveLexer(LexerOptions):
r""" ContextSensitiveLexer(word_bounded, compile_options)
ContextSensitiveLexer is a TPG lexer:
- context sensitive means that each regular expression is matched when required by the parser.
Different tokens can be found at the same position if the parser uses different grammar rules.
Attributes:
        tokens : dictionary name -> (regexp, value)
name is a token name
regexp is the regular expression of the token
value is a function that computes the value of a token from its text
separators : list (name, regexp, value)
name is a token name
regexp is the regular expression of the token
value is a function that computes the value of a token from its text
Once the lexer is started more attributes are defined:
input : input string being parsed
max_pos : maximum position reached in the input string
last_token : last token reached in the input string
pos : position in the input string of the current token
line : line of the current token
column : column of the current token
cur_token : current token
"""
def __init__(self, wb, compile_options):
LexerOptions.__init__(self, wb, compile_options)
self.tokens = {} # name -> (regexp, value)
self.separators = [] # [(name, regexp, value)]
def def_token(self, name, expr, value=_id):
""" add a new token to the lexer
Parameters:
name : name of the token
expr : regular expression of the token
value : function to compute the token value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the token.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens and name not in self.separators:
self.tokens[name] = self.re_compile(self.word_bounded(expr)), value
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def def_separator(self, name, expr, value=_id):
""" add a new separator to the lexer
Parameters:
name : name of the separator
expr : regular expression of the separator
value : function to compute the separator value from its text
The default for value is the identity function. If value is not callable
        it is returned as-is, whatever the text of the separator. Note that separator
values are ignored.
"""
if not callable(value):
value = lambda _, value=value: value
if name not in self.tokens and name not in self.separators:
self.separators.append((name, self.re_compile(self.word_bounded(expr)), value))
else:
raise SemanticError("Duplicate token definition (%s)"%name)
def start(self, input):
""" start a lexical analysis
Parameters:
input : input string to be parsed
"""
self.input = input
self.max_pos = 0
self.last_token = None
self.back(None)
def eof(self):
""" True if the current position of the lexer is the end of the input string
"""
return self.pos >= len(self.input)
def back(self, token):
""" change the current token to token (used for backtracking)
"""
if token is None:
self.pos = 0
self.line, self.column = 1, 1
self.cur_token = SOFToken()
else:
self.pos = token.stop
self.line, self.column = token.end_line, token.end_column
self.cur_token = token
self.eat_separators()
self.cur_token.next_start = self.pos
def eat_separators(self):
""" skip separators in the input string from the current position
"""
done = False
while not done:
done = True
for name, regexp, value in self.separators:
sep = regexp.match(self.input, self.pos)
if sep:
start, stop = sep.span()
text = self.input[start:stop]
value = value(text)
self.pos = stop
if '\n' in text:
self.line += text.count('\n')
self.column = len(text) - text.rfind('\n')
else:
self.column += len(text)
done = False
def eat(self, name):
""" return the next token value if it matches the expected token name
"""
regexp, value = self.tokens[name]
tok = regexp.match(self.input, self.pos)
if tok is None:
raise WrongToken
else:
if self.cur_token is None:
prev_stop = 0
else:
prev_stop = self.cur_token.stop
start, stop = tok.span()
text = self.input[start:stop]
value = value(text)
self.pos = stop
tok_line, tok_column = self.line, self.column
if '\n' in text:
self.line += text.count('\n')
self.column = len(text) - text.rfind('\n')
else:
self.column += len(text)
self.cur_token = Token(name, text, value, tok_line, tok_column, self.line, self.column, start, stop, prev_stop)
if self.pos > self.max_pos:
self.max_pos = self.pos
self.last_token = self.cur_token
self.eat_separators()
self.cur_token.next_start = self.pos
return self.cur_token
def token(self):
""" return the current token
"""
return self.cur_token
def extract(self, start, stop):
""" extract text from the input string
Parameters:
start : the token from which the extraction starts
stop : the token where the extraction stops
"""
start = start and start.next_start or 0
stop = stop and stop.stop or -1
return self.input[start:stop]
class Token:
""" Token(name, text, value, line, column, end_line, end_column, start, stop, prev_stop)
Token object used by lexers
Attributes:
name : name of the token
text : text matched by the regular expression
value : value computed from the text
line : line of the token in the input string
column : column of the token in the input string
end_line : line of the end of the token
end_column : column of the end of the token
start : position of the start in the input string of the token
stop : position of the end in the input string of the token
prev_stop : position of the end of the previous token
"""
def __init__(self, name, text, value, line, column, end_line, end_column, start, stop, prev_stop):
self.name = name
self.text = text
self.value = value
self.line, self.column = line, column
self.end_line, self.end_column = end_line, end_column
self.start, self.stop = start, stop
self.prev_stop = prev_stop
def match(self, name):
""" return True is the token name is the name of the expected token
Parameters:
name : name of the expected token
"""
return name == self.name
def __str__(self):
return "line %s, column %s: %s %s %s"%(self.line, self.column, self.name, self.text, self.value)
class EOFToken(Token):
""" EOFToken(line, column, pos, prev_stop)
Token for the end of file (end of the input string).
EOFToken is a Token object.
Attributes:
name : name of the token
text : text matched by the regular expression
value : value computed from the text
line : line of the token in the input string
column : column of the token in the input string
end_line : line of the end of the token
end_column : column of the end of the token
start : position of the start in the input string of the token
stop : position of the end in the input string of the token
prev_stop : position of the end of the previous token
"""
def __init__(self, line, column, pos, prev_stop):
Token.__init__(self, "EOF", "EOF", None, line, column, line, column, pos, pos, prev_stop)
class SOFToken(Token):
""" SOFToken()
Token for the start of file (start of the input string).
SOFToken is a Token object.
Attributes:
name : name of the token
text : text matched by the regular expression
value : value computed from the text
line : line of the token in the input string
column : column of the token in the input string
end_line : line of the end of the token
end_column : column of the end of the token
start : position of the start in the input string of the token
stop : position of the end in the input string of the token
prev_stop : position of the end of the previous token
"""
def __init__(self):
Token.__init__(self, "SOF", "SOF", None, 1, 1, 1, 1, 0, 0, 0)
class Py:
def __init__(self, level=0):
frame = sys._getframe(1+level)
self.globals = frame.f_globals
self.locals = frame.f_locals
def __getitem__(self, item):
return eval(item%self, self.globals, self.locals)
class ParserMetaClass(type):
""" ParserMetaClass is the metaclass of Parser objects.
When a ParserMetaClass class is defined, its doc string should contain
a grammar. This grammar is parsed by TPGParser and the generated code
is added to the class.
If the class doesn't have a doc string, nothing is generated
"""
def __init__(cls, name, bases, dict):
super(ParserMetaClass, cls).__init__(name, bases, dict)
try:
grammar = dict['__doc__']
except KeyError:
pass
else:
parser = TPGParser(sys._getframe(1).f_globals)
for attribute, source, code in parser(grammar):
setattr(cls, attribute, code)
if __python__ == 3:
exec("class _Parser(metaclass=ParserMetaClass): pass")
else:
class _Parser: __metaclass__ = ParserMetaClass
class Parser(_Parser):
# Parser is the base class for parsers.
#
# This class can not have a doc string otherwise it would be considered as a grammar.
# The metaclass of this class is ParserMetaClass.
#
# Attributes:
    #     lexer : lexer built from the grammar
#
# Methods added to the generated parsers:
# init_lexer(self) : return a lexer object to scan the tokens defined by the grammar
# <rule> : each rule is translated into a method with the same name
def __init__(self):
""" Parser is the base class for parsers.
This class can not have a doc string otherwise it would be considered as a grammar.
The metaclass of this class is ParserMetaClass.
Attributes:
            lexer : lexer built from the grammar
Methods added to the generated parsers:
init_lexer(self) : return a lexer object to scan the tokens defined by the grammar
<rule> : each rule is translated into a method with the same name
"""
self.lexer = self.init_lexer()
def eat(self, name):
""" eat the current token if it matches the expected token
Parameters:
name : name of the expected token
"""
token = self.lexer.token()
if token.match(name):
self.lexer.next_token()
return token.value
else:
raise WrongToken
def eatCSL(self, name):
""" eat the current token if it matches the expected token
This method replaces eat for context sensitive lexers.
Parameters:
name : name of the expected token
"""
token = self.lexer.eat(name)
return token.value
def __call__(self, input, *args, **kws):
""" parse a string starting from the default axiom
The default axiom is START.
Parameters:
input : input string to parse
*args : argument list to pass to START
            **kws : argument dictionary to pass to START
"""
return self.parse('START', input, *args, **kws)
def parse(self, axiom, input, *args, **kws):
""" parse a string starting from a given axiom
Parameters:
axiom : rule name where the parser starts
input : input string to parse
*args : argument list to pass to START
            **kws : argument dictionary to pass to START
"""
try:
self.lexer.start(input)
if __python__ == 2 and isinstance(input, unicode):
self.string_prefix = 'ur'
else:
self.string_prefix = 'r'
value = getattr(self, axiom)(*args, **kws)
if not self.lexer.eof():
raise WrongToken
except WrongToken:
if self.lexer.last_token is None:
last_token = ""
line, column = 1, 1
else:
last_token = self.lexer.last_token.text
line, column = self.lexer.last_token.line, self.lexer.last_token.column
raise SyntacticError((line, column), "Syntax error near %s"%last_token)
return value
def line(self, token=None):
""" return the line number of a token
Parameters:
token : token object. If None, the current token line is returned.
"""
if token is None:
token = self.lexer.token()
if token is None:
return 1
return token.line
def column(self, token=None):
""" return the column number of a token
Parameters:
token : token object. If None, the current token column is returned.
"""
if token is None:
            token = self.lexer.token()
if token is None:
return 1
return token.column
def mark(self):
""" return the current token
This can be used to get the line or column number of a token
or to extract text between two tokens.
"""
return self.lexer.token()
def extract(self, start, stop):
""" return the text found between two tokens
Parameters :
start : token object as returned by mark
stop : token object as returned by mark
"""
return self.lexer.extract(start, stop)
def check(self, cond):
""" check a condition and backtrack when it is False
Parameters:
cond : condition to be checked
"""
if not cond:
raise WrongToken
return cond
def error(self, msg):
""" stop the parser and raise a SemanticError exception
Parameters:
msg : error message to raise
"""
raise SemanticError(msg)
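# Usage sketch (illustrative, not part of TPG): with ParserMetaClass, the
# grammar is the class docstring and each rule becomes a method.
#
#   class Calc(Parser):
#       r"""
#       separator spaces '\s+' ;
#       token num '\d+' int ;
#       START/s -> num/s ( '\+' num/n $ s += n $ )* ;
#       """
#
#   Calc()("1 + 2 + 3")    # -> 6, parsed from the default START axiom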
class VerboseParser(Parser):
# VerboseParser is the base class for debugging parsers.
#
# This class can not have a doc string otherwise it would be considered as a grammar.
# The metaclass of this class is ParserMetaClass.
# It extends the Parser class to log the activity of the lexer.
#
# Attributes:
    #     lexer : lexer built from the grammar
# verbose : level of information
# 0 : no information
# 1 : print tokens successfully matched
# 2 : print tokens matched and not matched
#
# Methods added to the generated parsers:
# init_lexer(self) : return a lexer object to scan the tokens defined by the grammar
# <rule> : each rule is translated into a method with the same name
verbose = 1
def __init__(self):
""" VerboseParser is the base class for debugging parsers.
This class can not have a doc string otherwise it would be considered as a grammar.
The metaclass of this class is ParserMetaClass.
It extends the Parser class to log the activity of the lexer.
Attributes:
            lexer : lexer built from the grammar
verbose : level of information
0 : no information
1 : print tokens successfully matched
2 : print tokens matched and not matched
Methods added to the generated parsers:
init_lexer(self) : return a lexer object to scan the tokens defined by the grammar
<rule> : each rule is translated into a method with the same name
"""
Parser.__init__(self)
self.eatcnt = 0
def eat(self, name):
""" eat the current token if it matches the expected token
Parameters:
name : name of the expected token
"""
self.eatcnt += 1
token = self.lexer.token()
try:
value = Parser.eat(self, name)
if self.verbose >= 1:
#print(self.token_info(token, "==", name))
sys.stderr.write(self.token_info(token, "==", name)+"\n")
return value
except WrongToken:
if self.verbose >= 2:
#print(self.token_info(token, "!=", name))
sys.stderr.write(self.token_info(token, "!=", name)+"\n")
raise
def eatCSL(self, name):
""" eat the current token if it matches the expected token
This method replaces eat for context sensitive lexers.
Parameters:
name : name of the expected token
"""
self.eatcnt += 1
try:
value = Parser.eatCSL(self, name)
if self.verbose >= 1:
token = self.lexer.token()
#print(self.token_info(token, "==", name))
sys.stderr.write(self.token_info(token, "==", name)+"\n")
return value
except WrongToken:
if self.verbose >= 2:
token = Token("???", self.lexer.input[self.lexer.pos:self.lexer.pos+10].replace('\n', ' '), "???", self.lexer.line, self.lexer.column, self.lexer.line, self.lexer.column, self.lexer.pos, self.lexer.pos, self.lexer.pos)
#print(self.token_info(token, "!=", name))
sys.stderr.write(self.token_info(token, "!=", name)+"\n")
raise
def parse(self, axiom, input, *args, **kws):
""" parse a string starting from a given axiom
Parameters:
axiom : rule name where the parser starts
input : input string to parse
*args : argument list to pass to START
            **kws : argument dictionary to pass to START
"""
self.axiom = axiom
return Parser.parse(self, axiom, input, *args, **kws)
def token_info(self, token, op, expected):
""" return information about a token
Parameters:
token : token read by the lexer
op : result of the comparison made by the lexer (== or !=)
expected : name of the expected token
"""
eatcnt = self.eatcnt
callernames = []
stackdepth = 0
name = None
while name != self.axiom:
stackdepth += 1
name = sys._getframe(stackdepth+1).f_code.co_name
if len(callernames) < 10:
callernames.insert(0, name)
callernames = '.'.join(callernames)
found = "(%d,%d) %s %s"%(token.line, token.column, token.name, token.text)
return "[%3d][%2d]%s: %s %s %s"%(eatcnt, stackdepth, callernames, found, op, expected)
blank_line_re = re.compile("^\s*$")
indent_re = re.compile("^\s*")
class tpg:
""" This class contains some TPG classes to make the parsers usable inside and outside the tpg module
"""
NamedGroupLexer = NamedGroupLexer
Lexer = Lexer
CacheNamedGroupLexer = CacheNamedGroupLexer
CacheLexer = CacheLexer
ContextSensitiveLexer = ContextSensitiveLexer
Parser = Parser
WrongToken = WrongToken
re = re
class TPGParser(tpg.Parser):
__grammar__ = r"""
# This class parses TPG grammar
# and generates the Python source and compiled code for the parser
set lexer = NamedGroupLexer
set lexer_verbose
separator spaces '\s+' ;
separator comment '\#.*' ;
token string '''
"{3} [^"\\]*
(?: (?: \\. | "(?!"") )
[^"\\]*
)*
"{3}
| " [^"\\\n]*
(?: \\. [^"\\\n]* )*
"
| '{3} [^'\\]*
(?: (?: \\. | '(?!'') )
[^'\\]*
)*
'{3}
| ' [^'\\\n]*
(?: \\. [^'\\\n]* )*
'
''' ;
token code '''
\{\{
( \}? [^\}]+ )*
\}\}
| \$ [^\$\n]* \$
| \$ .*\n
( [ \t]* \$ .*\n )*
''' $ self.Code
token ident '\w+' ;
token lcbra '\{' ;
token rcbra '\}' ;
token star2 '\*\*' ;
token star '\*' ;
START/$self.gen(options, tokens, rules)$ ->
OPTIONS/options
TOKENS/tokens
RULES/rules
;
OPTIONS/options ->
$ options = self.Options(self)
( 'set' ident/name
( '=' ident/value $ options.set(name, value)
| $ options.set(name, 'True')
)
)*
;
TOKENS/ts ->
$ ts = []
( TOKEN/t $ ts.append(t)
)*
;
TOKEN/$token_type(name, self.string_prefix, expr, code)$ ->
( 'separator' $ token_type = self.DefSeparator
| 'token' $ token_type = self.DefToken
)
ident/name ':'?
@t string/expr $ self.re_check(expr, t)
( PY_EXPR/code ';'?
| ';' $ code = None
)
;
RULES/rs ->
$ rs = self.Rules()
( RULE/r $ rs.append(r)
)*
;
RULE/$self.Rule(head, body)$ -> HEAD/head '->' OR_EXPR/body ';' ;
HEAD/$self.Symbol(name, args, ret)$ -> ident/name OPT_ARGS/args RET<$self.PY_Ident(name)$>/ret ;
OR_EXPR/$self.balance(or_expr)$ ->
AND_EXPR/a $ or_expr = [a]
( check $ not or_expr[-1].empty() $
'\|' AND_EXPR/a $ or_expr.append(a)
)*
;
AND_EXPR/$and_expr$ ->
$ and_expr = self.And()
( ATOM_EXPR/a REP<a>/a $ and_expr.append(a)
)*
;
ATOM_EXPR/a ->
SYMBOL/a
| INLINE_TOKEN/a
| @t code/a $ self.code_check(a, t)
| '\(' OR_EXPR/a '\)'
| 'check' PY_EXPR/cond $ a = self.Check(cond)
| 'error' PY_EXPR/msg $ a = self.Error(msg)
| '@' PY_EXPR/mark $ a = self.Mark(mark)
;
REP<a>/a ->
( '\*' $ a = self.Rep(a, 0, None)
| '\+' $ a = self.Rep(a, 1, None)
| '\?' $ a = self.Rep(a, 0, 1)
| '\{'
( PY_EXPR/min | $ min = self.PY_Ident("0") $ )
( ',' ( PY_EXPR/max | $ max = self.PY_Ident("None") $ )
| $ max = min $
)
'\}' $ a = self.Rep(a, min, max)
)?
;
SYMBOL/$self.Symbol(name, args, ret)$ -> ident/name OPT_ARGS/args RET<$self.PY_Ident(name)$>/ret ;
INLINE_TOKEN/$self.InlineToken(expr, ret)$ ->
@t string/expr $ self.re_check(expr, t)
RET/ret
;
OPT_ARGS/args -> ARGS/args | $ args = self.Args() $ ;
ARGS/args ->
'<' $ args = self.Args()
( ARG/arg $ args.append(arg)
( ',' ARG/arg $ args.append(arg)
)*
','?
)?
'>'
;
ARG/a ->
ident/name '=' PY_EXPR/a $ a = self.PY_KeywordArgument(name, a)
| PY_EXPR/a $ a = self.PY_PositionArgument(a)
| '\*' ident/name $ a = self.PY_PositionArgumentList(name)
| '\*\*' ident/name $ a = self.PY_KeywordArgumentList(name)
;
RET<ret=None>/ret -> ( '/' PY_EXPR/ret )? ;
PY_EXPR/expr ->
ident/name $ expr = self.PY_Ident(name)
| string/st $ expr = self.PY_Ident(st)
| code/expr
| ARGS/expr
;
"""
def init_lexer(self):
lexer = tpg.NamedGroupLexer(True, tpg.re.VERBOSE)
lexer.def_token('_tok_1', r'set')
lexer.def_token('_tok_2', r'=')
lexer.def_token('_tok_3', r'separator')
lexer.def_token('_tok_4', r'token')
lexer.def_token('_tok_5', r':')
lexer.def_token('_tok_6', r';')
lexer.def_token('_tok_7', r'->')
lexer.def_token('_tok_8', r'\|')
lexer.def_token('_tok_9', r'\(')
lexer.def_token('_tok_10', r'\)')
lexer.def_token('_tok_11', r'check')
lexer.def_token('_tok_12', r'error')
lexer.def_token('_tok_13', r'@')
lexer.def_token('_tok_14', r'\+')
lexer.def_token('_tok_15', r'\?')
lexer.def_token('_tok_16', r',')
lexer.def_token('_tok_17', r'<')
lexer.def_token('_tok_18', r'>')
lexer.def_token('_tok_19', r'/')
lexer.def_separator('spaces', r'\s+')
lexer.def_separator('comment', r'\#.*')
lexer.def_token('string', r'''
"{3} [^"\\]*
(?: (?: \\. | "(?!"") )
[^"\\]*
)*
"{3}
| " [^"\\\n]*
(?: \\. [^"\\\n]* )*
"
| '{3} [^'\\]*
(?: (?: \\. | '(?!'') )
[^'\\]*
)*
'{3}
| ' [^'\\\n]*
(?: \\. [^'\\\n]* )*
'
''')
lexer.def_token('code', r'''
\{\{
( \}? [^\}]+ )*
\}\}
| \$ [^\$\n]* \$
| \$ .*\n
( [ \t]* \$ .*\n )*
''', self.Code)
lexer.def_token('ident', r'\w+')
lexer.def_token('lcbra', r'\{')
lexer.def_token('rcbra', r'\}')
lexer.def_token('star2', r'\*\*')
lexer.def_token('star', r'\*')
return lexer
def START(self, ):
r""" ``START -> OPTIONS TOKENS RULES ;`` """
options = self.OPTIONS()
tokens = self.TOKENS()
rules = self.RULES()
return self.gen(options, tokens, rules)
def OPTIONS(self, ):
r""" ``OPTIONS -> ('set' ident ('=' ident | ))* ;`` """
options = self.Options(self)
while True:
_p1 = self.lexer.token()
try:
self.eat('_tok_1') # 'set'
name = self.eat('ident')
_p2 = self.lexer.token()
try:
self.eat('_tok_2') # '='
value = self.eat('ident')
options.set(name, value)
except tpg.WrongToken:
self.lexer.back(_p2)
options.set(name, 'True')
except tpg.WrongToken:
self.lexer.back(_p1)
break
return options
def TOKENS(self, ):
r""" ``TOKENS -> (TOKEN)* ;`` """
ts = []
while True:
_p1 = self.lexer.token()
try:
t = self.TOKEN()
ts.append(t)
except tpg.WrongToken:
self.lexer.back(_p1)
break
return ts
def TOKEN(self, ):
r""" ``TOKEN -> ('separator' | 'token') ident ':'? string (PY_EXPR ';'? | ';') ;`` """
_p1 = self.lexer.token()
try:
self.eat('_tok_3') # 'separator'
token_type = self.DefSeparator
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('_tok_4') # 'token'
token_type = self.DefToken
name = self.eat('ident')
_p2 = self.lexer.token()
try:
self.eat('_tok_5') # ':'
except tpg.WrongToken:
self.lexer.back(_p2)
t = self.mark()
expr = self.eat('string')
self.re_check(expr, t)
_p3 = self.lexer.token()
try:
code = self.PY_EXPR()
_p4 = self.lexer.token()
try:
self.eat('_tok_6') # ';'
except tpg.WrongToken:
self.lexer.back(_p4)
except tpg.WrongToken:
self.lexer.back(_p3)
self.eat('_tok_6') # ';'
code = None
return token_type(name, self.string_prefix, expr, code)
def RULES(self, ):
r""" ``RULES -> (RULE)* ;`` """
rs = self.Rules()
while True:
_p1 = self.lexer.token()
try:
r = self.RULE()
rs.append(r)
except tpg.WrongToken:
self.lexer.back(_p1)
break
return rs
def RULE(self, ):
r""" ``RULE -> HEAD '->' OR_EXPR ';' ;`` """
head = self.HEAD()
self.eat('_tok_7') # '->'
body = self.OR_EXPR()
self.eat('_tok_6') # ';'
return self.Rule(head, body)
def HEAD(self, ):
r""" ``HEAD -> ident OPT_ARGS RET ;`` """
name = self.eat('ident')
args = self.OPT_ARGS()
ret = self.RET(self.PY_Ident(name))
return self.Symbol(name, args, ret)
def OR_EXPR(self, ):
r""" ``OR_EXPR -> AND_EXPR ('\|' AND_EXPR)* ;`` """
a = self.AND_EXPR()
or_expr = [a]
while True:
_p1 = self.lexer.token()
try:
self.check(not or_expr[-1].empty())
self.eat('_tok_8') # '\|'
a = self.AND_EXPR()
or_expr.append(a)
except tpg.WrongToken:
self.lexer.back(_p1)
break
return self.balance(or_expr)
def AND_EXPR(self, ):
r""" ``AND_EXPR -> (ATOM_EXPR REP)* ;`` """
and_expr = self.And()
while True:
_p1 = self.lexer.token()
try:
a = self.ATOM_EXPR()
a = self.REP(a)
and_expr.append(a)
except tpg.WrongToken:
self.lexer.back(_p1)
break
return and_expr
def ATOM_EXPR(self, ):
r""" ``ATOM_EXPR -> SYMBOL | INLINE_TOKEN | code | '\(' OR_EXPR '\)' | 'check' PY_EXPR | 'error' PY_EXPR | '@' PY_EXPR ;`` """
_p1 = self.lexer.token()
try:
try:
a = self.SYMBOL()
except tpg.WrongToken:
self.lexer.back(_p1)
try:
a = self.INLINE_TOKEN()
except tpg.WrongToken:
self.lexer.back(_p1)
t = self.mark()
a = self.eat('code')
self.code_check(a, t)
except tpg.WrongToken:
self.lexer.back(_p1)
try:
try:
self.eat('_tok_9') # '\('
a = self.OR_EXPR()
self.eat('_tok_10') # '\)'
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('_tok_11') # 'check'
cond = self.PY_EXPR()
a = self.Check(cond)
except tpg.WrongToken:
self.lexer.back(_p1)
try:
self.eat('_tok_12') # 'error'
msg = self.PY_EXPR()
a = self.Error(msg)
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('_tok_13') # '@'
mark = self.PY_EXPR()
a = self.Mark(mark)
return a
def REP(self, a):
r""" ``REP -> ('\*' | '\+' | '\?' | '\{' (PY_EXPR | ) (',' (PY_EXPR | ) | ) '\}')? ;`` """
_p1 = self.lexer.token()
try:
try:
try:
self.eat('star') # '\*'
a = self.Rep(a, 0, None)
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('_tok_14') # '\+'
a = self.Rep(a, 1, None)
except tpg.WrongToken:
self.lexer.back(_p1)
try:
self.eat('_tok_15') # '\?'
a = self.Rep(a, 0, 1)
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('lcbra') # '\{'
_p2 = self.lexer.token()
try:
min = self.PY_EXPR()
except tpg.WrongToken:
self.lexer.back(_p2)
min = self.PY_Ident("0")
_p3 = self.lexer.token()
try:
self.eat('_tok_16') # ','
_p4 = self.lexer.token()
try:
max = self.PY_EXPR()
except tpg.WrongToken:
self.lexer.back(_p4)
max = self.PY_Ident("None")
except tpg.WrongToken:
self.lexer.back(_p3)
max = min
self.eat('rcbra') # '\}'
a = self.Rep(a, min, max)
except tpg.WrongToken:
self.lexer.back(_p1)
return a
def SYMBOL(self, ):
r""" ``SYMBOL -> ident OPT_ARGS RET ;`` """
name = self.eat('ident')
args = self.OPT_ARGS()
ret = self.RET(self.PY_Ident(name))
return self.Symbol(name, args, ret)
def INLINE_TOKEN(self, ):
r""" ``INLINE_TOKEN -> string RET ;`` """
t = self.mark()
expr = self.eat('string')
self.re_check(expr, t)
ret = self.RET()
return self.InlineToken(expr, ret)
def OPT_ARGS(self, ):
r""" ``OPT_ARGS -> ARGS | ;`` """
_p1 = self.lexer.token()
try:
args = self.ARGS()
except tpg.WrongToken:
self.lexer.back(_p1)
args = self.Args()
return args
def ARGS(self, ):
r""" ``ARGS -> '<' (ARG (',' ARG)* ','?)? '>' ;`` """
self.eat('_tok_17') # '<'
args = self.Args()
_p1 = self.lexer.token()
try:
arg = self.ARG()
args.append(arg)
while True:
_p2 = self.lexer.token()
try:
self.eat('_tok_16') # ','
arg = self.ARG()
args.append(arg)
except tpg.WrongToken:
self.lexer.back(_p2)
break
_p3 = self.lexer.token()
try:
self.eat('_tok_16') # ','
except tpg.WrongToken:
self.lexer.back(_p3)
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('_tok_18') # '>'
return args
def ARG(self, ):
r""" ``ARG -> ident '=' PY_EXPR | PY_EXPR | '\*' ident | '\*\*' ident ;`` """
_p1 = self.lexer.token()
try:
try:
name = self.eat('ident')
self.eat('_tok_2') # '='
a = self.PY_EXPR()
a = self.PY_KeywordArgument(name, a)
except tpg.WrongToken:
self.lexer.back(_p1)
a = self.PY_EXPR()
a = self.PY_PositionArgument(a)
except tpg.WrongToken:
self.lexer.back(_p1)
try:
self.eat('star') # '\*'
name = self.eat('ident')
a = self.PY_PositionArgumentList(name)
except tpg.WrongToken:
self.lexer.back(_p1)
self.eat('star2') # '\*\*'
name = self.eat('ident')
a = self.PY_KeywordArgumentList(name)
return a
def RET(self, ret=None):
r""" ``RET -> ('/' PY_EXPR)? ;`` """
_p1 = self.lexer.token()
try:
self.eat('_tok_19') # '/'
ret = self.PY_EXPR()
except tpg.WrongToken:
self.lexer.back(_p1)
return ret
def PY_EXPR(self, ):
r""" ``PY_EXPR -> ident | string | code | ARGS ;`` """
_p1 = self.lexer.token()
try:
try:
name = self.eat('ident')
expr = self.PY_Ident(name)
except tpg.WrongToken:
self.lexer.back(_p1)
st = self.eat('string')
expr = self.PY_Ident(st)
except tpg.WrongToken:
self.lexer.back(_p1)
try:
expr = self.eat('code')
except tpg.WrongToken:
self.lexer.back(_p1)
expr = self.ARGS()
return expr
def __init__(self, _globals=None):
Parser.__init__(self)
if _globals is not None:
self.env = _globals
else:
self.env = {}
def __call__(self, input, *args, **kws):
""" parse a string starting from the default axiom
The default axiom is START.
Parameters:
input : input string to parse
*args : argument list to pass to START
            **kws : argument dictionary to pass to START
If a line ends with '::', it is considered as the end
of the ReST part. The lines after are the grammar.
"""
docs = re.split(r"::[ \t]*$", input, maxsplit=1, flags=re.M)
if len(docs) == 2:
input = re.sub(".", " ", docs[0])+docs[1]
return self.parse('START', input, *args, **kws)
def re_check(self, expr, tok):
try:
sre_parse.parse(eval(self.string_prefix+expr))
except Exception:
raise LexicalError((tok.line, tok.column), "Invalid regular expression: %s (%s)"%(expr, exc()))
def code_check(self, code, tok):
try:
parser.suite(code.code)
except Exception:
erroneous_code = "\n".join([ "%2d: %s"%(i+1, l) for (i, l) in enumerate(code.code.splitlines()) ])
raise LexicalError((tok.line, tok.column), "Invalid Python code (%s): \n%s"%(exc, erroneous_code))
class Options:
option_dict = {
# Option name Accepted values Default value
'lexer': ({'NamedGroupLexer': NamedGroupLexer,
'Lexer': Lexer,
'CacheNamedGroupLexer': CacheNamedGroupLexer,
'CacheLexer': CacheLexer,
'ContextSensitiveLexer': ContextSensitiveLexer,
}, 'NamedGroupLexer'),
'word_boundary': ({'True': True, 'False': False}, 'True'),
#'indent': ({'True': True, 'False': False}, 'False'),
'lexer_ignorecase': ({'True': "IGNORECASE", 'False': False}, 'False'),
'lexer_locale': ({'True': "LOCALE", 'False': False}, 'False'),
'lexer_multiline': ({'True': "MULTILINE", 'False': False}, 'False'),
'lexer_dotall': ({'True': "DOTALL", 'False': False}, 'False'),
'lexer_verbose': ({'True': "VERBOSE", 'False': False}, 'False'),
'lexer_unicode': ({'True': "UNICODE", 'False': False}, 'False'),
}
def __init__(self, parser):
self.parser = parser
for name, (values, default) in TPGParser.Options.option_dict.items():
self.set(name, default)
def set(self, name, value):
try:
options, default = TPGParser.Options.option_dict[name]
except KeyError:
opts = TPGParser.Options.option_dict.keys()
self.parser.error("Unknown option (%s). Valid options are %s"%(name, ', '.join(sorted(opts))))
try:
value = options[value]
except KeyError:
values = options.keys()
self.parser.error("Unknown value (%s). Valid values for %s are %s"%(value, name, ', '.join(sorted(values))))
setattr(self, name, value)
def lexer_compile_options(self):
options = [ self.lexer_ignorecase,
self.lexer_locale,
self.lexer_multiline,
self.lexer_dotall,
self.lexer_verbose,
self.lexer_unicode,
]
return "+".join([ "tpg.re.%s"%opt for opt in options if opt ]) or 0
class Empty:
def empty(self):
return True
class NotEmpty:
def empty(self):
return False
class Code(NotEmpty):
def __init__(self, code):
if code.startswith('$'):
if code.endswith('$'):
lines = code[1:-1].splitlines()
else:
lines = [line.split('$', 1)[1] for line in code.splitlines()]
elif code.startswith('{{') and code.endswith('}}'):
lines = code[2:-2].splitlines()
else:
raise WrongToken
while lines and blank_line_re.match(lines[0]): lines.pop(0)
while lines and blank_line_re.match(lines[-1]): lines.pop(-1)
if lines:
indents = [len(indent_re.match(line).group(0)) for line in lines]
indent = indents[0]
if min(indents) < indent:
                    # Incorrect indentation: a line is less indented than the first line of the block
raise WrongToken
lines = [line[indent:] for line in lines]
self.code = "".join([line+"\n" for line in lines])
def get_inline_tokens(self):
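            # 'return' before 'yield' makes this an empty generator: a
            # plain code fragment contains no inline tokens. The same
            # idiom is used by the other leaf node classes below.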
return
yield None
def gen_code(self, indent=None, counters=None, pos=None):
if indent is None:
return self.code.strip()
else:
return [indent+line for line in self.code.splitlines()]
def links_symbols_to_tokens(self, tokens):
pass
def gen_doc(self, parent):
return ""
class DefToken:
def_method = "def_token"
def __init__(self, name, string_prefix, expr, code=None):
self.name = name
self.string_prefix = string_prefix
self.expr = expr
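            # Token actions must stay short: code spanning more than two
            # lines is rejected here with WrongToken so that the grammar
            # parser can backtrack.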
if code is not None and code.gen_code().count('\n') > 1:
raise WrongToken
self.code = code
def gen_def(self):
expr = self.expr
if self.code is None:
return "lexer.%s('%s', %s%s)"%(self.def_method, self.name, self.string_prefix, expr)
else:
code = self.code.gen_code().strip()
return "lexer.%s('%s', %s%s, %s)"%(self.def_method, self.name, self.string_prefix, expr, code)
class DefSeparator(DefToken):
def_method = "def_separator"
class Rules(list):
def get_inline_tokens(self):
for rule in self:
for token in rule.get_inline_tokens():
yield token
def links_symbols_to_tokens(self, tokens):
for rule in self:
rule.links_symbols_to_tokens(tokens)
def gen_code(self):
for rule in self:
yield rule.gen_code()
class Rule:
class Counters(dict):
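            # Returns fresh local variable names ("_p1", "_p2", ..., "_n1",
            # ...) used in the generated code for saved lexer positions and
            # repetition counters.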
def __call__(self, name):
n = self.get(name, 1)
self[name] = n+1
return "_%s%s"%(name, n)
def __init__(self, head, body):
self.head = head
self.body = body
def get_inline_tokens(self):
for token in self.body.get_inline_tokens():
yield token
def links_symbols_to_tokens(self, tokens):
if self.head.name in tokens:
raise SemanticError("%s is both a token and a symbol"%self.head.name)
else:
self.body.links_symbols_to_tokens(tokens)
def gen_code(self):
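            # A rule compiles to one parser method: its definition line, a
            # docstring showing the rule, an optional return-value
            # initialisation, the body, and the return statement.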
counters = self.Counters()
return self.head.name, [
self.head.gen_def(),
tab + 'r""" ``%s -> %s ;`` """'%(self.head.gen_doc(self), self.body.gen_doc(self)),
self.head.gen_init_ret(tab),
self.body.gen_code(tab, counters, None),
self.head.gen_ret(tab),
]
class Symbol(NotEmpty):
def __init__(self, name, args, ret):
self.name = name
self.args = args
self.ret = ret
def get_inline_tokens(self):
return
yield None
def links_symbols_to_tokens(self, tokens):
self.token = tokens.get(self.name, None)
if self.token is not None and self.args:
raise SemanticError("Token %s can not have arguments"%self.name)
def gen_def(self):
return "def %s(self, %s):"%(self.name, self.args.gen_code())
def gen_init_ret(self, indent):
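            # If the return expression is the symbol name itself,
            # pre-initialise it to None; 'and'/'or' acts as an old-style
            # conditional expression here.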
return self.ret.gen_code() == self.name and indent + "%s = None"%(self.name) or ()
def gen_ret(self, indent):
return self.ret and indent + "return %s"%self.ret.gen_code() or ()
def gen_code(self, indent, counters, pos):
if self.token is not None:
if self.ret is not None:
return indent + "%s = self.eat('%s')"%(self.ret.gen_code(), self.token.name)
else:
return indent + "self.eat('%s')"%(self.token.name)
else:
if self.ret is not None:
return indent + "%s = self.%s(%s)"%(self.ret.gen_code(), self.name, self.args.gen_code())
else:
return indent + "self.%s(%s)"%(self.name, self.args.gen_code())
def gen_doc(self, parent):
return self.name
class InlineToken(NotEmpty):
def __init__(self, expr, ret):
self.expr = expr
self.ret = ret
def get_inline_tokens(self):
yield self
def set_explicit_token(self, token):
self.explicit_token = token
def gen_def(self):
return self.explicit_token.gen_def()
def links_symbols_to_tokens(self, tokens):
pass
def gen_code(self, indent, counters, pos):
if self.ret is not None:
return indent + "%s = self.eat('%s') # %s"%(self.ret.gen_code(), self.explicit_token.name, self.expr)
else:
return indent + "self.eat('%s') # %s"%(self.explicit_token.name, self.expr)
def gen_doc(self, parent):
return self.expr
class Args(list):
def gen_code(self):
return ", ".join([a.gen_code() for a in self])
class PY_PositionArgument:
def __init__(self, arg):
self.arg = arg
def gen_code(self):
return self.arg.gen_code()
class PY_KeywordArgument:
def __init__(self, name, arg):
self.name = name
self.arg = arg
def gen_code(self):
return "%s=%s"%(self.name, self.arg.gen_code())
class PY_PositionArgumentList:
def __init__(self, name):
self.name = name
def gen_code(self):
return "*%s"%self.name
class PY_KeywordArgumentList:
def __init__(self, name):
self.name = name
def gen_code(self):
return "**%s"%self.name
class And(list):
def empty(self):
for a in self:
if not a.empty():
return False
return True
def get_inline_tokens(self):
for a in self:
for token in a.get_inline_tokens():
yield token
def links_symbols_to_tokens(self, tokens):
for a in self:
a.links_symbols_to_tokens(tokens)
def gen_code(self, indent, counters, pos):
return self and [
self[0].gen_code(indent, counters, pos),
[a.gen_code(indent, counters, None) for a in self[1:]],
]
def gen_doc(self, parent):
docs = []
for a in self:
doc = a.gen_doc(self)
if doc:
docs.append(doc)
return " ".join(docs)
class Or(NotEmpty):
def __init__(self, a, b):
self.a = a
self.b = b
def get_inline_tokens(self):
for token in self.a.get_inline_tokens():
yield token
for token in self.b.get_inline_tokens():
yield token
def links_symbols_to_tokens(self, tokens):
self.a.links_symbols_to_tokens(tokens)
self.b.links_symbols_to_tokens(tokens)
def gen_code(self, indent, counters, pos):
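            # Backtracking alternative: save the lexer position, try the
            # first branch, and on WrongToken restore the position and try
            # the second branch.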
p = pos or counters("p")
return [
pos is None and indent + "%s = self.lexer.token()"%p or (),
indent + "try:",
self.a.gen_code(indent+tab, counters, p),
indent + "except tpg.WrongToken:",
indent + tab + "self.lexer.back(%s)"%p,
self.b.gen_code(indent+tab, counters, p),
]
def gen_doc(self, parent):
doc = "%s | %s"%(self.a.gen_doc(self), self.b.gen_doc(self))
if isinstance(parent, TPGParser.And) and len(parent) > 1:
doc = "(%s)"%doc
return doc
def balance(self, xs):
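        # Turn a flat list of alternatives into a balanced binary tree of
        # Or nodes, so that the try/except blocks in the generated code
        # nest logarithmically rather than linearly.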
if len(xs) == 1:
return xs[0]
else:
m = len(xs)//2
return self.Or(self.balance(xs[:m]), self.balance(xs[m:]))
class Rep(NotEmpty):
def __init__(self, a, min, max):
self.a = a
self.min = min
self.max = max
def get_inline_tokens(self):
for token in self.a.get_inline_tokens():
yield token
def links_symbols_to_tokens(self, tokens):
self.a.links_symbols_to_tokens(tokens)
def gen_code(self, indent, counters, pos):
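            # Every repetition form compiles to code that saves the lexer
            # position before each attempt and backtracks on WrongToken;
            # A+ and A{min,max} also count the successful matches.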
# A?
if (self.min, self.max) == (0, 1):
p = pos or counters("p")
return [
pos is None and indent + "%s = self.lexer.token()"%p or (),
indent + "try:",
self.a.gen_code(indent+tab, counters, p),
indent + "except tpg.WrongToken:",
indent + tab + "self.lexer.back(%s)"%p,
]
# A*
elif (self.min, self.max) == (0, None):
p = pos or counters("p")
return [
indent + "while True:",
indent + tab + "%s = self.lexer.token()"%p,
indent + tab + "try:",
self.a.gen_code(indent+tab+tab, counters, p),
indent + tab + "except tpg.WrongToken:",
indent + tab + tab + "self.lexer.back(%s)"%p,
indent + tab + tab + "break",
]
# A+
elif (self.min, self.max) == (1, None):
p = pos or counters("p")
n = counters("n")
return [
indent + "%s = 0"%n,
indent + "while True:",
indent + tab + "%s = self.lexer.token()"%p,
indent + tab + "try:",
self.a.gen_code(indent+tab+tab, counters, p),
indent + tab + tab + "%s += 1"%n,
indent + tab + "except tpg.WrongToken:",
indent + tab + tab + "if %s < 1: raise"%n,
indent + tab + tab + "self.lexer.back(%s)"%p,
indent + tab + tab + "break",
]
# A{min, max}
else:
p = pos or counters("p")
n = counters("n")
min = self.min.gen_code()
max = self.max.gen_code()
return [
indent + "%s = 0"%n,
indent + "while %s:"%(max=="None" and "True" or "%s < %s"%(n, max)),
indent + tab + "%s = self.lexer.token()"%p,
indent + tab + "try:",
self.a.gen_code(indent+tab+tab, counters, p),
indent + tab + tab + "%s += 1"%n,
indent + tab + "except tpg.WrongToken:",
indent + tab + tab + "if %s < %s: raise"%(n, min),
indent + tab + tab + "self.lexer.back(%s)"%p,
indent + tab + tab + "break",
]
def gen_doc(self, parent):
doc = self.a.gen_doc(self)
if isinstance(self.a, (TPGParser.And, TPGParser.Or)):
doc = "(%s)"%doc
if (self.min, self.max) == (0, 1):
rep = "?"
elif (self.min, self.max) == (0, None):
rep = "*"
elif (self.min, self.max) == (1, None):
rep = "+"
else:
min = self.min.gen_code()
max = self.max.gen_code()
if min == max:
rep = "{%s}"%min
else:
if min == "0": min = ""
if max == "None": max = ""
rep = "{%s,%s}"%(min, max)
return "%s%s"%(doc, rep)
class Check(NotEmpty):
def __init__(self, cond):
self.cond = cond
def get_inline_tokens(self):
return
yield None
def links_symbols_to_tokens(self, tokens):
pass
def gen_doc(self, parent):
return ""
def gen_code(self, indent, counters, pos):
return indent + "self.check(%s)"%self.cond.gen_code()
class Error(NotEmpty):
def __init__(self, msg):
self.msg = msg
def get_inline_tokens(self):
return
yield None
def links_symbols_to_tokens(self, tokens):
pass
def gen_doc(self, parent):
return ""
def gen_code(self, indent, counters, pos):
return indent + "self.error(%s)"%self.msg.gen_code()
class Mark(NotEmpty):
def __init__(self, mark):
self.mark = mark
def get_inline_tokens(self):
return
yield None
def links_symbols_to_tokens(self, tokens):
pass
def gen_doc(self, parent):
return ""
def gen_code(self, indent, counters, pos):
return indent + "%s = self.mark()"%self.mark.gen_code()
class PY_Ident(str):
def gen_code(self):
return str(self)
def flatten_nl(self, *lines):
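        # Recursively flatten nested lists/tuples of generated code lines,
        # yielding each line with a trailing newline appended.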
for sublines in lines:
if isinstance(sublines, (list, tuple)):
for line in self.flatten_nl(*sublines):
yield line
else:
yield sublines + "\n"
def make_code(self, attribute, *source):
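        # Join the generated lines into a single source string, exec it in
        # the grammar's environment and return the attribute name, the
        # source and the compiled object.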
source = "".join(self.flatten_nl(*source))
local_namespace = {}
exec(source, self.env, local_namespace)
code = local_namespace[attribute]
return attribute, source, code
def gen(self, options, tokens, rules):
# building the lexer
lexer = options.lexer
word_bounded = options.word_boundary
lexer_options = options.lexer_compile_options()
explicit_tokens = {}
for token in tokens:
explicit_tokens[token.expr[1:-1]] = token
token_number = 0
inline_tokens = []
for token in rules.get_inline_tokens():
try:
# If the token was already defined just link it to the first definition
token.set_explicit_token(explicit_tokens[token.expr[1:-1]])
except KeyError:
# Otherwise create an explicit definition for the new inline token
token_number += 1
token.set_explicit_token(self.DefToken("_tok_%s"%token_number, self.string_prefix, token.expr))
explicit_tokens[token.expr[1:-1]] = token.explicit_token
inline_tokens.append(token)
yield self.make_code("init_lexer",
"def init_lexer(self):",
lexer is ContextSensitiveLexer and [tab + "self.eat = self.eatCSL"] or (),
tab + "lexer = tpg.%s(%s, %s)"%(lexer.__name__, word_bounded, lexer_options),
[ tab + tok.gen_def() for tok in inline_tokens ],
[ tab + tok.gen_def() for tok in tokens ],
tab + "return lexer",
)
# building the parser
tokens_from_name = {}
for token in inline_tokens:
tokens_from_name[token.explicit_token.name] = token
for token in tokens:
tokens_from_name[token.name] = token
rules.links_symbols_to_tokens(tokens_from_name)
for name, code in rules.gen_code():
yield self.make_code(name, *code)
|
alanqthomas/seawolf-lang
|
tpg.py
|
Python
|
gpl-3.0
| 81,869
|