hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f721cae3818031e8891791ccb6fed599dead54df | 12,458 | py | Python | vectorbt/utils/decorators.py | RileyMShea/vectorbt | 92ce571ce9fd0667f2994a2c922fb4cfcde10c88 | [
"Apache-2.0"
] | 1 | 2021-01-15T00:02:11.000Z | 2021-01-15T00:02:11.000Z | vectorbt/utils/decorators.py | RileyMShea/vectorbt | 92ce571ce9fd0667f2994a2c922fb4cfcde10c88 | [
"Apache-2.0"
] | null | null | null | vectorbt/utils/decorators.py | RileyMShea/vectorbt | 92ce571ce9fd0667f2994a2c922fb4cfcde10c88 | [
"Apache-2.0"
] | null | null | null | """Class and function decorators."""
from functools import wraps, lru_cache, RLock
import inspect
from vectorbt.utils import checks
class class_or_instancemethod(classmethod):
    """Descriptor that binds the wrapped function to the class when it is
    accessed on the class, and to the instance when accessed on an instance."""

    def __get__(self, instance, type_):
        if instance is None:
            # Class-level access: use the regular classmethod binding.
            return super().__get__(instance, type_)
        # Instance-level access: bind like a plain function, i.e. as an
        # ordinary instance method receiving `instance` first.
        return self.__func__.__get__(instance, type_)
class classproperty(object):
    """Read-only property that can be accessed on a class.

    The wrapped function receives the owner class (not an instance) as its
    single argument, so the value is available both as `Cls.attr` and
    `instance.attr`.
    """

    def __init__(self, f):
        self.f = f
        # Expose the wrapped function's docstring, consistent with how
        # `custom_property` propagates `__doc__` (helps `help()`/docs tools).
        self.__doc__ = getattr(f, '__doc__')

    def __get__(self, obj, owner):
        # `owner` is always the class the attribute was looked up on.
        return self.f(owner)
class custom_property():
    """Custom extensible, read-only property.

    Supports both the bare form

    ```plaintext
    @custom_property
    def user_function...
    ```

    and the parametrized form

    ```plaintext
    @custom_property(**kwargs)
    def user_function...
    ```

    !!! note
        `custom_property` instances belong to classes, not class instances. Changing
        the property (for example, by disabling caching) therefore affects every
        instance of the class where the property has been defined."""

    def __new__(cls, *args, **kwargs):
        if not args:
            # Called as `@custom_property(**kwargs)`: return a decorator that
            # builds the property once it receives the function.
            return lambda func: cls(func, **kwargs)
        if len(args) == 1:
            # Called as `@custom_property`: construct the instance directly.
            return super().__new__(cls)
        raise ValueError("Either function or keyword arguments must be passed")

    def __init__(self, func, **kwargs):
        self.func = func
        self.name = func.__name__
        self.kwargs = kwargs
        self.__doc__ = getattr(func, '__doc__')

    def __get__(self, instance, owner=None):
        # Class access returns the descriptor itself; instance access
        # evaluates the wrapped function.
        return self if instance is None else self.func(instance)

    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")
def is_caching_enabled(name, instance, func=None, **kwargs):
    """Check whether caching is enabled for a cacheable property/function.

    Each condition has its own rank. A narrower condition has a lower (better) rank than a broader
    condition. If the same condition was met in both whitelist and blacklist, whitelist wins.

    List of conditions ranked:

    ```plaintext
    1) is function in whitelist/blacklist? (properties are not supported)
    2) is (instance, function name) in whitelist/blacklist?
    3) is function name in whitelist/blacklist?
    4) is instance in whitelist/blacklist?
    5) is (class, function name) in whitelist/blacklist?
    6) is class in whitelist/blacklist?
    7) is "class_name.function_name" in whitelist/blacklist?
    8) is class name in whitelist/blacklist?
    9) is subset of kwargs in whitelist/blacklist?
    10) is caching disabled globally?

    All names are case-sensitive.
    ```"""
    from vectorbt import settings

    def _rank_of(obj):
        """Return the rank (0 = narrowest) of the first condition `obj`
        satisfies, or None when it matches nothing."""
        if func is not None and inspect.ismethod(obj) and func == obj:
            return 0
        if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
            if instance is obj[0] and name == obj[1]:
                return 1
        if isinstance(obj, str) and name == obj:
            return 2
        if instance is obj:
            return 3
        if hasattr(instance, '__class__'):
            cls = instance.__class__
            if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
                if inspect.isclass(cls) and cls == obj[0] and name == obj[1]:
                    return 4
            if inspect.isclass(cls) and cls == obj:
                return 5
            if isinstance(obj, str) and (cls.__name__ + '.' + name) == obj:
                return 6
            if isinstance(obj, str) and cls.__name__ == obj:
                return 7
        if isinstance(obj, dict) and obj.items() <= kwargs.items():
            return 8
        return None

    def _first_match_rank(objs):
        """Rank of the first entry in `objs` matching any condition
        (100 when no entry matches, mirroring the 'not found' sentinel)."""
        for obj in objs:
            rank = _rank_of(obj)
            if rank is not None:
                return rank
        return 100

    white_rank = _first_match_rank(settings.caching['whitelist'])
    black_rank = _first_match_rank(settings.caching['blacklist'])
    if white_rank == black_rank == 100:  # none of the conditions met
        return settings.caching['enabled']  # global caching decides
    return white_rank <= black_rank  # white wins if equal
# Sentinel distinguishing "no cached value yet" from a legitimately cached None.
_NOT_FOUND = object()


class cached_property(custom_property):
    """Extends `custom_property` with caching.

    Similar to `functools.cached_property`, but without replacing the original attribute
    to be able to re-compute whenever needed.

    Disables caching if `is_caching_enabled` yields False.

    Cache can be cleared by calling `clear_cache` with instance as argument.

    !!! note:
        Assumes that the instance (provided as `self`) won't change. If calculation depends
        upon object attributes that can be changed, it won't notice the change."""

    def __init__(self, func, **kwargs):
        super().__init__(func, **kwargs)
        # NOTE(review): one lock is shared by all instances of the owner class
        # (the descriptor lives on the class) — confirm contention is acceptable.
        self.lock = RLock()

    def clear_cache(self, instance):
        """Clear the cache for this property belonging to `instance`."""
        if hasattr(instance, self.attrname):
            delattr(instance, self.attrname)

    @property
    def attrname(self):
        """Get name of cached attribute."""
        return '__cached_' + self.name

    def __set_name__(self, owner, name):
        # Keep `self.name` in sync with the attribute name used on the class.
        self.name = name

    def __get__(self, instance, owner=None):
        if instance is None:
            # Class access returns the descriptor itself (see custom_property).
            return self
        if not is_caching_enabled(self.name, instance, **self.kwargs):
            # Caching disabled: recompute every time via the parent descriptor.
            return super().__get__(instance, owner=owner)
        # The cached value lives in the instance's own __dict__ under attrname.
        cache = instance.__dict__
        val = cache.get(self.attrname, _NOT_FOUND)
        if val is _NOT_FOUND:
            with self.lock:
                # check if another thread filled cache while we awaited lock
                val = cache.get(self.attrname, _NOT_FOUND)
                if val is _NOT_FOUND:
                    val = self.func(instance)
                    cache[self.attrname] = val
        return val
def custom_method(*args, **kwargs):
    """Custom extensible method.

    Stores `**kwargs` as attributes of the wrapper function.

    Can be called both as

    ```python-repl
    >>> @custom_method
    ... def user_function(): pass
    ```
    and

    ```python-repl
    >>> @custom_method(**kwargs)
    ... def user_function(): pass
    ```
    """
    # NOTE: the docstring previously showed `@cached_method` and lru_cache
    # arguments (`maxsize`, `typed`) by mistake; those belong to `cached_method`.

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Plain pass-through call; kwargs passed to the decorator are only
            # stored as metadata, never forwarded to `func`.
            return func(*args, **kwargs)

        wrapper.func = func
        wrapper.kwargs = kwargs
        return wrapper

    if len(args) == 0:
        # Called as `@custom_method(**kwargs)`.
        return decorator
    elif len(args) == 1:
        # Called as `@custom_method` directly on the function.
        return decorator(args[0])
    else:
        raise ValueError("Either function or keyword arguments must be passed")
def cached_method(*args, maxsize=128, typed=False, **kwargs):
    """Extends `custom_method` with caching.

    Internally uses `functools.lru_cache`.

    Disables caching if `is_caching_enabled` yields False or a non-hashable object
    as argument has been passed.

    See notes on `cached_property`."""

    def decorator(func):
        @wraps(func)
        def wrapper(instance, *args, **kwargs):
            def partial_func(*args, **kwargs):
                # Ignores non-hashable instances
                return func(instance, *args, **kwargs)

            # The bound wrapper (if reachable on the instance) is passed to
            # is_caching_enabled so rank-0 (method identity) matching works.
            _func = None
            if hasattr(instance, wrapper.name):
                _func = getattr(instance, wrapper.name)
            if not is_caching_enabled(wrapper.name, instance, func=_func, **wrapper.kwargs):
                return func(instance, *args, **kwargs)
            # One lru_cache-wrapped closure is kept per instance in its __dict__.
            cache = instance.__dict__
            cached_func = cache.get(wrapper.attrname, _NOT_FOUND)
            if cached_func is _NOT_FOUND:
                with wrapper.lock:
                    # check if another thread filled cache while we awaited lock
                    cached_func = cache.get(wrapper.attrname, _NOT_FOUND)
                    if cached_func is _NOT_FOUND:
                        cached_func = lru_cache(maxsize=wrapper.maxsize, typed=wrapper.typed)(partial_func)
                        cache[wrapper.attrname] = cached_func  # store function instead of output

            # Check if object can be hashed
            hashable = True
            for arg in args:
                if not checks.is_hashable(arg):
                    hashable = False
                    break
            for k, v in kwargs.items():
                if not checks.is_hashable(v):
                    hashable = False
                    break
            if not hashable:
                # If not, do not invoke lru_cache
                return func(instance, *args, **kwargs)
            return cached_func(*args, **kwargs)

        # Metadata attributes used above and by external introspection.
        wrapper.func = func
        wrapper.maxsize = maxsize
        wrapper.typed = typed
        wrapper.name = func.__name__
        wrapper.attrname = '__cached_' + func.__name__
        wrapper.lock = RLock()
        wrapper.kwargs = kwargs

        def clear_cache(instance):
            """Clear the cache for this method belonging to `instance`."""
            if hasattr(instance, wrapper.attrname):
                delattr(instance, wrapper.attrname)

        setattr(wrapper, 'clear_cache', clear_cache)
        return wrapper

    if len(args) == 0:
        # Called as `@cached_method(...)` with keyword arguments.
        return decorator
    elif len(args) == 1:
        # Called as `@cached_method` directly on the function.
        return decorator(args[0])
    else:
        raise ValueError("Either function or keyword arguments must be passed")
def traverse_attr_kwargs(cls, key=None, value=None):
    """Traverse `cls` and its children for properties/methods with `kwargs`,
    and optionally a specific `key` and `value`.

    Class attributes acting as children should have a key `child_cls`.

    Returns a nested dict of attributes."""
    checks.assert_type(cls, type)
    if value is not None and not isinstance(value, tuple):
        # Normalize a scalar `value` to a tuple for the membership test below.
        value = (value,)
    attrs = {}
    for attr in dir(cls):
        prop = getattr(cls, attr)
        if hasattr(prop, 'kwargs'):
            kwargs = getattr(prop, 'kwargs')
            if key is None:
                attrs[attr] = kwargs
            else:
                if key in kwargs:
                    if value is None:
                        attrs[attr] = kwargs
                    else:
                        _value = kwargs[key]
                        if _value in value:
                            attrs[attr] = kwargs
            if 'child_cls' in kwargs:
                child_cls = kwargs['child_cls']
                checks.assert_type(child_cls, type)
                # Copy before adding 'child_attrs': assigning into `kwargs`
                # directly would mutate the dict stored on the class attribute
                # itself, polluting the decorator metadata across calls.
                attrs[attr] = dict(kwargs)
                attrs[attr]['child_attrs'] = traverse_attr_kwargs(child_cls, key, value)
    return attrs
| 34.70195 | 107 | 0.571922 |
from functools import wraps, lru_cache, RLock
import inspect
from vectorbt.utils import checks
class class_or_instancemethod(classmethod):
    """Function decorator that binds the first argument to the class when the
    function is called as a class method, otherwise to the instance."""

    def __get__(self, instance, type_):
        # Class access uses classmethod binding; instance access binds the raw
        # function as a normal instance method.
        descr_get = super().__get__ if instance is None else self.__func__.__get__
        return descr_get(instance, type_)
class classproperty(object):
    """Read-only property that can be accessed on a class; the wrapped
    function receives the owner class as its single argument."""

    def __init__(self, f):
        self.f = f

    def __get__(self, obj, owner):
        # `owner` is the class the attribute was looked up on.
        return self.f(owner)
class custom_property():
    """Custom extensible, read-only property.

    Usable both as `@custom_property` and as `@custom_property(**kwargs)`;
    the kwargs are stored on the descriptor as metadata.

    Note: the descriptor belongs to the class, not to class instances."""

    def __new__(cls, *args, **kwargs):
        if len(args) == 0:
            # Parametrized form: return a decorator awaiting the function.
            return lambda func: cls(func, **kwargs)
        elif len(args) == 1:
            # Bare form: build the instance; __init__ receives the function.
            return super().__new__(cls)
        else:
            raise ValueError("Either function or keyword arguments must be passed")

    def __init__(self, func, **kwargs):
        self.func = func
        self.name = func.__name__
        self.kwargs = kwargs
        self.__doc__ = getattr(func, '__doc__')

    def __get__(self, instance, owner=None):
        if instance is None:
            # Class access returns the descriptor itself.
            return self
        return self.func(instance)

    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")
def is_caching_enabled(name, instance, func=None, **kwargs):
    """Check whether caching is enabled for a cacheable property/function.

    Conditions are ranked from narrowest (0: the bound function itself) to
    broadest (8: a subset of kwargs). The best (lowest) matching rank of the
    whitelist is compared against that of the blacklist; whitelist wins ties.
    If neither list matches, the global `settings.caching['enabled']` decides.
    All names are case-sensitive."""
    from vectorbt import settings
    white_rank = 100
    if len(settings.caching['whitelist']) > 0:
        for obj in settings.caching['whitelist']:
            # rank 0: the bound method itself (properties are not supported)
            if func is not None and inspect.ismethod(obj) and func == obj:
                white_rank = 0
                break
            # rank 1: (instance, function name) pair
            if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
                if instance is obj[0] and name == obj[1]:
                    white_rank = 1
                    break
            # rank 2: function name
            if isinstance(obj, str) and name == obj:
                white_rank = 2
                break
            # rank 3: the instance itself
            if instance is obj:
                white_rank = 3
                break
            if hasattr(instance, '__class__'):
                cls = instance.__class__
                # rank 4: (class, function name) pair
                if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
                    if inspect.isclass(cls) and cls == obj[0] and name == obj[1]:
                        white_rank = 4
                        break
                # rank 5: the class itself
                if inspect.isclass(cls) and cls == obj:
                    white_rank = 5
                    break
                # rank 6: "ClassName.function_name" string
                if isinstance(obj, str) and (cls.__name__ + '.' + name) == obj:
                    white_rank = 6
                    break
                # rank 7: class name string
                if isinstance(obj, str) and cls.__name__ == obj:
                    white_rank = 7
                    break
            # rank 8: a subset of the decorator kwargs
            if isinstance(obj, dict) and obj.items() <= kwargs.items():
                white_rank = 8
                break
    black_rank = 100
    if len(settings.caching['blacklist']) > 0:
        for obj in settings.caching['blacklist']:
            # Same ranking as for the whitelist above.
            if func is not None and inspect.ismethod(obj) and func == obj:
                black_rank = 0
                break
            if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
                if instance is obj[0] and name == obj[1]:
                    black_rank = 1
                    break
            if isinstance(obj, str) and name == obj:
                black_rank = 2
                break
            if instance is obj:
                black_rank = 3
                break
            if hasattr(instance, '__class__'):
                cls = instance.__class__
                if isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[1], str):
                    if inspect.isclass(cls) and cls == obj[0] and name == obj[1]:
                        black_rank = 4
                        break
                if inspect.isclass(cls) and cls == obj:
                    black_rank = 5
                    break
                if isinstance(obj, str) and (cls.__name__ + '.' + name) == obj:
                    black_rank = 6
                    break
                if isinstance(obj, str) and cls.__name__ == obj:
                    black_rank = 7
                    break
            if isinstance(obj, dict) and obj.items() <= kwargs.items():
                black_rank = 8
                break
    if white_rank == black_rank == 100:  # none of the conditions met
        return settings.caching['enabled']  # global caching decides
    return white_rank <= black_rank  # white wins if equal
# Sentinel distinguishing "no cached value yet" from a cached value of None.
_NOT_FOUND = object()


class cached_property(custom_property):
    """Extends `custom_property` with caching; the value is stored in the
    instance's __dict__ and can be cleared via `clear_cache(instance)`.
    Caching is skipped when `is_caching_enabled` yields False."""

    def __init__(self, func, **kwargs):
        super().__init__(func, **kwargs)
        # NOTE(review): the lock is shared by all instances of the owner class.
        self.lock = RLock()

    def clear_cache(self, instance):
        """Clear the cache for this property belonging to `instance`."""
        if hasattr(instance, self.attrname):
            delattr(instance, self.attrname)

    @property
    def attrname(self):
        """Name of the attribute used to store the cached value."""
        return '__cached_' + self.name

    def __set_name__(self, owner, name):
        # Keep `self.name` in sync with the attribute name on the class.
        self.name = name

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        if not is_caching_enabled(self.name, instance, **self.kwargs):
            # Caching disabled: recompute via the parent descriptor.
            return super().__get__(instance, owner=owner)
        cache = instance.__dict__
        val = cache.get(self.attrname, _NOT_FOUND)
        if val is _NOT_FOUND:
            with self.lock:
                # check if another thread filled cache while we awaited lock
                val = cache.get(self.attrname, _NOT_FOUND)
                if val is _NOT_FOUND:
                    val = self.func(instance)
                    cache[self.attrname] = val
        return val
def custom_method(*args, **kwargs):
    """Custom extensible method decorator; stores `**kwargs` as attributes of
    the wrapper. Usable as `@custom_method` or `@custom_method(**kwargs)`."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Plain pass-through; decorator kwargs are metadata only.
            return func(*args, **kwargs)

        wrapper.func = func
        wrapper.kwargs = kwargs
        return wrapper

    if len(args) == 0:
        # Called with keyword arguments only.
        return decorator
    elif len(args) == 1:
        # Called directly on the function.
        return decorator(args[0])
    else:
        raise ValueError("Either function or keyword arguments must be passed")
def cached_method(*args, maxsize=128, typed=False, **kwargs):
    """Extends `custom_method` with per-instance `functools.lru_cache` caching.
    Caching is skipped when `is_caching_enabled` yields False or when any
    argument is non-hashable."""

    def decorator(func):
        @wraps(func)
        def wrapper(instance, *args, **kwargs):
            def partial_func(*args, **kwargs):
                # Ignores non-hashable instances
                return func(instance, *args, **kwargs)

            # Pass the bound wrapper (if reachable) so rank-0 method matching
            # in is_caching_enabled works.
            _func = None
            if hasattr(instance, wrapper.name):
                _func = getattr(instance, wrapper.name)
            if not is_caching_enabled(wrapper.name, instance, func=_func, **wrapper.kwargs):
                return func(instance, *args, **kwargs)
            # One lru_cache-wrapped closure per instance, kept in __dict__.
            cache = instance.__dict__
            cached_func = cache.get(wrapper.attrname, _NOT_FOUND)
            if cached_func is _NOT_FOUND:
                with wrapper.lock:
                    # check if another thread filled cache while we awaited lock
                    cached_func = cache.get(wrapper.attrname, _NOT_FOUND)
                    if cached_func is _NOT_FOUND:
                        cached_func = lru_cache(maxsize=wrapper.maxsize, typed=wrapper.typed)(partial_func)
                        cache[wrapper.attrname] = cached_func  # store function instead of output

            # Check if object can be hashed
            hashable = True
            for arg in args:
                if not checks.is_hashable(arg):
                    hashable = False
                    break
            for k, v in kwargs.items():
                if not checks.is_hashable(v):
                    hashable = False
                    break
            if not hashable:
                # If not, do not invoke lru_cache
                return func(instance, *args, **kwargs)
            return cached_func(*args, **kwargs)

        # Metadata attributes used above and by external introspection.
        wrapper.func = func
        wrapper.maxsize = maxsize
        wrapper.typed = typed
        wrapper.name = func.__name__
        wrapper.attrname = '__cached_' + func.__name__
        wrapper.lock = RLock()
        wrapper.kwargs = kwargs

        def clear_cache(instance):
            # Drop the per-instance lru_cache so results are recomputed.
            if hasattr(instance, wrapper.attrname):
                delattr(instance, wrapper.attrname)

        setattr(wrapper, 'clear_cache', clear_cache)
        return wrapper

    if len(args) == 0:
        return decorator
    elif len(args) == 1:
        return decorator(args[0])
    else:
        raise ValueError("Either function or keyword arguments must be passed")
def traverse_attr_kwargs(cls, key=None, value=None):
    """Traverse `cls` (and children referenced via a `child_cls` kwarg) for
    properties/methods carrying a `kwargs` attribute, optionally filtering by
    `key` and `value`. Returns a nested dict of attribute name -> kwargs."""
    checks.assert_type(cls, type)
    if value is not None and not isinstance(value, tuple):
        # Normalize a scalar value to a tuple for the membership test below.
        value = (value,)
    attrs = {}
    for attr in dir(cls):
        prop = getattr(cls, attr)
        if hasattr(prop, 'kwargs'):
            kwargs = getattr(prop, 'kwargs')
            if key is None:
                attrs[attr] = kwargs
            else:
                if key in kwargs:
                    if value is None:
                        attrs[attr] = kwargs
                    else:
                        _value = kwargs[key]
                        if _value in value:
                            attrs[attr] = kwargs
            if 'child_cls' in kwargs:
                child_cls = kwargs['child_cls']
                checks.assert_type(child_cls, type)
                # NOTE(review): this writes 'child_attrs' into the kwargs dict
                # stored on the class attribute itself — confirm the mutation
                # is intended.
                attrs[attr] = kwargs
                attrs[attr]['child_attrs'] = traverse_attr_kwargs(child_cls, key, value)
    return attrs
| true | true |
f721cbcde3d04a0838563d9e89acdaf3fa845e47 | 799 | py | Python | setup.py | alekslovesdata/lambdata | f9119b9d96a5d9c5f7b957471bf7c78553e07077 | [
"MIT"
] | null | null | null | setup.py | alekslovesdata/lambdata | f9119b9d96a5d9c5f7b957471bf7c78553e07077 | [
"MIT"
] | 4 | 2020-03-24T18:00:50.000Z | 2021-06-02T00:34:10.000Z | setup.py | alekslovesdata/lambdata | f9119b9d96a5d9c5f7b957471bf7c78553e07077 | [
"MIT"
] | null | null | null | """
lambdata - a collection of data science helper functions for lambda school
"""
import setuptools
# Runtime dependencies installed alongside the package.
REQUIRED = [
    "numpy",
    "pandas"
]

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    LONG_DESCRIPTION = fh.read()

setuptools.setup(
    name="lambdata-alekslovesdata",
    version = "0.1.1",
    author = "alekslovesdata",
    description = "a collection of data science helper functions",
    long_description = LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://lambdaschool.com/courses/data-science",
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
    install_requires = REQUIRED,
    classifiers=["Programming Language :: Python :: 3",
                 "License :: OSI Approved :: MIT License",
                 "Operating System :: OS Independent",
                 ]
)
| 27.551724 | 74 | 0.675845 | import setuptools
# Runtime dependencies installed alongside the package.
REQUIRED = [
    "numpy",
    "pandas"
]

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    LONG_DESCRIPTION = fh.read()

setuptools.setup(
    name="lambdata-alekslovesdata",
    version = "0.1.1",
    author = "alekslovesdata",
    description = "a collection of data science helper functions",
    long_description = LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://lambdaschool.com/courses/data-science",
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
    install_requires = REQUIRED,
    classifiers=["Programming Language :: Python :: 3",
                 "License :: OSI Approved :: MIT License",
                 "Operating System :: OS Independent",
                 ]
)
| true | true |
f721ccc15b9cee3e5e8517ee7ee869258a9a22fe | 5,773 | py | Python | py/test/plugin/pytest_doctest.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | py/test/plugin/pytest_doctest.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | py/test/plugin/pytest_doctest.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | import py
class DoctestPlugin:
    """py.test plugin adding doctest collection (Python 2-era `py` lib)."""

    def pytest_addoption(self, parser):
        """Register the `--doctest-modules` command-line option."""
        parser.addoption("--doctest-modules",
            action="store_true", default=False,
            dest="doctestmodules")

    def pytest_collect_file(self, path, parent):
        """Collect doctests from `.py` modules (only when --doctest-modules is
        set) and from `test_*.txt` files; returns None otherwise."""
        if path.ext == ".py":
            if parent.config.getvalue("doctestmodules"):
                return DoctestModule(path, parent)
        if path.check(fnmatch="test_*.txt"):
            return DoctestTextfile(path, parent)
from py.__.code.excinfo import Repr, ReprFileLocation
class ReprFailDoctest(Repr):
    """Terminal representation of a doctest failure: context lines followed
    by the file location."""

    def __init__(self, reprlocation, lines):
        self.reprlocation = reprlocation
        self.lines = lines

    def toterminal(self, tw):
        # Print the collected source/diff lines, then the location footer.
        for line in self.lines:
            tw.line(line)
        self.reprlocation.toterminal(tw)
class DoctestItem(py.test.collect.Item):
    """Base collection item for a doctest carrier (text file or module)."""

    def __init__(self, path, parent):
        # Name items "ClassName:basename" so failures identify the file.
        name = self.__class__.__name__ + ":" + path.basename
        super(DoctestItem, self).__init__(name=name, parent=parent)
        self.fspath = path

    def repr_failure(self, excinfo, outerr):
        """Render doctest failures with source context and a unified diff;
        fall back to the default representation otherwise."""
        if excinfo.errisinstance(py.compat.doctest.DocTestFailure):
            doctestfailure = excinfo.value
            example = doctestfailure.example
            test = doctestfailure.test
            filename = test.filename
            lineno = example.lineno + 1  # doctest line numbers are 0-based
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = py.compat.doctest.OutputChecker()
            REPORT_UDIFF = py.compat.doctest.REPORT_UDIFF
            filelines = py.path.local(filename).readlines(cr=0)
            # Show up to 10 numbered lines of leading source context.
            i = max(0, lineno - 10)
            lines = []
            for line in filelines[i:lineno]:
                lines.append("%03d %s" % (i+1, line))
                i += 1
            lines += checker.output_difference(example,
                doctestfailure.got, REPORT_UDIFF).split("\n")
            return ReprFailDoctest(reprlocation, lines)
        elif excinfo.errisinstance(py.compat.doctest.UnexpectedException):
            # Re-wrap the exception raised inside the doctest example itself.
            excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
            return super(DoctestItem, self).repr_failure(excinfo, outerr)
        else:
            return super(DoctestItem, self).repr_failure(excinfo, outerr)
class DoctestTextfile(DoctestItem):
    """Doctest item backed by a `test_*.txt` file."""

    def runtest(self):
        # Run all doctests in the file; errors are raised for repr_failure.
        if not self._deprecated_testexecution():
            failed, tot = py.compat.doctest.testfile(
                str(self.fspath), module_relative=False,
                raise_on_error=True, verbose=0)
class DoctestModule(DoctestItem):
    """Doctest item backed by a Python module's docstrings."""

    def runtest(self):
        # Import the module and run its doctests; errors propagate.
        module = self.fspath.pyimport()
        failed, tot = py.compat.doctest.testmod(
            module, raise_on_error=True, verbose=0)
#
# Plugin tests
#
class TestDoctests:
    """Self-tests for the doctest plugin, driven through `testdir`."""

    def test_collect_testtextfile(self, testdir):
        """`test_*.txt` files are collected as DoctestTextfile items."""
        testdir.plugins.append(DoctestPlugin())
        testdir.maketxtfile(whatever="")
        checkfile = testdir.maketxtfile(test_something="""
            alskdjalsdk
            >>> i = 5
            >>> i-1
            4
        """)
        for x in (testdir.tmpdir, checkfile):
            #print "checking that %s returns custom items" % (x,)
            items, events = testdir.inline_genitems(x)
            print events.events
            assert len(items) == 1
            assert isinstance(items[0], DoctestTextfile)

    def test_collect_module(self, testdir):
        """With --doctest-modules, .py files become DoctestModule items."""
        testdir.plugins.append(DoctestPlugin())
        path = testdir.makepyfile(whatever="#")
        for p in (path, testdir.tmpdir):
            items, evrec = testdir.inline_genitems(p, '--doctest-modules')
            print evrec.events
            assert len(items) == 1
            assert isinstance(items[0], DoctestModule)

    def test_simple_doctestfile(self, testdir):
        """A failing doctest in a text file is reported as failed."""
        testdir.plugins.append(DoctestPlugin())
        p = testdir.maketxtfile(test_doc="""
            >>> x = 1
            >>> x == 1
            False
        """)
        events = testdir.inline_run(p)
        ev, = events.getnamed("itemtestreport")
        assert ev.failed

    def test_doctest_unexpected_exception(self, testdir):
        """An exception raised inside an example still yields a failure report."""
        from py.__.test.outcome import Failed
        testdir.plugins.append(DoctestPlugin())
        p = testdir.maketxtfile("""
            >>> i = 0
            >>> i = 1
            >>> x
            2
        """)
        sorter = testdir.inline_run(p)
        events = sorter.getnamed("itemtestreport")
        assert len(events) == 1
        ev, = events
        assert ev.failed
        assert ev.longrepr
        # XXX
        #testitem, = items
        #excinfo = py.test.raises(Failed, "testitem.runtest()")
        #repr = testitem.repr_failure(excinfo, ("", ""))
        #assert repr.reprlocation

    def test_doctestmodule(self, testdir):
        """A failing doctest in a module docstring is reported as failed."""
        testdir.plugins.append(DoctestPlugin())
        p = testdir.makepyfile("""
            '''
                >>> x = 1
                >>> x == 1
                False

            '''
        """)
        events = testdir.inline_run(p, "--doctest-modules")
        ev, = events.getnamed("itemtestreport")
        assert ev.failed

    def test_txtfile_failing(self, testdir):
        """Failure output includes numbered context lines and the udiff."""
        testdir.plugins.append('pytest_doctest')
        p = testdir.maketxtfile("""
            >>> i = 0
            >>> i + 1
            2
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            '001 >>> i = 0',
            '002 >>> i + 1',
            'Expected:',
            "    2",
            "Got:",
            "    1",
            "*test_txtfile_failing.txt:2: DocTestFailure"
        ])
def test_generic(plugintester):
    """Check that the plugin implements only valid py.test hook names."""
    plugintester.apicheck(DoctestPlugin)
| 33.563953 | 74 | 0.57076 | import py
class DoctestPlugin:
    """py.test plugin adding doctest collection (Python 2-era `py` lib)."""

    def pytest_addoption(self, parser):
        """Register the `--doctest-modules` command-line option."""
        parser.addoption("--doctest-modules",
            action="store_true", default=False,
            dest="doctestmodules")

    def pytest_collect_file(self, path, parent):
        """Collect doctests from `.py` modules (only when --doctest-modules is
        set) and from `test_*.txt` files; returns None otherwise."""
        if path.ext == ".py":
            if parent.config.getvalue("doctestmodules"):
                return DoctestModule(path, parent)
        if path.check(fnmatch="test_*.txt"):
            return DoctestTextfile(path, parent)
from py.__.code.excinfo import Repr, ReprFileLocation
class ReprFailDoctest(Repr):
    """Terminal representation of a doctest failure: context lines followed
    by the file location."""

    def __init__(self, reprlocation, lines):
        self.reprlocation = reprlocation
        self.lines = lines

    def toterminal(self, tw):
        # Print the collected source/diff lines, then the location footer.
        for line in self.lines:
            tw.line(line)
        self.reprlocation.toterminal(tw)
class DoctestItem(py.test.collect.Item):
    """Base collection item for a doctest carrier (text file or module)."""

    def __init__(self, path, parent):
        # Name items "ClassName:basename" so failures identify the file.
        name = self.__class__.__name__ + ":" + path.basename
        super(DoctestItem, self).__init__(name=name, parent=parent)
        self.fspath = path

    def repr_failure(self, excinfo, outerr):
        """Render doctest failures with source context and a unified diff;
        fall back to the default representation otherwise."""
        if excinfo.errisinstance(py.compat.doctest.DocTestFailure):
            doctestfailure = excinfo.value
            example = doctestfailure.example
            test = doctestfailure.test
            filename = test.filename
            lineno = example.lineno + 1  # doctest line numbers are 0-based
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = py.compat.doctest.OutputChecker()
            REPORT_UDIFF = py.compat.doctest.REPORT_UDIFF
            filelines = py.path.local(filename).readlines(cr=0)
            # Show up to 10 numbered lines of leading source context.
            i = max(0, lineno - 10)
            lines = []
            for line in filelines[i:lineno]:
                lines.append("%03d %s" % (i+1, line))
                i += 1
            lines += checker.output_difference(example,
                doctestfailure.got, REPORT_UDIFF).split("\n")
            return ReprFailDoctest(reprlocation, lines)
        elif excinfo.errisinstance(py.compat.doctest.UnexpectedException):
            # Re-wrap the exception raised inside the doctest example itself.
            excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
            return super(DoctestItem, self).repr_failure(excinfo, outerr)
        else:
            return super(DoctestItem, self).repr_failure(excinfo, outerr)
def runtest(self):
if not self._deprecated_testexecution():
failed, tot = py.compat.doctest.testfile(
str(self.fspath), module_relative=False,
raise_on_error=True, verbose=0)
class DoctestModule(DoctestItem):
    """Doctest item backed by a Python module's docstrings."""

    def runtest(self):
        # Import the module and run its doctests; errors propagate.
        module = self.fspath.pyimport()
        failed, tot = py.compat.doctest.testmod(
            module, raise_on_error=True, verbose=0)
class TestDoctests:
    """Self-tests for the doctest plugin, driven through `testdir`."""

    def test_collect_testtextfile(self, testdir):
        """`test_*.txt` files are collected as DoctestTextfile items."""
        testdir.plugins.append(DoctestPlugin())
        testdir.maketxtfile(whatever="")
        checkfile = testdir.maketxtfile(test_something="""
            alskdjalsdk
            >>> i = 5
            >>> i-1
            4
        """)
        for x in (testdir.tmpdir, checkfile):
            items, events = testdir.inline_genitems(x)
            print events.events
            assert len(items) == 1
            assert isinstance(items[0], DoctestTextfile)

    def test_collect_module(self, testdir):
        """With --doctest-modules, .py files become DoctestModule items."""
        testdir.plugins.append(DoctestPlugin())
        path = testdir.makepyfile(whatever="#")
        for p in (path, testdir.tmpdir):
            items, evrec = testdir.inline_genitems(p, '--doctest-modules')
            print evrec.events
            assert len(items) == 1
            assert isinstance(items[0], DoctestModule)

    def test_simple_doctestfile(self, testdir):
        """A failing doctest in a text file is reported as failed."""
        testdir.plugins.append(DoctestPlugin())
        p = testdir.maketxtfile(test_doc="""
            >>> x = 1
            >>> x == 1
            False
        """)
        events = testdir.inline_run(p)
        ev, = events.getnamed("itemtestreport")
        assert ev.failed

    def test_doctest_unexpected_exception(self, testdir):
        """An exception raised inside an example still yields a failure report."""
        from py.__.test.outcome import Failed
        testdir.plugins.append(DoctestPlugin())
        p = testdir.maketxtfile("""
            >>> i = 0
            >>> i = 1
            >>> x
            2
        """)
        sorter = testdir.inline_run(p)
        events = sorter.getnamed("itemtestreport")
        assert len(events) == 1
        ev, = events
        assert ev.failed
        assert ev.longrepr

    def test_doctestmodule(self, testdir):
        """A failing doctest in a module docstring is reported as failed."""
        testdir.plugins.append(DoctestPlugin())
        p = testdir.makepyfile("""
            '''
                >>> x = 1
                >>> x == 1
                False

            '''
        """)
        events = testdir.inline_run(p, "--doctest-modules")
        ev, = events.getnamed("itemtestreport")
        assert ev.failed

    def test_txtfile_failing(self, testdir):
        """Failure output includes numbered context lines and the udiff."""
        testdir.plugins.append('pytest_doctest')
        p = testdir.maketxtfile("""
            >>> i = 0
            >>> i + 1
            2
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            '001 >>> i = 0',
            '002 >>> i + 1',
            'Expected:',
            "    2",
            "Got:",
            "    1",
            "*test_txtfile_failing.txt:2: DocTestFailure"
        ])
def test_generic(plugintester):
    """Check that the plugin implements only valid py.test hook names."""
    plugintester.apicheck(DoctestPlugin)
| false | true |
f721ce2ca5cb8a400ff09a032cf59c3f22ccb2ff | 718 | py | Python | ApiRest/__init__.py | daycrom/fiscalberry | 305248e720587753ad65db1aac0339aea30e9c0c | [
"Apache-2.0"
] | 43 | 2017-04-18T01:26:02.000Z | 2022-03-12T14:00:28.000Z | ApiRest/__init__.py | daycrom/fiscalberry | 305248e720587753ad65db1aac0339aea30e9c0c | [
"Apache-2.0"
] | 83 | 2017-04-07T14:38:26.000Z | 2022-03-31T22:45:56.000Z | ApiRest/__init__.py | daycrom/fiscalberry | 305248e720587753ad65db1aac0339aea30e9c0c | [
"Apache-2.0"
] | 40 | 2017-04-25T13:39:19.000Z | 2022-03-12T14:00:50.000Z | #!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = "1.0" | 42.235294 | 77 | 0.753482 |
__version__ = "1.0" | true | true |
f721cee47df093b0c0ba33adbec9679bb5d8c2de | 46,422 | py | Python | google/cloud/firestore_admin_v1/services/firestore_admin/client.py | MShaffar19/python-firestore | 1fb39140c26e06a3bc28e8304c56270b58a15b0b | [
"Apache-2.0"
] | null | null | null | google/cloud/firestore_admin_v1/services/firestore_admin/client.py | MShaffar19/python-firestore | 1fb39140c26e06a3bc28e8304c56270b58a15b0b | [
"Apache-2.0"
] | null | null | null | google/cloud/firestore_admin_v1/services/firestore_admin/client.py | MShaffar19/python-firestore | 1fb39140c26e06a3bc28e8304c56270b58a15b0b | [
"Apache-2.0"
] | 1 | 2020-10-04T12:11:36.000Z | 2020-10-04T12:11:36.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as ga_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.firestore_admin_v1.services.firestore_admin import pagers
from google.cloud.firestore_admin_v1.types import field
from google.cloud.firestore_admin_v1.types import field as gfa_field
from google.cloud.firestore_admin_v1.types import firestore_admin
from google.cloud.firestore_admin_v1.types import index
from google.cloud.firestore_admin_v1.types import index as gfa_index
from google.cloud.firestore_admin_v1.types import operation as gfa_operation
from google.protobuf import empty_pb2 as empty # type: ignore
from .transports.base import FirestoreAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FirestoreAdminGrpcTransport
from .transports.grpc_asyncio import FirestoreAdminGrpcAsyncIOTransport
class FirestoreAdminClientMeta(type):
    """Metaclass for the FirestoreAdmin client.

    Keeps a registry of available transport classes and exposes a
    class-level lookup so support objects (e.g. the transport) can be
    resolved without touching client instances.
    """

    # Insertion order matters: the first entry is the default transport.
    _transport_registry = OrderedDict(
        [
            ("grpc", FirestoreAdminGrpcTransport),
            ("grpc_asyncio", FirestoreAdminGrpcAsyncIOTransport),
        ]
    )  # type: Dict[str, Type[FirestoreAdminTransport]]

    def get_transport_class(cls, label: str = None,) -> Type[FirestoreAdminTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # An explicit label selects that transport; raises KeyError if unknown.
        if label:
            return cls._transport_registry[label]

        # Otherwise fall back to the first registered transport.
        return next(iter(cls._transport_registry.values()))
class FirestoreAdminClient(metaclass=FirestoreAdminClientMeta):
"""Operations are created by service ``FirestoreAdmin``, but are
accessed via service ``google.longrunning.Operations``.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "firestore.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
{@api.name}: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@staticmethod
def field_path(project: str, database: str, collection: str, field: str,) -> str:
"""Return a fully-qualified field string."""
return "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}".format(
project=project, database=database, collection=collection, field=field,
)
@staticmethod
def parse_field_path(path: str) -> Dict[str, str]:
"""Parse a field path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/databases/(?P<database>.+?)/collectionGroups/(?P<collection>.+?)/fields/(?P<field>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def index_path(project: str, database: str, collection: str, index: str,) -> str:
"""Return a fully-qualified index string."""
return "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}".format(
project=project, database=database, collection=collection, index=index,
)
@staticmethod
def parse_index_path(path: str) -> Dict[str, str]:
"""Parse a index path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/databases/(?P<database>.+?)/collectionGroups/(?P<collection>.+?)/indexes/(?P<index>.+?)$",
path,
)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[credentials.Credentials] = None,
        transport: Union[str, FirestoreAdminTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the firestore admin client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.FirestoreAdminTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (client_options_lib.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict, and fall back to an
        # empty ClientOptions so attribute access below never hits None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # NOTE(review): util.strtobool raises ValueError on unrecognized env
        # values (anything other than true/false-like strings) — confirm that
        # surfacing that error here is intended.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Caller supplied an explicit cert source; grpc is imported
                # lazily so the module loads without grpc installed.
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # No explicit source: probe for a default client certificate.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        # Precedence: explicit client_options.api_endpoint wins over the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS endpoint only when a client cert was found.
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, FirestoreAdminTransport):
            # transport is a FirestoreAdminTransport instance.
            # A pre-built transport already carries credentials and scopes, so
            # supplying them again here would be ambiguous — reject it.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            # transport is a registry label (or None for the default); build it.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                ssl_channel_credentials=ssl_credentials,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
def create_index(
self,
request: firestore_admin.CreateIndexRequest = None,
*,
parent: str = None,
index: gfa_index.Index = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
r"""Creates a composite index. This returns a
[google.longrunning.Operation][google.longrunning.Operation]
which may be used to track the status of the creation. The
metadata for the operation will be the type
[IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata].
Args:
request (:class:`~.firestore_admin.CreateIndexRequest`):
The request object. The request for
[FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex].
parent (:class:`str`):
Required. A parent name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
index (:class:`~.gfa_index.Index`):
Required. The composite index to
create.
This corresponds to the ``index`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.ga_operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:``~.gfa_index.Index``: Cloud Firestore indexes
enable simple and complex queries against documents in a
database.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, index])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.CreateIndexRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.CreateIndexRequest):
request = firestore_admin.CreateIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if index is not None:
request.index = index
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_index]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_index.Index,
metadata_type=gfa_operation.IndexOperationMetadata,
)
# Done; return the response.
return response
def list_indexes(
self,
request: firestore_admin.ListIndexesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListIndexesPager:
r"""Lists composite indexes.
Args:
request (:class:`~.firestore_admin.ListIndexesRequest`):
The request object. The request for
[FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes].
parent (:class:`str`):
Required. A parent name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListIndexesPager:
The response for
[FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.ListIndexesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.ListIndexesRequest):
request = firestore_admin.ListIndexesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_indexes]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListIndexesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_index(
self,
request: firestore_admin.GetIndexRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> index.Index:
r"""Gets a composite index.
Args:
request (:class:`~.firestore_admin.GetIndexRequest`):
The request object. The request for
[FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex].
name (:class:`str`):
Required. A name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.index.Index:
Cloud Firestore indexes enable simple
and complex queries against documents in
a database.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.GetIndexRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.GetIndexRequest):
request = firestore_admin.GetIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_index]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_index(
self,
request: firestore_admin.DeleteIndexRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a composite index.
Args:
request (:class:`~.firestore_admin.DeleteIndexRequest`):
The request object. The request for
[FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex].
name (:class:`str`):
Required. A name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.DeleteIndexRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.DeleteIndexRequest):
request = firestore_admin.DeleteIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_index]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def get_field(
self,
request: firestore_admin.GetFieldRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> field.Field:
r"""Gets the metadata and configuration for a Field.
Args:
request (:class:`~.firestore_admin.GetFieldRequest`):
The request object. The request for
[FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField].
name (:class:`str`):
Required. A name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.field.Field:
Represents a single field in the
database.
Fields are grouped by their "Collection
Group", which represent all collections
in the database with the same id.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.GetFieldRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.GetFieldRequest):
request = firestore_admin.GetFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_field]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_field(
self,
request: firestore_admin.UpdateFieldRequest = None,
*,
field: gfa_field.Field = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
r"""Updates a field configuration. Currently, field updates apply
only to single field index configuration. However, calls to
[FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]
should provide a field mask to avoid changing any configuration
that the caller isn't aware of. The field mask should be
specified as: ``{ paths: "index_config" }``.
This call returns a
[google.longrunning.Operation][google.longrunning.Operation]
which may be used to track the status of the field update. The
metadata for the operation will be the type
[FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata].
To configure the default field settings for the database, use
the special ``Field`` with resource name:
``projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*``.
Args:
request (:class:`~.firestore_admin.UpdateFieldRequest`):
The request object. The request for
[FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField].
field (:class:`~.gfa_field.Field`):
Required. The field to be updated.
This corresponds to the ``field`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.ga_operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:``~.gfa_field.Field``: Represents a single field
in the database.
Fields are grouped by their "Collection Group", which
represent all collections in the database with the same
id.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([field])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.UpdateFieldRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.UpdateFieldRequest):
request = firestore_admin.UpdateFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if field is not None:
request.field = field
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_field]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("field.name", request.field.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_field.Field,
metadata_type=gfa_operation.FieldOperationMetadata,
)
# Done; return the response.
return response
def list_fields(
self,
request: firestore_admin.ListFieldsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFieldsPager:
r"""Lists the field configuration and metadata for this database.
Currently,
[FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
only supports listing fields that have been explicitly
overridden. To issue this query, call
[FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]
with the filter set to ``indexConfig.usesAncestorConfig:false``.
Args:
request (:class:`~.firestore_admin.ListFieldsRequest`):
The request object. The request for
[FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields].
parent (:class:`str`):
Required. A parent name of the form
``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListFieldsPager:
The response for
[FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.ListFieldsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.ListFieldsRequest):
request = firestore_admin.ListFieldsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_fields]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListFieldsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def export_documents(
self,
request: firestore_admin.ExportDocumentsRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
r"""Exports a copy of all or a subset of documents from
Google Cloud Firestore to another storage system, such
as Google Cloud Storage. Recent updates to documents may
not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed
via the Operation resource that is created. The output
of an export may only be used once the associated
operation is done. If an export operation is cancelled
before completion it may leave partial data behind in
Google Cloud Storage.
Args:
request (:class:`~.firestore_admin.ExportDocumentsRequest`):
The request object. The request for
[FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments].
name (:class:`str`):
Required. Database to export. Should be of the form:
``projects/{project_id}/databases/{database_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.ga_operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:``~.gfa_operation.ExportDocumentsResponse``:
Returned in the
[google.longrunning.Operation][google.longrunning.Operation]
response field.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.ExportDocumentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.ExportDocumentsRequest):
request = firestore_admin.ExportDocumentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.export_documents]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_operation.ExportDocumentsResponse,
metadata_type=gfa_operation.ExportDocumentsMetadata,
)
# Done; return the response.
return response
def import_documents(
self,
request: firestore_admin.ImportDocumentsRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
r"""Imports documents into Google Cloud Firestore.
Existing documents with the same name are overwritten.
The import occurs in the background and its progress can
be monitored and managed via the Operation resource that
is created. If an ImportDocuments operation is
cancelled, it is possible that a subset of the data has
already been imported to Cloud Firestore.
Args:
request (:class:`~.firestore_admin.ImportDocumentsRequest`):
The request object. The request for
[FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments].
name (:class:`str`):
Required. Database to import into. Should be of the
form: ``projects/{project_id}/databases/{database_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.ga_operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:``~.empty.Empty``: A generic empty message that
you can re-use to avoid defining duplicated empty
messages in your APIs. A typical example is to use it as
the request or the response type of an API method. For
instance:
::
service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
}
The JSON representation for ``Empty`` is empty JSON
object ``{}``.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore_admin.ImportDocumentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore_admin.ImportDocumentsRequest):
request = firestore_admin.ImportDocumentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_documents]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
empty.Empty,
metadata_type=gfa_operation.ImportDocumentsMetadata,
)
# Done; return the response.
return response
# Resolve the installed package version for the x-goog-api-client header;
# fall back to an unversioned ClientInfo when distribution metadata is not
# available (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-firestore-admin",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public API of this module.
__all__ = ("FirestoreAdminClient",)
| 42.588991 | 131 | 0.625652 |
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials
from google.auth.transport import mtls
from google.auth.transport.grpc import SslCredentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.api_core import operation as ga_operation
from google.api_core import operation_async
from google.cloud.firestore_admin_v1.services.firestore_admin import pagers
from google.cloud.firestore_admin_v1.types import field
from google.cloud.firestore_admin_v1.types import field as gfa_field
from google.cloud.firestore_admin_v1.types import firestore_admin
from google.cloud.firestore_admin_v1.types import index
from google.cloud.firestore_admin_v1.types import index as gfa_index
from google.cloud.firestore_admin_v1.types import operation as gfa_operation
from google.protobuf import empty_pb2 as empty
from .transports.base import FirestoreAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FirestoreAdminGrpcTransport
from .transports.grpc_asyncio import FirestoreAdminGrpcAsyncIOTransport
class FirestoreAdminClientMeta(type):
_transport_registry = (
OrderedDict()
)
_transport_registry["grpc"] = FirestoreAdminGrpcTransport
_transport_registry["grpc_asyncio"] = FirestoreAdminGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[FirestoreAdminTransport]:
if label:
return cls._transport_registry[label]
return next(iter(cls._transport_registry.values()))
class FirestoreAdminClient(metaclass=FirestoreAdminClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "firestore.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@staticmethod
def field_path(project: str, database: str, collection: str, field: str,) -> str:
return "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}".format(
project=project, database=database, collection=collection, field=field,
)
@staticmethod
def parse_field_path(path: str) -> Dict[str, str]:
m = re.match(
r"^projects/(?P<project>.+?)/databases/(?P<database>.+?)/collectionGroups/(?P<collection>.+?)/fields/(?P<field>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def index_path(project: str, database: str, collection: str, index: str,) -> str:
return "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}".format(
project=project, database=database, collection=collection, index=index,
)
@staticmethod
def parse_index_path(path: str) -> Dict[str, str]:
m = re.match(
r"^projects/(?P<project>.+?)/databases/(?P<database>.+?)/collectionGroups/(?P<collection>.+?)/indexes/(?P<index>.+?)$",
path,
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, FirestoreAdminTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
if isinstance(transport, FirestoreAdminTransport):
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, "
"provide its scopes directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
ssl_channel_credentials=ssl_credentials,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def create_index(
self,
request: firestore_admin.CreateIndexRequest = None,
*,
parent: str = None,
index: gfa_index.Index = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
has_flattened_params = any([parent, index])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.CreateIndexRequest):
request = firestore_admin.CreateIndexRequest(request)
if parent is not None:
request.parent = parent
if index is not None:
request.index = index
rpc = self._transport._wrapped_methods[self._transport.create_index]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_index.Index,
metadata_type=gfa_operation.IndexOperationMetadata,
)
return response
def list_indexes(
self,
request: firestore_admin.ListIndexesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListIndexesPager:
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.ListIndexesRequest):
request = firestore_admin.ListIndexesRequest(request)
if parent is not None:
request.parent = parent
rpc = self._transport._wrapped_methods[self._transport.list_indexes]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = pagers.ListIndexesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
return response
def get_index(
self,
request: firestore_admin.GetIndexRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> index.Index:
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.GetIndexRequest):
request = firestore_admin.GetIndexRequest(request)
if name is not None:
request.name = name
rpc = self._transport._wrapped_methods[self._transport.get_index]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
return response
def delete_index(
self,
request: firestore_admin.DeleteIndexRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.DeleteIndexRequest):
request = firestore_admin.DeleteIndexRequest(request)
if name is not None:
request.name = name
rpc = self._transport._wrapped_methods[self._transport.delete_index]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def get_field(
self,
request: firestore_admin.GetFieldRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> field.Field:
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.GetFieldRequest):
request = firestore_admin.GetFieldRequest(request)
if name is not None:
request.name = name
rpc = self._transport._wrapped_methods[self._transport.get_field]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
return response
def update_field(
self,
request: firestore_admin.UpdateFieldRequest = None,
*,
field: gfa_field.Field = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
has_flattened_params = any([field])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.UpdateFieldRequest):
request = firestore_admin.UpdateFieldRequest(request)
if field is not None:
request.field = field
rpc = self._transport._wrapped_methods[self._transport.update_field]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("field.name", request.field.name),)
),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_field.Field,
metadata_type=gfa_operation.FieldOperationMetadata,
)
return response
def list_fields(
self,
request: firestore_admin.ListFieldsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFieldsPager:
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.ListFieldsRequest):
request = firestore_admin.ListFieldsRequest(request)
if parent is not None:
request.parent = parent
rpc = self._transport._wrapped_methods[self._transport.list_fields]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = pagers.ListFieldsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
return response
def export_documents(
self,
request: firestore_admin.ExportDocumentsRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.ExportDocumentsRequest):
request = firestore_admin.ExportDocumentsRequest(request)
if name is not None:
request.name = name
rpc = self._transport._wrapped_methods[self._transport.export_documents]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
gfa_operation.ExportDocumentsResponse,
metadata_type=gfa_operation.ExportDocumentsMetadata,
)
return response
def import_documents(
self,
request: firestore_admin.ImportDocumentsRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ga_operation.Operation:
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, firestore_admin.ImportDocumentsRequest):
request = firestore_admin.ImportDocumentsRequest(request)
if name is not None:
request.name = name
rpc = self._transport._wrapped_methods[self._transport.import_documents]
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = ga_operation.from_gapic(
response,
self._transport.operations_client,
empty.Empty,
metadata_type=gfa_operation.ImportDocumentsMetadata,
)
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-firestore-admin",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FirestoreAdminClient",)
| true | true |
f721cf98bf4bca78c58d38f387c8dd63e1aabe26 | 279 | py | Python | frontend/urls.py | 0b01/autobasstab-web | b5ad0cef3d160b80ef1c91632b119e83325572d3 | [
"MIT"
] | 4 | 2020-10-07T19:25:41.000Z | 2021-08-13T10:23:29.000Z | frontend/urls.py | 0b01/autobasstab-web | b5ad0cef3d160b80ef1c91632b119e83325572d3 | [
"MIT"
] | null | null | null | frontend/urls.py | 0b01/autobasstab-web | b5ad0cef3d160b80ef1c91632b119e83325572d3 | [
"MIT"
] | 1 | 2021-06-29T07:52:05.000Z | 2021-06-29T07:52:05.000Z | from django.urls import path
from django.views.generic import TemplateView, RedirectView
from . import views
# URL routes for the frontend app.
urlpatterns = [
    # Single-page-app entry point.
    path('', views.index),
    # The CREPE model weights are hosted externally; serve a permanent
    # redirect so the frontend can fetch them via a stable local path.
    path('model.json', RedirectView.as_view(url='http://rickyhan.com/static/crepe_model_full/model.json', permanent=True)),
]
| 31 | 123 | 0.752688 | from django.urls import path
from django.views.generic import TemplateView, RedirectView
from . import views
urlpatterns = [
path('', views.index),
path('model.json', RedirectView.as_view(url='http://rickyhan.com/static/crepe_model_full/model.json', permanent=True)),
]
| true | true |
f721d092cf36ee241d38b77f0107815dfdfd9986 | 30,721 | py | Python | tests/__init__.py | ClementAcher/dd-trace-py | f36519313c64d912ab9010094205ec3a82f1e493 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | ClementAcher/dd-trace-py | f36519313c64d912ab9010094205ec3a82f1e493 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | ClementAcher/dd-trace-py | f36519313c64d912ab9010094205ec3a82f1e493 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import contextlib
from contextlib import contextmanager
import inspect
import os
import sys
from typing import List
import pytest
import ddtrace
from ddtrace import Span
from ddtrace import Tracer
from ddtrace.compat import httplib
from ddtrace.compat import parse
from ddtrace.compat import to_unicode
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.encoding import JSONEncoder
from ddtrace.ext import http
from ddtrace.internal._encoding import MsgpackEncoder
from ddtrace.internal.dogstatsd import get_dogstatsd_client
from ddtrace.internal.writer import AgentWriter
from ddtrace.vendor import wrapt
from tests.subprocesstest import SubprocessTestCase
# Sentinel for span-structure assertions meaning "do not assert anything
# about this span's children" (as opposed to asserting it has none).
NO_CHILDREN = object()
def assert_is_measured(span):
    """Assert that ``span`` carries the ``_dd.measured`` metric set to 1."""
    # The measured flag must live in `metrics` (never in `meta`) and be 1.
    measured_in_metrics = SPAN_MEASURED_KEY in span.metrics
    measured_in_meta = SPAN_MEASURED_KEY in span.meta
    assert measured_in_metrics
    assert not measured_in_meta
    assert span.get_metric(SPAN_MEASURED_KEY) == 1
def assert_is_not_measured(span):
    """Assert that ``span`` does not mark itself as measured."""
    # _dd.measured must never appear in meta; if it appears in metrics at
    # all, it must be explicitly 0.
    assert SPAN_MEASURED_KEY not in span.meta
    if SPAN_MEASURED_KEY in span.metrics:
        assert span.get_metric(SPAN_MEASURED_KEY) == 0
def assert_span_http_status_code(span, code):
    """Assert that ``span``'s ``http.status_code`` tag equals ``code``."""
    expected = str(code)  # tags are always stored as strings
    actual = span.get_tag(http.STATUS_CODE)
    assert actual == expected, "%r != %r" % (actual, expected)
@contextlib.contextmanager
def override_env(env):
    """
    Temporarily layer ``env`` on top of ``os.environ``, restoring the
    original environment on exit::

        >>> with self.override_env(dict(DATADOG_TRACE_DEBUG=True)):
            # Your test
    """
    snapshot = dict(os.environ)
    os.environ.update(env)
    try:
        yield
    finally:
        # Wipe everything (including keys added inside the block), then
        # restore the exact original environment.
        os.environ.clear()
        os.environ.update(snapshot)
@contextlib.contextmanager
def override_global_config(values):
    """
    Temporarily override attributes of the global ``ddtrace.config``::

        >>> with self.override_global_config(dict(name=value,...)):
            # Your test
    """
    # Only these global settings may be overridden here; integration
    # settings are handled by ``override_config`` instead.
    overridable = [
        "analytics_enabled",
        "report_hostname",
        "health_metrics_enabled",
        "env",
        "version",
        "service",
    ]

    # Snapshot every overridable setting, then apply the requested ones.
    saved = {attr: getattr(ddtrace.config, attr) for attr in overridable}
    for attr, new_value in values.items():
        if attr in overridable:
            setattr(ddtrace.config, attr, new_value)
    try:
        yield
    finally:
        # Restore every setting to its snapshotted value.
        for attr, old_value in saved.items():
            setattr(ddtrace.config, attr, old_value)
@contextlib.contextmanager
def override_config(integration, values):
    """
    Temporarily override an integration's configuration values::

        >>> with self.override_config('flask', dict(service_name='test-service')):
            # Your test
    """
    cfg = getattr(ddtrace.config, integration)
    # Snapshot only the keys we are about to change.
    saved = {key: cfg.get(key) for key in values}
    cfg.update(values)
    try:
        yield
    finally:
        cfg.update(saved)
@contextlib.contextmanager
def override_http_config(integration, values):
    """
    Temporarily override an integration's HTTP configuration::

        >>> with self.override_http_config('flask', dict(trace_query_string=True)):
            # Your test
    """
    http_cfg = getattr(ddtrace.config, integration).http
    saved = {}
    for attr, new_value in values.items():
        saved[attr] = getattr(http_cfg, attr)
        setattr(http_cfg, attr, new_value)
    try:
        yield
    finally:
        for attr, old_value in saved.items():
            setattr(http_cfg, attr, old_value)
@contextlib.contextmanager
def override_sys_modules(modules):
    """
    Temporarily overlay ``modules`` onto ``sys.modules``, restoring the
    original module table on exit::

        >>> mock_module = mock.MagicMock()
        >>> mock_module.fn.side_effect = lambda: 'test'
        >>> with self.override_sys_modules(dict(A=mock_module)):
            # Your test
    """
    snapshot = dict(sys.modules)
    sys.modules.update(modules)
    try:
        yield
    finally:
        # Drop anything injected/imported inside the block, then restore.
        sys.modules.clear()
        sys.modules.update(snapshot)
class BaseTestCase(SubprocessTestCase):
    """
    BaseTestCase extends ``unittest.TestCase`` to provide some useful helpers/assertions

    Example::

        from tests import BaseTestCase

        class MyTestCase(BaseTestCase):
            def test_case(self):
                with self.override_config('flask', dict(distributed_tracing_enabled=True)):
                    pass
    """

    # Re-expose the module-level override helpers and span assertions as
    # static methods so tests can call them via ``self.`` without imports.
    override_env = staticmethod(override_env)
    override_global_config = staticmethod(override_global_config)
    override_config = staticmethod(override_config)
    override_http_config = staticmethod(override_http_config)
    override_sys_modules = staticmethod(override_sys_modules)
    assert_is_measured = staticmethod(assert_is_measured)
    assert_is_not_measured = staticmethod(assert_is_not_measured)
class TestSpanContainer(object):
    """
    Helper class for a container of Spans.

    Subclasses of this class must implement a `get_spans` method::

        def get_spans(self):
            return []

    This class provides methods and assertions over a list of spans::

        class TestCases(BaseTracerTestCase):
            def test_spans(self):
                # TODO: Create spans

                self.assert_has_spans()
                self.assert_span_count(3)
                self.assert_structure( ... )

                # Grab only the `requests.request` spans
                spans = self.filter_spans(name='requests.request')
    """

    def _ensure_test_spans(self, spans):
        """
        internal helper to ensure the list of spans are all :class:`tests.utils.span.TestSpan`

        :param spans: List of :class:`ddtrace.span.Span` or :class:`tests.utils.span.TestSpan`
        :type spans: list
        :returns: A list of :class:`tests.utils.span.TestSpan`
        :rtype: list
        """
        return [span if isinstance(span, TestSpan) else TestSpan(span) for span in spans]

    @property
    def spans(self):
        # All spans from `get_spans`, normalized to `TestSpan` wrappers.
        return self._ensure_test_spans(self.get_spans())

    def get_spans(self):
        """subclass required property"""
        raise NotImplementedError

    def _build_tree(self, root):
        """helper to build a tree structure for the provided root span"""
        children = []
        for span in self.spans:
            if span.parent_id == root.span_id:
                children.append(self._build_tree(span))

        return TestSpanNode(root, children)

    def get_root_span(self):
        """
        Helper to get the root span from the list of spans in this container

        :returns: The root span node
        :rtype: :class:`tests.utils.span.TestSpanNode`
        :raises AssertionError: if no root span, or multiple root spans, were found
        """
        root = None
        for span in self.spans:
            if span.parent_id is None:
                if root is not None:
                    raise AssertionError("Multiple root spans found {0!r} {1!r}".format(root, span))
                root = span

        assert root, "No root span found in {0!r}".format(self.spans)

        return self._build_tree(root)

    def get_root_spans(self):
        """
        Helper to get all root spans from the list of spans in this container

        :returns: The root span nodes sorted by start time; an empty list if none were found
        :rtype: list of :class:`tests.utils.span.TestSpanNode`
        """
        roots = []
        for span in self.spans:
            if span.parent_id is None:
                roots.append(self._build_tree(span))

        return sorted(roots, key=lambda s: s.start)

    def assert_trace_count(self, count):
        """Assert the number of unique trace ids this container has"""
        trace_count = len(self.get_root_spans())
        assert trace_count == count, "Trace count {0} != {1}".format(trace_count, count)

    def assert_span_count(self, count):
        """Assert this container has the expected number of spans"""
        assert len(self.spans) == count, "Span count {0} != {1}".format(len(self.spans), count)

    def assert_has_spans(self):
        """Assert this container has spans"""
        assert len(self.spans), "No spans found"

    def assert_has_no_spans(self):
        """Assert this container does not have any spans"""
        assert len(self.spans) == 0, "Span count {0}".format(len(self.spans))

    def filter_spans(self, *args, **kwargs):
        """
        Helper to filter current spans by provided parameters.

        This function will yield all spans whose `TestSpan.matches` function return `True`.

        :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches`
        :type args: list
        :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches`
        :type kwargs: dict
        :returns: generator for the matched :class:`tests.utils.span.TestSpan`
        :rtype: generator
        """
        for span in self.spans:
            # ensure we have a TestSpan
            if not isinstance(span, TestSpan):
                span = TestSpan(span)

            if span.matches(*args, **kwargs):
                yield span

    def find_span(self, *args, **kwargs):
        """
        Find a single span matches the provided filter parameters.

        This function will find the first span whose `TestSpan.matches` function return `True`.

        :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches`
        :type args: list
        :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches`
        :type kwargs: dict
        :returns: The first matching span
        :rtype: :class:`tests.TestSpan`
        """
        span = next(self.filter_spans(*args, **kwargs), None)
        assert span is not None, "No span found for filter {0!r} {1!r}, have {2} spans".format(
            args, kwargs, len(self.spans)
        )

        return span
class TracerTestCase(TestSpanContainer, BaseTestCase):
    """
    BaseTracerTestCase is a base test case for when you need access to a dummy tracer and span assertions
    """

    def setUp(self):
        """Before each test case, setup a dummy tracer to use"""
        self.tracer = DummyTracer()

        super(TracerTestCase, self).setUp()

    def tearDown(self):
        """After each test case, reset and remove the dummy tracer"""
        super(TracerTestCase, self).tearDown()

        self.reset()
        delattr(self, "tracer")

    def get_spans(self):
        """Required subclass method for TestSpanContainer"""
        return self.tracer.writer.spans

    def pop_spans(self):
        # type: () -> List[Span]
        """Drain and return all spans captured by the dummy tracer."""
        return self.tracer.pop()

    def pop_traces(self):
        # type: () -> List[List[Span]]
        """Drain and return all traces (lists of spans) captured so far."""
        return self.tracer.pop_traces()

    def reset(self):
        """Helper to reset the existing list of spans created"""
        self.tracer.writer.pop()

    def trace(self, *args, **kwargs):
        """Wrapper for self.tracer.trace that returns a TestSpan"""
        return TestSpan(self.tracer.trace(*args, **kwargs))

    def start_span(self, *args, **kwargs):
        """Helper for self.tracer.start_span that returns a TestSpan"""
        return TestSpan(self.tracer.start_span(*args, **kwargs))

    def assert_structure(self, root, children=NO_CHILDREN):
        """Helper to call TestSpanNode.assert_structure on the current root span"""
        root_span = self.get_root_span()
        root_span.assert_structure(root, children)

    @contextlib.contextmanager
    def override_global_tracer(self, tracer=None):
        """Temporarily swap the global ``ddtrace.tracer`` for ``tracer``
        (defaults to this test case's dummy tracer), restoring on exit."""
        original = ddtrace.tracer
        tracer = tracer or self.tracer
        setattr(ddtrace, "tracer", tracer)
        try:
            yield
        finally:
            setattr(ddtrace, "tracer", original)
class DummyWriter(AgentWriter):
    """An in-memory writer used in tests: collects spans instead of sending
    them to an agent. Not thread-safe."""

    def __init__(self, *args, **kwargs):
        super(DummyWriter, self).__init__(*args, **kwargs)

        # Captured spans/traces, plus both encoders so every write also
        # exercises JSON and msgpack encoding like the real send path.
        self.spans = []
        self.traces = []
        self.json_encoder = JSONEncoder()
        self.msgpack_encoder = MsgpackEncoder()

    def write(self, spans=None):
        if not spans:
            return

        # The trace encoders expect a list of traces, mirroring the real
        # execution path.
        trace = [spans]
        self.json_encoder.encode_traces(trace)
        self.msgpack_encoder.encode_traces(trace)

        self.spans += spans
        self.traces += trace

    def pop(self):
        # type: () -> List[Span]
        popped, self.spans = self.spans, []
        return popped

    def pop_traces(self):
        # type: () -> List[List[Span]]
        popped, self.traces = self.traces, []
        return popped
class DummyTracer(Tracer):
    """
    DummyTracer is a tracer which uses the DummyWriter by default
    """

    def __init__(self):
        super(DummyTracer, self).__init__()
        self._update_writer()

    def _update_writer(self):
        # Track which writer the DummyWriter was created with, used by
        # some tests
        if not isinstance(self.writer, DummyWriter):
            self.original_writer = self.writer
        # Carry the current writer's sampler/endpoint settings over to the
        # replacement DummyWriter so behavior stays comparable.
        if isinstance(self.writer, AgentWriter):
            self.writer = DummyWriter(
                agent_url=self.writer.agent_url,
                priority_sampler=self.writer._priority_sampler,
                dogstatsd=get_dogstatsd_client(self._dogstatsd_url),
            )
        else:
            self.writer = DummyWriter(
                priority_sampler=self.writer._priority_sampler,
            )

    def pop(self):
        # type: () -> List[Span]
        # Drain and return all spans captured by the dummy writer.
        return self.writer.pop()

    def pop_traces(self):
        # type: () -> List[List[Span]]
        # Drain and return all captured traces (lists of spans).
        return self.writer.pop_traces()

    def configure(self, *args, **kwargs):
        super(DummyTracer, self).configure(*args, **kwargs)
        # `.configure()` may reset the writer
        self._update_writer()
class TestSpan(Span):
"""
Test wrapper for a :class:`ddtrace.span.Span` that provides additional functions and assertions
Example::
span = tracer.trace('my.span')
span = TestSpan(span)
if span.matches(name='my.span'):
print('matches')
# Raises an AssertionError
span.assert_matches(name='not.my.span', meta={'system.pid': getpid()})
"""
def __init__(self, span):
"""
Constructor for TestSpan
:param span: The :class:`ddtrace.span.Span` to wrap
:type span: :class:`ddtrace.span.Span`
"""
if isinstance(span, TestSpan):
span = span._span
# DEV: Use `object.__setattr__` to by-pass this class's `__setattr__`
object.__setattr__(self, "_span", span)
def __getattr__(self, key):
"""
First look for property on the base :class:`ddtrace.span.Span` otherwise return this object's attribute
"""
if hasattr(self._span, key):
return getattr(self._span, key)
return self.__getattribute__(key)
def __setattr__(self, key, value):
"""Pass through all assignment to the base :class:`ddtrace.span.Span`"""
return setattr(self._span, key, value)
def __eq__(self, other):
"""
Custom equality code to ensure we are using the base :class:`ddtrace.span.Span.__eq__`
:param other: The object to check equality with
:type other: object
:returns: True if equal, False otherwise
:rtype: bool
"""
if isinstance(other, TestSpan):
return other._span == self._span
elif isinstance(other, Span):
return other == self._span
return other == self
def matches(self, **kwargs):
"""
Helper function to check if this span's properties matches the expected.
Example::
span = TestSpan(span)
span.matches(name='my.span', resource='GET /')
:param kwargs: Property/Value pairs to evaluate on this span
:type kwargs: dict
:returns: True if the arguments passed match, False otherwise
:rtype: bool
"""
for name, value in kwargs.items():
# Special case for `meta`
if name == "meta" and not self.meta_matches(value):
return False
# Ensure it has the property first
if not hasattr(self, name):
return False
# Ensure the values match
if getattr(self, name) != value:
return False
return True
def meta_matches(self, meta, exact=False):
"""
Helper function to check if this span's meta matches the expected
Example::
span = TestSpan(span)
span.meta_matches({'system.pid': getpid()})
:param meta: Property/Value pairs to evaluate on this span
:type meta: dict
:param exact: Whether to do an exact match on the meta values or not, default: False
:type exact: bool
:returns: True if the arguments passed match, False otherwise
:rtype: bool
"""
if exact:
return self.meta == meta
for key, value in meta.items():
if key not in self.meta:
return False
if self.meta[key] != value:
return False
return True
def assert_matches(self, **kwargs):
"""
Assertion method to ensure this span's properties match as expected
Example::
span = TestSpan(span)
span.assert_matches(name='my.span')
:param kwargs: Property/Value pairs to evaluate on this span
:type kwargs: dict
:raises: AssertionError
"""
for name, value in kwargs.items():
# Special case for `meta`
if name == "meta":
self.assert_meta(value)
elif name == "metrics":
self.assert_metrics(value)
else:
assert hasattr(self, name), "{0!r} does not have property {1!r}".format(self, name)
assert getattr(self, name) == value, "{0!r} property {1}: {2!r} != {3!r}".format(
self, name, getattr(self, name), value
)
def assert_meta(self, meta, exact=False):
"""
Assertion method to ensure this span's meta match as expected
Example::
span = TestSpan(span)
span.assert_meta({'system.pid': getpid()})
:param meta: Property/Value pairs to evaluate on this span
:type meta: dict
:param exact: Whether to do an exact match on the meta values or not, default: False
:type exact: bool
:raises: AssertionError
"""
if exact:
assert self.meta == meta
else:
for key, value in meta.items():
assert key in self.meta, "{0} meta does not have property {1!r}".format(self, key)
assert self.meta[key] == value, "{0} meta property {1!r}: {2!r} != {3!r}".format(
self, key, self.meta[key], value
)
def assert_metrics(self, metrics, exact=False):
"""
Assertion method to ensure this span's metrics match as expected
Example::
span = TestSpan(span)
span.assert_metrics({'_dd1.sr.eausr': 1})
:param metrics: Property/Value pairs to evaluate on this span
:type metrics: dict
:param exact: Whether to do an exact match on the metrics values or not, default: False
:type exact: bool
:raises: AssertionError
"""
if exact:
assert self.metrics == metrics
else:
for key, value in metrics.items():
assert key in self.metrics, "{0} metrics does not have property {1!r}".format(self, key)
assert self.metrics[key] == value, "{0} metrics property {1!r}: {2!r} != {3!r}".format(
self, key, self.metrics[key], value
)
class TracerSpanContainer(TestSpanContainer):
"""
A class to wrap a :class:`tests.utils.tracer.DummyTracer` with a
:class:`tests.utils.span.TestSpanContainer` to use in tests
"""
def __init__(self, tracer):
self.tracer = tracer
super(TracerSpanContainer, self).__init__()
def get_spans(self):
"""
Overridden method to return all spans attached to this tracer
:returns: List of spans attached to this tracer
:rtype: list
"""
return self.tracer.writer.spans
def pop(self):
return self.tracer.pop()
def pop_traces(self):
return self.tracer.pop_traces()
def reset(self):
"""Helper to reset the existing list of spans created"""
self.tracer.pop()
class TestSpanNode(TestSpan, TestSpanContainer):
"""
A :class:`tests.utils.span.TestSpan` which is used as part of a span tree.
Each :class:`tests.utils.span.TestSpanNode` represents the current :class:`ddtrace.span.Span`
along with any children who have that span as it's parent.
This class can be used to assert on the parent/child relationships between spans.
Example::
class TestCase(BaseTestCase):
def test_case(self):
# TODO: Create spans
self.assert_structure( ... )
tree = self.get_root_span()
# Find the first child of the root span with the matching name
request = tree.find_span(name='requests.request')
# Assert the parent/child relationship of this `request` span
request.assert_structure( ... )
"""
def __init__(self, root, children=None):
super(TestSpanNode, self).__init__(root)
object.__setattr__(self, "_children", children or [])
def get_spans(self):
"""required subclass property, returns this spans children"""
return self._children
def assert_structure(self, root, children=NO_CHILDREN):
"""
Assertion to assert on the structure of this node and it's children.
This assertion takes a dictionary of properties to assert for this node
along with a list of assertions to make for it's children.
Example::
def test_case(self):
# Assert the following structure
#
# One root_span, with two child_spans, one with a requests.request span
#
# | root_span |
# | child_span | | child_span |
# | requests.request |
self.assert_structure(
# Root span with two child_span spans
dict(name='root_span'),
(
# Child span with one child of it's own
(
dict(name='child_span'),
# One requests.request span with no children
(
dict(name='requests.request'),
),
),
# Child span with no children
dict(name='child_span'),
),
)
:param root: Properties to assert for this root span, these are passed to
:meth:`tests.utils.span.TestSpan.assert_matches`
:type root: dict
:param children: List of child assertions to make, if children is None then do not make any
assertions about this nodes children. Each list element must be a list with 2 items
the first is a ``dict`` of property assertions on that child, and the second is a ``list``
of child assertions to make.
:type children: list, None
:raises:
"""
self.assert_matches(**root)
# Give them a way to ignore asserting on children
if children is None:
return
elif children is NO_CHILDREN:
children = ()
spans = self.spans
self.assert_span_count(len(children))
for i, child in enumerate(children):
if not isinstance(child, (list, tuple)):
child = (child, NO_CHILDREN)
root, _children = child
spans[i].assert_matches(parent_id=self.span_id, trace_id=self.trace_id, _parent=self)
spans[i].assert_structure(root, _children)
def pprint(self):
parts = [super(TestSpanNode, self).pprint()]
for child in self._children:
parts.append("-" * 20)
parts.append(child.pprint())
return "\r\n".join(parts)
def assert_dict_issuperset(a, b):
assert set(a.items()).issuperset(set(b.items())), "{a} is not a superset of {b}".format(a=a, b=b)
@contextmanager
def override_global_tracer(tracer):
"""Helper functions that overrides the global tracer available in the
`ddtrace` package. This is required because in some `httplib` tests we
can't get easily the PIN object attached to the `HTTPConnection` to
replace the used tracer with a dummy tracer.
"""
original_tracer = ddtrace.tracer
ddtrace.tracer = tracer
yield
ddtrace.tracer = original_tracer
class SnapshotFailed(Exception):
pass
def snapshot(ignores=None, include_tracer=False, variants=None, async_mode=True):
"""Performs a snapshot integration test with the testing agent.
All traces sent to the agent will be recorded and compared to a snapshot
created for the test case.
:param ignores: A list of keys to ignore when comparing snapshots. To refer
to keys in the meta or metrics maps use "meta.key" and
"metrics.key"
:param tracer: A tracer providing the agent connection information to use.
"""
ignores = ignores or []
if include_tracer:
tracer = Tracer()
else:
tracer = ddtrace.tracer
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
if len(args) > 1:
self = args[0]
clsname = self.__class__.__name__
else:
clsname = ""
module = inspect.getmodule(wrapped)
# Use the fully qualified function name as a unique test token to
# identify the snapshot.
token = "{}{}{}.{}".format(module.__name__, "." if clsname else "", clsname, wrapped.__name__)
# Use variant that applies to update test token. One must apply. If none
# apply, the test should have been marked as skipped.
if variants:
applicable_variant_ids = [k for (k, v) in variants.items() if v]
assert len(applicable_variant_ids) == 1
variant_id = applicable_variant_ids[0]
token = "{}_{}".format(token, variant_id) if variant_id else token
parsed = parse.urlparse(tracer.writer.agent_url)
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
try:
# clear queue in case traces have been generated before test case is
# itself run
try:
tracer.writer.flush_queue()
except Exception as e:
pytest.fail("Could not flush the queue before test case: %s" % str(e), pytrace=True)
if async_mode:
# Patch the tracer writer to include the test token header for all requests.
tracer.writer._headers["X-Datadog-Test-Token"] = token
else:
# Signal the start of this test case to the test agent.
try:
conn.request("GET", "/test/start?token=%s" % token)
except Exception as e:
pytest.fail("Could not connect to test agent: %s" % str(e), pytrace=False)
else:
r = conn.getresponse()
if r.status != 200:
# The test agent returns nice error messages we can forward to the user.
raise SnapshotFailed(r.read())
# Run the test.
try:
if include_tracer:
kwargs["tracer"] = tracer
ret = wrapped(*args, **kwargs)
# Force a flush so all traces are submitted.
tracer.writer.flush_queue()
finally:
if async_mode:
del tracer.writer._headers["X-Datadog-Test-Token"]
# Query for the results of the test.
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token))
r = conn.getresponse()
if r.status != 200:
raise SnapshotFailed(r.read())
return ret
except SnapshotFailed as e:
# Fail the test if a failure has occurred and print out the
# message we got from the test agent.
pytest.fail(to_unicode(e.args[0]), pytrace=False)
except Exception as e:
# Even though it's unlikely any traces have been sent, make the
# final request to the test agent so that the test case is finished.
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token))
conn.getresponse()
pytest.fail("Unexpected test failure during snapshot test: %s" % str(e), pytrace=True)
finally:
conn.close()
return wrapper
class AnyStr(object):
def __eq__(self, other):
return isinstance(other, str)
class AnyInt(object):
def __eq__(self, other):
return isinstance(other, int)
class AnyFloat(object):
def __eq__(self, other):
return isinstance(other, float)
| 33.176026 | 111 | 0.603073 | import contextlib
from contextlib import contextmanager
import inspect
import os
import sys
from typing import List
import pytest
import ddtrace
from ddtrace import Span
from ddtrace import Tracer
from ddtrace.compat import httplib
from ddtrace.compat import parse
from ddtrace.compat import to_unicode
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.encoding import JSONEncoder
from ddtrace.ext import http
from ddtrace.internal._encoding import MsgpackEncoder
from ddtrace.internal.dogstatsd import get_dogstatsd_client
from ddtrace.internal.writer import AgentWriter
from ddtrace.vendor import wrapt
from tests.subprocesstest import SubprocessTestCase
NO_CHILDREN = object()
def assert_is_measured(span):
assert SPAN_MEASURED_KEY in span.metrics
assert SPAN_MEASURED_KEY not in span.meta
assert span.get_metric(SPAN_MEASURED_KEY) == 1
def assert_is_not_measured(span):
assert SPAN_MEASURED_KEY not in span.meta
if SPAN_MEASURED_KEY in span.metrics:
assert span.get_metric(SPAN_MEASURED_KEY) == 0
else:
assert SPAN_MEASURED_KEY not in span.metrics
def assert_span_http_status_code(span, code):
tag = span.get_tag(http.STATUS_CODE)
code = str(code)
assert tag == code, "%r != %r" % (tag, code)
@contextlib.contextmanager
def override_env(env):
original = dict(os.environ)
os.environ.update(env)
try:
yield
finally:
os.environ.clear()
os.environ.update(original)
@contextlib.contextmanager
def override_global_config(values):
global_config_keys = [
"analytics_enabled",
"report_hostname",
"health_metrics_enabled",
"env",
"version",
"service",
]
originals = dict((key, getattr(ddtrace.config, key)) for key in global_config_keys)
for key, value in values.items():
if key in global_config_keys:
setattr(ddtrace.config, key, value)
try:
yield
finally:
for key, value in originals.items():
setattr(ddtrace.config, key, value)
@contextlib.contextmanager
def override_config(integration, values):
options = getattr(ddtrace.config, integration)
original = dict((key, options.get(key)) for key in values.keys())
options.update(values)
try:
yield
finally:
options.update(original)
@contextlib.contextmanager
def override_http_config(integration, values):
options = getattr(ddtrace.config, integration).http
original = {}
for key, value in values.items():
original[key] = getattr(options, key)
setattr(options, key, value)
try:
yield
finally:
for key, value in original.items():
setattr(options, key, value)
@contextlib.contextmanager
def override_sys_modules(modules):
original = dict(sys.modules)
sys.modules.update(modules)
try:
yield
finally:
sys.modules.clear()
sys.modules.update(original)
class BaseTestCase(SubprocessTestCase):
override_env = staticmethod(override_env)
override_global_config = staticmethod(override_global_config)
override_config = staticmethod(override_config)
override_http_config = staticmethod(override_http_config)
override_sys_modules = staticmethod(override_sys_modules)
assert_is_measured = staticmethod(assert_is_measured)
assert_is_not_measured = staticmethod(assert_is_not_measured)
class TestSpanContainer(object):
def _ensure_test_spans(self, spans):
return [span if isinstance(span, TestSpan) else TestSpan(span) for span in spans]
@property
def spans(self):
return self._ensure_test_spans(self.get_spans())
def get_spans(self):
raise NotImplementedError
def _build_tree(self, root):
children = []
for span in self.spans:
if span.parent_id == root.span_id:
children.append(self._build_tree(span))
return TestSpanNode(root, children)
def get_root_span(self):
root = None
for span in self.spans:
if span.parent_id is None:
if root is not None:
raise AssertionError("Multiple root spans found {0!r} {1!r}".format(root, span))
root = span
assert root, "No root span found in {0!r}".format(self.spans)
return self._build_tree(root)
def get_root_spans(self):
roots = []
for span in self.spans:
if span.parent_id is None:
roots.append(self._build_tree(span))
return sorted(roots, key=lambda s: s.start)
def assert_trace_count(self, count):
trace_count = len(self.get_root_spans())
assert trace_count == count, "Trace count {0} != {1}".format(trace_count, count)
def assert_span_count(self, count):
assert len(self.spans) == count, "Span count {0} != {1}".format(len(self.spans), count)
def assert_has_spans(self):
assert len(self.spans), "No spans found"
def assert_has_no_spans(self):
assert len(self.spans) == 0, "Span count {0}".format(len(self.spans))
def filter_spans(self, *args, **kwargs):
for span in self.spans:
if not isinstance(span, TestSpan):
span = TestSpan(span)
if span.matches(*args, **kwargs):
yield span
def find_span(self, *args, **kwargs):
span = next(self.filter_spans(*args, **kwargs), None)
assert span is not None, "No span found for filter {0!r} {1!r}, have {2} spans".format(
args, kwargs, len(self.spans)
)
return span
class TracerTestCase(TestSpanContainer, BaseTestCase):
def setUp(self):
self.tracer = DummyTracer()
super(TracerTestCase, self).setUp()
def tearDown(self):
super(TracerTestCase, self).tearDown()
self.reset()
delattr(self, "tracer")
def get_spans(self):
return self.tracer.writer.spans
def pop_spans(self):
return self.tracer.pop()
def pop_traces(self):
return self.tracer.pop_traces()
def reset(self):
self.tracer.writer.pop()
def trace(self, *args, **kwargs):
return TestSpan(self.tracer.trace(*args, **kwargs))
def start_span(self, *args, **kwargs):
return TestSpan(self.tracer.start_span(*args, **kwargs))
def assert_structure(self, root, children=NO_CHILDREN):
root_span = self.get_root_span()
root_span.assert_structure(root, children)
@contextlib.contextmanager
def override_global_tracer(self, tracer=None):
original = ddtrace.tracer
tracer = tracer or self.tracer
setattr(ddtrace, "tracer", tracer)
try:
yield
finally:
setattr(ddtrace, "tracer", original)
class DummyWriter(AgentWriter):
def __init__(self, *args, **kwargs):
super(DummyWriter, self).__init__(*args, **kwargs)
self.spans = []
self.traces = []
self.json_encoder = JSONEncoder()
self.msgpack_encoder = MsgpackEncoder()
def write(self, spans=None):
if spans:
trace = [spans]
self.json_encoder.encode_traces(trace)
self.msgpack_encoder.encode_traces(trace)
self.spans += spans
self.traces += trace
def pop(self):
s = self.spans
self.spans = []
return s
def pop_traces(self):
traces = self.traces
self.traces = []
return traces
class DummyTracer(Tracer):
def __init__(self):
super(DummyTracer, self).__init__()
self._update_writer()
def _update_writer(self):
if not isinstance(self.writer, DummyWriter):
self.original_writer = self.writer
if isinstance(self.writer, AgentWriter):
self.writer = DummyWriter(
agent_url=self.writer.agent_url,
priority_sampler=self.writer._priority_sampler,
dogstatsd=get_dogstatsd_client(self._dogstatsd_url),
)
else:
self.writer = DummyWriter(
priority_sampler=self.writer._priority_sampler,
)
def pop(self):
return self.writer.pop()
def pop_traces(self):
return self.writer.pop_traces()
def configure(self, *args, **kwargs):
super(DummyTracer, self).configure(*args, **kwargs)
self._update_writer()
class TestSpan(Span):
def __init__(self, span):
if isinstance(span, TestSpan):
span = span._span
object.__setattr__(self, "_span", span)
def __getattr__(self, key):
if hasattr(self._span, key):
return getattr(self._span, key)
return self.__getattribute__(key)
def __setattr__(self, key, value):
return setattr(self._span, key, value)
def __eq__(self, other):
if isinstance(other, TestSpan):
return other._span == self._span
elif isinstance(other, Span):
return other == self._span
return other == self
def matches(self, **kwargs):
for name, value in kwargs.items():
# Special case for `meta`
if name == "meta" and not self.meta_matches(value):
return False
# Ensure it has the property first
if not hasattr(self, name):
return False
# Ensure the values match
if getattr(self, name) != value:
return False
return True
def meta_matches(self, meta, exact=False):
if exact:
return self.meta == meta
for key, value in meta.items():
if key not in self.meta:
return False
if self.meta[key] != value:
return False
return True
def assert_matches(self, **kwargs):
for name, value in kwargs.items():
# Special case for `meta`
if name == "meta":
self.assert_meta(value)
elif name == "metrics":
self.assert_metrics(value)
else:
assert hasattr(self, name), "{0!r} does not have property {1!r}".format(self, name)
assert getattr(self, name) == value, "{0!r} property {1}: {2!r} != {3!r}".format(
self, name, getattr(self, name), value
)
def assert_meta(self, meta, exact=False):
if exact:
assert self.meta == meta
else:
for key, value in meta.items():
assert key in self.meta, "{0} meta does not have property {1!r}".format(self, key)
assert self.meta[key] == value, "{0} meta property {1!r}: {2!r} != {3!r}".format(
self, key, self.meta[key], value
)
def assert_metrics(self, metrics, exact=False):
if exact:
assert self.metrics == metrics
else:
for key, value in metrics.items():
assert key in self.metrics, "{0} metrics does not have property {1!r}".format(self, key)
assert self.metrics[key] == value, "{0} metrics property {1!r}: {2!r} != {3!r}".format(
self, key, self.metrics[key], value
)
class TracerSpanContainer(TestSpanContainer):
def __init__(self, tracer):
self.tracer = tracer
super(TracerSpanContainer, self).__init__()
def get_spans(self):
return self.tracer.writer.spans
def pop(self):
return self.tracer.pop()
def pop_traces(self):
return self.tracer.pop_traces()
def reset(self):
self.tracer.pop()
class TestSpanNode(TestSpan, TestSpanContainer):
def __init__(self, root, children=None):
super(TestSpanNode, self).__init__(root)
object.__setattr__(self, "_children", children or [])
def get_spans(self):
return self._children
def assert_structure(self, root, children=NO_CHILDREN):
self.assert_matches(**root)
# Give them a way to ignore asserting on children
if children is None:
return
elif children is NO_CHILDREN:
children = ()
spans = self.spans
self.assert_span_count(len(children))
for i, child in enumerate(children):
if not isinstance(child, (list, tuple)):
child = (child, NO_CHILDREN)
root, _children = child
spans[i].assert_matches(parent_id=self.span_id, trace_id=self.trace_id, _parent=self)
spans[i].assert_structure(root, _children)
def pprint(self):
parts = [super(TestSpanNode, self).pprint()]
for child in self._children:
parts.append("-" * 20)
parts.append(child.pprint())
return "\r\n".join(parts)
def assert_dict_issuperset(a, b):
assert set(a.items()).issuperset(set(b.items())), "{a} is not a superset of {b}".format(a=a, b=b)
@contextmanager
def override_global_tracer(tracer):
original_tracer = ddtrace.tracer
ddtrace.tracer = tracer
yield
ddtrace.tracer = original_tracer
class SnapshotFailed(Exception):
pass
def snapshot(ignores=None, include_tracer=False, variants=None, async_mode=True):
ignores = ignores or []
if include_tracer:
tracer = Tracer()
else:
tracer = ddtrace.tracer
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
if len(args) > 1:
self = args[0]
clsname = self.__class__.__name__
else:
clsname = ""
module = inspect.getmodule(wrapped)
# Use the fully qualified function name as a unique test token to
# identify the snapshot.
token = "{}{}{}.{}".format(module.__name__, "." if clsname else "", clsname, wrapped.__name__)
# Use variant that applies to update test token. One must apply. If none
# apply, the test should have been marked as skipped.
if variants:
applicable_variant_ids = [k for (k, v) in variants.items() if v]
assert len(applicable_variant_ids) == 1
variant_id = applicable_variant_ids[0]
token = "{}_{}".format(token, variant_id) if variant_id else token
parsed = parse.urlparse(tracer.writer.agent_url)
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
try:
# clear queue in case traces have been generated before test case is
# itself run
try:
tracer.writer.flush_queue()
except Exception as e:
pytest.fail("Could not flush the queue before test case: %s" % str(e), pytrace=True)
if async_mode:
# Patch the tracer writer to include the test token header for all requests.
tracer.writer._headers["X-Datadog-Test-Token"] = token
else:
# Signal the start of this test case to the test agent.
try:
conn.request("GET", "/test/start?token=%s" % token)
except Exception as e:
pytest.fail("Could not connect to test agent: %s" % str(e), pytrace=False)
else:
r = conn.getresponse()
if r.status != 200:
# The test agent returns nice error messages we can forward to the user.
raise SnapshotFailed(r.read())
# Run the test.
try:
if include_tracer:
kwargs["tracer"] = tracer
ret = wrapped(*args, **kwargs)
# Force a flush so all traces are submitted.
tracer.writer.flush_queue()
finally:
if async_mode:
del tracer.writer._headers["X-Datadog-Test-Token"]
# Query for the results of the test.
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token))
r = conn.getresponse()
if r.status != 200:
raise SnapshotFailed(r.read())
return ret
except SnapshotFailed as e:
# Fail the test if a failure has occurred and print out the
# message we got from the test agent.
pytest.fail(to_unicode(e.args[0]), pytrace=False)
except Exception as e:
# Even though it's unlikely any traces have been sent, make the
conn = httplib.HTTPConnection(parsed.hostname, parsed.port)
conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token))
conn.getresponse()
pytest.fail("Unexpected test failure during snapshot test: %s" % str(e), pytrace=True)
finally:
conn.close()
return wrapper
class AnyStr(object):
def __eq__(self, other):
return isinstance(other, str)
class AnyInt(object):
def __eq__(self, other):
return isinstance(other, int)
class AnyFloat(object):
def __eq__(self, other):
return isinstance(other, float)
| true | true |
f721d0c46749a5a74c06f1c993568537b6bb35d9 | 257 | py | Python | tests/falcon/app.py | neetjn/falcon-pagination-processor | 547f10b4577933af97bd692866a1776d9f582773 | [
"MIT"
] | 2 | 2019-05-17T09:40:46.000Z | 2020-02-25T03:16:10.000Z | tests/falcon/app.py | neetjn/falcon-pagination-processor | 547f10b4577933af97bd692866a1776d9f582773 | [
"MIT"
] | null | null | null | tests/falcon/app.py | neetjn/falcon-pagination-processor | 547f10b4577933af97bd692866a1776d9f582773 | [
"MIT"
] | null | null | null | import falcon
from falcon_pagination_processor import PaginationProcessor
from tests.falcon.resources import TestResourceCollection
api = falcon.API(middleware=[PaginationProcessor()])
api.add_route(TestResourceCollection.route, TestResourceCollection())
| 32.125 | 69 | 0.863813 | import falcon
from falcon_pagination_processor import PaginationProcessor
from tests.falcon.resources import TestResourceCollection
api = falcon.API(middleware=[PaginationProcessor()])
api.add_route(TestResourceCollection.route, TestResourceCollection())
| true | true |
f721d144b892e99706ae1d0f4f99bff00bf06970 | 926 | py | Python | ibllib/tests/extractors/test_ephys_passive.py | nbonacchi/ibllib | 9066c00a8e9a65a1d209144a2ac54d0b87bec0b3 | [
"MIT"
] | null | null | null | ibllib/tests/extractors/test_ephys_passive.py | nbonacchi/ibllib | 9066c00a8e9a65a1d209144a2ac54d0b87bec0b3 | [
"MIT"
] | null | null | null | ibllib/tests/extractors/test_ephys_passive.py | nbonacchi/ibllib | 9066c00a8e9a65a1d209144a2ac54d0b87bec0b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Friday, October 30th 2020, 10:42:49 am
import unittest
import ibllib.io.extractors.ephys_passive as passive
import numpy as np
class TestsPassiveExtractor(unittest.TestCase):
def setUp(self):
pass
def test_load_passive_stim_meta(self):
meta = passive._load_passive_stim_meta()
self.assertTrue(isinstance(meta, dict))
def test_interpolate_rf_mapping_stimulus(self):
idxs_up = np.array([0, 4, 8])
idxs_dn = np.array([1, 5, 9])
times = np.array([0, 1, 4, 5, 8, 9])
Xq = np.arange(15)
t_bin = 1 # Use 1 so can compare directly Xq and Tq
Tq = passive._interpolate_rf_mapping_stimulus(
idxs_up=idxs_up, idxs_dn=idxs_dn, times=times, Xq=Xq, t_bin=t_bin
)
self.assertTrue(np.array_equal(Tq, Xq))
def tearDown(self):
pass
| 28.9375 | 77 | 0.646868 |
import unittest
import ibllib.io.extractors.ephys_passive as passive
import numpy as np
class TestsPassiveExtractor(unittest.TestCase):
def setUp(self):
pass
def test_load_passive_stim_meta(self):
meta = passive._load_passive_stim_meta()
self.assertTrue(isinstance(meta, dict))
def test_interpolate_rf_mapping_stimulus(self):
idxs_up = np.array([0, 4, 8])
idxs_dn = np.array([1, 5, 9])
times = np.array([0, 1, 4, 5, 8, 9])
Xq = np.arange(15)
t_bin = 1
Tq = passive._interpolate_rf_mapping_stimulus(
idxs_up=idxs_up, idxs_dn=idxs_dn, times=times, Xq=Xq, t_bin=t_bin
)
self.assertTrue(np.array_equal(Tq, Xq))
def tearDown(self):
pass
| true | true |
f721d14b099b4093262cf01e174d72143b14627d | 41,291 | py | Python | openff/evaluator/datasets/curation/components/filtering.py | lilyminium/openff-evaluator | 21da54363009d83110b54d57e4416ae31df3868b | [
"MIT"
] | null | null | null | openff/evaluator/datasets/curation/components/filtering.py | lilyminium/openff-evaluator | 21da54363009d83110b54d57e4416ae31df3868b | [
"MIT"
] | null | null | null | openff/evaluator/datasets/curation/components/filtering.py | lilyminium/openff-evaluator | 21da54363009d83110b54d57e4416ae31df3868b | [
"MIT"
] | null | null | null | import functools
import itertools
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import numpy
import pandas
from pydantic import Field, root_validator, validator
from scipy.optimize import linear_sum_assignment
from typing_extensions import Literal
from openff.evaluator.datasets.curation.components import (
CurationComponent,
CurationComponentSchema,
)
from openff.evaluator.datasets.utilities import (
data_frame_to_substances,
reorder_data_frame,
)
from openff.evaluator.utils.checkmol import (
ChemicalEnvironment,
analyse_functional_groups,
)
if TYPE_CHECKING:
conint = int
confloat = float
PositiveInt = int
PositiveFloat = float
else:
from pydantic import PositiveFloat, PositiveInt, confloat, conint, constr
logger = logging.getLogger(__name__)
ComponentEnvironments = List[List[ChemicalEnvironment]]
MoleFractionRange = Tuple[confloat(ge=0.0, le=1.0), confloat(ge=0.0, le=1.0)]
class FilterDuplicatesSchema(CurationComponentSchema):
type: Literal["FilterDuplicates"] = "FilterDuplicates"
temperature_precision: conint(ge=0) = Field(
2,
description="The number of decimal places to compare temperatures (K) to "
"within.",
)
pressure_precision: conint(ge=0) = Field(
3,
description="The number of decimal places to compare pressures (kPa) to "
"within.",
)
mole_fraction_precision: conint(ge=0) = Field(
6,
description="The number of decimal places to compare mole fractions to within.",
)
class FilterDuplicates(CurationComponent):
    """A component to remove duplicate data points (within a specified precision)
    from a data set.

    Two rows are duplicates when they share the same components, roles, exact
    amounts and phase, and their temperature, pressure and mole fractions agree
    after rounding to the precisions given by the schema. Of a set of
    duplicates, the data point with the *largest* reported uncertainty is the
    one retained.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterDuplicatesSchema, n_processes
    ) -> pandas.DataFrame:

        if len(data_frame) == 0:
            return data_frame

        # Work on a copy with the components placed in a canonical order so
        # that e.g. (A, B) and (B, A) mixtures compare as equal.
        data_frame = data_frame.copy()
        data_frame = reorder_data_frame(data_frame)

        minimum_n_components = data_frame["N Components"].min()
        maximum_n_components = data_frame["N Components"].max()

        filtered_data = []

        # De-duplicate each number of components separately as the relevant
        # columns (Component i, Mole Fraction i, ...) differ between them.
        for n_components in range(minimum_n_components, maximum_n_components + 1):

            component_data = data_frame[
                data_frame["N Components"] == n_components
            ].copy()

            # Round the state columns to the requested precision so that
            # near-identical state points collapse onto one another.
            component_data["Temperature (K)"] = component_data["Temperature (K)"].round(
                schema.temperature_precision
            )
            component_data["Pressure (kPa)"] = component_data["Pressure (kPa)"].round(
                schema.pressure_precision
            )

            subset_columns = ["Temperature (K)", "Pressure (kPa)", "Phase"]

            for index in range(n_components):

                component_data[f"Mole Fraction {index + 1}"] = component_data[
                    f"Mole Fraction {index + 1}"
                ].round(schema.mole_fraction_precision)

                subset_columns.extend(
                    [
                        f"Component {index + 1}",
                        f"Role {index + 1}",
                        f"Mole Fraction {index + 1}",
                        f"Exact Amount {index + 1}",
                    ]
                )

            # Restrict to columns actually present in this frame, and find
            # every property value column (headers contain " Value ").
            subset_columns = [x for x in subset_columns if x in component_data]
            value_headers = [x for x in component_data if x.find(" Value ") >= 0]

            sorted_filtered_data = []

            for value_header in value_headers:

                uncertainty_header = value_header.replace("Value", "Uncertainty")

                property_data = component_data[component_data[value_header].notna()]

                # Sort ascending by uncertainty and keep the *last* duplicate,
                # i.e. retain the measurement with the largest uncertainty.
                if uncertainty_header in component_data:
                    property_data = property_data.sort_values(uncertainty_header)

                property_data = property_data.drop_duplicates(
                    subset=subset_columns, keep="last"
                )

                sorted_filtered_data.append(property_data)

            sorted_filtered_data = pandas.concat(
                sorted_filtered_data, ignore_index=True, sort=False
            )

            filtered_data.append(sorted_filtered_data)

        filtered_data = pandas.concat(filtered_data, ignore_index=True, sort=False)
        return filtered_data
class FilterByTemperatureSchema(CurationComponentSchema):
    """The options for the ``FilterByTemperature`` component. Either bound may
    be ``None`` to leave that side of the temperature range open."""

    type: Literal["FilterByTemperature"] = "FilterByTemperature"

    minimum_temperature: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for temperatures above this value (K)",
    )
    maximum_temperature: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for temperatures below this value (K)",
    )

    @root_validator
    def _min_max(cls, values):
        # When both bounds are supplied, the range they define must be
        # non-empty.
        minimum_temperature = values.get("minimum_temperature")
        maximum_temperature = values.get("maximum_temperature")

        if minimum_temperature is not None and maximum_temperature is not None:
            assert maximum_temperature > minimum_temperature

        return values
class FilterByTemperature(CurationComponent):
    """A component which will filter out data points which were measured outside of a
    specified temperature range
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByTemperatureSchema,
        n_processes,
    ) -> pandas.DataFrame:

        # Successively narrow the frame with each supplied bound. Both bounds
        # are exclusive, and a bound of ``None`` leaves that side open.
        retained = data_frame

        if schema.minimum_temperature is not None:
            retained = retained[
                retained["Temperature (K)"] > schema.minimum_temperature
            ]

        if schema.maximum_temperature is not None:
            retained = retained[
                retained["Temperature (K)"] < schema.maximum_temperature
            ]

        return retained
class FilterByPressureSchema(CurationComponentSchema):
    """The options for the ``FilterByPressure`` component. Either bound may be
    ``None`` to leave that side of the pressure range open."""

    type: Literal["FilterByPressure"] = "FilterByPressure"

    minimum_pressure: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for pressures above this value (kPa)",
    )
    maximum_pressure: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for pressures below this value (kPa)",
    )

    @root_validator
    def _min_max(cls, values):
        # When both bounds are supplied, the range they define must be
        # non-empty.
        minimum_pressure = values.get("minimum_pressure")
        maximum_pressure = values.get("maximum_pressure")

        if minimum_pressure is not None and maximum_pressure is not None:
            assert maximum_pressure > minimum_pressure

        return values
class FilterByPressure(CurationComponent):
    """A component which will filter out data points which were measured outside of a
    specified pressure range.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByPressureSchema, n_processes
    ) -> pandas.DataFrame:

        # Successively narrow the frame with each supplied bound. Both bounds
        # are exclusive, and a bound of ``None`` leaves that side open.
        retained = data_frame

        if schema.minimum_pressure is not None:
            retained = retained[retained["Pressure (kPa)"] > schema.minimum_pressure]

        if schema.maximum_pressure is not None:
            retained = retained[retained["Pressure (kPa)"] < schema.maximum_pressure]

        return retained
class FilterByMoleFractionSchema(CurationComponentSchema):
    """The options for the ``FilterByMoleFraction`` component."""

    type: Literal["FilterByMoleFraction"] = "FilterByMoleFraction"

    mole_fraction_ranges: Dict[conint(gt=1), List[List[MoleFractionRange]]] = Field(
        ...,
        description="The ranges of mole fractions to retain. Each key in the "
        "dictionary corresponds to a number of components in the system. Each value "
        "is a list of the allowed mole fraction ranges for all but one of the "
        "components, i.e for a binary system, the allowed mole fraction for only the "
        "first component must be specified.",
    )

    @validator("mole_fraction_ranges")
    def _validate_ranges(cls, value: Dict[int, List[List[MoleFractionRange]]]):
        # Only N - 1 mole fractions of an N component system are independent,
        # so ranges must be supplied for exactly N - 1 components, and every
        # (min, max) range must be well ordered.
        for n_components, ranges in value.items():

            assert len(ranges) == n_components - 1

            assert all(
                mole_fraction_range[0] < mole_fraction_range[1]
                for component_ranges in ranges
                for mole_fraction_range in component_ranges
            )

        return value
class FilterByMoleFraction(CurationComponent):
    """A component which will filter out data points which were measured outside of a
    specified mole fraction range.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByMoleFractionSchema,
        n_processes,
    ) -> pandas.DataFrame:

        filtered_frame = data_frame

        # Rows whose component count has no entry in the schema are always
        # retained.
        full_query = ~filtered_frame["N Components"].isin(schema.mole_fraction_ranges)

        for n_components, ranges in schema.mole_fraction_ranges.items():

            # Build the query to apply
            n_component_query = filtered_frame["N Components"] == n_components

            for index, component_ranges in enumerate(ranges):

                component_query = None

                # A component passes when it falls inside *any* of its allowed
                # (exclusive) ranges; the per-component queries are OR-ed.
                for mole_fraction_range in component_ranges:

                    fraction_query = (
                        filtered_frame[f"Mole Fraction {index + 1}"]
                        > mole_fraction_range[0]
                    ) & (
                        filtered_frame[f"Mole Fraction {index + 1}"]
                        < mole_fraction_range[1]
                    )

                    if component_query is None:
                        component_query = fraction_query
                    else:
                        component_query |= fraction_query

                # Every component with specified ranges must pass (AND).
                n_component_query &= component_query

            full_query |= n_component_query

        filtered_frame = filtered_frame[full_query]
        return filtered_frame
class FilterByRacemicSchema(CurationComponentSchema):
    """The options for the ``FilterByRacemic`` component. This filter takes no
    options beyond its type discriminator."""

    type: Literal["FilterByRacemic"] = "FilterByRacemic"
class FilterByRacemic(CurationComponent):
    """A component which will filter out data points which were measured for racemic
    mixtures.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        # NOTE: annotation corrected from ``FilterByMoleFractionSchema`` —
        # this component is driven by ``FilterByRacemicSchema``.
        schema: FilterByRacemicSchema,
        n_processes,
    ) -> pandas.DataFrame:
        # Begin building the query. All pure substances should be
        # retained by default.
        query = data_frame["N Components"] < 2

        for n_components in range(2, data_frame["N Components"].max() + 1):

            component_data = data_frame[data_frame["N Components"] == n_components]

            if len(component_data) == 0:
                continue

            component_combinations = itertools.combinations(range(n_components), 2)

            is_racemic = None

            for index_0, index_1 in component_combinations:

                # A pair of components is treated as enantiomeric (and hence
                # the mixture racemic) when their SMILES become identical
                # after stripping the '@' stereo markers.
                components_racemic = component_data[
                    f"Component {index_0 + 1}"
                ].str.replace("@", "") == component_data[
                    f"Component {index_1 + 1}"
                ].str.replace(
                    "@", ""
                )

                # The mixture is racemic if any component pair matches.
                is_racemic = (
                    components_racemic
                    if is_racemic is None
                    else (is_racemic | components_racemic)
                )

            not_racemic = ~is_racemic
            query |= not_racemic

        filtered_frame = data_frame[query]
        return filtered_frame
class FilterByElementsSchema(CurationComponentSchema):
    """The options for the ``FilterByElements`` component. Exactly one of the
    allowed / forbidden element lists must be provided."""

    type: Literal["FilterByElements"] = "FilterByElements"

    allowed_elements: Optional[List[constr(min_length=1)]] = Field(
        None,
        description="The only elements which must be present in the measured system "
        "for the data point to be retained. This option is mutually exclusive with "
        "`forbidden_elements`",
    )
    forbidden_elements: Optional[List[constr(min_length=1)]] = Field(
        None,
        description="The elements which must not be present in the measured system for "
        "the data point to be retained. This option is mutually exclusive with "
        "`allowed_elements`",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        # Require that one, and only one, of the two lists is set.
        allowed_elements = values.get("allowed_elements")
        forbidden_elements = values.get("forbidden_elements")

        assert allowed_elements is not None or forbidden_elements is not None
        assert allowed_elements is None or forbidden_elements is None

        return values
class FilterByElements(CurationComponent):
    """A component which will filter out data points which were measured for systems
    which contain specific elements."""

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByElementsSchema, n_processes
    ) -> pandas.DataFrame:

        from openff.toolkit.topology import Molecule

        def filter_function(data_row):
            # Retain the row only if every atom of every component passes the
            # allowed / forbidden element check.

            n_components = data_row["N Components"]

            for index in range(n_components):

                smiles = data_row[f"Component {index + 1}"]
                molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)

                # NOTE(review): ``x.element.symbol`` follows the pre-0.11
                # openff-toolkit atom API; newer versions expose ``x.symbol``
                # directly — confirm against the pinned toolkit version.
                if schema.allowed_elements is not None and not all(
                    [
                        x.element.symbol in schema.allowed_elements
                        for x in molecule.atoms
                    ]
                ):
                    return False

                if schema.forbidden_elements is not None and any(
                    [
                        x.element.symbol in schema.forbidden_elements
                        for x in molecule.atoms
                    ]
                ):
                    return False

            return True

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByPropertyTypesSchema(CurationComponentSchema):
    """The options for the ``FilterByPropertyTypes`` component."""

    type: Literal["FilterByPropertyTypes"] = "FilterByPropertyTypes"

    property_types: List[constr(min_length=1)] = Field(
        ...,
        description="The types of property to retain.",
    )
    n_components: Dict[constr(min_length=1), List[PositiveInt]] = Field(
        default_factory=dict,
        description="Optionally specify the number of components that a property "
        "should have been measured for (e.g. pure, binary) in order for that data "
        "point to be retained.",
    )

    strict: bool = Field(
        False,
        description="If true, only substances (defined without consideration for their "
        "mole fractions or exact amount) which have data available for all of the "
        "specified property types will be retained. Note that the data points aren't "
        "required to have been measured at the same state.",
    )

    @root_validator
    def _validate_n_components(cls, values):
        # Every property named in ``n_components`` must also appear in the
        # ``property_types`` list.
        property_types = values.get("property_types")
        n_components = values.get("n_components")

        assert all(x in property_types for x in n_components)

        return values
class FilterByPropertyTypes(CurationComponent):
    """A component which will apply a filter which only retains properties of specified
    types."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByPropertyTypesSchema,
        n_processes,
    ) -> pandas.DataFrame:

        # Property value columns are identified by a " Value " token in the
        # header (e.g. "Density Value (g / ml)").
        property_headers = [
            header for header in data_frame if header.find(" Value ") >= 0
        ]

        # Removes the columns for properties which are not of interest.
        for header in property_headers:

            property_type = header.split(" ")[0]

            if property_type in schema.property_types:
                continue

            data_frame = data_frame.drop(header, axis=1)

            uncertainty_header = header.replace(" Value ", " Uncertainty ")

            if uncertainty_header in data_frame:
                data_frame = data_frame.drop(uncertainty_header, axis=1)

        # Drop any rows which do not contain any values for the property types of
        # interest.
        property_headers = [
            header
            for header in property_headers
            if header.split(" ")[0] in schema.property_types
        ]

        data_frame = data_frame.dropna(subset=property_headers, how="all")

        # Apply a more specific filter which only retain which contain values
        # for the specific property types, and which were measured for the
        # specified number of components.
        for property_type, n_components in schema.n_components.items():

            property_header = next(
                iter(x for x in property_headers if x.find(f"{property_type} ") == 0),
                None,
            )

            if property_header is None:
                continue

            data_frame = data_frame[
                data_frame[property_header].isna()
                | data_frame["N Components"].isin(n_components)
            ]

        # Apply the strict filter if requested
        if schema.strict:

            reordered_data_frame = reorder_data_frame(data_frame)

            # Build a dictionary of which properties should be present partitioned
            # by the number of components they should have been be measured for.
            property_types = defaultdict(list)

            if len(schema.n_components) > 0:

                for property_type, n_components in schema.n_components.items():
                    for n_component in n_components:
                        property_types[n_component].append(property_type)

                min_n_components = min(property_types)
                max_n_components = max(property_types)

            else:

                # When no per-property component counts were given, require
                # every property type at every component count in the data.
                min_n_components = reordered_data_frame["N Components"].min()
                max_n_components = reordered_data_frame["N Components"].max()

                for n_components in range(min_n_components, max_n_components + 1):
                    property_types[n_components].extend(schema.property_types)

            substances_with_data = set()
            components_with_data = {}

            # For each N component find substances which have data points for
            # all of the specified property types.
            for n_components in range(min_n_components, max_n_components + 1):

                component_data = reordered_data_frame[
                    reordered_data_frame["N Components"] == n_components
                ]

                if n_components not in property_types or len(component_data) == 0:
                    continue

                n_component_headers = [
                    header
                    for header in property_headers
                    if header.split(" ")[0] in property_types[n_components]
                    and header in component_data
                ]

                # If any required property type has no column at all, no
                # substance at this component count can satisfy the filter.
                if len(n_component_headers) != len(property_types[n_components]):
                    continue

                # Substances must have data for *every* required property.
                n_component_substances = set.intersection(
                    *[
                        data_frame_to_substances(
                            component_data[component_data[header].notna()]
                        )
                        for header in n_component_headers
                    ]
                )
                substances_with_data.update(n_component_substances)

                components_with_data[n_components] = {
                    component
                    for substance in n_component_substances
                    for component in substance
                }

            if len(schema.n_components) > 0:

                components_with_all_data = set.intersection(
                    *components_with_data.values()
                )

                # Filter out any smiles for don't appear in all of the N component
                # substances.
                data_frame = FilterBySmiles.apply(
                    data_frame,
                    FilterBySmilesSchema(smiles_to_include=[*components_with_all_data]),
                )

            # Filter out any substances which (within each N component) don't have
            # all of the specified data types.
            data_frame = FilterBySubstances.apply(
                data_frame,
                FilterBySubstancesSchema(substances_to_include=[*substances_with_data]),
            )

        # Drop any columns which are now entirely empty.
        data_frame = data_frame.dropna(axis=1, how="all")

        return data_frame
class FilterByStereochemistrySchema(CurationComponentSchema):
    """The options for the ``FilterByStereochemistry`` component. This filter
    takes no options beyond its type discriminator."""

    type: Literal["FilterByStereochemistry"] = "FilterByStereochemistry"
class FilterByStereochemistry(CurationComponent):
    """A component which filters out data points measured for systems whereby the
    stereochemistry of a number of components is undefined."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByStereochemistrySchema,
        n_processes,
    ) -> pandas.DataFrame:

        from openff.toolkit.topology import Molecule
        from openff.toolkit.utils import UndefinedStereochemistryError

        def has_defined_stereochemistry(data_row):
            # Attempt to parse each component - the toolkit raises when a
            # stereocenter / stereobond is left undefined in the SMILES.
            for index in range(data_row["N Components"]):

                try:
                    Molecule.from_smiles(data_row[f"Component {index + 1}"])
                except UndefinedStereochemistryError:
                    return False

            return True

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(has_defined_stereochemistry, axis=1)]
class FilterByChargedSchema(CurationComponentSchema):
    """The options for the ``FilterByCharged`` component. This filter takes no
    options beyond its type discriminator."""

    type: Literal["FilterByCharged"] = "FilterByCharged"
class FilterByCharged(CurationComponent):
    """A component which filters out data points measured for substances where any of
    the constituent components have a net non-zero charge.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByChargedSchema, n_processes
    ) -> pandas.DataFrame:

        from openff.toolkit.topology import Molecule
        from simtk import unit as simtk_unit

        def is_neutral_system(data_row):

            for index in range(data_row["N Components"]):

                molecule = Molecule.from_smiles(
                    data_row[f"Component {index + 1}"], allow_undefined_stereo=True
                )

                # Formal charges may be plain ints or simtk unit-wrapped
                # quantities depending on the toolkit version; normalize both.
                # noinspection PyUnresolvedReferences
                net_charge = sum(
                    charge
                    if isinstance(charge, int)
                    else charge.value_in_unit(simtk_unit.elementary_charge)
                    for charge in (atom.formal_charge for atom in molecule.atoms)
                )

                if not numpy.isclose(net_charge, 0.0):
                    return False

            return True

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(is_neutral_system, axis=1)]
class FilterByIonicLiquidSchema(CurationComponentSchema):
    """The options for the ``FilterByIonicLiquid`` component. This filter takes
    no options beyond its type discriminator."""

    type: Literal["FilterByIonicLiquid"] = "FilterByIonicLiquid"
class FilterByIonicLiquid(CurationComponent):
    """A component which filters out data points measured for substances which
    contain or are classed as an ionic liquids.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByIonicLiquidSchema,
        n_processes,
    ) -> pandas.DataFrame:

        def contains_no_multi_fragment_smiles(data_row):
            # A '.' separator in a SMILES pattern denotes multiple disconnected
            # fragments (e.g. an ion pair), which is used here as the proxy
            # for an ionic liquid component.
            component_smiles = (
                data_row[f"Component {index + 1}"]
                for index in range(data_row["N Components"])
            )
            return all("." not in smiles for smiles in component_smiles)

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(contains_no_multi_fragment_smiles, axis=1)]
class FilterBySmilesSchema(CurationComponentSchema):
    """The options for the ``FilterBySmiles`` component. Exactly one of the
    include / exclude lists must be provided."""

    type: Literal["FilterBySmiles"] = "FilterBySmiles"

    smiles_to_include: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to retain. This option is mutually "
        "exclusive with `smiles_to_exclude`",
    )
    smiles_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to exclude. This option is mutually "
        "exclusive with `smiles_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must appear in "
        "the `smiles_to_include` list, otherwise, only some must appear. "
        "This option only applies when `smiles_to_include` is set.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        # Require that one, and only one, of the two lists is set.
        smiles_to_include = values.get("smiles_to_include")
        smiles_to_exclude = values.get("smiles_to_exclude")

        assert smiles_to_include is not None or smiles_to_exclude is not None
        assert smiles_to_include is None or smiles_to_exclude is None

        return values
class FilterBySmiles(CurationComponent):
    """A component which filters the data set so that it only contains either a
    specific set of smiles, or does not contain any of a set of specifically excluded
    smiles.
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmilesSchema, n_processes
    ) -> pandas.DataFrame:

        # Exactly one of the two lists is set (enforced by the schema); treat
        # the unset one as empty.
        smiles_to_include = schema.smiles_to_include
        smiles_to_exclude = schema.smiles_to_exclude

        if smiles_to_include is not None:
            smiles_to_exclude = []
        elif smiles_to_exclude is not None:
            smiles_to_include = []

        def should_retain(data_row):

            component_smiles = [
                data_row[f"Component {index + 1}"]
                for index in range(data_row["N Components"])
            ]

            # Exclusion mode: retain the row unless a component is excluded.
            if len(smiles_to_exclude) > 0:
                return not any(x in smiles_to_exclude for x in component_smiles)

            # Inclusion mode: require all components (or, when partial
            # inclusion is allowed, at least one) to appear in the list.
            included = [x in smiles_to_include for x in component_smiles]
            return any(included) if schema.allow_partial_inclusion else all(included)

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(should_retain, axis=1)]
class FilterBySmirksSchema(CurationComponentSchema):
    """The options for the ``FilterBySmirks`` component. Exactly one of the
    include / exclude pattern lists must be provided."""

    type: Literal["FilterBySmirks"] = "FilterBySmirks"

    smirks_to_include: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_exclude`",
    )
    smirks_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must not be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must match at least "
        "one pattern in `smirks_to_include` in order to retain a measurement, "
        "otherwise, only a least one component must match. This option only applies "
        "when `smirks_to_include` is set.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        # Require that one, and only one, of the two lists is set.
        smirks_to_include = values.get("smirks_to_include")
        smirks_to_exclude = values.get("smirks_to_exclude")

        assert smirks_to_include is not None or smirks_to_exclude is not None
        assert smirks_to_include is None or smirks_to_exclude is None

        return values
class FilterBySmirks(CurationComponent):
    """A component which filters a data set so that it only contains measurements made
    for molecules which contain (or don't) a set of chemical environments
    represented by SMIRKS patterns.
    """

    # Parsing SMILES and performing substructure matches is expensive, and the
    # same components recur across many rows, so cache the match results.
    @staticmethod
    @functools.lru_cache(1000)
    def _find_smirks_matches(smiles_pattern, *smirks_patterns):
        """Determines which (if any) of the specified smirks match the specified
        molecule.

        Parameters
        ----------
        smiles_pattern: str
            The SMILES representation to try and match against.
        smirks_patterns: str
            The smirks patterns to try and match.

        Returns
        -------
        list of str
            The matched smirks patterns.
        """

        from openff.toolkit.topology import Molecule

        if len(smirks_patterns) == 0:
            return []

        molecule = Molecule.from_smiles(smiles_pattern, allow_undefined_stereo=True)

        matches = [
            smirks
            for smirks in smirks_patterns
            if len(molecule.chemical_environment_matches(smirks)) > 0
        ]

        return matches

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmirksSchema, n_processes
    ) -> pandas.DataFrame:

        # Exactly one of the two lists is set (enforced by the schema).
        smirks_to_match = (
            schema.smirks_to_include
            if schema.smirks_to_include
            else schema.smirks_to_exclude
        )

        def filter_function(data_row):

            n_components = data_row["N Components"]

            component_smiles = [
                data_row[f"Component {index + 1}"] for index in range(n_components)
            ]

            smirks_matches = {
                smiles: cls._find_smirks_matches(smiles, *smirks_to_match)
                for smiles in component_smiles
            }

            # Exclusion mode: no component may match any pattern.
            if schema.smirks_to_exclude is not None:
                return not any(len(x) > 0 for x in smirks_matches.values())

            # Inclusion mode: at least one (partial) or all components must
            # match at least one pattern.
            if schema.allow_partial_inclusion:
                return any(len(x) > 0 for x in smirks_matches.values())

            return all(len(x) > 0 for x in smirks_matches.values())

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByNComponentsSchema(CurationComponentSchema):
    """The options for the ``FilterByNComponents`` component."""

    type: Literal["FilterByNComponents"] = "FilterByNComponents"

    n_components: List[PositiveInt] = Field(
        ...,
        description="The number of components that measurements should have been "
        "measured for in order to be retained.",
    )
class FilterByNComponents(CurationComponent):
    """A component which filters out data points measured for systems with specified
    number of components.
    """

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByNComponentsSchema,
        n_processes,
    ) -> pandas.DataFrame:

        # Retain only those rows whose component count is in the allowed list.
        retained_mask = data_frame["N Components"].isin(schema.n_components)
        return data_frame[retained_mask]
class FilterBySubstancesSchema(CurationComponentSchema):
    """The options for the ``FilterBySubstances`` component. Exactly one of the
    include / exclude substance lists must be provided."""

    type: Literal["FilterBySubstances"] = "FilterBySubstances"

    substances_to_include: Optional[List[Tuple[str, ...]]] = Field(
        None,
        description="The substances compositions to retain, where each tuple in the "
        "list contains the smiles patterns which make up the substance to include. "
        "This option is mutually exclusive with `substances_to_exclude`.",
    )
    # The description previously said "to retain" due to a copy-paste error;
    # this list defines the substances to *exclude*.
    substances_to_exclude: Optional[List[Tuple[str, ...]]] = Field(
        None,
        description="The substances compositions to exclude, where each tuple in the "
        "list contains the smiles patterns which make up the substance to exclude. "
        "This option is mutually exclusive with `substances_to_include`.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        # Require that one, and only one, of the two lists is set.
        substances_to_include = values.get("substances_to_include")
        substances_to_exclude = values.get("substances_to_exclude")

        assert substances_to_include is not None or substances_to_exclude is not None
        assert substances_to_include is None or substances_to_exclude is None

        return values
class FilterBySubstances(CurationComponent):
    """A component which filters the data set so that it only contains properties
    measured for particular substances.

    This method is similar to `filter_by_smiles`, however here we explicitly define
    the full substances compositions, rather than individual smiles which should
    either be included or excluded.

    Examples
    --------
    To filter the data set to only include measurements for pure methanol, pure
    benzene or an aqueous ethanol mix:

    >>> schema = FilterBySubstancesSchema(
    >>>     substances_to_include=[
    >>>         ('CO',),
    >>>         ('C1=CC=CC=C1',),
    >>>         ('CCO', 'O')
    >>>     ]
    >>> )

    To filter out measurements made for an aqueous mix of benzene:

    >>> schema = FilterBySubstancesSchema(
    >>>     substances_to_exclude=[('O', 'C1=CC=CC=C1')]
    >>> )
    """

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySubstancesSchema, n_processes
    ) -> pandas.DataFrame:

        # Normalize the substance definitions *once* rather than re-sorting
        # them for every row of the frame (as was previously the case), and
        # store them in sets for O(1) membership tests. Sorting makes the
        # comparison independent of the order in which components are listed.
        substances_to_include = schema.substances_to_include
        substances_to_exclude = schema.substances_to_exclude

        if substances_to_include is not None:
            substances_to_include = {tuple(sorted(x)) for x in substances_to_include}
        if substances_to_exclude is not None:
            substances_to_exclude = {tuple(sorted(x)) for x in substances_to_exclude}

        def filter_function(data_row):

            n_components = data_row["N Components"]

            substance = tuple(
                sorted(
                    data_row[f"Component {index + 1}"] for index in range(n_components)
                )
            )

            # Exactly one of the two sets is defined (enforced by the schema):
            # retain rows absent from the exclusion set, or present in the
            # inclusion set.
            return (
                substances_to_exclude is not None
                and substance not in substances_to_exclude
            ) or (
                substances_to_include is not None and substance in substances_to_include
            )

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByEnvironmentsSchema(CurationComponentSchema):
    """The options for the ``FilterByEnvironments`` component. Exactly one of
    ``per_component_environments`` / ``environments`` must be provided, and
    exactly one of ``at_least_one_environment`` /
    ``strictly_specified_environments`` must be true."""

    type: Literal["FilterByEnvironments"] = "FilterByEnvironments"

    per_component_environments: Optional[Dict[int, ComponentEnvironments]] = Field(
        None,
        description="The environments which should be present in the components of "
        "the substance for which the measurements were made. Each dictionary "
        "key corresponds to a number of components in the system, and each "
        "value the environments which should be matched by those n components. "
        "This option is mutually exclusive with `environments`.",
    )
    environments: Optional[List[ChemicalEnvironment]] = Field(
        None,
        description="The environments which should be present in the substances for "
        "which measurements were made. This option is mutually exclusive with "
        "`per_component_environments`.",
    )

    at_least_one_environment: bool = Field(
        True,
        description="If true, data points will only be retained if all of the "
        "components in the measured system contain at least one of the specified "
        "environments. This option is mutually exclusive with "
        "`strictly_specified_environments`.",
    )
    strictly_specified_environments: bool = Field(
        False,
        description="If true, data points will only be retained if all of the "
        "components in the measured system strictly contain only the specified "
        "environments and no others. This option is mutually exclusive with "
        "`at_least_one_environment`.",
    )

    @validator("per_component_environments")
    def _validate_per_component_environments(cls, value):
        # Each key (a number of components) must map to exactly that many
        # per-component environment lists.
        if value is None:
            return value

        assert all(len(y) == x for x, y in value.items())
        return value

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        # Exactly one matching mode, and exactly one environment source, must
        # be selected.
        at_least_one_environment = values.get("at_least_one_environment")
        strictly_specified_environments = values.get("strictly_specified_environments")

        assert (
            at_least_one_environment is True or strictly_specified_environments is True
        )
        assert (
            at_least_one_environment is False
            or strictly_specified_environments is False
        )

        per_component_environments = values.get("per_component_environments")
        environments = values.get("environments")

        assert per_component_environments is not None or environments is not None
        assert per_component_environments is None or environments is None

        return values
class FilterByEnvironments(CurationComponent):
    """A component which filters a data set so that it only contains measurements made
    for substances which contain specific chemical environments.
    """

    @classmethod
    def _find_environments_per_component(cls, data_row: pandas.Series):
        """Analyse (via checkmol) the functional groups present in each
        component of the row's substance, returning ``None`` (i.e. discard the
        row) when any component could not be parsed."""

        n_components = data_row["N Components"]

        component_smiles = [
            data_row[f"Component {index + 1}"] for index in range(n_components)
        ]
        component_moieties = [analyse_functional_groups(x) for x in component_smiles]

        if any(x is None for x in component_moieties):

            logger.info(
                f"Checkmol was unable to parse the system with components="
                f"{component_smiles} and so this data point was discarded."
            )

            return None

        return component_moieties

    @classmethod
    def _is_match(cls, component_environments, environments_to_match, schema):
        """Return whether a component's environments satisfy the filter: in
        strict mode *all* of its environments must be in the allowed list,
        otherwise *any* one matching environment suffices."""

        operator = all if schema.strictly_specified_environments else any

        return operator(
            environment in environments_to_match
            for environment in component_environments
        )

    @classmethod
    def _filter_by_environments(cls, data_row, schema: FilterByEnvironmentsSchema):
        """Row filter for the ``environments`` mode: every component must
        match the single shared list of environments."""

        environments_per_component = cls._find_environments_per_component(data_row)

        if environments_per_component is None:
            return False

        return all(
            cls._is_match(component_environments, schema.environments, schema)
            for component_environments in environments_per_component
        )

    @classmethod
    def _filter_by_per_component(cls, data_row, schema: FilterByEnvironmentsSchema):
        """Row filter for the ``per_component_environments`` mode: each
        component must be assignable to a distinct environment list."""

        n_components = data_row["N Components"]

        if (
            schema.per_component_environments is not None
            and n_components not in schema.per_component_environments
        ):
            # No filter was specified for this number of components.
            return True

        environments_per_component = cls._find_environments_per_component(data_row)

        if environments_per_component is None:
            return False

        # Build the (component x environment-list) match matrix and solve the
        # assignment problem so that each component is paired with a different
        # environment list; retain the row only when a full matching exists.
        match_matrix = numpy.zeros((n_components, n_components))

        for component_index, component_environments in enumerate(
            environments_per_component
        ):

            # noinspection PyUnresolvedReferences
            for environments_index, environments_to_match in enumerate(
                schema.per_component_environments[n_components]
            ):

                match_matrix[component_index, environments_index] = cls._is_match(
                    component_environments, environments_to_match, schema
                )

        x_indices, y_indices = linear_sum_assignment(match_matrix, maximize=True)

        return numpy.all(match_matrix[x_indices, y_indices] > 0)

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByEnvironmentsSchema,
        n_processes,
    ) -> pandas.DataFrame:

        # Choose the row filter matching whichever (mutually exclusive) mode
        # the schema was configured with.
        if schema.environments is not None:
            filter_function = functools.partial(
                cls._filter_by_environments, schema=schema
            )
        else:
            filter_function = functools.partial(
                cls._filter_by_per_component, schema=schema
            )

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
# The union of every curation filter schema. The ``type`` literal field on
# each schema discriminates which component a serialized schema refers to.
FilterComponentSchema = Union[
    FilterDuplicatesSchema,
    FilterByTemperatureSchema,
    FilterByPressureSchema,
    FilterByMoleFractionSchema,
    FilterByRacemicSchema,
    FilterByElementsSchema,
    FilterByPropertyTypesSchema,
    FilterByStereochemistrySchema,
    FilterByChargedSchema,
    FilterByIonicLiquidSchema,
    FilterBySmilesSchema,
    FilterBySmirksSchema,
    FilterByNComponentsSchema,
    FilterBySubstancesSchema,
    FilterByEnvironmentsSchema,
]
| 32.901195 | 88 | 0.636071 | import functools
import itertools
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import numpy
import pandas
from pydantic import Field, root_validator, validator
from scipy.optimize import linear_sum_assignment
from typing_extensions import Literal
from openff.evaluator.datasets.curation.components import (
CurationComponent,
CurationComponentSchema,
)
from openff.evaluator.datasets.utilities import (
data_frame_to_substances,
reorder_data_frame,
)
from openff.evaluator.utils.checkmol import (
ChemicalEnvironment,
analyse_functional_groups,
)
if TYPE_CHECKING:
conint = int
confloat = float
PositiveInt = int
PositiveFloat = float
else:
from pydantic import PositiveFloat, PositiveInt, confloat, conint, constr
logger = logging.getLogger(__name__)
ComponentEnvironments = List[List[ChemicalEnvironment]]
MoleFractionRange = Tuple[confloat(ge=0.0, le=1.0), confloat(ge=0.0, le=1.0)]
class FilterDuplicatesSchema(CurationComponentSchema):
    """Options for ``FilterDuplicates``: the numeric precisions to which two
    state points are considered identical."""
    type: Literal["FilterDuplicates"] = "FilterDuplicates"
    temperature_precision: conint(ge=0) = Field(
        2,
        description="The number of decimal places to compare temperatures (K) to "
        "within.",
    )
    pressure_precision: conint(ge=0) = Field(
        3,
        description="The number of decimal places to compare pressures (kPa) to "
        "within.",
    )
    mole_fraction_precision: conint(ge=0) = Field(
        6,
        description="The number of decimal places to compare mole fractions to within.",
    )
class FilterDuplicates(CurationComponent):
    """Drops duplicate measurements whose state matches to within the
    precisions configured on the schema."""
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterDuplicatesSchema, n_processes
    ) -> pandas.DataFrame:
        if len(data_frame) == 0:
            return data_frame
        data_frame = data_frame.copy()
        data_frame = reorder_data_frame(data_frame)
        minimum_n_components = data_frame["N Components"].min()
        maximum_n_components = data_frame["N Components"].max()
        filtered_data = []
        # De-duplicate each number-of-components partition independently.
        for n_components in range(minimum_n_components, maximum_n_components + 1):
            component_data = data_frame[
                data_frame["N Components"] == n_components
            ].copy()
            # Round the state columns so near-identical states compare equal.
            component_data["Temperature (K)"] = component_data["Temperature (K)"].round(
                schema.temperature_precision
            )
            component_data["Pressure (kPa)"] = component_data["Pressure (kPa)"].round(
                schema.pressure_precision
            )
            subset_columns = ["Temperature (K)", "Pressure (kPa)", "Phase"]
            for index in range(n_components):
                component_data[f"Mole Fraction {index + 1}"] = component_data[
                    f"Mole Fraction {index + 1}"
                ].round(schema.mole_fraction_precision)
                subset_columns.extend(
                    [
                        f"Component {index + 1}",
                        f"Role {index + 1}",
                        f"Mole Fraction {index + 1}",
                        f"Exact Amount {index + 1}",
                    ]
                )
            subset_columns = [x for x in subset_columns if x in component_data]
            # De-duplicate separately per property ("... Value ...") column.
            value_headers = [x for x in component_data if x.find(" Value ") >= 0]
            sorted_filtered_data = []
            for value_header in value_headers:
                uncertainty_header = value_header.replace("Value", "Uncertainty")
                property_data = component_data[component_data[value_header].notna()]
                # Ascending sort + keep="last" retains the duplicate with the
                # LARGEST reported uncertainty — NOTE(review): confirm intended.
                if uncertainty_header in component_data:
                    property_data = property_data.sort_values(uncertainty_header)
                property_data = property_data.drop_duplicates(
                    subset=subset_columns, keep="last"
                )
                sorted_filtered_data.append(property_data)
            sorted_filtered_data = pandas.concat(
                sorted_filtered_data, ignore_index=True, sort=False
            )
            filtered_data.append(sorted_filtered_data)
        filtered_data = pandas.concat(filtered_data, ignore_index=True, sort=False)
        return filtered_data
class FilterByTemperatureSchema(CurationComponentSchema):
    """Options for ``FilterByTemperature``: an (exclusive) temperature window."""
    type: Literal["FilterByTemperature"] = "FilterByTemperature"
    minimum_temperature: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for temperatures above this value (K)",
    )
    maximum_temperature: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for temperatures below this value (K)",
    )
    # Ensure the window is non-empty when both bounds are supplied.
    @root_validator
    def _min_max(cls, values):
        minimum_temperature = values.get("minimum_temperature")
        maximum_temperature = values.get("maximum_temperature")
        if minimum_temperature is not None and maximum_temperature is not None:
            assert maximum_temperature > minimum_temperature
        return values
class FilterByTemperature(CurationComponent):
    """Retains only data points whose temperature lies strictly inside the
    window configured on the schema (bounds may each be omitted)."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByTemperatureSchema,
        n_processes,
    ) -> pandas.DataFrame:
        retained = data_frame
        # Narrow the frame one bound at a time; a `None` bound is a no-op.
        if schema.minimum_temperature is not None:
            above_minimum = retained["Temperature (K)"] > schema.minimum_temperature
            retained = retained[above_minimum]
        if schema.maximum_temperature is not None:
            below_maximum = retained["Temperature (K)"] < schema.maximum_temperature
            retained = retained[below_maximum]
        return retained
class FilterByPressureSchema(CurationComponentSchema):
    """Options for ``FilterByPressure``: an (exclusive) pressure window."""
    type: Literal["FilterByPressure"] = "FilterByPressure"
    minimum_pressure: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for pressures above this value (kPa)",
    )
    maximum_pressure: Optional[PositiveFloat] = Field(
        ...,
        description="Retain data points measured for pressures below this value (kPa)",
    )
    # Ensure the window is non-empty when both bounds are supplied.
    @root_validator
    def _min_max(cls, values):
        minimum_pressure = values.get("minimum_pressure")
        maximum_pressure = values.get("maximum_pressure")
        if minimum_pressure is not None and maximum_pressure is not None:
            assert maximum_pressure > minimum_pressure
        return values
class FilterByPressure(CurationComponent):
    """Retains only data points whose pressure lies strictly inside the
    window configured on the schema (bounds may each be omitted)."""

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByPressureSchema, n_processes
    ) -> pandas.DataFrame:
        retained = data_frame
        # Narrow the frame one bound at a time; a `None` bound is a no-op.
        if schema.minimum_pressure is not None:
            above_minimum = retained["Pressure (kPa)"] > schema.minimum_pressure
            retained = retained[above_minimum]
        if schema.maximum_pressure is not None:
            below_maximum = retained["Pressure (kPa)"] < schema.maximum_pressure
            retained = retained[below_maximum]
        return retained
class FilterByMoleFractionSchema(CurationComponentSchema):
    """Options for ``FilterByMoleFraction``: per-component mole-fraction
    windows, keyed by the number of components in the system."""
    type: Literal["FilterByMoleFraction"] = "FilterByMoleFraction"
    mole_fraction_ranges: Dict[conint(gt=1), List[List[MoleFractionRange]]] = Field(
        ...,
        description="The ranges of mole fractions to retain. Each key in the "
        "dictionary corresponds to a number of components in the system. Each value "
        "is a list of the allowed mole fraction ranges for all but one of the "
        "components, i.e for a binary system, the allowed mole fraction for only the "
        "first component must be specified.",
    )
    # Each N-component entry must give ranges for exactly N - 1 components, and
    # every (lower, upper) pair must be a non-empty window.
    @validator("mole_fraction_ranges")
    def _validate_ranges(cls, value: Dict[int, List[List[MoleFractionRange]]]):
        for n_components, ranges in value.items():
            assert len(ranges) == n_components - 1
            assert all(
                mole_fraction_range[0] < mole_fraction_range[1]
                for component_ranges in ranges
                for mole_fraction_range in component_ranges
            )
        return value
class FilterByMoleFraction(CurationComponent):
    """Retains data points whose component mole fractions fall strictly inside
    the windows configured on the schema; rows whose N Components has no entry
    in the schema are retained unchanged."""
    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByMoleFractionSchema,
        n_processes,
    ) -> pandas.DataFrame:
        filtered_frame = data_frame
        # Start by retaining every row whose component count is not filtered.
        full_query = ~filtered_frame["N Components"].isin(schema.mole_fraction_ranges)
        for n_components, ranges in schema.mole_fraction_ranges.items():
            n_component_query = filtered_frame["N Components"] == n_components
            for index, component_ranges in enumerate(ranges):
                component_query = None
                # A component passes if it falls inside ANY of its windows.
                for mole_fraction_range in component_ranges:
                    fraction_query = (
                        filtered_frame[f"Mole Fraction {index + 1}"]
                        > mole_fraction_range[0]
                    ) & (
                        filtered_frame[f"Mole Fraction {index + 1}"]
                        < mole_fraction_range[1]
                    )
                    if component_query is None:
                        component_query = fraction_query
                    else:
                        component_query |= fraction_query
                # ALL filtered components must pass for the row to be kept.
                n_component_query &= component_query
            full_query |= n_component_query
        filtered_frame = filtered_frame[full_query]
        return filtered_frame
class FilterByRacemicSchema(CurationComponentSchema):
    """Options for ``FilterByRacemic`` (no configurable fields)."""
    type: Literal["FilterByRacemic"] = "FilterByRacemic"
class FilterByRacemic(CurationComponent):
    """Drops multi-component rows where any two components share the same
    SMILES once stereo markers ('@') are stripped, i.e. likely racemic pairs."""
    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        # Annotation corrected from FilterByMoleFractionSchema (copy-paste).
        schema: FilterByRacemicSchema,
        n_processes,
    ) -> pandas.DataFrame:
        # Single-component rows can never be racemic and are always kept.
        query = data_frame["N Components"] < 2
        for n_components in range(2, data_frame["N Components"].max() + 1):
            component_data = data_frame[data_frame["N Components"] == n_components]
            if len(component_data) == 0:
                continue
            component_combinations = itertools.combinations(range(n_components), 2)
            is_racemic = None
            for index_0, index_1 in component_combinations:
                # Two components are treated as a racemic pair if their SMILES
                # are equal after removing the '@' stereochemistry markers.
                components_racemic = component_data[
                    f"Component {index_0 + 1}"
                ].str.replace("@", "") == component_data[
                    f"Component {index_1 + 1}"
                ].str.replace(
                    "@", ""
                )
                is_racemic = (
                    components_racemic
                    if is_racemic is None
                    else (is_racemic | components_racemic)
                )
            not_racemic = ~is_racemic
            query |= not_racemic
        filtered_frame = data_frame[query]
        return filtered_frame
class FilterByElementsSchema(CurationComponentSchema):
    """Options for ``FilterByElements``: exactly one of an allow-list or a
    deny-list of element symbols."""
    type: Literal["FilterByElements"] = "FilterByElements"
    allowed_elements: Optional[List[constr(min_length=1)]] = Field(
        None,
        description="The only elements which must be present in the measured system "
        "for the data point to be retained. This option is mutually exclusive with "
        "`forbidden_elements`",
    )
    forbidden_elements: Optional[List[constr(min_length=1)]] = Field(
        None,
        description="The elements which must not be present in the measured system for "
        "the data point to be retained. This option is mutually exclusive with "
        "`allowed_elements`",
    )
    # Exactly one of the two options must be provided.
    @root_validator
    def _validate_mutually_exclusive(cls, values):
        allowed_elements = values.get("allowed_elements")
        forbidden_elements = values.get("forbidden_elements")
        assert allowed_elements is not None or forbidden_elements is not None
        assert allowed_elements is None or forbidden_elements is None
        return values
class FilterByElements(CurationComponent):
    """Retains data points whose components contain only allowed elements
    (or none of the forbidden elements)."""
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByElementsSchema, n_processes
    ) -> pandas.DataFrame:
        from openff.toolkit.topology import Molecule
        def filter_function(data_row):
            # Returns True when every component of the row passes the element
            # allow/deny lists configured on the schema.
            n_components = data_row["N Components"]
            for index in range(n_components):
                smiles = data_row[f"Component {index + 1}"]
                molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
                if schema.allowed_elements is not None and not all(
                    [
                        x.element.symbol in schema.allowed_elements
                        for x in molecule.atoms
                    ]
                ):
                    return False
                if schema.forbidden_elements is not None and any(
                    [
                        x.element.symbol in schema.forbidden_elements
                        for x in molecule.atoms
                    ]
                ):
                    return False
            return True
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByPropertyTypesSchema(CurationComponentSchema):
    """Options for ``FilterByPropertyTypes``: the property types to retain,
    an optional per-property component-count restriction, and a strict mode."""
    type: Literal["FilterByPropertyTypes"] = "FilterByPropertyTypes"
    property_types: List[constr(min_length=1)] = Field(
        ...,
        description="The types of property to retain.",
    )
    n_components: Dict[constr(min_length=1), List[PositiveInt]] = Field(
        default_factory=dict,
        description="Optionally specify the number of components that a property "
        "should have been measured for (e.g. pure, binary) in order for that data "
        "point to be retained.",
    )
    strict: bool = Field(
        False,
        description="If true, only substances (defined without consideration for their "
        "mole fractions or exact amount) which have data available for all of the "
        "specified property types will be retained. Note that the data points aren't "
        "required to have been measured at the same state.",
    )
    # Every key of `n_components` must also appear in `property_types`.
    @root_validator
    def _validate_n_components(cls, values):
        property_types = values.get("property_types")
        n_components = values.get("n_components")
        assert all(x in property_types for x in n_components)
        return values
class FilterByPropertyTypes(CurationComponent):
    """Retains only data points (and columns) for the property types named on
    the schema, optionally restricted to particular component counts and,
    in strict mode, to substances with data for every requested property."""
    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByPropertyTypesSchema,
        n_processes,
    ) -> pandas.DataFrame:
        property_headers = [
            header for header in data_frame if header.find(" Value ") >= 0
        ]
        # Removes the columns for properties which are not of interest.
        for header in property_headers:
            property_type = header.split(" ")[0]
            if property_type in schema.property_types:
                continue
            data_frame = data_frame.drop(header, axis=1)
            uncertainty_header = header.replace(" Value ", " Uncertainty ")
            if uncertainty_header in data_frame:
                data_frame = data_frame.drop(uncertainty_header, axis=1)
        # Drop any rows which do not contain any values for the property types of
        # interest.
        property_headers = [
            header
            for header in property_headers
            if header.split(" ")[0] in schema.property_types
        ]
        data_frame = data_frame.dropna(subset=property_headers, how="all")
        # Apply a more specific filter which only retains rows which contain
        # values for the specific property types, and which were measured for
        # the specified number of components.
        for property_type, n_components in schema.n_components.items():
            property_header = next(
                iter(x for x in property_headers if x.find(f"{property_type} ") == 0),
                None,
            )
            if property_header is None:
                continue
            data_frame = data_frame[
                data_frame[property_header].isna()
                | data_frame["N Components"].isin(n_components)
            ]
        # Apply the strict filter if requested
        if schema.strict:
            reordered_data_frame = reorder_data_frame(data_frame)
            # Build a dictionary of which properties should be present partitioned
            # by the number of components they should have been be measured for.
            property_types = defaultdict(list)
            if len(schema.n_components) > 0:
                for property_type, n_components in schema.n_components.items():
                    for n_component in n_components:
                        property_types[n_component].append(property_type)
                min_n_components = min(property_types)
                max_n_components = max(property_types)
            else:
                min_n_components = reordered_data_frame["N Components"].min()
                max_n_components = reordered_data_frame["N Components"].max()
                for n_components in range(min_n_components, max_n_components + 1):
                    property_types[n_components].extend(schema.property_types)
            substances_with_data = set()
            components_with_data = {}
            # For each N component find substances which have data points for
            # all of the specified property types.
            for n_components in range(min_n_components, max_n_components + 1):
                component_data = reordered_data_frame[
                    reordered_data_frame["N Components"] == n_components
                ]
                if n_components not in property_types or len(component_data) == 0:
                    continue
                n_component_headers = [
                    header
                    for header in property_headers
                    if header.split(" ")[0] in property_types[n_components]
                    and header in component_data
                ]
                if len(n_component_headers) != len(property_types[n_components]):
                    continue
                n_component_substances = set.intersection(
                    *[
                        data_frame_to_substances(
                            component_data[component_data[header].notna()]
                        )
                        for header in n_component_headers
                    ]
                )
                substances_with_data.update(n_component_substances)
                components_with_data[n_components] = {
                    component
                    for substance in n_component_substances
                    for component in substance
                }
            if len(schema.n_components) > 0:
                # NOTE(review): raises TypeError if `components_with_data` is
                # empty (no matching data found) — confirm whether a guard is
                # needed upstream.
                components_with_all_data = set.intersection(
                    *components_with_data.values()
                )
                # Filter out any smiles which don't appear in all of the
                # N-component data sets.
                data_frame = FilterBySmiles.apply(
                    data_frame,
                    FilterBySmilesSchema(smiles_to_include=[*components_with_all_data]),
                )
            # Filter out any substances which do not have data available for
            # all of the specified data types.
            data_frame = FilterBySubstances.apply(
                data_frame,
                FilterBySubstancesSchema(substances_to_include=[*substances_with_data]),
            )
            data_frame = data_frame.dropna(axis=1, how="all")
        return data_frame
class FilterByStereochemistrySchema(CurationComponentSchema):
    """Options for ``FilterByStereochemistry`` (no configurable fields)."""
    type: Literal["FilterByStereochemistry"] = "FilterByStereochemistry"
class FilterByStereochemistry(CurationComponent):
    """Drops data points whose components have undefined stereochemistry,
    as detected by the OpenFF toolkit when parsing the SMILES."""
    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByStereochemistrySchema,
        n_processes,
    ) -> pandas.DataFrame:
        from openff.toolkit.topology import Molecule
        from openff.toolkit.utils import UndefinedStereochemistryError
        def filter_function(data_row):
            # True only when every component parses without raising
            # UndefinedStereochemistryError.
            n_components = data_row["N Components"]
            for index in range(n_components):
                smiles = data_row[f"Component {index + 1}"]
                try:
                    Molecule.from_smiles(smiles)
                except UndefinedStereochemistryError:
                    return False
            return True
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByChargedSchema(CurationComponentSchema):
    """Options for ``FilterByCharged`` (no configurable fields)."""
    type: Literal["FilterByCharged"] = "FilterByCharged"
class FilterByCharged(CurationComponent):
    """Drops data points for systems containing a component whose net formal
    charge is non-zero."""
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterByChargedSchema, n_processes
    ) -> pandas.DataFrame:
        from openff.toolkit.topology import Molecule
        from simtk import unit as simtk_unit
        def filter_function(data_row):
            n_components = data_row["N Components"]
            for index in range(n_components):
                smiles = data_row[f"Component {index + 1}"]
                molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
                # Formal charges may be plain ints or simtk unit-bearing
                # quantities depending on the toolkit version; normalise both.
                # noinspection PyUnresolvedReferences
                atom_charges = [
                    atom.formal_charge
                    if isinstance(atom.formal_charge, int)
                    else atom.formal_charge.value_in_unit(simtk_unit.elementary_charge)
                    for atom in molecule.atoms
                ]
                if numpy.isclose(sum(atom_charges), 0.0):
                    continue
                return False
            return True
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByIonicLiquidSchema(CurationComponentSchema):
    """Options for ``FilterByIonicLiquid`` (no configurable fields)."""
    type: Literal["FilterByIonicLiquid"] = "FilterByIonicLiquid"
class FilterByIonicLiquid(CurationComponent):
    """Drops data points for systems where any component's SMILES contains
    a '.' (a disconnected fragment, used here as the ionic signature)."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByIonicLiquidSchema,
        n_processes,
    ) -> pandas.DataFrame:
        def row_has_no_ionic_component(data_row) -> bool:
            n_components = data_row["N Components"]
            smiles_patterns = (
                data_row[f"Component {index + 1}"] for index in range(n_components)
            )
            return not any("." in smiles for smiles in smiles_patterns)

        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(row_has_no_ionic_component, axis=1)]
class FilterBySmilesSchema(CurationComponentSchema):
    """Options for ``FilterBySmiles``: exactly one of an include-list or an
    exclude-list of component SMILES patterns."""
    type: Literal["FilterBySmiles"] = "FilterBySmiles"
    smiles_to_include: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to retain. This option is mutually "
        "exclusive with `smiles_to_exclude`",
    )
    smiles_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smiles patterns to exclude. This option is mutually "
        "exclusive with `smiles_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must appear in "
        "the `smiles_to_include` list, otherwise, only some must appear. "
        "This option only applies when `smiles_to_include` is set.",
    )
    # Exactly one of the two lists must be provided.
    @root_validator
    def _validate_mutually_exclusive(cls, values):
        smiles_to_include = values.get("smiles_to_include")
        smiles_to_exclude = values.get("smiles_to_exclude")
        assert smiles_to_include is not None or smiles_to_exclude is not None
        assert smiles_to_include is None or smiles_to_exclude is None
        return values
class FilterBySmiles(CurationComponent):
    """Retains (or drops) data points based on whether their component SMILES
    appear in the schema's include (or exclude) list."""
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmilesSchema, n_processes
    ) -> pandas.DataFrame:
        smiles_to_include = schema.smiles_to_include
        smiles_to_exclude = schema.smiles_to_exclude
        # Normalise so both lists are always defined; the schema guarantees
        # exactly one was provided.
        if smiles_to_include is not None:
            smiles_to_exclude = []
        elif smiles_to_exclude is not None:
            smiles_to_include = []
        def filter_function(data_row):
            n_components = data_row["N Components"]
            component_smiles = [
                data_row[f"Component {index + 1}"] for index in range(n_components)
            ]
            # Exclude mode: drop the row if any component is black-listed.
            if any(x in smiles_to_exclude for x in component_smiles):
                return False
            elif len(smiles_to_exclude) > 0:
                return True
            # Include mode: require all (or, when partial inclusion is
            # allowed, at least one) components to be white-listed.
            if not schema.allow_partial_inclusion and not all(
                x in smiles_to_include for x in component_smiles
            ):
                return False
            if schema.allow_partial_inclusion and not any(
                x in smiles_to_include for x in component_smiles
            ):
                return False
            return True
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterBySmirksSchema(CurationComponentSchema):
    """Options for ``FilterBySmirks``: exactly one of an include-list or an
    exclude-list of SMIRKS patterns to match against components."""
    type: Literal["FilterBySmirks"] = "FilterBySmirks"
    smirks_to_include: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_exclude`",
    )
    smirks_to_exclude: Optional[List[str]] = Field(
        None,
        description="The smirks patterns which must not be matched by a substance in "
        "order to retain a measurement. This option is mutually exclusive with "
        "`smirks_to_include`",
    )
    allow_partial_inclusion: bool = Field(
        False,
        description="If False, all the components in a substance must match at least "
        "one pattern in `smirks_to_include` in order to retain a measurement, "
        "otherwise, only a least one component must match. This option only applies "
        "when `smirks_to_include` is set.",
    )
    # Exactly one of the two lists must be provided.
    @root_validator
    def _validate_mutually_exclusive(cls, values):
        smirks_to_include = values.get("smirks_to_include")
        smirks_to_exclude = values.get("smirks_to_exclude")
        assert smirks_to_include is not None or smirks_to_exclude is not None
        assert smirks_to_include is None or smirks_to_exclude is None
        return values
class FilterBySmirks(CurationComponent):
    """Retains (or drops) data points based on whether their components match
    the SMIRKS patterns configured on the schema."""
    @staticmethod
    @functools.lru_cache(1000)
    def _find_smirks_matches(smiles_pattern, *smirks_patterns):
        # Returns the subset of `smirks_patterns` matched by the molecule
        # parsed from `smiles_pattern`; cached as the same SMILES recur
        # across many rows.
        from openff.toolkit.topology import Molecule
        if len(smirks_patterns) == 0:
            return []
        molecule = Molecule.from_smiles(smiles_pattern, allow_undefined_stereo=True)
        matches = [
            smirks
            for smirks in smirks_patterns
            if len(molecule.chemical_environment_matches(smirks)) > 0
        ]
        return matches
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySmirksSchema, n_processes
    ) -> pandas.DataFrame:
        # The schema guarantees exactly one of the two lists is set.
        smirks_to_match = (
            schema.smirks_to_include
            if schema.smirks_to_include
            else schema.smirks_to_exclude
        )
        def filter_function(data_row):
            n_components = data_row["N Components"]
            component_smiles = [
                data_row[f"Component {index + 1}"] for index in range(n_components)
            ]
            smirks_matches = {
                smiles: cls._find_smirks_matches(smiles, *smirks_to_match)
                for smiles in component_smiles
            }
            if schema.smirks_to_exclude is not None:
                return not any(len(x) > 0 for x in smirks_matches.values())
            if schema.allow_partial_inclusion:
                return any(len(x) > 0 for x in smirks_matches.values())
            return all(len(x) > 0 for x in smirks_matches.values())
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByNComponentsSchema(CurationComponentSchema):
    """Options for ``FilterByNComponents``: the allowed component counts."""
    type: Literal["FilterByNComponents"] = "FilterByNComponents"
    n_components: List[PositiveInt] = Field(
        ...,
        description="The number of components that measurements should have been "
        "measured for in order to be retained.",
    )
class FilterByNComponents(CurationComponent):
    """Retains only measurements made for an allowed number of components."""

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByNComponentsSchema,
        n_processes,
    ) -> pandas.DataFrame:
        allowed_counts = schema.n_components
        selection_mask = data_frame["N Components"].isin(allowed_counts)
        return data_frame[selection_mask]
class FilterBySubstancesSchema(CurationComponentSchema):
    """Options for ``FilterBySubstances``: exactly one of an include-list or
    an exclude-list of substances, each given as a tuple of component SMILES
    (composition is ignored)."""
    type: Literal["FilterBySubstances"] = "FilterBySubstances"
    substances_to_include: Optional[List[Tuple[str, ...]]] = Field(
        None,
        description="The substances compositions to retain, where each tuple in the "
        "list contains the smiles patterns which make up the substance to include. "
        "This option is mutually exclusive with `substances_to_exclude`.",
    )
    substances_to_exclude: Optional[List[Tuple[str, ...]]] = Field(
        None,
        # Fixed copy-paste error: this option excludes substances.
        description="The substances compositions to exclude, where each tuple in the "
        "list contains the smiles patterns which make up the substance to exclude. "
        "This option is mutually exclusive with `substances_to_include`.",
    )
    # Exactly one of the two lists must be provided.
    @root_validator
    def _validate_mutually_exclusive(cls, values):
        substances_to_include = values.get("substances_to_include")
        substances_to_exclude = values.get("substances_to_exclude")
        assert substances_to_include is not None or substances_to_exclude is not None
        assert substances_to_include is None or substances_to_exclude is None
        return values
class FilterBySubstances(CurationComponent):
    """Retains (or drops) data points whose full substance — the sorted tuple
    of component SMILES — appears in the schema's include (exclude) list."""
    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: FilterBySubstancesSchema, n_processes
    ) -> pandas.DataFrame:
        def filter_function(data_row):
            n_components = data_row["N Components"]
            substances_to_include = schema.substances_to_include
            substances_to_exclude = schema.substances_to_exclude
            # Canonicalise each substance as a sorted tuple so comparison is
            # independent of the component ordering.
            if substances_to_include is not None:
                substances_to_include = [
                    tuple(sorted(x)) for x in substances_to_include
                ]
            if substances_to_exclude is not None:
                substances_to_exclude = [
                    tuple(sorted(x)) for x in substances_to_exclude
                ]
            substance = tuple(
                sorted(
                    [
                        data_row[f"Component {index + 1}"]
                        for index in range(n_components)
                    ]
                )
            )
            return (
                substances_to_exclude is not None
                and substance not in substances_to_exclude
            ) or (
                substances_to_include is not None and substance in substances_to_include
            )
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
class FilterByEnvironmentsSchema(CurationComponentSchema):
    """Options for ``FilterByEnvironments``: the chemical environments to
    require, either per whole substance or per component, and whether
    matching is 'at least one' or strict."""
    type: Literal["FilterByEnvironments"] = "FilterByEnvironments"
    per_component_environments: Optional[Dict[int, ComponentEnvironments]] = Field(
        None,
        description="The environments which should be present in the components of "
        "the substance for which the measurements were made. Each dictionary "
        "key corresponds to a number of components in the system, and each "
        "value the environments which should be matched by those n components. "
        "This option is mutually exclusive with `environments`.",
    )
    environments: Optional[List[ChemicalEnvironment]] = Field(
        None,
        description="The environments which should be present in the substances for "
        "which measurements were made. This option is mutually exclusive with "
        "`per_component_environments`.",
    )
    at_least_one_environment: bool = Field(
        True,
        description="If true, data points will only be retained if all of the "
        "components in the measured system contain at least one of the specified "
        "environments. This option is mutually exclusive with "
        "`strictly_specified_environments`.",
    )
    strictly_specified_environments: bool = Field(
        False,
        description="If true, data points will only be retained if all of the "
        "components in the measured system strictly contain only the specified "
        "environments and no others. This option is mutually exclusive with "
        "`at_least_one_environment`.",
    )
    # Each N-component entry must list environments for exactly N components.
    @validator("per_component_environments")
    def _validate_per_component_environments(cls, value):
        if value is None:
            return value
        assert all(len(y) == x for x, y in value.items())
        return value
    # Enforce both pairwise mutual exclusions declared in the descriptions.
    @root_validator
    def _validate_mutually_exclusive(cls, values):
        at_least_one_environment = values.get("at_least_one_environment")
        strictly_specified_environments = values.get("strictly_specified_environments")
        assert (
            at_least_one_environment is True or strictly_specified_environments is True
        )
        assert (
            at_least_one_environment is False
            or strictly_specified_environments is False
        )
        per_component_environments = values.get("per_component_environments")
        environments = values.get("environments")
        assert per_component_environments is not None or environments is not None
        assert per_component_environments is None or environments is None
        return values
class FilterByEnvironments(CurationComponent):
    """Retains data points whose components exhibit the chemical environments
    configured on the schema, using checkmol to analyse each component."""
    @classmethod
    def _find_environments_per_component(cls, data_row: pandas.Series):
        # Returns one list of environments per component, or None (and logs)
        # when checkmol cannot parse any of the component SMILES.
        n_components = data_row["N Components"]
        component_smiles = [
            data_row[f"Component {index + 1}"] for index in range(n_components)
        ]
        component_moieties = [analyse_functional_groups(x) for x in component_smiles]
        if any(x is None for x in component_moieties):
            logger.info(
                f"Checkmol was unable to parse the system with components="
                f"{component_smiles} and so this data point was discarded."
            )
            return None
        return component_moieties
    @classmethod
    def _is_match(cls, component_environments, environments_to_match, schema):
        # Strict mode requires ALL of a component's environments to be in the
        # allowed set; otherwise ANY overlap counts as a match.
        operator = all if schema.strictly_specified_environments else any
        return operator(
            environment in environments_to_match
            for environment in component_environments
        )
    @classmethod
    def _filter_by_environments(cls, data_row, schema: FilterByEnvironmentsSchema):
        # Whole-substance mode: every component must match `schema.environments`.
        environments_per_component = cls._find_environments_per_component(data_row)
        if environments_per_component is None:
            return False
        return all(
            cls._is_match(component_environments, schema.environments, schema)
            for component_environments in environments_per_component
        )
    @classmethod
    def _filter_by_per_component(cls, data_row, schema: FilterByEnvironmentsSchema):
        # Per-component mode: solve an assignment problem pairing each
        # component with one of the schema's environment lists.
        n_components = data_row["N Components"]
        if (
            schema.per_component_environments is not None
            and n_components not in schema.per_component_environments
        ):
            # No filter was specified for this number of components.
            return True
        environments_per_component = cls._find_environments_per_component(data_row)
        if environments_per_component is None:
            return False
        match_matrix = numpy.zeros((n_components, n_components))
        for component_index, component_environments in enumerate(
            environments_per_component
        ):
            # noinspection PyUnresolvedReferences
            for environments_index, environments_to_match in enumerate(
                schema.per_component_environments[n_components]
            ):
                match_matrix[component_index, environments_index] = cls._is_match(
                    component_environments, environments_to_match, schema
                )
        # Maximum-weight matching; retain the row only if every component was
        # paired with an environment list it actually matches.
        x_indices, y_indices = linear_sum_assignment(match_matrix, maximize=True)
        return numpy.all(match_matrix[x_indices, y_indices] > 0)
    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: FilterByEnvironmentsSchema,
        n_processes,
    ) -> pandas.DataFrame:
        if schema.environments is not None:
            filter_function = functools.partial(
                cls._filter_by_environments, schema=schema
            )
        else:
            filter_function = functools.partial(
                cls._filter_by_per_component, schema=schema
            )
        # noinspection PyTypeChecker
        return data_frame[data_frame.apply(filter_function, axis=1)]
# Union of every curation-component schema defined in this module; each member
# carries a distinct `type` Literal field which tags the concrete schema.
FilterComponentSchema = Union[
    FilterDuplicatesSchema,
    FilterByTemperatureSchema,
    FilterByPressureSchema,
    FilterByMoleFractionSchema,
    FilterByRacemicSchema,
    FilterByElementsSchema,
    FilterByPropertyTypesSchema,
    FilterByStereochemistrySchema,
    FilterByChargedSchema,
    FilterByIonicLiquidSchema,
    FilterBySmilesSchema,
    FilterBySmirksSchema,
    FilterByNComponentsSchema,
    FilterBySubstancesSchema,
    FilterByEnvironmentsSchema,
]
| true | true |
f721d1659e4a76de250c7e3aa60844e53d5b0b27 | 1,929 | py | Python | migrations/versions/9ede8d2d7089_initialize_migration.py | casio-ka/DailyBlog | 4668a977c540308b2f00fcc86e5e02cb3878edc8 | [
"MIT"
] | null | null | null | migrations/versions/9ede8d2d7089_initialize_migration.py | casio-ka/DailyBlog | 4668a977c540308b2f00fcc86e5e02cb3878edc8 | [
"MIT"
] | null | null | null | migrations/versions/9ede8d2d7089_initialize_migration.py | casio-ka/DailyBlog | 4668a977c540308b2f00fcc86e5e02cb3878edc8 | [
"MIT"
] | null | null | null | """Initialize Migration
Revision ID: 9ede8d2d7089
Revises:
Create Date: 2020-09-28 00:25:38.033227
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9ede8d2d7089'
# First migration in the chain: no parent revision.
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the roles, users and profile_photos tables, plus the unique
    email index and non-unique username index on users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.Column('password_hash', sa.String(length=255), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    op.create_table('profile_photos',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pic_path', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse the migration: drop indexes and tables in dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('profile_photos')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_table('roles')
    # ### end Alembic commands ###
| 33.842105 | 83 | 0.671332 | from alembic import op
import sqlalchemy as sa
revision = '9ede8d2d7089'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.create_table('profile_photos',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pic_path', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
| true | true |
f721d2802ff021d2044c01d4e530003b8cd4e151 | 329 | py | Python | sparkdq/analytics/states/ModeState.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
] | 1 | 2021-02-08T07:49:54.000Z | 2021-02-08T07:49:54.000Z | sparkdq/analytics/states/ModeState.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
] | null | null | null | sparkdq/analytics/states/ModeState.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
] | null | null | null | from sparkdq.analytics.states.State import DoubleValuedState
class ModeState(DoubleValuedState):
    """Double-valued analyzer state holding a column's mode (most frequent value)."""
    def __init__(self, mode_value):
        # The mode computed upstream by the analyzer; reported verbatim as the metric.
        self.mode_value = mode_value
    def metric_value(self):
        """Return the stored mode as this state's metric value."""
        return self.mode_value
    # State merging was left disabled in the original.  NOTE(review): max() of
    # two modes is not a correct mode merge, so enabling this as-is would be wrong.
    # def sum(self, other):
    #     return ModeState(max(self.mode_value, other.mode_value))
| 23.5 | 66 | 0.714286 | from sparkdq.analytics.states.State import DoubleValuedState
class ModeState(DoubleValuedState):
def __init__(self, mode_value):
self.mode_value = mode_value
def metric_value(self):
return self.mode_value
| true | true |
f721d29f05b3f1aa5d1ea37c61f38905f80ae65c | 6,502 | py | Python | otherCodeTaskSnippets/14.01.2022.py | s2812135/Data_Challenges_WiSe2122 | a55372f444e7344af4e2e1f04e4244fb8cefeefe | [
"MIT"
] | null | null | null | otherCodeTaskSnippets/14.01.2022.py | s2812135/Data_Challenges_WiSe2122 | a55372f444e7344af4e2e1f04e4244fb8cefeefe | [
"MIT"
] | null | null | null | otherCodeTaskSnippets/14.01.2022.py | s2812135/Data_Challenges_WiSe2122 | a55372f444e7344af4e2e1f04e4244fb8cefeefe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 16:03:32 2022
@author: dariu
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:43:25 2021
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
from sklearn.cluster import SpectralCoclustering
from sklearn.metrics import consensus_score
from sklearn.cluster import SpectralBiclustering
from sklearn import svm
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NearMiss
from imblearn.pipeline import make_pipeline
from imblearn.metrics import classification_report_imbalanced
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
#%%
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
dfs.append(df_temp)
df = pd.concat(dfs)
labels_true = df["SepsisLabel"].tolist()
#%%
'''
#df = df[["HR", "O2Sat", "Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2"]]
df = df[["Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "ICULOS"]]
labels_gender = df["Gender"].tolist()
labels_unit1 = df["Unit1"].tolist()
labels_unit2 = df["Unit2"].tolist()
#############################################
'''
#%%
'''
df = df[[
"BaseExcess",
"HCO3",
"FiO2",
"pH",
"PaCO2",
"SaO2",
"AST",
"BUN",
"Alkalinephos",
"Calcium",
"Chloride",
"Creatinine",
"Bilirubin_direct",
"Glucose",
"Lactate",
"Magnesium",
"Phosphate",
"Potassium",
"Bilirubin_total",
"TroponinI",
"Hct",
"Hgb",
"PTT",
"WBC",
"Fibrinogen",
"Platelets"
]]
#%%
'''
#############################################
imputation_dims = [
'DBP',
'HR',
'O2Sat',
'Temp',
'SBP',
'MAP',
'Resp',
]
for d in imputation_dims:
mean = round(df[d].sum()/df.shape[0], 2)
df.loc[df[d].isna(), d] = mean
####################################################
df = df.drop(columns=["SepsisLabel"])
df_current = df.fillna(df.mean())
#df_current = df.fillna(2)
###########################################################
#df_current = df
##############################
#85 labels_pred?
#%%
'''
def calc_scores(X, labels_true, labels_pred):
rand_score = metrics.rand_score(labels_true, labels_pred)
adjusted_rand_score = metrics.adjusted_rand_score(labels_true, labels_pred)
adjusted_mutual_info_score = metrics.cluster.adjusted_mutual_info_score(labels_true, labels_pred)
silhouette_score = metrics.silhouette_score(X, labels_pred, metric='euclidean', sample_size=None, random_state=None)
print("Rand Score: " , str(rand_score) + "\n" +
"Adjusted Rand Score: " , str(adjusted_rand_score) + "\n"
"Adjusted Mutual Information Score: " + str(adjusted_mutual_info_score) + "\n"
"Silhouette Score: " , str(silhouette_score) + "\n"
)
'''
#%%
'''
############################################################
# initializing the pacmap instance
# Setting n_neighbors to "None" leads to a default choice shown below in "parameter" section
embedding = pacmap.PaCMAP(n_dims=5, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
# fit the data (The index of transformed data corresponds to the index of the original data)
X_transformed = embedding.fit_transform(df_current.values, init="pca")
####################################################################
'''
#%%
'''
model = SpectralCoclustering(n_clusters=9, random_state=0)
#model.fit(df_current.values)
model.fit(X_transformed)
#score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx]))
#print("consensus score: {:.3f}".format(score))
#fit_data = df_current.values[np.argsort(model.row_labels_)]
fit_data = X_transformed[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
fit_data = fit_data[0:len(labels_true), 0:41]
#plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.matshow(fit_data, cmap='Spectral')
#plt.matshow(fit_data, cmap=plt.cm.RdYlGn)
#plt.matshow(fit_data, cmap=plt.cm.YlOrRd)
#plt.matshow(fit_data)
#plt.matshow(fit_data, cmap='rainbow')
#plt.matshow(fit_data, cmap='Set1')
#plt.matshow(fit_data, cmap='tab20')
#plt.matshow(fit_data, cmap='gist_rainbow')
plt.gca().set_aspect('auto')
#plt.gca().set_aspect('equal', adjustable='box')
#plt.axis('scaled')
#plt.title("After biclustering; rearranged to show biclusters")
plt.show()
#%%
'''
#
#%%
'''
model = SpectralBiclustering(n_clusters=(10, 5), method="log", random_state=0)
#model = SpectralBiclustering(n_clusters=(10, 5), method="bistochastic", random_state=0)
model.fit(df_current.values)
#model.fit(X_transformed)
#fit_data = df_current.values[np.argsort(model.row_labels_)]
fit_data = df_current.values[np.argsort(model.row_labels_)]
#fit_data = X_transformed[:, np.argsort(model.column_labels_)]
#plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.matshow(fit_data, cmap='Spectral')
plt.gca().set_aspect('auto')
#plt.title("After biclustering; rearranged to show biclusters")
#plt.matshow(
# np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
# cmap=plt.cm.Blues,
#)
plt.matshow(
np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
cmap='Spectral',
)
plt.gca().set_aspect('auto')
#plt.title("Checkerboard structure of rearranged data")
plt.show()
'''
#%%
X_train, X_test, y_train, y_test = train_test_split(df_current, labels_true, test_size=0.2)
#%%
X_train_ss = X_train[0:int(0.1*len(X_train))]
y_train_ss = y_train[0:int(0.1*len(y_train))]
# Create a pipeline
pipeline = make_pipeline(
NearMiss(version=2), svm.SVC())
pipeline.fit(X_train_ss, y_train_ss)
# Classify and report the results
print(classification_report_imbalanced(y_test, pipeline.predict(X_test)))
| 21.529801 | 120 | 0.652261 |
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
from sklearn.cluster import SpectralCoclustering
from sklearn.metrics import consensus_score
from sklearn.cluster import SpectralBiclustering
from sklearn import svm
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NearMiss
from imblearn.pipeline import make_pipeline
from imblearn.metrics import classification_report_imbalanced
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
dfs.append(df_temp)
df = pd.concat(dfs)
labels_true = df["SepsisLabel"].tolist()
| true | true |
f721d55c74656c111fb68f8f7273731a712814c3 | 8,851 | py | Python | archive/maketiles_old.py | arroqc/pandacancer_kaggle | a6945dcac041dac744570d61ee630ee6f32e7117 | [
"MIT"
] | 3 | 2020-07-23T02:02:19.000Z | 2020-07-23T02:58:08.000Z | archive/maketiles_old.py | arroqc/pandacancer_kaggle | a6945dcac041dac744570d61ee630ee6f32e7117 | [
"MIT"
] | null | null | null | archive/maketiles_old.py | arroqc/pandacancer_kaggle | a6945dcac041dac744570d61ee630ee6f32e7117 | [
"MIT"
] | 3 | 2020-07-23T02:02:45.000Z | 2020-11-16T02:58:51.000Z | import skimage.io
import numpy as np
import pandas as pd
import sys
from pathlib import Path
import pickle
import argparse
import cv2
# Command-line configuration: where the raw PANDA slide data lives and where
# the generated tiles should be written.
parser = argparse.ArgumentParser()
parser.add_argument("--base_dir", default='G:/Datasets/panda', required=False)
parser.add_argument("--out_dir", default='D:/Datasets/panda', required=False)
args = parser.parse_args()
BASE_PATH = Path(args.base_dir)
OUTPUT_BASE = Path(args.out_dir)
# Tiling parameters: tile edge length (pixels), tiles kept per slide, and
# which pyramid level of the multi-resolution TIFF to read (indexed from the
# end of the image list -- see `col[-LEVEL]` in the main loop).
SIZE = 128
NUM = 16
LEVEL = 1
# When True, tile on four half-stride grids instead of a single grid.
STRIDE = False
TRAIN_PATH = BASE_PATH/'train_images/'
MASKS_TRAIN_PATH = BASE_PATH/'train_label_masks/'
OUTPUT_IMG_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/imgs/'
OUTPUT_MASK_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/masks/'
PICKLE_NAME = OUTPUT_BASE/f'stats_{SIZE}_{LEVEL}.pkl'
CSV_PATH = BASE_PATH/'train.csv'
# Slide ids known to carry felt-pen annotations; these are cleaned with
# `remove_pen_marks` before tiling.
pen_marked_images = [
    'fd6fe1a3985b17d067f2cb4d5bc1e6e1',
    'ebb6a080d72e09f6481721ef9f88c472',
    'ebb6d5ca45942536f78beb451ee43cc4',
    'ea9d52d65500acc9b9d89eb6b82cdcdf',
    'e726a8eac36c3d91c3c4f9edba8ba713',
    'e90abe191f61b6fed6d6781c8305fe4b',
    'fd0bb45eba479a7f7d953f41d574bf9f',
    'ff10f937c3d52eff6ad4dd733f2bc3ac',
    'feee2e895355a921f2b75b54debad328',
    'feac91652a1c5accff08217d19116f1c',
    'fb01a0a69517bb47d7f4699b6217f69d',
    'f00ec753b5618cfb30519db0947fe724',
    'e9a4f528b33479412ee019e155e1a197',
    'f062f6c1128e0e9d51a76747d9018849',
    'f39bf22d9a2f313425ee201932bac91a',
]
def remove_pen_marks(img):
    """Blank out felt-pen annotations on a slide image.

    Returns a tuple ``(cleaned_img, pen_mask, keep_mask)`` where ``pen_mask``
    is the eroded raw candidate mask and ``keep_mask`` is the inverted stroke
    mask that was applied (1 = keep pixel, 0 = pen mark).
    """
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    # Anything not close to white on the first channel is a pen-mark
    # candidate (bounds hardcoded, as in the original heuristic).
    lower_bound = np.array([0, 0, 0])
    upper_bound = np.array([200, 255, 255])
    pen_mask = cv2.inRange(img, lower_bound, upper_bound)

    # Erode away thin tissue picked up by the range test, then keep only
    # large connected components, which correspond to real pen strokes.
    pen_mask = cv2.erode(pen_mask, ellipse, iterations=4)
    stroke_mask = np.zeros(pen_mask.shape, dtype=np.uint8)
    contours, _ = cv2.findContours(pen_mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        xs, ys = contour[:, 0, 0], contour[:, 0, 1]
        width, height = xs.max() - xs.min(), ys.max() - ys.min()
        if width > 100 and height > 100:
            cv2.drawContours(stroke_mask, [contour], 0, 1, -1)

    # Grow the strokes a little, then invert: 1 = keep, 0 = pen mark.
    stroke_mask = cv2.dilate(stroke_mask, ellipse, iterations=3)
    keep_mask = (1 - stroke_mask)

    # Apply the keep-mask and paint the removed pixels white.
    img = cv2.bitwise_and(img, img, mask=keep_mask)
    img[img == 0] = 255
    return img, pen_mask, keep_mask
class TileMaker:
    """Cut a slide image (and its label mask) into fixed-size square tiles
    and keep the tiles that contain the most tissue.

    Tissue content is scored on the red channel: pixels with values strictly
    between 200 and 230 are summed, which favours stained tissue over white
    background (~255) and very dark artefacts.
    """

    def __init__(self, size, number):
        # size: tile edge length in pixels; number: tiles kept by `make`
        # (`make_multistride` keeps 4 * number).
        self.size = size
        self.number = number

    @staticmethod
    def _tissue_rank(tiles):
        """Return tile indices sorted by decreasing tissue score."""
        red_channel = tiles[:, :, :, 0]
        tissue = np.where((red_channel < 230) & (red_channel > 200), red_channel, 0)
        return np.argsort(np.sum(tissue, axis=(1, 2)))[::-1]

    def make_multistride(self, image, mask):
        """Tile image/mask on four half-stride grids and return the
        ``4 * self.number`` most tissue-rich tiles with their mask tiles.

        BUGFIX: the original passed image slices as the mask for three of
        the four grids and returned an unrelated, unsorted mask array; mask
        tiles now track their image tiles through selection.
        """
        # Pad only once so every grid divides evenly into size x size tiles.
        image, mask = self.__pad(image, mask)
        half = self.size // 2
        grids = [
            (image, mask),
            # Vertically shifted grid (trim half a tile top and bottom).
            (image[half:-half, :], mask[half:-half, :]),
            # Horizontally shifted grid.
            (image[:, half:-half], mask[:, half:-half]),
            # Diagonally shifted grid.
            (image[half:-half, half:-half], mask[half:-half, half:-half]),
        ]
        image_tiles = []
        mask_tiles = []
        for img_view, mask_view in grids:
            t_img, t_mask = self.__get_tiles(img_view, mask_view)
            image_tiles.append(t_img)
            mask_tiles.append(t_mask)
        all_images = np.concatenate(image_tiles, axis=0)
        all_masks = np.concatenate(mask_tiles, axis=0)
        keep = self._tissue_rank(all_images)[:self.number * 4]
        return all_images[keep], all_masks[keep]

    def __pad(self, image, mask):
        """Pad so height and width are multiples of ``self.size``.

        Image padding is white (255 == empty slide background); mask padding
        is black (0 == unlabeled).
        """
        h, w, c = image.shape
        horizontal_pad = 0 if (w % self.size) == 0 else self.size - (w % self.size)
        vertical_pad = 0 if (h % self.size) == 0 else self.size - (h % self.size)
        pad_spec = ((vertical_pad // 2, vertical_pad - vertical_pad // 2),
                    (horizontal_pad // 2, horizontal_pad - horizontal_pad // 2),
                    (0, 0))
        image = np.pad(image, pad_width=pad_spec, mode='constant', constant_values=255)
        mask = np.pad(mask, pad_width=pad_spec, mode='constant', constant_values=0)
        return image, mask

    def __get_tiles(self, image, mask):
        """Split image/mask into non-overlapping size x size tiles.

        If the grid yields fewer than ``self.number`` tiles, blank tiles
        (white image / black mask) are appended so downstream selection
        always has enough to choose from.
        """
        h, w, c = image.shape
        image = image.reshape(h // self.size, self.size, w // self.size, self.size, c)
        image = image.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
        mask = mask.reshape(h // self.size, self.size, w // self.size, self.size, c)
        mask = mask.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
        if image.shape[0] < self.number:
            image = np.pad(image, pad_width=((0, self.number - image.shape[0]), (0, 0), (0, 0), (0, 0)),
                           mode='constant', constant_values=255)
            mask = np.pad(mask, pad_width=((0, self.number - mask.shape[0]), (0, 0), (0, 0), (0, 0)),
                          mode='constant', constant_values=0)
        return image, mask

    def make(self, image, mask):
        """Return the ``self.number`` most tissue-rich tiles and their masks."""
        image, mask = self.__pad(image, mask)
        image, mask = self.__get_tiles(image, mask)
        keep = self._tissue_rank(image)[:self.number]
        return image[keep], mask[keep]
if __name__ == "__main__":
    # Prepare the output folders and a tiler configured from the constants above.
    OUTPUT_IMG_PATH.mkdir(exist_ok=True, parents=True)
    OUTPUT_MASK_PATH.mkdir(exist_ok=True, parents=True)
    tile_maker = TileMaker(SIZE, NUM)
    img_list = list(TRAIN_PATH.glob('**/*.tiff'))
    # img_list.pop(5765)
    bad_images = []
    bad_masks = []
    image_stats = []
    files = []
    for i, img_fn in enumerate(img_list):
        img_id = img_fn.stem
        mask_fn = MASKS_TRAIN_PATH / (img_id + '_mask.tiff')
        # Read the requested pyramid level; unreadable slides are recorded and
        # skipped.  NOTE(review): the bare `except` also swallows e.g.
        # KeyboardInterrupt -- consider `except Exception`.
        try:
            col = skimage.io.MultiImage(str(img_fn))
            image = col[-LEVEL]
        except:
            bad_images.append(img_id)
            continue
        if img_id in pen_marked_images:
            image, _, _ = remove_pen_marks(image)
        # The label mask is optional; fall back to an all-zero mask when it is
        # missing or unreadable (unreadable ones are recorded in bad_masks).
        if mask_fn.exists():
            try:
                mask = skimage.io.MultiImage(str(mask_fn))[-LEVEL]
            except:
                bad_masks.append(img_id)
                mask = np.zeros_like(image)
        else:
            mask = np.zeros_like(image)
        if STRIDE:
            image, mask = tile_maker.make_multistride(image, mask)
        else:
            image, mask = tile_maker.make(image, mask)
        # Progress indicator (rewrites the same console line).
        sys.stdout.write(f'\r{i + 1}/{len(img_list)}')
        # Per-slide channel statistics, computed over the kept tiles only.
        image_stats.append({'image_id': img_id, 'mean': image.mean(axis=(0, 1, 2)) / 255,
                            'mean_square': ((image / 255) ** 2).mean(axis=(0, 1, 2)),
                            'img_mean': (255 - image).mean()})
        # NOTE(review): this inner loop reuses `i`, shadowing the outer slide
        # index.  Harmless here (the progress line is printed beforehand and
        # enumerate reassigns `i` next iteration) but fragile.  `a` and `b`
        # are the same filename string, used in the image and mask folders.
        for i, (tile_image, tile_mask) in enumerate(zip(image, mask)):
            a = (img_id + '_' + str(i) + '.png')
            b = (img_id + '_' + str(i) + '.png')
            files.append({'image_id': img_id, 'num': i, 'filename': a, 'maskname': b,
                          'value': (255-tile_image[:, :, 0]).mean()})
            skimage.io.imsave(OUTPUT_IMG_PATH / a, tile_image, check_contrast=False)
            skimage.io.imsave(OUTPUT_MASK_PATH / b, tile_mask, check_contrast=False)
    # Join the per-slide stats onto the training CSV and persist summaries.
    image_stats = pd.DataFrame(image_stats)
    df = pd.read_csv(CSV_PATH)
    df = pd.merge(df, image_stats, on='image_id', how='left')
    df[['image_id', 'img_mean']].to_csv(OUTPUT_BASE/f'img_mean_{SIZE}_{LEVEL}.csv', index=False)
    # Per-provider (and overall) channel mean/std via Var = E[x^2] - E[x]^2.
    provider_stats = {}
    for provider in df['data_provider'].unique():
        mean = (df[df['data_provider'] == provider]['mean']).mean(0)
        std = np.sqrt((df[df['data_provider'] == provider]['mean_square']).mean(0) - mean ** 2)
        provider_stats[provider] = (mean, std)
    mean = (df['mean']).mean()
    std = np.sqrt((df['mean_square']).mean() - mean ** 2)
    provider_stats['all'] = (mean, std)
    with open(PICKLE_NAME, 'wb') as file:
        pickle.dump(provider_stats, file)
    pd.DataFrame(files).to_csv(OUTPUT_BASE/f'files_{SIZE}_{LEVEL}.csv', index=False)
    print(bad_images)
    print(bad_masks)
    print(provider_stats)
| 38.316017 | 104 | 0.600949 | import skimage.io
import numpy as np
import pandas as pd
import sys
from pathlib import Path
import pickle
import argparse
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--base_dir", default='G:/Datasets/panda', required=False)
parser.add_argument("--out_dir", default='D:/Datasets/panda', required=False)
args = parser.parse_args()
BASE_PATH = Path(args.base_dir)
OUTPUT_BASE = Path(args.out_dir)
SIZE = 128
NUM = 16
LEVEL = 1
STRIDE = False
TRAIN_PATH = BASE_PATH/'train_images/'
MASKS_TRAIN_PATH = BASE_PATH/'train_label_masks/'
OUTPUT_IMG_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/imgs/'
OUTPUT_MASK_PATH = OUTPUT_BASE/f'train_tiles_{SIZE}_{LEVEL}/masks/'
PICKLE_NAME = OUTPUT_BASE/f'stats_{SIZE}_{LEVEL}.pkl'
CSV_PATH = BASE_PATH/'train.csv'
pen_marked_images = [
'fd6fe1a3985b17d067f2cb4d5bc1e6e1',
'ebb6a080d72e09f6481721ef9f88c472',
'ebb6d5ca45942536f78beb451ee43cc4',
'ea9d52d65500acc9b9d89eb6b82cdcdf',
'e726a8eac36c3d91c3c4f9edba8ba713',
'e90abe191f61b6fed6d6781c8305fe4b',
'fd0bb45eba479a7f7d953f41d574bf9f',
'ff10f937c3d52eff6ad4dd733f2bc3ac',
'feee2e895355a921f2b75b54debad328',
'feac91652a1c5accff08217d19116f1c',
'fb01a0a69517bb47d7f4699b6217f69d',
'f00ec753b5618cfb30519db0947fe724',
'e9a4f528b33479412ee019e155e1a197',
'f062f6c1128e0e9d51a76747d9018849',
'f39bf22d9a2f313425ee201932bac91a',
]
def remove_pen_marks(img):
kernel5x5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
lower = np.array([0, 0, 0])
upper = np.array([200, 255, 255])
img_mask1 = cv2.inRange(img, lower, upper)
img_mask1 = cv2.erode(img_mask1, kernel5x5, iterations=4)
img_mask2 = np.zeros(img_mask1.shape, dtype=np.uint8)
contours, _ = cv2.findContours(img_mask1, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
x, y = contour[:, 0, 0], contour[:, 0, 1]
w, h = x.max() - x.min(), y.max() - y.min()
if w > 100 and h > 100:
cv2.drawContours(img_mask2, [contour], 0, 1, -1)
img_mask2 = cv2.dilate(img_mask2, kernel5x5, iterations=3)
img_mask2 = (1 - img_mask2)
img = cv2.bitwise_and(img, img, mask=img_mask2)
img[img == 0] = 255
return img, img_mask1, img_mask2
class TileMaker:
def __init__(self, size, number):
self.size = size
self.number = number
def make_multistride(self, image, mask):
image, mask = self.__pad(image, mask)
s0, _ = self.__get_tiles(image, mask)
s1, _ = self.__get_tiles(image[self.size // 2:-self.size // 2, :],
mask[self.size // 2:-self.size // 2, :])
s2, _ = self.__get_tiles(image[:, self.size // 2:-self.size // 2],
image[:, self.size // 2:-self.size // 2])
s3, _ = self.__get_tiles(image[self.size // 2:-self.size // 2, self.size // 2:-self.size // 2],
image[self.size // 2:-self.size // 2, self.size // 2:-self.size // 2])
all_tiles = np.concatenate([s0, s1, s2, s3], axis=0)
red_channel = all_tiles[:, :, :, 0]
tissue = np.where((red_channel < 230) & (red_channel > 200), red_channel, 0)
sorted_tiles = np.argsort(np.sum(tissue, axis=(1, 2)))[::-1]
sorted_tiles = sorted_tiles[:self.number * 4]
return all_tiles[sorted_tiles], _
def __pad(self, image, mask):
h, w, c = image.shape
horizontal_pad = 0 if (w % self.size) == 0 else self.size - (w % self.size)
vertical_pad = 0 if (h % self.size) == 0 else self.size - (h % self.size)
image = np.pad(image, pad_width=((vertical_pad // 2, vertical_pad - vertical_pad // 2),
(horizontal_pad // 2, horizontal_pad - horizontal_pad // 2),
(0, 0)),
mode='constant', constant_values=255)
mask = np.pad(mask, pad_width=((vertical_pad // 2, vertical_pad - vertical_pad // 2),
(horizontal_pad // 2, horizontal_pad - horizontal_pad // 2),
(0, 0)),
mode='constant', constant_values=0)
return image, mask
def __get_tiles(self, image, mask):
h, w, c = image.shape
image = image.reshape(h // self.size, self.size, w // self.size, self.size, c)
image = image.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
mask = mask.reshape(h // self.size, self.size, w // self.size, self.size, c)
mask = mask.swapaxes(1, 2).reshape(-1, self.size, self.size, c)
if image.shape[0] < self.number:
image = np.pad(image, pad_width=((0, self.number - image.shape[0]), (0, 0), (0, 0), (0, 0)),
mode='constant', constant_values=255)
mask = np.pad(mask, pad_width=((0, self.number - mask.shape[0]), (0, 0), (0, 0), (0, 0)),
mode='constant', constant_values=0)
return image, mask
def make(self, image, mask):
image, mask = self.__pad(image, mask)
image, mask = self.__get_tiles(image, mask)
red_channel = image[:, :, :, 0]
tissue = np.where((red_channel < 230) & (red_channel > 200), red_channel, 0)
sorted_tiles = np.argsort(np.sum(tissue, axis=(1, 2)))[::-1]
sorted_tiles = sorted_tiles[:self.number]
return image[sorted_tiles], mask[sorted_tiles]
if __name__ == "__main__":
OUTPUT_IMG_PATH.mkdir(exist_ok=True, parents=True)
OUTPUT_MASK_PATH.mkdir(exist_ok=True, parents=True)
tile_maker = TileMaker(SIZE, NUM)
img_list = list(TRAIN_PATH.glob('**/*.tiff'))
bad_images = []
bad_masks = []
image_stats = []
files = []
for i, img_fn in enumerate(img_list):
img_id = img_fn.stem
mask_fn = MASKS_TRAIN_PATH / (img_id + '_mask.tiff')
try:
col = skimage.io.MultiImage(str(img_fn))
image = col[-LEVEL]
except:
bad_images.append(img_id)
continue
if img_id in pen_marked_images:
image, _, _ = remove_pen_marks(image)
if mask_fn.exists():
try:
mask = skimage.io.MultiImage(str(mask_fn))[-LEVEL]
except:
bad_masks.append(img_id)
mask = np.zeros_like(image)
else:
mask = np.zeros_like(image)
if STRIDE:
image, mask = tile_maker.make_multistride(image, mask)
else:
image, mask = tile_maker.make(image, mask)
sys.stdout.write(f'\r{i + 1}/{len(img_list)}')
image_stats.append({'image_id': img_id, 'mean': image.mean(axis=(0, 1, 2)) / 255,
'mean_square': ((image / 255) ** 2).mean(axis=(0, 1, 2)),
'img_mean': (255 - image).mean()})
for i, (tile_image, tile_mask) in enumerate(zip(image, mask)):
a = (img_id + '_' + str(i) + '.png')
b = (img_id + '_' + str(i) + '.png')
files.append({'image_id': img_id, 'num': i, 'filename': a, 'maskname': b,
'value': (255-tile_image[:, :, 0]).mean()})
skimage.io.imsave(OUTPUT_IMG_PATH / a, tile_image, check_contrast=False)
skimage.io.imsave(OUTPUT_MASK_PATH / b, tile_mask, check_contrast=False)
image_stats = pd.DataFrame(image_stats)
df = pd.read_csv(CSV_PATH)
df = pd.merge(df, image_stats, on='image_id', how='left')
df[['image_id', 'img_mean']].to_csv(OUTPUT_BASE/f'img_mean_{SIZE}_{LEVEL}.csv', index=False)
provider_stats = {}
for provider in df['data_provider'].unique():
mean = (df[df['data_provider'] == provider]['mean']).mean(0)
std = np.sqrt((df[df['data_provider'] == provider]['mean_square']).mean(0) - mean ** 2)
provider_stats[provider] = (mean, std)
mean = (df['mean']).mean()
std = np.sqrt((df['mean_square']).mean() - mean ** 2)
provider_stats['all'] = (mean, std)
with open(PICKLE_NAME, 'wb') as file:
pickle.dump(provider_stats, file)
pd.DataFrame(files).to_csv(OUTPUT_BASE/f'files_{SIZE}_{LEVEL}.csv', index=False)
print(bad_images)
print(bad_masks)
print(provider_stats)
| true | true |
f721d6bf664d81374c648dd133a87502c3abb0e5 | 475 | py | Python | mooring/migrations/0040_admissionsbooking_expirytime.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | null | null | null | mooring/migrations/0040_admissionsbooking_expirytime.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | 2 | 2020-04-30T12:02:15.000Z | 2021-03-19T22:41:46.000Z | mooring/migrations/0040_admissionsbooking_expirytime.py | jawaidm/moorings | 22db3fa5917fb13cbee144e64529221ef862cb39 | [
"Apache-2.0"
] | 6 | 2020-01-13T08:45:09.000Z | 2021-02-24T03:31:02.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-21 06:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``expiryTime`` timestamp to ``admissionsbooking``."""

    dependencies = [
        ('mooring', '0039_admissionsbooking'),
    ]

    operations = [
        migrations.AddField(
            model_name='admissionsbooking',
            name='expiryTime',
            # Nullable/blank so existing rows need no default when migrating.
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooring', '0039_admissionsbooking'),
]
operations = [
migrations.AddField(
model_name='admissionsbooking',
name='expiryTime',
field=models.DateTimeField(blank=True, null=True),
),
]
| true | true |
f721d7d8b4c42f9662d809895e1a8f8424cec320 | 445 | py | Python | dexcom_reader/record_test.py | ijustlovemath/dexcom_reader | c7ee4cf0f5fa7f96f7186635513e7662fd845d16 | [
"MIT"
] | 1 | 2020-10-22T14:26:57.000Z | 2020-10-22T14:26:57.000Z | dexcom_reader/record_test.py | ijustlovemath/dexcom_reader | c7ee4cf0f5fa7f96f7186635513e7662fd845d16 | [
"MIT"
] | null | null | null | dexcom_reader/record_test.py | ijustlovemath/dexcom_reader | c7ee4cf0f5fa7f96f7186635513e7662fd845d16 | [
"MIT"
] | 1 | 2020-10-22T14:50:56.000Z | 2020-10-22T14:50:56.000Z | from future import print_function
import readdata
dd = readdata.Dexcom.FindDevice()
dr = readdata.Dexcom(dd)
meter_records = dr.ReadRecords('METER_DATA')
print('First Meter Record = ')
print(meter_records[0])
print('Last Meter Record =')
print(meter_records[-1])
insertion_records = dr.ReadRecords('INSERTION_TIME')
print('First Insertion Record = ')
print(insertion_records[0])
print('Last Insertion Record = ')
print(insertion_records[-1])
| 27.8125 | 52 | 0.768539 | from future import print_function
import readdata
dd = readdata.Dexcom.FindDevice()
dr = readdata.Dexcom(dd)
meter_records = dr.ReadRecords('METER_DATA')
print('First Meter Record = ')
print(meter_records[0])
print('Last Meter Record =')
print(meter_records[-1])
insertion_records = dr.ReadRecords('INSERTION_TIME')
print('First Insertion Record = ')
print(insertion_records[0])
print('Last Insertion Record = ')
print(insertion_records[-1])
| true | true |
f721d8be22e254b94549fbcd3026017e54e07cae | 444 | py | Python | microkg/releaser.py | goude/microkg | 2d4007bc2dcd6e240b6ba84991189ff66ff80969 | [
"MIT"
] | null | null | null | microkg/releaser.py | goude/microkg | 2d4007bc2dcd6e240b6ba84991189ff66ff80969 | [
"MIT"
] | 1 | 2021-06-02T00:43:28.000Z | 2021-06-02T00:43:28.000Z | microkg/releaser.py | goude/microkg | 2d4007bc2dcd6e240b6ba84991189ff66ff80969 | [
"MIT"
] | null | null | null | from pathlib import Path
def release(data: dict, outfile: Path) -> None:
    """Write the ``flags`` field of every entry in ``data["data"]`` to
    *outfile*, pipe-separated, overwriting any existing content."""
    with open(outfile, "w") as handle:
        handle.write("|".join(entry["flags"] for entry in data["data"]))
def main() -> None:
    """Parse the checked-in source file and publish the result as the
    latest window artifact."""
    from . import parser

    source = Path("sources/rout.txt")
    target = Path("releases/latest/window.txt")
    release(data=parser.parse(source), outfile=target)


if __name__ == "__main__":
    main()
| 21.142857 | 50 | 0.617117 | from pathlib import Path
def release(data: dict, outfile: Path) -> None:
with open(outfile, "w") as fh:
fls = [dd["flags"] for dd in data["data"]]
fh.write("|".join(fls))
def main() -> None:
from . import parser
infile = Path("sources/rout.txt")
outfile = Path("releases/latest/window.txt")
output = parser.parse(infile)
release(data=output, outfile=outfile)
if __name__ == "__main__":
main()
| true | true |
f721d928941c5327cd287c478c3dc6357184dfb5 | 528 | py | Python | 278. First Bad Version.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | 278. First Bad Version.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | 278. First Bad Version.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | # The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution(object):
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
low, high = 1, n
while True:
if isBadVersion(low):
return low
mid = low + (high-low)/2
isBad = isBadVersion(mid)
if isBad:
high = mid
else:
low = mid + 1
| 24 | 50 | 0.482955 |
class Solution(object):
def firstBadVersion(self, n):
low, high = 1, n
while True:
if isBadVersion(low):
return low
mid = low + (high-low)/2
isBad = isBadVersion(mid)
if isBad:
high = mid
else:
low = mid + 1
| true | true |
f721d9b1288c7cad5f15bfa3c14c94e43bf2cc77 | 3,624 | py | Python | benchmarks/f3_wrong_hints/scaling_software_termination/2-2Nested_false-termination_21.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_software_termination/2-2Nested_false-termination_21.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_software_termination/2-2Nested_false-termination_21.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Encode the benchmark program as a fair transition system.

    Informally the program is:
        pc 0: while x >= 0:     (exit to pc = -1 when the guard fails)
        pc 1:     x = x + y
        pc 2:     y = y + 1     (then back to pc 0)
    Returns ``(symbols, init, trans, fairness)`` where ``fairness`` is
    ``pc != -1``: a fair (non-terminating) run never reaches the exit.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    # State variables and their primed (next-state) copies.
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])
    m_1 = mgr.Int(-1)  # encodes the exit location pc = -1
    n_locs = 3
    max_int = n_locs
    # Cache integer constants and the pc = k / pc' = k predicates per location.
    ints = []
    pcs = []
    x_pcs = []
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # NOTE(review): empty range (max_int == n_locs); presumably left over from
    # a benchmark template that used extra integer constants.
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    # Control-flow graph: one implication per pc transition.
    cfg = []
    # pc = 0 & (x >= 0) -> pc' = 1
    cond = mgr.GE(x, ints[0])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1
    cfg.append(mgr.Implies(pcend, x_pcend))
    # Data transitions: which variables change at each location.
    trans = []
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    # Fairness: the run must visit pc != -1 infinitely often.
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the candidate hints for this model: ``h_y3`` (over y) and
    ``h_x7`` (over x).

    NOTE(review): the benchmark lives under ``f3_wrong_hints``, so these
    hints are presumably meant to be unhelpful to the search -- do not
    "repair" their formulas.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])
    # Integer constants used in the hint formulas.
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)  # NOTE(review): unused below; kept from the template.
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)
    x_pc = symb_to_next(mgr, pc)  # NOTE(review): unused below.
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    res = []
    # Hint over y: two locations alternating 0 -> 1 -> 0.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # Hint over x: three locations cycling 0 -> 1 -> 2 -> 0.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
    loc1 = Location(env, mgr.GE(x, i_2))
    loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    loc2 = Location(env, mgr.LE(x, i_4))
    loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
    h_x = Hint("h_x7", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)
    return frozenset(res)
| 28.761905 | 77 | 0.561258 | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                               FNode]:
    """Return (symbols, init, trans, fairness) encoding the program:

        pc0: while x >= 0:   # exits to pc = -1 when x < 0
        pc1:     x = x + y
        pc2:     y = y + 1

    ``fairness`` is the condition pc != -1 (presumably a justice
    requirement used when searching for nonterminating runs — confirm
    against the caller).
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])
    m_1 = mgr.Int(-1)
    n_locs = 3
    max_int = n_locs
    ints = []
    pcs = []
    x_pcs = []
    # build pc = k and pc' = k predicates for every program location
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # extra integer constants beyond the locations (empty: max_int == n_locs)
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    # control-flow graph: constraints on pc -> pc'
    cfg = []
    cond = mgr.GE(x, ints[0])
    # pc = 0 & x >= 0 -> pc' = 1
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1  (end location is a sink)
    cfg.append(mgr.Implies(pcend, x_pcend))
    # data transitions: how x and y evolve at each location
    trans = []
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the set of Hints for the transition system over pc, x, y.

    Builds two hints: ``h_y3``, a 2-location loop constraining only ``y``,
    and ``h_x7``, a 3-location loop constraining only ``x``.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])
    # integer constants used in location conditions and progress formulas
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)  # NOTE: unused below
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    res = []
    # hint h_y3: two locations constraining only y
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)
    # hint h_x7: three locations constraining only x
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
    loc1 = Location(env, mgr.GE(x, i_2))
    loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    loc2 = Location(env, mgr.LE(x, i_4))
    loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
    h_x = Hint("h_x7", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)
    return frozenset(res)
| true | true |
f721db3583a7683724f3bd4358f1cd3bd9a389f6 | 236 | py | Python | Python/Ex029.py | renato-rt/Python | ba033094e1da5b55cf9ce4c8a5cf2cd90247db36 | [
"MIT"
] | null | null | null | Python/Ex029.py | renato-rt/Python | ba033094e1da5b55cf9ce4c8a5cf2cd90247db36 | [
"MIT"
] | null | null | null | Python/Ex029.py | renato-rt/Python | ba033094e1da5b55cf9ce4c8a5cf2cd90247db36 | [
"MIT"
] | 1 | 2021-11-30T17:34:33.000Z | 2021-11-30T17:34:33.000Z | v = int(input('\033[4;33;42mDigite a velocidade do carro: \033[m'))
lim = 80
m = 7
km = (v-lim)
if v > lim:
print('\033[1;30;41mVocê será multado em R${:.2f}.\033[m'.format(km*m))
else:
print('Parabéns você dirige com atenção!') | 29.5 | 75 | 0.631356 | v = int(input('\033[4;33;42mDigite a velocidade do carro: \033[m'))
lim = 80
m = 7
km = (v-lim)
if v > lim:
print('\033[1;30;41mVocê será multado em R${:.2f}.\033[m'.format(km*m))
else:
print('Parabéns você dirige com atenção!') | true | true |
f721db77d83f00f40283fd23d4e3b37bc36d2b31 | 3,524 | py | Python | kiddytimer/src/KTglob.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 30 | 2015-05-08T22:10:00.000Z | 2022-03-13T22:09:31.000Z | kiddytimer/src/KTglob.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 124 | 2015-04-27T21:30:48.000Z | 2022-03-29T10:21:39.000Z | kiddytimer/src/KTglob.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 193 | 2015-01-10T09:21:26.000Z | 2022-03-21T08:19:33.000Z | from __future__ import absolute_import
from .__init__ import _
from Components.config import config
import time
PLUGIN_BASE = "KiddyTimer"
PLUGIN_VERSION = "1.3"
DAYNAMES = (_("Sunday"),
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
)
ONEHOUR = 3600
ONEMINUTE = 60
MOVEPOSITIONSTEP = 10
#This is a hack to get the times in the current timezone to feed as default value for the ConfigClock
ONEOCLOCK = time.mktime([2000, 1, 1, 1, 0, 0, 5, 1, time.timezone])
FOUROCLOCK = time.mktime([2000, 1, 1, 4, 0, 0, 5, 1, time.timezone])
EIGHTOCLOCK = time.mktime([2000, 1, 1, 8, 0, 0, 5, 1, time.timezone])
EIGHTOCLOCKNOON = time.mktime([2000, 1, 1, 20, 0, 0, 5, 1, time.timezone])
plugin_path = ""
##############################################################################
SKIN = """
<screen flags="wfNoBorder" position="0,0" size="82,104" title="Kiddy Timer" backgroundColor="#ff000000">
<ePixmap pixmap="~/img/Smiley-Background.png" position="0,0" zPosition="1" size="82,82" alphatest="blend" transparent="1"/>
<widget name="TimerSlider" pixmap="~/img/Smiley-Slider.png" zPosition="4" position="0,0" size="82,82" transparent="1" orientation="orBottomToTop" />
<widget name="TimerSliderText" zPosition="5" position="0,83" size="82,21" font="Regular;18" halign="center" valign="center" foregroundColor="#000000" backgroundColor="#aaffffff" />
<widget name="TimerGraph" pixmaps="~/img/Timer1000.png,~/img/Timer0950.png,~/img/Timer0900.png,~/img/Timer0850.png,~/img/Timer0800.png,~/img/Timer0750.png,~/img/Timer0700.png,~/img/Timer0650.png,~/img/Timer0600.png,~/img/Timer0550.png,~/img/Timer0500.png,~/img/Timer0450.png,~/img/Timer0400.png,~/img/Timer0350.png,~/img/Timer0300.png,~/img/Timer0250.png,~/img/Timer0200.png,~/img/Timer0150.png,~/img/Timer0100.png,~/img/Timer0050.png,~/img/Timer0000.png" position="0,0" zPosition="2" size="82,82" transparent="1" alphatest="on" />
<widget name="TimerText" zPosition="3" position="0,30" size="82,21" font="Regular;18" halign="center" valign="center" foregroundColor="#000000" transparent = "1" />
<widget name="TimerTransparent" pixmap="~/img/Transparent.png" zPosition="2" position="0,0" size="82,82" transparent="1" alphatest="off" />
<widget name="TimerTransparentText" zPosition="3" position="0,30" size="82,21" font="Regular;18" halign="center" valign="center" foregroundColor="#000000" backgroundColor="#aaffffff" />
</screen>"""
##############################################################################
def getTodaysTimeInSeconds():
    """Return today's configured KiddyTimer allowance in seconds.

    Reads the per-weekday time value from the plugin configuration
    (``config.plugins.KiddyTimer.dayTimes``) indexed by the current
    weekday (0 = Sunday per strftime "%w").
    """
    # Number of the current day
    dayNr = int(time.strftime("%w", time.localtime()))
    # Number of seconds for the current day
    iDayTime = getSecondsFromClock(config.plugins.KiddyTimer.dayTimes[dayNr].timeValue.value)
    return(iDayTime)
def getSecondsFromClock(aClock):
    """Convert an (hour, minute) clock pair into a total number of seconds."""
    hours = int(aClock[0])
    minutes = int(aClock[1])
    return (hours * 60 + minutes) * 60
def getTimeFromSeconds(iSecondsLeft, bReturnSeconds):
    """Format a number of seconds as a zero-padded clock string.

    Parameters:
        iSecondsLeft -- number of seconds to format (non-negative)
        bReturnSeconds -- if True return "HH:MM:SS", otherwise "HH:MM"

    Returns the formatted string. Unlike the previous implementation,
    hour counts of 100 or more are no longer silently truncated to
    their last two digits, and the non-idiomatic ``== False``
    comparison is gone.
    """
    iHours, iHourRest = divmod(int(iSecondsLeft), 3600)
    iMinutes, iSeconds = divmod(iHourRest, 60)
    if bReturnSeconds:
        return "%02d:%02d:%02d" % (iHours, iMinutes, iSeconds)
    return "%02d:%02d" % (iHours, iMinutes)
| 50.342857 | 539 | 0.628547 | from __future__ import absolute_import
from .__init__ import _
from Components.config import config
import time
PLUGIN_BASE = "KiddyTimer"
PLUGIN_VERSION = "1.3"
DAYNAMES = (_("Sunday"),
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
)
ONEHOUR = 3600
ONEMINUTE = 60
MOVEPOSITIONSTEP = 10
ONEOCLOCK = time.mktime([2000, 1, 1, 1, 0, 0, 5, 1, time.timezone])
FOUROCLOCK = time.mktime([2000, 1, 1, 4, 0, 0, 5, 1, time.timezone])
EIGHTOCLOCK = time.mktime([2000, 1, 1, 8, 0, 0, 5, 1, time.timezone])
EIGHTOCLOCKNOON = time.mktime([2000, 1, 1, 20, 0, 0, 5, 1, time.timezone])
plugin_path = ""
| true | true |
f721dc0f7b2a6690beba2a884bde66614f03a8de | 83,659 | py | Python | Tax-Calculator-3.0.0/taxcalc/calcfunctions.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | Tax-Calculator-3.0.0/taxcalc/calcfunctions.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | Tax-Calculator-3.0.0/taxcalc/calcfunctions.py | grantseiter/Biden-Tax-Proposals | c215ff845264f3fce9281c7fbb343ed10758a4b6 | [
"MIT"
] | null | null | null | """
Tax-Calculator functions that calculate payroll and individual income taxes.
These functions are imported into the Calculator class.
Note: the parameter_indexing_CPI_offset policy parameter is the only
policy parameter that does not appear here; it is used in the policy.py
file to possibly adjust the price inflation rate used to index policy
parameters (as would be done in a reform that introduces chained-CPI
indexing).
"""
# CODING-STYLE CHECKS:
# pycodestyle calcfunctions.py
# pylint --disable=locally-disabled calcfunctions.py
#
# pylint: disable=too-many-lines
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
import math
import copy
import numpy as np
from taxcalc.decorators import iterate_jit, JIT
def BenefitPrograms(calc):
    """
    Calculate total government cost and consumption value of benefits
    delivered by non-repealed benefit programs.

    Parameters
    ----------
    calc: Calculator object; its benefit input variables are read (and
        zeroed in place for repealed programs) and its
        benefit_cost_total and benefit_value_total variables are set.

    Returns
    -------
    Nothing; results are written back via calc.array(name, value).
    """
    # zero out benefits delivered by repealed programs
    zero = np.zeros(calc.array_len)
    if calc.policy_param('BEN_housing_repeal'):
        calc.array('housing_ben', zero)
    if calc.policy_param('BEN_ssi_repeal'):
        calc.array('ssi_ben', zero)
    if calc.policy_param('BEN_snap_repeal'):
        calc.array('snap_ben', zero)
    if calc.policy_param('BEN_tanf_repeal'):
        calc.array('tanf_ben', zero)
    if calc.policy_param('BEN_vet_repeal'):
        calc.array('vet_ben', zero)
    if calc.policy_param('BEN_wic_repeal'):
        calc.array('wic_ben', zero)
    if calc.policy_param('BEN_mcare_repeal'):
        calc.array('mcare_ben', zero)
    if calc.policy_param('BEN_mcaid_repeal'):
        calc.array('mcaid_ben', zero)
    if calc.policy_param('BEN_oasdi_repeal'):
        calc.array('e02400', zero)
    if calc.policy_param('BEN_ui_repeal'):
        calc.array('e02300', zero)
    if calc.policy_param('BEN_other_repeal'):
        calc.array('other_ben', zero)
    # calculate government cost of all benefits
    cost = np.array(
        calc.array('housing_ben') +
        calc.array('ssi_ben') +
        calc.array('snap_ben') +
        calc.array('tanf_ben') +
        calc.array('vet_ben') +
        calc.array('wic_ben') +
        calc.array('mcare_ben') +
        calc.array('mcaid_ben') +
        calc.array('e02400') +
        calc.array('e02300') +
        calc.array('ubi') +
        calc.array('other_ben')
    )
    calc.array('benefit_cost_total', cost)
    # calculate consumption value of all benefits
    # (assuming that cash benefits have full value)
    value = np.array(
        calc.array('housing_ben') * calc.consump_param('BEN_housing_value') +
        calc.array('ssi_ben') +
        calc.array('snap_ben') * calc.consump_param('BEN_snap_value') +
        calc.array('tanf_ben') * calc.consump_param('BEN_tanf_value') +
        calc.array('vet_ben') * calc.consump_param('BEN_vet_value') +
        calc.array('wic_ben') * calc.consump_param('BEN_wic_value') +
        calc.array('mcare_ben') * calc.consump_param('BEN_mcare_value') +
        calc.array('mcaid_ben') * calc.consump_param('BEN_mcaid_value') +
        calc.array('e02400') +
        calc.array('e02300') +
        calc.array('ubi') +
        calc.array('other_ben') * calc.consump_param('BEN_other_value')
    )
    calc.array('benefit_value_total', value)
@iterate_jit(nopython=True)
def EI_PayrollTax(SS_Earnings_c, e00200p, e00200s, pencon_p, pencon_s,
                  FICA_ss_trt, FICA_mc_trt, ALD_SelfEmploymentTax_hc,
                  SS_Earnings_thd, e00900p, e00900s, e02100p, e02100s, k1bx14p,
                  k1bx14s, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
                  sey, earned, earned_p, earned_s,
                  was_plus_sey_p, was_plus_sey_s):
    """
    Compute part of total OASDI+HI payroll taxes and earned income variables.

    Computes FICA (on wage-and-salary income) and SECA (on
    self-employment income) for the taxpayer (_p) and spouse (_s),
    plus the extra OASDI tax on earnings above SS_Earnings_thd.
    The Additional Medicare Tax part of payroll taxes is computed
    separately in AdditionalMedicareTax.

    Returns
    -------
    (sey, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
     earned, earned_p, earned_s, was_plus_sey_p, was_plus_sey_s)
    """
    # compute sey and its individual components
    sey_p = e00900p + e02100p + k1bx14p
    sey_s = e00900s + e02100s + k1bx14s
    sey = sey_p + sey_s  # total self-employment income for filing unit
    # compute gross wage and salary income ('was' denotes 'wage and salary')
    gross_was_p = e00200p + pencon_p
    gross_was_s = e00200s + pencon_s
    # compute taxable gross earnings for OASDI FICA (capped at SS_Earnings_c)
    txearn_was_p = min(SS_Earnings_c, gross_was_p)
    txearn_was_s = min(SS_Earnings_c, gross_was_s)
    # compute OASDI and HI payroll taxes on wage-and-salary income, FICA
    ptax_ss_was_p = FICA_ss_trt * txearn_was_p
    ptax_ss_was_s = FICA_ss_trt * txearn_was_s
    ptax_mc_was_p = FICA_mc_trt * gross_was_p
    ptax_mc_was_s = FICA_mc_trt * gross_was_s
    ptax_was = ptax_ss_was_p + ptax_ss_was_s + ptax_mc_was_p + ptax_mc_was_s
    # compute taxable self-employment income for OASDI SECA
    # (sey_frac implements the SECA "net earnings" adjustment)
    sey_frac = 1.0 - 0.5 * (FICA_ss_trt + FICA_mc_trt)
    txearn_sey_p = min(max(0., sey_p * sey_frac), SS_Earnings_c - txearn_was_p)
    txearn_sey_s = min(max(0., sey_s * sey_frac), SS_Earnings_c - txearn_was_s)
    # compute self-employment tax on taxable self-employment income, SECA
    setax_ss_p = FICA_ss_trt * txearn_sey_p
    setax_ss_s = FICA_ss_trt * txearn_sey_s
    setax_mc_p = FICA_mc_trt * max(0., sey_p * sey_frac)
    setax_mc_s = FICA_mc_trt * max(0., sey_s * sey_frac)
    setax_p = setax_ss_p + setax_mc_p
    setax_s = setax_ss_s + setax_mc_s
    setax = setax_p + setax_s
    # compute extra OASDI payroll taxes on the portion of the sum
    # of wage-and-salary income and taxable self employment income
    # that exceeds SS_Earnings_thd
    sey_frac = 1.0 - 0.5 * FICA_ss_trt
    was_plus_sey_p = gross_was_p + max(0., sey_p * sey_frac)
    was_plus_sey_s = gross_was_s + max(0., sey_s * sey_frac)
    extra_ss_income_p = max(0., was_plus_sey_p - SS_Earnings_thd)
    extra_ss_income_s = max(0., was_plus_sey_s - SS_Earnings_thd)
    extra_payrolltax = (extra_ss_income_p * FICA_ss_trt +
                        extra_ss_income_s * FICA_ss_trt)
    # compute part of total payroll taxes for filing unit
    # (the ptax_amc part of total payroll taxes for the filing unit is
    # computed in the AdditionalMedicareTax function below)
    payrolltax = ptax_was + setax + extra_payrolltax
    # compute OASDI part of payroll taxes
    ptax_oasdi = (ptax_ss_was_p + ptax_ss_was_s +
                  setax_ss_p + setax_ss_s +
                  extra_payrolltax)
    # compute earned* variables and AGI deduction for
    # "employer share" of self-employment tax, c03260
    # Note: c03260 is the amount on 2015 Form 1040, line 27
    c03260 = (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax
    earned = max(0., e00200p + e00200s + sey - c03260)
    earned_p = max(0., (e00200p + sey_p -
                        (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_p))
    earned_s = max(0., (e00200s + sey_s -
                        (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_s))
    return (sey, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
            earned, earned_p, earned_s, was_plus_sey_p, was_plus_sey_s)
@iterate_jit(nopython=True)
def DependentCare(nu13, elderly_dependents, earned,
                  MARS, ALD_Dependents_thd, ALD_Dependents_hc,
                  ALD_Dependents_Child_c, ALD_Dependents_Elder_c,
                  care_deduction):
    """
    Compute the above-the-line deduction for dependent care.

    Parameters
    ----------
    nu13: number of dependents under 13 years old
    elderly_dependents: number of elderly dependents
    earned: Form 2441 earned income amount
    MARS: marital (filing) status
    ALD_Dependents_thd: maximum earned income to qualify for deduction
    ALD_Dependents_hc: haircut applied to the dependent-care deduction
    ALD_Dependents_Child_c: per-child deduction amount
    ALD_Dependents_Elder_c: per-elderly-dependent deduction ceiling

    Returns
    -------
    care_deduction: total above-the-line dependent-care deduction
                    (zero when earned income exceeds the threshold)
    """
    care_deduction = 0.
    if earned <= ALD_Dependents_thd[MARS - 1]:
        keep_rate = 1. - ALD_Dependents_hc
        care_deduction = (keep_rate * nu13 * ALD_Dependents_Child_c +
                          keep_rate * elderly_dependents *
                          ALD_Dependents_Elder_c)
    return care_deduction
@iterate_jit(nopython=True)
def Adj(e03150, e03210, c03260,
        e03270, e03300, e03400, e03500, e00800,
        e03220, e03230, e03240, e03290, care_deduction,
        ALD_StudentLoan_hc, ALD_SelfEmp_HealthIns_hc, ALD_KEOGH_SEP_hc,
        ALD_EarlyWithdraw_hc, ALD_AlimonyPaid_hc, ALD_AlimonyReceived_hc,
        ALD_EducatorExpenses_hc, ALD_HSADeduction_hc, ALD_IRAContributions_hc,
        ALD_DomesticProduction_hc, ALD_Tuition_hc,
        c02900):
    """
    Compute Form 1040 AGI adjustments (above-the-line deductions), c02900.

    Each deduction component is reduced by its policy haircut (the
    ``ALD_*_hc`` parameters) before being summed.  The Form 2555
    foreign earned income exclusion is assumed to be zero.

    Components (after haircut):
      e03210 student loan interest; c03260 self-employment tax deduction
      (already haircut upstream); e03400 early-withdrawal penalty;
      e03500 alimony paid; e00800 alimony received; e03220 educator
      expenses; e03230 tuition and fees; e03240 domestic production;
      e03290 HSA deduction; e03270 self-employed health insurance;
      e03150 deductible IRA contributions; e03300 KEOGH/SEP/SIMPLE
      contributions; care_deduction dependent-care deduction.

    Returns
    -------
    c02900: total Form 1040 adjustments, which are not included in AGI
    """
    # accumulate each haircut-adjusted component in the same order as
    # the original single-expression sum
    c02900 = (1. - ALD_StudentLoan_hc) * e03210
    c02900 += c03260
    c02900 += (1. - ALD_EarlyWithdraw_hc) * e03400
    c02900 += (1. - ALD_AlimonyPaid_hc) * e03500
    c02900 += (1. - ALD_AlimonyReceived_hc) * e00800
    c02900 += (1. - ALD_EducatorExpenses_hc) * e03220
    c02900 += (1. - ALD_Tuition_hc) * e03230
    c02900 += (1. - ALD_DomesticProduction_hc) * e03240
    c02900 += (1. - ALD_HSADeduction_hc) * e03290
    c02900 += (1. - ALD_SelfEmp_HealthIns_hc) * e03270
    c02900 += (1. - ALD_IRAContributions_hc) * e03150
    c02900 += (1. - ALD_KEOGH_SEP_hc) * e03300
    c02900 += care_deduction
    return c02900
@iterate_jit(nopython=True)
def ALD_InvInc_ec_base(p22250, p23250, sep,
                       e00300, e00600, e01100, e01200,
                       invinc_ec_base):
    """
    Compute the base amount, invinc_ec_base, used for the
    investment-income exclusion from AGI.
    """
    # net short-term plus long-term capital gain, with losses limited
    # to -3000 dollars (split by sep for married-filing-separately)
    loss_floor = -3000. / sep
    net_cgain = p22250 + p23250
    if net_cgain < loss_floor:
        net_cgain = loss_floor
    # total investment income entering the exclusion base
    invinc_ec_base = e00300 + e00600 + net_cgain + e01100 + e01200
    return invinc_ec_base
@iterate_jit(nopython=True)
def CapGains(p23250, p22250, sep, ALD_StudentLoan_hc,
             ALD_InvInc_ec_rt, invinc_ec_base,
             e00200, e00300, e00600, e00650, e00700, e00800,
             CG_nodiff, CG_ec, CG_reinvest_ec_rt,
             ALD_BusinessLosses_c, MARS,
             e00900, e01100, e01200, e01400, e01700, e02000, e02100,
             e02300, e00400, e02400, c02900, e03210, e03230, e03240,
             c01000, c23650, ymod, ymod1, invinc_agi_ec,
             gains_at_death, CG_death, CG_death_ec):
    """
    Compute capital gains (including taxation of gains at death when
    CG_death is active), the AGI component ymod1, and the modified AGI
    amount ymod used in OASDI benefit taxation.

    Returns
    -------
    (c01000, c23650, ymod, ymod1, invinc_agi_ec,
     gains_at_death, taxable_gains_at_death)
    """
    # compute taxable portion of capital gains at death
    # (gains_at_death - CG_death_ec); use plain truthiness rather than
    # "is True" so non-bool truthy values (e.g. numpy bool_) also work
    if CG_death:
        taxable_gains_at_death = max(0., gains_at_death - CG_death_ec[MARS-1])
    else:
        taxable_gains_at_death = 0.
    # net capital gain (long term + short term + gains at death) before exclusion
    c23650 = p23250 + p22250 + taxable_gains_at_death
    # limitation on capital losses
    c01000 = max((-3000. / sep), c23650)
    # compute total investment income
    invinc = e00300 + e00600 + c01000 + e01100 + e01200
    # compute exclusion of investment income from AGI
    invinc_agi_ec = ALD_InvInc_ec_rt * max(0., invinc_ec_base)
    # compute ymod1 variable that is included in AGI
    ymod1 = (e00200 + e00700 + e00800 + e01400 + e01700 +
             invinc - invinc_agi_ec + e02100 + e02300 +
             max(e00900 + e02000, -ALD_BusinessLosses_c[MARS - 1]))
    if CG_nodiff:
        # apply QDIV+CG exclusion if QDIV+LTCG receive no special tax treatment
        qdcg_pos = max(0., e00650 + c01000)
        qdcg_exclusion = (min(CG_ec, qdcg_pos) +
                          CG_reinvest_ec_rt * max(0., qdcg_pos - CG_ec))
        ymod1 = max(0., ymod1 - qdcg_exclusion)
        invinc_agi_ec += qdcg_exclusion
    # compute ymod variable that is used in OASDI benefit taxation logic
    ymod2 = e00400 + (0.50 * e02400) - c02900
    ymod3 = (1. - ALD_StudentLoan_hc) * e03210 + e03230 + e03240
    ymod = ymod1 + ymod2 + ymod3
    return (c01000, c23650, ymod, ymod1, invinc_agi_ec,
            gains_at_death, taxable_gains_at_death)
@iterate_jit(nopython=True)
def SSBenefits(MARS, ymod, e02400, SS_thd50, SS_thd85,
               SS_percentage1, SS_percentage2, c02500):
    """
    Compute c02500, the portion of OASDI benefits included in AGI.

    Below the first threshold no benefits are taxable; between the
    thresholds up to SS_percentage1 of benefits are taxable; above the
    second threshold up to SS_percentage2 of benefits are taxable.
    """
    thd50 = SS_thd50[MARS - 1]
    thd85 = SS_thd85[MARS - 1]
    if ymod < thd50:
        c02500 = 0.
    elif ymod < thd85:
        c02500 = SS_percentage1 * min(ymod - thd50, e02400)
    else:
        amount = (SS_percentage2 * (ymod - thd85) +
                  SS_percentage1 * min(e02400, thd85 - thd50))
        c02500 = min(amount, SS_percentage2 * e02400)
    return c02500
@iterate_jit(nopython=True)
def UBI(nu18, n1820, n21, UBI_u18, UBI_1820, UBI_21, UBI_ecrt,
        ubi, taxable_ubi, nontaxable_ubi):
    """
    Compute total, taxable and nontaxable Universal Basic Income (UBI).

    Parameters
    ----------
    nu18: number of people in the tax unit under 18
    n1820: number of people in the tax unit age 18-20
    n21: number of people in the tax unit age 21+
    UBI_u18: UBI benefit for those under 18
    UBI_1820: UBI benefit for those age 18-20
    UBI_21: UBI benefit for those 21 or older
    UBI_ecrt: fraction of UBI benefits excluded from AGI

    Returns
    -------
    ubi: total UBI received by the tax unit (included in expanded_income)
    taxable_ubi: portion of UBI added to AGI
    nontaxable_ubi: portion of UBI excluded from AGI
    """
    # total benefit is the per-age-group amount times the group count
    ubi = nu18 * UBI_u18 + n1820 * UBI_1820 + n21 * UBI_21
    # split total into taxable and nontaxable portions
    taxable_frac = 1. - UBI_ecrt
    taxable_ubi = ubi * taxable_frac
    nontaxable_ubi = ubi - taxable_ubi
    return ubi, taxable_ubi, nontaxable_ubi
@iterate_jit(nopython=True)
def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi,
        II_em, II_em_ps, II_prt, II_no_em_nu18,
        c00100, pre_c04600, c04600):
    """
    Computes Adjusted Gross Income (AGI), c00100, and
    compute personal exemption amount, c04600.

    Returns
    -------
    (c00100, pre_c04600, c04600): AGI, pre-phaseout personal exemption
    amount, and post-phaseout personal exemption amount.
    """
    # calculate AGI assuming no foreign earned income exclusion
    c00100 = ymod1 + c02500 - c02900 + taxable_ubi
    # calculate personal exemption amount
    if II_no_em_nu18:  # repeal of personal exemptions for deps. under 18
        pre_c04600 = max(0, XTOT - nu18) * II_em
    else:
        pre_c04600 = XTOT * II_em
    if DSI:  # no personal exemption for those claimed as dependents
        pre_c04600 = 0.
    # phase-out personal exemption amount above the II_em_ps threshold
    if exact == 1:  # exact calculation as on tax forms
        line5 = max(0., c00100 - II_em_ps[MARS - 1])
        line6 = math.ceil(line5 / (2500. / sep))
        line7 = II_prt * line6
        c04600 = max(0., pre_c04600 * (1. - line7))
    else:  # smoothed calculation needed for sensible mtr calculation
        dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1])
        dispc_denom = 2500. / sep
        dispc = min(1., max(0., dispc_numer / dispc_denom))
        c04600 = pre_c04600 * (1. - dispc)
    return (c00100, pre_c04600, c04600)
@iterate_jit(nopython=True)
def ItemDedCap(e17500, e18400, e18500, e19200, e19800, e20100, e20400, g20500,
               c00100, ID_AmountCap_rt, ID_AmountCap_Switch, e17500_capped,
               e18400_capped, e18500_capped, e19200_capped, e19800_capped,
               e20100_capped, e20400_capped, g20500_capped):
    """
    Applies a cap to gross itemized deductions.

    Notes
    -----
    Tax Law Parameters:
        ID_AmountCap_Switch : Indicator for which itemized deductions are
                              capped
        ID_AmountCap_rt : Cap on itemized deductions; decimal fraction of AGI

    Taxpayer Characteristics:
        e17500 : Medical expenses

        e18400 : State and local taxes

        e18500 : Real-estate taxes

        e19200 : Interest paid

        e19800 : Charity cash contributions

        e20100 : Charity noncash contributions

        e20400 : Total miscellaneous expenses

        g20500 : Gross casualty or theft loss (before disregard)

        c00100: Adjusted Gross Income

    Returns
    -------
    e17500_capped: Medical expenses, capped by ItemDedCap
    e18400_capped: State and local taxes, capped by ItemDedCap
    e18500_capped : Real-estate taxes, capped by ItemDedCap
    e19200_capped : Interest paid, capped by ItemDedCap
    e19800_capped : Charity cash contributions, capped by ItemDedCap
    e20100_capped : Charity noncash contributions, capped by ItemDedCap
    e20400_capped : Total miscellaneous expenses, capped by ItemDedCap
    g20500_capped : Gross casualty or theft loss (before disregard),
                    capped by ItemDedCap
    """
    # pylint: disable=too-many-branches
    cap = max(0., ID_AmountCap_rt * c00100)
    # sum the deductions selected by ID_AmountCap_Switch
    gross_ded_amt = 0
    if ID_AmountCap_Switch[0]:  # medical
        gross_ded_amt += e17500
    if ID_AmountCap_Switch[1]:  # statelocal
        gross_ded_amt += e18400
    if ID_AmountCap_Switch[2]:  # realestate
        gross_ded_amt += e18500
    if ID_AmountCap_Switch[3]:  # casualty
        gross_ded_amt += g20500
    if ID_AmountCap_Switch[4]:  # misc
        gross_ded_amt += e20400
    if ID_AmountCap_Switch[5]:  # interest
        gross_ded_amt += e19200
    if ID_AmountCap_Switch[6]:  # charity
        gross_ded_amt += e19800 + e20100
    # amount by which the selected deductions exceed the AGI-based cap
    overage = max(0., gross_ded_amt - cap)
    e17500_capped = e17500
    e18400_capped = e18400
    e18500_capped = e18500
    g20500_capped = g20500
    e20400_capped = e20400
    e19200_capped = e19200
    e19800_capped = e19800
    e20100_capped = e20100
    # reduce each capped deduction in proportion to its share of the
    # gross deduction amount (only when AGI is positive)
    if overage > 0. and c00100 > 0.:
        if ID_AmountCap_Switch[0]:  # medical
            e17500_capped -= (e17500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[1]:  # statelocal
            e18400_capped -= (e18400 / (gross_ded_amt) * overage)
        if ID_AmountCap_Switch[2]:  # realestate
            e18500_capped -= (e18500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[3]:  # casualty
            g20500_capped -= (g20500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[4]:  # misc
            e20400_capped -= (e20400 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[5]:  # interest
            e19200_capped -= (e19200 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[6]:  # charity
            e19800_capped -= (e19800 / gross_ded_amt) * overage
            e20100_capped -= (e20100 / gross_ded_amt) * overage
    return (e17500_capped, e18400_capped, e18500_capped, g20500_capped,
            e20400_capped, e19200_capped, e19800_capped, e20100_capped)
@iterate_jit(nopython=True)
def ItemDed(e17500_capped, e18400_capped, e18500_capped, e19200_capped,
            e19800_capped, e20100_capped, e20400_capped, g20500_capped,
            MARS, age_head, age_spouse, c00100, c04470, c21040, c21060,
            c17000, c18300, c19200, c19700, c20500, c20800,
            ID_ps, ID_Medical_frt, ID_Medical_frt_add4aged, ID_Medical_hc,
            ID_Casualty_frt, ID_Casualty_hc, ID_Miscellaneous_frt,
            ID_Miscellaneous_hc, ID_Charity_crt_all, ID_Charity_crt_noncash,
            ID_prt, ID_crt, ID_c, ID_StateLocalTax_hc, ID_Charity_frt,
            ID_Charity_hc, ID_InterestPaid_hc, ID_RealEstate_hc,
            ID_Medical_c, ID_StateLocalTax_c, ID_RealEstate_c,
            ID_InterestPaid_c, ID_Charity_c, ID_Casualty_c,
            ID_Miscellaneous_c, ID_AllTaxes_c, ID_AllTaxes_hc,
            ID_StateLocalTax_crt, ID_RealEstate_crt, ID_Charity_f):
    """
    Calculates itemized deductions, Form 1040, Schedule A.

    Notes
    -----
    Tax Law Parameters:
        ID_ps : Itemized deduction phaseout AGI start (Pease)

        ID_crt : Itemized deduction maximum phaseout
                 as a decimal fraction of total itemized deduction (Pease)

        ID_prt : Itemized deduction phaseout rate (Pease)

        ID_c: Dollar limit on itemized deductions

        ID_Medical_frt : Deduction for medical expenses;
                         floor as a decimal fraction of AGI

        ID_Medical_frt_add4aged : Addon for medical expenses deduction for
                                  elderly; addon as a decimal fraction of AGI

        ID_Casualty_frt : Deduction for casualty loss;
                          floor as a decimal fraction of AGI

        ID_Miscellaneous_frt : Deduction for miscellaneous expenses;
                               floor as a decimal fraction of AGI

        ID_Charity_crt_all : Deduction for all charitable contributions;
                             ceiling as a decimal fraction of AGI

        ID_Charity_crt_noncash : Deduction for noncash charitable
                                 contributions; ceiling as a decimal
                                 fraction of AGI

        ID_Charity_frt : Disregard for charitable contributions;
                         floor as a decimal fraction of AGI

        ID_Medical_c : Ceiling on medical expense deduction

        ID_StateLocalTax_c : Ceiling on state and local tax deduction

        ID_RealEstate_c : Ceiling on real estate tax deduction

        ID_AllTaxes_c: Ceiling combined state and local income/sales and
                       real estate tax deductions

        ID_InterestPaid_c : Ceiling on interest paid deduction

        ID_Charity_c : Ceiling on charity expense deduction

        ID_Charity_f: Floor on charity expense deduction

        ID_Casualty_c : Ceiling on casuality expense deduction

        ID_Miscellaneous_c : Ceiling on miscellaneous expense deduction

        ID_StateLocalTax_crt : Deduction for state and local taxes;
                               ceiling as a decimal fraction of AGI

        ID_RealEstate_crt : Deduction for real estate taxes;
                            ceiling as a decimal fraction of AGI

    Taxpayer Characteristics:
        e17500_capped : Medical expenses, capped by ItemDedCap

        e18400_capped : State and local taxes, capped by ItemDedCap

        e18500_capped : Real-estate taxes, capped by ItemDedCap

        e19200_capped : Interest paid, capped by ItemDedCap

        e19800_capped : Charity cash contributions, capped by ItemDedCap

        e20100_capped : Charity noncash contributions, capped by ItemDedCap

        e20400_capped : Total miscellaneous expenses, capped by ItemDedCap

        g20500_capped : Gross casualty or theft loss (before disregard),
                        capped by ItemDedCap

    Returns
    -------
    c04470 : total itemized deduction amount (and other intermediate variables)
    """
    posagi = max(c00100, 0.)
    # Medical: deductible only above an AGI-fraction floor (higher
    # fraction threshold addon applies when head or spouse is 65+)
    medical_frt = ID_Medical_frt
    if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
        medical_frt += ID_Medical_frt_add4aged
    c17750 = medical_frt * posagi
    c17000 = max(0., e17500_capped - c17750) * (1. - ID_Medical_hc)
    c17000 = min(c17000, ID_Medical_c[MARS - 1])
    # State and local taxes
    c18400 = min((1. - ID_StateLocalTax_hc) * max(e18400_capped, 0.),
                 ID_StateLocalTax_c[MARS - 1])
    c18500 = min((1. - ID_RealEstate_hc) * e18500_capped,
                 ID_RealEstate_c[MARS - 1])
    # following two statements implement a cap on c18400 and c18500 in a way
    # that those with negative AGI, c00100, are not capped under current law,
    # hence the 0.0001 rather than zero
    c18400 = min(c18400, ID_StateLocalTax_crt * max(c00100, 0.0001))
    c18500 = min(c18500, ID_RealEstate_crt * max(c00100, 0.0001))
    c18300 = (c18400 + c18500) * (1. - ID_AllTaxes_hc)
    c18300 = min(c18300, ID_AllTaxes_c[MARS - 1])
    # Interest paid
    c19200 = e19200_capped * (1. - ID_InterestPaid_hc)
    c19200 = min(c19200, ID_InterestPaid_c[MARS - 1])
    # Charity: noncash contributions limited first, then total limited
    lim30 = min(ID_Charity_crt_noncash * posagi, e20100_capped)
    c19700 = min(ID_Charity_crt_all * posagi, lim30 + e19800_capped)
    # charity floor is zero in present law
    charity_floor = max(ID_Charity_frt * posagi, ID_Charity_f[MARS - 1])
    c19700 = max(0., c19700 - charity_floor) * (1. - ID_Charity_hc)
    c19700 = min(c19700, ID_Charity_c[MARS - 1])
    # Casualty: deductible only above an AGI-fraction floor
    c20500 = (max(0., g20500_capped - ID_Casualty_frt * posagi) *
              (1. - ID_Casualty_hc))
    c20500 = min(c20500, ID_Casualty_c[MARS - 1])
    # Miscellaneous: deductible only above an AGI-fraction floor
    c20400 = e20400_capped
    c20750 = ID_Miscellaneous_frt * posagi
    c20800 = max(0., c20400 - c20750) * (1. - ID_Miscellaneous_hc)
    c20800 = min(c20800, ID_Miscellaneous_c[MARS - 1])
    # Gross total itemized deductions
    c21060 = c17000 + c18300 + c19200 + c19700 + c20500 + c20800
    # Limitations on total itemized deductions (Pease)
    # (no attempt to adjust c04470 components for limitations)
    nonlimited = c17000 + c20500
    limitstart = ID_ps[MARS - 1]
    if c21060 > nonlimited and c00100 > limitstart:
        dedmin = ID_crt * (c21060 - nonlimited)
        dedpho = ID_prt * max(0., posagi - limitstart)
        c21040 = min(dedmin, dedpho)
        c04470 = c21060 - c21040
    else:
        c21040 = 0.
        c04470 = c21060
    c04470 = min(c04470, ID_c[MARS - 1])
    # Return total itemized deduction amounts and components
    return (c17000, c18300, c19200, c19700, c20500, c20800,
            c21040, c21060, c04470)
@iterate_jit(nopython=True)
def AdditionalMedicareTax(e00200, MARS,
                          AMEDT_ec, sey, AMEDT_rt,
                          FICA_mc_trt, FICA_ss_trt,
                          ptax_amc, payrolltax):
    """
    Compute the Additional Medicare Tax (Form 8959) and add it to
    payroll taxes.

    Tax Law Parameters:
        AMEDT_ec : Additional Medicare Tax earnings exclusion
        AMEDT_rt : Additional Medicare Tax rate
        FICA_ss_trt : FICA Social Security tax rate
        FICA_mc_trt : FICA Medicare tax rate

    Taxpayer Characteristics:
        e00200 : Wages and salaries
        sey : Self-employment income

    Returns
    -------
    ptax_amc : Additional Medicare Tax
    payrolltax : payroll tax augmented by Additional Medicare Tax
    """
    earnings_excl = AMEDT_ec[MARS - 1]
    # Form 8959 line 8: self-employment income net of the SECA adjustment
    sey_taxable = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
    # Form 8959 line 11: exclusion amount not used up by wages
    unused_excl = max(0., earnings_excl - e00200)
    wage_base = max(0., e00200 - earnings_excl)
    sey_base = max(0., sey_taxable - unused_excl)
    ptax_amc = AMEDT_rt * (wage_base + sey_base)
    payrolltax += ptax_amc
    return (ptax_amc, payrolltax)
@iterate_jit(nopython=True)
def StdDed(DSI, earned, STD, age_head, age_spouse, STD_Aged, STD_Dep,
           MARS, MIDR, blind_head, blind_spouse, standard, c19700,
           STD_allow_charity_ded_nonitemizers):
    """
    Calculates standard deduction, including standard deduction for
    dependents, aged and bind.

    Notes
    -----
    Tax Law Parameters:
        STD : Standard deduction amount, filing status dependent
        STD_Dep : Standard deduction for dependents
        STD_Aged : Additional standard deduction for blind and aged
    Taxpayer Characteristics:
        earned : Form 2441 earned income amount
        DSI : Dependent Status Indicator:
            0 - not being claimed as a dependent
            1 - claimed as a dependent
        MIDR : Married filing separately itemized deductions
               requirement indicator:
            0 - not necessary to itemize because of filing status
            1 - necessary to itemize when filing separately

    Returns
    -------
    standard : the standard deduction amount for filing unit
    """
    # basic standard deduction
    if DSI == 1:
        # dependents get the lesser of the regular amount and their
        # dependent limit (earned income plus $350, floored at STD_Dep)
        basic_stded = min(STD[MARS - 1], max(350. + earned, STD_Dep))
    elif MIDR == 1:
        # married-filing-separately filer required to itemize
        basic_stded = 0.
    else:
        basic_stded = STD[MARS - 1]
    # extra standard deduction for the aged and the blind
    extra_count = blind_head + blind_spouse
    if age_head >= 65:
        extra_count += 1
    if MARS == 2 and age_spouse >= 65:
        extra_count += 1
    extra_stded = extra_count * STD_Aged[MARS - 1]
    # total standard deduction
    standard = basic_stded + extra_stded
    if MARS == 3 and MIDR == 1:
        standard = 0.
    # optionally allow nonitemizers to deduct charitable contributions
    if STD_allow_charity_ded_nonitemizers:
        standard += c19700
    return standard
@iterate_jit(nopython=True)
def TaxInc(c00100, standard, c04470, c04600, MARS, e00900, e26270,
           e02100, e27200, e00650, c01000,
           PT_SSTB_income, PT_binc_w2_wages, PT_ubia_property,
           PT_qbid_rt, PT_qbid_taxinc_thd, PT_qbid_taxinc_gap,
           PT_qbid_w2_wages_rt,
           PT_qbid_alt_w2_wages_rt, PT_qbid_alt_property_rt,
           c04800, qbided, StudentLoan_em, studloan_debt, sldf):
    """
    Calculates taxable income, c04800, and
    qualified business income deduction, qbided.

    Also computes sldf, the student-loan-debt-forgiveness exclusion,
    which is nonzero only when StudentLoan_em is true.

    Returns
    -------
    (c04800, qbided, sldf)
    """
    # calculate taxable income before qualified business income deduction
    pre_qbid_taxinc = max(0., c00100 - max(c04470, standard) - c04600)
    # calculate qualified business income deduction
    qbided = 0.
    qbinc = max(0., e00900 + e26270 + e02100 + e27200)
    qbided_full = qbinc * PT_qbid_rt
    # simplified linear phase-out of the QBI deduction over the
    # PT_qbid_taxinc_gap range above the PT_qbid_taxinc_thd threshold;
    # a zero threshold disables the phase-out entirely
    # NOTE(review): this simplified rule ignores PT_SSTB_income and the
    # W-2 wage / UBIA caps used by the full worksheet logic that is kept,
    # disabled, in the string literal below -- confirm this is intended
    if PT_qbid_taxinc_thd[MARS-1] > 0:
        if pre_qbid_taxinc < PT_qbid_taxinc_thd[MARS-1]:
            qbided = qbided_full
        else:
            qbided = max(0., qbided_full * (1 - (pre_qbid_taxinc - PT_qbid_taxinc_thd[MARS-1])/ PT_qbid_taxinc_gap[MARS-1]))
    else:
        qbided = qbided_full
    # the triple-quoted string below is an inert expression statement
    # preserving the detailed 2018 Pub 535 Worksheet 12-A QBID logic
    """
    if qbinc > 0. and PT_qbid_rt > 0.:
        qbid_before_limits = qbinc * PT_qbid_rt
        lower_thd = PT_qbid_taxinc_thd[MARS - 1]
        if pre_qbid_taxinc <= lower_thd:
            qbided = qbid_before_limits
        else:
            pre_qbid_taxinc_gap = PT_qbid_taxinc_gap[MARS - 1]
            upper_thd = lower_thd + pre_qbid_taxinc_gap
            if PT_SSTB_income == 1 and pre_qbid_taxinc >= upper_thd:
                qbided = 0.
            else:
                wage_cap = PT_binc_w2_wages * PT_qbid_w2_wages_rt
                alt_cap = (PT_binc_w2_wages * PT_qbid_alt_w2_wages_rt +
                           PT_ubia_property * PT_qbid_alt_property_rt)
                full_cap = max(wage_cap, alt_cap)
                if PT_SSTB_income == 0 and pre_qbid_taxinc >= upper_thd:
                    # apply full cap
                    qbided = min(full_cap, qbid_before_limits)
                elif PT_SSTB_income == 0 and pre_qbid_taxinc < upper_thd:
                    # apply adjusted cap as in Part III of Worksheet 12-A
                    # in 2018 IRS Publication 535 (Chapter 12)
                    prt = (pre_qbid_taxinc - lower_thd) / pre_qbid_taxinc_gap
                    adj = prt * (qbid_before_limits - full_cap)
                    qbided = qbid_before_limits - adj
                else:  # PT_SSTB_income == 1 and pre_qbid_taxinc < upper_thd
                    prti = (upper_thd - pre_qbid_taxinc) / pre_qbid_taxinc_gap
                    qbid_adjusted = prti * qbid_before_limits
                    cap_adjusted = prti * full_cap
                    prt = (pre_qbid_taxinc - lower_thd) / pre_qbid_taxinc_gap
                    adj = prt * (qbid_adjusted - cap_adjusted)
                    qbided = qbid_adjusted - adj
    """
    # apply taxinc cap (assuming cap rate is equal to PT_qbid_rt)
    net_cg = e00650 + c01000  # per line 34 in 2018 Pub 535 Worksheet 12-A
    taxinc_cap = PT_qbid_rt * max(0., pre_qbid_taxinc - net_cg)
    qbided = min(qbided, taxinc_cap)
    # exclude forgiven student loan debt from taxable income
    if StudentLoan_em is True:
        base_sldf = max(0., studloan_debt)
    else:
        base_sldf = 0.
    # exclusion is limited to tax inc
    sldf = max(0., min(pre_qbid_taxinc - qbided, base_sldf))
    # calculate taxable income after qualified business income deduction
    c04800 = max(0., pre_qbid_taxinc - qbided - sldf)
    return (c04800, qbided, sldf)
@JIT(nopython=True)
def SchXYZ(taxable_income, MARS, e00900, e26270, e02000, e00200,
           PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
           PT_rt6, PT_rt7, PT_rt8,
           PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
           PT_brk6, PT_brk7,
           II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
           II_rt6, II_rt7, II_rt8,
           II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
           II_brk6, II_brk7, PT_EligibleRate_active,
           PT_EligibleRate_passive, PT_wages_active_income,
           PT_top_stacking):
    """
    Returns Schedule X, Y, Z tax amount for specified taxable_income.

    Taxable income is split into a pass-through component (taxed under
    the PT_* rate schedule) and a regular component (taxed under the
    II_* rate schedule); PT_top_stacking controls which component is
    stacked on top of the other.
    """
    # separate non-negative taxable income into two non-negative components,
    # doing this in a way so that the components add up to taxable income
    # define pass-through income eligible for PT schedule
    pt_passive = PT_EligibleRate_passive * (e02000 - e26270)
    pt_active_gross = e00900 + e26270
    if (pt_active_gross > 0) and PT_wages_active_income:
        pt_active_gross = pt_active_gross + e00200
    pt_active = PT_EligibleRate_active * pt_active_gross
    # active pass-through income cannot exceed actual business income
    pt_active = min(pt_active, e00900 + e26270)
    pt_taxinc = max(0., pt_passive + pt_active)
    if pt_taxinc >= taxable_income:
        pt_taxinc = taxable_income
        reg_taxinc = 0.
    else:
        # pt_taxinc is unchanged
        reg_taxinc = taxable_income - pt_taxinc
    # determine stacking order: the tbase of a component is the amount of
    # income taxed below it, which positions it in its bracket schedule
    if PT_top_stacking:
        reg_tbase = 0.
        pt_tbase = reg_taxinc
    else:
        reg_tbase = pt_taxinc
        pt_tbase = 0.
    # compute Schedule X,Y,Z tax using the two components of taxable income
    if reg_taxinc > 0.:
        reg_tax = Taxes(reg_taxinc, MARS, reg_tbase,
                        II_rt1, II_rt2, II_rt3, II_rt4,
                        II_rt5, II_rt6, II_rt7, II_rt8, II_brk1, II_brk2,
                        II_brk3, II_brk4, II_brk5, II_brk6, II_brk7)
    else:
        reg_tax = 0.
    if pt_taxinc > 0.:
        pt_tax = Taxes(pt_taxinc, MARS, pt_tbase,
                       PT_rt1, PT_rt2, PT_rt3, PT_rt4,
                       PT_rt5, PT_rt6, PT_rt7, PT_rt8, PT_brk1, PT_brk2,
                       PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7)
    else:
        pt_tax = 0.
    return reg_tax + pt_tax
@iterate_jit(nopython=True)
def SchXYZTax(c04800, MARS, e00900, e26270, e02000, e00200,
              PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
              PT_rt6, PT_rt7, PT_rt8,
              PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
              PT_brk6, PT_brk7,
              II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
              II_rt6, II_rt7, II_rt8,
              II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
              II_brk6, II_brk7, PT_EligibleRate_active,
              PT_EligibleRate_passive, PT_wages_active_income,
              PT_top_stacking, c05200):
    """
    SchXYZTax calls SchXYZ function and sets c05200 to returned amount.

    This is a thin per-record wrapper: it applies the SchXYZ rate-schedule
    calculation to regular taxable income, c04800, passing all rate and
    bracket parameters through unchanged.
    """
    c05200 = SchXYZ(c04800, MARS, e00900, e26270, e02000, e00200,
                    PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
                    PT_rt6, PT_rt7, PT_rt8,
                    PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
                    PT_brk6, PT_brk7,
                    II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
                    II_rt6, II_rt7, II_rt8,
                    II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
                    II_brk6, II_brk7, PT_EligibleRate_active,
                    PT_EligibleRate_passive, PT_wages_active_income,
                    PT_top_stacking)
    return c05200
@iterate_jit(nopython=True)
def GainsTax(e00650, c01000, c23650, p23250, e01100, e58990, e00200,
             e24515, e24518, MARS, c04800, c05200, e00900, e26270, e02000,
             II_rt1, II_rt2, II_rt3, II_rt4, II_rt5, II_rt6, II_rt7, II_rt8,
             II_brk1, II_brk2, II_brk3, II_brk4, II_brk5, II_brk6, II_brk7,
             PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5, PT_rt6, PT_rt7, PT_rt8,
             PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7,
             CG_nodiff, PT_EligibleRate_active, PT_EligibleRate_passive,
             PT_wages_active_income, PT_top_stacking,
             CG_rt1, CG_rt2, CG_rt3, CG_rt4, CG_brk1, CG_brk2, CG_brk3,
             dwks10, dwks13, dwks14, dwks19, c05700, taxbc):
    """
    GainsTax function implements (2015) Schedule D Tax Worksheet logic for
    the special taxation of long-term capital gains and qualified dividends
    if CG_nodiff is false.

    The dwksNN local variables correspond to the numbered lines of that
    worksheet; taxbc is tax before credits.
    """
    # pylint: disable=too-many-statements
    if c01000 > 0. or c23650 > 0. or p23250 > 0. or e01100 > 0. or e00650 > 0.:
        hasqdivltcg = 1 # has qualified dividends or long-term capital gains
    else:
        hasqdivltcg = 0 # no qualified dividends or long-term capital gains
    if CG_nodiff:
        hasqdivltcg = 0 # no special taxation of qual divids and l-t cap gains
    if hasqdivltcg == 1:
        dwks1 = c04800
        dwks2 = e00650
        dwks3 = e58990
        dwks4 = 0. # always assumed to be zero
        dwks5 = max(0., dwks3 - dwks4)
        dwks6 = max(0., dwks2 - dwks5)
        dwks7 = min(p23250, c23650) # SchD lines 15 and 16, respectively
        # dwks8 = min(dwks3, dwks4)
        # dwks9 = max(0., dwks7 - dwks8)
        # BELOW TWO STATEMENTS ARE UNCLEAR IN LIGHT OF dwks9=... COMMENT
        if e01100 > 0.:
            c24510 = e01100
        else:
            c24510 = max(0., dwks7) + e01100
        dwks9 = max(0., c24510 - min(0., e58990))
        # ABOVE TWO STATEMENTS ARE UNCLEAR IN LIGHT OF dwks9=... COMMENT
        dwks10 = dwks6 + dwks9
        dwks11 = e24515 + e24518 # SchD lines 18 and 19, respectively
        dwks12 = min(dwks9, dwks11)
        dwks13 = dwks10 - dwks12
        dwks14 = max(0., dwks1 - dwks13)
        dwks16 = min(CG_brk1[MARS - 1], dwks1)
        dwks17 = min(dwks14, dwks16)
        dwks18 = max(0., dwks1 - dwks10)
        dwks19 = max(dwks17, dwks18)
        # dwks20 is the amount of gains taxed at the lowest rate, CG_rt1
        dwks20 = dwks16 - dwks17
        lowest_rate_tax = CG_rt1 * dwks20
        # break in worksheet lines
        dwks21 = min(dwks1, dwks13)
        dwks22 = dwks20
        dwks23 = max(0., dwks21 - dwks22)
        dwks25 = min(CG_brk2[MARS - 1], dwks1)
        dwks26 = dwks19 + dwks20
        dwks27 = max(0., dwks25 - dwks26)
        # dwks28 is the amount of gains taxed at CG_rt2
        dwks28 = min(dwks23, dwks27)
        dwks29 = CG_rt2 * dwks28
        dwks30 = dwks22 + dwks28
        # dwks31 is the amount of gains taxed at CG_rt3 (plus the CG_rt4
        # incremental rate on the part above CG_brk3)
        dwks31 = dwks21 - dwks30
        dwks32 = CG_rt3 * dwks31
        hi_base = max(0., dwks31 - CG_brk3[MARS - 1])
        hi_incremental_rate = CG_rt4 - CG_rt3
        highest_rate_incremental_tax = hi_incremental_rate * hi_base
        # break in worksheet lines
        dwks33 = min(dwks9, e24518)
        dwks34 = dwks10 + dwks19
        dwks36 = max(0., dwks34 - dwks1)
        dwks37 = max(0., dwks33 - dwks36)
        dwks38 = 0.25 * dwks37 # unrecaptured Section 1250 gain taxed at 25%
        # break in worksheet lines
        dwks39 = dwks19 + dwks20 + dwks28 + dwks31 + dwks37
        dwks40 = dwks1 - dwks39
        dwks41 = 0.28 * dwks40 # 28%-rate gain (collectibles, Section 1202)
        # ordinary-rate tax on the non-preferred portion of taxable income
        dwks42 = SchXYZ(dwks19, MARS, e00900, e26270, e02000, e00200,
                        PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
                        PT_rt6, PT_rt7, PT_rt8,
                        PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
                        PT_brk6, PT_brk7,
                        II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
                        II_rt6, II_rt7, II_rt8,
                        II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
                        II_brk6, II_brk7, PT_EligibleRate_active,
                        PT_EligibleRate_passive, PT_wages_active_income,
                        PT_top_stacking)
        dwks43 = (dwks29 + dwks32 + dwks38 + dwks41 + dwks42 +
                  lowest_rate_tax + highest_rate_incremental_tax)
        dwks44 = c05200
        # worksheet tax is the lesser of the special calculation and the
        # all-ordinary-rates tax
        dwks45 = min(dwks43, dwks44)
        c24580 = dwks45
    else: # if hasqdivltcg is zero
        c24580 = c05200
        dwks10 = max(0., min(p23250, c23650)) + e01100
        dwks13 = 0.
        dwks14 = 0.
        dwks19 = 0.
    # final calculations done no matter what the value of hasqdivltcg
    c05100 = c24580 # because foreign earned income exclusion is assumed zero
    c05700 = 0. # no Form 4972, Lump Sum Distributions
    taxbc = c05700 + c05100
    return (dwks10, dwks13, dwks14, dwks19, c05700, taxbc)
@iterate_jit(nopython=True)
def AGIsurtax(c00100, MARS, AGI_surtax_trt, AGI_surtax_thd, taxbc, surtax):
    """
    Computes surtax on AGI above some threshold.

    Adds the surtax amount to both taxbc (tax before credits) and surtax.
    """
    if AGI_surtax_trt > 0.:
        # tax AGI in excess of the filing-status-dependent threshold
        excess_agi = max(c00100 - AGI_surtax_thd[MARS - 1], 0.)
        extra_tax = AGI_surtax_trt * excess_agi
        taxbc += extra_tax
        surtax += extra_tax
    return (taxbc, surtax)
@iterate_jit(nopython=True)
def AMT(e07300, dwks13, standard, f6251, c00100, c18300, taxbc,
        c04470, c17000, c20800, c21040, e24515, MARS, sep, dwks19,
        dwks14, c05700, e62900, e00700, dwks10, age_head, age_spouse,
        earned, cmbtp,
        AMT_child_em_c_age, AMT_brk1,
        AMT_em, AMT_prt, AMT_rt1, AMT_rt2,
        AMT_child_em, AMT_em_ps, AMT_em_pe,
        AMT_CG_brk1, AMT_CG_brk2, AMT_CG_brk3, AMT_CG_rt1, AMT_CG_rt2,
        AMT_CG_rt3, AMT_CG_rt4, c05800, c09600, c62100):
    """
    Computes Alternative Minimum Tax (AMT) taxable income and liability, where
    c62100 is AMT taxable income,
    c09600 is AMT tax liability, and
    c05800 is total (regular + AMT) income tax liability before credits.
    Note that line-number variable names refer to 2015 Form 6251.
    """
    # pylint: disable=too-many-statements,too-many-branches
    # Form 6251, Part I
    # itemizers start from AGI with itemized-deduction addbacks;
    # standard-deduction filers start from AGI less e00700
    if standard == 0.0:
        c62100 = (c00100 - e00700 - c04470 +
                  max(0., min(c17000, 0.025 * c00100)) +
                  c18300 + c20800 - c21040)
    if standard > 0.0:
        c62100 = c00100 - e00700
    c62100 += cmbtp # add income not in AGI but considered income for AMT
    if MARS == 3:
        # married-filing-separately addback when above the phase-out end
        amtsepadd = max(0.,
                        min(AMT_em[MARS - 1], AMT_prt * (c62100 - AMT_em_pe)))
    else:
        amtsepadd = 0.
    c62100 = c62100 + amtsepadd # AMT taxable income, which is line28
    # Form 6251, Part II top
    # line29 is the AMT exemption, phased out at rate AMT_prt above AMT_em_ps
    line29 = max(0., AMT_em[MARS - 1] - AMT_prt *
                 max(0., c62100 - AMT_em_ps[MARS - 1]))
    # "kiddie AMT": a young filer's exemption is limited to earned income
    # plus AMT_child_em (an unknown/zero age implies no limitation)
    young_head = age_head != 0 and age_head < AMT_child_em_c_age
    no_or_young_spouse = age_spouse < AMT_child_em_c_age
    if young_head and no_or_young_spouse:
        line29 = min(line29, earned + AMT_child_em)
    line30 = max(0., c62100 - line29)
    # two-bracket AMT rate schedule (bracket boundary halved when sep == 2)
    line3163 = (AMT_rt1 * line30 +
                AMT_rt2 * max(0., (line30 - (AMT_brk1 / sep))))
    if dwks10 > 0. or dwks13 > 0. or dwks14 > 0. or dwks19 > 0. or e24515 > 0.:
        # complete Form 6251, Part III (line36 is equal to line30)
        line37 = dwks13
        line38 = e24515
        line39 = min(line37 + line38, dwks10)
        line40 = min(line30, line39)
        line41 = max(0., line30 - line40)
        line42 = (AMT_rt1 * line41 +
                  AMT_rt2 * max(0., (line41 - (AMT_brk1 / sep))))
        line44 = dwks14
        line45 = max(0., AMT_CG_brk1[MARS - 1] - line44)
        line46 = min(line30, line37)
        line47 = min(line45, line46) # line47 is amount taxed at AMT_CG_rt1
        cgtax1 = line47 * AMT_CG_rt1
        line48 = line46 - line47
        line51 = dwks19
        line52 = line45 + line51
        line53 = max(0., AMT_CG_brk2[MARS - 1] - line52)
        line54 = min(line48, line53) # line54 is amount taxed at AMT_CG_rt2
        cgtax2 = line54 * AMT_CG_rt2
        line56 = line47 + line54 # total amount in lower two brackets
        if line41 == line56:
            line57 = 0. # line57 is amount taxed at AMT_CG_rt3
            linex2 = 0. # linex2 is amount taxed at AMT_CG_rt4
        else:
            line57 = line46 - line56
            linex1 = min(line48,
                         max(0., AMT_CG_brk3[MARS - 1] - line44 - line45))
            linex2 = max(0., line54 - linex1)
        cgtax3 = line57 * AMT_CG_rt3
        cgtax4 = linex2 * AMT_CG_rt4
        if line38 == 0.:
            line61 = 0.
        else:
            # unrecaptured Section 1250 gain taxed at 25 percent
            line61 = 0.25 * max(0., line30 - line41 - line56 - line57 - linex2)
        line62 = line42 + cgtax1 + cgtax2 + cgtax3 + cgtax4 + line61
        # tentative AMT is the lesser of the Part III and Part II amounts
        line64 = min(line3163, line62)
        line31 = line64
    else: # if not completing Form 6251, Part III
        line31 = line3163
    # Form 6251, Part II bottom
    if f6251 == 1:
        line32 = e62900
    else:
        line32 = e07300
    line33 = line31 - line32
    # AMT liability is the excess of tentative AMT over regular tax
    c09600 = max(0., line33 - max(0., taxbc - e07300 - c05700))
    c05800 = taxbc + c09600
    return (c62100, c09600, c05800)
@iterate_jit(nopython=True)
def NetInvIncTax(e00300, e00600, e02000, e26270, c01000,
                 c00100, NIIT_thd, MARS, NIIT_PT_taxed, NIIT_rt, niit):
    """
    Computes Net Investment Income Tax (NIIT) amount assuming that
    all annuity income is excluded from net investment income.
    """
    # modified AGI equals AGI because no foreign earned income exclusion
    modAGI = c00100
    # net investment income: interest, dividends, capital gains, and
    # Schedule E income; pass-through income (e26270) is excluded unless
    # NIIT_PT_taxed is true
    nii_base = e00300 + e00600 + c01000 + e02000
    if not NIIT_PT_taxed:
        nii_base -= e26270
    NII = max(0., nii_base)
    # tax the lesser of NII and the AGI excess over the threshold
    excess_agi = max(0., modAGI - NIIT_thd[MARS - 1])
    niit = NIIT_rt * min(NII, excess_agi)
    return niit
@iterate_jit(nopython=True)
def F2441(MARS, earned_p, earned_s, f2441, CDCC_c, e32800,
          exact, c00100, CDCC_ps, CDCC_crt, c05800, e07300, c07180):
    """
    Calculates Form 2441 child and dependent care expense credit, c07180.
    """
    # qualifying expenses are capped per cared-for person, for at most two
    c32800 = max(0., min(e32800, min(f2441, 2) * CDCC_c))
    # expenses cannot exceed the lower of the two earned incomes
    if MARS == 2:
        lowest_earnings = min(earned_p, earned_s)
    else:
        lowest_earnings = earned_p
    c33000 = max(0., min(c32800, lowest_earnings))
    # applicable percentage starts at CDCC_crt and falls one point per
    # $2000 of AGI above CDCC_ps, but never below 20 percent
    if exact == 1:  # exact calculation as on tax forms
        tratio = math.ceil(max(((c00100 - CDCC_ps) / 2000.), 0.))
        pct = max(20., CDCC_crt - min(15., tratio))
    else:
        pct = max(20., CDCC_crt -
                  max(((c00100 - CDCC_ps) / 2000.), 0.))
    c33200 = c33000 * 0.01 * pct
    # credit is limited by remaining tax liability
    c07180 = min(max(0., c05800 - e07300), c33200)
    return c07180
@JIT(nopython=True)
def EITCamount(basic_frac, phasein_rate, earnings, max_amount,
               phaseout_start, agi, phaseout_rate):
    """
    Returns EITC amount given specified parameters.
    English parameter names are used in this function because the
    EITC formula is not available on IRS forms or in IRS instructions;
    the extensive IRS EITC look-up table does not reveal the formula.
    """
    # phase-in: a basic_frac share of the maximum is granted outright and
    # the remainder phases in with earnings, capped at max_amount
    phased_in = (basic_frac * max_amount +
                 (1.0 - basic_frac) * phasein_rate * earnings)
    eitc = min(phased_in, max_amount)
    # phase-out: driven by the larger of earnings and AGI
    if earnings > phaseout_start or agi > phaseout_start:
        over = max(0., max(earnings, agi) - phaseout_start)
        eitcx = max(0., (max_amount - phaseout_rate * over))
        eitc = min(eitc, eitcx)
    return eitc
@iterate_jit(nopython=True)
def EITC(MARS, DSI, EIC, c00100, e00300, e00400, e00600, c01000,
         e02000, e26270, age_head, age_spouse, earned, earned_p, earned_s,
         EITC_ps, EITC_MinEligAge, EITC_MaxEligAge, EITC_ps_MarriedJ,
         EITC_rt, EITC_c, EITC_prt, EITC_basic_frac,
         EITC_InvestIncome_c, EITC_excess_InvestIncome_rt,
         EITC_indiv, EITC_sep_filers_elig,
         c59660):
    """
    Computes EITC amount, c59660.

    EITC_* parameters are indexed by EIC, the number of EITC-eligible
    kids; married-joint filers get a higher phase-out start and may be
    given either a filing-unit or an individual (per-spouse) EITC.
    """
    # pylint: disable=too-many-branches
    if MARS != 2:
        eitc = EITCamount(EITC_basic_frac,
                          EITC_rt[EIC], earned, EITC_c[EIC],
                          EITC_ps[EIC], c00100, EITC_prt[EIC])
        if EIC == 0:
            # enforce age eligibility rule for those with no EITC-eligible
            # kids assuming that an unknown age_* value implies EITC age
            # eligibility
            h_age_elig = EITC_MinEligAge <= age_head <= EITC_MaxEligAge
            if (age_head == 0 or h_age_elig):
                c59660 = eitc
            else:
                c59660 = 0.
        else: # if EIC != 0
            c59660 = eitc
    if MARS == 2:
        # married-joint phase-out starts EITC_ps_MarriedJ dollars later
        po_start = EITC_ps[EIC] + EITC_ps_MarriedJ[EIC]
        if not EITC_indiv:
            # filing unit EITC rather than individual EITC
            eitc = EITCamount(EITC_basic_frac,
                              EITC_rt[EIC], earned, EITC_c[EIC],
                              po_start, c00100, EITC_prt[EIC])
        if EITC_indiv:
            # individual EITC rather than a filing-unit EITC
            eitc_p = EITCamount(EITC_basic_frac,
                                EITC_rt[EIC], earned_p, EITC_c[EIC],
                                po_start, earned_p, EITC_prt[EIC])
            eitc_s = EITCamount(EITC_basic_frac,
                                EITC_rt[EIC], earned_s, EITC_c[EIC],
                                po_start, earned_s, EITC_prt[EIC])
            eitc = eitc_p + eitc_s
        if EIC == 0:
            # age eligibility needed by only one spouse; unknown (zero)
            # ages imply eligibility
            h_age_elig = EITC_MinEligAge <= age_head <= EITC_MaxEligAge
            s_age_elig = EITC_MinEligAge <= age_spouse <= EITC_MaxEligAge
            if (age_head == 0 or age_spouse == 0 or h_age_elig or s_age_elig):
                c59660 = eitc
            else:
                c59660 = 0.
        else:
            c59660 = eitc
    # separate filers (unless allowed) and dependents get no EITC
    if (MARS == 3 and not EITC_sep_filers_elig) or DSI == 1:
        c59660 = 0.
    # reduce positive EITC if investment income exceeds ceiling
    if c59660 > 0.:
        invinc = (e00400 + e00300 + e00600 +
                  max(0., c01000) + max(0., (e02000 - e26270)))
        if invinc > EITC_InvestIncome_c:
            eitc = (c59660 - EITC_excess_InvestIncome_rt *
                    (invinc - EITC_InvestIncome_c))
            c59660 = max(0., eitc)
    return c59660
@iterate_jit(nopython=True)
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
                               RPTC_c, RPTC_rt,
                               rptc_p, rptc_s, rptc):
    """
    Computes refundable payroll tax credit amounts.

    The credit is a flat RPTC_rt rate on each person's payroll-taxable
    earnings, capped at RPTC_c per person; rptc is the filing-unit total.
    """
    rptc_p = min(RPTC_rt * was_plus_sey_p, RPTC_c)
    rptc_s = min(RPTC_rt * was_plus_sey_s, RPTC_c)
    rptc = rptc_p + rptc_s
    return (rptc_p, rptc_s, rptc)
@iterate_jit(nopython=True)
def ChildDepTaxCredit(n24, MARS, c00100, XTOT, num, c05800,
                      e07260, CR_ResidentialEnergy_hc,
                      e07300, CR_ForeignTax_hc,
                      c07180,
                      c07230,
                      e07240, CR_RetirementSavings_hc,
                      c07200,
                      CTC_c, CTC_ps, CTC_prt, exact, ODC_c,
                      CTC_c_under6_bonus, nu06,
                      c07220, odc, codtc_limited):
    """
    Computes amounts on "Child Tax Credit and Credit for Other Dependents
    Worksheet" in 2018 Publication 972, which pertain to these two
    nonrefundable tax credits.

    Returns
    -------
    c07220 : nonrefundable child tax credit amount
    odc : nonrefundable other-dependent tax credit amount
    codtc_limited : credit amount lost to the tax-liability cap, used
                    later by the AdditionalCTC function
    """
    # Worksheet Part 1
    line1 = CTC_c * n24 + CTC_c_under6_bonus * nu06
    line2 = ODC_c * max(0, XTOT - n24 - num)
    line3 = line1 + line2
    modAGI = c00100 # no foreign earned income exclusion to add to AGI (line6)
    if line3 > 0. and modAGI > CTC_ps[MARS - 1]:
        excess = modAGI - CTC_ps[MARS - 1]
        if exact == 1: # exact calculation as on tax forms
            excess = 1000. * math.ceil(excess / 1000.)
        line10 = max(0., line3 - CTC_prt * excess)
    else:
        line10 = line3
    if line10 > 0.:
        # Worksheet Part 2
        line11 = c05800
        # line12 totals the credits taken ahead of the CTC/ODC in the
        # credit-ordering rules (each subject to its haircut)
        line12 = (e07260 * (1. - CR_ResidentialEnergy_hc) +
                  e07300 * (1. - CR_ForeignTax_hc) +
                  c07180 + # child & dependent care expense credit
                  c07230 + # education credit
                  e07240 * (1. - CR_RetirementSavings_hc) +
                  c07200) # Schedule R credit
        line13 = line11 - line12
        line14 = 0.
        line15 = max(0., line13 - line14)
        line16 = min(line10, line15) # credit is capped by tax liability
    else:
        line16 = 0.
    # separate the CTC and ODTC amounts, prorating line16 by each
    # credit's share of the combined pre-limit amount
    c07220 = 0. # nonrefundable CTC amount
    odc = 0. # nonrefundable ODTC amount
    if line16 > 0.:
        if line1 > 0.:
            c07220 = line16 * line1 / line3
        odc = max(0., line16 - c07220)
    # compute codtc_limited for use in AdditionalCTC function
    codtc_limited = max(0., line10 - line16)
    return (c07220, odc, codtc_limited)
@iterate_jit(nopython=True)
def PersonalTaxCredit(MARS, c00100,
                      II_credit, II_credit_ps, II_credit_prt,
                      II_credit_nr, II_credit_nr_ps, II_credit_nr_prt,
                      personal_refundable_credit,
                      personal_nonrefundable_credit):
    """
    Computes personal_refundable_credit and personal_nonrefundable_credit,
    neither of which are part of current-law policy.

    Each credit starts at a filing-status-dependent amount and is phased
    out at its own rate on AGI above its own threshold.
    """
    # refundable credit with AGI phase-out
    personal_refundable_credit = II_credit[MARS - 1]
    excess_agi = c00100 - II_credit_ps[MARS - 1]
    if II_credit_prt > 0. and excess_agi > 0.:
        personal_refundable_credit = max(
            0., personal_refundable_credit - II_credit_prt * excess_agi)
    # nonrefundable credit with AGI phase-out
    personal_nonrefundable_credit = II_credit_nr[MARS - 1]
    excess_agi_nr = c00100 - II_credit_nr_ps[MARS - 1]
    if II_credit_nr_prt > 0. and excess_agi_nr > 0.:
        personal_nonrefundable_credit = max(
            0., personal_nonrefundable_credit - II_credit_nr_prt * excess_agi_nr)
    return (personal_refundable_credit, personal_nonrefundable_credit)
@iterate_jit(nopython=True)
def IRADCTaxCredit(e03150, e03300, IRADC_credit_c, IRADC_credit_rt, iradctc):
    """
    Computes refundable retirement savings tax credit amount, iradctc.

    The credit is a flat IRADC_credit_rt rate on total IRA plus SEP/
    SIMPLE contributions, capped at IRADC_credit_c.
    """
    if IRADC_credit_rt > 0.:
        contributions = e03150 + e03300
        iradctc = min(IRADC_credit_rt * contributions, IRADC_credit_c)
    else:
        iradctc = 0.
    return (iradctc)
@iterate_jit(nopython=True)
def FTHBTaxCredit(MARS, FTHB_credit, FTHB_credit_c, c00100,
                  FTHB_credit_e, fthbc, fthb_credit_amt):
    """
    Computes refundable first time homebuyers' tax credit amount, fthbc.

    When FTHB_credit is false, fthbc passes through unchanged.
    """
    if FTHB_credit is True:
        # credit equals the claimed amount up to the statutory cap
        fthbc = max(0., min(FTHB_credit_c, fthb_credit_amt))
        # credit is eliminated entirely when positive AGI exceeds the
        # filing-status-dependent limit (a cliff, not a phase-out)
        if max(c00100, 0.) > FTHB_credit_e[MARS - 1]:
            fthbc = 0.
    return (fthbc)
@iterate_jit(nopython=True)
def ICGTaxCredit(earned_p, earned_s, MARS, ICG_credit_c, ICG_credit_em,
                 ICG_credit_rt, ICG_credit_thd, icg_expense, c05800, e07300,
                 icgtc):
    """
    Computes nonrefundable informal care giver tax credit, icgtc.
    """
    # not reflected in current law and records modified with imputation
    # eligibility depends on the lower of the two earned incomes
    if MARS == 2:
        min_earnings = min(earned_p, earned_s)
    else:
        min_earnings = earned_p
    if min_earnings > ICG_credit_thd:
        # credit is a rate on expenses above the exemption, capped
        uncapped_credit = (icg_expense - ICG_credit_em) * ICG_credit_rt
        credit = max(0., min(uncapped_credit, ICG_credit_c))
        # credit is limited to minimum of individuals' earned income
        credit = max(0., min(credit, min_earnings))
        # credit is limited by remaining tax liability
        icgtc = min(max(0., c05800 - e07300), credit)
    else:
        icgtc = 0.
    return icgtc
@iterate_jit(nopython=True)
def IRATaxCredit(earned_p, earned_s, MARS, AutoIRA_credit, ira_credit,
                 c05800, e07300, iratc):
    """
    Computes nonrefundable automatic enrollment in IRA tax credit, iratc.

    The credit amount, ira_credit, is imputed onto the records; this
    function only switches it on (floored at zero) or off.
    """
    # not reflected in current law and records modified with imputation
    if AutoIRA_credit is True:
        iratc = max(0., ira_credit)
    else:
        iratc = 0.
    return iratc
@iterate_jit(nopython=True)
def EVTaxCredit(EV_credit, ev_credit_amt, EV_credit_c, c00100, EV_credit_ps, MARS,
                EV_credit_prt, evtc):
    """
    Computes nonrefundable full-electric vehicle tax credit, evtc.

    The credit equals the claimed amount (imputed onto the records),
    capped at EV_credit_c, and is phased out at rate EV_credit_prt on
    positive AGI above the EV_credit_ps threshold.
    """
    if EV_credit is True:
        # not reflected in current law and records modified with imputation
        elecv_credit = max(0., min(ev_credit_amt, EV_credit_c))
        # phaseout based on agi
        posevagi = max(c00100, 0.)
        ev_max = EV_credit_ps[MARS - 1]
        if posevagi < ev_max:
            evtc = elecv_credit
        else:
            # BUGFIX: phase out the computed credit, elecv_credit, at rate
            # EV_credit_prt; previously the pre-existing evtc value was
            # reduced instead, which made EV_credit_prt inoperative and
            # turned the phase-out into a cliff at the threshold
            evtc = max(0., elecv_credit - EV_credit_prt * (posevagi - ev_max))
    return evtc
@iterate_jit(nopython=True)
def AmOppCreditParts(exact, e87521, num, c00100, CR_AmOppRefundable_hc,
                     CR_AmOppNonRefundable_hc, c10960, c87668):
    """
    Applies a phaseout to the Form 8863, line 1, American Opportunity Credit
    amount, e87521, and then applies the 0.4 refundable rate.
    Logic corresponds to Form 8863, Part I.

    Notes
    -----
    Tax Law Parameters that are not parameterized:
        90000 : American Opportunity Credit phaseout income base
        10000 : American Opportunity Credit phaseout income range length
        1/1000 : American Opportunity Credit phaseout rate
        0.4 : American Opportunity Credit refundable rate

    Parameters
    ----------
    exact : whether or not to do rounding of phaseout fraction
    e87521 : total tentative American Opportunity Credit for all students,
             Form 8863, line 1
    num : number of people filing jointly
    c00100 : AGI
    CR_AmOppRefundable_hc : haircut for the refundable portion
    CR_AmOppNonRefundable_hc : haircut for the nonrefundable portion

    Returns
    -------
    c10960 : Refundable part of American Opportunity Credit
    c87668 : Tentative nonrefundable part of American Opportunity Credit
    """
    if e87521 > 0.:
        # phase out the credit over a $10,000-per-person AGI range
        # ending at $90,000 per person
        headroom = max(0., 90000. * num - c00100)
        prange = 10000. * num
        if exact == 1:  # exact calculation as on tax forms
            scaled_frac = 1000. * min(1., round(headroom / prange, 3))
        else:
            scaled_frac = 1000. * min(1., headroom / prange)
        phased_credit = scaled_frac * e87521 / 1000.
        # 40 percent of the phased credit is refundable
        c10960 = 0.4 * phased_credit * (1. - CR_AmOppRefundable_hc)
        c87668 = phased_credit - c10960 * (1. - CR_AmOppNonRefundable_hc)
    else:
        c10960 = 0.
        c87668 = 0.
    return (c10960, c87668)
@iterate_jit(nopython=True)
def SchR(age_head, age_spouse, MARS, c00100,
         c05800, e07300, c07180, e02400, c02500, e01500, e01700, CR_SchR_hc,
         c07200):
    """
    Calculates Schedule R credit for the elderly and the disabled, c07200.
    Note that no Schedule R policy parameters are inflation indexed.
    Note that all Schedule R policy parameters are hard-coded, and therefore,
    are not able to be changed using Policy class parameters.
    Note that the CR_SchR_hc policy parameter allows the user to eliminate
    or reduce total Schedule R credits.
    """
    if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
        # calculate credit assuming nobody is disabled (so line12 = line10)
        # schr12 is the initial credit base; schr15 is the AGI threshold
        # above which half of excess AGI reduces the base
        if MARS == 2:
            if age_head >= 65 and age_spouse >= 65:
                schr12 = 7500.
            else:
                schr12 = 5000.
            schr15 = 10000.
        elif MARS == 3:
            schr12 = 3750.
            schr15 = 5000.
        elif MARS in (1, 4):
            schr12 = 5000.
            schr15 = 7500.
        else:
            schr12 = 0.
            schr15 = 0.
        # nontaxable portion of OASDI benefits, line 13a
        schr13a = max(0., e02400 - c02500)
        # nontaxable portion of pension benefits, line 13b
        # NOTE: the following approximation (required because of inadequate IRS
        # data) will be accurate if all pensions are partially taxable
        # or if all pensions are fully taxable. But if a filing unit
        # receives at least one partially taxable pension and at least
        # one fully taxable pension, then the approximation in the
        # following line is not exactly correct.
        schr13b = max(0., e01500 - e01700)
        schr13c = schr13a + schr13b
        schr16 = max(0., c00100 - schr15)
        schr17 = 0.5 * schr16
        schr18 = schr13c + schr17
        schr19 = max(0., schr12 - schr18)
        # credit is 15 percent of the reduced base
        schr20 = 0.15 * schr19
        # credit cannot exceed remaining tax liability
        schr21 = max(0., (c05800 - e07300 - c07180))
        c07200 = min(schr20, schr21) * (1. - CR_SchR_hc)
    else: # if not calculating Schedule R credit
        c07200 = 0.
    return c07200
@iterate_jit(nopython=True)
def EducationTaxCredit(exact, e87530, MARS, c00100, num, c05800,
                       e07300, c07180, c07200, c87668,
                       LLC_Expense_c, ETC_pe_Single, ETC_pe_Married,
                       CR_Education_hc,
                       c07230):
    """
    Computes Education Tax Credits (Form 8863) nonrefundable amount, c07230.
    Logic corresponds to Form 8863, Part II.

    Notes
    -----
    Tax Law Parameters that are not parameterized:
        0.2 : Lifetime Learning Credit ratio against expense
    Tax Law Parameters that are parameterized:
        LLC_Expense_c : Lifetime Learning Credit expense limit
        ETC_pe_Married : Education Tax Credit phaseout end for married
        ETC_pe_Single : Education Tax Credit phaseout end for single
    Taxpayer Charateristics:
        exact : whether or not to do rounding of phaseout fraction
        e87530 : Lifetime Learning Credit total qualified expenses,
                 Form 8863, line 10
        e07300 : Foreign tax credit - Form 1116
        c07180 : Child/dependent care expense credit - Form 2441
        c07200 : Schedule R credit

    Returns
    -------
    c07230 : Education Tax Credits (Form 8863) nonrefundable amount
    """
    # tentative Lifetime Learning Credit: 20% of capped expenses
    c87560 = 0.2 * min(e87530, LLC_Expense_c)
    # AGI-based phaseout fraction over a $10,000-per-person range
    if MARS == 2:
        c87570 = ETC_pe_Married * 1000.
    else:
        c87570 = ETC_pe_Single * 1000.
    c87590 = max(0., c87570 - c00100)
    c87600 = 10000. * num
    if exact == 1: # exact calculation as on tax forms
        c87610 = min(1., round(c87590 / c87600, 3))
    else:
        c87610 = min(1., c87590 / c87600)
    c87620 = c87560 * c87610
    # limit LLC, then the nonrefundable AOC part (c87668), to the tax
    # liability remaining after earlier-ordered credits
    xline4 = max(0., c05800 - (e07300 + c07180 + c07200))
    xline5 = min(c87620, xline4)
    xline9 = max(0., c05800 - (e07300 + c07180 + c07200 + xline5))
    xline10 = min(c87668, xline9)
    c87680 = xline5 + xline10
    c07230 = c87680 * (1. - CR_Education_hc)
    return c07230
@iterate_jit(nopython=True)
def CharityCredit(e19800, e20100, c00100, CR_Charity_rt, CR_Charity_f,
                  CR_Charity_frt, MARS, charity_credit):
    """
    Computes nonrefundable charity credit, charity_credit.
    This credit is not part of current-law policy.
    """
    # floor is the greater of an AGI fraction and a filing-status amount
    credit_floor = max(CR_Charity_frt * c00100, CR_Charity_f[MARS - 1])
    # cash plus non-cash contributions above the floor earn the credit
    contributions = e19800 + e20100
    charity_credit = CR_Charity_rt * max(contributions - credit_floor, 0)
    return charity_credit
@iterate_jit(nopython=True)
def NonrefundableCredits(c05800, e07240, e07260, e07300, e07400,
                         e07600, p08000, odc,
                         personal_nonrefundable_credit, icgtc, iratc, evtc,
                         CR_RetirementSavings_hc, CR_ForeignTax_hc,
                         CR_ResidentialEnergy_hc, CR_GeneralBusiness_hc,
                         CR_MinimumTax_hc, CR_OtherCredits_hc, charity_credit,
                         c07180, c07200, c07220, c07230, c07240,
                         c07260, c07300, c07400, c07600, c08000):
    """
    Sequentially limits nonrefundable credits to remaining tax liability.

    Credits are applied in the order they appear on the 2015 Form 1040;
    each credit is capped by the liability still available after all
    previously applied credits.  The CR_*_hc parameters are haircuts
    that scale a credit down before it is limited.
    """
    avail = c05800  # liability still available to absorb credits
    # Foreign tax credit - Form 1116
    c07300 = min(e07300 * (1. - CR_ForeignTax_hc), avail)
    avail -= c07300
    # Child & dependent care expense credit
    c07180 = min(c07180, avail)
    avail -= c07180
    # Education tax credit
    c07230 = min(c07230, avail)
    avail -= c07230
    # Retirement savings credit - Form 8880
    c07240 = min(e07240 * (1. - CR_RetirementSavings_hc), avail)
    avail -= c07240
    # Child tax credit
    c07220 = min(c07220, avail)
    avail -= c07220
    # Other dependent credit
    odc = min(odc, avail)
    avail -= odc
    # Residential energy credit - Form 5695
    c07260 = min(e07260 * (1. - CR_ResidentialEnergy_hc), avail)
    avail -= c07260
    # General business credit - Form 3800
    c07400 = min(e07400 * (1. - CR_GeneralBusiness_hc), avail)
    avail -= c07400
    # Prior year minimum tax credit - Form 8801
    c07600 = min(e07600 * (1. - CR_MinimumTax_hc), avail)
    avail -= c07600
    # Schedule R credit
    c07200 = min(c07200, avail)
    avail -= c07200
    # Other credits
    c08000 = min(p08000 * (1. - CR_OtherCredits_hc), avail)
    avail -= c08000
    # Charity credit
    charity_credit = min(charity_credit, avail)
    avail -= charity_credit
    # Personal nonrefundable credit
    personal_nonrefundable_credit = min(personal_nonrefundable_credit, avail)
    avail -= personal_nonrefundable_credit
    # ICG credit
    icgtc = min(icgtc, avail)
    avail -= icgtc
    # IRA credit
    iratc = min(iratc, avail)
    avail -= iratc
    # EV credit
    evtc = min(evtc, avail)
    avail -= evtc
    return (c07180, c07200, c07220, c07230, c07240, odc,
            c07260, c07300, c07400, c07600, c08000, charity_credit,
            personal_nonrefundable_credit, icgtc, iratc, evtc)
@iterate_jit(nopython=True)
def AdditionalCTC(codtc_limited, ACTC_c, n24, earned, ACTC_Income_thd,
                  ACTC_rt, nu06, ACTC_rt_bonus_under6family, ACTC_ChildNum,
                  ptax_was, c03260, e09800, c59660, e11200,
                  c11070):
    """
    Calculates refundable Additional Child Tax Credit (ACTC), c11070,
    following 2018 Form 8812 logic.  The line* local variables mirror
    the corresponding Form 8812 line numbers.
    """
    # Part I
    line3 = codtc_limited
    line4 = ACTC_c * n24
    c11070 = 0.  # line15
    if line3 > 0. and line4 > 0.:
        line5 = min(line3, line4)
        # earned income above the ACTC income threshold
        line7 = max(0., earned - ACTC_Income_thd)
        # accommodate ACTC rate bonus for families with children under 6
        if nu06 == 0:
            ACTC_rate = ACTC_rt
        else:
            ACTC_rate = ACTC_rt + ACTC_rt_bonus_under6family
        line8 = ACTC_rate * line7
        if n24 < ACTC_ChildNum:
            # few children: credit is earned-income-based amount only
            if line8 > 0.:
                c11070 = min(line5, line8)
        else:  # if n24 >= ACTC_ChildNum
            if line8 >= line5:
                c11070 = line5
            else:  # complete Part II
                # payroll taxes net of EITC and excess-withholding amounts
                line9 = 0.5 * ptax_was
                line10 = c03260 + e09800
                line11 = line9 + line10
                line12 = c59660 + e11200
                line13 = max(0., line11 - line12)
                line14 = max(line8, line13)
                c11070 = min(line5, line14)
    return c11070
@iterate_jit(nopython=True)
def C1040(c05800, c07180, c07200, c07220, c07230, c07240, c07260, c07300,
          c07400, c07600, c08000, e09700, e09800, e09900, niit, othertaxes,
          c07100, c09200, odc, charity_credit,
          personal_nonrefundable_credit, icgtc, iratc, evtc):
    """
    Computes total used nonrefundable credits (c07100), other taxes
    (othertaxes), and income tax before refundable credits (c09200).
    """
    # sum the nonrefundable credits actually used (as limited in the
    # NonrefundableCredits function)
    c07100 = (c07180 + c07200 + c07600 + c07300 + c07400 + c07220 + c08000 +
              c07230 + c07240 + c07260 + odc + charity_credit +
              personal_nonrefundable_credit + icgtc + iratc + evtc)
    # income tax remaining after credits (2016 Form 1040, line 56)
    tax_after_credits = max(0., c05800 - c07100)
    # other taxes are added before refundable credits are applied
    othertaxes = e09700 + e09800 + e09900 + niit
    c09200 = othertaxes + tax_after_credits
    return (c07100, othertaxes, c09200)
@iterate_jit(nopython=True)
def CTC_new(CTC_new_c, CTC_new_rt, CTC_new_c_under6_bonus,
            CTC_new_ps, CTC_new_prt, CTC_new_for_all,
            CTC_new_refund_limited, CTC_new_refund_limit_payroll_rt,
            CTC_new_refund_limited_all_payroll, payrolltax,
            n24, nu06, c00100, MARS, ptax_oasdi, c09200,
            ctc_new):
    """
    Computes new refundable child tax credit using specified parameters.
    Returns zero when the unit has no children under 17 (n24 == 0).
    """
    if n24 > 0:
        posagi = max(c00100, 0.)
        # maximum credit: per-child amount plus bonus for children under 6
        ctc_new = CTC_new_c * n24 + CTC_new_c_under6_bonus * nu06
        # unless available to all, limit credit to CTC_new_rt share of AGI
        if not CTC_new_for_all:
            ctc_new = min(CTC_new_rt * posagi, ctc_new)
        # phase credit out above the CTC_new_ps threshold at rate CTC_new_prt
        ymax = CTC_new_ps[MARS - 1]
        if posagi > ymax:
            ctc_new_reduced = max(0.,
                                  ctc_new - CTC_new_prt * (posagi - ymax))
            ctc_new = min(ctc_new, ctc_new_reduced)
        # optionally limit the refundable portion to a share of payroll tax
        if ctc_new > 0. and CTC_new_refund_limited:
            refund_new = max(0., ctc_new - c09200)
            if not CTC_new_refund_limited_all_payroll:
                limit_new = CTC_new_refund_limit_payroll_rt * ptax_oasdi
            if CTC_new_refund_limited_all_payroll:
                limit_new = CTC_new_refund_limit_payroll_rt * payrolltax
            limited_new = max(0., refund_new - limit_new)
            ctc_new = max(0., ctc_new - limited_new)
    else:
        ctc_new = 0.
    return ctc_new
@iterate_jit(nopython=True)
def CDCC_new(CDCC_new_c, CDCC_new_rt, CDCC_new_ps, CDCC_new_pe, CDCC_new_prt, cdcc_new,
             MARS, f2441, e32800, earned_s, earned_p, c05800, e07300, c00100):
    """
    Calculates new refundable child and dependent care expense credit,
    cdcc_new, limited by earned income and tax liability and phased out
    over the CDCC_new_ps..CDCC_new_pe AGI range.
    """
    # expenses are creditable for at most two cared-for individuals
    max_credit = min(f2441, 2) * CDCC_new_c
    expense_credit = max(0., min(e32800 * CDCC_new_rt, max_credit))
    # limit credit to the smaller of the individuals' earned incomes
    taxpayer_earned = earned_p
    if MARS == 2:
        spouse_earned = earned_s  # spouse earned income when present
    else:
        spouse_earned = earned_p
    earnings_limited = max(0., min(expense_credit,
                                   min(taxpayer_earned, spouse_earned)))
    # limit credit to tax liability net of the foreign tax credit
    cdcc_new = min(max(0., c05800 - e07300), earnings_limited)
    # phase credit out based on positive AGI
    posagi = max(c00100, 0.)
    phaseout_start = CDCC_new_ps[MARS - 1]
    phaseout_end = CDCC_new_pe[MARS - 1]
    if posagi >= phaseout_end:
        cdcc_new = 0.
    elif posagi >= phaseout_start:
        reduced = max(0., cdcc_new - CDCC_new_prt * (posagi - phaseout_start))
        cdcc_new = min(cdcc_new, reduced)
    return cdcc_new
@iterate_jit(nopython=True)
def IITAX(c59660, c11070, c10960, personal_refundable_credit, ctc_new, rptc,
          c09200, payrolltax,
          eitc, refund, iitax, combined, iradctc, fthbc, cdcc_new,
          business_burden, estate_burden, Business_tax_combined):
    """
    Computes final taxes: eitc, total refundable credits (refund),
    individual income tax (iitax), and combined income-plus-payroll
    tax liability (combined).
    """
    eitc = c59660
    # total refundable credits
    refund = (eitc + c11070 + c10960 +
              personal_refundable_credit + ctc_new + rptc + iradctc + fthbc + cdcc_new)
    iitax = c09200 - refund
    combined = iitax + payrolltax
    # optionally include imputed business and estate tax burdens
    if Business_tax_combined is True:
        combined = combined + business_burden + estate_burden
    return (eitc, refund, iitax, combined)
@JIT(nopython=True)
def Taxes(income, MARS, tbrk_base,
          rate1, rate2, rate3, rate4, rate5, rate6, rate7, rate8,
          tbrk1, tbrk2, tbrk3, tbrk4, tbrk5, tbrk6, tbrk7):
    """
    Taxes function returns tax amount given the progressive tax rate
    schedule specified by the rate* and (upper) tbrk* parameters and
    given income, filing status (MARS), and tax bracket base (tbrk_base).
    """
    # when tbrk_base is positive, shift each bracket top down so income
    # is taxed as if stacked on top of tbrk_base dollars
    if tbrk_base > 0.:
        brk1 = max(tbrk1[MARS - 1] - tbrk_base, 0.)
        brk2 = max(tbrk2[MARS - 1] - tbrk_base, 0.)
        brk3 = max(tbrk3[MARS - 1] - tbrk_base, 0.)
        brk4 = max(tbrk4[MARS - 1] - tbrk_base, 0.)
        brk5 = max(tbrk5[MARS - 1] - tbrk_base, 0.)
        brk6 = max(tbrk6[MARS - 1] - tbrk_base, 0.)
        brk7 = max(tbrk7[MARS - 1] - tbrk_base, 0.)
    else:
        brk1 = tbrk1[MARS - 1]
        brk2 = tbrk2[MARS - 1]
        brk3 = tbrk3[MARS - 1]
        brk4 = tbrk4[MARS - 1]
        brk5 = tbrk5[MARS - 1]
        brk6 = tbrk6[MARS - 1]
        brk7 = tbrk7[MARS - 1]
    # sum rate-times-slice over the eight brackets; rate8 applies to
    # all income above the top bracket threshold brk7
    return (rate1 * min(income, brk1) +
            rate2 * min(brk2 - brk1, max(0., income - brk1)) +
            rate3 * min(brk3 - brk2, max(0., income - brk2)) +
            rate4 * min(brk4 - brk3, max(0., income - brk3)) +
            rate5 * min(brk5 - brk4, max(0., income - brk4)) +
            rate6 * min(brk6 - brk5, max(0., income - brk5)) +
            rate7 * min(brk7 - brk6, max(0., income - brk6)) +
            rate8 * max(0., income - brk7))
def ComputeBenefit(calc, ID_switch):
    """
    Calculates the value of the benefits accrued from itemizing, as the
    increase in income tax when the itemized deductions selected by
    ID_switch are disallowed (via their haircut parameters).
    """
    # recompute income tax with the covered deductions fully haircut
    no_ID_calc = copy.deepcopy(calc)
    haircut_params = ('ID_Medical_hc', 'ID_StateLocalTax_hc',
                      'ID_RealEstate_hc', 'ID_Casualty_hc',
                      'ID_Miscellaneous_hc', 'ID_InterestPaid_hc',
                      'ID_Charity_hc')
    for idx, pname in enumerate(haircut_params):
        if ID_switch[idx]:
            no_ID_calc.policy_param(pname, [1.])
    no_ID_calc._calc_one_year()  # pylint: disable=protected-access
    # benefit is the (non-negative) tax increase from losing the deductions
    diff_iitax = no_ID_calc.array('iitax') - calc.array('iitax')
    benefit = np.where(diff_iitax > 0., diff_iitax, 0.)
    return benefit
def BenefitSurtax(calc):
    """
    Computes itemized-deduction-benefit surtax and adds the surtax amount
    to income tax, combined tax, and surtax liabilities.
    """
    # ID_BenefitSurtax_crt == 1 means the surtax is inactive
    if calc.policy_param('ID_BenefitSurtax_crt') != 1.:
        benefit = ComputeBenefit(calc,
                                 calc.policy_param('ID_BenefitSurtax_Switch'))
        # surtax applies to benefit above an AGI-share amount plus exemption
        agi = calc.array('c00100')
        deduct_amt = calc.policy_param('ID_BenefitSurtax_crt') * agi
        exempt_amt = calc.policy_param(
            'ID_BenefitSurtax_em')[calc.array('MARS') - 1]
        threshold = deduct_amt + exempt_amt
        taxable_benefit = np.where(benefit > threshold,
                                   benefit - threshold, 0.)
        surtax_amt = calc.policy_param('ID_BenefitSurtax_trt') * taxable_benefit
        # add surtax to income & combined taxes and to surtax subtotal
        calc.incarray('iitax', surtax_amt)
        calc.incarray('combined', surtax_amt)
        calc.incarray('surtax', surtax_amt)
def BenefitLimitation(calc):
    """
    Limits the benefits of select itemized deductions to a fraction of
    deductible expenses.
    """
    # ID_BenefitCap_rt == 1 means the benefit cap is inactive
    if calc.policy_param('ID_BenefitCap_rt') != 1.:
        benefit = ComputeBenefit(calc,
                                 calc.policy_param('ID_BenefitCap_Switch'))
        # Calculate total deductible expenses under the cap
        deduct_exps = 0.
        if calc.policy_param('ID_BenefitCap_Switch')[0]:  # medical
            deduct_exps += calc.array('c17000')
        if calc.policy_param('ID_BenefitCap_Switch')[1]:  # statelocal
            # state/local expenses enter net of any haircut
            one_minus_hc = 1. - calc.policy_param('ID_StateLocalTax_hc')
            deduct_exps += (one_minus_hc *
                            np.maximum(calc.array('e18400_capped'), 0.))
        if calc.policy_param('ID_BenefitCap_Switch')[2]:  # realestate
            one_minus_hc = 1. - calc.policy_param('ID_RealEstate_hc')
            deduct_exps += one_minus_hc * calc.array('e18500_capped')
        if calc.policy_param('ID_BenefitCap_Switch')[3]:  # casualty
            deduct_exps += calc.array('c20500')
        if calc.policy_param('ID_BenefitCap_Switch')[4]:  # misc
            deduct_exps += calc.array('c20800')
        if calc.policy_param('ID_BenefitCap_Switch')[5]:  # interest
            deduct_exps += calc.array('c19200')
        if calc.policy_param('ID_BenefitCap_Switch')[6]:  # charity
            deduct_exps += calc.array('c19700')
        # Calculate cap value for itemized deductions
        benefit_limit = deduct_exps * calc.policy_param('ID_BenefitCap_rt')
        # Add the difference between the actual benefit and capped benefit
        # to income tax and combined tax liabilities.
        excess_benefit = np.maximum(benefit - benefit_limit, 0)
        calc.incarray('iitax', excess_benefit)
        calc.incarray('surtax', excess_benefit)
        calc.incarray('combined', excess_benefit)
@iterate_jit(nopython=True)
def FairShareTax(c00100, MARS, ptax_was, setax, ptax_amc,
                 FST_AGI_trt, FST_AGI_thd_lo, FST_AGI_thd_hi,
                 fstax, iitax, combined, surtax):
    """
    Computes Fair Share Tax, or "Buffet Rule", types of reforms.
    Taxpayer Characteristics
    ------------------------
    c00100 : AGI
    MARS : filing (marital) status
    ptax_was : payroll tax on wages and salaries
    setax : self-employment tax
    ptax_amc : Additional Medicare Tax on high earnings
    Returns
    -------
    fstax : Fair Share Tax amount
    iitax : individual income tax augmented by fstax
    combined : individual income tax plus payroll taxes augmented by fstax
    surtax : individual income tax subtotal augmented by fstax
    """
    if FST_AGI_trt > 0. and c00100 >= FST_AGI_thd_lo[MARS - 1]:
        # employee share of payroll taxes (half of FICA and SE tax,
        # plus all of the Additional Medicare Tax)
        employee_share = 0.5 * ptax_was + 0.5 * setax + ptax_amc
        # FST tops up taxes already paid to FST_AGI_trt share of AGI
        fstax = max(c00100 * FST_AGI_trt - iitax - employee_share, 0.)
        # phase the FST in linearly over the lo..hi AGI threshold range
        thd_gap = max(FST_AGI_thd_hi[MARS - 1] - FST_AGI_thd_lo[MARS - 1], 0.)
        if thd_gap > 0. and c00100 < FST_AGI_thd_hi[MARS - 1]:
            fstax *= (c00100 - FST_AGI_thd_lo[MARS - 1]) / thd_gap
        iitax += fstax
        combined += fstax
        surtax += fstax
    else:
        fstax = 0.
    return (fstax, iitax, combined, surtax)
@iterate_jit(nopython=True)
def LumpSumTax(DSI, num, XTOT,
               LST,
               lumpsum_tax, combined):
    """
    Computes per-person lump-sum tax (LST times the larger of num and
    XTOT) and adds it to combined taxes.  Dependent filers (DSI == 1)
    owe no lump-sum tax, nor does anyone when LST is zero.
    """
    if DSI == 1 or LST == 0.0:
        lumpsum_tax = 0.
    else:
        lumpsum_tax = LST * max(num, XTOT)
    combined += lumpsum_tax
    return (lumpsum_tax, combined)
@iterate_jit(nopython=True)
def ExpandIncome(e00200, pencon_p, pencon_s, e00300, e00400, e00600,
                 e00700, e00800, e00900, e01100, e01200, e01400, e01500,
                 e02000, e02100, p22250, p23250, cmbtp, ptax_was,
                 benefit_value_total, expanded_income):
    """
    Calculates expanded_income from component income types.

    Expanded income is a broad pre-tax income measure that includes
    non-taxable income, the employer share of payroll taxes, and the
    consumption value of all benefits received.
    """
    expanded_income = (
        e00200 +  # wage and salary income net of DC pension contributions
        pencon_p +  # tax-advantaged DC pension contributions for taxpayer
        pencon_s +  # tax-advantaged DC pension contributions for spouse
        e00300 +  # taxable interest income
        e00400 +  # non-taxable interest income
        e00600 +  # dividends
        e00700 +  # state and local income tax refunds
        e00800 +  # alimony received
        e00900 +  # Sch C business net income/loss
        e01100 +  # capital gain distributions not reported on Sch D
        e01200 +  # Form 4797 other net gain/loss
        e01400 +  # taxable IRA distributions
        e01500 +  # total pension & annuity income (including DB-plan benefits)
        e02000 +  # Sch E total rental, ..., partnership, S-corp income/loss
        e02100 +  # Sch F farm net income/loss
        p22250 +  # Sch D: net short-term capital gain/loss
        p23250 +  # Sch D: net long-term capital gain/loss
        cmbtp +  # other AMT taxable income items from Form 6251
        0.5 * ptax_was +  # employer share of FICA taxes on wages/salaries
        benefit_value_total  # consumption value of all benefits received;
        # see the BenefitPrograms function in this file for details on
        # exactly how the benefit_value_total variable is computed
    )
    return expanded_income
@iterate_jit(nopython=True)
def AfterTaxIncome(combined, expanded_income, aftertax_income,
                   Business_tax_expinc, corp_taxliab):
    """
    Calculates after-tax expanded income.

    Parameters
    ----------
    combined: combined tax liability
    expanded_income: expanded income
    corp_taxliab: imputed corporate tax liability

    Returns
    -------
    aftertax_income: expanded_income minus combined
    """
    # optionally count imputed corporate tax liability as income
    if Business_tax_expinc is True:
        expanded_income = expanded_income + corp_taxliab
    aftertax_income = expanded_income - combined
    return aftertax_income
| 39.276526 | 124 | 0.632843 |
import math
import copy
import numpy as np
from taxcalc.decorators import iterate_jit, JIT
def BenefitPrograms(calc):
    """
    Computes per-filing-unit totals of government benefits: the cost to
    government (benefit_cost_total) and the consumption value to the
    recipient (benefit_value_total), storing both in the calc object.

    Benefits whose BEN_*_repeal policy parameter is true are zeroed out
    before the totals are computed.
    """
    # zero out any repealed benefits
    zero = np.zeros(calc.array_len)
    if calc.policy_param('BEN_housing_repeal'):
        calc.array('housing_ben', zero)
    if calc.policy_param('BEN_ssi_repeal'):
        calc.array('ssi_ben', zero)
    if calc.policy_param('BEN_snap_repeal'):
        calc.array('snap_ben', zero)
    if calc.policy_param('BEN_tanf_repeal'):
        calc.array('tanf_ben', zero)
    if calc.policy_param('BEN_vet_repeal'):
        calc.array('vet_ben', zero)
    if calc.policy_param('BEN_wic_repeal'):
        calc.array('wic_ben', zero)
    if calc.policy_param('BEN_mcare_repeal'):
        calc.array('mcare_ben', zero)
    if calc.policy_param('BEN_mcaid_repeal'):
        calc.array('mcaid_ben', zero)
    if calc.policy_param('BEN_oasdi_repeal'):
        calc.array('e02400', zero)
    if calc.policy_param('BEN_ui_repeal'):
        calc.array('e02300', zero)
    if calc.policy_param('BEN_other_repeal'):
        calc.array('other_ben', zero)
    # government cost of all benefits
    cost = np.array(
        calc.array('housing_ben') +
        calc.array('ssi_ben') +
        calc.array('snap_ben') +
        calc.array('tanf_ben') +
        calc.array('vet_ben') +
        calc.array('wic_ben') +
        calc.array('mcare_ben') +
        calc.array('mcaid_ben') +
        calc.array('e02400') +
        calc.array('e02300') +
        calc.array('ubi') +
        calc.array('other_ben')
    )
    calc.array('benefit_cost_total', cost)
    # consumption value of all benefits, with each in-kind benefit
    # weighted by its BEN_*_value consumption parameter (benefits with
    # no weight enter at face value)
    value = np.array(
        calc.array('housing_ben') * calc.consump_param('BEN_housing_value') +
        calc.array('ssi_ben') +
        calc.array('snap_ben') * calc.consump_param('BEN_snap_value') +
        calc.array('tanf_ben') * calc.consump_param('BEN_tanf_value') +
        calc.array('vet_ben') * calc.consump_param('BEN_vet_value') +
        calc.array('wic_ben') * calc.consump_param('BEN_wic_value') +
        calc.array('mcare_ben') * calc.consump_param('BEN_mcare_value') +
        calc.array('mcaid_ben') * calc.consump_param('BEN_mcaid_value') +
        calc.array('e02400') +
        calc.array('e02300') +
        calc.array('ubi') +
        calc.array('other_ben') * calc.consump_param('BEN_other_value')
    )
    calc.array('benefit_value_total', value)
@iterate_jit(nopython=True)
def EI_PayrollTax(SS_Earnings_c, e00200p, e00200s, pencon_p, pencon_s,
                  FICA_ss_trt, FICA_mc_trt, ALD_SelfEmploymentTax_hc,
                  SS_Earnings_thd, e00900p, e00900s, e02100p, e02100s, k1bx14p,
                  k1bx14s, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
                  sey, earned, earned_p, earned_s,
                  was_plus_sey_p, was_plus_sey_s):
    """
    Computes payroll taxes on wage-and-salary and self-employment income:
    total payroll tax (payrolltax), FICA tax on wages (ptax_was),
    self-employment tax (setax), the OASDI component (ptax_oasdi), the
    self-employment-tax AGI deduction (c03260), and the earned-income
    measures for the unit and for each spouse.
    """
    # self-employment income for taxpayer (p) and spouse (s)
    sey_p = e00900p + e02100p + k1bx14p
    sey_s = e00900s + e02100s + k1bx14s
    sey = sey_p + sey_s
    # gross wages include tax-advantaged DC pension contributions
    gross_was_p = e00200p + pencon_p
    gross_was_s = e00200s + pencon_s
    # wages subject to OASDI tax are capped at SS_Earnings_c
    txearn_was_p = min(SS_Earnings_c, gross_was_p)
    txearn_was_s = min(SS_Earnings_c, gross_was_s)
    ptax_ss_was_p = FICA_ss_trt * txearn_was_p
    ptax_ss_was_s = FICA_ss_trt * txearn_was_s
    # Medicare (HI) tax applies to all gross wages, uncapped
    ptax_mc_was_p = FICA_mc_trt * gross_was_p
    ptax_mc_was_s = FICA_mc_trt * gross_was_s
    ptax_was = ptax_ss_was_p + ptax_ss_was_s + ptax_mc_was_p + ptax_mc_was_s
    # taxable self-employment income nets out the employer FICA share;
    # the OASDI cap is reduced by wages already taxed
    sey_frac = 1.0 - 0.5 * (FICA_ss_trt + FICA_mc_trt)
    txearn_sey_p = min(max(0., sey_p * sey_frac), SS_Earnings_c - txearn_was_p)
    txearn_sey_s = min(max(0., sey_s * sey_frac), SS_Earnings_c - txearn_was_s)
    setax_ss_p = FICA_ss_trt * txearn_sey_p
    setax_ss_s = FICA_ss_trt * txearn_sey_s
    setax_mc_p = FICA_mc_trt * max(0., sey_p * sey_frac)
    setax_mc_s = FICA_mc_trt * max(0., sey_s * sey_frac)
    setax_p = setax_ss_p + setax_mc_p
    setax_s = setax_ss_s + setax_mc_s
    setax = setax_p + setax_s
    # additional OASDI tax on earnings above SS_Earnings_thd
    sey_frac = 1.0 - 0.5 * FICA_ss_trt
    was_plus_sey_p = gross_was_p + max(0., sey_p * sey_frac)
    was_plus_sey_s = gross_was_s + max(0., sey_s * sey_frac)
    extra_ss_income_p = max(0., was_plus_sey_p - SS_Earnings_thd)
    extra_ss_income_s = max(0., was_plus_sey_s - SS_Earnings_thd)
    extra_payrolltax = (extra_ss_income_p * FICA_ss_trt +
                        extra_ss_income_s * FICA_ss_trt)
    payrolltax = ptax_was + setax + extra_payrolltax
    ptax_oasdi = (ptax_ss_was_p + ptax_ss_was_s +
                  setax_ss_p + setax_ss_s +
                  extra_payrolltax)
    # AGI deduction for half of self-employment tax (subject to haircut)
    c03260 = (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax
    earned = max(0., e00200p + e00200s + sey - c03260)
    earned_p = max(0., (e00200p + sey_p -
                        (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_p))
    earned_s = max(0., (e00200s + sey_s -
                        (1. - ALD_SelfEmploymentTax_hc) * 0.5 * setax_s))
    return (sey, payrolltax, ptax_was, setax, c03260, ptax_oasdi,
            earned, earned_p, earned_s, was_plus_sey_p, was_plus_sey_s)
@iterate_jit(nopython=True)
def DependentCare(nu13, elderly_dependents, earned,
                  MARS, ALD_Dependents_thd, ALD_Dependents_hc,
                  ALD_Dependents_Child_c, ALD_Dependents_Elder_c,
                  care_deduction):
    """
    Computes care_deduction, an above-the-line deduction for child
    (nu13) and elderly dependents, available only when earned income is
    at or below the ALD_Dependents_thd threshold.
    """
    if earned > ALD_Dependents_thd[MARS - 1]:
        care_deduction = 0.
    else:
        allowed_frac = 1. - ALD_Dependents_hc  # haircut-adjusted fraction
        child_amount = allowed_frac * nu13 * ALD_Dependents_Child_c
        elder_amount = allowed_frac * elderly_dependents * ALD_Dependents_Elder_c
        care_deduction = child_amount + elder_amount
    return care_deduction
@iterate_jit(nopython=True)
def Adj(e03150, e03210, c03260,
        e03270, e03300, e03400, e03500, e00800,
        e03220, e03230, e03240, e03290, care_deduction,
        ALD_StudentLoan_hc, ALD_SelfEmp_HealthIns_hc, ALD_KEOGH_SEP_hc,
        ALD_EarlyWithdraw_hc, ALD_AlimonyPaid_hc, ALD_AlimonyReceived_hc,
        ALD_EducatorExpenses_hc, ALD_HSADeduction_hc, ALD_IRAContributions_hc,
        ALD_DomesticProduction_hc, ALD_Tuition_hc,
        c02900):
    """
    Computes c02900, total above-the-line income adjustments, as the sum
    of the component adjustment amounts, each scaled down by one minus
    its corresponding ALD_*_hc haircut parameter.
    """
    # c03260 (deductible half of self-employment tax) is already
    # haircut-adjusted in the EI_PayrollTax function
    c02900 = ((1. - ALD_StudentLoan_hc) * e03210 +
              c03260 +
              (1. - ALD_EarlyWithdraw_hc) * e03400 +
              (1. - ALD_AlimonyPaid_hc) * e03500 +
              (1. - ALD_AlimonyReceived_hc) * e00800 +
              (1. - ALD_EducatorExpenses_hc) * e03220 +
              (1. - ALD_Tuition_hc) * e03230 +
              (1. - ALD_DomesticProduction_hc) * e03240 +
              (1. - ALD_HSADeduction_hc) * e03290 +
              (1. - ALD_SelfEmp_HealthIns_hc) * e03270 +
              (1. - ALD_IRAContributions_hc) * e03150 +
              (1. - ALD_KEOGH_SEP_hc) * e03300 +
              care_deduction)
    return c02900
@iterate_jit(nopython=True)
def ALD_InvInc_ec_base(p22250, p23250, sep,
                       e00300, e00600, e01100, e01200,
                       invinc_ec_base):
    """
    Computes invinc_ec_base, the investment-income base used for the
    above-the-line investment income exclusion.
    """
    # net capital gain with losses limited to -3000/sep dollars
    net_capital_gain = max((-3000. / sep), p22250 + p23250)
    invinc_ec_base = e00300 + e00600 + net_capital_gain + e01100 + e01200
    return invinc_ec_base
@iterate_jit(nopython=True)
def CapGains(p23250, p22250, sep, ALD_StudentLoan_hc,
             ALD_InvInc_ec_rt, invinc_ec_base,
             e00200, e00300, e00600, e00650, e00700, e00800,
             CG_nodiff, CG_ec, CG_reinvest_ec_rt,
             ALD_BusinessLosses_c, MARS,
             e00900, e01100, e01200, e01400, e01700, e02000, e02100,
             e02300, e00400, e02400, c02900, e03210, e03230, e03240,
             c01000, c23650, ymod, ymod1, invinc_agi_ec,
             gains_at_death, CG_death, CG_death_ec):
    """
    Computes capital-gain amounts (c23650, c01000), the investment-income
    AGI exclusion (invinc_agi_ec), and the modified income measures
    (ymod1, ymod) used by the AGI and SSBenefits calculations.
    """
    # optionally tax capital gains held at death, above an exclusion
    if CG_death is True:
        taxable_gains_at_death = max(0., gains_at_death - CG_death_ec[MARS-1])
    else:
        taxable_gains_at_death = 0.
    c23650 = p23250 + p22250 + taxable_gains_at_death
    # capital losses are limited to -3000/sep dollars
    c01000 = max((-3000. / sep), c23650)
    invinc = e00300 + e00600 + c01000 + e01100 + e01200
    # above-the-line exclusion of a fraction of investment income
    invinc_agi_ec = ALD_InvInc_ec_rt * max(0., invinc_ec_base)
    # business losses are limited to ALD_BusinessLosses_c dollars
    ymod1 = (e00200 + e00700 + e00800 + e01400 + e01700 +
             invinc - invinc_agi_ec + e02100 + e02300 +
             max(e00900 + e02000, -ALD_BusinessLosses_c[MARS - 1]))
    if CG_nodiff:
        # apply qualified-dividend/capital-gain exclusions when gains
        # receive no differential rate treatment
        qdcg_pos = max(0., e00650 + c01000)
        qdcg_exclusion = (min(CG_ec, qdcg_pos) +
                          CG_reinvest_ec_rt * max(0., qdcg_pos - CG_ec))
        ymod1 = max(0., ymod1 - qdcg_exclusion)
        invinc_agi_ec += qdcg_exclusion
    # ymod is the modified income measure used by SSBenefits
    ymod2 = e00400 + (0.50 * e02400) - c02900
    ymod3 = (1. - ALD_StudentLoan_hc) * e03210 + e03230 + e03240
    ymod = ymod1 + ymod2 + ymod3
    return (c01000, c23650, ymod, ymod1, invinc_agi_ec,
            gains_at_death, taxable_gains_at_death)
@iterate_jit(nopython=True)
def SSBenefits(MARS, ymod, e02400, SS_thd50, SS_thd85,
               SS_percentage1, SS_percentage2, c02500):
    """
    Computes c02500, the amount of Social Security benefits (e02400)
    included in AGI, using a two-threshold phase-in: no inclusion when
    ymod is below SS_thd50, inclusion at rate SS_percentage1 between
    the thresholds, and at rate SS_percentage2 above SS_thd85.
    """
    if ymod < SS_thd50[MARS - 1]:
        c02500 = 0.
    elif ymod < SS_thd85[MARS - 1]:
        c02500 = SS_percentage1 * min(ymod - SS_thd50[MARS - 1], e02400)
    else:
        # inclusion is capped at SS_percentage2 of total benefits
        c02500 = min(SS_percentage2 * (ymod - SS_thd85[MARS - 1]) +
                     SS_percentage1 *
                     min(e02400, SS_thd85[MARS - 1] -
                         SS_thd50[MARS - 1]), SS_percentage2 * e02400)
    return c02500
@iterate_jit(nopython=True)
def UBI(nu18, n1820, n21, UBI_u18, UBI_1820, UBI_21, UBI_ecrt,
        ubi, taxable_ubi, nontaxable_ubi):
    """
    Computes total universal basic income (ubi) from the three age-group
    head counts and per-person amounts, and splits it into taxable and
    nontaxable portions using the UBI_ecrt exclusion rate.
    """
    ubi = nu18 * UBI_u18 + n1820 * UBI_1820 + n21 * UBI_21
    # UBI_ecrt is the fraction of total UBI excluded from taxable income
    taxable_ubi = ubi * (1. - UBI_ecrt)
    nontaxable_ubi = ubi - taxable_ubi
    return ubi, taxable_ubi, nontaxable_ubi
@iterate_jit(nopython=True)
def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi,
        II_em, II_em_ps, II_prt, II_no_em_nu18,
        c00100, pre_c04600, c04600):
    """
    Computes adjusted gross income (c00100), the pre-phaseout personal
    exemption amount (pre_c04600), and the post-phaseout personal
    exemption amount (c04600).
    """
    # AGI is income plus taxable SS benefits and taxable UBI, minus
    # above-the-line adjustments
    c00100 = ymod1 + c02500 - c02900 + taxable_ubi
    # exemption count optionally excludes people under age 18
    if II_no_em_nu18:
        pre_c04600 = max(0, XTOT - nu18) * II_em
    else:
        pre_c04600 = XTOT * II_em
    # dependent filers (DSI) get no personal exemption
    if DSI:
        pre_c04600 = 0.
    # phase out the exemption above the II_em_ps AGI threshold
    if exact == 1:  # exact calculation as on tax forms
        line5 = max(0., c00100 - II_em_ps[MARS - 1])
        line6 = math.ceil(line5 / (2500. / sep))
        line7 = II_prt * line6
        c04600 = max(0., pre_c04600 * (1. - line7))
    else:  # smooth (non-form) phaseout fraction
        dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1])
        dispc_denom = 2500. / sep
        dispc = min(1., max(0., dispc_numer / dispc_denom))
        c04600 = pre_c04600 * (1. - dispc)
    return (c00100, pre_c04600, c04600)
@iterate_jit(nopython=True)
def ItemDedCap(e17500, e18400, e18500, e19200, e19800, e20100, e20400, g20500,
               c00100, ID_AmountCap_rt, ID_AmountCap_Switch, e17500_capped,
               e18400_capped, e18500_capped, e19200_capped, e19800_capped,
               e20100_capped, e20400_capped, g20500_capped):
    """
    Caps the gross amount of the itemized-deduction expenses selected by
    ID_AmountCap_Switch at ID_AmountCap_rt times positive AGI, reducing
    each selected *_capped expense proportionally when the gross amount
    exceeds the cap.
    """
    # overall cap as a fraction of positive AGI
    cap = max(0., ID_AmountCap_rt * c00100)
    # gross expenses subject to the cap
    gross_ded_amt = 0
    if ID_AmountCap_Switch[0]:  # medical
        gross_ded_amt += e17500
    if ID_AmountCap_Switch[1]:  # state and local taxes
        gross_ded_amt += e18400
    if ID_AmountCap_Switch[2]:  # real-estate taxes
        gross_ded_amt += e18500
    if ID_AmountCap_Switch[3]:  # casualty
        gross_ded_amt += g20500
    if ID_AmountCap_Switch[4]:  # miscellaneous
        gross_ded_amt += e20400
    if ID_AmountCap_Switch[5]:  # interest paid
        gross_ded_amt += e19200
    if ID_AmountCap_Switch[6]:  # charity (cash plus noncash)
        gross_ded_amt += e19800 + e20100
    overage = max(0., gross_ded_amt - cap)
    e17500_capped = e17500
    e18400_capped = e18400
    e18500_capped = e18500
    g20500_capped = g20500
    e20400_capped = e20400
    e19200_capped = e19200
    e19800_capped = e19800
    e20100_capped = e20100
    # allocate any overage across the selected expenses in proportion
    # to each expense's share of the gross amount
    if overage > 0. and c00100 > 0.:
        if ID_AmountCap_Switch[0]:
            e17500_capped -= (e17500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[1]:
            e18400_capped -= (e18400 / (gross_ded_amt) * overage)
        if ID_AmountCap_Switch[2]:
            e18500_capped -= (e18500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[3]:
            g20500_capped -= (g20500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[4]:
            e20400_capped -= (e20400 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[5]:
            e19200_capped -= (e19200 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[6]:
            e19800_capped -= (e19800 / gross_ded_amt) * overage
            e20100_capped -= (e20100 / gross_ded_amt) * overage
    return (e17500_capped, e18400_capped, e18500_capped, g20500_capped,
            e20400_capped, e19200_capped, e19800_capped, e20100_capped)
@iterate_jit(nopython=True)
def ItemDed(e17500_capped, e18400_capped, e18500_capped, e19200_capped,
            e19800_capped, e20100_capped, e20400_capped, g20500_capped,
            MARS, age_head, age_spouse, c00100, c04470, c21040, c21060,
            c17000, c18300, c19200, c19700, c20500, c20800,
            ID_ps, ID_Medical_frt, ID_Medical_frt_add4aged, ID_Medical_hc,
            ID_Casualty_frt, ID_Casualty_hc, ID_Miscellaneous_frt,
            ID_Miscellaneous_hc, ID_Charity_crt_all, ID_Charity_crt_noncash,
            ID_prt, ID_crt, ID_c, ID_StateLocalTax_hc, ID_Charity_frt,
            ID_Charity_hc, ID_InterestPaid_hc, ID_RealEstate_hc,
            ID_Medical_c, ID_StateLocalTax_c, ID_RealEstate_c,
            ID_InterestPaid_c, ID_Charity_c, ID_Casualty_c,
            ID_Miscellaneous_c, ID_AllTaxes_c, ID_AllTaxes_hc,
            ID_StateLocalTax_crt, ID_RealEstate_crt, ID_Charity_f):
    """
    Computes itemized deductions: medical (c17000), taxes paid (c18300),
    interest paid (c19200), charity (c19700), casualty (c20500),
    miscellaneous (c20800), total before limitation (c21060), the
    overall-limitation amount (c21040), and the allowed total (c04470).
    Each component is subject to its own haircut (*_hc) and caps (*_c).
    """
    posagi = max(c00100, 0.)
    # medical: expenses above an AGI-fraction floor; the floor rate is
    # adjusted for aged (65+) filers
    medical_frt = ID_Medical_frt
    if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
        medical_frt += ID_Medical_frt_add4aged
    c17750 = medical_frt * posagi
    c17000 = max(0., e17500_capped - c17750) * (1. - ID_Medical_hc)
    c17000 = min(c17000, ID_Medical_c[MARS - 1])
    # state/local income taxes and real-estate taxes, each with its own
    # haircut, dollar cap, and AGI-share cap, then a combined cap
    c18400 = min((1. - ID_StateLocalTax_hc) * max(e18400_capped, 0.),
                 ID_StateLocalTax_c[MARS - 1])
    c18500 = min((1. - ID_RealEstate_hc) * e18500_capped,
                 ID_RealEstate_c[MARS - 1])
    c18400 = min(c18400, ID_StateLocalTax_crt * max(c00100, 0.0001))
    c18500 = min(c18500, ID_RealEstate_crt * max(c00100, 0.0001))
    c18300 = (c18400 + c18500) * (1. - ID_AllTaxes_hc)
    c18300 = min(c18300, ID_AllTaxes_c[MARS - 1])
    # interest paid
    c19200 = e19200_capped * (1. - ID_InterestPaid_hc)
    c19200 = min(c19200, ID_InterestPaid_c[MARS - 1])
    # charity: noncash contributions limited separately, then all
    # contributions subject to an AGI-share cap, a floor, and a cap
    lim30 = min(ID_Charity_crt_noncash * posagi, e20100_capped)
    c19700 = min(ID_Charity_crt_all * posagi, lim30 + e19800_capped)
    charity_floor = max(ID_Charity_frt * posagi, ID_Charity_f[MARS - 1])
    c19700 = max(0., c19700 - charity_floor) * (1. - ID_Charity_hc)
    c19700 = min(c19700, ID_Charity_c[MARS - 1])
    # casualty losses above an AGI-fraction floor
    c20500 = (max(0., g20500_capped - ID_Casualty_frt * posagi) *
              (1. - ID_Casualty_hc))
    c20500 = min(c20500, ID_Casualty_c[MARS - 1])
    # miscellaneous deductions above an AGI-fraction floor
    c20400 = e20400_capped
    c20750 = ID_Miscellaneous_frt * posagi
    c20800 = max(0., c20400 - c20750) * (1. - ID_Miscellaneous_hc)
    c20800 = min(c20800, ID_Miscellaneous_c[MARS - 1])
    # total itemized deductions before the overall limitation
    c21060 = c17000 + c18300 + c19200 + c19700 + c20500 + c20800
    # the overall limitation does not apply to medical and casualty
    nonlimited = c17000 + c20500
    limitstart = ID_ps[MARS - 1]
    if c21060 > nonlimited and c00100 > limitstart:
        dedmin = ID_crt * (c21060 - nonlimited)
        dedpho = ID_prt * max(0., posagi - limitstart)
        c21040 = min(dedmin, dedpho)
        c04470 = c21060 - c21040
    else:
        c21040 = 0.
        c04470 = c21060
    # overall dollar cap on total itemized deductions
    c04470 = min(c04470, ID_c[MARS - 1])
    return (c17000, c18300, c19200, c19700, c20500, c20800,
            c21040, c21060, c04470)
@iterate_jit(nopython=True)
def AdditionalMedicareTax(e00200, MARS,
                          AMEDT_ec, sey, AMEDT_rt,
                          FICA_mc_trt, FICA_ss_trt,
                          ptax_amc, payrolltax):
    """
    Computes Additional Medicare Tax (ptax_amc) at rate AMEDT_rt on
    wages and net self-employment earnings above the AMEDT_ec[MARS-1]
    threshold, and adds it to payrolltax.
    """
    # net self-employment earnings after the employer-share FICA netting
    line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
    # portion of the threshold not already used up by wages
    line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
    ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
                           max(0., line8 - line11))
    payrolltax += ptax_amc
    return (ptax_amc, payrolltax)
@iterate_jit(nopython=True)
def StdDed(DSI, earned, STD, age_head, age_spouse, STD_Aged, STD_Dep,
           MARS, MIDR, blind_head, blind_spouse, standard, c19700,
           STD_allow_charity_ded_nonitemizers):
    """
    Computes the standard deduction (standard), including the limited
    amount for dependent filers (DSI == 1), the extra amount for aged
    and blind filers (STD_Aged), and the zero deduction under the MIDR
    rule (presumably a separate filer whose spouse itemizes — verify
    against the variable documentation).
    """
    # dependent filers get a limited basic standard deduction
    if DSI == 1:
        c15100 = max(350. + earned, STD_Dep)
        basic_stded = min(STD[MARS - 1], c15100)
    else:
        c15100 = 0.
        if MIDR == 1:
            basic_stded = 0.
        else:
            basic_stded = STD[MARS - 1]
    # extra standard deduction for blind filers and filers aged 65+
    num_extra_stded = blind_head + blind_spouse
    if age_head >= 65:
        num_extra_stded += 1
    if MARS == 2 and age_spouse >= 65:
        num_extra_stded += 1
    extra_stded = num_extra_stded * STD_Aged[MARS - 1]
    standard = basic_stded + extra_stded
    if MARS == 3 and MIDR == 1:
        standard = 0.
    # optionally allow nonitemizers to deduct charitable contributions
    if STD_allow_charity_ded_nonitemizers:
        standard += c19700
    return standard
@iterate_jit(nopython=True)
def TaxInc(c00100, standard, c04470, c04600, MARS, e00900, e26270,
           e02100, e27200, e00650, c01000,
           PT_SSTB_income, PT_binc_w2_wages, PT_ubia_property,
           PT_qbid_rt, PT_qbid_taxinc_thd, PT_qbid_taxinc_gap,
           PT_qbid_w2_wages_rt,
           PT_qbid_alt_w2_wages_rt, PT_qbid_alt_property_rt,
           c04800, qbided, StudentLoan_em, studloan_debt, sldf):
    """
    Computes regular taxable income (c04800) net of the qualified
    business income deduction (qbided) and the student-loan-debt
    exclusion (sldf).
    """
    # taxable income before the qualified-business-income deduction
    pre_qbid_taxinc = max(0., c00100 - max(c04470, standard) - c04600)
    qbided = 0.
    # qualified business income and the full deduction at rate PT_qbid_rt
    qbinc = max(0., e00900 + e26270 + e02100 + e27200)
    qbided_full = qbinc * PT_qbid_rt
    # when a threshold is specified, phase the deduction out linearly
    # over the PT_qbid_taxinc_gap range above the threshold
    if PT_qbid_taxinc_thd[MARS-1] > 0:
        if pre_qbid_taxinc < PT_qbid_taxinc_thd[MARS-1]:
            qbided = qbided_full
        else:
            qbided = max(0., qbided_full * (1 - (pre_qbid_taxinc - PT_qbid_taxinc_thd[MARS-1])/ PT_qbid_taxinc_gap[MARS-1]))
    else:
        qbided = qbided_full
    # deduction cannot exceed PT_qbid_rt share of taxable income net of
    # net capital gains (qualified dividends plus capital gains)
    net_cg = e00650 + c01000
    taxinc_cap = PT_qbid_rt * max(0., pre_qbid_taxinc - net_cg)
    qbided = min(qbided, taxinc_cap)
    # optionally exempt student loan debt amounts from taxable income
    if StudentLoan_em is True:
        base_sldf = max(0., studloan_debt)
    else:
        base_sldf = 0.
    # the exclusion cannot reduce taxable income below zero
    sldf = max(0., min(pre_qbid_taxinc - qbided, base_sldf))
    c04800 = max(0., pre_qbid_taxinc - qbided - sldf)
    return (c04800, qbided, sldf)
@JIT(nopython=True)
def SchXYZ(taxable_income, MARS, e00900, e26270, e02000, e00200,
           PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
           PT_rt6, PT_rt7, PT_rt8,
           PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
           PT_brk6, PT_brk7,
           II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
           II_rt6, II_rt7, II_rt8,
           II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
           II_brk6, II_brk7, PT_EligibleRate_active,
           PT_EligibleRate_passive, PT_wages_active_income,
           PT_top_stacking):
    """
    Returns Schedule X/Y/Z tax on taxable_income by splitting it into a
    pass-through share, taxed under the PT_* rate/bracket schedule, and
    a regular share, taxed under the II_* schedule.  PT_top_stacking
    controls which share is stacked on top of the other.
    """
    # eligible pass-through income: a share of passive Sch E income
    # plus a share of active business income
    pt_passive = PT_EligibleRate_passive * (e02000 - e26270)
    pt_active_gross = e00900 + e26270
    if (pt_active_gross > 0) and PT_wages_active_income:
        pt_active_gross = pt_active_gross + e00200
    pt_active = PT_EligibleRate_active * pt_active_gross
    pt_active = min(pt_active, e00900 + e26270)
    pt_taxinc = max(0., pt_passive + pt_active)
    # pass-through share cannot exceed total taxable income
    if pt_taxinc >= taxable_income:
        pt_taxinc = taxable_income
        reg_taxinc = 0.
    else:
        reg_taxinc = taxable_income - pt_taxinc
    # choose stacking order: each share's tax is computed as if it sat
    # on top of the other share's income (via the tbrk_base argument)
    if PT_top_stacking:
        reg_tbase = 0.
        pt_tbase = reg_taxinc
    else:
        reg_tbase = pt_taxinc
        pt_tbase = 0.
    if reg_taxinc > 0.:
        reg_tax = Taxes(reg_taxinc, MARS, reg_tbase,
                        II_rt1, II_rt2, II_rt3, II_rt4,
                        II_rt5, II_rt6, II_rt7, II_rt8, II_brk1, II_brk2,
                        II_brk3, II_brk4, II_brk5, II_brk6, II_brk7)
    else:
        reg_tax = 0.
    if pt_taxinc > 0.:
        pt_tax = Taxes(pt_taxinc, MARS, pt_tbase,
                       PT_rt1, PT_rt2, PT_rt3, PT_rt4,
                       PT_rt5, PT_rt6, PT_rt7, PT_rt8, PT_brk1, PT_brk2,
                       PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7)
    else:
        pt_tax = 0.
    return reg_tax + pt_tax
@iterate_jit(nopython=True)
def SchXYZTax(c04800, MARS, e00900, e26270, e02000, e00200,
              PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
              PT_rt6, PT_rt7, PT_rt8,
              PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
              PT_brk6, PT_brk7,
              II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
              II_rt6, II_rt7, II_rt8,
              II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
              II_brk6, II_brk7, PT_EligibleRate_active,
              PT_EligibleRate_passive, PT_wages_active_income,
              PT_top_stacking, c05200):
    """
    Computes c05200, Schedule X/Y/Z income tax on taxable income
    (c04800), by delegating to the SchXYZ function.
    """
    c05200 = SchXYZ(c04800, MARS, e00900, e26270, e02000, e00200,
                    PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
                    PT_rt6, PT_rt7, PT_rt8,
                    PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
                    PT_brk6, PT_brk7,
                    II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
                    II_rt6, II_rt7, II_rt8,
                    II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
                    II_brk6, II_brk7, PT_EligibleRate_active,
                    PT_EligibleRate_passive, PT_wages_active_income,
                    PT_top_stacking)
    return c05200
@iterate_jit(nopython=True)
def GainsTax(e00650, c01000, c23650, p23250, e01100, e58990, e00200,
             e24515, e24518, MARS, c04800, c05200, e00900, e26270, e02000,
             II_rt1, II_rt2, II_rt3, II_rt4, II_rt5, II_rt6, II_rt7, II_rt8,
             II_brk1, II_brk2, II_brk3, II_brk4, II_brk5, II_brk6, II_brk7,
             PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5, PT_rt6, PT_rt7, PT_rt8,
             PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5, PT_brk6, PT_brk7,
             CG_nodiff, PT_EligibleRate_active, PT_EligibleRate_passive,
             PT_wages_active_income, PT_top_stacking,
             CG_rt1, CG_rt2, CG_rt3, CG_rt4, CG_brk1, CG_brk2, CG_brk3,
             dwks10, dwks13, dwks14, dwks19, c05700, taxbc):
    """
    Compute tax on both regular taxable income and long-term capital
    gains and qualified dividends, setting taxbc (tax before credits).

    When the unit has qualified dividends or long-term capital gains and
    CG_nodiff is false, the Schedule D Tax Worksheet logic is used; the
    dwks* local variables mirror that worksheet's line numbers.
    """
    # pylint: disable=too-many-statements
    if c01000 > 0. or c23650 > 0. or p23250 > 0. or e01100 > 0. or e00650 > 0.:
        hasqdivltcg = 1  # has qualified dividends or long-term capital gains
    else:
        hasqdivltcg = 0  # no qualified dividends or long-term capital gains
    if CG_nodiff:
        hasqdivltcg = 0  # no preferential taxation of qdiv and ltcg
    if hasqdivltcg == 1:
        # Schedule D Tax Worksheet
        dwks1 = c04800
        dwks2 = e00650
        dwks3 = e58990
        dwks4 = 0.  # always assumed to be zero
        dwks5 = max(0., dwks3 - dwks4)
        dwks6 = max(0., dwks2 - dwks5)
        dwks7 = min(p23250, c23650)  # SchD lines 15 and 16, respectively
        if e01100 > 0.:
            c24510 = e01100
        else:
            c24510 = max(0., dwks7) + e01100
        dwks9 = max(0., c24510 - min(0., e58990))
        dwks10 = dwks6 + dwks9
        dwks11 = e24515 + e24518  # SchD lines 18 and 19, respectively
        dwks12 = min(dwks9, dwks11)
        dwks13 = dwks10 - dwks12
        dwks14 = max(0., dwks1 - dwks13)
        dwks16 = min(CG_brk1[MARS - 1], dwks1)
        dwks17 = min(dwks14, dwks16)
        dwks18 = max(0., dwks1 - dwks10)
        dwks19 = max(dwks17, dwks18)
        dwks20 = dwks16 - dwks17
        lowest_rate_tax = CG_rt1 * dwks20  # usually a zero-rate amount
        # break in worksheet lines
        dwks21 = min(dwks1, dwks13)
        dwks22 = dwks20
        dwks23 = max(0., dwks21 - dwks22)
        dwks25 = min(CG_brk2[MARS - 1], dwks1)
        dwks26 = dwks19 + dwks20
        dwks27 = max(0., dwks25 - dwks26)
        dwks28 = min(dwks23, dwks27)
        dwks29 = CG_rt2 * dwks28
        dwks30 = dwks22 + dwks28
        dwks31 = dwks21 - dwks30
        dwks32 = CG_rt3 * dwks31
        # incremental tax on gains above the CG_brk3 threshold
        hi_base = max(0., dwks31 - CG_brk3[MARS - 1])
        hi_incremental_rate = CG_rt4 - CG_rt3
        highest_rate_incremental_tax = hi_incremental_rate * hi_base
        # break in worksheet lines
        dwks33 = min(dwks9, e24518)
        dwks34 = dwks10 + dwks19
        dwks36 = max(0., dwks34 - dwks1)
        dwks37 = max(0., dwks33 - dwks36)
        dwks38 = 0.25 * dwks37  # unrecaptured section 1250 gain rate
        # break in worksheet lines
        dwks39 = dwks19 + dwks20 + dwks28 + dwks31 + dwks37
        dwks40 = dwks1 - dwks39
        dwks41 = 0.28 * dwks40  # collectibles/sec-1202 gain rate
        # ordinary tax on the non-preferential portion of taxable income
        dwks42 = SchXYZ(dwks19, MARS, e00900, e26270, e02000, e00200,
                        PT_rt1, PT_rt2, PT_rt3, PT_rt4, PT_rt5,
                        PT_rt6, PT_rt7, PT_rt8,
                        PT_brk1, PT_brk2, PT_brk3, PT_brk4, PT_brk5,
                        PT_brk6, PT_brk7,
                        II_rt1, II_rt2, II_rt3, II_rt4, II_rt5,
                        II_rt6, II_rt7, II_rt8,
                        II_brk1, II_brk2, II_brk3, II_brk4, II_brk5,
                        II_brk6, II_brk7, PT_EligibleRate_active,
                        PT_EligibleRate_passive, PT_wages_active_income,
                        PT_top_stacking)
        dwks43 = (dwks29 + dwks32 + dwks38 + dwks41 + dwks42 +
                  lowest_rate_tax + highest_rate_incremental_tax)
        dwks44 = c05200
        dwks45 = min(dwks43, dwks44)  # worksheet tax never exceeds SchXYZ tax
        c24580 = dwks45
    else:  # if hasqdivltcg is zero
        c24580 = c05200
        dwks10 = max(0., min(p23250, c23650)) + e01100
        dwks13 = 0.
        dwks14 = 0.
        dwks19 = 0.
    # final calculations done no matter what the value of hasqdivltcg
    c05100 = c24580  # because no foreign earned income deduction
    c05700 = 0.  # no Form 4972 lump-sum distribution tax
    taxbc = c05700 + c05100
    return (dwks10, dwks13, dwks14, dwks19, c05700, taxbc)
@iterate_jit(nopython=True)
def AGIsurtax(c00100, MARS, AGI_surtax_trt, AGI_surtax_thd, taxbc, surtax):
    """
    Compute the high-AGI surtax and add it to both tax-before-credits
    (taxbc) and the surtax subtotal (surtax).

    The surtax is AGI_surtax_trt times the excess of AGI (c00100) over
    the MARS-dependent AGI_surtax_thd threshold; no surtax applies when
    the rate is zero or AGI is below the threshold.
    """
    if AGI_surtax_trt > 0.:
        agi_excess = max(c00100 - AGI_surtax_thd[MARS - 1], 0.)
        extra_tax = AGI_surtax_trt * agi_excess
        taxbc = taxbc + extra_tax
        surtax = surtax + extra_tax
    return (taxbc, surtax)
@iterate_jit(nopython=True)
def AMT(e07300, dwks13, standard, f6251, c00100, c18300, taxbc,
        c04470, c17000, c20800, c21040, e24515, MARS, sep, dwks19,
        dwks14, c05700, e62900, e00700, dwks10, age_head, age_spouse,
        earned, cmbtp,
        AMT_child_em_c_age, AMT_brk1,
        AMT_em, AMT_prt, AMT_rt1, AMT_rt2,
        AMT_child_em, AMT_em_ps, AMT_em_pe,
        AMT_CG_brk1, AMT_CG_brk2, AMT_CG_brk3, AMT_CG_rt1, AMT_CG_rt2,
        AMT_CG_rt3, AMT_CG_rt4, c05800, c09600, c62100):
    """
    Compute Alternative Minimum Tax taxable income (c62100), AMT
    liability (c09600), and income tax before credits (c05800),
    following the Form 6251 line structure (line* local variables
    mirror form line numbers).
    """
    # pylint: disable=too-many-statements,too-many-branches
    # Form 6251, Part I: AMT taxable income
    if standard == 0.0:
        # itemizer: start from AGI and add back disallowed deductions
        c62100 = (c00100 - e00700 - c04470 +
                  max(0., min(c17000, 0.025 * c00100)) +
                  c18300 + c20800 - c21040)
    if standard > 0.0:
        # non-itemizer: AGI less state/local income tax refunds
        c62100 = c00100 - e00700
    c62100 += cmbtp  # add income not in AGI but taxable under the AMT
    if MARS == 3:
        # married-filing-separately addback
        amtsepadd = max(0.,
                        min(AMT_em[MARS - 1], AMT_prt * (c62100 - AMT_em_pe)))
    else:
        amtsepadd = 0.
    c62100 = c62100 + amtsepadd  # AMT taxable income (line 28)
    # Form 6251, Part II: AMT exemption with phase-out
    line29 = max(0., AMT_em[MARS - 1] - AMT_prt *
                 max(0., c62100 - AMT_em_ps[MARS - 1]))
    young_head = age_head != 0 and age_head < AMT_child_em_c_age
    no_or_young_spouse = age_spouse < AMT_child_em_c_age
    if young_head and no_or_young_spouse:
        # "kiddie" limitation on the exemption
        line29 = min(line29, earned + AMT_child_em)
    line30 = max(0., c62100 - line29)
    # tentative AMT under the two-rate schedule
    line3163 = (AMT_rt1 * line30 +
                AMT_rt2 * max(0., (line30 - (AMT_brk1 / sep))))
    if dwks10 > 0. or dwks13 > 0. or dwks14 > 0. or dwks19 > 0. or e24515 > 0.:
        # complete Form 6251, Part III capital-gains computation
        line37 = dwks13
        line38 = e24515
        line39 = min(line37 + line38, dwks10)
        line40 = min(line30, line39)
        line41 = max(0., line30 - line40)
        line42 = (AMT_rt1 * line41 +
                  AMT_rt2 * max(0., (line41 - (AMT_brk1 / sep))))
        line44 = dwks14
        line45 = max(0., AMT_CG_brk1[MARS - 1] - line44)
        line46 = min(line30, line37)
        line47 = min(line45, line46)  # amount taxed at AMT_CG_rt1 rate
        cgtax1 = line47 * AMT_CG_rt1
        line48 = line46 - line47
        line51 = dwks19
        line52 = line45 + line51
        line53 = max(0., AMT_CG_brk2[MARS - 1] - line52)
        line54 = min(line48, line53)  # amount taxed at AMT_CG_rt2 rate
        cgtax2 = line54 * AMT_CG_rt2
        line56 = line47 + line54  # bypassing lines 55
        if line41 == line56:
            line57 = 0.  # line58 is also zero
            linex2 = 0.
        else:
            line57 = line46 - line56
            linex1 = min(line48,
                         max(0., AMT_CG_brk3[MARS - 1] - line44 - line45))
            linex2 = max(0., line54 - linex1)
        cgtax3 = line57 * AMT_CG_rt3
        cgtax4 = linex2 * AMT_CG_rt4
        if line38 == 0.:
            line61 = 0.
        else:
            # unrecaptured section 1250 gain taxed at 25 percent
            line61 = 0.25 * max(0., line30 - line41 - line56 - line57 - linex2)
        line62 = line42 + cgtax1 + cgtax2 + cgtax3 + cgtax4 + line61
        line64 = min(line3163, line62)
        line31 = line64
    else:  # if not completing Form 6251, Part III
        line31 = line3163
    # Form 6251, Part II bottom
    if f6251 == 1:
        line32 = e62900
    else:
        line32 = e07300
    line33 = line31 - line32
    # AMT is tentative minimum tax in excess of regular tax
    c09600 = max(0., line33 - max(0., taxbc - e07300 - c05700))
    c05800 = taxbc + c09600
    return (c62100, c09600, c05800)
@iterate_jit(nopython=True)
def NetInvIncTax(e00300, e00600, e02000, e26270, c01000,
                 c00100, NIIT_thd, MARS, NIIT_PT_taxed, NIIT_rt, niit):
    """
    Compute Net Investment Income Tax (niit) amount assuming that
    all annuity income is excluded from net investment income.

    Net investment income is interest plus dividends plus capital gains
    plus Sch E income; when NIIT_PT_taxed is false, partnership/S-corp
    income (e26270) is excluded.  The tax is NIIT_rt times the smaller
    of net investment income and the excess of modified AGI over the
    MARS-dependent NIIT_thd threshold.
    """
    modAGI = c00100  # no foreign earned income exclusion to add back
    invinc = e00300 + e00600 + c01000 + e02000
    if not NIIT_PT_taxed:
        invinc -= e26270
    NII = max(0., invinc)
    agi_excess = max(0., modAGI - NIIT_thd[MARS - 1])
    niit = NIIT_rt * min(NII, agi_excess)
    return niit
@iterate_jit(nopython=True)
def F2441(MARS, earned_p, earned_s, f2441, CDCC_c, e32800,
          exact, c00100, CDCC_ps, CDCC_crt, c05800, e07300, c07180):
    """
    Calculate Form 2441 child & dependent care expense credit, c07180.
    """
    # credit for at most two cared-for individuals and for actual expenses
    max_credit = min(f2441, 2) * CDCC_c
    c32800 = max(0., min(e32800, max_credit))
    # credit is limited to the smaller of the two earned incomes
    c32880 = earned_p  # earned income of taxpayer
    if MARS == 2:
        c32890 = earned_s  # earned income of spouse when present
    else:
        c32890 = earned_p
    c33000 = max(0., min(c32800, min(c32880, c32890)))
    # credit is limited by AGI-related fraction
    if exact == 1:  # exact calculation as on tax forms
        tratio = math.ceil(max(((c00100 - CDCC_ps) / 2000.), 0.))
        c33200 = c33000 * 0.01 * max(20., CDCC_crt - min(15., tratio))
    else:
        c33200 = c33000 * 0.01 * max(20., CDCC_crt -
                                     max(((c00100 - CDCC_ps) / 2000.), 0.))
    # credit is limited by tax liability
    c07180 = min(max(0., c05800 - e07300), c33200)
    return c07180
@JIT(nopython=True)
def EITCamount(basic_frac, phasein_rate, earnings, max_amount,
               phaseout_start, agi, phaseout_rate):
    """
    Return EITC amount given the specified rules and tax-unit
    characteristics: credit phases in at phasein_rate on earnings (with
    basic_frac of the maximum given unconditionally), is capped at
    max_amount, and phases out at phaseout_rate on the larger of
    earnings and AGI above phaseout_start.
    """
    phasein_amount = (basic_frac * max_amount +
                      (1.0 - basic_frac) * phasein_rate * earnings)
    eitc = min(phasein_amount, max_amount)
    if earnings > phaseout_start or agi > phaseout_start:
        phaseout_base = max(0., max(earnings, agi) - phaseout_start)
        eitcx = max(0., (max_amount - phaseout_rate * phaseout_base))
        eitc = min(eitc, eitcx)
    return eitc
@iterate_jit(nopython=True)
def EITC(MARS, DSI, EIC, c00100, e00300, e00400, e00600, c01000,
         e02000, e26270, age_head, age_spouse, earned, earned_p, earned_s,
         EITC_ps, EITC_MinEligAge, EITC_MaxEligAge, EITC_ps_MarriedJ,
         EITC_rt, EITC_c, EITC_prt, EITC_basic_frac,
         EITC_InvestIncome_c, EITC_excess_InvestIncome_rt,
         EITC_indiv, EITC_sep_filers_elig,
         c59660):
    """
    Compute earned income tax credit amount, c59660, using EITCamount
    for the phase-in/phase-out arithmetic, handling non-joint and joint
    filers separately, optionally computing individual-level (rather
    than filing-unit) credits, and applying the age-eligibility and
    investment-income rules.
    """
    # pylint: disable=too-many-branches
    if MARS != 2:
        eitc = EITCamount(EITC_basic_frac,
                          EITC_rt[EIC], earned, EITC_c[EIC],
                          EITC_ps[EIC], c00100, EITC_prt[EIC])
        if EIC == 0:
            # enforce age eligibility rule for those with no EITC-eligible
            # kids assuming that an unknown age_* value implies EITC age
            # eligibility
            h_age_elig = EITC_MinEligAge <= age_head <= EITC_MaxEligAge
            if (age_head == 0 or h_age_elig):
                c59660 = eitc
            else:
                c59660 = 0.
        else:  # if EIC != 0
            c59660 = eitc
    if MARS == 2:
        # joint filers get a higher phase-out start
        po_start = EITC_ps[EIC] + EITC_ps_MarriedJ[EIC]
        if not EITC_indiv:
            # filing unit EITC rather than individual EITC
            eitc = EITCamount(EITC_basic_frac,
                              EITC_rt[EIC], earned, EITC_c[EIC],
                              po_start, c00100, EITC_prt[EIC])
        if EITC_indiv:
            # individual EITC rather than a filing-unit EITC
            eitc_p = EITCamount(EITC_basic_frac,
                                EITC_rt[EIC], earned_p, EITC_c[EIC],
                                po_start, earned_p, EITC_prt[EIC])
            eitc_s = EITCamount(EITC_basic_frac,
                                EITC_rt[EIC], earned_s, EITC_c[EIC],
                                po_start, earned_s, EITC_prt[EIC])
            eitc = eitc_p + eitc_s
        if EIC == 0:
            # enforce age eligibility when neither age is known-ineligible
            h_age_elig = EITC_MinEligAge <= age_head <= EITC_MaxEligAge
            s_age_elig = EITC_MinEligAge <= age_spouse <= EITC_MaxEligAge
            if (age_head == 0 or age_spouse == 0 or h_age_elig or s_age_elig):
                c59660 = eitc
            else:
                c59660 = 0.
        else:
            c59660 = eitc
    # married-filing-separately filers and dependents may be ineligible
    if (MARS == 3 and not EITC_sep_filers_elig) or DSI == 1:
        c59660 = 0.
    # reduce positive EITC if investment income exceeds ceiling
    if c59660 > 0.:
        invinc = (e00400 + e00300 + e00600 +
                  max(0., c01000) + max(0., (e02000 - e26270)))
        if invinc > EITC_InvestIncome_c:
            eitc = (c59660 - EITC_excess_InvestIncome_rt *
                    (invinc - EITC_InvestIncome_c))
            c59660 = max(0., eitc)
    return c59660
@iterate_jit(nopython=True)
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
                               RPTC_c, RPTC_rt,
                               rptc_p, rptc_s, rptc):
    """
    Compute refundable payroll tax credit amounts for the taxpayer
    (rptc_p), the spouse (rptc_s), and the filing unit (rptc).

    Each individual credit equals RPTC_rt of wage-plus-self-employment
    income, capped at RPTC_c.
    """
    rptc_p = min(RPTC_rt * was_plus_sey_p, RPTC_c)
    rptc_s = min(RPTC_rt * was_plus_sey_s, RPTC_c)
    rptc = rptc_p + rptc_s
    return (rptc_p, rptc_s, rptc)
@iterate_jit(nopython=True)
def ChildDepTaxCredit(n24, MARS, c00100, XTOT, num, c05800,
                      e07260, CR_ResidentialEnergy_hc,
                      e07300, CR_ForeignTax_hc,
                      c07180,
                      c07230,
                      e07240, CR_RetirementSavings_hc,
                      c07200,
                      CTC_c, CTC_ps, CTC_prt, exact, ODC_c,
                      CTC_c_under6_bonus, nu06,
                      c07220, odc, codtc_limited):
    """
    Compute the nonrefundable child tax credit (c07220), the
    nonrefundable other-dependent credit (odc), and codtc_limited,
    the phased-out credit in excess of tax liability that is later
    used by the AdditionalCTC function.  The line* local variables
    follow the CTC/ODC worksheet structure.
    """
    # Worksheet Part 1
    line1 = CTC_c * n24 + CTC_c_under6_bonus * nu06
    line2 = ODC_c * max(0, XTOT - n24 - num)
    line3 = line1 + line2
    modAGI = c00100  # no foreign earned income exclusion to add to AGI (line6)
    if line3 > 0. and modAGI > CTC_ps[MARS - 1]:
        excess = modAGI - CTC_ps[MARS - 1]
        if exact == 1:  # exact calculation as on tax forms
            excess = 1000. * math.ceil(excess / 1000.)
        line10 = max(0., line3 - CTC_prt * excess)
    else:
        line10 = line3
    if line10 > 0.:
        # Worksheet Part 2: cap credit by tax net of earlier credits
        line11 = c05800
        line12 = (e07260 * (1. - CR_ResidentialEnergy_hc) +
                  e07300 * (1. - CR_ForeignTax_hc) +
                  c07180 +  # child & dependent care expense credit
                  c07230 +  # education credit
                  e07240 * (1. - CR_RetirementSavings_hc) +
                  c07200)  # Schedule R credit
        line13 = line11 - line12
        line14 = 0.
        line15 = max(0., line13 - line14)
        line16 = min(line10, line15)  # credit is capped by tax liability
    else:
        line16 = 0.
    # separate the CTC and ODTC amounts
    c07220 = 0.  # nonrefundable CTC amount
    odc = 0.  # nonrefundable ODTC amount
    if line16 > 0.:
        if line1 > 0.:
            # prorate the capped credit between CTC and ODC
            c07220 = line16 * line1 / line3
        odc = max(0., line16 - c07220)
    # compute codtc_limited for use in AdditionalCTC function
    codtc_limited = max(0., line10 - line16)
    return (c07220, odc, codtc_limited)
@iterate_jit(nopython=True)
def PersonalTaxCredit(MARS, c00100,
                      II_credit, II_credit_ps, II_credit_prt,
                      II_credit_nr, II_credit_nr_ps, II_credit_nr_prt,
                      personal_refundable_credit,
                      personal_nonrefundable_credit):
    """
    Compute personal refundable and nonrefundable credit amounts.

    Each credit starts at its MARS-dependent maximum and is phased out
    at the corresponding rate on AGI (c00100) above the corresponding
    phase-out start, never going below zero.
    """
    # personal refundable credit, phased out above II_credit_ps
    personal_refundable_credit = II_credit[MARS - 1]
    if II_credit_prt > 0. and c00100 > II_credit_ps[MARS - 1]:
        phased_out = II_credit_prt * (c00100 - II_credit_ps[MARS - 1])
        personal_refundable_credit = max(
            0., personal_refundable_credit - phased_out)
    # personal nonrefundable credit, phased out above II_credit_nr_ps
    personal_nonrefundable_credit = II_credit_nr[MARS - 1]
    if II_credit_nr_prt > 0. and c00100 > II_credit_nr_ps[MARS - 1]:
        phased_out = II_credit_nr_prt * (c00100 - II_credit_nr_ps[MARS - 1])
        personal_nonrefundable_credit = max(
            0., personal_nonrefundable_credit - phased_out)
    return (personal_refundable_credit, personal_nonrefundable_credit)
@iterate_jit(nopython=True)
def IRADCTaxCredit(e03150, e03300, IRADC_credit_c, IRADC_credit_rt, iradctc):
    """
    Compute refundable IRA/DC-contribution credit amount, iradctc:
    IRADC_credit_rt of total IRA plus self-employed retirement-plan
    contributions, capped at IRADC_credit_c; zero when the rate is zero.
    """
    total_contributions = e03150 + e03300
    if IRADC_credit_rt > 0.:
        iradctc = min(IRADC_credit_rt * total_contributions, IRADC_credit_c)
    else:
        iradctc = 0.
    return (iradctc)
@iterate_jit(nopython=True)
def FTHBTaxCredit(MARS, FTHB_credit, FTHB_credit_c, c00100,
                  FTHB_credit_e, fthbc, fthb_credit_amt):
    """
    Compute refundable first-time homebuyer credit amount, fthbc.

    The claimed amount (fthb_credit_amt) is capped at FTHB_credit_c and
    the credit is eliminated entirely when positive AGI exceeds the
    MARS-dependent FTHB_credit_e limit.  When the FTHB_credit policy
    switch is off, the incoming fthbc value is returned unchanged.
    """
    if FTHB_credit is True:
        # cap the claimed credit amount
        fthbc = max(0., min(FTHB_credit_c, fthb_credit_amt))
        # cliff elimination based on positive AGI
        # (original code had a redundant no-op "fthbc = fthbc" branch here)
        if max(c00100, 0.) > FTHB_credit_e[MARS - 1]:
            fthbc = 0.
    return (fthbc)
@iterate_jit(nopython=True)
def ICGTaxCredit(earned_p, earned_s, MARS, ICG_credit_c, ICG_credit_em,
                 ICG_credit_rt, ICG_credit_thd, icg_expense, c05800, e07300,
                 icgtc):
    """
    Compute nonrefundable informal care giver tax credit, icgtc,
    for units whose smaller earned income exceeds ICG_credit_thd;
    the credit is ICG_credit_rt of expenses above the ICG_credit_em
    exemption, capped at ICG_credit_c, at the smaller earned income,
    and at remaining tax liability.
    """
    # not reflected in current law and records modified with imputation
    # earned income of taxpayer
    icg32880 = earned_p  # earned income of taxpayer
    if MARS == 2:
        icg32890 = earned_s  # earned income of spouse when present
    else:
        icg32890 = earned_p
    icg33000 = min(icg32880, icg32890)
    if icg33000 > ICG_credit_thd:
        # credit for actual expenses
        icg_max_credit = (icg_expense - ICG_credit_em) * ICG_credit_rt
        icg_credit = max(0., min(icg_max_credit, ICG_credit_c))
        # credit is limited to minimum of individuals' earned income
        icg_credit = max(0., min(icg_credit, icg33000))
        # credit is limited by tax liability net of foreign tax credit
        icgtc = min(max(0., c05800 - e07300), icg_credit)
    else:
        icgtc = 0.
    return icgtc
@iterate_jit(nopython=True)
def IRATaxCredit(earned_p, earned_s, MARS, AutoIRA_credit, ira_credit,
                 c05800, e07300, iratc):
    """
    Compute automatic-enrollment IRA credit amount, iratc: the
    non-negative ira_credit amount when the AutoIRA_credit policy
    switch is on, and zero otherwise.
    """
    iratc = max(0., ira_credit) if AutoIRA_credit is True else 0.
    return iratc
@iterate_jit(nopython=True)
def EVTaxCredit(EV_credit, ev_credit_amt, EV_credit_c, c00100, EV_credit_ps, MARS,
                EV_credit_prt, evtc):
    """
    Compute electric-vehicle tax credit amount, evtc.

    The claimed amount (ev_credit_amt) is capped at EV_credit_c and then
    phased out at rate EV_credit_prt on positive AGI above the
    MARS-dependent EV_credit_ps start.  When the EV_credit policy switch
    is off, the incoming evtc value is returned unchanged.
    """
    if EV_credit is True:
        elecv_credit = max(0., min(ev_credit_amt, EV_credit_c))
        posevagi = max(c00100, 0.)
        ev_max = EV_credit_ps[MARS - 1]
        if posevagi < ev_max:
            evtc = elecv_credit
        else:
            # BUGFIX: phase out the capped credit (elecv_credit), not the
            # incoming evtc value (which made the credit drop to zero
            # immediately above the threshold); this now matches the
            # phase-out pattern used by CTC_new in this file.
            evtc_reduced = max(0., elecv_credit -
                               EV_credit_prt * (posevagi - ev_max))
            evtc = min(elecv_credit, evtc_reduced)
    return evtc
@iterate_jit(nopython=True)
def AmOppCreditParts(exact, e87521, num, c00100, CR_AmOppRefundable_hc,
                     CR_AmOppNonRefundable_hc, c10960, c87668):
    """
    Apply the Form 8863 AGI phase-out to the American Opportunity Credit
    amount (e87521), then split it into the 40-percent refundable part
    (c10960) and the nonrefundable part (c87668); each part has its own
    haircut parameter.
    """
    if e87521 > 0.:
        c87658 = max(0., 90000. * num - c00100)  # phase-out numerator
        c87660 = 10000. * num  # phase-out denominator
        if exact == 1:  # exact calculation as on tax forms
            c87662 = 1000. * min(1., round(c87658 / c87660, 3))
        else:
            c87662 = 1000. * min(1., c87658 / c87660)
        c87664 = c87662 * e87521 / 1000.  # phased-out credit amount
        c10960 = 0.4 * c87664 * (1. - CR_AmOppRefundable_hc)
        c87668 = c87664 - c10960 * (1. - CR_AmOppNonRefundable_hc)
    else:
        c10960 = 0.
        c87668 = 0.
    return (c10960, c87668)
@iterate_jit(nopython=True)
def SchR(age_head, age_spouse, MARS, c00100,
         c05800, e07300, c07180, e02400, c02500, e01500, e01700, CR_SchR_hc,
         c07200):
    """
    Compute Schedule R credit for the elderly and the disabled, c07200,
    assuming no taxpayer is disabled (credit is based only on the
    age-65-or-older rules).  The schr* locals follow Schedule R line
    numbers.
    """
    if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
        # calculate credit assuming nobody is disabled (so line12 = line10)
        if MARS == 2:
            if age_head >= 65 and age_spouse >= 65:
                schr12 = 7500.
            else:
                schr12 = 5000.
            schr15 = 10000.
        elif MARS == 3:
            schr12 = 3750.
            schr15 = 5000.
        elif MARS in (1, 4):
            schr12 = 5000.
            schr15 = 7500.
        else:
            schr12 = 0.
            schr15 = 0.
        # nontaxable social security benefits and pension income
        schr13a = max(0., e02400 - c02500)
        schr13b = max(0., e01500 - e01700)
        schr13c = schr13a + schr13b
        schr16 = max(0., c00100 - schr15)
        schr17 = 0.5 * schr16
        schr18 = schr13c + schr17
        schr19 = max(0., schr12 - schr18)
        schr20 = 0.15 * schr19
        # credit is limited by tax liability net of earlier credits
        schr21 = max(0., (c05800 - e07300 - c07180))
        c07200 = min(schr20, schr21) * (1. - CR_SchR_hc)
    else:  # if not calculating Schedule R credit
        c07200 = 0.
    return c07200
@iterate_jit(nopython=True)
def EducationTaxCredit(exact, e87530, MARS, c00100, num, c05800,
                       e07300, c07180, c07200, c87668,
                       LLC_Expense_c, ETC_pe_Single, ETC_pe_Married,
                       CR_Education_hc,
                       c07230):
    """
    Compute nonrefundable education tax credits, c07230, combining the
    Lifetime Learning Credit (20% of capped expenses, AGI-phased-out)
    with the nonrefundable American Opportunity Credit part (c87668),
    each limited by remaining tax liability (Form 8863 logic).
    """
    c87560 = 0.2 * min(e87530, LLC_Expense_c)  # Lifetime Learning Credit
    # AGI phase-out of the Lifetime Learning Credit
    if MARS == 2:
        c87570 = ETC_pe_Married * 1000.
    else:
        c87570 = ETC_pe_Single * 1000.
    c87590 = max(0., c87570 - c00100)
    c87600 = 10000. * num
    if exact == 1:  # exact calculation as on tax forms
        c87610 = min(1., round(c87590 / c87600, 3))
    else:
        c87610 = min(1., c87590 / c87600)
    c87620 = c87560 * c87610
    # limit credits by tax liability net of earlier credits
    xline4 = max(0., c05800 - (e07300 + c07180 + c07200))
    xline5 = min(c87620, xline4)
    xline9 = max(0., c05800 - (e07300 + c07180 + c07200 + xline5))
    xline10 = min(c87668, xline9)
    c87680 = xline5 + xline10
    c07230 = c87680 * (1. - CR_Education_hc)
    return c07230
@iterate_jit(nopython=True)
def CharityCredit(e19800, e20100, c00100, CR_Charity_rt, CR_Charity_f,
                  CR_Charity_frt, MARS, charity_credit):
    """
    Compute nonrefundable charity credit, charity_credit: CR_Charity_rt
    of total cash plus non-cash contributions above a floor, where the
    floor is the larger of CR_Charity_frt times AGI and the
    MARS-dependent dollar floor CR_Charity_f.
    """
    contributions = e19800 + e20100
    credit_floor = max(CR_Charity_frt * c00100, CR_Charity_f[MARS - 1])
    above_floor = max(contributions - credit_floor, 0)
    charity_credit = CR_Charity_rt * (above_floor)
    return charity_credit
@iterate_jit(nopython=True)
def NonrefundableCredits(c05800, e07240, e07260, e07300, e07400,
                         e07600, p08000, odc,
                         personal_nonrefundable_credit, icgtc, iratc, evtc,
                         CR_RetirementSavings_hc, CR_ForeignTax_hc,
                         CR_ResidentialEnergy_hc, CR_GeneralBusiness_hc,
                         CR_MinimumTax_hc, CR_OtherCredits_hc, charity_credit,
                         c07180, c07200, c07220, c07230, c07240,
                         c07260, c07300, c07400, c07600, c08000):
    """
    Sequentially limit nonrefundable credits to the remaining income tax
    liability (avail), applying each credit's haircut parameter; the
    order of the statements below determines which credits are used
    first, so it must not be changed.
    """
    # limit tax credits to tax liability in order they are on 2015 1040 form
    avail = c05800
    # Foreign tax credit - Form 1116
    c07300 = min(e07300 * (1. - CR_ForeignTax_hc), avail)
    avail = avail - c07300
    # Child & dependent care expense credit
    c07180 = min(c07180, avail)
    avail = avail - c07180
    # Education tax credit
    c07230 = min(c07230, avail)
    avail = avail - c07230
    # Retirement savings credit - Form 8880
    c07240 = min(e07240 * (1. - CR_RetirementSavings_hc), avail)
    avail = avail - c07240
    # Child tax credit
    c07220 = min(c07220, avail)
    avail = avail - c07220
    # Other dependent credit
    odc = min(odc, avail)
    avail = avail - odc
    # Residential energy credit - Form 5695
    c07260 = min(e07260 * (1. - CR_ResidentialEnergy_hc), avail)
    avail = avail - c07260
    # General business credit - Form 3800
    c07400 = min(e07400 * (1. - CR_GeneralBusiness_hc), avail)
    avail = avail - c07400
    # Prior year minimum tax credit - Form 8801
    c07600 = min(e07600 * (1. - CR_MinimumTax_hc), avail)
    avail = avail - c07600
    # Schedule R credit
    c07200 = min(c07200, avail)
    avail = avail - c07200
    # Other credits
    c08000 = min(p08000 * (1. - CR_OtherCredits_hc), avail)
    avail = avail - c08000
    # Charity credit
    charity_credit = min(charity_credit, avail)
    avail = avail - charity_credit
    # Personal nonrefundable credit
    personal_nonrefundable_credit = min(personal_nonrefundable_credit, avail)
    avail = avail - personal_nonrefundable_credit
    # Informal care giver credit
    icgtc = min(icgtc, avail)
    avail = avail - icgtc
    # Automatic-enrollment IRA credit
    iratc = min(iratc, avail)
    avail = avail - iratc
    # Electric vehicle credit
    evtc = min(evtc, avail)
    avail = avail - evtc
    return (c07180, c07200, c07220, c07230, c07240, odc,
            c07260, c07300, c07400, c07600, c08000, charity_credit,
            personal_nonrefundable_credit, icgtc, iratc, evtc)
@iterate_jit(nopython=True)
def AdditionalCTC(codtc_limited, ACTC_c, n24, earned, ACTC_Income_thd,
                  ACTC_rt, nu06, ACTC_rt_bonus_under6family, ACTC_ChildNum,
                  ptax_was, c03260, e09800, c59660, e11200,
                  c11070):
    """
    Calculate refundable Additional Child Tax Credit amount, c11070,
    following the 2018 Form 8812 logic; the line* locals follow that
    form's line numbers.
    """
    # Part I
    line3 = codtc_limited
    line4 = ACTC_c * n24
    c11070 = 0.  # line15
    if line3 > 0. and line4 > 0.:
        line5 = min(line3, line4)
        line7 = max(0., earned - ACTC_Income_thd)
        # accommodate ACTC rate bonus for families with children under six
        if nu06 == 0:
            ACTC_rate = ACTC_rt
        else:
            ACTC_rate = ACTC_rt + ACTC_rt_bonus_under6family
        line8 = ACTC_rate * line7
        if n24 < ACTC_ChildNum:
            # Part II
            if line8 > 0.:
                c11070 = min(line5, line8)
        else:  # if n24 >= ACTC_ChildNum
            # Part III: larger of earned-income formula and payroll-tax
            # formula applies
            if line8 >= line5:
                c11070 = line5
            else:
                line9 = 0.5 * ptax_was
                line10 = c03260 + e09800
                line11 = line9 + line10
                line12 = c59660 + e11200
                line13 = max(0., line11 - line12)
                line14 = max(line8, line13)
                c11070 = min(line5, line14)
    return c11070
@iterate_jit(nopython=True)
def C1040(c05800, c07180, c07200, c07220, c07230, c07240, c07260, c07300,
          c07400, c07600, c08000, e09700, e09800, e09900, niit, othertaxes,
          c07100, c09200, odc, charity_credit,
          personal_nonrefundable_credit, icgtc, iratc, evtc):
    """
    Compute total used nonrefundable credits (c07100), othertaxes, and
    income tax liability before refundable credits (c09200).
    """
    # total used nonrefundable credits (as limited by NonrefundableCredits)
    c07100 = (c07180 + c07200 + c07600 + c07300 + c07400 + c07220 + c08000 +
              c07230 + c07240 + c07260 + odc + charity_credit +
              personal_nonrefundable_credit + icgtc + iratc + evtc)
    # other taxes (penalty taxes, additional medicare tax, NIIT)
    othertaxes = e09700 + e09800 + e09900 + niit
    # tax net of nonrefundable credits plus other taxes
    c09200 = othertaxes + max(0., c05800 - c07100)
    return (c07100, othertaxes, c09200)
@iterate_jit(nopython=True)
def CTC_new(CTC_new_c, CTC_new_rt, CTC_new_c_under6_bonus,
            CTC_new_ps, CTC_new_prt, CTC_new_for_all,
            CTC_new_refund_limited, CTC_new_refund_limit_payroll_rt,
            CTC_new_refund_limited_all_payroll, payrolltax,
            n24, nu06, c00100, MARS, ptax_oasdi, c09200,
            ctc_new):
    """
    Compute new refundable child tax credit, ctc_new, with optional
    AGI phase-in (unless CTC_new_for_all), AGI phase-out above the
    MARS-dependent CTC_new_ps start, and an optional limit on the
    refundable portion tied to payroll taxes.
    """
    if n24 > 0:
        posagi = max(c00100, 0.)
        ctc_new = CTC_new_c * n24 + CTC_new_c_under6_bonus * nu06
        if not CTC_new_for_all:
            # credit phases in with positive AGI unless given to everybody
            ctc_new = min(CTC_new_rt * posagi, ctc_new)
        ymax = CTC_new_ps[MARS - 1]
        if posagi > ymax:
            # phase out credit above the MARS-dependent AGI start
            ctc_new_reduced = max(0.,
                                  ctc_new - CTC_new_prt * (posagi - ymax))
            ctc_new = min(ctc_new, ctc_new_reduced)
        if ctc_new > 0. and CTC_new_refund_limited:
            # limit refundable portion by a fraction of payroll taxes
            refund_new = max(0., ctc_new - c09200)
            if not CTC_new_refund_limited_all_payroll:
                limit_new = CTC_new_refund_limit_payroll_rt * ptax_oasdi
            if CTC_new_refund_limited_all_payroll:
                limit_new = CTC_new_refund_limit_payroll_rt * payrolltax
            limited_new = max(0., refund_new - limit_new)
            ctc_new = max(0., ctc_new - limited_new)
    else:
        ctc_new = 0.
    return ctc_new
@iterate_jit(nopython=True)
def CDCC_new(CDCC_new_c, CDCC_new_rt, CDCC_new_ps, CDCC_new_pe, CDCC_new_prt, cdcc_new,
             MARS, f2441, e32800, earned_s, earned_p, c05800, e07300, c00100):
    """
    Compute new refundable child & dependent care credit, cdcc_new:
    CDCC_new_rt of expenses, capped per cared-for individual, limited by
    the smaller earned income and by tax liability, then phased out at
    rate CDCC_new_prt on positive AGI between the MARS-dependent
    CDCC_new_ps and CDCC_new_pe thresholds (zero above CDCC_new_pe).
    """
    # credit for at most two cared-for individuals
    cdcc_new_max_credit = min(f2441, 2) * CDCC_new_c
    cdcc_new_32800 = max(0., min(e32800 * CDCC_new_rt, cdcc_new_max_credit))
    # credit is limited to the smaller of the two earned incomes
    cdcc_new_32880 = earned_p  # earned income of taxpayer
    if MARS == 2:
        cdcc_new_32890 = earned_s  # earned income of spouse when present
    else:
        cdcc_new_32890 = earned_p
    cdcc_new_33000 = max(0., min(cdcc_new_32800, min(cdcc_new_32880, cdcc_new_32890)))
    # credit is limited by tax liability
    cdcc_new = min(max(0., c05800 - e07300), cdcc_new_33000)
    # phase out based on positive AGI
    # (original code had a redundant no-op "cdcc_new = cdcc_new" branch
    # for AGI below the phase-out start; the logic below is equivalent)
    positiveagi = max(c00100, 0.)
    cdcc_min = CDCC_new_ps[MARS - 1]
    cdcc_max = CDCC_new_pe[MARS - 1]
    if positiveagi >= cdcc_max:
        cdcc_new = 0.
    elif positiveagi >= cdcc_min:
        cdcc_new_reduced = max(0., cdcc_new -
                               CDCC_new_prt * (positiveagi - cdcc_min))
        cdcc_new = min(cdcc_new, cdcc_new_reduced)
    return cdcc_new
@iterate_jit(nopython=True)
def IITAX(c59660, c11070, c10960, personal_refundable_credit, ctc_new, rptc,
          c09200, payrolltax,
          eitc, refund, iitax, combined, iradctc, fthbc, cdcc_new,
          business_burden, estate_burden, Business_tax_combined):
    """
    Compute total refundable credits (refund), individual income tax
    liability (iitax), and combined income-plus-payroll tax liability
    (combined), optionally folding business and estate tax burdens into
    the combined total.
    """
    eitc = c59660
    # sum of all refundable credits
    refund = (eitc + c11070 + c10960 +
              personal_refundable_credit + ctc_new + rptc + iradctc + fthbc + cdcc_new)
    iitax = c09200 - refund
    combined = iitax + payrolltax
    if Business_tax_combined is True:
        combined = combined + business_burden + estate_burden
    return (eitc, refund, iitax, combined)
@JIT(nopython=True)
def Taxes(income, MARS, tbrk_base,
          rate1, rate2, rate3, rate4, rate5, rate6, rate7, rate8,
          tbrk1, tbrk2, tbrk3, tbrk4, tbrk5, tbrk6, tbrk7):
    """
    Return tax amount on the specified income given a progressive
    eight-rate schedule whose (upper) bracket thresholds are the
    MARS-dependent tbrk* parameters; tbrk_base is the amount of income
    already stacked below this income (it shifts all thresholds down).
    """
    if tbrk_base > 0.:
        # shift thresholds down by income already taxed below this stack
        brk1 = max(tbrk1[MARS - 1] - tbrk_base, 0.)
        brk2 = max(tbrk2[MARS - 1] - tbrk_base, 0.)
        brk3 = max(tbrk3[MARS - 1] - tbrk_base, 0.)
        brk4 = max(tbrk4[MARS - 1] - tbrk_base, 0.)
        brk5 = max(tbrk5[MARS - 1] - tbrk_base, 0.)
        brk6 = max(tbrk6[MARS - 1] - tbrk_base, 0.)
        brk7 = max(tbrk7[MARS - 1] - tbrk_base, 0.)
    else:
        brk1 = tbrk1[MARS - 1]
        brk2 = tbrk2[MARS - 1]
        brk3 = tbrk3[MARS - 1]
        brk4 = tbrk4[MARS - 1]
        brk5 = tbrk5[MARS - 1]
        brk6 = tbrk6[MARS - 1]
        brk7 = tbrk7[MARS - 1]
    # apply each marginal rate to the slice of income in its bracket
    return (rate1 * min(income, brk1) +
            rate2 * min(brk2 - brk1, max(0., income - brk1)) +
            rate3 * min(brk3 - brk2, max(0., income - brk2)) +
            rate4 * min(brk4 - brk3, max(0., income - brk3)) +
            rate5 * min(brk5 - brk4, max(0., income - brk4)) +
            rate6 * min(brk6 - brk5, max(0., income - brk5)) +
            rate7 * min(brk7 - brk6, max(0., income - brk6)) +
            rate8 * max(0., income - brk7))
def ComputeBenefit(calc, ID_switch):
    """
    Calculate the per-unit income tax benefit (as a non-negative array)
    of the itemized deductions whose ID_switch entries are true, by
    re-computing taxes on a deep copy of calc with those deductions
    fully haircut and differencing iitax.
    """
    # compute income tax liability with no itemized deductions allowed for
    # the types of itemized deductions covered under the BenefitSurtax
    no_ID_calc = copy.deepcopy(calc)
    if ID_switch[0]:
        no_ID_calc.policy_param('ID_Medical_hc', [1.])
    if ID_switch[1]:
        no_ID_calc.policy_param('ID_StateLocalTax_hc', [1.])
    if ID_switch[2]:
        no_ID_calc.policy_param('ID_RealEstate_hc', [1.])
    if ID_switch[3]:
        no_ID_calc.policy_param('ID_Casualty_hc', [1.])
    if ID_switch[4]:
        no_ID_calc.policy_param('ID_Miscellaneous_hc', [1.])
    if ID_switch[5]:
        no_ID_calc.policy_param('ID_InterestPaid_hc', [1.])
    if ID_switch[6]:
        no_ID_calc.policy_param('ID_Charity_hc', [1.])
    no_ID_calc._calc_one_year()  # pylint: disable=protected-access
    # the benefit is the tax increase caused by removing the deductions
    diff_iitax = no_ID_calc.array('iitax') - calc.array('iitax')
    benefit = np.where(diff_iitax > 0., diff_iitax, 0.)
    return benefit
def BenefitSurtax(calc):
    """
    Compute the itemized-deduction-benefit surtax and add it to the
    calc object's income tax, combined tax, and surtax liabilities
    (in-place; returns nothing).
    """
    if calc.policy_param('ID_BenefitSurtax_crt') != 1.:
        ben = ComputeBenefit(calc,
                             calc.policy_param('ID_BenefitSurtax_Switch'))
        agi = calc.array('c00100')
        # benefit is surtaxed only above an AGI-fraction-plus-exemption level
        ben_deduct = calc.policy_param('ID_BenefitSurtax_crt') * agi
        ben_exempt_array = calc.policy_param('ID_BenefitSurtax_em')
        ben_exempt = ben_exempt_array[calc.array('MARS') - 1]
        ben_dedem = ben_deduct + ben_exempt
        ben_surtax = (calc.policy_param('ID_BenefitSurtax_trt') *
                      np.where(ben > ben_dedem, ben - ben_dedem, 0.))
        # add ben_surtax to income & combined taxes and to surtax subtotal
        calc.incarray('iitax', ben_surtax)
        calc.incarray('combined', ben_surtax)
        calc.incarray('surtax', ben_surtax)
def BenefitLimitation(calc):
    """
    Limit the tax benefit of selected itemized deductions to a fraction
    (ID_BenefitCap_rt) of the underlying deductible expenses, adding any
    excess benefit back to the calc object's income tax, surtax, and
    combined tax liabilities (in-place; returns nothing).
    """
    if calc.policy_param('ID_BenefitCap_rt') != 1.:
        benefit = ComputeBenefit(calc,
                                 calc.policy_param('ID_BenefitCap_Switch'))
        # Calculate total deductible expenses under the cap
        deduct_exps = 0.
        if calc.policy_param('ID_BenefitCap_Switch')[0]:  # medical
            deduct_exps += calc.array('c17000')
        if calc.policy_param('ID_BenefitCap_Switch')[1]:  # statelocal
            one_minus_hc = 1. - calc.policy_param('ID_StateLocalTax_hc')
            deduct_exps += (one_minus_hc *
                            np.maximum(calc.array('e18400_capped'), 0.))
        if calc.policy_param('ID_BenefitCap_Switch')[2]:  # realestate
            one_minus_hc = 1. - calc.policy_param('ID_RealEstate_hc')
            deduct_exps += one_minus_hc * calc.array('e18500_capped')
        if calc.policy_param('ID_BenefitCap_Switch')[3]:  # casualty
            deduct_exps += calc.array('c20500')
        if calc.policy_param('ID_BenefitCap_Switch')[4]:  # misc
            deduct_exps += calc.array('c20800')
        if calc.policy_param('ID_BenefitCap_Switch')[5]:  # interest
            deduct_exps += calc.array('c19200')
        if calc.policy_param('ID_BenefitCap_Switch')[6]:  # charity
            deduct_exps += calc.array('c19700')
        # Calculate cap value for itemized deductions
        benefit_limit = deduct_exps * calc.policy_param('ID_BenefitCap_rt')
        # Add the difference between the actual benefit and capped benefit
        # to income tax and combined tax liabilities.
        excess_benefit = np.maximum(benefit - benefit_limit, 0)
        calc.incarray('iitax', excess_benefit)
        calc.incarray('surtax', excess_benefit)
        calc.incarray('combined', excess_benefit)
@iterate_jit(nopython=True)
def FairShareTax(c00100, MARS, ptax_was, setax, ptax_amc,
                 FST_AGI_trt, FST_AGI_thd_lo, FST_AGI_thd_hi,
                 fstax, iitax, combined, surtax):
    """
    Compute Fair Share Tax ("Buffett Rule") amount, fstax, and update
    iitax, combined, and surtax liabilities.  The tax tops up total tax
    (income tax plus the employee share of payroll taxes) to
    FST_AGI_trt of AGI, phased in linearly between the MARS-dependent
    lower and upper AGI thresholds.
    """
    if FST_AGI_trt > 0. and c00100 >= FST_AGI_thd_lo[MARS - 1]:
        employee_share = 0.5 * ptax_was + 0.5 * setax + ptax_amc
        fstax = max(c00100 * FST_AGI_trt - iitax - employee_share, 0.)
        # phase in the tax linearly between the lower and upper thresholds
        thd_gap = max(FST_AGI_thd_hi[MARS - 1] - FST_AGI_thd_lo[MARS - 1], 0.)
        if thd_gap > 0. and c00100 < FST_AGI_thd_hi[MARS - 1]:
            fstax *= (c00100 - FST_AGI_thd_lo[MARS - 1]) / thd_gap
        iitax += fstax
        combined += fstax
        surtax += fstax
    else:
        fstax = 0.
    return (fstax, iitax, combined, surtax)
@iterate_jit(nopython=True)
def LumpSumTax(DSI, num, XTOT,
               LST,
               lumpsum_tax, combined):
    """
    Compute lump-sum tax, lumpsum_tax, as LST per person (the larger of
    num and XTOT) and add it to the combined tax liability; dependents
    (DSI == 1) owe no lump-sum tax.
    """
    no_lst = (LST == 0.0) or (DSI == 1)
    lumpsum_tax = 0. if no_lst else LST * max(num, XTOT)
    combined = combined + lumpsum_tax
    return (lumpsum_tax, combined)
@iterate_jit(nopython=True)
def ExpandIncome(e00200, pencon_p, pencon_s, e00300, e00400, e00600,
                 e00700, e00800, e00900, e01100, e01200, e01400, e01500,
                 e02000, e02100, p22250, p23250, cmbtp, ptax_was,
                 benefit_value_total, expanded_income):
    """
    Calculate expanded_income, a broad cash-plus-benefits income
    measure, from its component variables (each annotated below).
    """
    expanded_income = (
        e00200 +  # wage and salary income net of DC pension contributions
        pencon_p +  # tax-advantaged DC pension contributions for taxpayer
        pencon_s +  # tax-advantaged DC pension contributions for spouse
        e00300 +  # taxable interest income
        e00400 +  # non-taxable interest income
        e00600 +  # dividends
        e00700 +  # state and local income tax refunds
        e00800 +  # alimony received
        e00900 +  # Sch C business net income/loss
        e01100 +  # capital gain distributions not reported on Sch D
        e01200 +  # Form 4797 other net gain/loss
        e01400 +  # taxable IRA distributions
        e01500 +  # total pension & annuity income (including DB-plan benefits)
        e02000 +  # Sch E total rental, ..., partnership, S-corp income/loss
        e02100 +  # Sch F farm net income/loss
        p22250 +  # Sch D: net short-term capital gain/loss
        p23250 +  # Sch D: net long-term capital gain/loss
        cmbtp +  # other AMT taxable income items from Form 6251
        0.5 * ptax_was +  # employer share of FICA taxes on wages/salaries
        benefit_value_total  # consumption value of all benefits received;
        # see the BenefitPrograms function in this file for details on
        # exactly how the benefit_value_total variable is computed
    )
    return expanded_income
@iterate_jit(nopython=True)
def AfterTaxIncome(combined, expanded_income, aftertax_income,
                   Business_tax_expinc, corp_taxliab):
    """
    Calculate after-tax expanded income, aftertax_income: expanded
    income (plus corporate tax liability when the Business_tax_expinc
    switch is on) minus combined tax liability.
    """
    if Business_tax_expinc is True:
        # add corporate tax liability to the income measure
        expanded_income = expanded_income + corp_taxliab
    aftertax_income = expanded_income - combined
    return aftertax_income
| true | true |
f721ddd3d8923aa8e66f4d2cdeb1b7bb82dc9246 | 1,791 | py | Python | volttrontesting/services/test_pubsub_service.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | volttrontesting/services/test_pubsub_service.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | volttrontesting/services/test_pubsub_service.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z | from volttron.platform.vip.pubsubservice import PubSubService, ProtectedPubSubTopics
from mock import Mock, MagicMock
import pytest
@pytest.fixture(params=[
    dict(has_external_routing=True),
    dict(has_external_routing=False)
])
def pubsub_service(request):
    """
    Yield a (parameters, service) pair where service is a PubSubService
    built from mocked socket and protected-topics objects; parametrized
    over whether an external routing service is present.
    """
    mock_socket = Mock()
    mock_protected_topics = MagicMock()
    mock_routing_service = None
    if request.param['has_external_routing']:
        mock_routing_service = Mock()
    service = PubSubService(socket=mock_socket,
                            protected_topics=mock_protected_topics,
                            routing_service=mock_routing_service)
    # parameters echoes the constructor inputs so tests can inspect them
    parameters = dict(socket=mock_socket, protected_topics=mock_protected_topics,
                      routing_service=mock_routing_service, has_external_routing=request.param['has_external_routing'])
    yield parameters, service
def test_pubsub_routing_setup(pubsub_service):
    """Verify fixture wiring: routing service presence matches the param."""
    parameters, service = pubsub_service
    assert isinstance(service, PubSubService)
    if parameters['has_external_routing']:
        assert parameters["routing_service"] is not None
    else:
        assert parameters["routing_service"] is None
def test_handle_subsystem_not_enough_frames(pubsub_service):
    """Verify handle_subsystem returns falsy for empty or None frames."""
    parameters, service = pubsub_service
    # Expectation currently is there are 7 frames available.
    result = service.handle_subsystem([])
    assert not result
    result = service.handle_subsystem(None)
    assert not result
def test_returns_empty_list_when_subsystem_not_specified(pubsub_service):
    """Verify a non-'pubsub' subsystem frame yields an empty result list."""
    parameters, service = pubsub_service
    # build the expected 7-frame message with a wrong subsystem name
    frames = [None for x in range(7)]
    assert 7 == len(frames)
    frames[6] = "not_pubsub"
    result = service.handle_subsystem(frames)
    assert [] == result
| 31.982143 | 119 | 0.713009 | from volttron.platform.vip.pubsubservice import PubSubService, ProtectedPubSubTopics
from mock import Mock, MagicMock
import pytest
@pytest.fixture(params=[
dict(has_external_routing=True),
dict(has_external_routing=False)
])
def pubsub_service(request):
mock_socket = Mock()
mock_protected_topics = MagicMock()
mock_routing_service = None
if request.param['has_external_routing']:
mock_routing_service = Mock()
service = PubSubService(socket=mock_socket,
protected_topics=mock_protected_topics,
routing_service=mock_routing_service)
parameters = dict(socket=mock_socket, protected_topics=mock_protected_topics,
routing_service=mock_routing_service, has_external_routing=request.param['has_external_routing'])
yield parameters, service
def test_pubsub_routing_setup(pubsub_service):
parameters, service = pubsub_service
assert isinstance(service, PubSubService)
if parameters['has_external_routing']:
assert parameters["routing_service"] is not None
else:
assert parameters["routing_service"] is None
def test_handle_subsystem_not_enough_frames(pubsub_service):
parameters, service = pubsub_service
result = service.handle_subsystem([])
assert not result
result = service.handle_subsystem(None)
assert not result
def test_returns_empty_list_when_subsystem_not_specified(pubsub_service):
parameters, service = pubsub_service
frames = [None for x in range(7)]
assert 7 == len(frames)
frames[6] = "not_pubsub"
result = service.handle_subsystem(frames)
assert [] == result
| true | true |
f721de426c7c8a45eb5801626a616f226aca0644 | 108,032 | py | Python | dante/vendor/pkg_resources/__init__.py | sbg/dante | 104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227 | [
"Apache-2.0"
] | 9 | 2017-11-03T15:53:01.000Z | 2019-10-01T14:09:56.000Z | dante/vendor/pkg_resources/__init__.py | sbg/dante | 104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227 | [
"Apache-2.0"
] | 4 | 2019-10-01T12:53:58.000Z | 2021-04-26T15:39:16.000Z | dante/vendor/pkg_resources/__init__.py | sbg/dante | 104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227 | [
"Apache-2.0"
] | 5 | 2017-11-03T15:50:40.000Z | 2021-09-13T08:50:45.000Z | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from dante.vendor import six
from dante.vendor.six.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from dante.vendor import appdirs
from dante.vendor import packaging
__import__('dante.vendor.packaging.version')
__import__('dante.vendor.packaging.specifiers')
__import__('dante.vendor.packaging.requirements')
__import__('dante.vendor.packaging.markers')
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 4):
raise RuntimeError("Python 3.4 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """
    # Subclasses RuntimeWarning so it is surfaced through the standard
    # warnings machinery rather than aborting processing.
def parse_version(v):
    """Parse *v* into a comparable version object.

    Strict PEP 440 parsing is attempted first; any string the PEP 440
    parser rejects falls back to the permissive ``LegacyVersion`` so that
    arbitrary version strings remain orderable.
    """
    try:
        parsed = packaging.version.Version(v)
    except packaging.version.InvalidVersion:
        parsed = packaging.version.LegacyVersion(v)
    return parsed
_state_vars = {}
def _declare_state(vartype, **kw):
    """Install each keyword as a module global and record its type tag.

    The recorded tag in ``_state_vars`` tells ``__getstate__`` and
    ``__setstate__`` which ``_sget_*``/``_sset_*`` helper applies.
    """
    module_globals = globals()
    module_globals.update(kw)
    for name in kw:
        _state_vars[name] = vartype
def __getstate__():
    """Snapshot all registered module-level state variables.

    Each variable is captured via the getter matching its registered
    type tag (``_sget_<tag>``).
    """
    g = globals()
    return {
        name: g['_sget_' + tag](g[name])
        for name, tag in _state_vars.items()
    }
def __setstate__(state):
    """Restore module-level state captured by ``__getstate__``.

    Each value is applied through the setter matching its registered
    type tag; the state mapping is returned unchanged.
    """
    g = globals()
    for name, value in state.items():
        setter = g['_sset_' + _state_vars[name]]
        setter(name, g[name], value)
    return state
def _sget_dict(val):
    """State getter for dict-typed globals: snapshot via shallow copy."""
    return val.copy()
def _sset_dict(key, ob, state):
    """State setter for dict-typed globals: restore `ob` in place so
    existing references to the dict stay valid."""
    ob.clear()
    ob.update(state)
def _sget_object(val):
    """State getter for objects that expose their own ``__getstate__``."""
    return val.__getstate__()
def _sset_object(key, ob, state):
    """State setter for objects that expose their own ``__setstate__``."""
    ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.
    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.
    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    # On macOS, substitute the *running* OS version for the build-time one
    # so eggs requiring a newer OS than the build target can still match.
    if m is not None and sys.platform == "darwin":
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # Class name followed by the repr of the constructor args, which
        # makes subclass instances self-describing in tracebacks/logs.
        name = self.__class__.__name__
        return '{}{!r}'.format(name, self.args)
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """
    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        # Installed distribution (first constructor argument).
        return self.args[0]

    @property
    def req(self):
        # Conflicting requirement (second constructor argument).
        return self.args[1]

    def report(self):
        """Render a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """
    # Extends the parent's message with the names of the requirers.
    _template = VersionConflict._template + ' by {self.required_by}'
    @property
    def required_by(self):
        # The set of project names that pulled in the installed dist
        # (third constructor argument).
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        # The unmet requirement (first constructor argument).
        return self.args[0]

    @property
    def requirers(self):
        # Collection of requiring project names, or None (second argument).
        return self.args[1]

    @property
    def requirers_str(self):
        """Comma-separated requirer names, or a generic fallback."""
        requirers = self.requirers
        if requirers:
            return ', '.join(requirers)
        return 'the application'

    def report(self):
        """Render a human-readable description of the failure."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()
# NOTE(review): presumably raised when a requested extra is not declared by
# the distribution -- the raising code is outside this chunk; confirm.
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry mapping loader type -> provider factory (see
# register_loader_type / get_provider).
_provider_factories = {}

# Major Python version string, e.g. '3.6'.  Built from sys.version_info
# rather than sys.version[:3], which truncates '3.10.x' to '3.1'.
PY_MAJOR = '{}.{}'.format(*sys.version_info)

# Distribution "precedence" constants: higher values are preferred when
# several distributions for the same project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # A later registration for the same loader type replaces the earlier one.
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # For a Requirement, the active distribution is itself the provider;
        # activate it on demand if it isn't in the working set yet.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet -- import it so its __loader__ is available.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # Pick the provider factory registered for this loader's type.
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the running macOS version as a list of string components."""
    # NOTE: the mutable default argument is *intentional* -- it memoizes
    # the result across calls for the life of the process.
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
    """Map legacy Mac machine names to the canonical 'ppc' arch string;
    anything unrecognized passes through unchanged."""
    aliases = {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}
    return aliases.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    from sysconfig import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            # Normalize to the "macosx-<major>.<minor>-<arch>" form used by
            # distutils for native eggs.
            return "macosx-%d.%d-%s" % (
                int(version[0]), int(version[1]),
                _macosx_arch(machine),
            )
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Parses "macosx-<major>.<minor>-<arch>" platform tags.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
# Parses legacy "darwin-<maj>.<min>.<patch>-<arch>" tags used by packages
# built before setuptools 0.6 (see compatible_platforms).
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# Backward-compatibility alias for the historical public name.
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): these are *lexicographic* string compares;
                # adequate for the "10.3"/"10.4" legacy range they guard,
                # but not for general version ordering.
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script in the *caller's* global namespace, scrubbed of
    # everything except its __name__, so it executes like a fresh __main__.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
# Backward-compatibility alias; prefer run_script in new code.
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Progressively coerce: string -> Requirement -> Distribution.
    spec = dist
    if isinstance(spec, six.string_types):
        spec = Requirement.parse(spec)
    if isinstance(spec, Requirement):
        spec = get_provider(spec)
    if isinstance(spec, Distribution):
        return spec
    raise TypeError("Expected string, Requirement, or Distribution", spec)
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    # `dist` may be a string, Requirement, or Distribution; resolution is
    # delegated to get_distribution.
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    # Delegates to the resolved Distribution; group=None yields all groups.
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    # Delegates to the resolved Distribution's entry-point metadata.
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface for objects that can supply distribution metadata.

    The methods below are documentation-only stubs: their bodies are empty
    and they deliberately omit ``self``, since they exist purely to describe
    the contract concrete providers must implement.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Documentation-only stubs, like IMetadataProvider: empty bodies, no
    # ``self`` -- they define the contract, not an implementation.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
    """A collection of active distributions on sys.path (or a similar list)"""
    # Internal state:
    #   entries     - ordered path items (mirrors sys.path for the default set)
    #   entry_keys  - path item -> list of project keys found on it
    #   by_key      - project key -> active Distribution
    #   callbacks   - subscribers notified when a dist is activated
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        return (
            entry
            for dist in self
            for entry in dist.get_entry_map(group).values()
            if name is None or name == entry.name
        )
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Same namespace-scrubbing trick as the module-level run_script.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)
        if entry is None:
            entry = dist.location
        # Index the dist under both the supplied entry and its own location.
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False, extras=None):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        `extras` is a list of the extras to be used with these requirements.
        This is important because extra requirements may look like `my_req;
        extra = "my_extra"`, which would otherwise be interpreted as a purely
        optional requirement. Instead, we want to be able to assert that these
        requirements are truly required.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        req_extras = _ReqExtras()
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            if not req_extras.markers_pass(req, extras):
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(
                        req, ws, installer,
                        replace_conflicting=replace_conflicting
                    )
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras
            processed[req] = True
        # return list of distros to activate
        return to_activate
    def find_plugins(
            self, plugin_env, full_env=None, installer=None, fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions
        If `existing=True` (default),
        call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        """Notify all subscribed callbacks that `dist` was activated."""
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Shallow copies: the Distribution objects themselves are shared,
        # but the containers are detached from live state.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        # Restore from the 4-tuple produced by __getstate__.
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class _ReqExtras(dict):
    """
    Map each requirement to the extras that demanded it.
    """

    def markers_pass(self, req, extras=None):
        """
        Evaluate markers for req against each extra that
        demanded it.

        Return False if the req has a marker and fails
        evaluation. Otherwise, return True.
        """
        if not req.marker:
            # Unmarked requirements always apply.
            return True
        # Consider the extras recorded for this req plus any caller-supplied
        # ones; None stands in for "no extra".
        candidates = self.get(req, ()) + (extras or (None,))
        return any(
            req.marker.evaluate({'extra': extra})
            for extra in candidates
        )
class Environment:
    """Searchable snapshot of distributions on a search path"""
    # _distmap maps project key (lower-cased name) -> list of Distributions,
    # kept sorted newest-first by `hashcmp` (see add()).
    def __init__(
            self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.6'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        # None on either side of the python check means "don't filter".
        py_compat = (
            self.python is None
            or dist.py_version is None
            or dist.py_version == self.python
        )
        return py_compat and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        # Raises KeyError if the project is unknown, ValueError if `dist`
        # itself was never added.
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # Keep newest-first ordering so __getitem__/best_match see
                # the preferred candidate first.
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(
            self, req, working_set, installer=None, replace_conflicting=False):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        try:
            dist = working_set.find(req)
        except VersionConflict:
            if not replace_conflicting:
                raise
            dist = None
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        # Implicitly returns None when no installer is supplied.
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The fresh environment filters nothing (platform/python None), so
        # every dist from both operands is retained.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility
# Old public name for Environment; kept so pre-rename callers keep working.
AvailableDistributions = Environment
# Raised by ResourceManager.extraction_error() when writing extracted
# resources to the egg cache fails; carries enough context to report why.
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extracted resources; None means "use the default
    # cache" (see get_default_cache()).  Changed via set_extraction_path().
    extraction_path = None
    def __init__(self):
        # Tracks every extraction target path handed out by get_cache_path(),
        # so cleanup_resources() can know what was extracted.
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Intended to be called from inside an ``except`` block: capture the
        # exception currently being handled.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache
            The following error occurred while trying to extract file(s)
            to the Python egg cache:
            {old_exc}
            The Python egg cache directory is currently set to:
            {cache_path}
            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
        # NOTE: the template placeholders are filled from the local variable
        # names above, so do not rename old_exc/cache_path.
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            # translate any failure into a user-friendly ExtractionError
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        # remember this path for potential cleanup_resources() later
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        # group- or world-writable extraction dirs are a tampering risk
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = (
                "%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path
            )
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
        # NOTE(review): intentionally unimplemented upstream; the docstring
        # describes the intended contract should an implementation be added.
def get_default_cache():
    """
    Return the directory used for extracting resources.

    The ``PYTHON_EGG_CACHE`` environment variable wins when set (and
    non-empty); otherwise a platform-appropriate per-user cache directory
    for an app named "Python-Eggs" is used.
    """
    env_override = os.environ.get('PYTHON_EGG_CACHE')
    if env_override:
        return env_override
    return appdirs.user_cache_dir(appname='Python-Eggs')
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Every run of characters other than letters, digits and ``.`` collapses
    to a single ``-``.
    """
    return '-'.join(re.split('[^A-Za-z0-9.]+', name))
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string.

    Prefer the PEP 440 normalized form; for versions that cannot be parsed,
    fall back to replacing spaces with dots and squashing any remaining
    illegal character runs to ``-``.
    """
    try:
        normalized = packaging.version.Version(version)
    except packaging.version.InvalidVersion:
        fallback = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', fallback)
    return str(normalized)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Runs of characters outside ``[A-Za-z0-9.-]`` become a single ``_``,
    and the result is lowercased.
    """
    sanitized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any ``-`` characters are currently replaced with ``_``.
    """
    return '_'.join(name.split('-'))
def invalid_marker(text):
    """
    Validate *text* as a PEP 508 environment marker.

    Return False for a valid marker; otherwise return the SyntaxError
    describing the problem (with location attributes scrubbed, since they
    are meaningless for a marker string).
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        e.filename = None
        e.lineno = None
        return e
    else:
        return False
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    This implementation uses the 'pyparsing' module.
    """
    try:
        return packaging.markers.Marker(text).evaluate()
    except packaging.markers.InvalidMarker as e:
        # normalize the library's error type to the documented SyntaxError
        raise SyntaxError(e)
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # Filled in by egg-aware subclasses (see EggProvider); remaining None/empty
    # here means "no egg metadata available".
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        # Modules may lack __loader__/__file__ (e.g. namespace packages),
        # hence the defensive getattr defaults.
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        # Default implementation: materialize the bytes and wrap in BytesIO;
        # subclasses with real files override this with an actual stream.
        return io.BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def _get_metadata_path(self, name):
        # metadata lives under the egg_info directory
        return self._fn(self.egg_info, name)
    def has_metadata(self, name):
        if not self.egg_info:
            # returns the falsy egg_info value itself (None or '')
            return self.egg_info
        path = self._get_metadata_path(name)
        return self._has(path)
    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        # _get returns bytes on Python 3; Python 2 str passes through as-is
        return value.decode('utf-8') if six.PY3 else value
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))
    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []
    def run_script(self, script_name, namespace):
        # Execute a script shipped in the egg's EGG-INFO/scripts directory,
        # in the caller-supplied namespace.
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError(
                "Script {script!r} not found in metadata at {self.egg_info!r}"
                .format(**locals()),
            )
        # normalize line endings so compile() is happy on all platforms
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # the script exists on disk: compile from the real file so
            # tracebacks point at it
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # zipped egg: pre-seed linecache so tracebacks can still show
            # the script source
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)
    def _has(self, path):
        # loader-specific; supplied by registered subclasses
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Map a '/'-separated resource name onto a path below *base*,
        # refusing absolute or parent-relative names.
        self._validate_resource_path(resource_name)
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    @staticmethod
    def _validate_resource_path(path):
        """
        Validate the resource paths according to the docs.
        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
        >>> warned = getfixture('recwarn')
        >>> warnings.simplefilter('always')
        >>> vrp = NullProvider._validate_resource_path
        >>> vrp('foo/bar.txt')
        >>> bool(warned)
        False
        >>> vrp('../foo/bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('/foo/bar.txt')
        >>> bool(warned)
        True
        >>> vrp('foo/../../bar.txt')
        >>> bool(warned)
        True
        >>> warned.clear()
        >>> vrp('foo/f../bar.txt')
        >>> bool(warned)
        False
        Windows path separators are straight-up disallowed.
        >>> vrp(r'\\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path \
        is not allowed.
        >>> vrp(r'C:\\foo/bar.txt')
        Traceback (most recent call last):
        ...
        ValueError: Use of .. or absolute path in a resource path \
        is not allowed.
        Blank values are allowed
        >>> vrp('')
        >>> bool(warned)
        False
        Non-string values are not.
        >>> vrp(None)
        Traceback (most recent call last):
        ...
        AttributeError: ...
        """
        invalid = (
            os.path.pardir in path.split(posixpath.sep) or
            posixpath.isabs(path) or
            ntpath.isabs(path)
        )
        if not invalid:
            return
        msg = "Use of .. or absolute path in a resource path is not allowed."
        # Aggressively disallow Windows absolute paths
        if ntpath.isabs(path) and not posixpath.isabs(path):
            raise ValueError(msg)
        # for compatibility, warn; in future
        # raise ValueError(msg)
        warnings.warn(
            msg[:-1] + " and will raise exceptions in a future release.",
            DeprecationWarning,
            stacklevel=4,
        )
    def _get(self, path):
        # PEP 302 optional loader API: get_data returns raw bytes
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()
    def _setup_prefix(self):
        # Walk upward from the module's directory looking for an enclosing
        # .egg directory.  Metadata may be nested inside a "basket" of
        # multiple eggs, which is why module_path is used rather than the
        # loader's .archive.
        search = self.module_path
        previous = None
        while search != previous:
            if _is_egg_path(search):
                self.egg_name = os.path.basename(search)
                self.egg_info = os.path.join(search, 'EGG-INFO')
                self.egg_root = search
                break
            previous = search
            search = os.path.dirname(search)
        # when no .egg ancestor exists, egg_name/egg_info remain None
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self, path):
        return os.path.isdir(path)
    def _listdir(self, path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        # resources live directly on disk, so hand back a real file object
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        with open(path, 'rb') as handle:
            return handle.read()
    @classmethod
    def _register(cls):
        # Cover both source and bytecode-only file loaders; missing names
        # fall back to type(None) so registration is a harmless no-op.
        for loader_name in ('SourceFileLoader', 'SourcelessFileLoader'):
            loader_cls = getattr(importlib_machinery, loader_name, type(None))
            register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    # no backing module or directory
    module_path = None
    # directory/existence checks are always negative
    _isdir = _has = lambda self, path: False
    def _get(self, path):
        # nothing to read
        return ''
    def _listdir(self, path):
        # nothing to list
        return []
    def __init__(self):
        # deliberately skip NullProvider.__init__: there is no module
        pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """
    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.
        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with zipfile.ZipFile(path) as archive:
            return {
                member.replace('/', os.sep): archive.getinfo(member)
                for member in archive.namelist()
            }
    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    # pairs a built manifest with the archive mtime it was built from
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        cached = self.get(path)
        if cached is None or cached.mtime != mtime:
            # (re)build when missing or the archive changed on disk
            cached = self.manifest_mod(self.build(path), mtime)
            self[path] = cached
        return cached.manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # lazily-populated list of resources that must be eagerly extracted
    eagers = None
    # shared manifest cache keyed by archive path
    _zip_manifests = MemoizedZipManifests()
    def __init__(self, module):
        EggProvider.__init__(self, module)
        # archive path plus trailing separator; prefix of all member paths
        self.zip_pre = self.loader.archive + os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        fspath = fspath.rstrip(os.sep)
        if fspath == self.loader.archive:
            return ''
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )
    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )
    @property
    def zipinfo(self):
        # manifest dict: member subpath -> ZipInfo (cached per archive mtime)
        return self._zip_manifests.load(self.loader.archive)
    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # eager resources (e.g. native libs) must all be extracted
            # together before any of them is used
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        # directories are extracted by extracting each child recursively
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                # an up-to-date copy already exists in the cache
                return real_path
            # write to a temp name in the same directory, then rename into
            # place so concurrent extractors never see a partial file
            outf, tmpnam = _mkstemp(
                ".$extract",
                dir=os.path.dirname(real_path),
            )
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # match the archive timestamp so _is_current() can compare
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        # names listed in native_libs.txt / eager_resources.txt metadata
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a directory index: parent subpath -> list of child
        # names, derived from the flat member list in the manifest.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        # a path "exists" if it is a member or an implied directory
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """
    def __init__(self, path):
        self.path = path
    def _get_metadata_path(self, name):
        # every metadata name maps onto the single backing file
        return self.path
    def has_metadata(self, name):
        return name == 'PKG-INFO' and os.path.isfile(self.path)
    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        with io.open(self.path, encoding='utf-8', errors="replace") as stream:
            text = stream.read()
        self._warn_on_replacement(text)
        return text
    def _warn_on_replacement(self, metadata):
        # U+FFFD REPLACEMENT CHARACTER, spelled bytewise for Python 2 compat.
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char not in metadata:
            return
        # placeholders are filled from local names: keep tmpl/self intact
        tmpl = "{self.path} could not be properly decoded in UTF-8"
        msg = tmpl.format(**locals())
        warnings.warn(msg)
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # base directory used for resource access
        self.module_path = path
        # directory holding the .egg-info / EGG-INFO style metadata
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        archive = importer.archive
        self.zip_pre = archive + os.sep
        self.loader = importer
        # a non-empty importer prefix means the egg lives in a subdirectory
        # of the zip archive
        self.module_path = (
            os.path.join(archive, importer.prefix)
            if importer.prefix else archive
        )
        self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # later registrations for the same importer type silently override
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`.

    Look up the importer responsible for *path_item*, then dispatch to the
    finder registered for that importer's type.
    """
    importer = get_importer(path_item)
    handler = _find_adapter(_distribution_finders, importer)
    return handler(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for entry in metadata.resource_listdir(''):
        if _is_egg_path(entry):
            # recurse into a nested egg
            nested_path = os.path.join(path_item, entry)
            nested_importer = zipimport.zipimporter(nested_path)
            for dist in find_eggs_in_zip(nested_importer, nested_path):
                yield dist
        elif entry.lower().endswith('.dist-info'):
            info_path = os.path.join(path_item, entry)
            info_meta = EggMetadata(zipimport.zipimporter(info_path))
            info_meta.egg_info = info_path
            yield Distribution.from_location(path_item, entry, info_meta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()
# default: unknown importer types yield no distributions
register_finder(object, find_nothing)
def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.
    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    """
    def _version_key(name):
        # Split the stem on '-' and parse every piece (plus the extension)
        # as a version, so mixed name/version filenames sort sensibly.
        stem, ext = os.path.splitext(name)
        pieces = stem.split('-') + [ext]
        return [packaging.version.parse(piece) for piece in pieces]
    return sorted(names, key=_version_key, reverse=True)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if _is_unpacked_egg(path_item):
        # the path item itself is an unpacked egg; yield it directly
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return
    entries = safe_listdir(path_item)
    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )
    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        # dist_factory is called a second time here; it is cheap and pure,
        # and NoDists() (falsy) entries were already screened out above
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist
def dist_factory(path_item, entry, only):
    """
    Return a dist_factory for a path_item and entry

    The result is a callable taking a full path and yielding distributions;
    NoDists() (which is falsy) is returned for entries that cannot produce
    any.
    """
    lower = entry.lower()
    if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
        return distributions_from_metadata
    if not only and _is_egg_path(entry):
        return find_distributions
    if not only and lower.endswith('.egg-link'):
        return resolve_egg_link
    return NoDists()
class NoDists:
    """
    Sentinel dist factory: falsy, and yields nothing when called.
    >>> bool(NoDists())
    False
    >>> list(NoDists()('anything'))
    []
    """
    def __bool__(self):
        return False
    if six.PY2:
        # Python 2 spells the truthiness hook __nonzero__
        __nonzero__ = __bool__
    def __call__(self, fullpath):
        # an empty iterator, whatever the path
        return iter(())
def safe_listdir(path):
    """
    List the contents of *path*, returning () when the path is missing,
    is not a directory, or is unreadable; any other OSError propagates.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        return ()
    except OSError as e:
        # errno covers POSIX; winerror 267 is how Python 2 on Windows
        # reports "not a directory"
        if e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
            return ()
        if getattr(e, "winerror", None) == 267:
            return ()
        raise
def distributions_from_metadata(path):
    """Yield a develop-dist for the metadata file or directory at *path*."""
    root = os.path.dirname(path)
    if os.path.isdir(path):
        if not os.listdir(path):
            # empty metadata dir; skip
            return
        metadata = PathMetadata(root, path)
    else:
        metadata = FileMetadata(path)
    yield Distribution.from_location(
        root, os.path.basename(path), metadata, precedence=DEVELOP_DIST,
    )
def non_empty_lines(path):
    """
    Yield each line of the file at *path* that is non-blank, stripped of
    surrounding whitespace.
    """
    with open(path) as stream:
        for raw in stream:
            stripped = raw.strip()
            if stripped:
                yield stripped
def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.

    Only the first non-empty line of the link file is consulted; it is
    interpreted relative to the link file's own directory.
    """
    base = os.path.dirname(path)
    for ref in non_empty_lines(path):
        return find_distributions(os.path.join(base, ref))
    return ()
# classic directory-based sys.path entries
register_finder(pkgutil.ImpImporter, find_on_path)
# importlib's FileFinder handles directories on modern Pythons
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Registries for namespace-package support, preserved across reloads.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # later registrations for the same importer type silently override
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # nothing can import from this path item
        return None
    # capture warnings due to #1111
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        loader = importer.find_module(packageName)
    if loader is None:
        # this path item does not contain the package
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create a fresh namespace module with an empty __path__
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # handler found a new subpath: record it, (re)load the package
        # code, and re-sort __path__ to match sys.path ordering
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    normalized_sys_path = [_normalize_cached(entry) for entry in sys.path]
    def _sys_path_rank(entry):
        # Entries absent from sys.path sort last (workaround for #520/#513).
        try:
            return normalized_sys_path.index(entry)
        except ValueError:
            return float('inf')
    def _path_rank(path):
        # Strip the trailing package directories to recover the sys.path
        # entry this __path__ element came from, then rank that entry.
        depth = package_name.count('.') + 1
        base_parts = path.split(os.sep)[:-depth]
        return _sys_path_rank(_normalize_cached(os.sep.join(base_parts)))
    reordered = [
        _normalize_cached(entry)
        for entry in sorted(orig_path, key=_path_rank)
    ]
    if isinstance(module.__path__, list):
        # mutate in place so existing aliases observe the new ordering
        module.__path__[:] = reordered
    else:
        module.__path__ = reordered
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # the imp lock serializes namespace bookkeeping against imports
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # already declared; nothing to do
            return
        path = sys.path
        parent, _, _ = packageName.rpartition('.')
        if parent:
            # recursively declare (and import) the parent first, so the
            # child can be attached under the parent's __path__
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent or None, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        # update every namespace package declared under *parent*, recursing
        # into each package's own children via the returned subpath
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    # Candidate directory named after the last package-name component.
    candidate = os.path.join(path_item, packageName.split('.')[-1])
    target = _normalize_cached(candidate)
    already_present = any(
        _normalize_cached(entry) == target for entry in module.__path__
    )
    # Report the subpath only when it is not already on __path__;
    # otherwise fall through and return None implicitly.
    if not already_present:
        return candidate
# Wire the filesystem/zip namespace handler into each importer type that
# can host package directories.  NOTE(review): pkgutil.ImpImporter was
# removed in Python 3.12 -- confirm this vendored copy only runs on
# interpreters that still provide it.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Fallback namespace handler: contributes no subpath."""
    return None
# `object` matches any importer type, making this the catch-all default.
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # On Cygwin, getcwd(3) may contain symlink components (contrary to
    # POSIX 2008); abspath works around that before canonicalizing.
    if sys.platform == 'cygwin':
        filename = os.path.abspath(filename)
    return os.path.normcase(os.path.realpath(os.path.normpath(filename)))
def _cygwin_patch(filename): # pragma: nocover
"""
Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
symlink components. Using
os.path.abspath() works around this limitation. A fix in os.getcwd()
would probably better, in Cygwin even more so, except
that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
    """Memoized normalize_path(); the mutable default is the cache."""
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.
    """
    # Must both look like an egg and contain extracted metadata.
    if not path.lower().endswith('.egg'):
        return False
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    return os.path.isfile(pkg_info)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        # A single string: emit each stripped, non-blank, non-# line.
        for raw in strs.splitlines():
            stripped = raw.strip()
            if stripped and not stripped.startswith('#'):
                yield stripped
    else:
        # A (possibly nested) iterable of strings: recurse.
        for item in strs:
            for line in yield_lines(item):
                yield line
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint:
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        self.extras = tuple(extras)
        self.dist = dist
    def __str__(self):
        # Render back into the "name = module:attrs [extras]" source form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        # Extra arguments (or require=False) indicate the deprecated
        # legacy calling convention; warn but keep honoring it.
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                PkgResourcesDeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()
    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # Walk the dotted attribute chain, e.g. "Class.method".
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))
    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # Get the requirements for this entry point with all its extras and
        # then resolve them. We have to pass `extras` along when resolving so
        # that the working set knows what extras we want. Otherwise, for
        # dist-info distributions, the working set will assume that the
        # requirements for that extra are purely optional and skip over them.
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))
    # One entry-point source line: "name = module[:attrs] [extras]".
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )
    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)
    @classmethod
    def _parse_extras(cls, extras_spec):
        if not extras_spec:
            return ()
        # Reuse the requirement parser (on a dummy project name) to
        # validate the extras; version specifiers are not allowed here.
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras
    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this
    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    """
    matches = (
        line for line in lines if line.lower().startswith('version:')
    )
    first = next(matches, '')
    _, _, value = first.partition(':')
    return safe_version(value.strip()) or None
class Distribution:
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Metadata file name; DistInfoDistribution overrides with 'METADATA'.
    PKG_INFO = 'PKG-INFO'
    def __init__(
            self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Metadata provider; empty_provider acts as a null object.
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        """Build a Distribution by parsing an egg-style basename."""
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()
    def _reload_version(self):
        # Hook for subclasses (see EggInfoDistribution) to re-read the
        # version from metadata; the base class keeps the parsed one.
        return self
    @property
    def hashcmp(self):
        # Tuple that drives ordering, equality and hashing.
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )
    def __hash__(self):
        return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version
    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return
        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Lazily read the version from metadata when it wasn't
            # supplied at construction time.
            version = self._get_version()
            if version is None:
                path = self._get_metadata_path_for_display(self.PKG_INFO)
                msg = (
                    "Missing 'Version:' header and/or {} file at path: {}"
                ).format(self.PKG_INFO, path)
                raise ValueError(msg, self)
            return version
    @property
    def _dep_map(self):
        """
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        """
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
        return self.__dep_map
    @staticmethod
    def _filter_extras(dm):
        """
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        """
        for extra in list(filter(None, dm)):
            new_extra = extra
            reqs = dm.pop(extra)
            # Extras may be spelled "name:marker"; an invalid or false
            # marker drops the requirements for that extra entirely.
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (
                invalid_marker(marker)
                or not evaluate_marker(marker)
            )
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None
            dm.setdefault(new_extra, []).extend(reqs)
        return dm
    def _build_dep_map(self):
        dm = {}
        for name in 'requires.txt', 'depends.txt':
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm
    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata_path_for_display(self, name):
        """
        Return the path to the given metadata file, if available.
        """
        try:
            # We need to access _get_metadata_path() on the provider object
            # directly rather than through this class's __getattr__()
            # since _get_metadata_path() is marked private.
            path = self._provider._get_metadata_path(name)
        # Handle exceptions e.g. in case the distribution's metadata
        # provider doesn't support _get_metadata_path().
        except Exception:
            return '[could not detect]'
        return path
    def _get_metadata(self, name):
        # Generator over metadata lines; empty when the file is absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def _get_version(self):
        lines = self._get_metadata(self.PKG_INFO)
        version = _version_from_file(lines)
        return version
    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            # Only do namespace-package fixups when activating for real.
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)
    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)
    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    def __dir__(self):
        # Merge our own attributes with the provider's public ones so
        # the delegation in __getattr__ is discoverable.
        return list(
            set(super(Distribution, self).__dir__())
            | set(
                attr for attr in self._provider.__dir__()
                if not attr.startswith('_')
            )
        )
    if not hasattr(object, '__dir__'):
        # python 2.7 not supported
        del __dir__
    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            # Legacy (non-PEP 440) versions need the arbitrary-equality
            # operator to round-trip exactly.
            spec = "%s===%s" % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path

        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if
                    # found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Location was nowhere on path: append (or prepend when
            # replace=True) and we're done.
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np
        return
    def check_version_conflict(self):
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # True when the version can be determined; warns otherwise.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True
    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        # Names of the declared extras (the None "null extra" excluded).
        return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
    """Distribution backed by .egg-info metadata."""
    def _reload_version(self):
        """
        Packages installed by distutils (e.g. numpy or scipy),
        which uses an old safe_version, and so
        their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be
        parsed properly
        downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename.
        """
        md_version = self._get_version()
        if md_version:
            self._version = md_version
        return self
class DistInfoDistribution(Distribution):
    """
    Wrap an actual or potential sys.path entry
    w/metadata, .dist-info style.
    """
    PKG_INFO = 'METADATA'
    # NOTE(review): EQEQ appears unused within this class -- confirm
    # against the rest of the module before removing.
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info
    @property
    def _dep_map(self):
        # Cached extra -> requirements mapping (name-mangled attribute).
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))
        def reqs_for_extra(extra):
            # Requirements whose marker matches the given extra (or that
            # have no marker at all).
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map of metadata extension to the Distribution subclass that knows how
# to read it; consulted by Distribution.from_location().
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # Ran off the top of the stack; fall back to the level found.
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""
    def __str__(self):
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            # NOTE(review): [:-2] removes the backslash AND the character
            # before it -- a long-standing upstream quirk, preserved here
            # for compatibility.
            line = line[:-2].strip()
            try:
                line += next(lines)
            except StopIteration:
                return
        yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
    """A packaging Requirement augmented with legacy pkg_resources attrs."""
    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        # `key` is the case-normalized project name used for lookups.
        self.project_name, self.key = project_name, project_name.lower()
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # hashCmp drives __eq__/__hash__: requirements are equal when
        # key, specifier set, extras and marker all agree.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)
    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )
    def __ne__(self, other):
        return not self == other
    def __contains__(self, item):
        # Accept a Distribution (matched by key) or a bare version.
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)
    def __hash__(self):
        return self.__hash
    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        req, = parse_requirements(s)
        return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the MRO of ob's class, guaranteeing `object` is present even
    # for old-style classes, and return the first registered factory.
    mro = inspect.getmro(getattr(ob, '__class__', type(ob)))
    if object not in mro:
        mro = mro + (object,)
    for candidate in mro:
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    # py31compat shim provides os.makedirs(..., exist_ok=True) semantics
    # on interpreters that lack it.
    py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    # Recursively create missing parents using the raw os functions
    # captured at import time (mkdir/isdir/split), sidestepping any
    # sandbox patched over the os module.
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        try:
            mkdir(dirname, 0o755)
        except FileExistsError:
            # Another thread/process created it first; that's fine.
            pass
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  Lines appearing before the first section header are
    yielded in an initial ``section`` of ``None``.
    """
    current_section = None
    buffered = []
    for entry in yield_lines(s):
        if not entry.startswith("["):
            buffered.append(entry)
            continue
        if not entry.endswith("]"):
            raise ValueError("Invalid section heading", entry)
        # Flush the previous section before starting the new one; a
        # leading None section is only emitted when it has content.
        if current_section or buffered:
            yield current_section, buffered
        current_section = entry[1:-1].strip()
        buffered = []
    # wrap up last segment
    yield current_section, buffered
def _mkstemp(*args, **kw):
    """tempfile.mkstemp(), using the raw os.open captured at import time."""
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Invoke `f` once with the given arguments, then return `f` itself.

    Used below as a decorator to run module-initialization functions at
    definition time while keeping them importable by name.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export every public ResourceManager method (resource_string,
    # resource_filename, ...) as a module-level function.
    g.update(
        (name, getattr(manager, name))
        for name in dir(manager)
        if not name.startswith('_')
    )
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # Module-level conveniences bound to the master working set.
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    tuple(
        dist.activate(replace=False)
        for dist in working_set
    )
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # Publish every local defined above (require, run_script, ...) as a
    # module-level name.
    globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
    """
    Base class for warning about deprecations in ``pkg_resources``

    This class is not derived from ``DeprecationWarning``, and as such is
    visible by default.
    """
| 32.866444 | 92 | 0.620177 |
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from dante.vendor import six
from dante.vendor.six.six.moves import urllib, map, filter
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from dante.vendor import appdirs
from dante.vendor import packaging
__import__('dante.vendor.packaging.version')
__import__('dante.vendor.packaging.specifiers')
__import__('dante.vendor.packaging.requirements')
__import__('dante.vendor.packaging.markers')
# Make classes new-style under Python 2.
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 4):
    raise RuntimeError("Python 3.4 or later is required")
if six.PY2:
    # These builtin exception names don't exist on Python 2; define
    # placeholders so later references don't raise NameError.
    PermissionError = None
    NotADirectoryError = None
# Placeholders for names installed later by the module initializers
# (_initialize / _initialize_master_working_set); declared up front so
# static analysis and state tracking can see them.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """
def parse_version(v):
    """Parse `v` as a PEP 440 Version, falling back to LegacyVersion."""
    try:
        return packaging.version.Version(v)
    except packaging.version.InvalidVersion:
        # Non-conforming versions still sort (below all PEP 440 ones).
        return packaging.version.LegacyVersion(v)
# Registry mapping module-global state variable names to their
# save/restore kind ('dict', 'object' or 'none'); consumed by
# __getstate__/__setstate__ below.
_state_vars = {}
def _declare_state(vartype, **kw):
    # Install each keyword as a module global and record its kind.
    globals().update(kw)
    _state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
    # Snapshot every registered state variable via its type-specific
    # getter (_sget_dict / _sget_object / _sget_none).
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        state[k] = g['_sget_' + v](g[k])
    return state
def __setstate__(state):
    # Restore a snapshot produced by __getstate__ via the matching
    # type-specific setters.
    g = globals()
    for k, v in state.items():
        g['_sset_' + _state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
    # Copy on save so later mutation can't alias the snapshot.
    return val.copy()
def _sset_dict(key, ob, state):
    # Restore in place, preserving the original object's identity.
    ob.clear()
    ob.update(state)
def _sget_object(val):
    return val.__getstate__()
def _sset_object(key, ob, state):
    ob.__setstate__(state)
# 'none'-typed state variables are neither saved nor restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's identifier, with the *running* macOS
    version substituted in; elsewhere it's just the build platform."""
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not macOS version info we can parse; keep build platform
            pass
    return plat
# Public API re-exported by ``from pkg_resources import *``.
__all__ = [
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',
    'PEP440Warning',
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    'ensure_directory', 'normalize_path',
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    'PkgResourcesDeprecationWarning',
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for errors raised during dependency resolution."""
    def __repr__(self):
        # e.g. "ResolutionError('why',)"
        return "{}{}".format(type(self).__name__, repr(self.args))
class VersionConflict(ResolutionError):
    """An installed distribution conflicts with a requested requirement.

    args[0] is the installed distribution, args[1] the requirement.
    """
    _template = "{self.dist} is installed but {self.req} is required"
    @property
    def dist(self):
        return self.args[0]
    @property
    def req(self):
        return self.args[1]
    def report(self):
        # Render the template against this instance.
        return self._template.format(self=self)
    def with_context(self, required_by):
        """Return self, or a ContextualVersionConflict carrying the set of
        requirers when `required_by` is non-empty."""
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class ContextualVersionConflict(VersionConflict):
    """A VersionConflict whose third argument is the set of requirements
    that required the installed distribution."""
    _template = VersionConflict._template + ' by {self.required_by}'
    @property
    def required_by(self):
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found.

    args[0] is the requirement, args[1] the chain of requirers.
    """
    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")
    @property
    def req(self):
        return self.args[0]
    @property
    def requirers(self):
        return self.args[1]
    @property
    def requirers_str(self):
        if not self.requirers:
            return 'the application'
        return ', '.join(self.requirers)
    def report(self):
        return self._template.format(**locals())
    def __str__(self):
        return self.report()
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a module's ``__loader__``, and
    `provider_factory` is a function that, passed a module object, returns
    a resource provider for that module.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Requirements resolve through the global working set.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Import on demand, then fetch the now-registered module.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    # Return the macOS version as a list of string components, cached in
    # the mutable default argument after the first call.
    if not _cache:
        version = platform.mac_ver()[0]
        # Fallback when mac_ver() yields an empty string: read the
        # version straight from the system plist.
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
    """Return this platform's identifier for platform-specific eggs.

    Wraps ``sysconfig.get_platform()``, rebuilding the string on macOS
    when the reported platform lacks the ``macosx-`` prefix.
    """
    from sysconfig import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (
                int(version[0]), int(version[1]),
                _macosx_arch(machine),
            )
        except ValueError:
            # Unparseable version components; fall through to default.
            pass
    return plat
# Parse platform tags like 'macosx-10.15-x86_64' into (major, minor, arch).
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
# Parse platform tags like 'darwin-8.11.1-i386' into (maj, min, patch, arch).
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True
    # macOS special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6; all packages built after this point will use
            # the new macOS designation ('macosx' instead of 'darwin').
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macOS or legacy darwin
            return False
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script

    The caller's module globals are cleared (except ``__name__``) and the
    script executes in that namespace — so this must be called from a module
    that is happy to be overwritten, typically a stub ``__main__``.
    """
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string

    Accepts a project-name string, a ``Requirement``, or a ``Distribution``
    (returned unchanged); raises TypeError for anything else.
    """
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface (documentation only) for metadata access on a distribution.

    The docstring bodies below double as the method bodies — this class is a
    specification, not an implementation. Restored: the stripped-out bodies
    left each ``def`` without a suite, which is a syntax error.
    """

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted.
        """

    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources

    Like ``IMetadataProvider`` this is an interface specification: the
    docstrings serve as the method bodies (the stripped bodies previously
    left a syntax error).
    """

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        return (
            entry
            for dist in self
            for entry in dist.get_entry_map(group).values()
            if name is None or name == entry.name
        )
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # the caller's globals are cleared and reused as the script namespace
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)
        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False, extras=None):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If not supplied,
        it defaults to all distributions available within any entry or
        distribution in the working set.  `installer`, if supplied, will be
        invoked with each requirement that cannot be met by an already-installed
        distribution; it should return a ``Distribution`` or ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version.

        `extras` is a list of the extras to be used with these requirements.
        """
        # set up the stack; requirements are processed depth-first
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        req_extras = _ReqExtras()
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            if not req_extras.markers_pass(req, extras):
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(
                        req, ws, installer,
                        replace_conflicting=replace_conflicting
                    )
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras
            processed[req] = True
        # return list of distros to activate
        return to_activate
    def find_plugins(
            self, plugin_env, full_env=None, installer=None, fallback=True):
        """Find all activatable distributions in `plugin_env`

        `plugin_env` is an ``Environment`` of project names to scan;
        `full_env`, if supplied, is used to resolve their dependencies
        (defaulting to the working set's entries plus `plugin_env`).
        Returns ``(distributions, error_info)`` where `distributions` is a
        list of distributions found to be activatable and `error_info` maps
        unloadable distributions to the ``ResolutionError`` that prevented
        their activation.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions

        If `existing=True` (default),
        call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # notify all subscribers of a newly activated distribution
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # shallow-copy all mutable state so unpickling can't alias it
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class _ReqExtras(dict):
def markers_pass(self, req, extras=None):
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
    """Searchable snapshot of distributions on a search path"""
    def __init__(
            self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.6'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        py_compat = (
            self.python is None
            or dist.py_version is None
            or dist.py_version == self.python
        )
        return py_compat and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(
            self, req, working_set, installer=None, replace_conflicting=False):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` unless `replace_conflicting=True`.)  If a suitable
        distribution isn't active, this method returns the newest distribution
        in the environment that meets the ``Requirement`` in `req`.  If no
        suitable distribution is found, and `installer` is supplied, then the
        result of calling the environment's ``obtain(req, installer)`` method
        will be returned.
        """
        try:
            dist = working_set.find(req)
        except VersionConflict:
            if not replace_conflicting:
                raise
            dist = None
        if dist is not None:
            return dist
        # distributions are sorted newest-first, so first match wins
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.
        """
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility
# Old public name for Environment, kept so legacy callers keep working.
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""

    # Shared default; instances may override via set_extraction_path().
    extraction_path = None

    def __init__(self):
        # Paths extracted so far, used to refuse late extraction-path changes.
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache
            The following error occurred while trying to extract file(s)
            to the Python egg cache:
            {old_exc}
            The Python egg cache directory is currently set to:
            {cache_path}
            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = (
                "%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path
            )
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        `tempname` is the temporary filename just written; `filename` is its
        eventual name.  Here we make extracted files executable on POSIX.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully
        removed. This function does not have any concurrency protection, so
        it should generally only be called when the extraction path is a
        temporary directory exclusive to a single process.
        """
        # XXX
        # NOTE: restored the missing method body — the stripped docstring left
        # a bare ``def`` (syntax error).  Cleanup is intentionally a no-op
        # placeholder upstream as well.
def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    """
    return (
        os.environ.get('PYTHON_EGG_CACHE')
        or appdirs.user_cache_dir(appname='Python-Eggs')
    )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of characters outside ``[A-Za-z0-9.]`` collapse to a single '-'.
    """
    illegal_runs = re.compile('[^A-Za-z0-9.]+')
    return illegal_runs.sub('-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # fall back to a filename-safe transliteration of the raw string
        version = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters (other than '.' and '-') are
    replaced with a single '_', and the result is lowercased.
    """
    sanitized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        # strip location info that is meaningless for a non-file source
        e.filename = None
        e.lineno = None
        return e
    return False
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    """
    try:
        marker = packaging.markers.Marker(text)
        return marker.evaluate()
    except packaging.markers.InvalidMarker as e:
        # normalize to SyntaxError so callers need not know about packaging
        raise SyntaxError(e)
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def _get_metadata_path(self, name):
        return self._fn(self.egg_info, name)
    def has_metadata(self, name):
        if not self.egg_info:
            # falsy egg_info (None or '') means no metadata at all
            return self.egg_info
        path = self._get_metadata_path(name)
        return self._has(path)
    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        # raw bytes on Python 3 must be decoded; Python 2 passes str through
        return value.decode('utf-8') if six.PY3 else value
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))
    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []
    def run_script(self, script_name, namespace):
        """Execute the metadata script `scripts/<script_name>` in `namespace`."""
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError(
                "Script {script!r} not found in metadata at {self.egg_info!r}"
                .format(**locals()),
            )
        # normalize line endings before compiling
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # script only exists inside the zip: seed linecache so tracebacks
            # can still show source lines
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # join `resource_name` (always '/'-separated) onto `base` using the
        # platform separator, after validating it is a safe relative path
        self._validate_resource_path(resource_name)
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    @staticmethod
    def _validate_resource_path(path):
        """
        Validate the resource paths according to the docs.
        Reject '..' and POSIX-absolute paths outright; Windows-style absolute
        paths currently only warn (future releases will raise).
        """
        invalid = (
            os.path.pardir in path.split(posixpath.sep) or
            posixpath.isabs(path) or
            ntpath.isabs(path)
        )
        if not invalid:
            return
        msg = "Use of .. or absolute path in a resource path is not allowed."
        # Aggressively disallow Windows absolute paths
        if ntpath.isabs(path) and not posixpath.isabs(path):
            raise ValueError(msg)
        # for compatibility, warn; in future
        # raise ValueError(msg)
        warnings.warn(
            msg[:-1] + " and will raise exceptions in a future release.",
            DeprecationWarning,
            stacklevel=4,
        )
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback: any loader type without a more specific factory uses NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()
    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # walk upward until a path component ending in '.egg' is found
        while path != old:
            if _is_egg_path(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self, path):
        return os.path.isdir(path)
    def _listdir(self, path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()
    @classmethod
    def _register(cls):
        # register for both source and bytecode-only file loaders
        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
        for name in loader_names:
            loader_cls = getattr(importlib_machinery, name, type(None))
            register_loader_type(loader_cls, cls)
# Hook DefaultProvider up to the standard importlib file loaders.
DefaultProvider._register()
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    module_path = None
    _isdir = _has = lambda self, path: False
    def _get(self, path):
        return ''
    def _listdir(self, path):
        return []
    def __init__(self):
        # deliberately skip NullProvider.__init__: there is no module
        pass
# Shared singleton; EmptyProvider is stateless so one instance suffices.
empty_provider = EmptyProvider()
class ZipManifests(dict):
@classmethod
def build(cls, path):
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        # rebuild only when the zip file changed on disk
        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)
        return self[path].manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    eagers = None
    _zip_manifests = MemoizedZipManifests()
    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive + os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile
        # subpath usable with the zipimport directory cache for our target
        # archive.
        fspath = fspath.rstrip(os.sep)
        if fspath == self.loader.archive:
            return ''
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )
    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )
    @property
    def zipinfo(self):
        return self._zip_manifests.load(self.loader.archive)
    def get_resource_filename(self, manager, resource_name):
        """Extract the resource (and any eager siblings) and return its path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        """Extract `zip_path` (file or directory) into the manager's cache.

        Writes via a temp name and renames into place so concurrent
        extractors can race safely; returns the real filesystem path.
        """
        if zip_path in self._index():
            # directory: extract each child, return the directory path
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            outf, tmpnam = _mkstemp(
                ".$extract",
                dir=os.path.dirname(real_path),
            )
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        (exists, same size, same mtime, same bytes).
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        # resources listed in native_libs.txt / eager_resources.txt must all
        # be extracted whenever any one of them is requested
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # lazily-built mapping of directory zip-subpath -> child names
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
# Modules loaded from zip archives/eggs get ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and made available to callers.
    """
    def __init__(self, path):
        self.path = path
    def _get_metadata_path(self, name):
        return self.path
    def has_metadata(self, name):
        return name == 'PKG-INFO' and os.path.isfile(self.path)
    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        with io.open(self.path, encoding='utf-8', errors="replace") as f:
            metadata = f.read()
        self._warn_on_replacement(metadata)
        return metadata
    def _warn_on_replacement(self, metadata):
        # warn when the utf-8 decode had to substitute replacement chars
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char in metadata:
            tmpl = "{self.path} could not be properly decoded in UTF-8"
            msg = tmpl.format(**locals())
            warnings.warn(msg)
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
    """
    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
# Maps importer types to distribution-finder callables.
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example.
    """
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir(''):
        if _is_egg_path(subitem):
            subpath = os.path.join(path_item, subitem)
            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
            for dist in dists:
                yield dist
        elif subitem.lower().endswith('.dist-info'):
            subpath = os.path.join(path_item, subitem)
            submeta = EggMetadata(zipimport.zipimporter(subpath))
            submeta.egg_info = subpath
            yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    # finder for importer types that can never contain distributions
    return ()
# default: unknown importer types yield no distributions
register_finder(object, find_nothing)
def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.
    """
    def _by_version(name):
        """
        Parse each component of the filename
        """
        name, ext = os.path.splitext(name)
        parts = itertools.chain(name.split('-'), [ext])
        return [packaging.version.parse(part) for part in parts]
    return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if _is_unpacked_egg(path_item):
        # the path item itself is an unpacked egg
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return
    entries = safe_listdir(path_item)
    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )
    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist
def dist_factory(path_item, entry, only):
    """
    Return a dist_factory for a path_item and entry:
    metadata dirs/files, eggs, and egg-links each get their own handler;
    anything else gets the falsy NoDists() sentinel.
    """
    lower = entry.lower()
    is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
    return (
        distributions_from_metadata
        if is_meta else
        find_distributions
        if not only and _is_egg_path(entry) else
        resolve_egg_link
        if not only and lower.endswith('.egg-link') else
        NoDists()
    )
class NoDists:
    """
    >>> bool(NoDists())
    False

    >>> list(NoDists()('anything'))
    []
    """
    def __bool__(self):
        return False
    if six.PY2:
        # Python 2 spells the truthiness hook __nonzero__
        __nonzero__ = __bool__
    def __call__(self, fullpath):
        return iter(())
def safe_listdir(path):
    """
    Attempt to list contents of path, but suppress some exceptions:
    missing paths, non-directories, and permission problems all yield an
    empty tuple; any other OSError propagates.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        return ()
    except OSError as exc:
        # Ignore the directory if it does not exist, is not a directory, or
        # permission was denied
        benign_codes = (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
        # Python 2 on Windows reports winerror 267 instead of errno
        if exc.errno in benign_codes or getattr(exc, "winerror", None) == 267:
            return ()
        raise
def distributions_from_metadata(path):
    """Yield one Distribution for the .egg-info/.dist-info dir or file at
    `path`."""
    root = os.path.dirname(path)
    if os.path.isdir(path):
        if len(os.listdir(path)) == 0:
            # empty metadata dir; skip
            return
        metadata = PathMetadata(root, path)
    else:
        metadata = FileMetadata(path)
    entry = os.path.basename(path)
    yield Distribution.from_location(
        root, entry, metadata, precedence=DEVELOP_DIST,
    )
def non_empty_lines(path):
    """
    Yield non-empty lines from file at path, stripped of surrounding
    whitespace.
    """
    with open(path) as handle:
        for stripped in (raw.strip() for raw in handle):
            if stripped:
                yield stripped
def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    """
    referenced_paths = non_empty_lines(path)
    resolved_paths = (
        os.path.join(os.path.dirname(path), ref)
        for ref in referenced_paths
    )
    dist_groups = map(find_distributions, resolved_paths)
    # only the first referenced path is considered
    return next(dist_groups, ())
# Filesystem path entries are scanned with find_on_path.
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Maps importer types to namespace-package handlers, and tracks which
# packages have been declared as namespace packages.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers may return ``None`` to indicate that they were unable
    to find a useful path entry.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)

    Returns the subpath contributed by `path_item`, or None when the item
    does not contain the package.
    """
    importer = get_importer(path_item)
    if importer is None:
        return None
    # capture warnings due to #1111
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create an empty namespace module; __path__ is filled in below
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # loading the module may rewrite __path__, so re-sort afterwards
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]
    def safe_sys_path_index(entry):
        """
        Return the index of a path in sys.path, if found.
        Otherwise return infinity to sort the entry last.
        """
        try:
            return sys_path.index(entry)
        except ValueError:
            return float('inf')
    def position_in_sys_path(path):
        """
        Return the ordering key for a __path__ entry: the sys.path index of
        its base directory (the path minus the package's own components).
        """
        path_parts = path.split(os.sep)
        module_parts = package_name.count('.') + 1
        parts = path_parts[:-module_parts]
        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
    new_path = sorted(orig_path, key=position_in_sys_path)
    new_path = [_normalize_cached(p) for p in new_path]
    if isinstance(module.__path__, list):
        # mutate in place so existing references stay valid
        module.__path__[:] = new_path
    else:
        module.__path__ = new_path
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # the global import lock guards _namespace_packages and sys.modules
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path = sys.path
        parent, _, _ = packageName.rpartition('.')
        if parent:
            # make sure the parent namespace package is declared first
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent or None, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                # recurse into any sub-namespace packages
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item) == normalized:
            # already in __path__; don't add it again
            break
    else:
        # only return the path if it's not already there
        return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    # default handler: unknown importer types contribute no subpath
    return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
    """Memoized normalize_path(); the mutable default is the cache."""
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
def _is_egg_path(path):
    """
    Determine if given path appears to be an egg (by its extension).
    """
    return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg
    (a .egg directory containing EGG-INFO/PKG-INFO).
    """
    return (
        _is_egg_path(path) and
        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
    )
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence

    A string is split into lines; each line is stripped, and blank lines and
    lines whose first character is ``#`` are skipped.  A non-string iterable
    is flattened recursively.

    NOTE: restores the comparison literal on the comment-filtering line,
    which had been truncated (``s.startswith('`` with no closing quote was a
    syntax error).
    """
    if isinstance(strs, six.string_types):
        for s in strs.splitlines():
            s = s.strip()
            # skip blank lines/comments
            if s and not s.startswith('#'):
                yield s
    else:
        for ss in strs:
            for s in yield_lines(ss):
                yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint:
    """Object representing an advertised importable object.

    The textual form is ``name = module:attrs [extras]``; `parse`,
    `parse_group` and `parse_map` build EntryPoint instances from that
    syntax, and `resolve`/`load` perform the actual import.
    """

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        # attrs is the dotted attribute path within the module; extras are
        # the distribution extras that must be installed before loading.
        self.attrs = tuple(attrs)
        self.extras = tuple(extras)
        self.dist = dist

    def __str__(self):
        # Render back to the canonical "name = module:attrs [extras]" form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """Require this entry point's distribution (unless disabled), then
        resolve and return the target object.  Extra positional/keyword
        arguments are deprecated pass-throughs to require()."""
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                PkgResourcesDeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """Import the module and walk the dotted attrs chain to the target."""
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            # surface a missing attribute as an import failure
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        """Ensure the distribution (with requested extras) is on sys.path."""
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))

    # Grammar for a single entry-point line: name, '=', dotted module,
    # optional ':attrs' chain, optional '[extras]' suffix.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry-point line into an EntryPoint instance."""
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        """Parse the '[extras]' suffix by piggybacking on Requirement parsing
        with a dummy project name; reject anything carrying version specs."""
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry-point group: returns {name: EntryPoint}, rejecting
        invalid group names and duplicate entry-point names."""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a whole entry_points.txt (or equivalent dict) into
        {group: {name: EntryPoint}}."""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                # content before the first [section] heading is only legal
                # when empty
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
    """Extract the value of the first 'Version:' header from metadata lines.

    Returns the safe_version()-normalized value, or None when no Version
    header is present (or it normalizes to an empty string).
    """
    version_lines = (
        line for line in lines if line.lower().startswith('version:')
    )
    header = next(version_lines, '')
    _, _, value = header.partition(':')
    return safe_version(value.strip()) or None
class Distribution:
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata_path_for_display(self, name):
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
path = self._provider._get_metadata_path(name)
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(
attr for attr in self._provider.__dir__()
if not attr.startswith('_')
)
)
if not hasattr(object, '__dir__'):
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
    """Distribution backed by .egg-info metadata."""

    def _reload_version(self):
        """Re-read the version from PKG-INFO, preferring it over the one
        guessed from the filename; returns self for chaining."""
        metadata_version = self._get_version()
        if metadata_version:
            self._version = metadata_version
        return self
class DistInfoDistribution(Distribution):
    """Distribution based on the *.dist-info* (wheel-style) metadata format."""

    # dist-info stores its core metadata under METADATA, not PKG-INFO
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache the METADATA file (email-header format)."""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        # lazily computed mapping of {extra-name-or-None: [requirements]}
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies from Requires-Dist
        headers, splitting them into base requirements (key None) and
        per-extra requirement lists."""
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))

        def reqs_for_extra(extra):
            # requirements whose environment marker holds when *extra* is active
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            # per-extra list excludes requirements already in the base set
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map metadata extension -> Distribution subclass used to represent it
# (consulted by Distribution.from_location when sniffing a basename).
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module.

    Walks up the stack until a frame whose globals differ from this
    module's is found, so the warning points at user code rather than at
    pkg_resources internals.
    """
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # ran out of frames; fall back to the deepest level reached
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""

    def __str__(self):
        # join all constructor args into one readable message
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in *strs*.

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    Backslash continuations join physical lines; a ' #' sequence begins a
    trailing comment (a bare '#' may legitimately appear inside a URL, so
    the preceding space is required).

    Fix: the comment-stripping conditional was truncated in this copy
    (``if '`` / ``line[:line.find('``); restored the ``' #'`` test and
    slice from upstream pkg_resources.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            line = line[:-2].strip()
            try:
                line += next(lines)
            except StopIteration:
                return
        yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
    """A pkg_resources-flavored requirement.

    Wraps ``packaging.requirements.Requirement``, adding the legacy
    attributes (``project_name``, ``key``, ``specs``, ``extras``) and
    hash/containment semantics that pkg_resources callers expect.
    """

    def __init__(self, requirement_string):
        """Parse *requirement_string*; raises RequirementParseError on
        invalid input instead of packaging's InvalidRequirement."""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        # key is the canonical lowercase name used for lookups
        self.project_name, self.key = project_name, project_name.lower()
        # legacy list of (operator, version) pairs
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # tuple used for equality and hashing; marker compared as text
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        """True if *item* (a Distribution or version string) satisfies this
        requirement; a Distribution must also match by key."""
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        """Parse exactly one requirement from *s* (raises if zero or many)."""
        req, = parse_requirements(s)
        return req
def _always_object(classes):
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
    """Return the adapter registered for the most specific class of *ob*.

    Walks the MRO of ``ob``'s class (with ``object`` guaranteed last) and
    returns the first registry entry found; None if nothing matches.
    """
    mro = inspect.getmro(getattr(ob, '__class__', type(ob)))
    for candidate in _always_object(mro):
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Ensure that the parent directory of *path* exists (mkdir -p style)."""
    dirname = os.path.dirname(path)
    # py31compat shim provides makedirs(exist_ok=...) on older Pythons
    py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing variant of ensure_directory().

    Uses the raw ``mkdir``/``isdir``/``split`` names bound at module level
    (from os / os.path) so directory creation works even when os.open has
    been replaced by the sandbox.
    """
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # recursively create missing ancestors first
        _bypass_ensure_directory(dirname)
        try:
            mkdir(dirname, 0o755)
        except FileExistsError:
            # created concurrently by someone else; that's fine
            pass
def split_sections(s):
    """Split lines of *s* into ``(section, content)`` pairs.

    A section heading looks like ``[name]``; the lines preceding the first
    heading are yielded under section None.  Blank lines and comments have
    already been removed by yield_lines().  Raises ValueError for a line
    that opens a heading but does not close it.
    """
    section, content = None, []
    for line in yield_lines(s):
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # flush the previous segment before starting a new section
        if section or content:
            yield section, content
        section, content = line[1:-1].strip(), []
    # wrap up last segment
    yield section, content
def _mkstemp(*args, **kw):
    """tempfile.mkstemp() with sandboxing temporarily disabled.

    Swaps the module-level pristine ``os_open`` in for ``os.open`` for the
    duration of the call, restoring the (possibly sandboxed) original in a
    finally block.
    """
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
def _call_aside(f, *args, **kwargs):
    """Invoke *f* immediately with the given arguments and return *f*.

    Used as a decorator to run module-initialization functions once at
    import time while keeping the function name bound for introspection.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    """Expose the global ResourceManager's public API at module level.

    Runs once at import time (via @_call_aside).  The mutable default
    ``g=globals()`` is deliberate: it captures this module's namespace.
    """
    manager = ResourceManager()
    g['_manager'] = manager
    # re-export every public ResourceManager method as a module-level name
    g.update(
        (name, getattr(manager, name))
        for name in dir(manager)
        if not name.startswith('_')
    )
@_call_aside
def _initialize_master_working_set():
    """Build the master WorkingSet and publish its API at module level.

    Runs once at import time (via @_call_aside): activates every
    distribution already on sys.path, installs an activation listener for
    future additions, then rebuilds the working set's entries from
    sys.path and copies the local bindings (require, run_script, ...)
    into module globals.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # module-level conveniences delegating to the master working set
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    run_main = run_script
    # activate all distributions already installed (tuple() just drains
    # the generator for its side effects)
    tuple(
        dist.activate(replace=False)
        for dist in working_set
    )
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order and content of sys.path
    list(map(working_set.add_entry, sys.path))
    # publish every local name defined above as a module-level global
    globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
| true | true |
f721df51ffd92a86fa4f5ed499e8968701513d30 | 3,931 | py | Python | food_ke/scripts/candidate_edges.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | 1 | 2022-02-07T10:04:35.000Z | 2022-02-07T10:04:35.000Z | food_ke/scripts/candidate_edges.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | null | null | null | food_ke/scripts/candidate_edges.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | null | null | null | import uuid
from collections import defaultdict
from itertools import combinations
from typing import Any, Dict, List, Tuple
from dagster import OutputDefinition, pipeline, solid
from tinydb import Query
from food_ke.labelstudio import (
LSAnnotationResult,
LSAnnotationValue,
LSPreAnnotation,
LSPrediction,
)
from food_ke.scripts.custom import CandidateEdge, EntityMention
from food_ke.scripts.modes import dev, prod
from food_ke.scripts.utils import groupby_doi
from food_ke.stubs import NER_FROM_NAME, NER_TO_NAME
@solid(required_resource_keys={"ner_raw_io_manager"})
def get_ner_mentions(context) -> List[EntityMention]:
    """Dagster solid: load raw NER entity mentions via the configured
    ``ner_raw_io_manager`` resource."""
    return context.resources.ner_raw_io_manager.load_input(context)
@solid(
    output_defs=[OutputDefinition(io_manager_key="candidate_edges_io_manager")]
)
def generate_candidate_edges(
    grouped_mentions: Dict[str, List[EntityMention]]
) -> List[CandidateEdge]:
    """Pair every FOOD mention with every UNITS mention sharing a DOI.

    For each document's mentions, every unordered pair with one FOOD and
    one UNITS entity becomes a CandidateEdge with the FOOD mention as the
    head and the UNITS mention as the tail, regardless of extraction order.
    """
    edges = []
    for doi, mentions in grouped_mentions.items():
        for first, second in combinations(mentions, 2):
            pair_types = (first.e_type, second.e_type)
            if pair_types == ("FOOD", "UNITS"):
                edges.append(CandidateEdge(first, second, doi))
            elif pair_types == ("UNITS", "FOOD"):
                edges.append(CandidateEdge(second, first, doi))
    return edges
def entity_mention_to_ls_annotation_result(
    entity_mention: EntityMention,
) -> LSAnnotationResult:
    """Convert an EntityMention into a Label Studio annotation-result span.

    The span is expressed against the document HTML: start/end both use
    the mention's xpath (the span lives in a single node) with character
    offsets inside it.  A fresh UUID is assigned so relation annotations
    can reference this span by id.
    """
    val = LSAnnotationValue(
        start=entity_mention.xpath,
        end=entity_mention.xpath,
        startOffset=entity_mention.start_offset,
        endOffset=entity_mention.end_offset,
        text=entity_mention.text,
        htmllabels=[entity_mention.e_type],
    )
    return LSAnnotationResult(
        value=val,
        id=str(uuid.uuid4()),
        from_name=NER_FROM_NAME,
        to_name=NER_TO_NAME,
        type="hypertextlabels",
        doi=entity_mention.doi,
    )
@solid
def convert_candidate_edges_to_ls_predictions(
    candidate_edges: List[CandidateEdge],
) -> List[LSPrediction]:
    """Wrap each candidate edge as a Label Studio prediction.

    Each prediction bundles the head span, the tail span, and an unlabeled
    right-directed relation dict connecting the two by id.
    """
    predictions = []
    for candidate in candidate_edges:
        head = entity_mention_to_ls_annotation_result(candidate.head)
        tail = entity_mention_to_ls_annotation_result(candidate.tail)
        relation = {
            "from_id": head.id,
            "to_id": tail.id,
            "type": "relation",
            "direction": "right",
            "labels": [],
        }
        predictions.append(
            LSPrediction(
                result=[head, tail, relation],
                score=1.0,
                model_version="candidate_edges_v1",
                doi=candidate.doi,
            )
        )
    return predictions
@solid(
    output_defs=[
        OutputDefinition(
            io_manager_key="candidate_edges_ls_predictions_io_manager"
        )
    ],
    required_resource_keys={"articles_db"},
)
def convert_ls_predictions_to_preannotations(
    context,
    ls_predictions: List[LSPrediction],
) -> List[LSPreAnnotation]:
    """Group predictions by DOI and attach each group to its article HTML.

    Looks up each DOI in the ``articles_db`` TinyDB resource and raises
    ValueError when the article is missing, so broken links surface early.
    """
    predictions_grouped = groupby_doi(ls_predictions)
    preannotations = []
    for doi, predictions in predictions_grouped.items():
        q = Query()
        results = context.resources.articles_db.search(q.doi == doi)
        if not results:
            raise ValueError(f"article not found in articles_db for doi {doi}")
        data = {
            "fulltext_html": results[0]["html"],
            "doi": doi,
        }  # should be url to html
        preannotation = LSPreAnnotation(data=data, predictions=predictions)
        preannotations.append(preannotation)
    return preannotations
@pipeline(mode_defs=[dev, prod])
def generate_candidate_edges_pipeline():
    """Dagster pipeline: NER mentions -> candidate edges -> LS pre-annotations."""
    mentions = get_ner_mentions()
    grouped = groupby_doi(mentions)
    convert_ls_predictions_to_preannotations(
        convert_candidate_edges_to_ls_predictions(
            generate_candidate_edges(grouped)
        )
    )
| 31.198413 | 79 | 0.682524 | import uuid
from collections import defaultdict
from itertools import combinations
from typing import Any, Dict, List, Tuple
from dagster import OutputDefinition, pipeline, solid
from tinydb import Query
from food_ke.labelstudio import (
LSAnnotationResult,
LSAnnotationValue,
LSPreAnnotation,
LSPrediction,
)
from food_ke.scripts.custom import CandidateEdge, EntityMention
from food_ke.scripts.modes import dev, prod
from food_ke.scripts.utils import groupby_doi
from food_ke.stubs import NER_FROM_NAME, NER_TO_NAME
@solid(required_resource_keys={"ner_raw_io_manager"})
def get_ner_mentions(context) -> List[EntityMention]:
return context.resources.ner_raw_io_manager.load_input(context)
@solid(
output_defs=[OutputDefinition(io_manager_key="candidate_edges_io_manager")]
)
def generate_candidate_edges(
grouped_mentions: Dict[str, List[EntityMention]]
) -> List[CandidateEdge]:
candidate_edges = []
for doi, mentions in grouped_mentions.items():
for e1, e2 in combinations(mentions, 2):
if e1.e_type == "FOOD" and e2.e_type == "UNITS":
candidate_edges.append(CandidateEdge(e1, e2, doi))
elif e2.e_type == "FOOD" and e1.e_type == "UNITS":
candidate_edges.append(CandidateEdge(e2, e1, doi))
return candidate_edges
def entity_mention_to_ls_annotation_result(
entity_mention: EntityMention,
) -> LSAnnotationResult:
val = LSAnnotationValue(
start=entity_mention.xpath,
end=entity_mention.xpath,
startOffset=entity_mention.start_offset,
endOffset=entity_mention.end_offset,
text=entity_mention.text,
htmllabels=[entity_mention.e_type],
)
return LSAnnotationResult(
value=val,
id=str(uuid.uuid4()),
from_name=NER_FROM_NAME,
to_name=NER_TO_NAME,
type="hypertextlabels",
doi=entity_mention.doi,
)
@solid
def convert_candidate_edges_to_ls_predictions(
candidate_edges: List[CandidateEdge],
) -> List[LSPrediction]:
output = []
for edge in candidate_edges:
head_ls_annot = entity_mention_to_ls_annotation_result(edge.head)
tail_ls_annot = entity_mention_to_ls_annotation_result(edge.tail)
edge_dict = {
"from_id": head_ls_annot.id,
"to_id": tail_ls_annot.id,
"type": "relation",
"direction": "right",
"labels": [],
}
result = [head_ls_annot, tail_ls_annot, edge_dict]
pred = LSPrediction(
result=result,
score=1.0,
model_version="candidate_edges_v1",
doi=edge.doi,
)
output.append(pred)
return output
@solid(
output_defs=[
OutputDefinition(
io_manager_key="candidate_edges_ls_predictions_io_manager"
)
],
required_resource_keys={"articles_db"},
)
def convert_ls_predictions_to_preannotations(
context,
ls_predictions: List[LSPrediction],
) -> List[LSPreAnnotation]:
predictions_grouped = groupby_doi(ls_predictions)
preannotations = []
for doi, predictions in predictions_grouped.items():
q = Query()
results = context.resources.articles_db.search(q.doi == doi)
if not results:
raise ValueError(f"article not found in articles_db for doi {doi}")
data = {
"fulltext_html": results[0]["html"],
"doi": doi,
}
preannotation = LSPreAnnotation(data=data, predictions=predictions)
preannotations.append(preannotation)
return preannotations
@pipeline(mode_defs=[dev, prod])
def generate_candidate_edges_pipeline():
mentions = get_ner_mentions()
grouped = groupby_doi(mentions)
convert_ls_predictions_to_preannotations(
convert_candidate_edges_to_ls_predictions(
generate_candidate_edges(grouped)
)
)
| true | true |
f721df7c81e5b22b791a270c424bdef9a82993ec | 4,110 | py | Python | sdk/python/pulumi_azure_native/resources/v20201001/get_deployment_at_scope.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/resources/v20201001/get_deployment_at_scope.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/resources/v20201001/get_deployment_at_scope.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDeploymentAtScopeResult',
'AwaitableGetDeploymentAtScopeResult',
'get_deployment_at_scope',
]
@pulumi.output_type
class GetDeploymentAtScopeResult:
"""
Deployment information.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the deployment.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
the location of the deployment.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the deployment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.DeploymentPropertiesExtendedResponse':
"""
Deployment properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Deployment tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the deployment.
"""
return pulumi.get(self, "type")
class AwaitableGetDeploymentAtScopeResult(GetDeploymentAtScopeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeploymentAtScopeResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_deployment_at_scope(deployment_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeploymentAtScopeResult:
"""
Deployment information.
:param str deployment_name: The name of the deployment.
:param str scope: The resource scope.
"""
__args__ = dict()
__args__['deploymentName'] = deployment_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:resources/v20201001:getDeploymentAtScope', __args__, opts=opts, typ=GetDeploymentAtScopeResult).value
return AwaitableGetDeploymentAtScopeResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| 31.136364 | 151 | 0.625061 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDeploymentAtScopeResult',
'AwaitableGetDeploymentAtScopeResult',
'get_deployment_at_scope',
]
@pulumi.output_type
class GetDeploymentAtScopeResult:
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.DeploymentPropertiesExtendedResponse':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetDeploymentAtScopeResult(GetDeploymentAtScopeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeploymentAtScopeResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_deployment_at_scope(deployment_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeploymentAtScopeResult:
__args__ = dict()
__args__['deploymentName'] = deployment_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:resources/v20201001:getDeploymentAtScope', __args__, opts=opts, typ=GetDeploymentAtScopeResult).value
return AwaitableGetDeploymentAtScopeResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| true | true |
f721e066c1aa8136fae4963f5e9d06ec258240a4 | 9,368 | py | Python | bin.src/generate_lensed_hosts_agn.py | mpwiesner/sims_GCRCatSimInterface | 831e78ec8eb610983768d4657fbff9744cb17249 | [
"BSD-3-Clause"
] | null | null | null | bin.src/generate_lensed_hosts_agn.py | mpwiesner/sims_GCRCatSimInterface | 831e78ec8eb610983768d4657fbff9744cb17249 | [
"BSD-3-Clause"
] | null | null | null | bin.src/generate_lensed_hosts_agn.py | mpwiesner/sims_GCRCatSimInterface | 831e78ec8eb610983768d4657fbff9744cb17249 | [
"BSD-3-Clause"
] | 2 | 2018-04-12T20:49:23.000Z | 2018-08-04T00:08:46.000Z | import numpy as np
import os
import argparse
import pylab as pl
import subprocess as sp
import astropy.io.fits as pyfits
import pandas as pd
import scipy.special as ss
import om10_lensing_equations as ole
# Input catalogs live under the package data directories; both environment
# variables must point at checked-out repositories.
data_dir = os.path.join(os.environ['SIMS_GCRCATSIMINTERFACE_DIR'], 'data')
twinkles_data_dir = os.path.join(os.environ['TWINKLES_DIR'], 'data')
outdefault = os.path.join(data_dir,'outputs')
parser = argparse.ArgumentParser(description='The location of the desired output directory')
parser.add_argument("--outdir", dest='outdir1', type=str, default = outdefault,
                    help='Output location for FITS stamps')
args = parser.parse_args()
outdir = args.outdir1  # destination directory for the generated FITS stamps
def load_in_data_agn():
    """Load the AGN host-galaxy catalogs and the OM10 lens catalog.

    Returns
    -------
    tuple
        ``(lens_hdus, bulge_rows, disk_rows)`` where ``lens_hdus`` is the
        opened Twinkles lens FITS file and the two data frames contain only
        the rows whose bulge ``image_number`` is 0 (the first image of each
        lensed system; the same mask is applied to the disk catalog).
    """
    bulge_cat = pd.read_csv(os.path.join(data_dir, 'agn_host_bulge.csv.gz'))
    disk_cat = pd.read_csv(os.path.join(data_dir, 'agn_host_disk.csv.gz'))
    # Keep only the first image of each system; reuse the bulge mask for
    # the disk catalog (the two frames are row-aligned).
    first_image = bulge_cat['image_number'] == 0
    lens_hdus = pyfits.open(
        os.path.join(twinkles_data_dir, 'twinkles_lenses_v2.fits'))
    return lens_hdus, bulge_cat[:][first_image], disk_cat[:][first_image]
def create_cats_agns(index, hdu_list, ahb_list, ahd_list):
    """Assemble lens and host-galaxy source dictionaries for one AGN system.

    Parameters
    ----------
    index: int
        Row index into the host-galaxy data frames.
    hdu_list:
        FITS HDU list whose extension 1 holds the OM10 lens parameters.
    ahb_list:
        Data frame with host-galaxy bulge parameters.
    ahd_list:
        Data frame with host-galaxy disk parameters.

    Returns
    -------
    tuple of dict
        (lens parameters, bulge source parameters, disk source parameters)
    """
    # Bug fix: these four identifiers were read from the module-level
    # ``ahd`` global instead of the ``ahd_list`` argument, which silently
    # broke any caller that passed a different data frame.
    twinkles_ID = ahd_list['twinkles_system'][index]
    UID_lens = ahd_list['uniqueId_lens'][index]
    Ra_lens = ahd_list['raPhoSim_lens'][index]
    Dec_lens = ahd_list['decPhoSim_lens'][index]
    # Select the OM10 row matching this Twinkles system.
    idx = hdu_list[1].data['twinklesId'] == twinkles_ID
    lid = hdu_list[1].data['LENSID'][idx][0]
    xl1 = 0.0                                      # lens x position, arcsec
    xl2 = 0.0                                      # lens y position, arcsec
    vd = hdu_list[1].data['VELDISP'][idx][0]       # velocity dispersion
    zd = hdu_list[1].data['ZLENS'][idx][0]         # lens redshift
    ql = 1.0 - hdu_list[1].data['ELLIP'][idx][0]   # axis ratio b/a
    phi = hdu_list[1].data['PHIE'][idx][0]         # lens position angle, deg
    ys1 = hdu_list[1].data['XSRC'][idx][0]         # source x position, arcsec
    ys2 = hdu_list[1].data['YSRC'][idx][0]         # source y position, arcsec
    ext_shr = hdu_list[1].data['GAMMA'][idx][0]    # external shear amplitude
    ext_phi = hdu_list[1].data['PHIG'][idx][0]     # external shear angle, deg
    ximg = hdu_list[1].data['XIMG'][idx][0]        # lensed image x positions
    yimg = hdu_list[1].data['YIMG'][idx][0]        # lensed image y positions
    #----------------------------------------------------------------------------
    lens_cat = {'xl1'        : xl1,
                'xl2'        : xl2,
                'ql'         : ql,
                'vd'         : vd,
                'phl'        : phi,
                'gamma'      : ext_shr,
                'phg'        : ext_phi,
                'zl'         : zd,
                'ximg'       : ximg,
                'yimg'       : yimg,
                'twinklesid' : twinkles_ID,
                'lensid'     : lid,
                'index'      : index,
                'UID_lens'   : UID_lens,
                'Ra_lens'    : Ra_lens,
                'Dec_lens'   : Dec_lens}
    #----------------------------------------------------------------------------
    # Bulge component of the host galaxy.
    mag_src_b = ahb_list['phosimMagNorm'][index]
    qs_b = ahb_list['minorAxis'][index]/ahb_list['majorAxis'][index]
    Reff_src_b = np.sqrt(ahb_list['minorAxis'][index]*ahb_list['majorAxis'][index])
    phs_b = ahb_list['positionAngle'][index]
    ns_b = ahb_list['sindex'][index]
    zs_b = ahb_list['redshift'][index]
    sed_src_b = ahb_list['sedFilepath'][index]
    srcsP_bulge = {'ys1'        : ys1,
                   'ys2'        : ys2,
                   'mag_src'    : mag_src_b,
                   'Reff_src'   : Reff_src_b,
                   'qs'         : qs_b,
                   'phs'        : phs_b,
                   'ns'         : ns_b,
                   'zs'         : zs_b,
                   'sed_src'    : sed_src_b,
                   'components' : 'bulge'}
    #----------------------------------------------------------------------------
    # Disk component of the host galaxy.
    mag_src_d = ahd_list['phosimMagNorm'][index]
    qs_d = ahd_list['minorAxis'][index]/ahd_list['majorAxis'][index]
    Reff_src_d = np.sqrt(ahd_list['minorAxis'][index]*ahd_list['majorAxis'][index])
    phs_d = ahd_list['positionAngle'][index]
    ns_d = ahd_list['sindex'][index]
    zs_d = ahd_list['redshift'][index]
    sed_src_d = ahd_list['sedFilepath'][index]
    srcsP_disk = {'ys1'        : ys1,
                  'ys2'        : ys2,
                  'mag_src'    : mag_src_d,
                  'Reff_src'   : Reff_src_d,
                  'qs'         : qs_d,
                  'phs'        : phs_d,
                  'ns'         : ns_d,
                  'zs'         : zs_d,
                  'sed_src'    : sed_src_d,
                  'components' : 'disk'}
    #----------------------------------------------------------------------------
    return lens_cat, srcsP_bulge, srcsP_disk
def lensed_sersic_2d(xi1, xi2, yi1, yi2, source_cat, lens_cat):
    """Compute the lensed image of a Sersic source and its lensed magnitude.

    Parameters
    ----------
    xi1, xi2:
        Image-plane coordinate grids, arcseconds.
    yi1, yi2:
        Source-plane coordinate grids (ray-traced image-plane grids).
    source_cat: dict
        Source parameters: position, magnitude, effective radius,
        axis ratio, position angle and Sersic index.
    lens_cat: dict
        Lens parameters (unused here; kept for interface compatibility).

    Returns
    -------
    (float, ndarray)
        Lensed total magnitude and the lensed surface-brightness image.
    """
    ysc1 = source_cat['ys1']          # x position of the source, arcseconds
    ysc2 = source_cat['ys2']          # y position of the source, arcseconds
    mag_tot = source_cat['mag_src']   # total (unlensed) magnitude of the source
    Reff_arc = source_cat['Reff_src'] # effective radius of the source, arcseconds
    qs = source_cat['qs']             # axis ratio of the source, b/a
    phs = source_cat['phs']           # orientation of the source, degrees
    ns = source_cat['ns']             # Sersic index of the source
    #----------------------------------------------------------------------
    g_limage = ole.sersic_2d(yi1,yi2,ysc1,ysc2,Reff_arc,qs,phs,ns)
    g_source = ole.sersic_2d(xi1,xi2,ysc1,ysc2,Reff_arc,qs,phs,ns)
    # Bug fix: astronomical magnitudes are defined with the base-10
    # logarithm (m = -2.5 * log10(flux ratio)); the original used np.log
    # (natural log), understating the magnification term by ln(10).
    mag_lensed = mag_tot - 2.5*np.log10(np.sum(g_limage)/np.sum(g_source))
    return mag_lensed, g_limage
def generate_lensed_host(xi1, xi2, lens_P, srcP_b, srcP_d):
    """Ray-trace the host galaxy's bulge and disk through the lens and
    write each lensed component out as a FITS stamp.

    Deflections use a non-singular isothermal ellipsoid with external
    shear.  Output files are named
    ``<UID_lens>_<lensed magnitude>_{bulge,disk}.fits`` under ``outdir``.

    Parameters
    ----------
    xi1, xi2:
        Image-plane coordinate grids, arcseconds.
    lens_P: dict
        Lens parameters from ``create_cats_agns``.
    srcP_b, srcP_d: dict
        Bulge and disk source parameters from ``create_cats_agns``.
    """
    dsx = 0.01  # NOTE(review): dsx and rlc are assigned but never used here
    xlc1 = lens_P['xl1'] # x position of the lens, arcseconds
    xlc2 = lens_P['xl2'] # y position of the lens, arcseconds
    rlc = 0.0 # core size of Non-singular Isothermal Ellipsoid
    vd = lens_P['vd'] # velocity dispersion of the lens
    zl = lens_P['zl'] # redshift of the lens
    zs = srcP_b['zs'] # redshift of the source
    rle = ole.re_sv(vd, zl, zs) # Einstein radius of lens, arcseconds.
    ql = lens_P['ql'] # axis ratio b/a
    le = ole.e2le(1.0 - ql) # scale factor due to projection of ellpsoid
    phl = lens_P['phl'] # position angle of the lens, degree
    eshr = lens_P['gamma'] # external shear
    eang = lens_P['phg'] # position angle of external shear
    ekpa = 0.0 # external convergence
    #----------------------------------------------------------------------
    # Deflection angles for the SIE + external shear model.
    ai1, ai2 = ole.alphas_sie(xlc1, xlc2, phl, ql, rle, le,
                              eshr, eang, ekpa, xi1, xi2)
    # Lens equation: map image-plane coordinates back to the source plane.
    yi1 = xi1 - ai1
    yi2 = xi2 - ai2
    #----------------------------------------------------------------------------
    lensed_mag_b, lensed_image_b = lensed_sersic_2d(xi1,xi2,yi1,yi2,srcP_b,lens_P)
    os.makedirs(os.path.join(outdir,'agn_lensed_bulges'), exist_ok=True)
    fits_limg_b = os.path.join(outdir,'agn_lensed_bulges/') + str(lens_P['UID_lens']) + "_" + str(lensed_mag_b) + "_bulge.fits"
    pyfits.writeto(fits_limg_b, lensed_image_b.astype("float32"), overwrite=True)
    #----------------------------------------------------------------------------
    lensed_mag_d, lensed_image_d = lensed_sersic_2d(xi1,xi2,yi1,yi2,srcP_d,lens_P)
    os.makedirs(os.path.join(outdir,'agn_lensed_disks'), exist_ok=True)
    fits_limg_d = os.path.join(outdir,'agn_lensed_disks/') + str(lens_P['UID_lens']) + "_" + str(lensed_mag_d) + "_disk.fits"
    pyfits.writeto(fits_limg_d, lensed_image_d.astype("float32"), overwrite=True)
    return 0
if __name__ == '__main__':
    dsx = 0.01  # pixel size per side, arcseconds
    nnn = 1000  # number of pixels per side
    xi1, xi2 = ole.make_r_coor(nnn, dsx)
    hdulist, ahb, ahd = load_in_data_agn()
    # Print a progress line every ``message_freq`` systems.
    message_row = 0
    message_freq = 50
    for i, row in ahb.iterrows():
        if i >= message_row:
            print ("working on system ", i , "of", max(ahb.index))
            message_row += message_freq
        lensP, srcPb, srcPd = create_cats_agns(i, hdulist, ahb, ahd)
        generate_lensed_host(xi1, xi2, lensP, srcPb, srcPd)
| 41.635556 | 129 | 0.516652 | import numpy as np
import os
import argparse
import pylab as pl
import subprocess as sp
import astropy.io.fits as pyfits
import pandas as pd
import scipy.special as ss
import om10_lensing_equations as ole
data_dir = os.path.join(os.environ['SIMS_GCRCATSIMINTERFACE_DIR'], 'data')
twinkles_data_dir = os.path.join(os.environ['TWINKLES_DIR'], 'data')
outdefault = os.path.join(data_dir,'outputs')
parser = argparse.ArgumentParser(description='The location of the desired output directory')
parser.add_argument("--outdir", dest='outdir1', type=str, default = outdefault,
help='Output location for FITS stamps')
args = parser.parse_args()
outdir = args.outdir1
def load_in_data_agn():
agn_host_bulge = pd.read_csv(os.path.join(data_dir,'agn_host_bulge.csv.gz'))
agn_host_disk = pd.read_csv(os.path.join(data_dir, 'agn_host_disk.csv.gz'))
idx = agn_host_bulge['image_number'] == 0
ahb_purged = agn_host_bulge[:][idx]
ahd_purged = agn_host_disk[:][idx]
lens_list = pyfits.open(os.path.join(twinkles_data_dir,
'twinkles_lenses_v2.fits'))
return lens_list, ahb_purged, ahd_purged
def create_cats_agns(index, hdu_list, ahb_list, ahd_list):
twinkles_ID = ahd['twinkles_system'][index]
UID_lens = ahd['uniqueId_lens'][index]
Ra_lens = ahd['raPhoSim_lens'][index]
Dec_lens = ahd['decPhoSim_lens'][index]
idx = hdu_list[1].data['twinklesId'] == twinkles_ID
lid = hdu_list[1].data['LENSID'][idx][0]
xl1 = 0.0
xl2 = 0.0
vd = hdu_list[1].data['VELDISP'][idx][0]
zd = hdu_list[1].data['ZLENS'][idx][0]
ql = 1.0 - hdu_list[1].data['ELLIP'][idx][0]
phi= hdu_list[1].data['PHIE'][idx][0]
ys1 = hdu_list[1].data['XSRC'][idx][0]
ys2 = hdu_list[1].data['YSRC'][idx][0]
ext_shr = hdu_list[1].data['GAMMA'][idx][0]
ext_phi = hdu_list[1].data['PHIG'][idx][0]
ximg = hdu_list[1].data['XIMG'][idx][0]
yimg = hdu_list[1].data['YIMG'][idx][0]
lens_cat = {'xl1' : xl1,
'xl2' : xl2,
'ql' : ql,
'vd' : vd,
'phl' : phi,
'gamma' : ext_shr,
'phg' : ext_phi,
'zl' : zd,
'ximg' : ximg,
'yimg' : yimg,
'twinklesid' : twinkles_ID,
'lensid' : lid,
'index' : index,
'UID_lens' : UID_lens,
'Ra_lens' : Ra_lens,
'Dec_lens' : Dec_lens}
mag_src_b = ahb_list['phosimMagNorm'][index]
qs_b = ahb_list['minorAxis'][index]/ahb_list['majorAxis'][index]
Reff_src_b = np.sqrt(ahb_list['minorAxis'][index]*ahb_list['majorAxis'][index])
phs_b = ahb_list['positionAngle'][index]
ns_b = ahb_list['sindex'][index]
zs_b = ahb_list['redshift'][index]
sed_src_b = ahb_list['sedFilepath'][index]
srcsP_bulge = {'ys1' : ys1,
'ys2' : ys2,
'mag_src' : mag_src_b,
'Reff_src' : Reff_src_b,
'qs' : qs_b,
'phs' : phs_b,
'ns' : ns_b,
'zs' : zs_b,
'sed_src' : sed_src_b,
'components' : 'bulge'}
mag_src_d = ahd_list['phosimMagNorm'][index]
qs_d = ahd_list['minorAxis'][index]/ahd_list['majorAxis'][index]
Reff_src_d = np.sqrt(ahd_list['minorAxis'][index]*ahd_list['majorAxis'][index])
phs_d = ahd_list['positionAngle'][index]
ns_d = ahd_list['sindex'][index]
zs_d = ahd_list['redshift'][index]
sed_src_d = ahd_list['sedFilepath'][index]
srcsP_disk = {'ys1' : ys1,
'ys2' : ys2,
'mag_src' : mag_src_d,
'Reff_src' : Reff_src_d,
'qs' : qs_d,
'phs' : phs_d,
'ns' : ns_d,
'zs' : zs_d,
'sed_src' : sed_src_d,
'components' : 'disk'}
return lens_cat, srcsP_bulge, srcsP_disk
def lensed_sersic_2d(xi1, xi2, yi1, yi2, source_cat, lens_cat):
ysc1 = source_cat['ys1']
ysc2 = source_cat['ys2']
mag_tot = source_cat['mag_src']
Reff_arc = source_cat['Reff_src']
qs = source_cat['qs']
phs = source_cat['phs']
ns = source_cat['ns']
g_limage = ole.sersic_2d(yi1,yi2,ysc1,ysc2,Reff_arc,qs,phs,ns)
g_source = ole.sersic_2d(xi1,xi2,ysc1,ysc2,Reff_arc,qs,phs,ns)
mag_lensed = mag_tot - 2.5*np.log(np.sum(g_limage)/np.sum(g_source))
return mag_lensed, g_limage
def generate_lensed_host(xi1, xi2, lens_P, srcP_b, srcP_d):
dsx = 0.01
xlc1 = lens_P['xl1']
xlc2 = lens_P['xl2']
rlc = 0.0
vd = lens_P['vd']
zl = lens_P['zl']
zs = srcP_b['zs']
rle = ole.re_sv(vd, zl, zs)
ql = lens_P['ql']
le = ole.e2le(1.0 - ql)
phl = lens_P['phl']
eshr = lens_P['gamma']
eang = lens_P['phg']
ekpa = 0.0
ai1, ai2 = ole.alphas_sie(xlc1, xlc2, phl, ql, rle, le,
eshr, eang, ekpa, xi1, xi2)
yi1 = xi1 - ai1
yi2 = xi2 - ai2
lensed_mag_b, lensed_image_b = lensed_sersic_2d(xi1,xi2,yi1,yi2,srcP_b,lens_P)
os.makedirs(os.path.join(outdir,'agn_lensed_bulges'), exist_ok=True)
fits_limg_b = os.path.join(outdir,'agn_lensed_bulges/') + str(lens_P['UID_lens']) + "_" + str(lensed_mag_b) + "_bulge.fits"
pyfits.writeto(fits_limg_b, lensed_image_b.astype("float32"), overwrite=True)
lensed_mag_d, lensed_image_d = lensed_sersic_2d(xi1,xi2,yi1,yi2,srcP_d,lens_P)
os.makedirs(os.path.join(outdir,'agn_lensed_disks'), exist_ok=True)
fits_limg_d = os.path.join(outdir,'agn_lensed_disks/') + str(lens_P['UID_lens']) + "_" + str(lensed_mag_d) + "_disk.fits"
pyfits.writeto(fits_limg_d, lensed_image_d.astype("float32"), overwrite=True)
return 0
if __name__ == '__main__':
dsx = 0.01
nnn = 1000
xi1, xi2 = ole.make_r_coor(nnn, dsx)
hdulist, ahb, ahd = load_in_data_agn()
message_row = 0
message_freq = 50
for i, row in ahb.iterrows():
if i >= message_row:
print ("working on system ", i , "of", max(ahb.index))
message_row += message_freq
lensP, srcPb, srcPd = create_cats_agns(i, hdulist, ahb, ahd)
generate_lensed_host(xi1, xi2, lensP, srcPb, srcPd)
| true | true |
f721e2171bf6d9f9e10e140e276f0503a8e3e20a | 3,112 | py | Python | spectramanipulator/dialogs/rename_dialog.py | dmadea/Spectra-Manipulator | ddc1b27cb4f4691096dfa7b2975df350d2eaf40e | [
"MIT"
] | 1 | 2020-07-18T17:46:01.000Z | 2020-07-18T17:46:01.000Z | spectramanipulator/dialogs/rename_dialog.py | dmadea/Spectra-Manipulator | ddc1b27cb4f4691096dfa7b2975df350d2eaf40e | [
"MIT"
] | null | null | null | spectramanipulator/dialogs/rename_dialog.py | dmadea/Spectra-Manipulator | ddc1b27cb4f4691096dfa7b2975df350d2eaf40e | [
"MIT"
] | null | null | null |
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from .gui_rename_dialog import Ui_Dialog
class RenameDialog(QtWidgets.QDialog, Ui_Dialog):
    """Modal dialog for renaming items either via an expression with a
    counter (expression + offset + multiplication factor) or by taking
    names from an explicit user-supplied list.

    At most one instance is tracked at a time via the class attributes
    ``is_opened`` and ``_instance``.
    """
    # static variables
    is_opened = False        # True while an instance of the dialog is open
    _instance = None         # the currently open instance, if any
    int32_max = 2147483647   # upper bound for the counter-offset spin box
    def __init__(self, expression='', offset=0, c_mult_facotr=1,
                 last_rename_take_name_from_list=False, parent=None):
        """Build the dialog, prefill its widgets and run it modally.

        Note: ``exec()`` is invoked at the end of ``__init__``, so
        constructing the object blocks until the dialog is closed.
        """
        super(RenameDialog, self).__init__(parent)
        self.setupUi(self)
        self.result = (expression, offset, c_mult_facotr)
        self.list = last_rename_take_name_from_list
        #disable resizing of the window,
        # help from https://stackoverflow.com/questions/16673074/in-qt-c-how-can-i-fully-disable-resizing-a-window-including-the-resize-icon-w
        self.setWindowFlags(Qt.Dialog | Qt.MSWindowsFixedSizeDialogHint)
        self.setWindowTitle("Rename Items")
        self.leExpression.setText(expression)
        self.leCounterMulFactor.setText(str(c_mult_facotr))
        self.sbOffset.setValue(offset)
        self.sbOffset.setMinimum(0)
        self.sbOffset.setMaximum(self.int32_max)
        self.cbTakeNamesFromList.setCheckState(self.check_state(last_rename_take_name_from_list))
        # NOTE(review): this instance attribute shadows QDialog's built-in
        # ``accepted`` signal — confirm nothing connects to that signal.
        self.accepted = False
        RenameDialog.is_opened = True
        RenameDialog._instance = self
        self.leExpression.setFocus()
        self.leExpression.selectAll()
        self.cbTakeNamesFromList.stateChanged.connect(self.cbTakeNamesFromList_check_changed)
        # perform change
        self.cbTakeNamesFromList_check_changed()
        self.show()
        self.exec()
    @staticmethod
    def get_instance():
        """Return the currently open dialog instance, or None."""
        return RenameDialog._instance
    @staticmethod
    def check_state(checked):
        """Map a bool onto the Qt check-state value used by checkboxes."""
        return Qt.Checked if checked else 0
    def set_result(self):
        """Store the user's input into ``self.result`` or ``self.list``.

        NOTE(review): the multiplication factor is stored here as the raw
        line-edit string, while ``__init__`` seeded ``result`` with a
        number — callers should tolerate either type.
        """
        if self.is_renaming_by_expression:
            self.result = (self.leExpression.text(), self.sbOffset.value(), self.leCounterMulFactor.text())
        else:
            self.list = self.leList.text()
    def cbTakeNamesFromList_check_changed(self):
        """Enable the widgets matching the selected rename mode."""
        if self.cbTakeNamesFromList.checkState() == Qt.Checked:
            # List mode: only the name-list field is editable.
            self.sbOffset.setEnabled(False)
            self.leExpression.setEnabled(False)
            self.leList.setEnabled(True)
            self.leCounterMulFactor.setEnabled(False)
            self.is_renaming_by_expression = False
        else:
            # Expression mode: expression, offset and factor are editable.
            self.sbOffset.setEnabled(True)
            self.leExpression.setEnabled(True)
            self.leList.setEnabled(False)
            self.leCounterMulFactor.setEnabled(True)
            self.is_renaming_by_expression = True
    def accept(self):
        """Capture the input, clear the singleton tracking and close."""
        self.set_result()
        self.accepted = True
        RenameDialog.is_opened = False
        RenameDialog._instance = None
        super(RenameDialog, self).accept()
    def reject(self):
        """Clear the singleton tracking and close without saving."""
        RenameDialog.is_opened = False
        RenameDialog._instance = None
        super(RenameDialog, self).reject()
if __name__ == "__main__":
    # Manual smoke test: open the dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = RenameDialog()
    # Dialog.show()
    sys.exit(app.exec_())
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from .gui_rename_dialog import Ui_Dialog
class RenameDialog(QtWidgets.QDialog, Ui_Dialog):
is_opened = False
_instance = None
int32_max = 2147483647
def __init__(self, expression='', offset=0, c_mult_facotr=1,
last_rename_take_name_from_list=False, parent=None):
super(RenameDialog, self).__init__(parent)
self.setupUi(self)
self.result = (expression, offset, c_mult_facotr)
self.list = last_rename_take_name_from_list
self.setWindowFlags(Qt.Dialog | Qt.MSWindowsFixedSizeDialogHint)
self.setWindowTitle("Rename Items")
self.leExpression.setText(expression)
self.leCounterMulFactor.setText(str(c_mult_facotr))
self.sbOffset.setValue(offset)
self.sbOffset.setMinimum(0)
self.sbOffset.setMaximum(self.int32_max)
self.cbTakeNamesFromList.setCheckState(self.check_state(last_rename_take_name_from_list))
self.accepted = False
RenameDialog.is_opened = True
RenameDialog._instance = self
self.leExpression.setFocus()
self.leExpression.selectAll()
self.cbTakeNamesFromList.stateChanged.connect(self.cbTakeNamesFromList_check_changed)
self.cbTakeNamesFromList_check_changed()
self.show()
self.exec()
@staticmethod
def get_instance():
return RenameDialog._instance
@staticmethod
def check_state(checked):
return Qt.Checked if checked else 0
def set_result(self):
if self.is_renaming_by_expression:
self.result = (self.leExpression.text(), self.sbOffset.value(), self.leCounterMulFactor.text())
else:
self.list = self.leList.text()
def cbTakeNamesFromList_check_changed(self):
if self.cbTakeNamesFromList.checkState() == Qt.Checked:
self.sbOffset.setEnabled(False)
self.leExpression.setEnabled(False)
self.leList.setEnabled(True)
self.leCounterMulFactor.setEnabled(False)
self.is_renaming_by_expression = False
else:
self.sbOffset.setEnabled(True)
self.leExpression.setEnabled(True)
self.leList.setEnabled(False)
self.leCounterMulFactor.setEnabled(True)
self.is_renaming_by_expression = True
def accept(self):
self.set_result()
self.accepted = True
RenameDialog.is_opened = False
RenameDialog._instance = None
super(RenameDialog, self).accept()
def reject(self):
RenameDialog.is_opened = False
RenameDialog._instance = None
super(RenameDialog, self).reject()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = RenameDialog()
sys.exit(app.exec_()) | true | true |
f721e218fc789c3f8ef3a8e08522e4afa077fe67 | 3,728 | py | Python | Algorithm/BOJ/19238스타트택시.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | Algorithm/BOJ/19238스타트택시.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | Algorithm/BOJ/19238스타트택시.py | Nyapy/FMTG | dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e | [
"MIT"
] | null | null | null | import sys
sys.stdin = open("19238.txt")  # read the sample input from a file
from collections import deque
# N: grid size, M: number of passengers, oil: initial fuel
N,M,oil = map(int, input().split())
# 1-based grid; jido[y][x] == 1 marks a wall, 0 is an empty cell
jido = [0]+[[0]+list(map(int, input().split())) for _ in range(N)]
tay, tax = map(int, input().split())  # taxi start position (row, col)
# One row per passenger: start row/col, destination row/col
cst = [list(map(int, input().split())) for _ in range(M)]
dx = [0, -1,1,0]
dy = [-1, 0,0,1]
# Mark each passenger's pickup cell with i+2, so values >= 2 identify passengers.
for i in range(M):
    y,x,ay,ax = cst[i]
    jido[y][x] = i+2
cnt = 0  # number of passengers delivered so far
def taxi(x,y):
    """Drive the taxi: repeatedly BFS from the current position to the
    nearest passenger, deliver them via ``goto``, and update the global
    fuel (``oil``) and delivery count (``cnt``).  ``oil`` is set to -1
    when any passenger cannot be served.

    Tie-breaking for equally-near passengers: smaller row first, then
    smaller column.
    """
    global oil, cnt
    visited = [[-1 for _ in range(N+1)] for _ in range(N+1)]
    visited[y][x] = 0
    q= deque()
    q.append([x,y])
    flag = True      # True while no candidate passenger has been found yet
    cx,cy = 0,0      # position of the best candidate passenger
    st = 1000000     # BFS distance of the best candidate
    while q:
        while q:
            # NOTE(review): this compares the fuel to the passenger count —
            # presumably an early-exit condition; confirm against the problem.
            if oil == M:
                return
            tx, ty = q.popleft()
            # Fuel would run out before reaching any farther cell.
            if visited[ty][tx] + 1 == oil:
                return -1
            # Standing on a passenger cell: pick up and deliver immediately.
            if jido[ty][tx] >= 2:
                plus = goto(jido[ty][tx] - 2)
                sy, sx, ey, ex = cst[jido[ty][tx] - 2]
                if plus == -1:
                    oil = -1
                    return
                else:
                    # Fuel grows by the delivery distance; restart the BFS
                    # from the drop-off point.
                    oil += plus
                    cnt +=1
                    visited = [[-1 for _ in range(N + 1)] for _ in range(N + 1)]
                    visited[ey][ex] = 0
                    jido[ty][tx] = 0
                    q = deque()
                    q.append([ex, ey])
                    continue
            for k in range(4):
                nx,ny = tx+dx[k], ty+dy[k]
                if 0<nx<=N and 0<ny <= N:
                    if visited[ny][nx] == -1:
                        if jido[ny][nx] != 1:
                            visited[ny][nx] = visited[ty][tx] + 1
                            if jido[ny][nx] == 0:
                                q.append([nx,ny])
                            elif jido[ny][nx] >= 2:
                                # Candidate passenger found: keep the one with
                                # the smallest distance, then row, then column.
                                if flag:
                                    cx, cy = nx, ny
                                    st = visited[ny][nx]
                                    flag = False
                                elif visited[ny][nx] == st:
                                    if cy > ny :
                                        cy = ny
                                        cx = nx
                                        st = visited[ny][nx]
                                    elif cy == ny:
                                        if nx < cx :
                                            cx = nx
                                            cy = ny
                                            st = visited[ny][nx]
        # BFS queue exhausted: if a passenger was selected, pay the travel
        # cost to reach them, deliver, and restart from the destination.
        if not flag:
            oil -= visited[cy][cx]
            plus = goto(jido[cy][cx] - 2)
            sy, sx, ey, ex = cst[jido[cy][cx] - 2]
            jido[cy][cx] = 0
            if plus == -1:
                oil = -1
                return
            else:
                oil += plus
                cnt += 1
                visited = [[-1 for _ in range(N + 1)] for _ in range(N + 1)]
                visited[ey][ex] = 0
                q = deque()
                q.append([ex, ey])
                flag = True
    # No reachable passengers left but not everyone has been delivered.
    if cnt < M:
        oil = -1
    return
def goto(i):
    """BFS from passenger ``i``'s pickup cell to their destination.

    Returns the shortest path length, or -1 when the destination is
    unreachable or the trip would need more fuel than ``oil``.
    """
    visited = [[-1 for _ in range(N+1)] for _ in range(N+1)]
    sy,sx, ey,ex = cst[i]
    q = deque()
    q.append([sx,sy])
    visited[sy][sx] = 0
    while q:
        tx, ty = q.popleft()
        # The next step would exceed the available fuel.
        if visited[ty][tx]+1 > oil:
            return -1
        for k in range(4):
            nx, ny = tx + dx[k], ty +dy[k]
            if 0<nx <=N and 0< ny <= N:
                if visited[ny][nx] == -1 and jido[ny][nx] != 1:
                    visited[ny][nx] = visited[ty][tx] +1
                    q.append([nx,ny])
                    if ny==ey and nx == ex:
                        return visited[ny][nx]
    return -1
taxi(tax,tay)  # run the simulation from the taxi's starting position
print(oil)     # remaining fuel, or -1 if any passenger could not be served
sys.stdin = open("19238.txt")
from collections import deque
N,M,oil = map(int, input().split())
jido = [0]+[[0]+list(map(int, input().split())) for _ in range(N)]
tay, tax = map(int, input().split())
cst = [list(map(int, input().split())) for _ in range(M)]
dx = [0, -1,1,0]
dy = [-1, 0,0,1]
for i in range(M):
y,x,ay,ax = cst[i]
jido[y][x] = i+2
cnt = 0
def taxi(x,y):
global oil, cnt
visited = [[-1 for _ in range(N+1)] for _ in range(N+1)]
visited[y][x] = 0
q= deque()
q.append([x,y])
flag = True
cx,cy = 0,0
st = 1000000
while q:
while q:
if oil == M:
return
tx, ty = q.popleft()
if visited[ty][tx] + 1 == oil:
return -1
if jido[ty][tx] >= 2:
plus = goto(jido[ty][tx] - 2)
sy, sx, ey, ex = cst[jido[ty][tx] - 2]
if plus == -1:
oil = -1
return
else:
oil += plus
cnt +=1
visited = [[-1 for _ in range(N + 1)] for _ in range(N + 1)]
visited[ey][ex] = 0
jido[ty][tx] = 0
q = deque()
q.append([ex, ey])
continue
for k in range(4):
nx,ny = tx+dx[k], ty+dy[k]
if 0<nx<=N and 0<ny <= N:
if visited[ny][nx] == -1:
if jido[ny][nx] != 1:
visited[ny][nx] = visited[ty][tx] + 1
if jido[ny][nx] == 0:
q.append([nx,ny])
elif jido[ny][nx] >= 2:
if flag:
cx, cy = nx, ny
st = visited[ny][nx]
flag = False
elif visited[ny][nx] == st:
if cy > ny :
cy = ny
cx = nx
st = visited[ny][nx]
elif cy == ny:
if nx < cx :
cx = nx
cy = ny
st = visited[ny][nx]
if not flag:
oil -= visited[cy][cx]
plus = goto(jido[cy][cx] - 2)
sy, sx, ey, ex = cst[jido[cy][cx] - 2]
jido[cy][cx] = 0
if plus == -1:
oil = -1
return
else:
oil += plus
cnt += 1
visited = [[-1 for _ in range(N + 1)] for _ in range(N + 1)]
visited[ey][ex] = 0
q = deque()
q.append([ex, ey])
flag = True
if cnt < M:
oil = -1
return
def goto(i):
visited = [[-1 for _ in range(N+1)] for _ in range(N+1)]
sy,sx, ey,ex = cst[i]
q = deque()
q.append([sx,sy])
visited[sy][sx] = 0
while q:
tx, ty = q.popleft()
if visited[ty][tx]+1 > oil:
return -1
for k in range(4):
nx, ny = tx + dx[k], ty +dy[k]
if 0<nx <=N and 0< ny <= N:
if visited[ny][nx] == -1 and jido[ny][nx] != 1:
visited[ny][nx] = visited[ty][tx] +1
q.append([nx,ny])
if ny==ey and nx == ex:
return visited[ny][nx]
return -1
taxi(tax,tay)
print(oil) | true | true |
f721e6187906d39f9020e1cf5b0b2268c9f06476 | 1,986 | py | Python | verification/HMM/rtcf-h-table.py | thomasgibson/tabula-rasa | 85abf26d6604b5a9a4d356f07aeb90d5b6453f33 | [
"MIT"
] | 3 | 2018-08-24T02:11:46.000Z | 2021-06-15T12:53:36.000Z | verification/HMM/rtcf-h-table.py | thomasgibson/tabula-rasa | 85abf26d6604b5a9a4d356f07aeb90d5b6453f33 | [
"MIT"
] | null | null | null | verification/HMM/rtcf-h-table.py | thomasgibson/tabula-rasa | 85abf26d6604b5a9a4d356f07aeb90d5b6453f33 | [
"MIT"
] | 1 | 2019-08-21T15:02:21.000Z | 2019-08-21T15:02:21.000Z | import os
import sys
import pandas as pd
# One CSV of convergence data per polynomial degree k = 0..3.
data_set = ["results/H-RTCF-degree-0.csv",
            "results/H-RTCF-degree-1.csv",
            "results/H-RTCF-degree-2.csv",
            "results/H-RTCF-degree-3.csv"]
# Fail fast if any result file is missing.
for data in data_set:
    if not os.path.exists(data):
        print("Cannot find data file '%s'" % data)
        sys.exit(1)
# LaTeX preamble and header rows of the convergence table.
table = r"""\resizebox{\textwidth}{!}{%
\begin{tabular}{| l | c| c | c | c | c | c | c |}
\hline
\multicolumn{8}{|c|}{RTCF-H method} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} &
\multicolumn{2}{|c|}{
$\norm{\boldsymbol{u}-\boldsymbol{u}_h}_{\boldsymbol{L}^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} &
\multicolumn{2}{|c|}{$\norm{p-p_h^{\star}}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+2})$} \\
\cline{2-8}
& $r$ & $L^2$-error & rate & $L^2$-error & rate & $L^2$-error & rate \\
"""
# Row template: mesh level, then (error, rate) pairs for the scalar, flux
# and post-processed scalar variables.
lformat = r"""& {mesh: d} & {ScalarErrors:.3e} & {ScalarRates} & {FluxErrors:.3e} & {FluxRates} & {PPScalarErrors:.3e} & {PPScalarRates} \\
"""
def rate(s):
    """Format a convergence rate to three decimals; pass '---' through."""
    if s != '---':
        s = f"{float(s):.3f}"
    return s
# Append one table section per degree, with one row per mesh level.
for data in data_set:
    df = pd.read_csv(data)
    df = df.sort_values("Mesh")
    degree = df.Degree.values[0]
    table += r"""
    \hline
    \multirow{5}{*}{%d}
    """ % degree
    for k in df.Mesh:
        sliced = df.loc[lambda x: x.Mesh == k]
        table += lformat.format(mesh=k,
                                ScalarErrors=sliced.ScalarErrors.values[0],
                                ScalarRates=rate(sliced.ScalarConvRates.values[0]),
                                FluxErrors=sliced.FluxErrors.values[0],
                                FluxRates=rate(sliced.FluxConvRates.values[0]),
                                PPScalarErrors=sliced.PostProcessedScalarErrors.values[0],
                                PPScalarRates=rate(sliced.PostProcessedScalarRates.values[0]))
table += r"""\hline
\end{tabular}}
"""
# Emit the finished LaTeX table to stdout.
print(table)
| 31.52381 | 139 | 0.543303 | import os
import sys
import pandas as pd
data_set = ["results/H-RTCF-degree-0.csv",
"results/H-RTCF-degree-1.csv",
"results/H-RTCF-degree-2.csv",
"results/H-RTCF-degree-3.csv"]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\resizebox{\textwidth}{!}{%
\begin{tabular}{| l | c| c | c | c | c | c | c |}
\hline
\multicolumn{8}{|c|}{RTCF-H method} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} &
\multicolumn{2}{|c|}{
$\norm{\boldsymbol{u}-\boldsymbol{u}_h}_{\boldsymbol{L}^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} &
\multicolumn{2}{|c|}{$\norm{p-p_h^{\star}}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+2})$} \\
\cline{2-8}
& $r$ & $L^2$-error & rate & $L^2$-error & rate & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {ScalarErrors:.3e} & {ScalarRates} & {FluxErrors:.3e} & {FluxRates} & {PPScalarErrors:.3e} & {PPScalarRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{5}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
ScalarErrors=sliced.ScalarErrors.values[0],
ScalarRates=rate(sliced.ScalarConvRates.values[0]),
FluxErrors=sliced.FluxErrors.values[0],
FluxRates=rate(sliced.FluxConvRates.values[0]),
PPScalarErrors=sliced.PostProcessedScalarErrors.values[0],
PPScalarRates=rate(sliced.PostProcessedScalarRates.values[0]))
table += r"""\hline
\end{tabular}}
"""
print(table)
| true | true |
f721e6da40641349c02a4345af82f81e8183b4e8 | 5,620 | py | Python | docs/conf.py | husio/weave | 7c46c7dee60614c3812c9e29796f62687085b933 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | husio/weave | 7c46c7dee60614c3812c9e29796f62687085b933 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | husio/weave | 7c46c7dee60614c3812c9e29796f62687085b933 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
def setup(app):
app.add_stylesheet('css/custom.css')
# -- Project information -----------------------------------------------------
project = u'Weave'
copyright = u'2018 - 2019, IOV SAS'
author = u'Ethan Frey'
# The short X.Y version
version = u'0.2'
# The full version, including alpha/beta/rc tags
release = u'0.2.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.imgmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Weavedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Weave.tex', u'IOV Weave Documentation',
u'Ethan Frey', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'weave', u'IOV Weave Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Weave', u'IOV Weave Documentation',
author, 'Weave', 'Framework for tendermint ABCI apps',
'Blockchain'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 29.424084 | 79 | 0.645018 |
def setup(app):
app.add_stylesheet('css/custom.css')
project = u'Weave'
copyright = u'2018 - 2019, IOV SAS'
author = u'Ethan Frey'
version = u'0.2'
release = u'0.2.1'
extensions = [
'sphinx.ext.todo',
'sphinx.ext.imgmath',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Weavedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Weave.tex', u'IOV Weave Documentation',
u'Ethan Frey', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'weave', u'IOV Weave Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Weave', u'IOV Weave Documentation',
author, 'Weave', 'Framework for tendermint ABCI apps',
'Blockchain'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true | true |
f721e85640e494bdebc1fb808b9ad090fa4b3d72 | 4,750 | py | Python | src/sentry/integrations/bitbucket/issues.py | xzkostyan/sentry | 21476700defcf0dddeadeec8471d3454cef6faa7 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/integrations/bitbucket/issues.py | xzkostyan/sentry | 21476700defcf0dddeadeec8471d3454cef6faa7 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/integrations/bitbucket/issues.py | xzkostyan/sentry | 21476700defcf0dddeadeec8471d3454cef6faa7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.integrations.issues import IssueBasicMixin
from sentry.integrations.exceptions import ApiError, IntegrationFormError
ISSUE_TYPES = (
('bug', 'Bug'), ('enhancement', 'Enhancement'), ('proposal', 'Proposal'), ('task', 'Task'),
)
PRIORITIES = (
('trivial', 'Trivial', ), ('minor', 'Minor', ), ('major', 'Major'), ('critical', 'Critical'),
('blocker', 'Blocker'),
)
class BitbucketIssueBasicMixin(IssueBasicMixin):
def get_issue_url(self, key):
repo, issue_id = key.split('#')
return u'https://bitbucket.org/{}/issues/{}'.format(repo, issue_id)
def get_persisted_default_config_fields(self):
return ['repo']
def get_create_issue_config(self, group, **kwargs):
fields = super(BitbucketIssueBasicMixin, self).get_create_issue_config(group, **kwargs)
default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
'sentry-extensions-bitbucket-search', args=[org.slug, self.model.id],
)
return [
{
'name': 'repo',
'required': True,
'type': 'select',
'url': autocomplete_url,
'choices': repo_choices,
'defaultValue': default_repo,
'label': 'Bitbucket Repository',
}
] + fields + [
{
'name': 'issue_type',
'label': 'Issue type',
'default': ISSUE_TYPES[0][0],
'type': 'select',
'choices': ISSUE_TYPES
}, {
'name': 'priority',
'label': 'Priority',
'default': PRIORITIES[0][0],
'type': 'select',
'choices': PRIORITIES
}
]
def get_link_issue_config(self, group, **kwargs):
default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
'sentry-extensions-bitbucket-search', args=[org.slug, self.model.id],
)
return [{
'name': 'repo',
'required': True,
'type': 'select',
'url': autocomplete_url,
'choices': repo_choices,
'defaultValue': default_repo,
'label': 'Bitbucket Repository',
}, {
'name': 'externalIssue',
'label': 'Issue',
'default': '',
'type': 'select',
'required': True,
'url': autocomplete_url,
}, {
'name': 'comment',
'label': 'Comment',
'default': '',
'type': 'textarea',
'required': False,
'help': ('Leave blank if you don\'t want to '
'add a comment to the Bitbucket issue.'),
}]
def create_issue(self, data, **kwargs):
client = self.get_client()
if not data.get('repo'):
raise IntegrationFormError({'repo': ['Repository is required']})
try:
issue = client.create_issue(data.get('repo'), data)
except ApiError as e:
self.raise_error(e)
return {
'key': issue['id'],
'title': issue['title'],
'description': issue['content']['html'], # users content rendered as html
'repo': data.get('repo'),
}
def get_issue(self, issue_id, **kwargs):
client = self.get_client()
repo = kwargs['data'].get('repo')
issue = client.get_issue(repo, issue_id)
return {
'key': issue['id'],
'title': issue['title'],
'description': issue['content']['html'], # users content rendered as html
'repo': repo,
}
def make_external_key(self, data):
return u'{}#{}'.format(data['repo'], data['key'])
def after_link_issue(self, external_issue, **kwargs):
data = kwargs['data']
client = self.get_client()
repo, issue_num = external_issue.key.split('#')
if not repo:
raise IntegrationFormError({'repo': 'Repository is required'})
if not issue_num:
raise IntegrationFormError({'externalIssue': 'Issue ID is required'})
comment = data.get('comment')
if comment:
try:
client.create_comment(
repo=repo,
issue_id=issue_num,
data={'content': {'raw': comment}}
)
except ApiError as e:
self.raise_error(e)
| 32.534247 | 97 | 0.521053 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.integrations.issues import IssueBasicMixin
from sentry.integrations.exceptions import ApiError, IntegrationFormError
ISSUE_TYPES = (
('bug', 'Bug'), ('enhancement', 'Enhancement'), ('proposal', 'Proposal'), ('task', 'Task'),
)
PRIORITIES = (
('trivial', 'Trivial', ), ('minor', 'Minor', ), ('major', 'Major'), ('critical', 'Critical'),
('blocker', 'Blocker'),
)
class BitbucketIssueBasicMixin(IssueBasicMixin):
def get_issue_url(self, key):
repo, issue_id = key.split('#')
return u'https://bitbucket.org/{}/issues/{}'.format(repo, issue_id)
def get_persisted_default_config_fields(self):
return ['repo']
def get_create_issue_config(self, group, **kwargs):
fields = super(BitbucketIssueBasicMixin, self).get_create_issue_config(group, **kwargs)
default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
'sentry-extensions-bitbucket-search', args=[org.slug, self.model.id],
)
return [
{
'name': 'repo',
'required': True,
'type': 'select',
'url': autocomplete_url,
'choices': repo_choices,
'defaultValue': default_repo,
'label': 'Bitbucket Repository',
}
] + fields + [
{
'name': 'issue_type',
'label': 'Issue type',
'default': ISSUE_TYPES[0][0],
'type': 'select',
'choices': ISSUE_TYPES
}, {
'name': 'priority',
'label': 'Priority',
'default': PRIORITIES[0][0],
'type': 'select',
'choices': PRIORITIES
}
]
def get_link_issue_config(self, group, **kwargs):
default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
'sentry-extensions-bitbucket-search', args=[org.slug, self.model.id],
)
return [{
'name': 'repo',
'required': True,
'type': 'select',
'url': autocomplete_url,
'choices': repo_choices,
'defaultValue': default_repo,
'label': 'Bitbucket Repository',
}, {
'name': 'externalIssue',
'label': 'Issue',
'default': '',
'type': 'select',
'required': True,
'url': autocomplete_url,
}, {
'name': 'comment',
'label': 'Comment',
'default': '',
'type': 'textarea',
'required': False,
'help': ('Leave blank if you don\'t want to '
'add a comment to the Bitbucket issue.'),
}]
def create_issue(self, data, **kwargs):
client = self.get_client()
if not data.get('repo'):
raise IntegrationFormError({'repo': ['Repository is required']})
try:
issue = client.create_issue(data.get('repo'), data)
except ApiError as e:
self.raise_error(e)
return {
'key': issue['id'],
'title': issue['title'],
'description': issue['content']['html'], # users content rendered as html
'repo': data.get('repo'),
}
def get_issue(self, issue_id, **kwargs):
client = self.get_client()
repo = kwargs['data'].get('repo')
issue = client.get_issue(repo, issue_id)
return {
'key': issue['id'],
'title': issue['title'],
'description': issue['content']['html'], # users content rendered as html
'repo': repo,
}
def make_external_key(self, data):
return u'{}
def after_link_issue(self, external_issue, **kwargs):
data = kwargs['data']
client = self.get_client()
repo, issue_num = external_issue.key.split('
if not repo:
raise IntegrationFormError({'repo': 'Repository is required'})
if not issue_num:
raise IntegrationFormError({'externalIssue': 'Issue ID is required'})
comment = data.get('comment')
if comment:
try:
client.create_comment(
repo=repo,
issue_id=issue_num,
data={'content': {'raw': comment}}
)
except ApiError as e:
self.raise_error(e)
| true | true |
f721e86e8c416b8778de1f5a4d429e740eea897d | 2,427 | py | Python | conftest.py | viaviare/PyRepository | 6520d558a76dab5bd36cf321c0a68298ff048f7a | [
"Apache-2.0"
] | null | null | null | conftest.py | viaviare/PyRepository | 6520d558a76dab5bd36cf321c0a68298ff048f7a | [
"Apache-2.0"
] | null | null | null | conftest.py | viaviare/PyRepository | 6520d558a76dab5bd36cf321c0a68298ff048f7a | [
"Apache-2.0"
] | null | null | null | import pytest
import json
import jsonpickle
import os.path
import importlib
from fixture.application import Application
from fixture.db import DbFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
web_config = load_config(request.config.getoption("--target"))['web']
if fixture is None or not fixture.helper.is_valid:
fixture = Application(browser=browser, baseUrl=web_config['baseUrl'])
fixture.session.login(username=web_config['username'], password=web_config['password'])
return fixture
@pytest.fixture(scope="session")
def db(request):
db_config = load_config(request.config.getoption("--target"))['db']
dbfixture = DbFixture(host = db_config['host'], database = db_config['database'], user = db_config['user'], password = db_config['password'])
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome")
parser.addoption("--target", action="store", default="target.json")
parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
| 29.597561 | 145 | 0.694685 | import pytest
import json
import jsonpickle
import os.path
import importlib
from fixture.application import Application
from fixture.db import DbFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
web_config = load_config(request.config.getoption("--target"))['web']
if fixture is None or not fixture.helper.is_valid:
fixture = Application(browser=browser, baseUrl=web_config['baseUrl'])
fixture.session.login(username=web_config['username'], password=web_config['password'])
return fixture
@pytest.fixture(scope="session")
def db(request):
db_config = load_config(request.config.getoption("--target"))['db']
dbfixture = DbFixture(host = db_config['host'], database = db_config['database'], user = db_config['user'], password = db_config['password'])
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome")
parser.addoption("--target", action="store", default="target.json")
parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
| true | true |
f721e98693635493da8f166ed44f9befba0e31b8 | 4,170 | py | Python | benchmark/startQiskit2925.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit2925.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit2925.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=35
prog.cz(input_qubit[0],input_qubit[3]) # number=36
prog.h(input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[3]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=26
prog.cz(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.z(input_qubit[1]) # number=24
prog.h(input_qubit[2]) # number=39
prog.cz(input_qubit[3],input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=41
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[1]) # number=18
prog.rx(2.8902652413026093,input_qubit[2]) # number=13
prog.y(input_qubit[1]) # number=11
prog.y(input_qubit[1]) # number=12
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2925.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.46281 | 140 | 0.64964 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.h(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.z(input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.z(input_qubit[1])
prog.h(input_qubit[2])
prog.cz(input_qubit[3],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.rx(2.8902652413026093,input_qubit[2])
prog.y(input_qubit[1])
prog.y(input_qubit[1])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2925.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f721eada49d0d24dce621475587d2b8e069721ef | 23,626 | py | Python | ocs_ci/ocs/ui/acm_ui.py | MeridianExplorer/ocs-ci | a33d5116128b88f176f5eff68a3ef805125cdba1 | [
"MIT"
] | null | null | null | ocs_ci/ocs/ui/acm_ui.py | MeridianExplorer/ocs-ci | a33d5116128b88f176f5eff68a3ef805125cdba1 | [
"MIT"
] | null | null | null | ocs_ci/ocs/ui/acm_ui.py | MeridianExplorer/ocs-ci | a33d5116128b88f176f5eff68a3ef805125cdba1 | [
"MIT"
] | null | null | null | import os
import logging
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import ACMClusterDeployException
from ocs_ci.ocs.ui.base_ui import BaseUI
from ocs_ci.ocs.ui.helpers_ui import format_locator
from ocs_ci.ocs.ui.views import locators
from ocs_ci.utility.utils import (
get_ocp_version,
expose_ocp_version,
run_cmd,
)
from ocs_ci.ocs.constants import (
PLATFORM_XPATH_MAP,
ACM_PLATOFRM_VSPHERE_CRED_PREFIX,
VSPHERE_CA_FILE_PATH,
DATA_DIR,
ACM_OCP_RELEASE_IMG_URL_PREFIX,
ACM_VSPHERE_NETWORK,
ACM_CLUSTER_DEPLOY_TIMEOUT,
ACM_CLUSTER_DEPLOYMENT_LABEL_KEY,
ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY,
)
from ocs_ci.framework import config
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
class AcmPageNavigator(BaseUI):
    """
    Navigation helpers for the top-level pages of the ACM
    (Advanced Cluster Management) web console.

    Each ``navigate_*`` method expands the relevant side-bar section
    (when the entry lives inside a collapsible section) and clicks the
    corresponding menu item. Locators are resolved per OCP version
    because the console layout differs between releases.
    """

    def __init__(self, driver):
        """
        Args:
            driver: selenium webdriver instance driving the ACM console

        """
        super().__init__(driver)
        # Locator dictionaries are keyed by OCP version.
        self.ocp_version = get_ocp_version()
        self.acm_page_nav = locators[self.ocp_version]["acm_page"]

    def navigate_welcome_page(self):
        """
        Navigate to ACM Welcome Page

        """
        log.info("Navigate into Home Page")
        self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
        self.do_click(locator=self.acm_page_nav["Welcome_page"])

    def navigate_overview_page(self):
        """
        Navigate to ACM Overview Page

        """
        log.info("Navigate into Overview Page")
        self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
        self.do_click(locator=self.acm_page_nav["Overview_page"])

    def navigate_clusters_page(self):
        """
        Navigate to ACM Clusters Page

        """
        log.info("Navigate into Clusters Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Clusters_page"])

    def navigate_bare_metal_assets_page(self):
        """
        Navigate to ACM Bare Metal Assets Page

        """
        log.info("Navigate into Bare Metal Assets Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Bare_metal_assets_page"])

    def navigate_automation_page(self):
        """
        Navigate to ACM Automation Page

        """
        log.info("Navigate into Automation Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Automation_page"])

    def navigate_infrastructure_env_page(self):
        """
        Navigate to ACM Infrastructure Environments Page

        """
        log.info("Navigate into Infrastructure Environments Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Infrastructure_environments_page"])

    def navigate_applications_page(self):
        """
        Navigate to ACM Applications Page

        """
        log.info("Navigate into Applications Page")
        self.do_click(locator=self.acm_page_nav["Applications"])

    def navigate_governance_page(self):
        """
        Navigate to ACM Governance Page

        """
        log.info("Navigate into Governance Page")
        self.do_click(locator=self.acm_page_nav["Governance"])

    def navigate_credentials_page(self):
        """
        Navigate to ACM Credentials Page

        """
        # BUGFIX: previously logged "Navigate into Governance Page"
        # (copy-paste error from navigate_governance_page).
        log.info("Navigate into Credentials Page")
        self.do_click(locator=self.acm_page_nav["Credentials"])
class ACMOCPClusterDeployment(AcmPageNavigator):
    """
    Everything related to cluster creation through ACM goes here.

    This is a platform-agnostic base class; platform specific
    subclasses implement ``create_cluster_prereq`` and
    ``create_cluster``.
    """

    def __init__(self, driver, platform, cluster_conf):
        """
        Args:
            driver: selenium webdriver instance driving the ACM console
            platform (str): platform key (e.g. "vsphere") used to look up
                xpaths via PLATFORM_XPATH_MAP
            cluster_conf (Config): framework.Config object describing the
                cluster which is to be deployed

        """
        super().__init__(driver)
        self.platform = platform
        self.cluster_conf = cluster_conf
        self.cluster_name = self.cluster_conf.ENV_DATA["cluster_name"]
        self.cluster_path = self.cluster_conf.ENV_DATA["cluster_path"]
        self.deploy_sync_mode = config.MULTICLUSTER.get("deploy_sync_mode", "async")
        # One of: None (not started yet), "creating", "ready", "failed",
        # "unknown" -- updated by get_deployment_status()
        self.deployment_status = None
        self.cluster_deploy_timeout = self.cluster_conf.ENV_DATA.get(
            "cluster_deploy_timeout", ACM_CLUSTER_DEPLOY_TIMEOUT
        )
        # Populated (e.g. "deploy_timeout") when status becomes "failed"
        self.deployment_failed_reason = None
        self.deployment_start_time = 0

    def create_cluster_prereq(self):
        """
        Platform specific pre-deployment steps; must be implemented by
        child classes.

        """
        raise NotImplementedError("Child class has to implement this method")

    def navigate_create_clusters_page(self):
        """
        Navigate from the Clusters page into the 'Create cluster' page,
        retrying until the console actually lands on it.

        Raises:
            ACMClusterDeployException: if the 'Create Cluster' button
                can not be located

        """
        # Navigate to Clusters page which has 'Create Cluster'/
        # 'Import Cluster' buttons
        # Here we click on "Create Cluster" and we will be in create cluster page
        while True:
            self.navigate_clusters_page()
            log.info("Clicking on 'CreateCluster'")
            # Because of weird selenium behaviour we are checking
            # for CreateCluster button in 3 different ways
            # 1. CreateCluster button
            # 2. CreateCluster button with index xpath
            # 3. Checking url, which should end with 'create-cluster'
            if not self.check_element_presence(
                (By.XPATH, self.acm_page_nav["cc_create_cluster"][0]), timeout=60
            ):
                log.error("Create cluster button not found")
                raise ACMClusterDeployException("Can't continue with deployment")
            log.info("check 1:Found create cluster button")
            if not self.check_element_presence(
                (By.XPATH, self.acm_page_nav["cc_create_cluster_index_xpath"][0]),
                timeout=300,
            ):
                log.error("Create cluster button not found")
                raise ACMClusterDeployException("Can't continue with deployment")
            log.info("check 2:Found create cluster by index path")
            self.do_click(locator=self.acm_page_nav["cc_create_cluster"], timeout=100)
            # Give the console time to transition before inspecting the URL
            time.sleep(20)
            if self.driver.current_url.endswith("create-cluster"):
                break

    def click_next_button(self):
        """Advance the create-cluster wizard to its next page."""
        self.do_click(self.acm_page_nav["cc_next_page_button"])

    def fill_multiple_textbox(self, key_val):
        """
        In a page if we want to fill multiple text boxes we can use
        this function which iteratively fills in values from the
        dictionary parameter.

        Args:
            key_val (dict): keys correspond to the xpath of text box,
                values correspond to the value to be filled in

        """
        for xpath, value in key_val.items():
            self.do_send_keys(locator=xpath, text=value)

    def click_platform_and_credentials(self):
        """
        On the create-cluster page, pick the infrastructure platform tile
        and then select the matching credential from the dropdown.

        """
        self.navigate_create_clusters_page()
        self.do_click(
            locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
        )
        self.do_click(
            locator=self.acm_page_nav["cc_infrastructure_provider_creds_dropdown"]
        )
        credential = format_locator(
            self.acm_page_nav["cc_infrastructure_provider_creds_select_creds"],
            self.platform_credential_name,
        )
        self.do_click(locator=credential)

    @retry(ACMClusterDeployException, tries=3, delay=10, backoff=1)
    def goto_cluster_details_page(self):
        """
        Open this cluster's details page and expand the details section.
        Retried because the console occasionally fails to render the
        table entry on first load.

        """
        self.navigate_clusters_page()
        locator = format_locator(self.acm_page_nav["cc_table_entry"], self.cluster_name)
        self.do_click(locator=locator)
        self.do_click(locator=self.acm_page_nav["cc_cluster_details_page"], timeout=100)
        self.choose_expanded_mode(True, self.acm_page_nav["cc_details_toggle_icon"])

    def get_deployment_status(self):
        """
        Refresh ``self.deployment_status`` from the cluster details page.

        Also converts a long-running "creating" state into "failed"
        (reason "deploy_timeout") once ``cluster_deploy_timeout`` has
        elapsed since ``deployment_start_time``.

        """
        self.goto_cluster_details_page()
        # Short per-state probes: exactly one of these should match
        if self.acm_cluster_status_failed(timeout=2):
            self.deployment_status = "failed"
        elif self.acm_cluster_status_ready(timeout=2):
            self.deployment_status = "ready"
        elif self.acm_cluster_status_creating(timeout=2):
            self.deployment_status = "creating"
        else:
            self.deployment_status = "unknown"

        elapsed_time = int(time.time() - self.deployment_start_time)
        if elapsed_time > self.cluster_deploy_timeout:
            if self.deployment_status == "creating":
                self.deployment_status = "failed"
                self.deployment_failed_reason = "deploy_timeout"

    def wait_for_cluster_create(self):
        """
        Block until the cluster reaches 'Ready' state, draining the
        ``cluster_deploy_timeout`` budget in fixed-size status-check
        intervals.

        Raises:
            ACMClusterDeployException: if ACM reports the deployment
                as failed while waiting

        """
        # Wait for status creating
        # NOTE: fixed typo in local name (was 'staus_check_timeout')
        status_check_timeout = 300
        while (
            not self.acm_cluster_status_ready(status_check_timeout)
            and self.cluster_deploy_timeout >= 1
        ):
            self.cluster_deploy_timeout -= status_check_timeout
            if self.acm_cluster_status_creating():
                log.info(f"Cluster {self.cluster_name} is in 'Creating' phase")
            else:
                self.acm_bailout_if_failed()
        if self.acm_cluster_status_ready():
            log.info(
                f"Cluster create successful, Cluster {self.cluster_name} is in 'Ready' state"
            )

    def acm_bailout_if_failed(self):
        """
        Raise immediately if the console shows the deployment as failed.

        Raises:
            ACMClusterDeployException: when the 'FAILED' status is shown

        """
        if self.acm_cluster_status_failed():
            raise ACMClusterDeployException("Deployment is in 'FAILED' state")

    def acm_cluster_status_failed(self, timeout=5):
        """Return True if the status page shows the 'Failed' indicator."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_failed"][1],
                self.acm_page_nav["cc_cluster_status_page_status_failed"][0],
            ),
            timeout=timeout,
        )

    def acm_cluster_status_ready(self, timeout=120):
        """Return True if the status page shows the 'Ready' indicator."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_ready"][1],
                self.acm_page_nav["cc_cluster_status_page_status_ready"][0],
            ),
            timeout=timeout,
        )

    def acm_cluster_status_creating(self, timeout=120):
        """Return True if the status page shows the 'Creating' indicator."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_creating"][1],
                self.acm_page_nav["cc_cluster_status_page_status_creating"][0],
            ),
            timeout=timeout,
        )

    def download_cluster_conf_files(self):
        """
        Create the cluster directory layout (<cluster_path>/auth) and
        download the kubeconfig into it.

        """
        cluster_dir = os.path.expanduser(f"{self.cluster_path}")
        # create auth dir inside cluster dir; makedirs creates both
        # levels in one shot and avoids the exists()+mkdir() race.
        auth_dir = os.path.join(cluster_dir, "auth")
        os.makedirs(auth_dir, exist_ok=True)

        self.download_kubeconfig(auth_dir)

    def download_kubeconfig(self, authdir):
        """
        Extract the kubeconfig secret of the managed cluster into
        ``authdir`` via ``oc extract``.

        Args:
            authdir (str): directory where the kubeconfig file will land

        Raises:
            ACMClusterDeployException: if no kubeconfig file shows up
                after extraction

        """
        get_kubeconf_secret_cmd = (
            f"$(oc get secret -o name -n {self.cluster_name} "
            f"-l {ACM_CLUSTER_DEPLOYMENT_LABEL_KEY}={self.cluster_name} "
            f"-l {ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY}=kubeconfig)"
        )
        extract_cmd = (
            f"oc extract -n {self.cluster_name} "
            f"{get_kubeconf_secret_cmd} "
            f"--to={authdir} --confirm"
        )
        run_cmd(extract_cmd)
        if not os.path.exists(os.path.join(authdir, "kubeconfig")):
            raise ACMClusterDeployException("Could not find the kubeconfig")

    def create_cluster(self, cluster_config=None):
        """
        Create cluster using ACM UI

        Args:
            cluster_config (Config): framework.Config object of complete configuration required
                for deployment

        """
        raise NotImplementedError("Child class should implement this function")
class ACMOCPPlatformVsphereIPI(ACMOCPClusterDeployment):
    """
    This class handles all behind the scene activities
    for cluster creation through ACM for vsphere platform
    """

    def __init__(self, driver, cluster_conf=None):
        """
        Args:
            driver: selenium webdriver instance
            cluster_conf (Config): framework config of the cluster to deploy
        """
        super().__init__(driver=driver, platform="vsphere", cluster_conf=cluster_conf)
        self.platform_credential_name = cluster_conf.ENV_DATA.get(
            "platform_credential_name",
            f"{ACM_PLATOFRM_VSPHERE_CRED_PREFIX}{self.cluster_name}",
        )
        # API VIP & Ingress IP; assigned later in create_cluster_prereq()
        self.ips = None
        # vSphere network name; resolved in fill_network_info()
        self.vsphere_network = None
    def create_cluster_prereq(self, timeout=600):
        """
        Perform all prereqs before vsphere cluster creation from ACM

        Args:
            timeout (int): Timeout for any UI operations
        """
        # Create vsphere credentials:
        # Click on 'Add credential' in 'Infrastructure provider' page
        self.navigate_create_clusters_page()
        self.refresh_page()
        hard_timeout = config.ENV_DATA.get("acm_ui_hard_deadline", 1200)
        remaining = hard_timeout
        # Poll for the platform icon, reloading the page between attempts,
        # until the hard deadline budget runs out.
        while True:
            ret = self.check_element_presence(
                (By.XPATH, self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]][0]),
                timeout=300,
            )
            if ret:
                log.info("Found platform icon")
                break
            else:
                if remaining < 0:
                    raise TimeoutException("Timedout while waiting for platform icon")
                else:
                    remaining -= timeout
                    self.navigate_create_clusters_page()
                    self.refresh_page()
        self.do_click(
            locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
        )
        # "Basic vsphere credential info"
        # 1. credential name
        # 2. Namespace
        # 3. Base DNS domain
        self.do_click(locator=self.acm_page_nav["cc_provider_credentials"], timeout=100)
        parent_tab = self.driver.current_window_handle
        tabs = self.driver.window_handles
        # The credentials form opens in a new browser tab; switch to it
        self.driver.switch_to.window(tabs[1])
        self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere"])
        basic_cred_dict = {
            self.acm_page_nav[
                "cc_provider_creds_vsphere_cred_name"
            ]: self.platform_credential_name,
            self.acm_page_nav[
                "cc_provider_creds_vsphere_base_dns"
            ]: f"{self.cluster_conf.ENV_DATA['base_domain']}",
        }
        self.fill_multiple_textbox(basic_cred_dict)
        # Credential Namespace is not a text box but a dropdown
        self.do_click(self.acm_page_nav["cc_provider_creds_vsphere_cred_namespace"])
        self.do_click(self.acm_page_nav["cc_provider_creds_default_namespace"])
        # click on 'Next' button at the bottom
        self.click_next_button()
        # Detailed VMWare credentials section
        # 1. vCenter server
        # 2. vCenter username
        # 3. vCenter password
        # 4. vCenter root CA certificate
        # 5. vSphere cluster name
        # 6. vSphere datacenter
        # 7. vSphere default Datastore
        with open(VSPHERE_CA_FILE_PATH, "r") as fp:
            vsphere_ca = fp.read()
        vsphere_creds_dict = {
            self.acm_page_nav[
                "cc_provider_creds_vsphere_vcenter_server"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_server']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_username"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_user']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_password"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_password']}",
            self.acm_page_nav["cc_provider_creds_vsphere_rootca"]: f"{vsphere_ca}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_clustername"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_cluster']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_dc"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_datacenter']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_datastore"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_datastore']}",
        }
        self.fill_multiple_textbox(vsphere_creds_dict)
        self.click_next_button()
        # Pull Secret and SSH
        # 1. Pull secret
        # 2. SSH Private key
        # 3. SSH Public key
        with open(os.path.join(DATA_DIR, "pull-secret"), "r") as fp:
            pull_secret = fp.read()
        ssh_pub_key_path = os.path.expanduser(self.cluster_conf.DEPLOYMENT["ssh_key"])
        ssh_priv_key_path = os.path.expanduser(
            self.cluster_conf.DEPLOYMENT["ssh_key_private"]
        )
        with open(ssh_pub_key_path, "r") as fp:
            ssh_pub_key = fp.read()
        with open(ssh_priv_key_path, "r") as fp:
            ssh_priv_key = fp.read()
        pull_secret_and_ssh = {
            self.acm_page_nav["cc_provider_creds_vsphere_pullsecret"]: f"{pull_secret}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_ssh_privkey"
            ]: f"{ssh_priv_key}",
            self.acm_page_nav["cc_provider_creds_vsphere_ssh_pubkey"]: f"{ssh_pub_key}",
        }
        self.fill_multiple_textbox(pull_secret_and_ssh)
        self.click_next_button()
        self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere_add_button"])
        # Go to credentials tab and verify the credential row exists
        self.do_click(locator=self.acm_page_nav["Credentials"])
        credential_table_entry = format_locator(
            self.acm_page_nav["cc_table_entry"], self.platform_credential_name
        )
        if not self.check_element_presence(
            (By.XPATH, credential_table_entry[0]), timeout=20
        ):
            raise ACMClusterDeployException("Could not create credentials for vsphere")
        else:
            log.info(
                f"vsphere credential successfully created {self.platform_credential_name}"
            )
        # Get the ips in prereq itself
        from ocs_ci.deployment import vmware
        # Switch context to cluster which we are about to create
        prev_ctx = config.cur_index
        config.switch_ctx(self.cluster_conf.MULTICLUSTER["multicluster_index"])
        self.ips = vmware.assign_ips(2)
        vmware.create_dns_records(self.ips)
        config.switch_ctx(prev_ctx)
        # Close the credentials tab and return to the original tab
        self.driver.close()
        self.driver.switch_to.window(parent_tab)
        self.driver.switch_to.default_content()
    def create_cluster(self):
        """
        This function navigates through following pages in the UI
        1. Cluster details
        2. Node poools
        3. Networks
        4. Proxy
        5. Automation
        6. Review

        Raises:
            ACMClusterDeployException: If deployment failed for the cluster

        """
        self.navigate_create_clusters_page()
        self.click_platform_and_credentials()
        self.click_next_button()
        self.fill_cluster_details_page()
        self.click_next_button()
        # For now we don't do anything in 'Node Pools' page
        self.click_next_button()
        self.fill_network_info()
        self.click_next_button()
        # Skip proxy for now
        self.click_next_button()
        # Skip Automation for now
        self.click_next_button()
        # We are at Review page
        # Click on create
        self.do_click(locator=self.acm_page_nav["cc_create_button"])
        self.deployment_start_time = time.time()
        # We will be redirect to 'Details' page which has cluster deployment progress
        if self.deploy_sync_mode == "sync":
            try:
                self.wait_for_cluster_create()
            except ACMClusterDeployException:
                log.error(
                    f"Failed to create OCP cluster {self.cluster_conf.ENV_DATA['cluster_name']}"
                )
                raise
            # Download kubeconfig and install-config file
            self.download_cluster_conf_files()
        else:
            # Async mode of deployment, so just return to caller
            # we will just wait for status 'Creating' and then return
            if not self.acm_cluster_status_creating(timeout=600):
                raise ACMClusterDeployException(
                    f"Cluster {self.cluster_name} didn't reach 'Creating' phase"
                )
            self.deployment_status = "Creating"
            return
    def fill_network_info(self):
        """
        We need to fill following network info
        1. vSphere network name
        2. API VIP
        3. Ingress VIP
        """
        self.vsphere_network = self.cluster_conf.ENV_DATA.get(
            "vm_network", ACM_VSPHERE_NETWORK
        )
        self.do_click(self.acm_page_nav["cc_vsphere_network_name"])
        self.do_send_keys(
            self.acm_page_nav["cc_vsphere_network_name"], self.vsphere_network
        )
        # Chrome has a weird problem of trimming the whitespace
        # Suppose if network name is 'VM Network', when we put this text
        # in text box it automatically becomes 'VMNetwork', hence we need to take
        # care
        ele = self.driver.find_element(
            By.XPATH, self.acm_page_nav["cc_vsphere_network_name"][0]
        )
        remote_text = ele.get_property("value")
        if remote_text != self.vsphere_network:
            # Check if we have white space char
            # in network name; if so, move the cursor left and re-insert it
            try:
                index = self.vsphere_network.index(constants.SPACE)
                left_shift_offset = len(remote_text) - index
                self.do_send_keys(
                    self.acm_page_nav["cc_vsphere_network_name"],
                    f"{left_shift_offset*Keys.ARROW_LEFT}{constants.SPACE}",
                )
            except ValueError:
                raise ACMClusterDeployException(
                    "Weird browser behaviour, Not able to provide vsphere network info"
                )
        vsphere_network = {
            self.acm_page_nav["cc_api_vip"]: f"{self.ips[0]}",
            self.acm_page_nav["cc_ingress_vip"]: f"{self.ips[1]}",
        }
        self.fill_multiple_textbox(vsphere_network)
    def fill_cluster_details_page(self):
        """
        Fill in following details in "Cluster details" page
        1. Cluster name
        2. Base DNS domain
        3. Release image
        """
        release_img = self.get_ocp_release_img()
        cluster_details = {
            self.acm_page_nav[
                "cc_cluster_name"
            ]: f"{self.cluster_conf.ENV_DATA['cluster_name']}",
            self.acm_page_nav["cc_openshift_release_image"]: f"{release_img}",
        }
        self.fill_multiple_textbox(cluster_details)
    def get_ocp_release_img(self):
        """Return the OCP release image reference for the configured installer version."""
        vers = expose_ocp_version(self.cluster_conf.DEPLOYMENT["installer_version"])
        return f"{ACM_OCP_RELEASE_IMG_URL_PREFIX}:{vers}"
class ACMOCPDeploymentFactory(object):
    """Factory resolving the platform-specific ACM deployment handler."""

    def __init__(self):
        # Keys are "<platform><deployment_type>"; all platform specific
        # classes should have a map entry here
        self.platform_map = {"vsphereipi": ACMOCPPlatformVsphereIPI}

    def get_platform_instance(self, driver, cluster_config):
        """
        Args:
            driver: selenium UI driver object
            cluster_config (dict): Cluster Config object
        """
        env_data = cluster_config.ENV_DATA
        platform_deployment = f"{env_data['platform']}{env_data['deployment_type']}"
        handler_cls = self.platform_map[platform_deployment]
        return handler_cls(driver, cluster_config)
| 37.561208 | 96 | 0.635698 | import os
import logging
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import ACMClusterDeployException
from ocs_ci.ocs.ui.base_ui import BaseUI
from ocs_ci.ocs.ui.helpers_ui import format_locator
from ocs_ci.ocs.ui.views import locators
from ocs_ci.utility.utils import (
get_ocp_version,
expose_ocp_version,
run_cmd,
)
from ocs_ci.ocs.constants import (
PLATFORM_XPATH_MAP,
ACM_PLATOFRM_VSPHERE_CRED_PREFIX,
VSPHERE_CA_FILE_PATH,
DATA_DIR,
ACM_OCP_RELEASE_IMG_URL_PREFIX,
ACM_VSPHERE_NETWORK,
ACM_CLUSTER_DEPLOY_TIMEOUT,
ACM_CLUSTER_DEPLOYMENT_LABEL_KEY,
ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY,
)
from ocs_ci.framework import config
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
class AcmPageNavigator(BaseUI):
    """Navigation helpers for the ACM console's left-hand menu.

    Each ``navigate_*`` method logs the target and clicks the corresponding
    locator from the version-specific ``acm_page`` locator table.
    """

    def __init__(self, driver):
        super().__init__(driver)
        self.ocp_version = get_ocp_version()
        self.acm_page_nav = locators[self.ocp_version]["acm_page"]

    def navigate_welcome_page(self):
        """Open Home -> Welcome."""
        log.info("Navigate into Home Page")
        self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
        self.do_click(locator=self.acm_page_nav["Welcome_page"])

    def navigate_overview_page(self):
        """Open Home -> Overview."""
        log.info("Navigate into Overview Page")
        self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
        self.do_click(locator=self.acm_page_nav["Overview_page"])

    def navigate_clusters_page(self):
        """Open Infrastructure -> Clusters."""
        log.info("Navigate into Clusters Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Clusters_page"])

    def navigate_bare_metal_assets_page(self):
        """Open Infrastructure -> Bare metal assets."""
        log.info("Navigate into Bare Metal Assets Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Bare_metal_assets_page"])

    def navigate_automation_page(self):
        """Open Infrastructure -> Automation."""
        log.info("Navigate into Automation Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Automation_page"])

    def navigate_infrastructure_env_page(self):
        """Open Infrastructure -> Infrastructure environments."""
        log.info("Navigate into Infrastructure Environments Page")
        self.choose_expanded_mode(
            mode=True, locator=self.acm_page_nav["Infrastructure"]
        )
        self.do_click(locator=self.acm_page_nav["Infrastructure_environments_page"])

    def navigate_applications_page(self):
        """Open the Applications page."""
        log.info("Navigate into Applications Page")
        self.do_click(locator=self.acm_page_nav["Applications"])

    def navigate_governance_page(self):
        """Open the Governance page."""
        log.info("Navigate into Governance Page")
        self.do_click(locator=self.acm_page_nav["Governance"])

    def navigate_credentials_page(self):
        """Open the Credentials page."""
        # BUGFIX: previously logged "Navigate into Governance Page"
        # (copy-paste error) while actually navigating to Credentials.
        log.info("Navigate into Credentials Page")
        self.do_click(locator=self.acm_page_nav["Credentials"])
class ACMOCPClusterDeployment(AcmPageNavigator):
    """Base class driving OCP cluster creation through the ACM UI.

    Platform-specific subclasses must implement create_cluster_prereq()
    and create_cluster().
    """
    def __init__(self, driver, platform, cluster_conf):
        """
        Args:
            driver: selenium webdriver instance
            platform (str): platform key (e.g. "vsphere")
            cluster_conf (Config): framework config of the cluster to deploy
        """
        super().__init__(driver)
        self.platform = platform
        self.cluster_conf = cluster_conf
        self.cluster_name = self.cluster_conf.ENV_DATA["cluster_name"]
        self.cluster_path = self.cluster_conf.ENV_DATA["cluster_path"]
        self.deploy_sync_mode = config.MULTICLUSTER.get("deploy_sync_mode", "async")
        self.deployment_status = None
        self.cluster_deploy_timeout = self.cluster_conf.ENV_DATA.get(
            "cluster_deploy_timeout", ACM_CLUSTER_DEPLOY_TIMEOUT
        )
        self.deployment_failed_reason = None
        self.deployment_start_time = 0
    def create_cluster_prereq(self):
        """Platform-specific prerequisites; subclasses must override."""
        raise NotImplementedError("Child class has to implement this method")
    def navigate_create_clusters_page(self):
        """Navigate to the 'Create cluster' page, retrying until the URL confirms it."""
        # Keep trying until we are actually on the create-cluster page;
        # the two presence checks guard against a half-rendered Clusters page.
        while True:
            self.navigate_clusters_page()
            log.info("Clicking on 'CreateCluster'")
            if not self.check_element_presence(
                (By.XPATH, self.acm_page_nav["cc_create_cluster"][0]), timeout=60
            ):
                log.error("Create cluster button not found")
                raise ACMClusterDeployException("Can't continue with deployment")
            log.info("check 1:Found create cluster button")
            if not self.check_element_presence(
                (By.XPATH, self.acm_page_nav["cc_create_cluster_index_xpath"][0]),
                timeout=300,
            ):
                log.error("Create cluster button not found")
                raise ACMClusterDeployException("Can't continue with deployment")
            log.info("check 2:Found create cluster by index path")
            self.do_click(locator=self.acm_page_nav["cc_create_cluster"], timeout=100)
            # Give the page time to load before verifying the URL
            time.sleep(20)
            if self.driver.current_url.endswith("create-cluster"):
                break
    def click_next_button(self):
        """Click the wizard's 'Next' button."""
        self.do_click(self.acm_page_nav["cc_next_page_button"])
    def fill_multiple_textbox(self, key_val):
        """Fill each textbox given a {locator: text} mapping."""
        for xpath, value in key_val.items():
            self.do_send_keys(locator=xpath, text=value)
    def click_platform_and_credentials(self):
        """Select the platform tile and pick this run's credential from the dropdown."""
        self.navigate_create_clusters_page()
        self.do_click(
            locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
        )
        self.do_click(
            locator=self.acm_page_nav["cc_infrastructure_provider_creds_dropdown"]
        )
        credential = format_locator(
            self.acm_page_nav["cc_infrastructure_provider_creds_select_creds"],
            self.platform_credential_name,
        )
        self.do_click(locator=credential)
    @retry(ACMClusterDeployException, tries=3, delay=10, backoff=1)
    def goto_cluster_details_page(self):
        """Open this cluster's details page and expand the details section."""
        self.navigate_clusters_page()
        locator = format_locator(self.acm_page_nav["cc_table_entry"], self.cluster_name)
        self.do_click(locator=locator)
        self.do_click(locator=self.acm_page_nav["cc_cluster_details_page"], timeout=100)
        self.choose_expanded_mode(True, self.acm_page_nav["cc_details_toggle_icon"])
    def get_deployment_status(self):
        """Refresh self.deployment_status from the UI; mark failed on deploy timeout."""
        self.goto_cluster_details_page()
        if self.acm_cluster_status_failed(timeout=2):
            self.deployment_status = "failed"
        elif self.acm_cluster_status_ready(timeout=2):
            self.deployment_status = "ready"
        elif self.acm_cluster_status_creating(timeout=2):
            self.deployment_status = "creating"
        else:
            self.deployment_status = "unknown"
        elapsed_time = int(time.time() - self.deployment_start_time)
        if elapsed_time > self.cluster_deploy_timeout:
            if self.deployment_status == "creating":
                self.deployment_status = "failed"
                self.deployment_failed_reason = "deploy_timeout"
    def wait_for_cluster_create(self):
        """Block until the cluster reaches 'Ready', bailing out on 'Failed'.

        Consumes self.cluster_deploy_timeout in status-check sized slices.
        """
        # NOTE: variable name keeps the original (misspelled) identifier
        staus_check_timeout = 300
        while (
            not self.acm_cluster_status_ready(staus_check_timeout)
            and self.cluster_deploy_timeout >= 1
        ):
            self.cluster_deploy_timeout -= staus_check_timeout
            if self.acm_cluster_status_creating():
                log.info(f"Cluster {self.cluster_name} is in 'Creating' phase")
            else:
                self.acm_bailout_if_failed()
        if self.acm_cluster_status_ready():
            log.info(
                f"Cluster create successful, Cluster {self.cluster_name} is in 'Ready' state"
            )
    def acm_bailout_if_failed(self):
        """Raise ACMClusterDeployException when the UI shows 'Failed'."""
        if self.acm_cluster_status_failed():
            raise ACMClusterDeployException("Deployment is in 'FAILED' state")
    def acm_cluster_status_failed(self, timeout=5):
        """Return True when the status page shows the 'Failed' element."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_failed"][1],
                self.acm_page_nav["cc_cluster_status_page_status_failed"][0],
            ),
            timeout=timeout,
        )
    def acm_cluster_status_ready(self, timeout=120):
        """Return True when the status page shows the 'Ready' element."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_ready"][1],
                self.acm_page_nav["cc_cluster_status_page_status_ready"][0],
            ),
            timeout=timeout,
        )
    def acm_cluster_status_creating(self, timeout=120):
        """Return True when the status page shows the 'Creating' element."""
        return self.check_element_presence(
            (
                self.acm_page_nav["cc_cluster_status_page_status_creating"][1],
                self.acm_page_nav["cc_cluster_status_page_status_creating"][0],
            ),
            timeout=timeout,
        )
    def download_cluster_conf_files(self):
        """Create <cluster_path>/auth and download the kubeconfig into it."""
        if not os.path.exists(os.path.expanduser(f"{self.cluster_path}")):
            os.mkdir(os.path.expanduser(f"{self.cluster_path}"))
        # create auth dir inside cluster dir
        auth_dir = os.path.join(os.path.expanduser(f"{self.cluster_path}"), "auth")
        if not os.path.exists(auth_dir):
            os.mkdir(auth_dir)
        self.download_kubeconfig(auth_dir)
    def download_kubeconfig(self, authdir):
        """Extract the cluster's kubeconfig secret into *authdir* via ``oc``."""
        # Sub-shell expression resolving the kubeconfig secret name by labels
        get_kubeconf_secret_cmd = (
            f"$(oc get secret -o name -n {self.cluster_name} "
            f"-l {ACM_CLUSTER_DEPLOYMENT_LABEL_KEY}={self.cluster_name} "
            f"-l {ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY}=kubeconfig)"
        )
        extract_cmd = (
            f"oc extract -n {self.cluster_name} "
            f"{get_kubeconf_secret_cmd} "
            f"--to={authdir} --confirm"
        )
        run_cmd(extract_cmd)
        if not os.path.exists(os.path.join(authdir, "kubeconfig")):
            raise ACMClusterDeployException("Could not find the kubeconfig")
    def create_cluster(self, cluster_config=None):
        """Create a cluster through the ACM UI; subclasses must override."""
        raise NotImplementedError("Child class should implement this function")
class ACMOCPPlatformVsphereIPI(ACMOCPClusterDeployment):
    """Handles behind-the-scenes cluster creation through ACM for vsphere IPI."""
    def __init__(self, driver, cluster_conf=None):
        """
        Args:
            driver: selenium webdriver instance
            cluster_conf (Config): framework config of the cluster to deploy
        """
        super().__init__(driver=driver, platform="vsphere", cluster_conf=cluster_conf)
        self.platform_credential_name = cluster_conf.ENV_DATA.get(
            "platform_credential_name",
            f"{ACM_PLATOFRM_VSPHERE_CRED_PREFIX}{self.cluster_name}",
        )
        # API VIP & Ingress IP; assigned in create_cluster_prereq()
        self.ips = None
        # vSphere network name; resolved in fill_network_info()
        self.vsphere_network = None
    def create_cluster_prereq(self, timeout=600):
        """Create the vSphere credential in ACM and pre-assign VIP addresses.

        Args:
            timeout (int): timeout budget decrement used while polling the UI
        """
        self.navigate_create_clusters_page()
        self.refresh_page()
        hard_timeout = config.ENV_DATA.get("acm_ui_hard_deadline", 1200)
        remaining = hard_timeout
        # Poll for the platform icon, reloading between attempts,
        # until the hard deadline budget runs out.
        while True:
            ret = self.check_element_presence(
                (By.XPATH, self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]][0]),
                timeout=300,
            )
            if ret:
                log.info("Found platform icon")
                break
            else:
                if remaining < 0:
                    raise TimeoutException("Timedout while waiting for platform icon")
                else:
                    remaining -= timeout
                    self.navigate_create_clusters_page()
                    self.refresh_page()
        self.do_click(
            locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
        )
        # Basic credential info: name, namespace, base DNS domain.
        # The form opens in a new browser tab.
        self.do_click(locator=self.acm_page_nav["cc_provider_credentials"], timeout=100)
        parent_tab = self.driver.current_window_handle
        tabs = self.driver.window_handles
        self.driver.switch_to.window(tabs[1])
        self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere"])
        basic_cred_dict = {
            self.acm_page_nav[
                "cc_provider_creds_vsphere_cred_name"
            ]: self.platform_credential_name,
            self.acm_page_nav[
                "cc_provider_creds_vsphere_base_dns"
            ]: f"{self.cluster_conf.ENV_DATA['base_domain']}",
        }
        self.fill_multiple_textbox(basic_cred_dict)
        # Credential namespace is a dropdown, not a text box
        self.do_click(self.acm_page_nav["cc_provider_creds_vsphere_cred_namespace"])
        self.do_click(self.acm_page_nav["cc_provider_creds_default_namespace"])
        self.click_next_button()
        # Detailed VMWare credentials: vCenter server/user/password/root CA,
        # cluster name, datacenter, default datastore.
        with open(VSPHERE_CA_FILE_PATH, "r") as fp:
            vsphere_ca = fp.read()
        vsphere_creds_dict = {
            self.acm_page_nav[
                "cc_provider_creds_vsphere_vcenter_server"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_server']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_username"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_user']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_password"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_password']}",
            self.acm_page_nav["cc_provider_creds_vsphere_rootca"]: f"{vsphere_ca}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_clustername"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_cluster']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_dc"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_datacenter']}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_datastore"
            ]: f"{self.cluster_conf.ENV_DATA['vsphere_datastore']}",
        }
        self.fill_multiple_textbox(vsphere_creds_dict)
        self.click_next_button()
        # Pull secret and SSH public/private keys
        with open(os.path.join(DATA_DIR, "pull-secret"), "r") as fp:
            pull_secret = fp.read()
        ssh_pub_key_path = os.path.expanduser(self.cluster_conf.DEPLOYMENT["ssh_key"])
        ssh_priv_key_path = os.path.expanduser(
            self.cluster_conf.DEPLOYMENT["ssh_key_private"]
        )
        with open(ssh_pub_key_path, "r") as fp:
            ssh_pub_key = fp.read()
        with open(ssh_priv_key_path, "r") as fp:
            ssh_priv_key = fp.read()
        pull_secret_and_ssh = {
            self.acm_page_nav["cc_provider_creds_vsphere_pullsecret"]: f"{pull_secret}",
            self.acm_page_nav[
                "cc_provider_creds_vsphere_ssh_privkey"
            ]: f"{ssh_priv_key}",
            self.acm_page_nav["cc_provider_creds_vsphere_ssh_pubkey"]: f"{ssh_pub_key}",
        }
        self.fill_multiple_textbox(pull_secret_and_ssh)
        self.click_next_button()
        self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere_add_button"])
        # Verify the credential row exists in the Credentials tab
        self.do_click(locator=self.acm_page_nav["Credentials"])
        credential_table_entry = format_locator(
            self.acm_page_nav["cc_table_entry"], self.platform_credential_name
        )
        if not self.check_element_presence(
            (By.XPATH, credential_table_entry[0]), timeout=20
        ):
            raise ACMClusterDeployException("Could not create credentials for vsphere")
        else:
            log.info(
                f"vsphere credential successfully created {self.platform_credential_name}"
            )
        # Assign API/Ingress IPs and DNS records in the target cluster context
        from ocs_ci.deployment import vmware
        prev_ctx = config.cur_index
        config.switch_ctx(self.cluster_conf.MULTICLUSTER["multicluster_index"])
        self.ips = vmware.assign_ips(2)
        vmware.create_dns_records(self.ips)
        config.switch_ctx(prev_ctx)
        # Close the credentials tab and return to the original tab
        self.driver.close()
        self.driver.switch_to.window(parent_tab)
        self.driver.switch_to.default_content()
    def create_cluster(self):
        """Walk the create-cluster wizard (details, node pools, networks,
        proxy, automation, review) and start the deployment.

        Raises:
            ACMClusterDeployException: if deployment fails (sync mode) or the
                cluster never reaches 'Creating' (async mode)
        """
        self.navigate_create_clusters_page()
        self.click_platform_and_credentials()
        self.click_next_button()
        self.fill_cluster_details_page()
        self.click_next_button()
        # Nothing to do on the 'Node Pools' page for now
        self.click_next_button()
        self.fill_network_info()
        self.click_next_button()
        # Skip proxy for now
        self.click_next_button()
        # Skip Automation for now
        self.click_next_button()
        # We are at Review page
        # Click on create
        self.do_click(locator=self.acm_page_nav["cc_create_button"])
        self.deployment_start_time = time.time()
        # We will be redirect to 'Details' page which has cluster deployment progress
        if self.deploy_sync_mode == "sync":
            try:
                self.wait_for_cluster_create()
            except ACMClusterDeployException:
                log.error(
                    f"Failed to create OCP cluster {self.cluster_conf.ENV_DATA['cluster_name']}"
                )
                raise
            # Download kubeconfig and install-config file
            self.download_cluster_conf_files()
        else:
            # Async mode of deployment, so just return to caller
            # we will just wait for status 'Creating' and then return
            if not self.acm_cluster_status_creating(timeout=600):
                raise ACMClusterDeployException(
                    f"Cluster {self.cluster_name} didn't reach 'Creating' phase"
                )
            self.deployment_status = "Creating"
            return
    def fill_network_info(self):
        """Fill vSphere network name plus API and Ingress VIPs."""
        self.vsphere_network = self.cluster_conf.ENV_DATA.get(
            "vm_network", ACM_VSPHERE_NETWORK
        )
        self.do_click(self.acm_page_nav["cc_vsphere_network_name"])
        self.do_send_keys(
            self.acm_page_nav["cc_vsphere_network_name"], self.vsphere_network
        )
        # Chrome may drop whitespace from the typed value ('VM Network' ->
        # 'VMNetwork'); detect the mismatch and re-insert the space.
        ele = self.driver.find_element(
            By.XPATH, self.acm_page_nav["cc_vsphere_network_name"][0]
        )
        remote_text = ele.get_property("value")
        if remote_text != self.vsphere_network:
            # Check for a whitespace char in the network name; move the cursor
            # left to its position and re-insert it
            try:
                index = self.vsphere_network.index(constants.SPACE)
                left_shift_offset = len(remote_text) - index
                self.do_send_keys(
                    self.acm_page_nav["cc_vsphere_network_name"],
                    f"{left_shift_offset*Keys.ARROW_LEFT}{constants.SPACE}",
                )
            except ValueError:
                raise ACMClusterDeployException(
                    "Weird browser behaviour, Not able to provide vsphere network info"
                )
        vsphere_network = {
            self.acm_page_nav["cc_api_vip"]: f"{self.ips[0]}",
            self.acm_page_nav["cc_ingress_vip"]: f"{self.ips[1]}",
        }
        self.fill_multiple_textbox(vsphere_network)
    def fill_cluster_details_page(self):
        """Fill cluster name and OCP release image on the 'Cluster details' page."""
        release_img = self.get_ocp_release_img()
        cluster_details = {
            self.acm_page_nav[
                "cc_cluster_name"
            ]: f"{self.cluster_conf.ENV_DATA['cluster_name']}",
            self.acm_page_nav["cc_openshift_release_image"]: f"{release_img}",
        }
        self.fill_multiple_textbox(cluster_details)
    def get_ocp_release_img(self):
        """Return the OCP release image reference for the configured installer version."""
        vers = expose_ocp_version(self.cluster_conf.DEPLOYMENT["installer_version"])
        return f"{ACM_OCP_RELEASE_IMG_URL_PREFIX}:{vers}"
class ACMOCPDeploymentFactory(object):
    """Resolve and instantiate the ACM deployment class for a cluster config."""

    def __init__(self):
        # Map of "<platform><deployment_type>" -> handler class
        self.platform_map = {"vsphereipi": ACMOCPPlatformVsphereIPI}

    def get_platform_instance(self, driver, cluster_config):
        """
        Args:
            driver: selenium UI driver object
            cluster_config (dict): Cluster Config object
        """
        env = cluster_config.ENV_DATA
        key = "{}{}".format(env["platform"], env["deployment_type"])
        return self.platform_map[key](driver, cluster_config)
| true | true |
f721ec4d816bbef5bb4d08a3d8d876e428dfc432 | 990 | py | Python | python/phonenumbers/shortdata/region_KG.py | ILMServices/python-phonenumbers | 317b0b128162b031e156b9de69ade9a5c8cf4844 | [
"Apache-2.0"
] | 1 | 2015-01-31T01:17:14.000Z | 2015-01-31T01:17:14.000Z | python/phonenumbers/shortdata/region_KG.py | ILMServices/python-phonenumbers | 317b0b128162b031e156b9de69ade9a5c8cf4844 | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/shortdata/region_KG.py | ILMServices/python-phonenumbers | 317b0b128162b031e156b9de69ade9a5c8cf4844 | [
"Apache-2.0"
] | null | null | null | """Auto-generated file, do not edit by hand. KG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for Kyrgyzstan (region code KG): emergency numbers
# 101/102/103 and carrier-specific short code 4040.
# NOTE: this module is auto-generated; regenerate instead of editing patterns.
PHONE_METADATA_KG = PhoneMetadata(id='KG', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_number_pattern='\\d{3,4}'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    emergency=PhoneNumberDesc(national_number_pattern='10[123]', possible_number_pattern='\\d{3}', example_number='101'),
    short_code=PhoneNumberDesc(national_number_pattern='10[123]|4040', possible_number_pattern='\\d{3,4}', example_number='101'),
    standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    carrier_specific=PhoneNumberDesc(national_number_pattern='4040', possible_number_pattern='\\d{4}'),
    short_data=True)
| 76.153846 | 129 | 0.787879 | from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for Kyrgyzstan (region code KG); auto-generated data.
PHONE_METADATA_KG = PhoneMetadata(id='KG', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_number_pattern='\\d{3,4}'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    emergency=PhoneNumberDesc(national_number_pattern='10[123]', possible_number_pattern='\\d{3}', example_number='101'),
    short_code=PhoneNumberDesc(national_number_pattern='10[123]|4040', possible_number_pattern='\\d{3,4}', example_number='101'),
    standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    carrier_specific=PhoneNumberDesc(national_number_pattern='4040', possible_number_pattern='\\d{4}'),
    short_data=True)
| true | true |
f721ec59857ef6e3afbbf06a165c7b74088bc96e | 398 | py | Python | app/model/mail_agent.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | 3 | 2019-09-02T11:26:58.000Z | 2019-12-06T15:54:38.000Z | app/model/mail_agent.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | null | null | null | app/model/mail_agent.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_mail import Message, Mail
from app.lazy import lazy_async
class MailAgent:
    """Thin wrapper around flask_mail.Mail that sends messages asynchronously."""

    def __init__(self, app: Flask):
        self.app = app
        self.mail = Mail(app)

    @lazy_async
    def _async_mail(self, msg: Message):
        # Sending requires an application context when running outside a request
        with self.app.app_context():
            self.mail.send(msg)

    def send_mail(self, msg: Message):
        """Hand *msg* off for asynchronous delivery."""
        self._async_mail(msg)
| 22.111111 | 40 | 0.650754 | from flask import Flask
from flask_mail import Message, Mail
from app.lazy import lazy_async
class MailAgent:
    """Wrapper around flask_mail.Mail that sends messages off the request path."""
    def __init__(self, app: Flask):
        self.mail = Mail(app)
        self.app = app
    @lazy_async
    def _async_mail(self, msg: Message):
        # NOTE(review): @lazy_async presumably defers execution to a worker
        # thread — confirm in app.lazy. An app context is pushed because the
        # send happens outside a request.
        with self.app.app_context():
            self.mail.send(msg)
    def send_mail(self, msg: Message):
        """Public entry point: queue *msg* for delivery via _async_mail."""
        self._async_mail(msg)
| true | true |
f721ec6e3ba1bb95049502e8c6e9d52d6173e217 | 1,059 | py | Python | data_ai/comp3035/finews/spiders/news_spider.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 2 | 2021-04-26T16:37:38.000Z | 2022-03-15T01:26:19.000Z | data_ai/comp3035/finews/spiders/news_spider.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | null | null | null | data_ai/comp3035/finews/spiders/news_spider.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 1 | 2022-03-15T01:26:23.000Z | 2022-03-15T01:26:23.000Z | import scrapy
from os import path
# Output directories (relative to the crawl working directory) where the
# spider saves index pages and individual article pages respectively.
INDEX_PATH = './resources/index'
RECORD_PATH = './resources/record'
class NewsSpider(scrapy.Spider):
    """Crawl the 10jqka fund news list: save every index page under
    INDEX_PATH and every linked article under RECORD_PATH."""

    name = "index"
    start_urls = ['http://fund.10jqka.com.cn/smxw_list/index_1.shtml']

    def parse_record(self, response):
        """Write one article page to RECORD_PATH, named after its URL basename."""
        filename: str = response.url.split('/')[-1]
        with open(path.join(RECORD_PATH, filename), 'wb') as f:
            f.write(response.body)
        self.log(f"Save file (unknown)")

    def parse(self, response):
        """Save the current index page, then follow article links and the next index page."""
        # BUGFIX: the previous lstrip('index_').rstrip('.shtml') stripped
        # *character sets*, not a prefix/suffix, and only worked because page
        # numbers are digits. Parse the number between '_' and '.' explicitly.
        basename = response.url.split('/')[-1]
        page = int(basename.partition('_')[2].partition('.')[0])
        filename = f"index-{page}.shtml"
        with open(path.join(INDEX_PATH, filename), 'wb') as f:
            f.write(response.body)
        self.log(f'Saved file (unknown)')
        for href in response.css('div.listNews > div > h1 > a::attr(href)'):
            yield response.follow(href, self.parse_record)
        next_page = response.css('.nextpage > a::attr(href)').get()
        if next_page is not None:
            yield scrapy.Request(next_page, callback=self.parse)
| 36.517241 | 81 | 0.624174 | import scrapy
from os import path
# Directories where index pages and article pages are saved (relative paths).
INDEX_PATH = './resources/index'
RECORD_PATH = './resources/record'
class NewsSpider(scrapy.Spider):
    """Crawl the 10jqka fund news list, storing index pages and each article."""
    name = "index"
    start_urls = ['http://fund.10jqka.com.cn/smxw_list/index_1.shtml']
    def parse_record(self, response):
        """Write one article page to RECORD_PATH, named after its URL basename."""
        filename: str = response.url.split('/')[-1]
        with open(path.join(RECORD_PATH, filename), 'wb') as f:
            f.write(response.body)
        self.log(f"Save file (unknown)")
    def parse(self, response):
        """Store the index page, then follow article links and the next index page."""
        # NOTE(review): lstrip/rstrip strip *character sets*, not prefixes or
        # suffixes; this only works because page numbers are digits, none of
        # which occur in 'index_' or '.shtml'.
        page = int(response.url.split('/')[-1].lstrip('index_').rstrip('.shtml'))
        filename = f"index-{page}.shtml"
        with open(path.join(INDEX_PATH, filename), 'wb') as f:
            f.write(response.body)
        self.log(f'Saved file (unknown)')
        for href in response.css('div.listNews > div > h1 > a::attr(href)'):
            yield response.follow(href, self.parse_record)
        next_page = response.css('.nextpage > a::attr(href)').get()
        if next_page is not None:
            yield scrapy.Request(next_page, callback=self.parse)
| true | true |
f721ec7ee08947d5e5c3b440d62ccc09e6d6cbce | 105,565 | py | Python | addon/pycThermopack/pyctp/thermo.py | morteham/thermopack | 67deaf74a2ae974e880be25026738cc32e3a6c1e | [
"MIT"
] | 28 | 2020-10-14T07:51:21.000Z | 2022-03-21T04:59:23.000Z | addon/pycThermopack/pyctp/thermo.py | morteham/thermopack | 67deaf74a2ae974e880be25026738cc32e3a6c1e | [
"MIT"
] | 20 | 2020-10-26T11:43:43.000Z | 2022-03-30T22:06:30.000Z | addon/pycThermopack/pyctp/thermo.py | morteham/thermopack | 67deaf74a2ae974e880be25026738cc32e3a6c1e | [
"MIT"
] | 13 | 2020-10-27T13:04:19.000Z | 2022-03-21T04:59:24.000Z | # Support for python2
from __future__ import print_function
import sys
from ctypes import *
from os import path
import numpy as np
from . import plotutils, utils, platform_specifics
# ctypes type used for hidden string-length arguments when calling into the
# Fortran library. NOTE(review): presumably gfortran switched this from int
# to size_t after GCC 7 — confirm against the thermopack build toolchain.
if utils.gcc_major_version_greater_than(7):
    c_len_type = c_size_t  # c_size_t on GCC > 7
else:
    c_len_type = c_int
class thermopack(object):
"""
Interface to thermopack
"""
def __init__(self):
    """
    Load libthermopack.(so/dll) and initialize function pointers
    """
    # Compiler dependent name-mangling pieces used by get_export_name().
    pf_specifics = platform_specifics.get_platform_specifics()
    self.prefix = pf_specifics["prefix"]
    self.module = pf_specifics["module"]
    self.postfix = pf_specifics["postfix"]
    self.postfix_nm = pf_specifics["postfix_no_module"]
    # The shared library ships next to this Python module.
    dyn_lib_path = path.join(path.dirname(__file__), pf_specifics["dyn_lib"])
    self.tp = cdll.LoadLibrary(dyn_lib_path)
    # Set phase flags
    self.s_get_phase_flags = self.tp.get_phase_flags_c
    self.get_phase_flags()
    # Model control
    self.s_add_eos = getattr(self.tp, self.get_export_name("thermopack_var", "add_eos"))
    self.s_delete_eos = getattr(self.tp, self.get_export_name("thermopack_var", "delete_eos"))
    self.s_activate_model = getattr(self.tp, self.get_export_name("thermopack_var", "activate_model"))
    # Information
    self.s_get_model_id = getattr(self.tp, self.get_export_name("thermopack_var", "get_eos_identification"))
    # Init methods
    self.eoslibinit_init_thermo = getattr(self.tp, self.get_export_name("eoslibinit", "init_thermo"))
    # Gas constant (J/mol/K) as used by the FORTRAN library.
    self.Rgas = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "rgas")).value
    # Number of components; set by init_thermo().
    self.nc = None
    # Library-side solver bounds (shared FORTRAN module variables).
    self.minimum_temperature_c = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "tptmin"))
    self.minimum_pressure_c = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "tppmin"))
    self.solideos_solid_init = getattr(self.tp, self.get_export_name("solideos", "solid_init"))
    self.eoslibinit_init_volume_translation = getattr(self.tp, self.get_export_name("eoslibinit", "init_volume_translation"))
    self.eoslibinit_redefine_critical_parameters = getattr(self.tp, self.get_export_name("eoslibinit", "redefine_critical_parameters"))
    # Eos interface
    self.s_eos_specificvolume = getattr(self.tp, self.get_export_name("eos", "specificvolume"))
    self.s_eos_zfac = getattr(self.tp, self.get_export_name("eos", "zfac"))
    self.s_eos_thermo = getattr(self.tp, self.get_export_name("eos", "thermo"))
    self.s_eos_entropy = getattr(self.tp, self.get_export_name("eos", "entropy"))
    self.s_eos_enthalpy = getattr(self.tp, self.get_export_name("eos", "enthalpy"))
    self.s_eos_compmoleweight = getattr(self.tp, self.get_export_name("eos", "compmoleweight"))
    self.s_eos_idealenthalpysingle = getattr(self.tp, self.get_export_name("eos", "idealenthalpysingle"))
    # Speed of sound
    #self.sos_singlePhaseSpeedOfSound = getattr(self.tp, '__speed_of_sound_MOD_singlephasespeedofsound')
    self.s_sos_sound_velocity_2ph = getattr(self.tp, self.get_export_name("speed_of_sound", "sound_velocity_2ph"))
    # Component info
    self.s_compdata_compindex = getattr(self.tp, self.get_export_name("compdata", "comp_index_active"))
    self.s_compdata_compname = getattr(self.tp, self.get_export_name("compdata", "comp_name_active"))
    # Flashes
    self.s_set_ph_tolerance = getattr(self.tp, self.get_export_name("ph_solver", "setphtolerance"))
    self.s_twophasetpflash = getattr(self.tp, self.get_export_name("tp_solver", "twophasetpflash"))
    self.s_psflash_twophase = getattr(self.tp, self.get_export_name("ps_solver", "twophasepsflash"))
    #self.tpflash_multiphase = getattr(self.tp, '__mp_tp_solver_MOD_mp_flash_tp')
    self.s_uvflash_twophase = getattr(self.tp, self.get_export_name("uv_solver", "twophaseuvflash"))
    self.s_phflash_twophase = getattr(self.tp, self.get_export_name("ph_solver", "twophasephflash"))
    #self.s_svflash_twophase = getattr(self.tp, self.get_export_name("sv_solver", "twophasesvflash"))
    self.s_guess_phase = getattr(self.tp, self.get_export_name("thermo_utils", "guessphase"))
    # TV interfaces
    self.s_internal_energy_tv = getattr(self.tp, self.get_export_name("eostv", "internal_energy"))
    self.s_entropy_tv = getattr(self.tp, self.get_export_name("eostv", "entropytv"))
    self.s_pressure_tv = getattr(self.tp, self.get_export_name("eostv", "pressure"))
    self.s_lnphi_tv = getattr(self.tp, self.get_export_name("eostv", "thermotv"))
    self.s_enthalpy_tv = getattr(self.tp, self.get_export_name("eostv", "enthalpytv"))
    self.s_helmholtz_energy = getattr(self.tp, self.get_export_name("eostv", "free_energy"))
    self.s_chempot = getattr(self.tp, self.get_export_name("eostv", "chemical_potential"))
    # Saturation properties
    self.s_bubble_t = getattr(self.tp, self.get_export_name("saturation", "safe_bubt"))
    self.s_bubble_p = getattr(self.tp, self.get_export_name("saturation", "safe_bubp"))
    self.s_dew_t = getattr(self.tp, self.get_export_name("saturation", "safe_dewt"))
    self.s_dew_p = getattr(self.tp, self.get_export_name("saturation", "safe_dewp"))
    self.s_envelope_plot = getattr(self.tp, self.get_export_name("saturation_curve", "envelopeplot"))
    self.s_binary_plot = getattr(self.tp, self.get_export_name("binaryplot", "vllebinaryxy"))
    self.s_global_binary_plot = getattr(self.tp, self.get_export_name("binaryplot", "global_binary_plot"))
    self.s_get_bp_term = getattr(self.tp, self.get_export_name("binaryplot", "get_bp_term"))
    self.s_solid_envelope_plot = getattr(self.tp, self.get_export_name("solid_saturation", "solidenvelopeplot"))
    self.s_isotherm = getattr(self.tp, self.get_export_name("isolines", "isotherm"))
    self.s_isobar = getattr(self.tp, self.get_export_name("isolines", "isobar"))
    self.s_isenthalp = getattr(self.tp, self.get_export_name("isolines", "isenthalp"))
    self.s_isentrope = getattr(self.tp, self.get_export_name("isolines", "isentrope"))
    # Stability
    self.s_crit_tv = getattr(self.tp, self.get_export_name("critical", "calccriticaltv"))
    # Virials
    self.s_virial_coeffcients = getattr(self.tp, self.get_export_name("eostv", "virial_coefficients"))
    self.s_second_virial_matrix = getattr(self.tp, self.get_export_name("eostv", "secondvirialcoeffmatrix"))
    self.s_binary_third_virial_matrix = getattr(self.tp, self.get_export_name("eostv", "binarythirdvirialcoeffmatrix"))
    # Allocate FORTRAN-side state for this instance.
    self.add_eos()
def __del__(self):
    """Delete FORTRAN memory allocated for this instance"""
    self.delete_eos()
def activate(self):
    """Activate this instance of thermopack parameters for calculation"""
    # Select this instance's model on the (global) FORTRAN side; every
    # property method calls this first.
    self.s_activate_model.argtypes = [POINTER(c_int)]
    self.s_activate_model.restype = None
    self.s_activate_model(self.model_index_c)
def add_eos(self):
    """Allocate FORTRAN memory for this class instance"""
    self.s_add_eos.argtypes = None
    self.s_add_eos.restype = c_int
    # The returned index identifies this instance's model on the FORTRAN side.
    self.model_index_c = c_int(self.s_add_eos())
def delete_eos(self):
    """de-allocate FORTRAN memory for this class instance"""
    # Must activate first so the library frees *this* instance's model.
    self.activate()
    self.s_delete_eos.argtypes = [POINTER(c_int)]
    self.s_delete_eos.restype = None
    self.s_delete_eos(self.model_index_c)
def get_model_id(self):
    """Get model identification

    Returns:
        str: Eos name
    """
    self.activate()
    # Fixed-size character buffer filled by FORTRAN; length passed as a
    # hidden trailing argument.
    eosid_len = 40
    eosid_c = c_char_p(b" " * eosid_len)
    eosid_len_c = c_len_type(eosid_len)
    self.s_get_model_id.argtypes = [c_char_p, c_len_type]
    self.s_get_model_id.restype = None
    self.s_get_model_id(eosid_c, eosid_len_c)
    eosid = eosid_c.value.decode('ascii').strip()
    return eosid
def get_export_name(self, module, method):
    """Generate library export name based on module and method name

    Args:
        module (str): Name of module (empty string for module-less symbols)
        method (str): Name of method

    Returns:
        str: Library export name
    """
    # Module-less symbols use the simpler no-module mangling scheme.
    if not module:
        return method + self.postfix_nm
    return "".join((self.prefix, module, self.module, method, self.postfix))
#################################
# Init
#################################
def init_thermo(self, eos, mixing, alpha, comps, nphases,
                liq_vap_discr_method=None, csp_eos=None, csp_ref_comp=None,
                kij_ref="Default", alpha_ref="Default", saft_ref="Default",
                b_exponent=None, TrendEosForCp=None, cptype=None,
                silent=None):
    """Initialize thermopack

    Args:
        eos (str): Equation of state
        mixing (str): Mixture model for cubic eos
        alpha (str): Alpha formulations for cubic EOS
        comps (string): Comma separated list of components
        nphases (int): Maximum number of phases considered during multi-phase flash calculations
        liq_vap_discr_method (int, optional): Method to discriminate between liquid and vapor in case of an undefined single phase. Defaults to None.
        csp_eos (str, optional): Corresponding state equation. Defaults to None.
        csp_ref_comp (str, optional): CSP reference component. Defaults to None.
        kij_ref (str, optional): Data set identifiers. Defaults to "Default".
        alpha_ref (str, optional): Data set identifiers. Defaults to "Default".
        saft_ref (str, optional): Data set identifiers. Defaults to "Default".
        b_exponent (float, optional): Exponent used in co-volume mixing. Defaults to None.
        TrendEosForCp (str, optional): Option to init trend for ideal gas properties. Defaults to None.
        cptype (int array, optional): Equation type number for Cp. Defaults to None.
        silent (bool, optional): Suppress messages during init?. Defaults to None.
    """
    self.activate()
    # Count components; accepts either space or comma separated lists.
    self.nc = max(len(comps.split(" ")), len(comps.split(",")))
    # NULL pointer marks an optional argument as "not present" to FORTRAN.
    null_pointer = POINTER(c_int)()
    eos_c = c_char_p(eos.encode('ascii'))
    eos_len = c_len_type(len(eos))
    mixing_c = c_char_p(mixing.encode('ascii'))
    mixing_len = c_len_type(len(mixing))
    alpha_c = c_char_p(alpha.encode('ascii'))
    alpha_len = c_len_type(len(alpha))
    comp_string_c = c_char_p(comps.encode('ascii'))
    comp_string_len = c_len_type(len(comps))
    nphases_c = c_int(nphases)
    if liq_vap_discr_method is None:
        liq_vap_discr_method_c = null_pointer
    else:
        liq_vap_discr_method_c = POINTER(c_int)(c_int(liq_vap_discr_method))
    # Optional strings are passed as NULL c_char_p with zero length.
    if csp_eos is None:
        csp_eos_c = c_char_p()
        csp_eos_len = c_len_type(0)
    else:
        csp_eos_c = c_char_p(csp_eos.encode('ascii'))
        csp_eos_len = c_len_type(len(csp_eos))
    if csp_ref_comp is None:
        csp_ref_comp_c = c_char_p()
        csp_ref_comp_len = c_len_type(0)
    else:
        csp_ref_comp_c = c_char_p(csp_ref_comp.encode('ascii'))
        csp_ref_comp_len = c_len_type(len(csp_ref_comp))
    kij_ref_len = c_len_type(len(kij_ref))
    kij_ref_c = c_char_p(kij_ref.encode('ascii'))
    alpha_ref_len = c_len_type(len(alpha_ref))
    alpha_ref_c = c_char_p(alpha_ref.encode('ascii'))
    saft_ref_len = c_len_type(len(saft_ref))
    saft_ref_c = c_char_p(saft_ref.encode('ascii'))
    if b_exponent is None:
        b_exponent_c = POINTER(c_double)()
    else:
        b_exponent_c = POINTER(c_double)(c_double(b_exponent))
    if TrendEosForCp is None:
        TrendEosForCp_c = c_char_p()
        TrendEosForCp_len = c_len_type(0)
    else:
        TrendEosForCp_c = c_char_p(TrendEosForCp.encode('ascii'))
        TrendEosForCp_len = c_len_type(len(TrendEosForCp))
    if cptype is None:
        cptype_c = null_pointer
    else:
        cptype_c = (c_int * self.nc)(*cptype)
    if silent is None:
        silent_c = null_pointer
    else:
        # Library expects an integer flag, not a logical.
        if silent:
            silent_int = 1
        else:
            silent_int = 0
        silent_c = POINTER(c_int)(c_int(silent_int))
    # Hidden string lengths are appended after all regular arguments,
    # in declaration order.
    self.eoslibinit_init_thermo.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p,
                                            POINTER(c_int), POINTER(c_int),
                                            c_char_p, c_char_p, c_char_p, c_char_p, c_char_p,
                                            POINTER(c_double), c_char_p,
                                            POINTER(c_int), POINTER(c_int),
                                            c_len_type, c_len_type,
                                            c_len_type, c_len_type,
                                            c_len_type, c_len_type,
                                            c_len_type, c_len_type,
                                            c_len_type, c_len_type]
    self.eoslibinit_init_thermo.restype = None
    self.eoslibinit_init_thermo(eos_c, mixing_c, alpha_c, comp_string_c,
                                byref(nphases_c), liq_vap_discr_method_c,
                                csp_eos_c, csp_ref_comp_c,
                                kij_ref_c, alpha_ref_c, saft_ref_c,
                                b_exponent_c, TrendEosForCp_c,
                                cptype_c, silent_c,
                                eos_len, mixing_len, alpha_len, comp_string_len,
                                csp_eos_len, csp_ref_comp_len,
                                kij_ref_len, alpha_ref_len, saft_ref_len,
                                TrendEosForCp_len)
def init_peneloux_volume_translation(self, parameter_reference="Default"):
    """Initialize Peneloux volume translations

    Args:
        parameter_reference (str): String defining parameter set, Defaults to "Default"
    """
    self.activate()
    volume_trans_model = "PENELOUX"
    volume_trans_model_c = c_char_p(volume_trans_model.encode('ascii'))
    volume_trans_model_len = c_len_type(len(volume_trans_model))
    ref_string_c = c_char_p(parameter_reference.encode('ascii'))
    ref_string_len = c_len_type(len(parameter_reference))
    # Hidden string lengths trail the regular arguments.
    self.eoslibinit_init_volume_translation.argtypes = [c_char_p, c_char_p,
                                                        c_len_type, c_len_type]
    self.eoslibinit_init_volume_translation.restype = None
    self.eoslibinit_init_volume_translation(volume_trans_model_c, ref_string_c,
                                            volume_trans_model_len, ref_string_len)
def redefine_critical_parameters(self, silent=True):
    """Recalculate critical properties of pure fluids

    Args:
        silent (bool): Ignore warnings? Defaults to True
    """
    self.activate()
    # FORTRAN side expects an integer flag: 1 = silent, 0 = verbose.
    silent_c = c_int(1 if silent else 0)
    self.eoslibinit_redefine_critical_parameters.argtypes = [POINTER(c_int)]
    self.eoslibinit_redefine_critical_parameters.restype = None
    self.eoslibinit_redefine_critical_parameters(byref(silent_c))
#################################
# Solids
#################################
def init_solid(self, scomp):
    """Initialize pure solid

    Args:
        scomp (str): Component name
    """
    self.activate()
    scomp_c = c_char_p(scomp.encode('ascii'))
    scomp_len = c_len_type(len(scomp))
    self.solideos_solid_init.argtypes = [c_char_p, c_len_type]
    self.solideos_solid_init.restype = None
    self.solideos_solid_init(scomp_c, scomp_len)
#################################
# Utility
#################################
def getcompindex(self, comp):
    """Get component index

    Args:
        comp (str): Component name

    Returns:
        int: Component FORTRAN index (1-based; see get_comp_name)
    """
    self.activate()
    comp_c = c_char_p(comp.encode('ascii'))
    comp_len = c_len_type(len(comp))
    self.s_compdata_compindex.argtypes = [c_char_p, c_len_type]
    self.s_compdata_compindex.restype = c_int
    idx = self.s_compdata_compindex(comp_c, comp_len)
    return idx
def get_comp_name(self, index):
    """Get component name

    Args:
        index (int): Component FORTRAN index

    Returns:
        str: Component name
    """
    self.activate()
    # Fixed-size buffer filled by the library; trailing blanks stripped.
    comp_len = 40
    comp_c = c_char_p(b" " * comp_len)
    comp_len_c = c_len_type(comp_len)
    index_c = c_int(index)
    self.s_compdata_compname.argtypes = [POINTER(c_int), c_char_p, c_len_type]
    self.s_compdata_compname.restype = None
    self.s_compdata_compname(byref(index_c), comp_c, comp_len_c)
    compname = comp_c.value.decode('ascii').strip()
    return compname
def compmoleweight(self, comp):
    """Get component mole weight (g/mol)

    Args:
        comp (int): Component FORTRAN index

    Returns:
        float: Component mole weight (g/mol)
    """
    self.activate()
    comp_c = c_int(comp)
    self.s_eos_compmoleweight.argtypes = [POINTER(c_int)]
    self.s_eos_compmoleweight.restype = c_double
    mw_i = self.s_eos_compmoleweight(byref(comp_c))
    return mw_i
def get_phase_flags(self):
    """Get phase identifiers used by thermopack and store them as
    instance attributes (TWOPH, LIQPH, VAPPH, MINGIBBSPH, SINGLEPH,
    SOLIDPH, FAKEPH).
    """
    iTWOPH = c_int()
    iLIQPH = c_int()
    iVAPPH = c_int()
    iMINGIBBSPH = c_int()
    iSINGLEPH = c_int()
    iSOLIDPH = c_int()
    iFAKEPH = c_int()
    self.s_get_phase_flags.argtypes = [POINTER(c_int), POINTER(c_int),
                                       POINTER(c_int), POINTER(c_int),
                                       POINTER(c_int), POINTER(c_int),
                                       POINTER(c_int)]
    self.s_get_phase_flags.restype = None
    # All flags are output arguments filled by the library.
    self.s_get_phase_flags(byref(iTWOPH),
                           byref(iLIQPH),
                           byref(iVAPPH),
                           byref(iMINGIBBSPH),
                           byref(iSINGLEPH),
                           byref(iSOLIDPH),
                           byref(iFAKEPH))
    self.TWOPH = iTWOPH.value
    self.LIQPH = iLIQPH.value
    self.VAPPH = iVAPPH.value
    self.MINGIBBSPH = iMINGIBBSPH.value
    self.SINGLEPH = iSINGLEPH.value
    self.SOLIDPH = iSOLIDPH.value
    self.FAKEPH = iFAKEPH.value
def get_phase_type(self, i_phase):
    """Get phase type

    Args:
        i_phase (int): Phase flag returned by thermopack

    Returns:
        str: Phase type
    """
    # Index directly into the tuple of names; order mirrors the flag values.
    return ("TWO_PHASE", "LIQUID", "VAPOR", "MINIMUM_GIBBS",
            "SINGLE", "SOLID", "FAKE")[i_phase]
def set_tmin(self, temp):
    """Set minimum temperature used to limit the search domain of the
    numerical solvers in Thermopack.

    Args:
        temp (float): Temperature (K)
    """
    # Writes straight into the shared FORTRAN module variable.
    self.minimum_temperature_c.value = temp
def get_tmin(self):
    """Get minimum temperature used to limit the search domain of the
    numerical solvers in Thermopack.

    Returns:
        float: Temperature (K)
    """
    # Reads straight from the shared FORTRAN module variable.
    return self.minimum_temperature_c.value
def set_pmin(self, press):
    """Set minimum pressure used to limit the search domain of the
    numerical solvers in Thermopack.

    Args:
        press (float): Pressure (Pa)
    """
    # Writes straight into the shared FORTRAN module variable.
    self.minimum_pressure_c.value = press
#################################
# Phase properties
#################################
def specific_volume(self, temp, press, x, phase, dvdt=None, dvdp=None, dvdn=None):
    """ Calculate single-phase specific volume
    Note that the order of the output match the default order of input for the differentials.
    Note further that dvdt, dvdp and dvdn only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Molar composition
        phase (int): Calculate root for specified phase
        dvdt (logical, optional): Calculate volume differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dvdp (logical, optional): Calculate volume differentials with respect to pressure while temperature and composition are held constant. Defaults to None.
        dvdn (logical, optional): Calculate volume differentials with respect to mol numbers while pressure and temperature are held constant. Defaults to None.

    Returns:
        float: Specific volume (m3/mol), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    phase_c = c_int(phase)
    v_c = c_double(0.0)
    if dvdt is None:
        dvdt_c = null_pointer
    else:
        dvdt_c = POINTER(c_double)(c_double(0.0))
    if dvdp is None:
        dvdp_c = null_pointer
    else:
        dvdp_c = POINTER(c_double)(c_double(0.0))
    if dvdn is None:
        dvdn_c = null_pointer
    else:
        dvdn_c = (c_double * len(x))(0.0)
    self.s_eos_specificvolume.argtypes = [POINTER(c_double), POINTER(c_double),
                                          POINTER(c_double), POINTER(c_int),
                                          POINTER(c_double), POINTER(c_double),
                                          POINTER(c_double), POINTER(c_double)]
    self.s_eos_specificvolume.restype = None
    self.s_eos_specificvolume(byref(temp_c), byref(press_c), x_c, byref(phase_c),
                              byref(v_c), dvdt_c, dvdp_c, dvdn_c)
    # Assemble outputs in the same order as the differential flags.
    return_tuple = (v_c.value, )
    if not dvdt is None:
        return_tuple += (dvdt_c[0], )
    if not dvdp is None:
        return_tuple += (dvdp_c[0], )
    if not dvdn is None:
        return_tuple += (np.array(dvdn_c), )
    return return_tuple
def zfac(self, temp, press, x, phase, dzdt=None, dzdp=None, dzdn=None):
    """ Calculate single-phase compressibility
    Note that the order of the output match the default order of input for the differentials.
    Note further that dzdt, dzdp and dzdn only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Molar composition
        phase (int): Calculate root for specified phase
        dzdt (logical, optional): Calculate compressibility differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dzdp (logical, optional): Calculate compressibility differentials with respect to pressure while temperature and composition are held constant. Defaults to None.
        dzdn (logical, optional): Calculate compressibility differentials with respect to mol numbers while pressure and temperature are held constant. Defaults to None.

    Returns:
        float: Compressibility (-), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    phase_c = c_int(phase)
    z_c = c_double(0.0)
    if dzdt is None:
        dzdt_c = null_pointer
    else:
        dzdt_c = POINTER(c_double)(c_double(0.0))
    if dzdp is None:
        dzdp_c = null_pointer
    else:
        dzdp_c = POINTER(c_double)(c_double(0.0))
    if dzdn is None:
        dzdn_c = null_pointer
    else:
        dzdn_c = (c_double * len(x))(0.0)
    self.s_eos_zfac.argtypes = [POINTER(c_double), POINTER(c_double),
                                POINTER(c_double), POINTER(c_int),
                                POINTER(c_double), POINTER(c_double),
                                POINTER(c_double), POINTER(c_double)]
    self.s_eos_zfac.restype = None
    self.s_eos_zfac(byref(temp_c), byref(press_c), x_c, byref(phase_c),
                    byref(z_c), dzdt_c, dzdp_c, dzdn_c)
    # Assemble outputs in the same order as the differential flags.
    return_tuple = (z_c.value, )
    if not dzdt is None:
        return_tuple += (dzdt_c[0], )
    if not dzdp is None:
        return_tuple += (dzdp_c[0], )
    if not dzdn is None:
        return_tuple += (np.array(dzdn_c), )
    return return_tuple
def thermo(self, temp, press, x, phase, dlnfugdt=None, dlnfugdp=None,
           dlnfugdn=None, ophase=None, v=None):
    """ Calculate logarithm of fugacity coefficient given composition,
    temperature and pressure.
    Note that the order of the output match the default order of input for the differentials.
    Note further that dlnfugdt, dlnfugdp, dlnfugdn and ophase only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Molar composition (.)
        phase (int): Calculate root for specified phase
        dlnfugdt (logical, optional): Calculate fugacity coefficient differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dlnfugdp (logical, optional): Calculate fugacity coefficient differentials with respect to pressure while temperature and composition are held constant. Defaults to None.
        dlnfugdn (logical, optional): Calculate fugacity coefficient differentials with respect to mol numbers while pressure and temperature are held constant. Defaults to None.
        ophase (int, optional): Phase flag. Only set when phase=MINGIBBSPH.
        v (float, optional): Specific volume (m3/mol)

    Returns:
        ndarray: fugacity coefficient (-), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    phase_c = c_int(phase)
    lnfug_c = (c_double * len(x))(0.0)
    if dlnfugdt is None:
        dlnfugdt_c = null_pointer
    else:
        dlnfugdt_c = (c_double * len(x))(0.0)
    if dlnfugdp is None:
        dlnfugdp_c = null_pointer
    else:
        dlnfugdp_c = (c_double * len(x))(0.0)
    if dlnfugdn is None:
        dlnfugdn_c = null_pointer
    else:
        # Flat nc*nc buffer; unpacked below.
        dlnfugdn_c = (c_double * len(x)**2)(0.0)
    if ophase is None:
        ophase_c = POINTER(c_int)()
    else:
        ophase_c = POINTER(c_int)(c_int(0))
    # Not exposed through this interface; always passed as NULL.
    metaExtremum_c = POINTER(c_int)()
    if v is None:
        v_c = null_pointer
    else:
        v_c = POINTER(c_double)(c_double(0.0))
    self.s_eos_thermo.argtypes = [POINTER(c_double), POINTER(c_double),
                                  POINTER(c_double), POINTER(c_int),
                                  POINTER(c_double), POINTER(c_double),
                                  POINTER(c_double), POINTER(c_double),
                                  POINTER(c_int), POINTER(c_int),
                                  POINTER(c_double)]
    self.s_eos_thermo.restype = None
    self.s_eos_thermo(byref(temp_c), byref(press_c), x_c, byref(phase_c),
                      lnfug_c, dlnfugdt_c, dlnfugdp_c, dlnfugdn_c,
                      ophase_c, metaExtremum_c, v_c)
    return_tuple = (np.array(lnfug_c), )
    if not dlnfugdt is None:
        return_tuple += (np.array(dlnfugdt_c), )
    if not dlnfugdp is None:
        return_tuple += (np.array(dlnfugdp_c), )
    if not dlnfugdn is None:
        # Element (i,j) is stored at flat index i + j*nc (column-major).
        dlnfugdn_r = np.zeros((len(x), len(x)))
        for i in range(len(x)):
            for j in range(len(x)):
                dlnfugdn_r[i][j] = dlnfugdn_c[i+j*len(x)]
        return_tuple += (dlnfugdn_r, )
    if not ophase is None:
        return_tuple += (ophase_c[0], )
    if not v is None:
        return_tuple += (v_c[0], )
    return return_tuple
def enthalpy(self, temp, press, x, phase, dhdt=None, dhdp=None, dhdn=None):
    """ Calculate specific single-phase enthalpy
    Note that the order of the output match the default order of input for the differentials.
    Note further that dhdt, dhdp and dhdn only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Molar composition
        phase (int): Calculate root for specified phase
        dhdt (logical, optional): Calculate enthalpy differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dhdp (logical, optional): Calculate enthalpy differentials with respect to pressure while temperature and composition are held constant. Defaults to None.
        dhdn (logical, optional): Calculate enthalpy differentials with respect to mol numbers while pressure and temperature are held constant. Defaults to None.

    Returns:
        float: Specific enthalpy (J/mol), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    phase_c = c_int(phase)
    h_c = c_double(0.0)
    if dhdt is None:
        dhdt_c = null_pointer
    else:
        dhdt_c = POINTER(c_double)(c_double(0.0))
    if dhdp is None:
        dhdp_c = null_pointer
    else:
        dhdp_c = POINTER(c_double)(c_double(0.0))
    if dhdn is None:
        dhdn_c = null_pointer
    else:
        dhdn_c = (c_double * len(x))(0.0)
    # Optional "residual" flag not exposed; passed as NULL.
    residual_c = POINTER(c_int)()
    self.s_eos_enthalpy.argtypes = [POINTER(c_double), POINTER(c_double),
                                    POINTER(c_double), POINTER(c_int),
                                    POINTER(c_double), POINTER(c_double),
                                    POINTER(c_double), POINTER(c_double),
                                    POINTER(c_int)]
    self.s_eos_enthalpy.restype = None
    self.s_eos_enthalpy(byref(temp_c), byref(press_c), x_c, byref(phase_c),
                        byref(h_c), dhdt_c, dhdp_c, dhdn_c, residual_c)
    return_tuple = (h_c.value, )
    if not dhdt is None:
        return_tuple += (dhdt_c[0], )
    if not dhdp is None:
        return_tuple += (dhdp_c[0], )
    if not dhdn is None:
        return_tuple += (np.array(dhdn_c), )
    return return_tuple
def entropy(self, temp, press, x, phase, dsdt=None, dsdp=None, dsdn=None):
    """ Calculate specific single-phase entropy
    Note that the order of the output match the default order of input for the differentials.
    Note further that dsdt, dsdp and dsdn only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Molar composition
        phase (int): Calculate root for specified phase
        dsdt (logical, optional): Calculate entropy differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dsdp (logical, optional): Calculate entropy differentials with respect to pressure while temperature and composition are held constant. Defaults to None.
        dsdn (logical, optional): Calculate entropy differentials with respect to mol numbers while pressure and temperature are held constant. Defaults to None.

    Returns:
        float: Specific entropy (J/mol/K), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    phase_c = c_int(phase)
    s_c = c_double(0.0)
    if dsdt is None:
        dsdt_c = null_pointer
    else:
        dsdt_c = POINTER(c_double)(c_double(0.0))
    if dsdp is None:
        dsdp_c = null_pointer
    else:
        dsdp_c = POINTER(c_double)(c_double(0.0))
    if dsdn is None:
        dsdn_c = null_pointer
    else:
        dsdn_c = (c_double * len(x))(0.0)
    # Optional "residual" flag not exposed; passed as NULL.
    residual_c = POINTER(c_int)()
    self.s_eos_entropy.argtypes = [POINTER(c_double), POINTER(c_double),
                                   POINTER(c_double), POINTER(c_int),
                                   POINTER(c_double), POINTER(c_double),
                                   POINTER(c_double), POINTER(c_double),
                                   POINTER(c_int)]
    self.s_eos_entropy.restype = None
    self.s_eos_entropy(byref(temp_c), byref(press_c), x_c, byref(phase_c),
                       byref(s_c), dsdt_c, dsdp_c, dsdn_c, residual_c)
    return_tuple = (s_c.value, )
    if not dsdt is None:
        return_tuple += (dsdt_c[0], )
    if not dsdp is None:
        return_tuple += (dsdp_c[0], )
    if not dsdn is None:
        return_tuple += (np.array(dsdn_c), )
    return return_tuple
def idealenthalpysingle(self, temp, press, j, dhdt=None, dhdp=None):
    """ Calculate specific ideal enthalpy of a single component
    Note that the order of the output match the default order of input for the differentials.
    Note further that dhdt, and dhdp only are flags to enable calculation.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        j (int): Component FORTRAN index
        dhdt (logical, optional): Calculate ideal enthalpy differentials with respect to temperature while pressure and composition are held constant. Defaults to None.
        dhdp (logical, optional): Calculate ideal enthalpy differentials with respect to pressure while temperature and composition are held constant. Defaults to None.

    Returns:
        float: Specific ideal enthalpy (J/mol), and optionally differentials
    """
    self.activate()
    # NULL pointer tells FORTRAN a differential is not requested.
    null_pointer = POINTER(c_double)()
    temp_c = c_double(temp)
    press_c = c_double(press)
    j_c = c_int(j)
    h_c = c_double(0.0)
    if dhdt is None:
        dhdt_c = null_pointer
    else:
        dhdt_c = POINTER(c_double)(c_double(0.0))
    if dhdp is None:
        dhdp_c = null_pointer
    else:
        dhdp_c = POINTER(c_double)(c_double(0.0))
    self.s_eos_idealenthalpysingle.argtypes = [POINTER(c_double), POINTER(c_double),
                                               POINTER(c_int), POINTER(c_double),
                                               POINTER(c_double), POINTER(c_double)]
    self.s_eos_idealenthalpysingle.restype = None
    self.s_eos_idealenthalpysingle(byref(temp_c), byref(press_c), byref(j_c),
                                   byref(h_c), dhdt_c, dhdp_c)
    return_tuple = (h_c.value, )
    if not dhdt is None:
        return_tuple += (dhdt_c[0], )
    if not dhdp is None:
        return_tuple += (dhdp_c[0], )
    return return_tuple
def speed_of_sound(self, temp, press, x, y, z, betaV, betaL, phase):
    """Calculate speed of sound for single phase or two phase mixture assuming
    mechanical, thermal and chemical equilibrium.

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (array_like): Liquid molar composition
        y (array_like): Gas molar composition
        z (array_like): Overall molar composition
        betaV (float): Molar gas phase fraction
        betaL (float): Molar liquid phase fraction
        phase (int): Calculate root for specified phase

    Returns:
        float: Speed of sound (m/s)
    """
    self.activate()
    temp_c = c_double(temp)
    press_c = c_double(press)
    x_c = (c_double * len(x))(*x)
    y_c = (c_double * len(y))(*y)
    z_c = (c_double * len(z))(*z)
    betaV_c = c_double(betaV)
    betaL_c = c_double(betaL)
    phase_c = c_int(phase)
    # Optional output phase flag; not exposed, passed as NULL.
    ph_c = POINTER(c_int)()
    self.s_sos_sound_velocity_2ph.argtypes = [POINTER(c_double), POINTER(c_double),
                                              POINTER(c_double), POINTER(c_double),
                                              POINTER(c_double), POINTER(c_double),
                                              POINTER(c_double), POINTER(c_int),
                                              POINTER(c_int)]
    self.s_sos_sound_velocity_2ph.restype = c_double
    sos = self.s_sos_sound_velocity_2ph(byref(temp_c), byref(press_c),
                                        x_c, y_c, z_c,
                                        byref(betaV_c), byref(betaL_c),
                                        byref(phase_c), ph_c)
    return sos
#################################
# Flash interfaces
#################################
def set_ph_tolerance(self, tol):
    """Set tolerance of isobaric-isenthalpic (PH) flash

    Args:
        tol (float): Tolerance
    """
    tol_c = c_double(tol)
    self.s_set_ph_tolerance.argtypes = [POINTER(c_double)]
    self.s_set_ph_tolerance.restype = None
    self.s_set_ph_tolerance(byref(tol_c))
def two_phase_tpflash(self, temp, press, z):
    """Do isothermal-isobaric (TP) flash

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        z (array_like): Overall molar composition

    Returns:
        x (ndarray): Liquid molar composition
        y (ndarray): Gas molar composition
        betaV (float): Molar gas phase fraction
        betaL (float): Molar liquid phase fraction
        phase (int): Phase identifier (iTWOPH/iLIQPH/iVAPPH)
    """
    self.activate()
    temp_c = c_double(temp)
    press_c = c_double(press)
    z_c = (c_double * len(z))(*z)
    # Output buffers filled by the FORTRAN solver.
    x_c = (c_double * len(z))(0.0)
    y_c = (c_double * len(z))(0.0)
    betaV_c = c_double(0.0)
    betaL_c = c_double(0.0)
    phase_c = c_int(0)
    self.s_twophasetpflash.argtypes = [POINTER(c_double), POINTER(c_double),
                                       POINTER(c_double), POINTER(c_double),
                                       POINTER(c_double), POINTER(c_int),
                                       POINTER(c_double), POINTER(c_double)]
    self.s_twophasetpflash.restype = None
    self.s_twophasetpflash(byref(temp_c), byref(press_c), z_c,
                           byref(betaV_c), byref(betaL_c), byref(phase_c),
                           x_c, y_c)
    x = np.array(x_c)
    y = np.array(y_c)
    return x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_psflash(self, press, z, entropy, temp=None):
    """Do isentropic-isobaric (SP) flash

    Args:
        press (float): Pressure (Pa)
        z (array_like): Overall molar composition
        entropy (float): Specific entropy (J/mol/K)
        temp (float, optional): Initial guess for temperature (K)

    Returns:
        temp (float): Temperature (K)
        x (ndarray): Liquid molar composition
        y (ndarray): Gas molar composition
        betaV (float): Molar gas phase fraction
        betaL (float): Molar liquid phase fraction
        phase (int): Phase identifier (iTWOPH/iLIQPH/iVAPPH)

    Raises:
        Exception: If the flash solver does not converge.
    """
    self.activate()
    press_c = c_double(press)
    z_c = (c_double * len(z))(*z)
    s_c = c_double(entropy)
    # temp is input (initial guess, optional) and output.
    if not temp is None:
        temp_c = POINTER(c_double)(c_double(temp))
    else:
        temp_c = POINTER(c_double)(c_double(0.0))
    # Output buffers filled by the FORTRAN solver.
    x_c = (c_double * len(z))(0.0)
    y_c = (c_double * len(z))(0.0)
    betaV_c = c_double(0.0)
    betaL_c = c_double(0.0)
    phase_c = c_int(0)
    ierr_c = c_int(0)
    self.s_psflash_twophase.argtypes = [POINTER(c_double), POINTER(c_double),
                                        POINTER(c_double), POINTER(c_double),
                                        POINTER(c_double), POINTER(c_double),
                                        POINTER(c_double), POINTER(c_double),
                                        POINTER(c_int), POINTER(c_int)]
    self.s_psflash_twophase.restype = None
    self.s_psflash_twophase(temp_c, byref(press_c), z_c,
                            byref(betaV_c), byref(betaL_c),
                            x_c, y_c, byref(s_c),
                            byref(phase_c), byref(ierr_c))
    if ierr_c.value != 0:
        # Fixed typo in the error message ("calclualtion") and report the
        # library error code for easier debugging.
        raise Exception(f"PS flash calculation failed (error code {ierr_c.value})")
    x = np.array(x_c)
    y = np.array(y_c)
    return temp_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_phflash(self, press, z, enthalpy, temp=None):
    """Do isenthalpic-isobaric (HP) flash

    Args:
        press (float): Pressure (Pa)
        z (array_like): Overall molar composition
        enthalpy (float): Specific enthalpy (J/mol)
        temp (float, optional): Initial guess for temperature (K)

    Raises:
        Exception: PH flash calculation did not converge

    Returns:
        temp (float): Temperature (K)
        x (ndarray): Liquid molar composition
        y (ndarray): Gas molar composition
        betaV (float): Molar gas phase fraction
        betaL (float): Molar liquid phase fraction
        phase (int): Phase identifier (iTWOPH/iLIQPH/iVAPPH)
    """
    self.activate()
    press_c = c_double(press)
    z_c = (c_double * len(z))(*z)
    h_c = c_double(enthalpy)
    # Temperature is an in/out argument: initial guess on input
    # (0.0 lets the library choose), solution on output.
    if temp is not None:
        temp_c = POINTER( c_double )(c_double(temp))
    else:
        temp_c = POINTER( c_double )(c_double(0.0))
    x_c = (c_double * len(z))(0.0)
    y_c = (c_double * len(z))(0.0)
    betaV_c = c_double(0.0)
    betaL_c = c_double(0.0)
    phase_c = c_int(0)
    ierr_c = c_int(0)
    self.s_phflash_twophase.argtypes = [POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_int ),
                                        POINTER( c_int )]
    self.s_phflash_twophase.restype = None
    self.s_phflash_twophase(temp_c,
                            byref(press_c),
                            z_c,
                            byref(betaV_c),
                            byref(betaL_c),
                            x_c,
                            y_c,
                            byref(h_c),
                            byref(phase_c),
                            byref(ierr_c))
    if ierr_c.value != 0:
        # Fixed typo in the user-facing message ("calclualtion")
        raise Exception("PH flash calculation failed")
    x = np.array(x_c)
    y = np.array(y_c)
    return temp_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_uvflash(self, z, specific_energy, specific_volume, temp=None, press=None):
    """Do isoenergetic-isochoric (UV) flash

    Args:
        z (array_like): Overall molar composition
        specific_energy (float): Specific energy (J/mol)
        specific_volume (float): Specific volume (m3/mol)
        temp (float, optional): Initial guess for temperature (K)
        press (float, optional): Initial guess for pressure (Pa)

    Returns:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        x (ndarray): Liquid molar composition
        y (ndarray): Gas molar composition
        betaV (float): Molar gas phase fraction
        betaL (float): Molar liquid phase fraction
        phase (int): Phase identifier (iTWOPH/iLIQPH/iVAPPH)
    """
    self.activate()
    z_c = (c_double * len(z))(*z)
    e_c = c_double(specific_energy)
    v_c = c_double(specific_volume)
    # Temperature and pressure are in/out arguments: initial guess on
    # input (0.0 lets the library choose), solution on output.
    if temp is not None:
        temp_c = POINTER( c_double )(c_double(temp))
    else:
        temp_c = POINTER( c_double )(c_double(0.0))
    if press is not None:
        press_c = POINTER( c_double )(c_double(press))
    else:
        press_c = POINTER( c_double )(c_double(0.0))
    x_c = (c_double * len(z))(0.0)
    y_c = (c_double * len(z))(0.0)
    betaV_c = c_double(0.0)
    betaL_c = c_double(0.0)
    phase_c = c_int(0)
    self.s_uvflash_twophase.argtypes = [POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_int )]
    # Explicit restype as in the sibling flash wrappers: the routine
    # returns nothing (default restype would be c_int).
    self.s_uvflash_twophase.restype = None
    self.s_uvflash_twophase(temp_c,
                            press_c,
                            z_c,
                            byref(betaV_c),
                            byref(betaL_c),
                            x_c,
                            y_c,
                            byref(e_c),
                            byref(v_c),
                            byref(phase_c))
    x = np.array(x_c)
    y = np.array(y_c)
    return temp_c[0], press_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
def guess_phase(self, temp, press, z):
    """If only one root exsist for the equation of state the phase type can be
    determined from either the psedo-critical volume or a volume ratio to the co-volume

    Args:
        temp (float): Temperature (K)
        press (float): Pressure (Pa)
        z (array_like): Overall molar composition

    Returns:
        int: Phase int (VAPPH or LIQPH)
    """
    self.activate()
    temp_c = c_double(temp)
    press_c = c_double(press)
    z_c = (c_double * len(z))(*z)
    # NULL pointers signal "optional output not requested" to the
    # Fortran routine.
    null_pointer = POINTER(c_double)()
    temp_comp_c = null_pointer
    press_comp_c = null_pointer
    vb_ratio_c = null_pointer
    self.s_guess_phase.argtypes = [POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double )]
    # The routine is a Fortran function returning the phase flag.
    self.s_guess_phase.restype = c_int
    phase = self.s_guess_phase(byref(temp_c),
                               byref(press_c),
                               z_c,
                               temp_comp_c,
                               press_comp_c,
                               vb_ratio_c)
    return phase
#################################
# Temperature-volume property interfaces
#################################
def pressure_tv(self, temp, volume, n, dpdt=None, dpdv=None, dpdn=None):
    """Calculate pressure given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dpdt (No type, optional): Flag to activate calculation. Defaults to None.
        dpdv (No type, optional): Flag to activate calculation. Defaults to None.
        dpdn (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        float: Pressure (Pa)
        Optionally pressure differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    n_c = (c_double * len(n))(*n)
    # A NULL pointer tells the Fortran routine not to compute that
    # differential; a real pointer receives the value.
    null_pointer = POINTER(c_double)()
    if dpdt is None:
        dpdt_c = null_pointer
    else:
        dpdt_c = POINTER(c_double)(c_double(0.0))
    if dpdv is None:
        dpdv_c = null_pointer
    else:
        dpdv_c = POINTER(c_double)(c_double(0.0))
    # Second volume derivative is never requested by this wrapper.
    d2pdv2_c = null_pointer
    if dpdn is None:
        dpdn_c = null_pointer
    else:
        dpdn_c = (c_double * len(n))(0.0)
    recalculate_c = POINTER(c_int)(c_int(1))
    self.s_pressure_tv.argtypes = [POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_int )]
    self.s_pressure_tv.restype = c_double
    # NOTE: the library expects dpdv BEFORE dpdt in the argument list.
    P = self.s_pressure_tv(byref(temp_c),
                           byref(v_c),
                           n_c,
                           dpdv_c,
                           dpdt_c,
                           d2pdv2_c,
                           dpdn_c,
                           recalculate_c)
    # Always return a tuple; differentials are appended in the same
    # order as the keyword flags.
    return_tuple = (P, )
    if not dpdt is None:
        return_tuple += (dpdt_c[0], )
    if not dpdv is None:
        return_tuple += (dpdv_c[0], )
    if not dpdn is None:
        return_tuple += (np.array(dpdn_c), )
    return return_tuple
def internal_energy_tv(self, temp, volume, n, dedt=None, dedv=None):
    """Calculate internal energy given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dedt (No type, optional): Flag to activate calculation. Defaults to None.
        dedv (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        float: Energy (J)
        Optionally energy differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    e_c = c_double(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dedt is None:
        dedt_c = null_pointer
    else:
        dedt_c = POINTER(c_double)(c_double(0.0))
    if dedv is None:
        dedv_c = null_pointer
    else:
        dedv_c = POINTER(c_double)(c_double(0.0))
    recalculate_c = POINTER(c_int)(c_int(1))
    self.s_internal_energy_tv.argtypes = [POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_int )]
    self.s_internal_energy_tv.restype = None
    self.s_internal_energy_tv(byref(temp_c),
                              byref(v_c),
                              n_c,
                              byref(e_c),
                              dedt_c,
                              dedv_c,
                              recalculate_c)
    # Tuple grows with requested differentials, in keyword order.
    return_tuple = (e_c.value, )
    if not dedt is None:
        return_tuple += (dedt_c[0], )
    if not dedv is None:
        return_tuple += (dedv_c[0], )
    return return_tuple
def entropy_tv(self, temp, volume, n, dsdt=None, dsdv=None, dsdn=None):
    """Calculate entropy given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dsdt (No type, optional): Flag to activate calculation. Defaults to None.
        dsdv (No type, optional): Flag to activate calculation. Defaults to None.
        dsdn (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        float: Entropy (J/K)
        Optionally entropy differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    s_c = c_double(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dsdt is None:
        dsdt_c = null_pointer
    else:
        dsdt_c = POINTER(c_double)(c_double(0.0))
    if dsdv is None:
        dsdv_c = null_pointer
    else:
        dsdv_c = POINTER(c_double)(c_double(0.0))
    if dsdn is None:
        dsdn_c = null_pointer
    else:
        dsdn_c = (c_double * len(n))(0.0)
    # 0 => full entropy (not only the residual contribution).
    residual_c = POINTER(c_int)(c_int(0))
    self.s_entropy_tv.argtypes = [POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_int )]
    self.s_entropy_tv.restype = None
    self.s_entropy_tv(byref(temp_c),
                      byref(v_c),
                      n_c,
                      byref(s_c),
                      dsdt_c,
                      dsdv_c,
                      dsdn_c,
                      residual_c)
    return_tuple = (s_c.value, )
    if not dsdt is None:
        return_tuple += (dsdt_c[0], )
    if not dsdv is None:
        return_tuple += (dsdv_c[0], )
    if not dsdn is None:
        return_tuple += (np.array(dsdn_c), )
    return return_tuple
def enthalpy_tv(self, temp, volume, n, dhdt=None, dhdv=None, dhdn=None):
    """Calculate enthalpy given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dhdt (No type, optional): Flag to activate calculation. Defaults to None.
        dhdv (No type, optional): Flag to activate calculation. Defaults to None.
        dhdn (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        float: Enthalpy (J)
        Optionally enthalpy differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    h_c = c_double(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dhdt is None:
        dhdt_c = null_pointer
    else:
        dhdt_c = POINTER(c_double)(c_double(0.0))
    if dhdv is None:
        dhdv_c = null_pointer
    else:
        dhdv_c = POINTER(c_double)(c_double(0.0))
    if dhdn is None:
        dhdn_c = null_pointer
    else:
        dhdn_c = (c_double * len(n))(0.0)
    # 0 => full enthalpy (not only the residual contribution).
    residual_c = POINTER(c_int)(c_int(0))
    self.s_enthalpy_tv.argtypes = [POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_int )]
    self.s_enthalpy_tv.restype = None
    self.s_enthalpy_tv(byref(temp_c),
                       byref(v_c),
                       n_c,
                       byref(h_c),
                       dhdt_c,
                       dhdv_c,
                       dhdn_c,
                       residual_c)
    return_tuple = (h_c.value, )
    if not dhdt is None:
        return_tuple += (dhdt_c[0], )
    if not dhdv is None:
        return_tuple += (dhdv_c[0], )
    if not dhdn is None:
        return_tuple += (np.array(dhdn_c), )
    return return_tuple
def helmholtz_tv(self, temp, volume, n, dadt=None, dadv=None):
    """Calculate Helmholtz energy given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dadt (No type, optional): Flag to activate calculation. Defaults to None.
        dadv (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        float: Helmholtz energy (J)
        Optionally energy differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    a_c = c_double(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dadt is None:
        dadt_c = null_pointer
    else:
        dadt_c = POINTER(c_double)(c_double(0.0))
    if dadv is None:
        dadv_c = null_pointer
    else:
        dadv_c = POINTER(c_double)(c_double(0.0))
    # Second-order differentials are never requested by this wrapper.
    d2adt2_c = null_pointer
    d2adv2_c = null_pointer
    d2advdt_c = null_pointer
    recalculate_c = POINTER(c_int)(c_int(1))
    self.s_helmholtz_energy.argtypes = [POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_int )]
    self.s_helmholtz_energy.restype = None
    self.s_helmholtz_energy(byref(temp_c),
                            byref(v_c),
                            n_c,
                            byref(a_c),
                            dadt_c,
                            dadv_c,
                            d2adt2_c,
                            d2adv2_c,
                            d2advdt_c,
                            recalculate_c)
    return_tuple = (a_c.value, )
    if not dadt is None:
        return_tuple += (dadt_c[0], )
    if not dadv is None:
        return_tuple += (dadv_c[0], )
    return return_tuple
def chemical_potential_tv(self, temp, volume, n, dmudt=None, dmudv=None, dmudn=None):
    """Calculate chemical potential given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dmudt (No type, optional): Flag to activate calculation. Defaults to None.
        dmudv (No type, optional): Flag to activate calculation. Defaults to None.
        dmudn (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        ndarray: Chemical potential (J/mol)
        Optionally chemical potential differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    mu_c = (c_double * len(n))(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dmudt is None:
        dmudt_c = null_pointer
    else:
        dmudt_c = (c_double * len(n))(0.0)
    if dmudv is None:
        dmudv_c = null_pointer
    else:
        dmudv_c = (c_double * len(n))(0.0)
    if dmudn is None:
        dmudn_c = null_pointer
    else:
        # Flat buffer for the len(n) x len(n) matrix (Fortran layout).
        dmudn_c = (c_double * len(n)**2)(0.0)
    recalculate_c = POINTER(c_int)(c_int(1))
    self.s_chempot.argtypes = [POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_double ),
                               POINTER( c_int )]
    self.s_chempot.restype = None
    # NOTE: the library expects dmudv BEFORE dmudt in the argument list.
    self.s_chempot(byref(temp_c),
                   byref(v_c),
                   n_c,
                   mu_c,
                   dmudv_c,
                   dmudt_c,
                   dmudn_c,
                   recalculate_c)
    return_tuple = (np.array(mu_c), )
    if not dmudt is None:
        return_tuple += (np.array(dmudt_c), )
    if not dmudv is None:
        return_tuple += (np.array(dmudv_c), )
    if not dmudn is None:
        # Unpack the column-major flat buffer into a 2D array.
        dmudn = np.zeros((len(n), len(n)))
        for i in range(len(n)):
            for j in range(len(n)):
                dmudn[i][j] = dmudn_c[i + j*len(n)]
        return_tuple += (np.array(dmudn), )
    return return_tuple
def fugacity_tv(self, temp, volume, n, dlnphidt=None, dlnphidv=None, dlnphidn=None):
    """Calculate natural logarithm of fugacity given temperature, volume and mol numbers.

    Args:
        temp (float): Temperature (K)
        volume (float): Volume (m3)
        n (array_like): Mol numbers (mol)
        dlnphidt (No type, optional): Flag to activate calculation. Defaults to None.
        dlnphidv (No type, optional): Flag to activate calculation. Defaults to None.
        dlnphidn (No type, optional): Flag to activate calculation. Defaults to None.

    Returns:
        ndarry: Natural logarithm of fugacity
        Optionally differentials
    """
    self.activate()
    temp_c = c_double(temp)
    v_c = c_double(volume)
    lnphi_c = (c_double * len(n))(0.0)
    n_c = (c_double * len(n))(*n)
    # NULL pointer => differential not requested.
    null_pointer = POINTER(c_double)()
    if dlnphidt is None:
        dlnphidt_c = null_pointer
    else:
        dlnphidt_c = (c_double * len(n))(0.0)
    if dlnphidv is None:
        dlnphidv_c = null_pointer
    else:
        dlnphidv_c = (c_double * len(n))(0.0)
    if dlnphidn is None:
        dlnphidn_c = null_pointer
    else:
        # Flat buffer for the len(n) x len(n) matrix (Fortran layout).
        dlnphidn_c = (c_double * len(n)**2)(0.0)
    self.s_lnphi_tv.argtypes = [POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double )]
    self.s_lnphi_tv.restype = None
    self.s_lnphi_tv(byref(temp_c),
                    byref(v_c),
                    n_c,
                    lnphi_c,
                    dlnphidt_c,
                    dlnphidv_c,
                    dlnphidn_c)
    return_tuple = (np.array(lnphi_c), )
    if not dlnphidt is None:
        return_tuple += (np.array(dlnphidt_c), )
    if not dlnphidv is None:
        return_tuple += (np.array(dlnphidv_c), )
    if not dlnphidn is None:
        # Unpack the column-major flat buffer into a 2D array.
        dlnphidn = np.zeros((len(n),len(n)))
        for i in range(len(n)):
            for j in range(len(n)):
                dlnphidn[i][j] = dlnphidn_c[i + j*len(n)]
        return_tuple += (dlnphidn, )
    return return_tuple
#################################
# Saturation interfaces
#################################
def bubble_temperature(self, press, z):
    """Calculate bubble temperature given pressure and composition

    Args:
        press (float): Pressure (Pa)
        z (array_like): Composition (-)

    Raises:
        Exception: Failed to calculate bubble temperature

    Returns:
        float: Temperature (K)
        ndarray: Incipient phase composition
    """
    self.activate()
    press_c = c_double(press)
    y_c = (c_double * len(z))(0.0)  # incipient vapour composition (out)
    z_c = (c_double * len(z))(*z)
    ierr_c = c_int(0)
    self.s_bubble_t.argtypes = [POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_int )]
    self.s_bubble_t.restype = c_double
    temp = self.s_bubble_t(byref(press_c),
                           z_c,
                           y_c,
                           byref(ierr_c))
    y = np.array(y_c)
    if ierr_c.value != 0:
        # Fixed typo in the user-facing message ("calclualtion")
        raise Exception("bubble_temperature calculation failed")
    return temp, y
def bubble_pressure(self, temp, z):
    """Calculate bubble pressure given temperature and composition

    Args:
        temp (float): Temperature (K)
        z (array_like): Composition (-)

    Raises:
        Exception: Failed to calculate bubble pressure

    Returns:
        float: Pressure (Pa)
        ndarray: Incipient phase composition
    """
    self.activate()
    temp_c = c_double(temp)
    y_c = (c_double * len(z))(0.0)  # incipient vapour composition (out)
    z_c = (c_double * len(z))(*z)
    ierr_c = c_int(0)
    self.s_bubble_p.argtypes = [POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_int )]
    self.s_bubble_p.restype = c_double
    press = self.s_bubble_p(byref(temp_c),
                            z_c,
                            y_c,
                            byref(ierr_c))
    y = np.array(y_c)
    if ierr_c.value != 0:
        # Fixed typo in the user-facing message ("calclualtion")
        raise Exception("bubble_pressure calculation failed")
    return press, y
def dew_temperature(self, press, z):
    """Calculate dew temperature given pressure and composition

    Args:
        press (float): Pressure (Pa)
        z (array_like): Composition (-)

    Raises:
        Exception: Not able to solve for dew point

    Returns:
        float : Temperature (K)
        ndarray : Incipient phase composition (-)
    """
    self.activate()
    press_c = c_double(press)
    x_c = (c_double * len(z))(0.0)  # incipient liquid composition (out)
    z_c = (c_double * len(z))(*z)
    ierr_c = c_int(0)
    self.s_dew_t.argtypes = [POINTER( c_double ),
                             POINTER( c_double ),
                             POINTER( c_double ),
                             POINTER( c_int )]
    self.s_dew_t.restype = c_double
    # NOTE: the library takes the incipient composition (x) before the
    # overall composition (z) for dew-point routines.
    temp = self.s_dew_t(byref(press_c),
                        x_c,
                        z_c,
                        byref(ierr_c))
    x = np.array(x_c)
    if ierr_c.value != 0:
        # Fixed typo in the user-facing message ("calclualtion")
        raise Exception("dew_temperature calculation failed")
    return temp, x
def dew_pressure(self, temp, z):
    """Calculate dew pressure given temperature and composition

    Args:
        temp (float): Temperature (K)
        z (array_like): Composition (-)

    Raises:
        Exception: Not able to solve for dew point

    Returns:
        float : Pressure (Pa)
        ndarray : Incipient phase composition (-)
    """
    self.activate()
    temp_c = c_double(temp)
    x_c = (c_double * len(z))(0.0)  # incipient liquid composition (out)
    z_c = (c_double * len(z))(*z)
    ierr_c = c_int(0)
    self.s_dew_p.argtypes = [POINTER( c_double ),
                             POINTER( c_double ),
                             POINTER( c_double ),
                             POINTER( c_int )]
    self.s_dew_p.restype = c_double
    # NOTE: the library takes the incipient composition (x) before the
    # overall composition (z) for dew-point routines.
    press = self.s_dew_p(byref(temp_c),
                         x_c,
                         z_c,
                         byref(ierr_c))
    x = np.array(x_c)
    if ierr_c.value != 0:
        # Fixed copy-paste bug: the message previously referenced
        # "bubble_pressure" and contained a typo ("calclualtion").
        raise Exception("dew_pressure calculation failed")
    return press, x
def get_envelope_twophase(self, initial_pressure, z, maximum_pressure=1.5e7,
                          minimum_temperature=None, step_size=None,
                          calc_v=False):
    """Get the phase-envelope

    Args:
        initial_pressure (float): Start mapping form dew point at initial pressure (Pa).
        z (array_like): Composition (-)
        maximum_pressure (float , optional): Exit on maximum pressure (Pa). Defaults to 1.5e7.
        minimum_temperature (float , optional): Exit on minimum temperature (K). Defaults to None.
        step_size (float , optional): Tune step size of envelope trace. Defaults to None.
        calc_v (bool, optional): Calculate specifc volume of saturated phase? Defaults to False

    Returns:
        ndarray: Temperature values (K)
        ndarray: Pressure values (Pa)
        ndarray (optional): Specific volume (m3/mol)
    """
    self.activate()
    # Maximum number of points traced along the envelope.
    nmax = 1000
    z_c = (c_double * len(z))(*z)
    temp_c = c_double(0.0)
    press_c = c_double(initial_pressure)
    # spec=1: start from a pressure specification.
    spec_c = c_int(1)
    # beta=1: start at the dew line (all vapour).
    beta_in_c = c_double(1.0)
    max_press_c = c_double(maximum_pressure)
    nmax_c = c_int(nmax)
    Ta_c = (c_double * nmax)(0.0)
    Pa_c = (c_double * nmax)(0.0)
    Ki_c = (c_double * (nmax*len(z)))(0.0)
    beta_c = (c_double * nmax)(0.0)
    n_c = c_int(0)
    # NULL pointers => optional outputs/inputs not used.
    null_pointer = POINTER(c_double)()
    criconden_c = null_pointer
    crit_c = null_pointer
    if step_size is None:
        ds_c = null_pointer
    else:
        ds_c = POINTER(c_double)(c_double(step_size))
    exitOnTriplePoint_c = POINTER(c_int)()
    if minimum_temperature is None:
        tme_c = null_pointer
    else:
        tme_c = POINTER(c_double)(c_double(minimum_temperature))
    self.s_envelope_plot.argtypes = [POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double )]
    self.s_envelope_plot.restype = None
    self.s_envelope_plot(z_c,
                         byref(temp_c),
                         byref(press_c),
                         byref(spec_c),
                         byref(beta_in_c),
                         byref(max_press_c),
                         byref(nmax_c),
                         Ta_c,
                         Pa_c,
                         Ki_c,
                         beta_c,
                         byref(n_c),
                         criconden_c,
                         crit_c,
                         ds_c,
                         exitOnTriplePoint_c,
                         tme_c)
    # Only the first n_c points of the preallocated buffers are valid.
    t_vals = np.array(Ta_c[0:n_c.value])
    p_vals = np.array(Pa_c[0:n_c.value])
    return_tuple = (t_vals, p_vals)
    if calc_v:
        # Special treatment for single phase
        if np.amax(z) == 1:
            # Pure component: mirror the curve so vapour volumes come
            # first and liquid volumes follow in reverse order.
            t_vals_single = np.zeros(2*n_c.value)
            p_vals_single = np.zeros(2*n_c.value)
            v_vals_single = np.zeros_like(t_vals_single)
            for i in range(n_c.value):
                t_vals_single[i] = t_vals[i]
                t_vals_single[-i-1] = t_vals[i]
                p_vals_single[i] = p_vals[i]
                p_vals_single[-i-1] = p_vals[i]
                v_vals_single[i], = self.specific_volume(t_vals[i], p_vals[i], z, self.VAPPH)
                v_vals_single[-i-1], = self.specific_volume(t_vals[i], p_vals[i], z, self.LIQPH)
            return_tuple = (t_vals_single, p_vals_single, v_vals_single)
        else:
            v_vals = np.zeros_like(t_vals)
            for i in range(n_c.value):
                # Pick the phase of the saturated mixture from the
                # vapour fraction along the trace.
                if beta_c[i] > 0.5:
                    phase = self.VAPPH
                else:
                    phase = self.LIQPH
                v_vals[i], = self.specific_volume(t_vals[i], p_vals[i], z, phase)
            return_tuple += (v_vals, )
    return return_tuple
def get_binary_pxy(self,
                   temp,
                   maximum_pressure=1.5e7,
                   minimum_pressure=1.0e5,
                   maximum_dz = 0.003,
                   maximum_dlns=0.01):
    """Calculate binary three phase envelope

    Args:
        temp (float): Temperature (K)
        maximum_pressure (float, optional): Exit on maximum pressure (Pa). Defaults to 1.5e7.
        minimum_pressure (float, optional): Exit on minimum pressure (Pa). Defaults to 1.0e5.
        maximum_dz (float, optional): Maximum composition step along the phase lines. Defaults to 0.003.
        maximum_dlns (float, optional): Maximum step in ln(s) of the extrapolation variable. Defaults to 0.01.

    Returns:
        tuple of arrays: LLE, L1VE, L2VE
        Each element is an (x, w, p) triple, or (None, None, None) when
        that phase line is absent.
    """
    # Redefinition of module parameter:
    self.activate()
    # Maximum number of points per phase line; must match the Fortran
    # module parameter (see commented lookup below).
    nmax = 10000
    #c_int.in_dll(self.tp, self.get_export_name("binaryplot", "maxpoints")).value
    temp_c = c_double(temp)
    min_temp_c = c_double(0.0)
    # ispec=1: temperature is the specified variable.
    ispec_c = c_int(1)
    press_c = c_double(0.0)
    max_press_c = c_double(maximum_pressure)
    min_press_c = c_double(minimum_pressure)
    dz_max_c = c_double(maximum_dz)
    dlns_max_c = c_double(maximum_dlns)
    filename = "binaryVLLE.dat"
    filename_c = c_char_p(filename.encode('ascii'))
    filename_len = c_len_type(len(filename))
    # Results are packed 9 values per point:
    # columns 0-2 LLE (x, w, p), 3-5 L1VE, 6-8 L2VE.
    res_c = (c_double * (nmax*9))(0.0)
    nres_c = (c_int * 3)(0)
    wsf_c = c_int(1)
    self.s_binary_plot.argtypes = [POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_int ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_char_p ),
                                   POINTER( c_double ),
                                   POINTER( c_double ),
                                   POINTER( c_int ),
                                   POINTER( c_int ),
                                   POINTER( c_double ),
                                   c_len_type]
    self.s_binary_plot.restype = None
    self.s_binary_plot(byref(temp_c),
                       byref(press_c),
                       byref(ispec_c),
                       byref(min_temp_c),
                       byref(max_press_c),
                       byref(dz_max_c),
                       filename_c,
                       byref(dlns_max_c),
                       res_c,
                       nres_c,
                       byref(wsf_c),
                       byref(min_press_c),
                       filename_len)
    nLLE = nres_c[0]
    nL1VE = nres_c[1]
    nL2VE = nres_c[2]
    if nLLE > 0:
        xLLE = np.zeros(nLLE)
        wLLE = np.zeros(nLLE)
        pLLE = np.zeros(nLLE)
        for i in range(nLLE):
            xLLE[i] = res_c[i*9]
            wLLE[i] = res_c[i*9+1]
            pLLE[i] = res_c[i*9+2]
        LLE = (xLLE, wLLE, pLLE)
    else:
        LLE = (None, None, None)
    if nL1VE > 0:
        xL1VE = np.zeros(nL1VE)
        wL1VE = np.zeros(nL1VE)
        pL1VE = np.zeros(nL1VE)
        for i in range(nL1VE):
            xL1VE[i] = res_c[i*9+3]
            wL1VE[i] = res_c[i*9+4]
            pL1VE[i] = res_c[i*9+5]
        L1VE = (xL1VE, wL1VE, pL1VE)
    else:
        L1VE = (None, None, None)
    if nL2VE > 0:
        xL2VE = np.zeros(nL2VE)
        wL2VE = np.zeros(nL2VE)
        pL2VE = np.zeros(nL2VE)
        for i in range(nL2VE):
            xL2VE[i] = res_c[i*9+6]
            wL2VE[i] = res_c[i*9+7]
            pL2VE[i] = res_c[i*9+8]
        L2VE = (xL2VE, wL2VE, pL2VE)
    else:
        L2VE = (None, None, None)
    return LLE, L1VE, L2VE
def get_bp_term(self,
                i_term):
    """Get error description for binary plot error

    Args:
        i_term (int): binary plot error identifyer

    Returns:
        str: Error message
    """
    # NOTE(review): unlike the other wrappers this does not call
    # self.activate() first — presumably safe for a pure message
    # lookup, but verify.
    message_len = 50
    # Preallocated blank buffer the Fortran routine writes into.
    message_c = c_char_p(b" " * message_len)
    message_len = c_len_type(message_len)
    i_term_c = c_int(i_term)
    self.s_get_bp_term.argtypes = [POINTER( c_int ),
                                   c_char_p,
                                   c_len_type]
    self.s_get_bp_term.restype = None
    self.s_get_bp_term(byref(i_term_c),
                       message_c,
                       message_len)
    message = message_c.value.decode('ascii')
    return message
def global_binary_plot(self,
                       maximum_pressure=1.5e7,
                       minimum_pressure=1.0e5,
                       minimum_temperature=150.0,
                       maximum_temperature=500.0,
                       include_azeotropes=False):
    """Calculate global binary phase envelope

    Args:
        maximum_pressure (float, optional): Exit on maximum pressure (Pa). Defaults to 1.5e7.
        minimum_pressure (float, optional): Exit on minimum pressure (Pa). Defaults to 1.0e5.
        minimum_temperature (float, optional): Terminate phase line traceing at minimum temperature. Defaults to 150.0 K.
        maximum_temperature (float, optional): Terminate phase line traceing at maximum temperature. Defaults to 500.0 K.
        include_azeotropes (bool, optional): Include azeotropic lines. Defaults to False.

    Returns:
        tuple of arrays
    """
    self.activate()
    max_press_c = c_double(maximum_pressure)
    min_press_c = c_double(minimum_pressure)
    max_temp_c = c_double(maximum_temperature)
    min_temp_c = c_double(minimum_temperature)
    # Fortran logical passed as int flag.
    az_bool_c = c_int(1 if include_azeotropes else 0)
    # Results are exchanged through a file written by the library.
    filename = "global_binary.dat"
    filename_c = c_char_p(filename.encode('ascii'))
    filename_len = c_len_type(len(filename))
    i_term_c = c_int(0)
    self.s_global_binary_plot.argtypes = [POINTER( c_double ),
                                          POINTER( c_double ),
                                          POINTER( c_double ),
                                          c_char_p,
                                          POINTER( c_int ),
                                          POINTER( c_double ),
                                          POINTER( c_int ),
                                          c_len_type]
    self.s_global_binary_plot.restype = None
    self.s_global_binary_plot(byref(min_press_c),
                              byref(max_press_c),
                              byref(min_temp_c),
                              filename_c,
                              byref(i_term_c),
                              byref(max_temp_c),
                              byref(az_bool_c),
                              filename_len)
    # Non-zero termination code: print the explanation but still try
    # to read whatever was written (best-effort behaviour preserved).
    if not i_term_c.value == 0:
        message = self.get_bp_term(i_term_c.value)
        print(message)
    # Load file with filename and read into arrays
    return plotutils.get_globa_binary_data(filename)
def solid_envelope_plot(self, initial_pressure, z, maximum_pressure=1.5e7,
                        minimum_temperature=170.0, calc_esv=False):
    """Calculate phase envelope including solid lines

    Args:
        initial_pressure (float): Start mapping from initial pressure (Pa).
        z (array_like): Composition (-)
        maximum_pressure (float , optional): Exit on maximum pressure (Pa). Defaults to 1.5e7.
        minimum_temperature (float , optional): Minimum temperature used while tracing (K). Defaults to 170.0.
        calc_esv (bool, optional): Calculate specifc volume of saturated phase? Defaults to False

    Returns:
        tuple of arrays
    """
    self.activate()
    z_c = (c_double * len(z))(*z)
    temp_c = c_double(0.0)
    press_c = c_double(initial_pressure)
    max_press_c = c_double(maximum_pressure)
    # Results are exchanged through a file written by the library.
    filename = "solid_envelope.dat"
    filename_c = c_char_p(filename.encode('ascii'))
    filename_len = c_len_type(len(filename))
    # i_spec=1: start from a pressure specification.
    i_spec_c = c_int(1)
    esv_bool_c = c_int(1 if calc_esv else 0)
    # Temporarily lower the global minimum temperature, restore after.
    min_t = self.get_tmin()
    self.set_tmin(minimum_temperature)
    self.s_solid_envelope_plot.argtypes = [POINTER( c_double ),
                                           POINTER( c_double ),
                                           POINTER( c_double ),
                                           POINTER( c_int ),
                                           POINTER( c_double ),
                                           c_char_p,
                                           POINTER( c_int ),
                                           c_len_type]
    self.s_solid_envelope_plot.restype = None
    self.s_solid_envelope_plot(z_c,
                               byref(temp_c),
                               byref(press_c),
                               byref(i_spec_c),
                               byref(max_press_c),
                               filename_c,
                               byref(esv_bool_c),
                               filename_len)
    self.set_tmin(min_t)
    #if .not. i_term_c.value == 0:
    #    message = self.get_bp_term(iTerm)
    #    print(message)
    # Load file with filename and read into lists....
    return plotutils.get_solid_envelope_data(filename)
def get_isotherm(self,
                 temp,
                 z,
                 minimum_pressure=1.0e5,
                 maximum_pressure=1.5e7,
                 nmax=100):
    """Get iso-therm at specified temperature

    Args:
        temp (float): Temperature (K)
        z (array_like): Composition (-)
        minimum_pressure (float, optional): Map to minimum pressure. Defaults to 1.0e5. (Pa)
        maximum_pressure (float, optional): Map to maximum pressure. Defaults to 1.5e7. (Pa)
        nmax (int, optional): Maximum number of points on iso-therm. Defaults to 100.

    Returns:
        Multiple numpy arrays: pressure (Pa), specific volume (m3/mol),
        specific entropy (J/mol/K), specific enthalpy (J/mol)
    """
    self.activate()
    temp_c = c_double(temp)
    minimum_pressure_c = c_double(minimum_pressure)
    maximum_pressure_c = c_double(maximum_pressure)
    z_c = (c_double * len(z))(*z)
    # Output buffers, filled up to na_c points by the library.
    va_c = (c_double * nmax)(0.0)
    pa_c = (c_double * nmax)(0.0)
    sa_c = (c_double * nmax)(0.0)
    ha_c = (c_double * nmax)(0.0)
    nmax_c = c_int(nmax)
    na_c = c_int(0)
    self.s_isotherm.argtypes = [POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_int ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_double ),
                                POINTER( c_int )]
    self.s_isotherm.restype = None
    self.s_isotherm(byref(temp_c),
                    byref(minimum_pressure_c),
                    byref(maximum_pressure_c),
                    z_c,
                    byref(nmax_c),
                    pa_c,
                    va_c,
                    sa_c,
                    ha_c,
                    byref(na_c))
    # Trim the preallocated buffers to the number of computed points.
    p_vals = np.array(pa_c[0:na_c.value])
    v_vals = np.array(va_c[0:na_c.value])
    s_vals = np.array(sa_c[0:na_c.value])
    h_vals = np.array(ha_c[0:na_c.value])
    return p_vals, v_vals, s_vals, h_vals
def get_isobar(self,
               press,
               z,
               minimum_temperature=200.0,
               maximum_temperature=500.0,
               nmax=100):
    """Get isobar at specified pressure.

    Args:
        press (float): Pressure (Pa)
        z (array_like): Composition (-)
        minimum_temperature (float, optional): Minimum temperature. Defaults to 200.0. (K)
        maximum_temperature (float, optional): Maximum temperature. Defaults to 500.0. (K)
        nmax (int, optional): Maximum number of points on iso-bar. Defaults to 100.

    Returns:
        Multiple numpy arrays: temperature (K), specific volume (m3/mol),
        specific entropy (J/mol/K), specific enthalpy (J/mol)
    """
    self.activate()
    press_c = c_double(press)
    minimum_temperature_c = c_double(minimum_temperature)
    maximum_temperature_c = c_double(maximum_temperature)
    z_c = (c_double * len(z))(*z)
    # Output buffers, filled up to na_c points by the library.
    va_c = (c_double * nmax)(0.0)
    ta_c = (c_double * nmax)(0.0)
    sa_c = (c_double * nmax)(0.0)
    ha_c = (c_double * nmax)(0.0)
    nmax_c = c_int(nmax)
    na_c = c_int(0)
    self.s_isobar.argtypes = [POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_int ),
                              POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_double ),
                              POINTER( c_int )]
    self.s_isobar.restype = None
    self.s_isobar(byref(press_c),
                  byref(minimum_temperature_c),
                  byref(maximum_temperature_c),
                  z_c,
                  byref(nmax_c),
                  ta_c,
                  va_c,
                  sa_c,
                  ha_c,
                  byref(na_c))
    # Trim the preallocated buffers to the number of computed points.
    t_vals = np.array(ta_c[0:na_c.value])
    v_vals = np.array(va_c[0:na_c.value])
    s_vals = np.array(sa_c[0:na_c.value])
    h_vals = np.array(ha_c[0:na_c.value])
    return t_vals, v_vals, s_vals, h_vals
def get_isenthalp(self,
                  enthalpy,
                  z,
                  minimum_pressure=1.0e5,
                  maximum_pressure=1.5e7,
                  minimum_temperature=200.0,
                  maximum_temperature=500.0,
                  nmax=100):
    """Get isenthalpy given specified enthalpy.

    Args:
        enthalpy (float): Enthalpy (J/mol)
        z (array_like): Composition (-)
        minimum_pressure (float, optional): Minimum pressure. Defaults to 1.0e5. (Pa)
        maximum_pressure (float, optional): Maximum pressure. Defaults to 1.5e7. (Pa)
        minimum_temperature (float, optional): Minimum temperature. Defaults to 200.0. (K)
        maximum_temperature (float, optional): Maximum temperature. Defaults to 500.0. (K)
        nmax (int, optional): Maximum number of points on isenthalp. Defaults to 100.

    Returns:
        Multiple numpy arrays: temperature (K), pressure (Pa),
        specific volume (m3/mol), specific entropy (J/mol/K)
    """
    self.activate()
    enthalpy_c = c_double(enthalpy)
    minimum_pressure_c = c_double(minimum_pressure)
    maximum_pressure_c = c_double(maximum_pressure)
    minimum_temperature_c = c_double(minimum_temperature)
    maximum_temperature_c = c_double(maximum_temperature)
    z_c = (c_double * len(z))(*z)
    # Output buffers, filled up to na_c points by the library.
    va_c = (c_double * nmax)(0.0)
    ta_c = (c_double * nmax)(0.0)
    sa_c = (c_double * nmax)(0.0)
    pa_c = (c_double * nmax)(0.0)
    nmax_c = c_int(nmax)
    na_c = c_int(0)
    self.s_isenthalp.argtypes = [POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_int ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_double ),
                                 POINTER( c_int )]
    self.s_isenthalp.restype = None
    self.s_isenthalp(byref(enthalpy_c),
                     byref(minimum_pressure_c),
                     byref(maximum_pressure_c),
                     byref(minimum_temperature_c),
                     byref(maximum_temperature_c),
                     z_c,
                     byref(nmax_c),
                     pa_c,
                     va_c,
                     sa_c,
                     ta_c,
                     byref(na_c))
    # Trim the preallocated buffers to the number of computed points.
    t_vals = np.array(ta_c[0:na_c.value])
    v_vals = np.array(va_c[0:na_c.value])
    s_vals = np.array(sa_c[0:na_c.value])
    p_vals = np.array(pa_c[0:na_c.value])
    return t_vals, p_vals, v_vals, s_vals
    def get_isentrope(self,
                      entropy,
                      z,
                      minimum_pressure=1.0e5,
                      maximum_pressure=1.5e7,
                      minimum_temperature=200.0,
                      maximum_temperature=500.0,
                      nmax=100):
        """Trace an isentrope (curve of constant entropy) for the given
        entropy, within the specified temperature/pressure window.
        Args:
            entropy (float): Entropy (J/mol/K)
            z (array_like): Composition (-)
            minimum_pressure (float, optional): Minimum pressure. Defaults to 1.0e5. (Pa)
            maximum_pressure (float, optional): Maximum pressure. Defaults to 1.5e7. (Pa)
            minimum_temperature (float, optional): Minimum temperature. Defaults to 200.0. (K)
            maximum_temperature (float, optional): Maximum temperature. Defaults to 500.0. (K)
            nmax (int, optional): Maximum number of points on isentrope. Defaults to 100.
        Returns:
            tuple of ndarray: temperature (K), pressure (Pa), specific volume
            and enthalpy values along the isentrope. Arrays are truncated to
            the number of points actually computed by the library.
        """
        # Make this instance's model the active one in the library.
        self.activate()
        entropy_c = c_double(entropy)
        minimum_pressure_c = c_double(minimum_pressure)
        maximum_pressure_c = c_double(maximum_pressure)
        minimum_temperature_c = c_double(minimum_temperature)
        maximum_temperature_c = c_double(maximum_temperature)
        z_c = (c_double * len(z))(*z)
        # Output buffers sized for the maximum number of points; the library
        # reports the actual count back through na_c.
        va_c = (c_double * nmax)(0.0)
        ta_c = (c_double * nmax)(0.0)
        ha_c = (c_double * nmax)(0.0)
        pa_c = (c_double * nmax)(0.0)
        nmax_c = c_int(nmax)
        na_c = c_int(0)
        # NOTE: argument order is positional and must match the Fortran
        # signature exactly.
        self.s_isentrope.argtypes = [POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int )]
        self.s_isentrope.restype = None
        self.s_isentrope(byref(entropy_c),
                         byref(minimum_pressure_c),
                         byref(maximum_pressure_c),
                         byref(minimum_temperature_c),
                         byref(maximum_temperature_c),
                         z_c,
                         byref(nmax_c),
                         pa_c,
                         va_c,
                         ha_c,
                         ta_c,
                         byref(na_c))
        # Truncate to the na_c points actually filled in.
        t_vals = np.array(ta_c[0:na_c.value])
        v_vals = np.array(va_c[0:na_c.value])
        h_vals = np.array(ha_c[0:na_c.value])
        p_vals = np.array(pa_c[0:na_c.value])
        return t_vals, p_vals, v_vals, h_vals
#################################
# Stability interfaces
#################################
def critical(self, n, temp=0.0, v=0.0, tol=1.0e-7):
"""Calculate critical point in variables T and V
Args:
n (array_like): Mol numbers (mol)
temp (float, optional): Initial guess for temperature (K). Defaults to 0.0.
v (float, optional): Initial guess for volume (m3). Defaults to 0.0.
tol (float, optional): Error tolerance (-). Defaults to 1.0e-8.
Raises:
Exception: Failure to solve for critcal point
Returns:
float: Temperature (K)
float: Volume (m3)
float: Pressure (Pa)
"""
self.activate()
temp_c = c_double(temp)
v_c = c_double(v)
n_c = (c_double * len(n))(*n)
ierr_c = c_int(0)
P_c = c_double(0.0)
tol_c = c_double(tol)
self.s_crit_tv.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int ),
POINTER( c_double ),
POINTER( c_double )]
self.s_crit_tv.restype = None
self.s_crit_tv(byref(temp_c),
byref(v_c),
n_c,
byref(ierr_c),
byref(tol_c),
byref(P_c))
if ierr_c.value != 0:
raise Exception("critical calclualtion failed")
return temp_c.value, v_c.value, P_c.value
#################################
# Virial interfaces
#################################
    def virial_coeffcients(self, temp, n):
        """Calculate (composition-dependent) virial coefficients B and C,
        defined as P/RT = rho + B*rho**2 + C*rho**3 + O(rho**4) as rho->0.
        NOTE: the method name carries a historical typo ("coeffcients");
        it is kept unchanged for backward compatibility with callers.
        Args:
            temp (float): Temperature
            n (array_like): Mol numbers (mol)
        Returns:
            float: B (m3/mol)
            float: C (m6/mol2)
        """
        # Make this instance's model the active one in the library.
        self.activate()
        temp_c = POINTER( c_double )(c_double(temp))
        n_c = (c_double * len(n))(*n)
        # Output scalars, passed by reference to the Fortran routine.
        B_c = c_double(0.0)
        C_c = c_double(0.0)
        self.s_virial_coeffcients.argtypes = [POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double )]
        self.s_virial_coeffcients.restype = None
        self.s_virial_coeffcients(temp_c,
                                  n_c,
                                  byref(B_c),
                                  byref(C_c))
        return B_c.value, C_c.value
def second_virial_matrix(self, temp):
"""Calculate composition-independent virial coefficients B,
defined as P = RT*rho + B*rho**2 + C*rho**3 + O(rho**4) as rho->0.
Including cross coefficients.
Args:
temp (float): Temperature (K)
Returns:
ndarray: B - Second virial coefficient matrix (m3/mol)
"""
self.activate()
temp_c = POINTER( c_double )(c_double(temp))
bmat_c = (c_double * self.nc**2)(0.0)
self.s_second_virial_matrix.argtypes = [POINTER( c_double ),
POINTER( c_double )]
self.s_second_virial_matrix.restype = None
self.s_second_virial_matrix(temp_c, bmat_c)
bmat = np.zeros((self.nc,self.nc))
for i in range(self.nc):
for j in range(self.nc):
bmat[i][j] = bmat_c[i+j*self.nc]
return bmat
def binary_third_virial_matrix(self, temp):
"""Calculate composition-independent virial coefficients C,
defined as P = RT*rho + B*rho**2 + C*rho**3 + O(rho**4) as rho->0.
Including cross coefficients
Currently the code only support binary mixtures
Args:
temp (float): Temperature (K)
Returns:
ndarray: C - Third virial coefficient matrix (m6/mol2)
"""
self.activate()
assert self.nc == 2
temp_c = POINTER( c_double )(c_double(temp))
cmat_c = (c_double * self.nc**2)(0.0)
self.s_binary_third_virial_matrix.argtypes = [POINTER( c_double ),
POINTER( c_double )]
self.s_binary_third_virial_matrix.restype = None
self.s_binary_third_virial_matrix(temp_c, cmat_c)
cmat = np.zeros((self.nc,self.nc))
for i in range(self.nc):
for j in range(self.nc):
cmat[i][j] = cmat_c[i+j*self.nc]
return cmat
| 39.389925 | 182 | 0.491498 |
from __future__ import print_function
import sys
from ctypes import *
from os import path
import numpy as np
from . import plotutils, utils, platform_specifics
# ctypes type used for the hidden string-length arguments passed to the
# compiled library: size_t for GCC versions above 7, int for older ones.
# (NOTE(review): this tracks a gfortran ABI change — confirm against the
# compiler used to build the dynamic library.)
if utils.gcc_major_version_greater_than(7):
    c_len_type = c_size_t
else:
    c_len_type = c_int
class thermopack(object):
    def __init__(self):
        """Load the thermopack dynamic library and resolve every exported
        Fortran symbol used by this wrapper.

        No equation of state is initialized here; an EoS instance is added
        via add_eos() and configured through the specific init methods.
        """
        # Platform-dependent symbol mangling scheme and library file name.
        pf_specifics = platform_specifics.get_platform_specifics()
        self.prefix = pf_specifics["prefix"]
        self.module = pf_specifics["module"]
        self.postfix = pf_specifics["postfix"]
        self.postfix_nm = pf_specifics["postfix_no_module"]
        dyn_lib_path = path.join(path.dirname(__file__), pf_specifics["dyn_lib"])
        self.tp = cdll.LoadLibrary(dyn_lib_path)
        # Model/instance management symbols.
        self.s_get_phase_flags = self.tp.get_phase_flags_c
        self.get_phase_flags()
        self.s_add_eos = getattr(self.tp, self.get_export_name("thermopack_var", "add_eos"))
        self.s_delete_eos = getattr(self.tp, self.get_export_name("thermopack_var", "delete_eos"))
        self.s_activate_model = getattr(self.tp, self.get_export_name("thermopack_var", "activate_model"))
        self.s_get_model_id = getattr(self.tp, self.get_export_name("thermopack_var", "get_eos_identification"))
        self.eoslibinit_init_thermo = getattr(self.tp, self.get_export_name("eoslibinit", "init_thermo"))
        self.Rgas = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "rgas")).value
        # Number of components; presumably set when an EoS is initialized
        # elsewhere — confirm against the init routines.
        self.nc = None
        # Module-level constants exposed by the library (read/write).
        self.minimum_temperature_c = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "tptmin"))
        self.minimum_pressure_c = c_double.in_dll(self.tp, self.get_export_name("thermopack_constants", "tppmin"))
        self.solideos_solid_init = getattr(self.tp, self.get_export_name("solideos", "solid_init"))
        self.eoslibinit_init_volume_translation = getattr(self.tp, self.get_export_name("eoslibinit", "init_volume_translation"))
        self.eoslibinit_redefine_critical_parameters = getattr(self.tp, self.get_export_name("eoslibinit", "redefine_critical_parameters"))
        # T-P property interfaces.
        self.s_eos_specificvolume = getattr(self.tp, self.get_export_name("eos", "specificvolume"))
        self.s_eos_zfac = getattr(self.tp, self.get_export_name("eos", "zfac"))
        self.s_eos_thermo = getattr(self.tp, self.get_export_name("eos", "thermo"))
        self.s_eos_entropy = getattr(self.tp, self.get_export_name("eos", "entropy"))
        self.s_eos_enthalpy = getattr(self.tp, self.get_export_name("eos", "enthalpy"))
        self.s_eos_compmoleweight = getattr(self.tp, self.get_export_name("eos", "compmoleweight"))
        self.s_eos_idealenthalpysingle = getattr(self.tp, self.get_export_name("eos", "idealenthalpysingle"))
        self.s_sos_sound_velocity_2ph = getattr(self.tp, self.get_export_name("speed_of_sound", "sound_velocity_2ph"))
        self.s_compdata_compindex = getattr(self.tp, self.get_export_name("compdata", "comp_index_active"))
        self.s_compdata_compname = getattr(self.tp, self.get_export_name("compdata", "comp_name_active"))
        # Flash interfaces.
        self.s_set_ph_tolerance = getattr(self.tp, self.get_export_name("ph_solver", "setphtolerance"))
        self.s_twophasetpflash = getattr(self.tp, self.get_export_name("tp_solver", "twophasetpflash"))
        self.s_psflash_twophase = getattr(self.tp, self.get_export_name("ps_solver", "twophasepsflash"))
        self.s_uvflash_twophase = getattr(self.tp, self.get_export_name("uv_solver", "twophaseuvflash"))
        self.s_phflash_twophase = getattr(self.tp, self.get_export_name("ph_solver", "twophasephflash"))
        self.s_guess_phase = getattr(self.tp, self.get_export_name("thermo_utils", "guessphase"))
        # T-V property interfaces.
        self.s_internal_energy_tv = getattr(self.tp, self.get_export_name("eostv", "internal_energy"))
        self.s_entropy_tv = getattr(self.tp, self.get_export_name("eostv", "entropytv"))
        self.s_pressure_tv = getattr(self.tp, self.get_export_name("eostv", "pressure"))
        self.s_lnphi_tv = getattr(self.tp, self.get_export_name("eostv", "thermotv"))
        self.s_enthalpy_tv = getattr(self.tp, self.get_export_name("eostv", "enthalpytv"))
        self.s_helmholtz_energy = getattr(self.tp, self.get_export_name("eostv", "free_energy"))
        self.s_chempot = getattr(self.tp, self.get_export_name("eostv", "chemical_potential"))
        # Saturation and phase-envelope interfaces.
        self.s_bubble_t = getattr(self.tp, self.get_export_name("saturation", "safe_bubt"))
        self.s_bubble_p = getattr(self.tp, self.get_export_name("saturation", "safe_bubp"))
        self.s_dew_t = getattr(self.tp, self.get_export_name("saturation", "safe_dewt"))
        self.s_dew_p = getattr(self.tp, self.get_export_name("saturation", "safe_dewp"))
        self.s_envelope_plot = getattr(self.tp, self.get_export_name("saturation_curve", "envelopeplot"))
        self.s_binary_plot = getattr(self.tp, self.get_export_name("binaryplot", "vllebinaryxy"))
        self.s_global_binary_plot = getattr(self.tp, self.get_export_name("binaryplot", "global_binary_plot"))
        self.s_get_bp_term = getattr(self.tp, self.get_export_name("binaryplot", "get_bp_term"))
        self.s_solid_envelope_plot = getattr(self.tp, self.get_export_name("solid_saturation", "solidenvelopeplot"))
        # Isoline interfaces.
        self.s_isotherm = getattr(self.tp, self.get_export_name("isolines", "isotherm"))
        self.s_isobar = getattr(self.tp, self.get_export_name("isolines", "isobar"))
        self.s_isenthalp = getattr(self.tp, self.get_export_name("isolines", "isenthalp"))
        self.s_isentrope = getattr(self.tp, self.get_export_name("isolines", "isentrope"))
        # Stability and virial interfaces.
        self.s_crit_tv = getattr(self.tp, self.get_export_name("critical", "calccriticaltv"))
        self.s_virial_coeffcients = getattr(self.tp, self.get_export_name("eostv", "virial_coefficients"))
        self.s_second_virial_matrix = getattr(self.tp, self.get_export_name("eostv", "secondvirialcoeffmatrix"))
        self.s_binary_third_virial_matrix = getattr(self.tp, self.get_export_name("eostv", "binarythirdvirialcoeffmatrix"))
        # Register this wrapper instance as a model in the library.
        self.add_eos()
    def __del__(self):
        """Release the model slot held by this instance in the library."""
        self.delete_eos()
def activate(self):
self.s_activate_model.argtypes = [POINTER( c_int )]
self.s_activate_model.restype = None
self.s_activate_model(self.model_index_c)
def add_eos(self):
self.s_add_eos.argtypes = None
self.s_add_eos.restype = c_int
self.model_index_c = c_int(self.s_add_eos())
def delete_eos(self):
self.activate()
self.s_delete_eos.argtypes = [POINTER( c_int )]
self.s_delete_eos.restype = None
self.s_delete_eos(self.model_index_c)
    def get_model_id(self):
        """Get the identification string of the active equation of state.
        Returns:
            str: EoS identifier, ASCII-decoded and stripped of padding.
        """
        self.activate()
        # Fixed-size character buffer filled in place by the Fortran routine.
        eosid_len = 40
        eosid_c = c_char_p(b" " * eosid_len)
        eosid_len_c = c_len_type(eosid_len)
        self.s_get_model_id.argtypes = [c_char_p, c_len_type]
        self.s_get_model_id.restype = None
        self.s_get_model_id(eosid_c, eosid_len_c)
        eosid = eosid_c.value.decode('ascii').strip()
        return eosid
def get_export_name(self, module, method):
if len(module) > 0:
export_name = self.prefix + module + self.module + method + self.postfix
else:
export_name = method + self.postfix_nm
return export_name
_discr_method))
if csp_eos is None:
csp_eos_c = c_char_p()
csp_eos_len = c_len_type(0)
else:
csp_eos_c = c_char_p(csp_eos.encode('ascii'))
csp_eos_len = c_len_type(len(csp_eos))
if csp_ref_comp is None:
csp_ref_comp_c = c_char_p()
csp_ref_comp_len = c_len_type(0)
else:
csp_ref_comp_c = c_char_p(csp_ref_comp.encode('ascii'))
csp_ref_comp_len = c_len_type(len(csp_ref_comp))
kij_ref_len = c_len_type(len(kij_ref))
kij_ref_c = c_char_p(kij_ref.encode('ascii'))
alpha_ref_len = c_len_type(len(alpha_ref))
alpha_ref_c = c_char_p(alpha_ref.encode('ascii'))
saft_ref_len = c_len_type(len(saft_ref))
saft_ref_c = c_char_p(saft_ref.encode('ascii'))
if b_exponent is None:
b_exponent_c = POINTER(c_double)()
else:
b_exponent_c = POINTER(c_double)(c_double(b_exponent))
if TrendEosForCp is None:
TrendEosForCp_c = c_char_p()
TrendEosForCp_len = c_len_type(0)
else:
TrendEosForCp_c = c_char_p(TrendEosForCp.encode('ascii'))
TrendEosForCp_len = c_len_type(len(TrendEosForCp))
if cptype is None:
cptype_c = null_pointer
else:
cptype_c = (c_int * self.nc)(*cptype)
if silent is None:
silent_c = null_pointer
else:
if silent:
silent_int = 1
else:
silent_int = 0
silent_c = POINTER(c_int)(c_int(silent_int))
self.eoslibinit_init_thermo.argtypes = [c_char_p,
c_char_p,
c_char_p,
c_char_p,
POINTER( c_int ),
POINTER( c_int ),
c_char_p,
c_char_p,
c_char_p,
c_char_p,
c_char_p,
POINTER( c_double ),
c_char_p,
POINTER( c_int ),
POINTER( c_int ),
c_len_type, c_len_type,
c_len_type, c_len_type,
c_len_type, c_len_type,
c_len_type, c_len_type,
c_len_type, c_len_type]
self.eoslibinit_init_thermo.restype = None
self.eoslibinit_init_thermo(eos_c,
mixing_c,
alpha_c,
comp_string_c,
byref(nphases_c),
liq_vap_discr_method_c,
csp_eos_c,
csp_ref_comp_c,
kij_ref_c,
alpha_ref_c,
saft_ref_c,
b_exponent_c,
TrendEosForCp_c,
cptype_c,
silent_c,
eos_len,
mixing_len,
alpha_len,
comp_string_len,
csp_eos_len,
csp_ref_comp_len,
kij_ref_len,
alpha_ref_len,
saft_ref_len,
TrendEosForCp_len)
    def init_peneloux_volume_translation(self, parameter_reference="Default"):
        """Initialize Peneloux volume translation for the active model.
        Args:
            parameter_reference (str, optional): Parameter set identifier.
                Defaults to "Default".
        """
        self.activate()
        volume_trans_model = "PENELOUX"
        volume_trans_model_c = c_char_p(volume_trans_model.encode('ascii'))
        volume_trans_model_len = c_len_type(len(volume_trans_model))
        ref_string_c = c_char_p(parameter_reference.encode('ascii'))
        ref_string_len = c_len_type(len(parameter_reference))
        # Hidden string-length arguments are passed last, per Fortran ABI.
        self.eoslibinit_init_volume_translation.argtypes = [c_char_p,
                                                            c_char_p,
                                                            c_len_type,
                                                            c_len_type]
        self.eoslibinit_init_volume_translation.restype = None
        self.eoslibinit_init_volume_translation(volume_trans_model_c,
                                                ref_string_c,
                                                volume_trans_model_len,
                                                ref_string_len)
def redefine_critical_parameters(self, silent=True):
self.activate()
if silent:
silent_c = c_int(1)
else:
silent_c = c_int(0)
self.eoslibinit_redefine_critical_parameters.argtypes = [ POINTER( c_int ) ]
self.eoslibinit_redefine_critical_parameters.restype = None
self.eoslibinit_redefine_critical_parameters(byref(silent_c))
get_phase_flags.restype = None
self.s_get_phase_flags(byref(iTWOPH),
byref(iLIQPH),
byref(iVAPPH),
byref(iMINGIBBSPH),
byref(iSINGLEPH),
byref(iSOLIDPH),
byref(iFAKEPH))
self.TWOPH = iTWOPH.value
self.LIQPH = iLIQPH.value
self.VAPPH = iVAPPH.value
self.MINGIBBSPH = iMINGIBBSPH.value
self.SINGLEPH = iSINGLEPH.value
self.SOLIDPH = iSOLIDPH.value
self.FAKEPH = iFAKEPH.value
def get_phase_type(self, i_phase):
phase_string_list = ["TWO_PHASE", "LIQUID", "VAPOR", "MINIMUM_GIBBS", "SINGLE", "SOLID", "FAKE"]
return phase_string_list[i_phase]
    def set_tmin(self, temp):
        """Set the library-wide minimum temperature (K)."""
        self.minimum_temperature_c.value = temp
def get_tmin(self):
temp = self.minimum_temperature_c.value
return temp
    def set_pmin(self, press):
        """Set the library-wide minimum pressure (Pa)."""
        self.minimum_pressure_c.value = press
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double )]
self.s_eos_specificvolume.restype = None
self.s_eos_specificvolume(byref(temp_c),
byref(press_c),
x_c,
byref(phase_c),
byref(v_c),
dvdt_c,
dvdp_c,
dvdn_c)
return_tuple = (v_c.value, )
if not dvdt is None:
return_tuple += (dvdt_c[0], )
if not dvdp is None:
return_tuple += (dvdp_c[0], )
if not dvdn is None:
return_tuple += (np.array(dvdn_c), )
return return_tuple
    def zfac(self,temp,press,x,phase,dzdt=None,dzdp=None,dzdn=None):
        """Calculate the compressibility factor of the given phase.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            x (array_like): Molar composition (-)
            phase (int): Phase flag (see get_phase_flags)
            dzdt: Pass any non-None value to request the temperature derivative.
            dzdp: Pass any non-None value to request the pressure derivative.
            dzdn: Pass any non-None value to request mole-number derivatives.
        Returns:
            tuple: Z-factor (-), followed by the requested derivatives in
            the order dzdt, dzdp, dzdn.
        """
        self.activate()
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        temp_c = c_double(temp)
        press_c = c_double(press)
        x_c = (c_double * len(x))(*x)
        phase_c = c_int(phase)
        z_c = c_double(0.0)
        if dzdt is None:
            dzdt_c = null_pointer
        else:
            dzdt_c = POINTER(c_double)(c_double(0.0))
        if dzdp is None:
            dzdp_c = null_pointer
        else:
            dzdp_c = POINTER(c_double)(c_double(0.0))
        if dzdn is None:
            dzdn_c = null_pointer
        else:
            dzdn_c = (c_double * len(x))(0.0)
        self.s_eos_zfac.argtypes = [POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_int ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double )]
        self.s_eos_zfac.restype = None
        self.s_eos_zfac(byref(temp_c),
                        byref(press_c),
                        x_c,
                        byref(phase_c),
                        byref(z_c),
                        dzdt_c,
                        dzdp_c,
                        dzdn_c)
        return_tuple = (z_c.value, )
        if not dzdt is None:
            return_tuple += (dzdt_c[0], )
        if not dzdp is None:
            return_tuple += (dzdp_c[0], )
        if not dzdn is None:
            return_tuple += (np.array(dzdn_c), )
        return return_tuple
    def thermo(self,temp,press,x,phase,dlnfugdt=None,dlnfugdp=None,
               dlnfugdn=None,ophase=None,v=None):
        """Calculate logarithm of fugacity coefficients for the given phase,
        optionally with derivatives, the resolved phase flag and volume.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            x (array_like): Molar composition (-)
            phase (int): Phase flag (see get_phase_flags)
            dlnfugdt: Pass any non-None value to request d(ln phi)/dT.
            dlnfugdp: Pass any non-None value to request d(ln phi)/dP.
            dlnfugdn: Pass any non-None value to request d(ln phi)/dn matrix.
            ophase: Pass any non-None value to get the phase flag actually used.
            v: Pass any non-None value to get the specific volume.
        Returns:
            tuple: ndarray of ln(phi), followed by the requested optional
            outputs in the order dlnfugdt, dlnfugdp, dlnfugdn, ophase, v.
        """
        self.activate()
        # A NULL pointer tells the library to skip that optional output.
        null_pointer = POINTER(c_double)()
        temp_c = c_double(temp)
        press_c = c_double(press)
        x_c = (c_double * len(x))(*x)
        phase_c = c_int(phase)
        lnfug_c = (c_double * len(x))(0.0)
        if dlnfugdt is None:
            dlnfugdt_c = null_pointer
        else:
            dlnfugdt_c = (c_double * len(x))(0.0)
        if dlnfugdp is None:
            dlnfugdp_c = null_pointer
        else:
            dlnfugdp_c = (c_double * len(x))(0.0)
        if dlnfugdn is None:
            dlnfugdn_c = null_pointer
        else:
            # Flat nc*nc buffer; unpacked column-major below.
            dlnfugdn_c = (c_double * len(x)**2)(0.0)
        if ophase is None:
            ophase_c = POINTER(c_int)()
        else:
            ophase_c = POINTER(c_int)(c_int(0))
        # metaExtremum output is never requested by this wrapper.
        metaExtremum_c = POINTER(c_int)()
        if v is None:
            v_c = null_pointer
        else:
            v_c = POINTER(c_double)(c_double(0.0))
        self.s_eos_thermo.argtypes = [POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_int ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_int ),
                                      POINTER( c_int ),
                                      POINTER( c_double )]
        self.s_eos_thermo.restype = None
        self.s_eos_thermo(byref(temp_c),
                          byref(press_c),
                          x_c,
                          byref(phase_c),
                          lnfug_c,
                          dlnfugdt_c,
                          dlnfugdp_c,
                          dlnfugdn_c,
                          ophase_c,
                          metaExtremum_c,
                          v_c)
        return_tuple = (np.array(lnfug_c), )
        if not dlnfugdt is None:
            return_tuple += (np.array(dlnfugdt_c), )
        if not dlnfugdp is None:
            return_tuple += (np.array(dlnfugdp_c), )
        if not dlnfugdn is None:
            # Unpack the column-major flat buffer into a (nc, nc) matrix.
            dlnfugdn_r = np.zeros((len(x),len(x)))
            for i in range(len(x)):
                for j in range(len(x)):
                    dlnfugdn_r[i][j] = dlnfugdn_c[i+j*len(x)]
            return_tuple += (dlnfugdn_r, )
        if not ophase is None:
            return_tuple += (ophase_c[0], )
        if not v is None:
            return_tuple += (v_c[0], )
        return return_tuple
    def enthalpy(self,temp,press,x,phase,dhdt=None,dhdp=None,dhdn=None):
        """Calculate specific enthalpy of the given phase.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            x (array_like): Molar composition (-)
            phase (int): Phase flag (see get_phase_flags)
            dhdt: Pass any non-None value to request dh/dT.
            dhdp: Pass any non-None value to request dh/dP.
            dhdn: Pass any non-None value to request dh/dn.
        Returns:
            tuple: Enthalpy (J/mol), followed by the requested derivatives
            in the order dhdt, dhdp, dhdn.
        """
        self.activate()
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        temp_c = c_double(temp)
        press_c = c_double(press)
        x_c = (c_double * len(x))(*x)
        phase_c = c_int(phase)
        h_c = c_double(0.0)
        if dhdt is None:
            dhdt_c = null_pointer
        else:
            dhdt_c = POINTER(c_double)(c_double(0.0))
        if dhdp is None:
            dhdp_c = null_pointer
        else:
            dhdp_c = POINTER(c_double)(c_double(0.0))
        if dhdn is None:
            dhdn_c = null_pointer
        else:
            dhdn_c = (c_double * len(x))(0.0)
        # residual flag not requested (NULL) — full enthalpy is returned.
        # NOTE(review): semantics of the flag inferred from the name; confirm.
        residual_c = POINTER(c_int)()
        self.s_eos_enthalpy.argtypes = [POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_int ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_double ),
                                        POINTER( c_int )]
        self.s_eos_enthalpy.restype = None
        self.s_eos_enthalpy(byref(temp_c),
                            byref(press_c),
                            x_c,
                            byref(phase_c),
                            byref(h_c),
                            dhdt_c,
                            dhdp_c,
                            dhdn_c,
                            residual_c)
        return_tuple = (h_c.value, )
        if not dhdt is None:
            return_tuple += (dhdt_c[0], )
        if not dhdp is None:
            return_tuple += (dhdp_c[0], )
        if not dhdn is None:
            return_tuple += (np.array(dhdn_c), )
        return return_tuple
    def entropy(self,temp,press,x,phase,dsdt=None,dsdp=None,dsdn=None):
        """Calculate specific entropy of the given phase.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            x (array_like): Molar composition (-)
            phase (int): Phase flag (see get_phase_flags)
            dsdt: Pass any non-None value to request ds/dT.
            dsdp: Pass any non-None value to request ds/dP.
            dsdn: Pass any non-None value to request ds/dn.
        Returns:
            tuple: Entropy (J/mol/K), followed by the requested derivatives
            in the order dsdt, dsdp, dsdn.
        """
        self.activate()
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        temp_c = c_double(temp)
        press_c = c_double(press)
        x_c = (c_double * len(x))(*x)
        phase_c = c_int(phase)
        s_c = c_double(0.0)
        if dsdt is None:
            dsdt_c = null_pointer
        else:
            dsdt_c = POINTER(c_double)(c_double(0.0))
        if dsdp is None:
            dsdp_c = null_pointer
        else:
            dsdp_c = POINTER(c_double)(c_double(0.0))
        if dsdn is None:
            dsdn_c = null_pointer
        else:
            dsdn_c = (c_double * len(x))(0.0)
        # residual flag not requested (NULL) — full entropy is returned.
        # NOTE(review): semantics of the flag inferred from the name; confirm.
        residual_c = POINTER(c_int)()
        self.s_eos_entropy.argtypes = [POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_int ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_int )]
        self.s_eos_entropy.restype = None
        self.s_eos_entropy(byref(temp_c),
                           byref(press_c),
                           x_c,
                           byref(phase_c),
                           byref(s_c),
                           dsdt_c,
                           dsdp_c,
                           dsdn_c,
                           residual_c)
        return_tuple = (s_c.value, )
        if not dsdt is None:
            return_tuple += (dsdt_c[0], )
        if not dsdp is None:
            return_tuple += (dsdp_c[0], )
        if not dsdn is None:
            return_tuple += (np.array(dsdn_c), )
        return return_tuple
    def idealenthalpysingle(self,temp,press,j,dhdt=None,dhdp=None):
        """Calculate ideal-gas enthalpy of a single component.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            j (int): Component index (indexing convention of the Fortran
                library — presumably 1-based; confirm against callers)
            dhdt: Pass any non-None value to request dh/dT.
            dhdp: Pass any non-None value to request dh/dP.
        Returns:
            tuple: Ideal enthalpy (J/mol), followed by the requested
            derivatives in the order dhdt, dhdp.
        """
        self.activate()
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        temp_c = c_double(temp)
        press_c = c_double(press)
        j_c = c_int(j)
        h_c = c_double(0.0)
        if dhdt is None:
            dhdt_c = null_pointer
        else:
            dhdt_c = POINTER(c_double)(c_double(0.0))
        if dhdp is None:
            dhdp_c = null_pointer
        else:
            dhdp_c = POINTER(c_double)(c_double(0.0))
        self.s_eos_idealenthalpysingle.argtypes = [POINTER( c_double ),
                                                   POINTER( c_double ),
                                                   POINTER( c_int ),
                                                   POINTER( c_double ),
                                                   POINTER( c_double ),
                                                   POINTER( c_double )]
        self.s_eos_idealenthalpysingle.restype = None
        self.s_eos_idealenthalpysingle(byref(temp_c),
                                       byref(press_c),
                                       byref(j_c),
                                       byref(h_c),
                                       dhdt_c,
                                       dhdp_c)
        return_tuple = (h_c.value, )
        if not dhdt is None:
            return_tuple += (dhdt_c[0], )
        if not dhdp is None:
            return_tuple += (dhdp_c[0], )
        return return_tuple
    def speed_of_sound(self,temp,press,x,y,z,betaV,betaL,phase):
        """Calculate the two-phase speed of sound.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            x (array_like): Liquid composition (-)
            y (array_like): Vapor composition (-)
            z (array_like): Overall composition (-)
            betaV (float): Vapor molar fraction (-)
            betaL (float): Liquid molar fraction (-)
            phase (int): Phase flag (see get_phase_flags)
        Returns:
            float: Speed of sound (m/s)
        """
        self.activate()
        temp_c = c_double(temp)
        press_c = c_double(press)
        x_c = (c_double * len(x))(*x)
        y_c = (c_double * len(y))(*y)
        z_c = (c_double * len(z))(*z)
        betaV_c = c_double(betaV)
        betaL_c = c_double(betaL)
        phase_c = c_int(phase)
        # Output phase flag is not requested (NULL pointer).
        ph_c = POINTER(c_int)()
        self.s_sos_sound_velocity_2ph.argtypes = [POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_double ),
                                                  POINTER( c_int ),
                                                  POINTER( c_int )]
        # Unlike most wrapped routines, this one is a Fortran function
        # returning the speed of sound directly.
        self.s_sos_sound_velocity_2ph.restype = c_double
        sos = self.s_sos_sound_velocity_2ph(byref(temp_c),
                                            byref(press_c),
                                            x_c,
                                            y_c,
                                            z_c,
                                            byref(betaV_c),
                                            byref(betaL_c),
                                            byref(phase_c),
                                            ph_c)
        return sos
POINTER( c_double )]
self.s_twophasetpflash.restype = None
self.s_twophasetpflash(byref(temp_c),
byref(press_c),
z_c,
byref(betaV_c),
byref(betaL_c),
byref(phase_c),
x_c,
y_c)
x = np.array(x_c)
y = np.array(y_c)
return x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_psflash(self,press,z,entropy,temp=None):
self.activate()
press_c = c_double(press)
z_c = (c_double * len(z))(*z)
s_c = c_double(entropy)
if not temp is None:
temp_c = POINTER( c_double )(c_double(temp))
else:
temp_c = POINTER( c_double )(c_double(0.0))
x_c = (c_double * len(z))(0.0)
y_c = (c_double * len(z))(0.0)
betaV_c = c_double(0.0)
betaL_c = c_double(0.0)
phase_c = c_int(0)
ierr_c = c_int(0)
self.s_psflash_twophase.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int ),
POINTER( c_int )]
self.s_psflash_twophase.restype = None
self.s_psflash_twophase(temp_c,
byref(press_c),
z_c,
byref(betaV_c),
byref(betaL_c),
x_c,
y_c,
byref(s_c),
byref(phase_c),
byref(ierr_c))
if ierr_c.value != 0:
raise Exception("PS flash calclualtion failed")
x = np.array(x_c)
y = np.array(y_c)
return temp_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_phflash(self,press,z,enthalpy,temp=None):
self.activate()
press_c = c_double(press)
z_c = (c_double * len(z))(*z)
h_c = c_double(enthalpy)
if not temp is None:
temp_c = POINTER( c_double )(c_double(temp))
else:
temp_c = POINTER( c_double )(c_double(0.0))
x_c = (c_double * len(z))(0.0)
y_c = (c_double * len(z))(0.0)
betaV_c = c_double(0.0)
betaL_c = c_double(0.0)
phase_c = c_int(0)
ierr_c = c_int(0)
self.s_phflash_twophase.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int ),
POINTER( c_int )]
self.s_phflash_twophase.restype = None
self.s_phflash_twophase(temp_c,
byref(press_c),
z_c,
byref(betaV_c),
byref(betaL_c),
x_c,
y_c,
byref(h_c),
byref(phase_c),
byref(ierr_c))
if ierr_c.value != 0:
raise Exception("PH flash calclualtion failed")
x = np.array(x_c)
y = np.array(y_c)
return temp_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
def two_phase_uvflash(self,z,specific_energy,specific_volume,temp=None,press=None):
self.activate()
z_c = (c_double * len(z))(*z)
e_c = c_double(specific_energy)
v_c = c_double(specific_volume)
if not temp is None:
temp_c = POINTER( c_double )(c_double(temp))
else:
temp_c = POINTER( c_double )(c_double(0.0))
if not press is None:
press_c = POINTER( c_double )(c_double(press))
else:
press_c = POINTER( c_double )(c_double(0.0))
x_c = (c_double * len(z))(0.0)
y_c = (c_double * len(z))(0.0)
betaV_c = c_double(0.0)
betaL_c = c_double(0.0)
phase_c = c_int(0)
self.s_uvflash_twophase.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_uvflash_twophase(temp_c,
press_c,
z_c,
byref(betaV_c),
byref(betaL_c),
x_c,
y_c,
byref(e_c),
byref(v_c),
byref(phase_c))
x = np.array(x_c)
y = np.array(y_c)
return temp_c[0], press_c[0], x, y, betaV_c.value, betaL_c.value, phase_c.value
    def guess_phase(self, temp, press, z):
        """Guess the phase of a mixture at the given state.
        Args:
            temp (float): Temperature (K)
            press (float): Pressure (Pa)
            z (array_like): Overall composition (-)
        Returns:
            int: Phase flag (see get_phase_flags)
        """
        self.activate()
        temp_c = c_double(temp)
        press_c = c_double(press)
        z_c = (c_double * len(z))(*z)
        # Optional outputs are not requested (NULL pointers).
        null_pointer = POINTER(c_double)()
        temp_comp_c = null_pointer
        press_comp_c = null_pointer
        vb_ratio_c = null_pointer
        self.s_guess_phase.argtypes = [POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double )]
        # Fortran function returning the phase flag directly.
        self.s_guess_phase.restype = c_int
        phase = self.s_guess_phase(byref(temp_c),
                                   byref(press_c),
                                   z_c,
                                   temp_comp_c,
                                   press_comp_c,
                                   vb_ratio_c)
        return phase
NTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_pressure_tv.restype = c_double
P = self.s_pressure_tv(byref(temp_c),
byref(v_c),
n_c,
dpdv_c,
dpdt_c,
d2pdv2_c,
dpdn_c,
recalculate_c)
return_tuple = (P, )
if not dpdt is None:
return_tuple += (dpdt_c[0], )
if not dpdv is None:
return_tuple += (dpdv_c[0], )
if not dpdn is None:
return_tuple += (np.array(dpdn_c), )
return return_tuple
    def internal_energy_tv(self, temp, volume, n, dedt=None, dedv=None):
        """Calculate internal energy given temperature, volume and mol numbers.
        Args:
            temp (float): Temperature (K)
            volume (float): Volume (m3)
            n (array_like): Mol numbers (mol)
            dedt: Pass any non-None value to request dE/dT.
            dedv: Pass any non-None value to request dE/dV.
        Returns:
            tuple: Internal energy (J), followed by the requested
            derivatives in the order dedt, dedv.
        """
        self.activate()
        temp_c = c_double(temp)
        v_c = c_double(volume)
        e_c = c_double(0.0)
        n_c = (c_double * len(n))(*n)
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        if dedt is None:
            dedt_c = null_pointer
        else:
            dedt_c = POINTER(c_double)(c_double(0.0))
        if dedv is None:
            dedv_c = null_pointer
        else:
            dedv_c = POINTER(c_double)(c_double(0.0))
        recalculate_c = POINTER(c_int)(c_int(1))
        self.s_internal_energy_tv.argtypes = [POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_int )]
        self.s_internal_energy_tv.restype = None
        self.s_internal_energy_tv(byref(temp_c),
                                  byref(v_c),
                                  n_c,
                                  byref(e_c),
                                  dedt_c,
                                  dedv_c,
                                  recalculate_c)
        return_tuple = (e_c.value, )
        if not dedt is None:
            return_tuple += (dedt_c[0], )
        if not dedv is None:
            return_tuple += (dedv_c[0], )
        return return_tuple
    def entropy_tv(self, temp, volume, n, dsdt=None, dsdv=None, dsdn=None):
        """Calculate entropy given temperature, volume and mol numbers.
        Args:
            temp (float): Temperature (K)
            volume (float): Volume (m3)
            n (array_like): Mol numbers (mol)
            dsdt: Pass any non-None value to request dS/dT.
            dsdv: Pass any non-None value to request dS/dV.
            dsdn: Pass any non-None value to request dS/dn.
        Returns:
            tuple: Entropy (J/K), followed by the requested derivatives in
            the order dsdt, dsdv, dsdn.
        """
        self.activate()
        temp_c = c_double(temp)
        v_c = c_double(volume)
        s_c = c_double(0.0)
        n_c = (c_double * len(n))(*n)
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        if dsdt is None:
            dsdt_c = null_pointer
        else:
            dsdt_c = POINTER(c_double)(c_double(0.0))
        if dsdv is None:
            dsdv_c = null_pointer
        else:
            dsdv_c = POINTER(c_double)(c_double(0.0))
        if dsdn is None:
            dsdn_c = null_pointer
        else:
            dsdn_c = (c_double * len(n))(0.0)
        # residual flag passed as 0.
        # NOTE(review): semantics of the flag inferred from the name; confirm.
        residual_c = POINTER(c_int)(c_int(0))
        self.s_entropy_tv.argtypes = [POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_double ),
                                      POINTER( c_int )]
        self.s_entropy_tv.restype = None
        self.s_entropy_tv(byref(temp_c),
                          byref(v_c),
                          n_c,
                          byref(s_c),
                          dsdt_c,
                          dsdv_c,
                          dsdn_c,
                          residual_c)
        return_tuple = (s_c.value, )
        if not dsdt is None:
            return_tuple += (dsdt_c[0], )
        if not dsdv is None:
            return_tuple += (dsdv_c[0], )
        if not dsdn is None:
            return_tuple += (np.array(dsdn_c), )
        return return_tuple
    def enthalpy_tv(self, temp, volume, n, dhdt=None, dhdv=None, dhdn=None):
        """Calculate enthalpy given temperature, volume and mol numbers.
        Args:
            temp (float): Temperature (K)
            volume (float): Volume (m3)
            n (array_like): Mol numbers (mol)
            dhdt: Pass any non-None value to request dH/dT.
            dhdv: Pass any non-None value to request dH/dV.
            dhdn: Pass any non-None value to request dH/dn.
        Returns:
            tuple: Enthalpy (J), followed by the requested derivatives in
            the order dhdt, dhdv, dhdn.
        """
        self.activate()
        temp_c = c_double(temp)
        v_c = c_double(volume)
        h_c = c_double(0.0)
        n_c = (c_double * len(n))(*n)
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        if dhdt is None:
            dhdt_c = null_pointer
        else:
            dhdt_c = POINTER(c_double)(c_double(0.0))
        if dhdv is None:
            dhdv_c = null_pointer
        else:
            dhdv_c = POINTER(c_double)(c_double(0.0))
        if dhdn is None:
            dhdn_c = null_pointer
        else:
            dhdn_c = (c_double * len(n))(0.0)
        # residual flag passed as 0.
        # NOTE(review): semantics of the flag inferred from the name; confirm.
        residual_c = POINTER(c_int)(c_int(0))
        self.s_enthalpy_tv.argtypes = [POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_double ),
                                       POINTER( c_int )]
        self.s_enthalpy_tv.restype = None
        self.s_enthalpy_tv(byref(temp_c),
                           byref(v_c),
                           n_c,
                           byref(h_c),
                           dhdt_c,
                           dhdv_c,
                           dhdn_c,
                           residual_c)
        return_tuple = (h_c.value, )
        if not dhdt is None:
            return_tuple += (dhdt_c[0], )
        if not dhdv is None:
            return_tuple += (dhdv_c[0], )
        if not dhdn is None:
            return_tuple += (np.array(dhdn_c), )
        return return_tuple
    def helmholtz_tv(self, temp, volume, n, dadt=None, dadv=None):
        """Calculate Helmholtz free energy given temperature, volume and
        mol numbers.
        Args:
            temp (float): Temperature (K)
            volume (float): Volume (m3)
            n (array_like): Mol numbers (mol)
            dadt: Pass any non-None value to request dA/dT.
            dadv: Pass any non-None value to request dA/dV.
        Returns:
            tuple: Helmholtz energy (J), followed by the requested
            derivatives in the order dadt, dadv.
        """
        self.activate()
        temp_c = c_double(temp)
        v_c = c_double(volume)
        a_c = c_double(0.0)
        n_c = (c_double * len(n))(*n)
        # A NULL pointer tells the library to skip that derivative.
        null_pointer = POINTER(c_double)()
        if dadt is None:
            dadt_c = null_pointer
        else:
            dadt_c = POINTER(c_double)(c_double(0.0))
        if dadv is None:
            dadv_c = null_pointer
        else:
            dadv_c = POINTER(c_double)(c_double(0.0))
        # Second derivatives are never requested by this wrapper.
        d2adt2_c = null_pointer
        d2adv2_c = null_pointer
        d2advdt_c = null_pointer
        recalculate_c = POINTER(c_int)(c_int(1))
        self.s_helmholtz_energy.argtypes = [POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_double ),
                                            POINTER( c_int )]
        self.s_helmholtz_energy.restype = None
        self.s_helmholtz_energy(byref(temp_c),
                                byref(v_c),
                                n_c,
                                byref(a_c),
                                dadt_c,
                                dadv_c,
                                d2adt2_c,
                                d2adv2_c,
                                d2advdt_c,
                                recalculate_c)
        return_tuple = (a_c.value, )
        if not dadt is None:
            return_tuple += (dadt_c[0], )
        if not dadv is None:
            return_tuple += (dadv_c[0], )
        return return_tuple
def chemical_potential_tv(self, temp, volume, n, dmudt=None, dmudv=None, dmudn=None):
self.activate()
temp_c = c_double(temp)
v_c = c_double(volume)
mu_c = (c_double * len(n))(0.0)
n_c = (c_double * len(n))(*n)
null_pointer = POINTER(c_double)()
if dmudt is None:
dmudt_c = null_pointer
else:
dmudt_c = (c_double * len(n))(0.0)
if dmudv is None:
dmudv_c = null_pointer
else:
dmudv_c = (c_double * len(n))(0.0)
if dmudn is None:
dmudn_c = null_pointer
else:
dmudn_c = (c_double * len(n)**2)(0.0)
recalculate_c = POINTER(c_int)(c_int(1))
self.s_chempot.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_chempot.restype = None
self.s_chempot(byref(temp_c),
byref(v_c),
n_c,
mu_c,
dmudv_c,
dmudt_c,
dmudn_c,
recalculate_c)
return_tuple = (np.array(mu_c), )
if not dmudt is None:
return_tuple += (np.array(dmudt_c), )
if not dmudv is None:
return_tuple += (np.array(dmudv_c), )
if not dmudn is None:
dmudn = np.zeros((len(n), len(n)))
for i in range(len(n)):
for j in range(len(n)):
dmudn[i][j] = dmudn_c[i + j*len(n)]
return_tuple += (np.array(dmudn), )
return return_tuple
    def fugacity_tv(self, temp, volume, n, dlnphidt=None, dlnphidv=None, dlnphidn=None):
        """Calculate log fugacity coefficients given temperature, volume and mole numbers.

        Optional differentials are computed only when the corresponding
        argument is not None (the value passed is just a flag).

        Args:
            temp: Temperature (presumably K -- confirm against the Fortran core).
            volume: Volume (presumably m^3 -- confirm).
            n: Mole numbers, one entry per component.
            dlnphidt: Pass a non-None value to also return d(ln phi)/dT.
            dlnphidv: Pass a non-None value to also return d(ln phi)/dV.
            dlnphidn: Pass a non-None value to also return d(ln phi)/dn.

        Returns:
            Tuple: (lnphi,) followed by the requested differentials in the
            order (dlnphidt, dlnphidv, dlnphidn).
        """
        self.activate()
        temp_c = c_double(temp)
        v_c = c_double(volume)
        lnphi_c = (c_double * len(n))(0.0)
        n_c = (c_double * len(n))(*n)
        # A NULL pointer tells the Fortran routine to skip that differential.
        null_pointer = POINTER(c_double)()
        if dlnphidt is None:
            dlnphidt_c = null_pointer
        else:
            dlnphidt_c = (c_double * len(n))(0.0)
        if dlnphidv is None:
            dlnphidv_c = null_pointer
        else:
            dlnphidv_c = (c_double * len(n))(0.0)
        if dlnphidn is None:
            dlnphidn_c = null_pointer
        else:
            dlnphidn_c = (c_double * len(n)**2)(0.0)
        self.s_lnphi_tv.argtypes = [POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double )]
        self.s_lnphi_tv.restype = None
        self.s_lnphi_tv(byref(temp_c),
                        byref(v_c),
                        n_c,
                        lnphi_c,
                        dlnphidt_c,
                        dlnphidv_c,
                        dlnphidn_c)
        return_tuple = (np.array(lnphi_c), )
        if not dlnphidt is None:
            return_tuple += (np.array(dlnphidt_c), )
        if not dlnphidv is None:
            return_tuple += (np.array(dlnphidv_c), )
        if not dlnphidn is None:
            # The flat buffer is column-major (Fortran layout): element (i, j)
            # lives at index i + j*len(n).
            dlnphidn = np.zeros((len(n),len(n)))
            for i in range(len(n)):
                for j in range(len(n)):
                    dlnphidn[i][j] = dlnphidn_c[i + j*len(n)]
            return_tuple += (dlnphidn, )
        return return_tuple
e ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_bubble_p.restype = c_double
press = self.s_bubble_p(byref(temp_c),
z_c,
y_c,
byref(ierr_c))
y = np.array(y_c)
if ierr_c.value != 0:
raise Exception("bubble_pressure calclualtion failed")
return press, y
def dew_temperature(self,press,z):
self.activate()
press_c = c_double(press)
x_c = (c_double * len(z))(0.0)
z_c = (c_double * len(z))(*z)
ierr_c = c_int(0)
self.s_dew_t.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_dew_t.restype = c_double
temp = self.s_dew_t(byref(press_c),
x_c,
z_c,
byref(ierr_c))
x = np.array(x_c)
if ierr_c.value != 0:
raise Exception("dew_temperature calclualtion failed")
return temp, x
def dew_pressure(self,temp,z):
self.activate()
temp_c = c_double(temp)
x_c = (c_double * len(z))(0.0)
z_c = (c_double * len(z))(*z)
ierr_c = c_int(0)
self.s_dew_p.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int )]
self.s_dew_p.restype = c_double
press = self.s_dew_p(byref(temp_c),
x_c,
z_c,
byref(ierr_c))
x = np.array(x_c)
if ierr_c.value != 0:
raise Exception("bubble_pressure calclualtion failed")
return press, x
    def get_envelope_twophase(self, initial_pressure, z, maximum_pressure=1.5e7,
                              minimum_temperature=None, step_size=None,
                              calc_v=False):
        """Trace a two-phase (T, P) phase envelope for composition z.

        Args:
            initial_pressure: Starting pressure for the envelope tracer.
            z: Overall composition (mole fractions).
            maximum_pressure: Stop tracing above this pressure.
            minimum_temperature: Optional lower temperature bound for tracing.
            step_size: Optional tracer step size (library default when None).
            calc_v: If True, also compute specific volume along the envelope.

        Returns:
            (T, P) arrays, plus a volume array when calc_v is True.
        """
        self.activate()
        # Maximum number of envelope points the output buffers can hold.
        nmax = 1000
        z_c = (c_double * len(z))(*z)
        temp_c = c_double(0.0)
        press_c = c_double(initial_pressure)
        spec_c = c_int(1)
        beta_in_c = c_double(1.0)
        max_press_c = c_double(maximum_pressure)
        nmax_c = c_int(nmax)
        Ta_c = (c_double * nmax)(0.0)
        Pa_c = (c_double * nmax)(0.0)
        Ki_c = (c_double * (nmax*len(z)))(0.0)
        beta_c = (c_double * nmax)(0.0)
        n_c = c_int(0)
        # NULL pointers disable the optional outputs / inputs.
        null_pointer = POINTER(c_double)()
        criconden_c = null_pointer
        crit_c = null_pointer
        if step_size is None:
            ds_c = null_pointer
        else:
            ds_c = POINTER(c_double)(c_double(step_size))
        exitOnTriplePoint_c = POINTER(c_int)()
        if minimum_temperature is None:
            tme_c = null_pointer
        else:
            tme_c = POINTER(c_double)(c_double(minimum_temperature))
        self.s_envelope_plot.argtypes = [POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_int ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_int ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_int ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_double ),
                                         POINTER( c_int ),
                                         POINTER( c_double )]
        self.s_envelope_plot.restype = None
        self.s_envelope_plot(z_c,
                             byref(temp_c),
                             byref(press_c),
                             byref(spec_c),
                             byref(beta_in_c),
                             byref(max_press_c),
                             byref(nmax_c),
                             Ta_c,
                             Pa_c,
                             Ki_c,
                             beta_c,
                             byref(n_c),
                             criconden_c,
                             crit_c,
                             ds_c,
                             exitOnTriplePoint_c,
                             tme_c)
        # Only the first n_c points of the fixed-size buffers are valid.
        t_vals = np.array(Ta_c[0:n_c.value])
        p_vals = np.array(Pa_c[0:n_c.value])
        return_tuple = (t_vals, p_vals)
        if calc_v:
            # Special case: single component (one mole fraction equals 1).
            # The saturation line is traced once, so mirror it to produce
            # the vapor branch (first half) and liquid branch (second half).
            if np.amax(z) == 1:
                t_vals_single = np.zeros(2*n_c.value)
                p_vals_single = np.zeros(2*n_c.value)
                v_vals_single = np.zeros_like(t_vals_single)
                for i in range(n_c.value):
                    t_vals_single[i] = t_vals[i]
                    t_vals_single[-i-1] = t_vals[i]
                    p_vals_single[i] = p_vals[i]
                    p_vals_single[-i-1] = p_vals[i]
                    v_vals_single[i], = self.specific_volume(t_vals[i], p_vals[i], z, self.VAPPH)
                    v_vals_single[-i-1], = self.specific_volume(t_vals[i], p_vals[i], z, self.LIQPH)
                return_tuple = (t_vals_single, p_vals_single, v_vals_single)
            else:
                v_vals = np.zeros_like(t_vals)
                for i in range(n_c.value):
                    # beta is the vapor fraction: pick the phase the envelope
                    # point belongs to before evaluating the volume.
                    if beta_c[i] > 0.5:
                        phase = self.VAPPH
                    else:
                        phase = self.LIQPH
                    v_vals[i], = self.specific_volume(t_vals[i], p_vals[i], z, phase)
                return_tuple += (v_vals, )
        return return_tuple
def get_binary_pxy(self,
temp,
maximum_pressure=1.5e7,
minimum_pressure=1.0e5,
maximum_dz = 0.003,
maximum_dlns=0.01):
self.activate()
nmax = 10000
temp_c = c_double(temp)
min_temp_c = c_double(0.0)
ispec_c = c_int(1)
press_c = c_double(0.0)
max_press_c = c_double(maximum_pressure)
min_press_c = c_double(minimum_pressure)
dz_max_c = c_double(maximum_dz)
dlns_max_c = c_double(maximum_dlns)
filename = "binaryVLLE.dat"
filename_c = c_char_p(filename.encode('ascii'))
filename_len = c_len_type(len(filename))
res_c = (c_double * (nmax*9))(0.0)
nres_c = (c_int * 3)(0)
wsf_c = c_int(1)
self.s_binary_plot.argtypes = [POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_char_p ),
POINTER( c_double ),
POINTER( c_double ),
POINTER( c_int ),
POINTER( c_int ),
POINTER( c_double ),
c_len_type]
self.s_binary_plot.restype = None
self.s_binary_plot(byref(temp_c),
byref(press_c),
byref(ispec_c),
byref(min_temp_c),
byref(max_press_c),
byref(dz_max_c),
filename_c,
byref(dlns_max_c),
res_c,
nres_c,
byref(wsf_c),
byref(min_press_c),
filename_len)
nLLE = nres_c[0]
nL1VE = nres_c[1]
nL2VE = nres_c[2]
if nLLE > 0:
xLLE = np.zeros(nLLE)
wLLE = np.zeros(nLLE)
pLLE = np.zeros(nLLE)
for i in range(nLLE):
xLLE[i] = res_c[i*9]
wLLE[i] = res_c[i*9+1]
pLLE[i] = res_c[i*9+2]
LLE = (xLLE, wLLE, pLLE)
else:
LLE = (None, None, None)
if nL1VE > 0:
xL1VE = np.zeros(nL1VE)
wL1VE = np.zeros(nL1VE)
pL1VE = np.zeros(nL1VE)
for i in range(nL1VE):
xL1VE[i] = res_c[i*9+3]
wL1VE[i] = res_c[i*9+4]
pL1VE[i] = res_c[i*9+5]
L1VE = (xL1VE, wL1VE, pL1VE)
else:
L1VE = (None, None, None)
if nL2VE > 0:
xL2VE = np.zeros(nL2VE)
wL2VE = np.zeros(nL2VE)
pL2VE = np.zeros(nL2VE)
for i in range(nL2VE):
xL2VE[i] = res_c[i*9+6]
wL2VE[i] = res_c[i*9+7]
pL2VE[i] = res_c[i*9+8]
L2VE = (xL2VE, wL2VE, pL2VE)
else:
L2VE = (None, None, None)
return LLE, L1VE, L2VE
def get_bp_term(self,
i_term):
message_len = 50
message_c = c_char_p(b" " * message_len)
message_len = c_len_type(message_len)
i_term_c = c_int(i_term)
self.s_get_bp_term.argtypes = [POINTER( c_int ),
c_char_p,
c_len_type]
self.s_get_bp_term.restype = None
self.s_get_bp_term(byref(i_term_c),
message_c,
message_len)
message = message_c.value.decode('ascii')
return message
    def global_binary_plot(self,
                           maximum_pressure=1.5e7,
                           minimum_pressure=1.0e5,
                           minimum_temperature=150.0,
                           maximum_temperature=500.0,
                           include_azeotropes=False):
        """Calculate the global binary phase diagram within the given bounds.

        The Fortran routine writes its results to "global_binary.dat", which
        is then parsed back by plotutils.

        Args:
            maximum_pressure / minimum_pressure: Pressure window.
            minimum_temperature / maximum_temperature: Temperature window.
            include_azeotropes: If True, also trace azeotropic lines.

        Returns:
            The parsed curve data from plotutils.
        """
        self.activate()
        max_press_c = c_double(maximum_pressure)
        min_press_c = c_double(minimum_pressure)
        max_temp_c = c_double(maximum_temperature)
        min_temp_c = c_double(minimum_temperature)
        az_bool_c = c_int(1 if include_azeotropes else 0)
        filename = "global_binary.dat"
        filename_c = c_char_p(filename.encode('ascii'))
        filename_len = c_len_type(len(filename))
        # Termination code; nonzero signals an abnormal stop (see get_bp_term).
        i_term_c = c_int(0)
        self.s_global_binary_plot.argtypes = [POINTER( c_double ),
                                              POINTER( c_double ),
                                              POINTER( c_double ),
                                              c_char_p,
                                              POINTER( c_int ),
                                              POINTER( c_double ),
                                              POINTER( c_int ),
                                              c_len_type]
        self.s_global_binary_plot.restype = None
        self.s_global_binary_plot(byref(min_press_c),
                                  byref(max_press_c),
                                  byref(min_temp_c),
                                  filename_c,
                                  byref(i_term_c),
                                  byref(max_temp_c),
                                  byref(az_bool_c),
                                  filename_len)
        if not i_term_c.value == 0:
            # Abnormal termination is reported but not raised.
            message = self.get_bp_term(i_term_c.value)
            print(message)
        # NOTE(review): the helper really is named "get_globa_binary_data"
        # (typo lives in plotutils) -- do not "fix" the call here alone.
        return plotutils.get_globa_binary_data(filename)
    def solid_envelope_plot(self, initial_pressure, z, maximum_pressure=1.5e7,
                            minimum_temperature=170.0, calc_esv=False):
        """Calculate a phase envelope including solid lines for composition z.

        The Fortran routine writes its results to "solid_envelope.dat",
        which is parsed back by plotutils.

        Args:
            initial_pressure: Starting pressure for the envelope tracer.
            z: Overall composition (mole fractions).
            maximum_pressure: Stop tracing above this pressure.
            minimum_temperature: Lower temperature bound (applied via set_tmin).
            calc_esv: If True, also calculate entropy/specific-volume output.

        Returns:
            The parsed envelope data from plotutils.
        """
        self.activate()
        z_c = (c_double * len(z))(*z)
        temp_c = c_double(0.0)
        press_c = c_double(initial_pressure)
        max_press_c = c_double(maximum_pressure)
        filename = "solid_envelope.dat"
        filename_c = c_char_p(filename.encode('ascii'))
        filename_len = c_len_type(len(filename))
        i_spec_c = c_int(1)
        esv_bool_c = c_int(1 if calc_esv else 0)
        # Temporarily lower the model's minimum temperature for the trace,
        # and restore the previous value afterwards.
        min_t = self.get_tmin()
        self.set_tmin(minimum_temperature)
        self.s_solid_envelope_plot.argtypes = [POINTER( c_double ),
                                               POINTER( c_double ),
                                               POINTER( c_double ),
                                               POINTER( c_int ),
                                               POINTER( c_double ),
                                               c_char_p,
                                               POINTER( c_int ),
                                               c_len_type]
        self.s_solid_envelope_plot.restype = None
        self.s_solid_envelope_plot(z_c,
                                   byref(temp_c),
                                   byref(press_c),
                                   byref(i_spec_c),
                                   byref(max_press_c),
                                   filename_c,
                                   byref(esv_bool_c),
                                   filename_len)
        self.set_tmin(min_t)
        return plotutils.get_solid_envelope_data(filename)
    def get_isotherm(self,
                     temp,
                     z,
                     minimum_pressure=1.0e5,
                     maximum_pressure=1.5e7,
                     nmax=100):
        """Trace an isotherm for composition z between the pressure bounds.

        Args:
            temp: Temperature of the isotherm.
            z: Overall composition (mole fractions).
            minimum_pressure / maximum_pressure: Pressure window.
            nmax: Maximum number of points (buffer size).

        Returns:
            (p, v, s, h) numpy arrays, truncated to the number of points
            actually computed by the library.
        """
        self.activate()
        temp_c = c_double(temp)
        minimum_pressure_c = c_double(minimum_pressure)
        maximum_pressure_c = c_double(maximum_pressure)
        z_c = (c_double * len(z))(*z)
        # Output buffers: volume, pressure, entropy, enthalpy per point.
        va_c = (c_double * nmax)(0.0)
        pa_c = (c_double * nmax)(0.0)
        sa_c = (c_double * nmax)(0.0)
        ha_c = (c_double * nmax)(0.0)
        nmax_c = c_int(nmax)
        na_c = c_int(0)
        self.s_isotherm.argtypes = [POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_int ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_double ),
                                    POINTER( c_int )]
        self.s_isotherm.restype = None
        self.s_isotherm(byref(temp_c),
                        byref(minimum_pressure_c),
                        byref(maximum_pressure_c),
                        z_c,
                        byref(nmax_c),
                        pa_c,
                        va_c,
                        sa_c,
                        ha_c,
                        byref(na_c))
        # Only the first na_c points of each buffer are valid.
        p_vals = np.array(pa_c[0:na_c.value])
        v_vals = np.array(va_c[0:na_c.value])
        s_vals = np.array(sa_c[0:na_c.value])
        h_vals = np.array(ha_c[0:na_c.value])
        return p_vals, v_vals, s_vals, h_vals
    def get_isobar(self,
                   press,
                   z,
                   minimum_temperature=200.0,
                   maximum_temperature=500.0,
                   nmax=100):
        """Trace an isobar for composition z between the temperature bounds.

        Args:
            press: Pressure of the isobar.
            z: Overall composition (mole fractions).
            minimum_temperature / maximum_temperature: Temperature window.
            nmax: Maximum number of points (buffer size).

        Returns:
            (t, v, s, h) numpy arrays, truncated to the number of points
            actually computed by the library.
        """
        self.activate()
        press_c = c_double(press)
        minimum_temperature_c = c_double(minimum_temperature)
        maximum_temperature_c = c_double(maximum_temperature)
        z_c = (c_double * len(z))(*z)
        # Output buffers: volume, temperature, entropy, enthalpy per point.
        va_c = (c_double * nmax)(0.0)
        ta_c = (c_double * nmax)(0.0)
        sa_c = (c_double * nmax)(0.0)
        ha_c = (c_double * nmax)(0.0)
        nmax_c = c_int(nmax)
        na_c = c_int(0)
        self.s_isobar.argtypes = [POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_int ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_double ),
                                  POINTER( c_int )]
        self.s_isobar.restype = None
        self.s_isobar(byref(press_c),
                      byref(minimum_temperature_c),
                      byref(maximum_temperature_c),
                      z_c,
                      byref(nmax_c),
                      ta_c,
                      va_c,
                      sa_c,
                      ha_c,
                      byref(na_c))
        # Only the first na_c points of each buffer are valid.
        t_vals = np.array(ta_c[0:na_c.value])
        v_vals = np.array(va_c[0:na_c.value])
        s_vals = np.array(sa_c[0:na_c.value])
        h_vals = np.array(ha_c[0:na_c.value])
        return t_vals, v_vals, s_vals, h_vals
    def get_isenthalp(self,
                      enthalpy,
                      z,
                      minimum_pressure=1.0e5,
                      maximum_pressure=1.5e7,
                      minimum_temperature=200.0,
                      maximum_temperature=500.0,
                      nmax=100):
        """Trace a constant-enthalpy curve for composition z.

        Args:
            enthalpy: Enthalpy level of the curve.
            z: Overall composition (mole fractions).
            minimum_pressure / maximum_pressure: Pressure window.
            minimum_temperature / maximum_temperature: Temperature window.
            nmax: Maximum number of points (buffer size).

        Returns:
            (t, p, v, s) numpy arrays, truncated to the number of points
            actually computed by the library.
        """
        self.activate()
        enthalpy_c = c_double(enthalpy)
        minimum_pressure_c = c_double(minimum_pressure)
        maximum_pressure_c = c_double(maximum_pressure)
        minimum_temperature_c = c_double(minimum_temperature)
        maximum_temperature_c = c_double(maximum_temperature)
        z_c = (c_double * len(z))(*z)
        # Output buffers: volume, temperature, entropy, pressure per point.
        va_c = (c_double * nmax)(0.0)
        ta_c = (c_double * nmax)(0.0)
        sa_c = (c_double * nmax)(0.0)
        pa_c = (c_double * nmax)(0.0)
        nmax_c = c_int(nmax)
        na_c = c_int(0)
        self.s_isenthalp.argtypes = [POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int )]
        self.s_isenthalp.restype = None
        self.s_isenthalp(byref(enthalpy_c),
                         byref(minimum_pressure_c),
                         byref(maximum_pressure_c),
                         byref(minimum_temperature_c),
                         byref(maximum_temperature_c),
                         z_c,
                         byref(nmax_c),
                         pa_c,
                         va_c,
                         sa_c,
                         ta_c,
                         byref(na_c))
        # Only the first na_c points of each buffer are valid.
        t_vals = np.array(ta_c[0:na_c.value])
        v_vals = np.array(va_c[0:na_c.value])
        s_vals = np.array(sa_c[0:na_c.value])
        p_vals = np.array(pa_c[0:na_c.value])
        return t_vals, p_vals, v_vals, s_vals
    def get_isentrope(self,
                      entropy,
                      z,
                      minimum_pressure=1.0e5,
                      maximum_pressure=1.5e7,
                      minimum_temperature=200.0,
                      maximum_temperature=500.0,
                      nmax=100):
        """Trace a constant-entropy curve for composition z.

        Args:
            entropy: Entropy level of the curve.
            z: Overall composition (mole fractions).
            minimum_pressure / maximum_pressure: Pressure window.
            minimum_temperature / maximum_temperature: Temperature window.
            nmax: Maximum number of points (buffer size).

        Returns:
            (t, p, v, h) numpy arrays, truncated to the number of points
            actually computed by the library.
        """
        self.activate()
        entropy_c = c_double(entropy)
        minimum_pressure_c = c_double(minimum_pressure)
        maximum_pressure_c = c_double(maximum_pressure)
        minimum_temperature_c = c_double(minimum_temperature)
        maximum_temperature_c = c_double(maximum_temperature)
        z_c = (c_double * len(z))(*z)
        # Output buffers: volume, temperature, enthalpy, pressure per point.
        va_c = (c_double * nmax)(0.0)
        ta_c = (c_double * nmax)(0.0)
        ha_c = (c_double * nmax)(0.0)
        pa_c = (c_double * nmax)(0.0)
        nmax_c = c_int(nmax)
        na_c = c_int(0)
        self.s_isentrope.argtypes = [POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_double ),
                                     POINTER( c_int )]
        self.s_isentrope.restype = None
        self.s_isentrope(byref(entropy_c),
                         byref(minimum_pressure_c),
                         byref(maximum_pressure_c),
                         byref(minimum_temperature_c),
                         byref(maximum_temperature_c),
                         z_c,
                         byref(nmax_c),
                         pa_c,
                         va_c,
                         ha_c,
                         ta_c,
                         byref(na_c))
        # Only the first na_c points of each buffer are valid.
        t_vals = np.array(ta_c[0:na_c.value])
        v_vals = np.array(va_c[0:na_c.value])
        h_vals = np.array(ha_c[0:na_c.value])
        p_vals = np.array(pa_c[0:na_c.value])
        return t_vals, p_vals, v_vals, h_vals
ne
self.s_second_virial_matrix(temp_c, bmat_c)
bmat = np.zeros((self.nc,self.nc))
for i in range(self.nc):
for j in range(self.nc):
bmat[i][j] = bmat_c[i+j*self.nc]
return bmat
    def binary_third_virial_matrix(self, temp):
        """Calculate the third-virial-coefficient matrix for a binary system.

        Args:
            temp: Temperature (presumably K -- confirm against the Fortran core).

        Returns:
            A 2x2 numpy matrix of third virial coefficients.
        """
        self.activate()
        # Only implemented for binary mixtures.
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would be more robust input validation.
        assert self.nc == 2
        temp_c = POINTER( c_double )(c_double(temp))
        cmat_c = (c_double * self.nc**2)(0.0)
        self.s_binary_third_virial_matrix.argtypes = [POINTER( c_double ),
                                                      POINTER( c_double )]
        self.s_binary_third_virial_matrix.restype = None
        self.s_binary_third_virial_matrix(temp_c, cmat_c)
        # The flat buffer is column-major (Fortran layout): (i, j) at i + j*nc.
        cmat = np.zeros((self.nc,self.nc))
        for i in range(self.nc):
            for j in range(self.nc):
                cmat[i][j] = cmat_c[i+j*self.nc]
        return cmat
| true | true |
f721ecf13fb9e94a63ae23e4f9196bc551a982b6 | 2,161 | py | Python | matterhook/incoming.py | bobtiki/DripBot | f0335811cbb42744989c48f056a93d86b2a1564f | [
"MIT"
] | null | null | null | matterhook/incoming.py | bobtiki/DripBot | f0335811cbb42744989c48f056a93d86b2a1564f | [
"MIT"
] | null | null | null | matterhook/incoming.py | bobtiki/DripBot | f0335811cbb42744989c48f056a93d86b2a1564f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Modified to ignore SSL verification, since I can't currently
# get it to accept proper SSL connections from the Omni CA
import os
import requests
import urllib3
# Silence the SubjectAltNameWarning that our self-signed CA gives
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
__all__ = ['Webhook']
class InvalidPayload(Exception):
    """Raised when a dict payload is missing the mandatory 'text' key."""
    pass
class HTTPError(Exception):
    """Raised when the webhook endpoint responds with a non-200 status."""
    pass
class Webhook(object):
    """
    Interacts with a Mattermost incoming webhook.

    Instance defaults (channel, icon_url, username) are applied by send()
    unless overridden per message. Messages can be sent either with
    ``hook.send(text, ...)`` or the subscript form ``hook[channel] = payload``.
    """

    def __init__(self,
                 url,
                 api_key,
                 channel=None,
                 icon_url=None,
                 username=None):
        self.api_key = api_key
        self.channel = channel
        self.icon_url = icon_url
        self.username = username
        self.url = url
        self.dir = os.path.dirname(__file__)
        # a cert may be needed if you're on a secure office network
        # self.cert_file_path = os.path.join(self.dir, '../certificate_ca.pem')

    def __setitem__(self, channel, payload):
        """Send *payload* to *channel* via ``hook[channel] = payload``.

        *payload* is either a plain message string or a dict containing a
        mandatory 'text' key plus optional send() keyword overrides.

        Raises:
            InvalidPayload: if a dict payload has no 'text' key.
        """
        if isinstance(payload, dict):
            try:
                message = payload.pop('text')
            except KeyError:
                raise InvalidPayload('missing "text" key')
        else:
            message = payload
            payload = {}
        # Bug fix: the subscript channel was previously ignored, so
        # `hook['town-square'] = msg` always posted to the default channel.
        payload['channel'] = channel
        self.send(message, **payload)

    @property
    def incoming_hook_url(self):
        """Full POST endpoint for this webhook."""
        return '{}/hooks/{}'.format(self.url, self.api_key)

    def send(self, message, channel=None, icon_url=None, username=None):
        """POST *message* to the webhook; kwargs override instance defaults.

        Raises:
            HTTPError: on any non-200 response from the server.
        """
        payload = {'text': message}
        if channel or self.channel:
            payload['channel'] = channel or self.channel
        if icon_url or self.icon_url:
            payload['icon_url'] = icon_url or self.icon_url
        if username or self.username:
            payload['username'] = username or self.username
        r = requests.post(self.incoming_hook_url, json=payload)
        # Or with the cert:
        # r = requests.post(self.incoming_hook_url, json=payload, verify=self.cert_file_path)
        if r.status_code != 200:
            raise HTTPError(r.text)
| 28.813333 | 93 | 0.613605 |
# get it to accept proper SSL connections from the Omni CA
import os
import requests
import urllib3
# Silence the SubjectAltNameWarning that our self-signed CA gives
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
__all__ = ['Webhook']
class InvalidPayload(Exception):
pass
class HTTPError(Exception):
pass
class Webhook(object):
def __init__(self,
url,
api_key,
channel=None,
icon_url=None,
username=None):
self.api_key = api_key
self.channel = channel
self.icon_url = icon_url
self.username = username
self.url = url
self.dir = os.path.dirname(__file__)
# a cert may be needed if you're on a secure office network
def __setitem__(self, channel, payload):
if isinstance(payload, dict):
try:
message = payload.pop('text')
except KeyError:
raise InvalidPayload('missing "text" key')
else:
message = payload
payload = {}
self.send(message, **payload)
@property
def incoming_hook_url(self):
return '{}/hooks/{}'.format(self.url, self.api_key)
def send(self, message, channel=None, icon_url=None, username=None):
payload = {'text': message}
if channel or self.channel:
payload['channel'] = channel or self.channel
if icon_url or self.icon_url:
payload['icon_url'] = icon_url or self.icon_url
if username or self.username:
payload['username'] = username or self.username
r = requests.post(self.incoming_hook_url, json=payload)
if r.status_code != 200:
raise HTTPError(r.text)
| true | true |
f721ed4bfdccbf6ff15d5c7d23d84ae97fcf886d | 3,214 | py | Python | expanded_checklist/checklist/tests/abstract_tests/classification_test.py | amazon-research/generalized-fairness-metrics | 69f4bb0a665b7d4d8f3967a5aa04e3a93d526d3c | [
"Apache-2.0"
] | 3 | 2021-10-30T12:34:32.000Z | 2022-02-24T10:27:23.000Z | expanded_checklist/checklist/tests/abstract_tests/classification_test.py | amazon-research/generalized-fairness-metrics | 69f4bb0a665b7d4d8f3967a5aa04e3a93d526d3c | [
"Apache-2.0"
] | 8 | 2021-08-18T19:13:53.000Z | 2022-02-02T16:06:08.000Z | expanded_checklist/checklist/tests/abstract_tests/classification_test.py | amazon-research/generalized-fairness-metrics | 69f4bb0a665b7d4d8f3967a5aa04e3a93d526d3c | [
"Apache-2.0"
] | 4 | 2021-08-13T15:28:28.000Z | 2022-03-29T05:25:00.000Z | from abc import abstractmethod
from typing import Any, Dict
import pandas as pd
from .metric_test import MetricTest
from overrides import overrides
from munch import Munch
from expanded_checklist.checklist.utils import \
DataShape, is_2d_list, ACCUMULATED_STR
pd.options.display.float_format = "{:,.2f}".format
class ClassificationMetric(MetricTest):
    """Base class for classification metrics evaluated one-vs-rest per class."""
    def __init__(
        self,
        name: str,
        required_ds: DataShape,
        only_accumulate: bool = False
    ) -> None:
        """
        Arguments:
            name: name of the test
            For the remaining parameters see MetricTest
        """
        super().__init__(
            name, required_ds, only_accumulate, probability_based=False,
            drop_none_labels=True)
    @abstractmethod
    def get_binary_class_results(self, labels, preds, confs) -> Any:
        """
        Get results for the binary classification.
        """
        raise NotImplementedError
    def get_results(self, labels, preds, confs, meta, **kwargs):
        """
        Get one vs other results for each class.
        If a metric/test supports direct calculation of multi-class results
        then it should override this function or (even better) the get_results
        function (which calls this function).
        """
        # classes are ints, starting from 0
        class_results = {}
        # Fall back to a previously-set self.n_classes when the kwarg is
        # absent (presumably set by the base class -- confirm).
        if 'n_classes' in kwargs:
            self.n_classes = kwargs['n_classes']
        for cl in range(self.n_classes):
            # turn the data into one vs other
            if is_2d_list(labels):
                labels_tmp = []
                for x in labels:
                    labels_tmp.append([True if i == cl else False for i in x])
                    # NOTE(review): this `continue` targets the inner append
                    # loop, making it a no-op; the 1-D branch below skips the
                    # whole class instead -- confirm the intended behavior.
                    if not any(labels_tmp[-1]):
                        continue
            else:
                labels_tmp = [True if x == cl else False for x in labels]
                # Skip classes with no positive labels at all.
                if not any(labels_tmp):
                    continue
            if not is_2d_list(preds):
                pred_tmp = [True if x == cl else False for x in preds]
                # get the conf score for a particular class
                conf_tmp = [x[cl] for x in confs]
            else:
                pred_tmp = []
                conf_tmp = []
                for x in preds:
                    pred_tmp.append([True if i == cl else False for i in x])
                # get the conf score for a particular class
                for x in confs:
                    conf_tmp.append([i[cl] for i in x])
            res = self.get_binary_class_results(labels_tmp, pred_tmp, conf_tmp)
            class_results[cl] = res
        # Optionally add a cross-class summary under ACCUMULATED_STR.
        accumulated = self.cross_class_accumulation(
            class_results, labels, preds, confs)
        if accumulated:
            class_results[ACCUMULATED_STR] = accumulated
        return class_results
    def cross_class_accumulation(
        self,
        class_results: Dict,  # class to results mapping
        labels, preds, confs
    ) -> Any:
        """
        Accumulate the results obtained for each class independently.
        Return the result for all classes, to be assigned as a value for
        ACCUMULATED_STR key.

        Subclasses may override; the default (empty dict) adds nothing.
        """
        return {}
| 33.479167 | 79 | 0.57654 | from abc import abstractmethod
from typing import Any, Dict
import pandas as pd
from .metric_test import MetricTest
from overrides import overrides
from munch import Munch
from expanded_checklist.checklist.utils import \
DataShape, is_2d_list, ACCUMULATED_STR
pd.options.display.float_format = "{:,.2f}".format
class ClassificationMetric(MetricTest):
def __init__(
self,
name: str,
required_ds: DataShape,
only_accumulate: bool = False
) -> None:
super().__init__(
name, required_ds, only_accumulate, probability_based=False,
drop_none_labels=True)
@abstractmethod
def get_binary_class_results(self, labels, preds, confs) -> Any:
raise NotImplementedError
def get_results(self, labels, preds, confs, meta, **kwargs):
class_results = {}
if 'n_classes' in kwargs:
self.n_classes = kwargs['n_classes']
for cl in range(self.n_classes):
if is_2d_list(labels):
labels_tmp = []
for x in labels:
labels_tmp.append([True if i == cl else False for i in x])
if not any(labels_tmp[-1]):
continue
else:
labels_tmp = [True if x == cl else False for x in labels]
if not any(labels_tmp):
continue
if not is_2d_list(preds):
pred_tmp = [True if x == cl else False for x in preds]
conf_tmp = [x[cl] for x in confs]
else:
pred_tmp = []
conf_tmp = []
for x in preds:
pred_tmp.append([True if i == cl else False for i in x])
for x in confs:
conf_tmp.append([i[cl] for i in x])
res = self.get_binary_class_results(labels_tmp, pred_tmp, conf_tmp)
class_results[cl] = res
accumulated = self.cross_class_accumulation(
class_results, labels, preds, confs)
if accumulated:
class_results[ACCUMULATED_STR] = accumulated
return class_results
def cross_class_accumulation(
self,
class_results: Dict,
labels, preds, confs
) -> Any:
return {}
| true | true |
f721eead0cd1072ec828af4f83a7a3286c86ec53 | 2,631 | py | Python | tests/bm_interpolate_face_attributes.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | 6 | 2021-02-09T05:58:53.000Z | 2021-11-01T03:28:40.000Z | tests/bm_interpolate_face_attributes.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | null | null | null | tests/bm_interpolate_face_attributes.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | 2 | 2021-03-12T07:00:39.000Z | 2021-04-12T09:47:36.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d.ops.interp_face_attrs import (
interpolate_face_attributes,
interpolate_face_attributes_python,
)
def _generate_data(N, S, K, F, D, device, requires_grad=False):
pix_to_face = torch.randint(-10, F, (N, S, S, K), device=device)
barycentric_coords = torch.randn(
N, S, S, K, 3, device=device, requires_grad=requires_grad
)
face_attrs = torch.randn(F, 3, D, device=device, requires_grad=requires_grad)
grad_pix_attrs = torch.randn(N, S, S, K, D, device=device)
return pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs
def _bm_forward(N, S, F, K, D, impl):
    """Return a zero-arg callable that runs one forward pass of the chosen
    implementation ("cuda" or "python") on freshly generated CUDA data."""
    # The runtime depends on the values of pix_to_face. So for proper
    # benchmarking we should probably take the average of multiple
    # values of pix to face. But this doesn't easily fit into fvcore
    # benchmarking, so instead we'll just set a manual seed to make sure
    # that different impls will use the same data.
    torch.manual_seed(0)
    device = torch.device("cuda")
    data = _generate_data(N, S, K, F, D, device, requires_grad=False)
    # Forward only needs (pix_to_face, barycentric_coords, face_attrs).
    args = data[:3]
    torch.cuda.synchronize()
    if impl == "cuda":
        fun = interpolate_face_attributes
    elif impl == "python":
        fun = interpolate_face_attributes_python
    return lambda: fun(*args)
def _bm_forward_backward(N, S, F, K, D, impl):
    """Return a zero-arg callable running forward + backward of the chosen
    implementation ("cuda" or "python") on freshly generated CUDA data."""
    # Fixed seed so both implementations benchmark on identical data.
    torch.manual_seed(0)
    device = torch.device("cuda")
    data = _generate_data(N, S, K, F, D, device, requires_grad=True)
    args, grad = data[:3], data[3]
    torch.cuda.synchronize()
    if impl == "cuda":
        fun = interpolate_face_attributes
    elif impl == "python":
        fun = interpolate_face_attributes_python

    def run():
        # Forward pass, then backprop the pre-generated upstream gradient.
        out = fun(*args)
        out.backward(gradient=grad)

    return run
def bm_interpolate_face_attribues() -> None:
    """Benchmark forward and forward+backward over a grid of problem sizes.

    NOTE(review): the function name is missing a 't' ("attribues"); it is
    referenced by the __main__ guard below, so renaming must update both.
    """
    # For now only benchmark on GPU
    if not torch.cuda.is_available():
        return
    Ns = [1, 4]
    Ss = [128]
    Ks = [1, 10, 40]
    Fs = [5000]
    Ds = [1, 3, 16]
    impls = ["python", "cuda"]
    test_cases = product(Ns, Ss, Ks, Fs, Ds, impls)
    kwargs_list = []
    for case in test_cases:
        N, S, K, F, D, impl = case
        kwargs_list.append({"N": N, "S": S, "K": K, "F": F, "D": D, "impl": impl})
    benchmark(_bm_forward, "FORWARD", kwargs_list, warmup_iters=3)
    benchmark(_bm_forward_backward, "FORWARD+BACKWARD", kwargs_list, warmup_iters=3)
# Run the full benchmark sweep when invoked as a script.
if __name__ == "__main__":
    bm_interpolate_face_attribues()
| 32.481481 | 84 | 0.662866 |
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d.ops.interp_face_attrs import (
interpolate_face_attributes,
interpolate_face_attributes_python,
)
def _generate_data(N, S, K, F, D, device, requires_grad=False):
pix_to_face = torch.randint(-10, F, (N, S, S, K), device=device)
barycentric_coords = torch.randn(
N, S, S, K, 3, device=device, requires_grad=requires_grad
)
face_attrs = torch.randn(F, 3, D, device=device, requires_grad=requires_grad)
grad_pix_attrs = torch.randn(N, S, S, K, D, device=device)
return pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs
def _bm_forward(N, S, F, K, D, impl):
# benchmarking, so instead we'll just set a manual seed to make sure
torch.manual_seed(0)
device = torch.device("cuda")
data = _generate_data(N, S, K, F, D, device, requires_grad=False)
args = data[:3]
torch.cuda.synchronize()
if impl == "cuda":
fun = interpolate_face_attributes
elif impl == "python":
fun = interpolate_face_attributes_python
return lambda: fun(*args)
def _bm_forward_backward(N, S, F, K, D, impl):
torch.manual_seed(0)
device = torch.device("cuda")
data = _generate_data(N, S, K, F, D, device, requires_grad=True)
args, grad = data[:3], data[3]
torch.cuda.synchronize()
if impl == "cuda":
fun = interpolate_face_attributes
elif impl == "python":
fun = interpolate_face_attributes_python
def run():
out = fun(*args)
out.backward(gradient=grad)
return run
def bm_interpolate_face_attribues() -> None:
if not torch.cuda.is_available():
return
Ns = [1, 4]
Ss = [128]
Ks = [1, 10, 40]
Fs = [5000]
Ds = [1, 3, 16]
impls = ["python", "cuda"]
test_cases = product(Ns, Ss, Ks, Fs, Ds, impls)
kwargs_list = []
for case in test_cases:
N, S, K, F, D, impl = case
kwargs_list.append({"N": N, "S": S, "K": K, "F": F, "D": D, "impl": impl})
benchmark(_bm_forward, "FORWARD", kwargs_list, warmup_iters=3)
benchmark(_bm_forward_backward, "FORWARD+BACKWARD", kwargs_list, warmup_iters=3)
if __name__ == "__main__":
bm_interpolate_face_attribues()
| true | true |
f721eefbaefb6fb86fcd8f4b856fa798ccad73fe | 628 | py | Python | packt-social-media-mining/Chap02-03/twitter_hashtag_frequency.py | bitwhys/mining-social-web | 8d84c85a415d63bd53b8eb441a4258dc914f4d9f | [
"BSD-2-Clause"
] | null | null | null | packt-social-media-mining/Chap02-03/twitter_hashtag_frequency.py | bitwhys/mining-social-web | 8d84c85a415d63bd53b8eb441a4258dc914f4d9f | [
"BSD-2-Clause"
] | 7 | 2020-03-24T18:01:12.000Z | 2021-06-08T20:47:00.000Z | packt-social-media-mining/Chap02-03/twitter_hashtag_frequency.py | bitwhys/mining-social-web | 8d84c85a415d63bd53b8eb441a4258dc914f4d9f | [
"BSD-2-Clause"
] | null | null | null | # Chap02/twitter_hashtag_frequency.py
import sys
from collections import Counter
import json
def get_hashtags(tweet):
    """Return the lowercased text of every hashtag entity in *tweet*."""
    tags = tweet.get('entities', {}).get('hashtags', [])
    return [entry['text'].lower() for entry in tags]
if __name__ == '__main__':
    # Usage: python twitter_hashtag_frequency.py <tweets.jsonl>
    fname = sys.argv[1]
    # Twitter data is UTF-8 JSON; decode explicitly instead of relying on
    # the platform default encoding (which breaks e.g. on Windows cp1252).
    with open(fname, 'r', encoding='utf-8') as f:
        hashtags = Counter()
        for line in f:
            tweet = json.loads(line)
            hashtags_in_tweet = get_hashtags(tweet)
            hashtags.update(hashtags_in_tweet)
        # Print the 20 most frequent hashtags with their counts.
        for tag, count in hashtags.most_common(20):
            print("{}: {}".format(tag, count))
| 29.904762 | 52 | 0.627389 |
import sys
from collections import Counter
import json
def get_hashtags(tweet):
entities = tweet.get('entities', {})
hashtags = entities.get('hashtags', [])
return [tag['text'].lower() for tag in hashtags]
if __name__ == '__main__':
fname = sys.argv[1]
with open(fname, 'r') as f:
hashtags = Counter()
for line in f:
tweet = json.loads(line)
hashtags_in_tweet = get_hashtags(tweet)
hashtags.update(hashtags_in_tweet)
for tag, count in hashtags.most_common(20):
print("{}: {}".format(tag, count))
| true | true |
f721f0cb4d5df5a0d5895eb9316317e2aedfce7f | 1,750 | py | Python | apps/mail/tasks.py | magocod/djheavy | a7291edb6d2b406af73737c254be3bc3a66e44ae | [
"MIT"
] | 2 | 2020-02-24T00:29:21.000Z | 2021-01-13T02:41:01.000Z | apps/mail/tasks.py | magocod/djheavy | a7291edb6d2b406af73737c254be3bc3a66e44ae | [
"MIT"
] | 6 | 2020-10-08T15:16:34.000Z | 2021-09-22T18:37:58.000Z | apps/mail/tasks.py | magocod/djheavy | a7291edb6d2b406af73737c254be3bc3a66e44ae | [
"MIT"
] | 1 | 2020-08-05T10:12:41.000Z | 2020-08-05T10:12:41.000Z | from __future__ import absolute_import, unicode_literals
# from time import sleep
import binascii
import os
from celery import shared_task
from django.conf import settings
# Django
from django.core.cache import cache
from django.core.mail import send_mail
from django.template.loader import render_to_string
# local Django
from apps.mail.models import Mail
# from apps.utils.basetaskcelery import VerifyTaskBase
# from djheavy.celery import app
# @app.task(base=VerifyTaskBase)
@shared_task
def example_add(x: int, y: int):
    """Return the sum of two integers; smoke-test task for the Celery worker."""
    total = x + y
    return total
@shared_task
def simulate_send_emails(text: str):
    """Record a Mail row for *text* and, when ACTIVE_EMAIL is set, send a
    registration thank-you e-mail to that address.

    Returns a dict with the recipient under the 'sended_to' key
    (key name kept as-is: consumers may depend on it).
    """
    Mail.objects.create(name=text)
    # print("task db", Mail.objects.count())
    # Actual e-mail delivery only happens when explicitly enabled in settings.
    if settings.ACTIVE_EMAIL:  # pragma: no cover
        subject = "Thank you for registering to our site"
        message = " it means a world to us "
        email_from = settings.EMAIL_HOST_USER
        recipient_list = [text]
        send_mail(subject, message, email_from, recipient_list)
    dict_task = {
        "sended_to": text,
    }
    return dict_task
@shared_task
def send_email_activation(username: str, email: str, domain: str):
    """Generate an activation token, optionally e-mail it, and cache it.

    The token is a 40-hex-character random string. It is stored in the
    cache under the token itself, with value "<username>_<email>_<token>".

    Returns the token string.
    """
    token: str = binascii.hexlify(os.urandom(20)).decode()
    # Actual e-mail delivery only happens when explicitly enabled in settings.
    if settings.ACTIVE_EMAIL:  # pragma: no cover
        subject = "Thank you for registering to our site"
        message = render_to_string(
            "activate_account.html",
            {"username": username, "domain": domain, "token": token,},
        )
        email_from = settings.EMAIL_HOST_USER
        recipient_list = [email]
        send_mail(subject, message, email_from, recipient_list)
    # NOTE(review): 60 seconds looks very short for an e-mail activation
    # link's cache TTL -- confirm the intended expiry.
    cache.set(token, f"{username}_{email}_{token}", 60)
    return token
| 22.435897 | 70 | 0.667429 | from __future__ import absolute_import, unicode_literals
import binascii
import os
from celery import shared_task
from django.conf import settings
from django.core.cache import cache
from django.core.mail import send_mail
from django.template.loader import render_to_string
from apps.mail.models import Mail
@shared_task
def example_add(x: int, y: int):
    """Return ``x + y`` (Celery worker smoke-test task)."""
    return x + y
@shared_task
def simulate_send_emails(text: str):
    """Record an outgoing mail (a ``Mail`` row named *text*) and, when
    ``settings.ACTIVE_EMAIL`` is on, actually deliver it to *text*.
    Returns a dict describing the recipient.
    """
    Mail.objects.create(name=text)
    # Real delivery is gated so tests/CI can run without SMTP.
    if settings.ACTIVE_EMAIL:
        subject = "Thank you for registering to our site"
        message = " it means a world to us "
        email_from = settings.EMAIL_HOST_USER
        recipient_list = [text]
        send_mail(subject, message, email_from, recipient_list)
    dict_task = {
        "sended_to": text,
    }
    return dict_task
@shared_task
def send_email_activation(username: str, email: str, domain: str):
    """Create an activation token, e-mail it (when ``ACTIVE_EMAIL`` is
    on) and cache it for 60 seconds so the activation view can verify
    it.  Returns the token.
    """
    # 20 random bytes -> 40-char hex token.
    token: str = binascii.hexlify(os.urandom(20)).decode()
    if settings.ACTIVE_EMAIL:
        subject = "Thank you for registering to our site"
        message = render_to_string(
            "activate_account.html",
            {"username": username, "domain": domain, "token": token,},
        )
        email_from = settings.EMAIL_HOST_USER
        recipient_list = [email]
        send_mail(subject, message, email_from, recipient_list)
    # Key == token; value bundles the identity for later validation.
    cache.set(token, f"{username}_{email}_{token}", 60)
    return token
| true | true |
f721f1c921e737b25562b868ababcc6997318950 | 5,161 | py | Python | wagtail_daterange/filter.py | michael-yin/wagtail-daterange | c56aba056b179cb46bfd70acf0dee9f65cc214c7 | [
"MIT"
] | 1 | 2022-03-18T09:15:01.000Z | 2022-03-18T09:15:01.000Z | wagtail_daterange/filter.py | ivanguy/wagtail-daterange | e7d07f62271f81f3a79733938f8d5dd78aa83959 | [
"MIT"
] | null | null | null | wagtail_daterange/filter.py | ivanguy/wagtail-daterange | e7d07f62271f81f3a79733938f8d5dd78aa83959 | [
"MIT"
] | 2 | 2018-07-26T23:39:57.000Z | 2020-05-20T12:24:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import django
try:
import pytz
except ImportError:
pytz = None
from collections import OrderedDict
from django import forms
from django.conf import settings
from django.contrib import admin
from django.utils import timezone
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from wagtail.admin import widgets
class DateRangeFilter(admin.filters.FieldListFilter):
    """Wagtail/Django admin list filter selecting rows whose date field
    falls inside a user-supplied [from, to] range.

    Renders a small two-field form (see ``get_template``) and filters
    the changelist queryset with ``<field>__gte`` / ``<field>__lte``.
    """
    def __init__(self, field, request, params, model, model_admin, field_path):
        # GET-parameter names for the two ends of the range.
        self.lookup_kwarg_gte = '{}__gte'.format(field_path)
        self.lookup_kwarg_lte = '{}__lte'.format(field_path)
        super(DateRangeFilter, self).__init__(field, request, params, model, model_admin, field_path)
        self.request = request
        self.form = self.get_form(request)
    def get_timezone(self, request):
        """Timezone used to make the datetimes aware (project default)."""
        return timezone.get_default_timezone()
    @staticmethod
    def make_dt_aware(value, timezone):
        """Attach *timezone* to naive *value* (or normalize an aware one)
        when USE_TZ is enabled and pytz is available; otherwise pass
        the value through unchanged.
        """
        if settings.USE_TZ and pytz is not None:
            default_tz = timezone
            if value.tzinfo is not None:
                value = default_tz.normalize(value)
            else:
                value = default_tz.localize(value)
        return value
    def choices(self, cl):
        """Yield a single pseudo-choice used by the custom template; its
        query string clears both range parameters ("reset" link).
        """
        yield {
            'system_name': slugify(self.title),
            'query_string': cl.get_query_string(
                {}, remove=self._get_expected_fields()
            )
        }
    def expected_parameters(self):
        return self._get_expected_fields()
    def queryset(self, request, queryset):
        """Apply the validated range to *queryset*; on invalid or empty
        form data return the queryset unfiltered.
        """
        if self.form.is_valid():
            validated_data = dict(self.form.cleaned_data.items())
            if validated_data:
                return queryset.filter(
                    **self._make_query_filter(request, validated_data)
                )
        return queryset
    def _get_expected_fields(self):
        return [self.lookup_kwarg_gte, self.lookup_kwarg_lte]
    def _make_query_filter(self, request, validated_data):
        """Build the ORM filter kwargs from the cleaned form data.

        Dates are widened to whole days (midnight .. 23:59:59.999999)
        before being made timezone-aware.
        """
        query_params = {}
        date_value_gte = validated_data.get(self.lookup_kwarg_gte, None)
        date_value_lte = validated_data.get(self.lookup_kwarg_lte, None)
        if date_value_gte:
            query_params['{0}__gte'.format(self.field_path)] = self.make_dt_aware(
                datetime.datetime.combine(date_value_gte, datetime.time.min),
                self.get_timezone(request),
            )
        if date_value_lte:
            query_params['{0}__lte'.format(self.field_path)] = self.make_dt_aware(
                datetime.datetime.combine(date_value_lte, datetime.time.max),
                self.get_timezone(request),
            )
        return query_params
    def get_template(self):
        return 'wagtail_daterange/date_filter.html'
    template = property(get_template)
    def get_form(self, request):
        form_class = self._get_form_class()
        return form_class(self.used_parameters)
    def _get_form_class(self):
        # Form class is built dynamically so the field names can embed
        # the filtered model field's path.
        fields = self._get_form_fields()
        form_class = type(
            str('DateRangeForm'),
            (forms.BaseForm,),
            {'base_fields': fields}
        )
        return form_class
    def _get_form_fields(self):
        """Two optional, localized DateFields using Wagtail's admin
        date-picker widget."""
        return OrderedDict((
            (self.lookup_kwarg_gte, forms.DateField(
                label='',
                widget=widgets.AdminDateInput(attrs={'placeholder': _('From date')}),
                localize=True,
                required=False
            )),
            (self.lookup_kwarg_lte, forms.DateField(
                label='',
                widget=widgets.AdminDateInput(attrs={'placeholder': _('To date')}),
                localize=True,
                required=False
            )),
        ))
class DateTimeRangeFilter(DateRangeFilter):
    """Variant of DateRangeFilter for DateTime fields: the form takes
    full datetimes, so no widening to whole days is applied.
    """
    def _get_form_fields(self):
        # Same layout as the parent, but DateTimeFields with the
        # Wagtail admin datetime-picker widget.
        return OrderedDict((
            (self.lookup_kwarg_gte, forms.DateTimeField(
                label='',
                widget=widgets.AdminDateTimeInput(attrs={'placeholder': _('From date')}),
                localize=True,
                required=False
            )),
            (self.lookup_kwarg_lte, forms.DateTimeField(
                label='',
                widget=widgets.AdminDateTimeInput(attrs={'placeholder': _('To date')}),
                localize=True,
                required=False
            )),
        ))
    def _make_query_filter(self, request, validated_data):
        """As in the parent, but the user-provided datetimes are used
        as-is (only made timezone-aware)."""
        query_params = {}
        date_value_gte = validated_data.get(self.lookup_kwarg_gte, None)
        date_value_lte = validated_data.get(self.lookup_kwarg_lte, None)
        if date_value_gte:
            query_params['{0}__gte'.format(self.field_path)] = self.make_dt_aware(
                date_value_gte, self.get_timezone(request)
            )
        if date_value_lte:
            query_params['{0}__lte'.format(self.field_path)] = self.make_dt_aware(
                date_value_lte, self.get_timezone(request)
            )
        return query_params
| 33.083333 | 101 | 0.602596 |
from __future__ import unicode_literals
import datetime
import django
try:
import pytz
except ImportError:
pytz = None
from collections import OrderedDict
from django import forms
from django.conf import settings
from django.contrib import admin
from django.utils import timezone
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from wagtail.admin import widgets
class DateRangeFilter(admin.filters.FieldListFilter):
    """Admin list filter restricting rows to a user-supplied date range
    via ``<field>__gte`` / ``<field>__lte`` lookups.
    """
    def __init__(self, field, request, params, model, model_admin, field_path):
        # GET-parameter names for the two ends of the range.
        self.lookup_kwarg_gte = '{}__gte'.format(field_path)
        self.lookup_kwarg_lte = '{}__lte'.format(field_path)
        super(DateRangeFilter, self).__init__(field, request, params, model, model_admin, field_path)
        self.request = request
        self.form = self.get_form(request)
    def get_timezone(self, request):
        # Project default timezone; override for per-request zones.
        return timezone.get_default_timezone()
    @staticmethod
    def make_dt_aware(value, timezone):
        """Localize (or normalize) *value* into *timezone* when USE_TZ
        and pytz are available; otherwise return it unchanged."""
        if settings.USE_TZ and pytz is not None:
            default_tz = timezone
            if value.tzinfo is not None:
                value = default_tz.normalize(value)
            else:
                value = default_tz.localize(value)
        return value
    def choices(self, cl):
        # Single pseudo-choice consumed by the custom template; the
        # query string clears both range parameters ("reset" link).
        yield {
            'system_name': slugify(self.title),
            'query_string': cl.get_query_string(
                {}, remove=self._get_expected_fields()
            )
        }
    def expected_parameters(self):
        return self._get_expected_fields()
    def queryset(self, request, queryset):
        # Unfiltered queryset on invalid or empty form input.
        if self.form.is_valid():
            validated_data = dict(self.form.cleaned_data.items())
            if validated_data:
                return queryset.filter(
                    **self._make_query_filter(request, validated_data)
                )
        return queryset
    def _get_expected_fields(self):
        return [self.lookup_kwarg_gte, self.lookup_kwarg_lte]
    def _make_query_filter(self, request, validated_data):
        # Widen the dates to whole days before making them aware.
        query_params = {}
        date_value_gte = validated_data.get(self.lookup_kwarg_gte, None)
        date_value_lte = validated_data.get(self.lookup_kwarg_lte, None)
        if date_value_gte:
            query_params['{0}__gte'.format(self.field_path)] = self.make_dt_aware(
                datetime.datetime.combine(date_value_gte, datetime.time.min),
                self.get_timezone(request),
            )
        if date_value_lte:
            query_params['{0}__lte'.format(self.field_path)] = self.make_dt_aware(
                datetime.datetime.combine(date_value_lte, datetime.time.max),
                self.get_timezone(request),
            )
        return query_params
    def get_template(self):
        return 'wagtail_daterange/date_filter.html'
    template = property(get_template)
    def get_form(self, request):
        form_class = self._get_form_class()
        return form_class(self.used_parameters)
    def _get_form_class(self):
        # Built dynamically so field names can embed the field path.
        fields = self._get_form_fields()
        form_class = type(
            str('DateRangeForm'),
            (forms.BaseForm,),
            {'base_fields': fields}
        )
        return form_class
    def _get_form_fields(self):
        # Two optional, localized DateFields with Wagtail date pickers.
        return OrderedDict((
            (self.lookup_kwarg_gte, forms.DateField(
                label='',
                widget=widgets.AdminDateInput(attrs={'placeholder': _('From date')}),
                localize=True,
                required=False
            )),
            (self.lookup_kwarg_lte, forms.DateField(
                label='',
                widget=widgets.AdminDateInput(attrs={'placeholder': _('To date')}),
                localize=True,
                required=False
            )),
        ))
class DateTimeRangeFilter(DateRangeFilter):
    """DateRangeFilter variant for DateTime fields: inputs are full
    datetimes, so no widening to whole days takes place."""
    def _get_form_fields(self):
        # DateTimeFields with the Wagtail admin datetime picker.
        return OrderedDict((
            (self.lookup_kwarg_gte, forms.DateTimeField(
                label='',
                widget=widgets.AdminDateTimeInput(attrs={'placeholder': _('From date')}),
                localize=True,
                required=False
            )),
            (self.lookup_kwarg_lte, forms.DateTimeField(
                label='',
                widget=widgets.AdminDateTimeInput(attrs={'placeholder': _('To date')}),
                localize=True,
                required=False
            )),
        ))
    def _make_query_filter(self, request, validated_data):
        # Datetimes are used as-is, only made timezone-aware.
        query_params = {}
        date_value_gte = validated_data.get(self.lookup_kwarg_gte, None)
        date_value_lte = validated_data.get(self.lookup_kwarg_lte, None)
        if date_value_gte:
            query_params['{0}__gte'.format(self.field_path)] = self.make_dt_aware(
                date_value_gte, self.get_timezone(request)
            )
        if date_value_lte:
            query_params['{0}__lte'.format(self.field_path)] = self.make_dt_aware(
                date_value_lte, self.get_timezone(request)
            )
        return query_params
| true | true |
f721f2d846051c7aa457b3b1b26f7643c7d67fa0 | 9,344 | py | Python | third_party/Paste/paste/urlmap.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/Paste/paste/urlmap.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/Paste/paste/urlmap.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Map URL prefixes to WSGI applications. See ``URLMap``
"""
import re
import os
import cgi
try:
# Python 3
from collections import MutableMapping as DictMixin
except ImportError:
# Python 2
from UserDict import DictMixin
from paste import httpexceptions
__all__ = ['URLMap', 'PathProxyURLMap']
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste ``app_factory`` that builds a ``URLMap`` from config.

    ``not_found_app`` may come from the local section (takes priority)
    or the global one; every remaining local option maps a path
    expression to an application name resolved through *loader*.
    """
    not_found_app = local_conf.pop('not_found_app',
                                   global_conf.get('not_found_app'))
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    mapping = URLMap(not_found_app=not_found_app)
    for expression, app_name in local_conf.items():
        address = parse_path_expression(expression)
        mapping[address] = loader.get_app(app_name, global_conf=global_conf)
    return mapping
def parse_path_expression(path):
    """Parse a mapping-key expression into an address URLMap accepts.

    The expression is whitespace-separated: an optional ``domain NAME``
    pair, an optional ``port NUMBER`` pair, and at most one bare path,
    e.g. ``'domain foobar.com port 20 /'`` or just ``'/foobar'``.
    Raises ValueError on duplicate/incomplete clauses or a port
    without a domain.
    """
    tokens = path.split()
    domain = port = result_path = None
    idx = 0
    while idx < len(tokens):
        word = tokens[idx]
        if word == 'domain':
            if idx + 1 >= len(tokens):
                raise ValueError("'domain' must be followed with a domain name")
            if domain:
                raise ValueError("'domain' given twice")
            domain = tokens[idx + 1]
            idx += 2
        elif word == 'port':
            if idx + 1 >= len(tokens):
                raise ValueError("'port' must be followed with a port number")
            if port:
                raise ValueError("'port' given twice")
            port = tokens[idx + 1]
            idx += 2
        else:
            if result_path:
                raise ValueError("more than one path given (have %r, got %r)"
                                 % (result_path, word))
            result_path = word
            idx += 1
    # Assemble 'http://domain[:port]' followed by the path (if any).
    address = ''
    if domain:
        address = 'http://%s' % domain
    if port:
        if not domain:
            raise ValueError("If you give a port, you must also give a domain")
        address += ':' + port
    if result_path:
        if address:
            address += '/'
        address += result_path
    return address
class URLMap(DictMixin):
    """
    URLMap instances are dictionary-like objects that dispatch to one
    of several applications based on the URL.
    The dictionary keys are URLs to match (like
    ``PATH_INFO.startswith(url)``), and the values are applications to
    dispatch to.  URLs are matched most-specific-first, i.e., longest
    URL first.  The ``SCRIPT_NAME`` and ``PATH_INFO`` environmental
    variables are adjusted to indicate the new context.
    URLs can also include domains, like ``http://blah.com/foo``, or as
    tuples ``('blah.com', '/foo')``.  This will match domain names; without
    the ``http://domain`` or with a domain of ``None`` any domain will be
    matched (so long as no other explicit domain matches). """
    def __init__(self, not_found_app=None):
        # ``applications`` is a list of ((domain, url), app) pairs kept
        # sorted by sort_apps().
        self.applications = []
        if not not_found_app:
            not_found_app = self.not_found_app
        self.not_found_application = not_found_app
    def __len__(self):
        return len(self.applications)
    def __iter__(self):
        # Iterates the (domain, url) keys, matching dict semantics.
        for app_url, app in self.applications:
            yield app_url
    norm_url_re = re.compile('//+')
    domain_url_re = re.compile('^(http|https)://')
    def not_found_app(self, environ, start_response):
        """Default 404 application; lists the registered mounts and the
        request's routing variables in the error comment."""
        mapper = environ.get('paste.urlmap_object')
        if mapper:
            matches = [p for p, a in mapper.applications]
            extra = 'defined apps: %s' % (
                ',\n  '.join(map(repr, matches)))
        else:
            extra = ''
        extra += '\nSCRIPT_NAME: %r' % cgi.escape(environ.get('SCRIPT_NAME'))
        extra += '\nPATH_INFO: %r' % cgi.escape(environ.get('PATH_INFO'))
        extra += '\nHTTP_HOST: %r' % cgi.escape(environ.get('HTTP_HOST'))
        app = httpexceptions.HTTPNotFound(
            environ['PATH_INFO'],
            comment=cgi.escape(extra)).wsgi_application
        return app(environ, start_response)
    def normalize_url(self, url, trim=True):
        """Normalize *url* to a ``(domain_or_None, path)`` tuple,
        collapsing duplicate slashes and (when *trim*) stripping a
        trailing slash.  Tuples are recursed into; ``http(s)://``
        prefixes are split into the domain part."""
        if isinstance(url, (list, tuple)):
            domain = url[0]
            url = self.normalize_url(url[1])[1]
            return domain, url
        assert (not url or url.startswith('/')
                or self.domain_url_re.search(url)), (
            "URL fragments must start with / or http:// (you gave %r)" % url)
        match = self.domain_url_re.search(url)
        if match:
            url = url[match.end():]
            if '/' in url:
                domain, url = url.split('/', 1)
                url = '/' + url
            else:
                domain, url = url, ''
        else:
            domain = None
        url = self.norm_url_re.sub('/', url)
        if trim:
            url = url.rstrip('/')
        return domain, url
    def sort_apps(self):
        """
        Make sure applications are sorted with longest URLs first
        """
        def key(app_desc):
            (domain, url), app = app_desc
            if not domain:
                # Make sure empty domains sort last:
                return '\xff', -len(url)
            else:
                return domain, -len(url)
        apps = [(key(desc), desc) for desc in self.applications]
        apps.sort()
        self.applications = [desc for (sortable, desc) in apps]
    def __setitem__(self, url, app):
        # Assigning None removes any existing mapping for the URL.
        if app is None:
            try:
                del self[url]
            except KeyError:
                pass
            return
        dom_url = self.normalize_url(url)
        if dom_url in self:
            del self[dom_url]
        self.applications.append((dom_url, app))
        self.sort_apps()
    def __getitem__(self, url):
        dom_url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == dom_url:
                return app
        raise KeyError(
            "No application with the url %r (domain: %r; existing: %s)"
            % (url[1], url[0] or '*', self.applications))
    def __delitem__(self, url):
        url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == url:
                self.applications.remove((app_url, app))
                break
        else:
            raise KeyError(
                "No application with the url %r" % (url,))
    def keys(self):
        return [app_url for app_url, app in self.applications]
    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the longest-prefix mount whose
        domain (if any) matches HTTP_HOST, shifting the matched prefix
        from PATH_INFO to SCRIPT_NAME; otherwise invoke the configured
        not-found application."""
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            # No explicit port in the Host header: infer from scheme.
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'
        path_info = environ.get('PATH_INFO')
        path_info = self.normalize_url(path_info, False)[1]
        for (domain, app_url), app in self.applications:
            if domain and domain != host and domain != host+':'+port:
                continue
            if (path_info == app_url
                or path_info.startswith(app_url + '/')):
                environ['SCRIPT_NAME'] += app_url
                environ['PATH_INFO'] = path_info[len(app_url):]
                return app(environ, start_response)
        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
# str/unicode compatibility: ``unicode`` only exists on Python 2.  The
# module already guards its other py2/py3 difference (DictMixin), so
# the string check must be guarded too or __setitem__ raises NameError
# on Python 3.
try:
    _string_types = (str, unicode)
except NameError:  # pragma: no cover - Python 3
    _string_types = (str,)


class PathProxyURLMap(object):

    """
    This is a wrapper for URLMap that catches any strings that
    are passed in as applications; these strings are treated as
    filenames (relative to `base_path`) and are passed to the
    callable `builder`, which will return an application.

    This is intended for cases when configuration files can be
    treated as applications.

    `base_paste_url` is the URL under which all applications added through
    this wrapper must go.  Use ``""`` if you want this to not
    change incoming URLs.
    """

    def __init__(self, map, base_paste_url, base_path, builder):
        self.map = map
        self.base_paste_url = self.map.normalize_url(base_paste_url)
        self.base_path = base_path
        self.builder = builder

    def __setitem__(self, url, app):
        # Strings are filenames relative to base_path; build them into
        # applications before mounting.
        if isinstance(app, _string_types):
            app_fn = os.path.join(self.base_path, app)
            app = self.builder(app_fn)
        url = self.map.normalize_url(url)
        # @@: This means http://foo.com/bar will potentially
        # match foo.com, but /base_paste_url/bar, which is unintuitive
        url = (url[0] or self.base_paste_url[0],
               self.base_paste_url[1] + url[1])
        self.map[url] = app

    def __getattr__(self, attr):
        # Everything else is delegated to the wrapped URLMap.
        return getattr(self.map, attr)

    # This is really the only settable attribute
    def not_found_application__get(self):
        return self.map.not_found_application

    def not_found_application__set(self, value):
        self.map.not_found_application = value

    not_found_application = property(not_found_application__get,
                                     not_found_application__set)
| 35.393939 | 84 | 0.585402 |
import re
import os
import cgi
try:
from collections import MutableMapping as DictMixin
except ImportError:
from UserDict import DictMixin
from paste import httpexceptions
__all__ = ['URLMap', 'PathProxyURLMap']
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste app_factory building a URLMap: ``not_found_app`` is taken
    from local (priority) or global config; every other local option
    maps a path expression to an application name."""
    if 'not_found_app' in local_conf:
        not_found_app = local_conf.pop('not_found_app')
    else:
        not_found_app = global_conf.get('not_found_app')
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    urlmap = URLMap(not_found_app=not_found_app)
    for path, app_name in local_conf.items():
        path = parse_path_expression(path)
        app = loader.get_app(app_name, global_conf=global_conf)
        urlmap[path] = app
    return urlmap
def parse_path_expression(path):
    """Parse an expression like ``'domain foobar.com port 20 /'`` (or a
    bare ``'/foobar'``) into an address URLMap accepts; raises
    ValueError on duplicate/incomplete clauses or a port without a
    domain."""
    parts = path.split()
    # NOTE: ``path`` is deliberately reused below as the bare-path slot.
    domain = port = path = None
    while parts:
        if parts[0] == 'domain':
            parts.pop(0)
            if not parts:
                raise ValueError("'domain' must be followed with a domain name")
            if domain:
                raise ValueError("'domain' given twice")
            domain = parts.pop(0)
        elif parts[0] == 'port':
            parts.pop(0)
            if not parts:
                raise ValueError("'port' must be followed with a port number")
            if port:
                raise ValueError("'port' given twice")
            port = parts.pop(0)
        else:
            if path:
                raise ValueError("more than one path given (have %r, got %r)"
                                 % (path, parts[0]))
            path = parts.pop(0)
    # Assemble 'http://domain[:port]' followed by the path (if any).
    s = ''
    if domain:
        s = 'http://%s' % domain
    if port:
        if not domain:
            raise ValueError("If you give a port, you must also give a domain")
        s += ':' + port
    if path:
        if s:
            s += '/'
        s += path
    return s
class URLMap(DictMixin):
    """Dictionary-like WSGI dispatcher: keys are (domain, url-prefix)
    addresses, values are applications.  Longest prefix wins; on
    dispatch the matched prefix moves from PATH_INFO to SCRIPT_NAME.
    """
    def __init__(self, not_found_app=None):
        # ((domain, url), app) pairs, kept sorted by sort_apps().
        self.applications = []
        if not not_found_app:
            not_found_app = self.not_found_app
        self.not_found_application = not_found_app
    def __len__(self):
        return len(self.applications)
    def __iter__(self):
        # Iterates the (domain, url) keys, matching dict semantics.
        for app_url, app in self.applications:
            yield app_url
    norm_url_re = re.compile('//+')
    domain_url_re = re.compile('^(http|https)://')
    def not_found_app(self, environ, start_response):
        # Default 404 app: reports registered mounts and the request's
        # routing variables in the error comment.
        mapper = environ.get('paste.urlmap_object')
        if mapper:
            matches = [p for p, a in mapper.applications]
            extra = 'defined apps: %s' % (
                ',\n  '.join(map(repr, matches)))
        else:
            extra = ''
        extra += '\nSCRIPT_NAME: %r' % cgi.escape(environ.get('SCRIPT_NAME'))
        extra += '\nPATH_INFO: %r' % cgi.escape(environ.get('PATH_INFO'))
        extra += '\nHTTP_HOST: %r' % cgi.escape(environ.get('HTTP_HOST'))
        app = httpexceptions.HTTPNotFound(
            environ['PATH_INFO'],
            comment=cgi.escape(extra)).wsgi_application
        return app(environ, start_response)
    def normalize_url(self, url, trim=True):
        """Normalize *url* to (domain_or_None, path): collapse repeated
        slashes, optionally strip a trailing slash, split off an
        ``http(s)://domain`` prefix, recurse into tuples."""
        if isinstance(url, (list, tuple)):
            domain = url[0]
            url = self.normalize_url(url[1])[1]
            return domain, url
        assert (not url or url.startswith('/')
                or self.domain_url_re.search(url)), (
            "URL fragments must start with / or http:// (you gave %r)" % url)
        match = self.domain_url_re.search(url)
        if match:
            url = url[match.end():]
            if '/' in url:
                domain, url = url.split('/', 1)
                url = '/' + url
            else:
                domain, url = url, ''
        else:
            domain = None
        url = self.norm_url_re.sub('/', url)
        if trim:
            url = url.rstrip('/')
        return domain, url
    def sort_apps(self):
        """Sort mounts so longer URLs come first and domain-less mounts
        come after all explicit-domain ones."""
        def key(app_desc):
            (domain, url), app = app_desc
            if not domain:
                # '\xff' sorts after real domain names.
                return '\xff', -len(url)
            else:
                return domain, -len(url)
        apps = [(key(desc), desc) for desc in self.applications]
        apps.sort()
        self.applications = [desc for (sortable, desc) in apps]
    def __setitem__(self, url, app):
        # Assigning None removes any existing mapping for the URL.
        if app is None:
            try:
                del self[url]
            except KeyError:
                pass
            return
        dom_url = self.normalize_url(url)
        if dom_url in self:
            del self[dom_url]
        self.applications.append((dom_url, app))
        self.sort_apps()
    def __getitem__(self, url):
        dom_url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == dom_url:
                return app
        raise KeyError(
            "No application with the url %r (domain: %r; existing: %s)"
            % (url[1], url[0] or '*', self.applications))
    def __delitem__(self, url):
        url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == url:
                self.applications.remove((app_url, app))
                break
        else:
            raise KeyError(
                "No application with the url %r" % (url,))
    def keys(self):
        return [app_url for app_url, app in self.applications]
    def __call__(self, environ, start_response):
        # WSGI entry point: longest-prefix dispatch with optional
        # domain matching; falls back to the not-found application.
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            # No explicit port in the Host header: infer from scheme.
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'
        path_info = environ.get('PATH_INFO')
        path_info = self.normalize_url(path_info, False)[1]
        for (domain, app_url), app in self.applications:
            if domain and domain != host and domain != host+':'+port:
                continue
            if (path_info == app_url
                or path_info.startswith(app_url + '/')):
                environ['SCRIPT_NAME'] += app_url
                environ['PATH_INFO'] = path_info[len(app_url):]
                return app(environ, start_response)
        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
class PathProxyURLMap(object):
    """URLMap wrapper that treats string apps as filenames (relative to
    ``base_path``) passed to ``builder``, and mounts everything under
    ``base_paste_url``.  Other attribute access is proxied to the map.
    """
    def __init__(self, map, base_paste_url, base_path, builder):
        self.map = map
        self.base_paste_url = self.map.normalize_url(base_paste_url)
        self.base_path = base_path
        self.builder = builder
    def __setitem__(self, url, app):
        # NOTE(review): ``unicode`` exists only on Python 2; on Python 3
        # this line raises NameError — confirm intended Python version.
        if isinstance(app, (str, unicode)):
            app_fn = os.path.join(self.base_path, app)
            app = self.builder(app_fn)
        url = self.map.normalize_url(url)
        # A domain in *url* is kept, but the path is still prefixed with
        # base_paste_url, which can be unintuitive.
        url = (url[0] or self.base_paste_url[0],
               self.base_paste_url[1] + url[1])
        self.map[url] = app
    def __getattr__(self, attr):
        return getattr(self.map, attr)
    # The only attribute that is settable through the proxy.
    def not_found_application__get(self):
        return self.map.not_found_application
    def not_found_application__set(self, value):
        self.map.not_found_application = value
    not_found_application = property(not_found_application__get,
                                     not_found_application__set)
| true | true |
f721f3ddf13fab1c1dd77ef776c52be317cf6e44 | 1,279 | py | Python | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/spanhandlers/attachment.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 83 | 2017-03-15T12:43:25.000Z | 2022-03-31T12:38:44.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/spanhandlers/attachment.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 18 | 2017-03-20T14:12:58.000Z | 2021-07-28T09:11:55.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/spanhandlers/attachment.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 25 | 2017-04-01T01:40:02.000Z | 2022-02-20T11:08:12.000Z | import Inline
import Core
import web
info = {
"friendly_name": "Attachment",
"example_template": "pagename:attachmentname",
"summary": "Links to an attachment of this (or another, named) page.",
"details": """
<p>If invoked as [attachment some.filename], it will either embed (if
the attachment is an image) or link to (otherwise) the named
attachment. If invoked as [attachment PageName:some.filename], it
will do the same, but for the named attachment on the named page
instead of the current page.</p>
"""
}
def SpanHandler(rest, acc):
    """Handle an ``[attachment ...]`` span.

    The span body is ``name``, optionally prefixed with ``Page:`` to
    name another page and optionally followed by ``/alt text``.
    Appends an AttachmentReference to *acc* and returns the unconsumed
    remainder of the input.
    """
    (text, rest) = Inline.collectSpan(rest)
    name, slash, alt = text.partition('/')
    pagename = ''
    if ':' in name:
        (pagename, name) = name.split(':', 1)
    if not pagename:
        # No explicit page: default to the page being rendered.
        pagename = web.ctx.source_page_title
    if not slash:
        # No '/alt' part given: synthesize a descriptive alt text.
        alt = '[Attachment ' + pagename + ':' + name + ']'
    a = Core.Attachment(pagename, name, None)
    acc.append(AttachmentReference(a, alt))
    return rest
class AttachmentReference(Core.Renderable):
    """Renderable pairing a page attachment with its alt/link text."""

    def __init__(self, attachment, alt):
        (self.attachment, self.alt) = (attachment, alt)

    def templateName(self):
        """Name of the template used to render this reference."""
        return 'pyle_attachmentreference'
| 26.645833 | 74 | 0.634871 | import Inline
import Core
import web
info = {
"friendly_name": "Attachment",
"example_template": "pagename:attachmentname",
"summary": "Links to an attachment of this (or another, named) page.",
"details": """
<p>If invoked as [attachment some.filename], it will either embed (if
the attachment is an image) or link to (otherwise) the named
attachment. If invoked as [attachment PageName:some.filename], it
will do the same, but for the named attachment on the named page
instead of the current page.</p>
"""
}
def SpanHandler(rest, acc):
    """Handle an [attachment ...] span: body is ``name`` or
    ``Page:name``, optionally followed by ``/alt text``.  Appends an
    AttachmentReference to *acc*; returns the unconsumed input."""
    (text, rest) = Inline.collectSpan(rest)
    parts = text.split('/', 1)
    name = parts[0]
    pagename = ''
    if name.find(':') != -1:
        (pagename, name) = name.split(':', 1)
    if not pagename:
        # No explicit page: default to the page being rendered.
        pagename = web.ctx.source_page_title
    if len(parts) > 1:
        alt = parts[1]
    else:
        # No '/alt' part: synthesize a descriptive alt text.
        alt = '[Attachment ' + pagename + ':' + name + ']'
    a = Core.Attachment(pagename, name, None)
    acc.append(AttachmentReference(a, alt))
    return rest
class AttachmentReference(Core.Renderable):
    """Renderable pairing a page attachment with its alt/link text."""
    def __init__(self, attachment, alt):
        self.attachment = attachment
        self.alt = alt
    def templateName(self):
        # Template used to render this reference.
        return 'pyle_attachmentreference'
| true | true |
f721f56cbde628047f9a4dc62bd67e702f776cbd | 4,788 | py | Python | Thesis@3.9.1/Lib/site-packages/mypyc/primitives/list_ops.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | Thesis@3.9.1/Lib/site-packages/mypyc/primitives/list_ops.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | Thesis@3.9.1/Lib/site-packages/mypyc/primitives/list_ops.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | """List primitive ops."""
from typing import List
from mypyc.ir.ops import ERR_MAGIC, ERR_NEVER, ERR_FALSE, EmitterInterface
from mypyc.ir.rtypes import (
int_rprimitive,
short_int_rprimitive,
list_rprimitive,
object_rprimitive,
bool_rprimitive,
)
from mypyc.primitives.registry import (
name_ref_op,
binary_op,
func_op,
method_op,
custom_op,
name_emit,
call_emit,
call_negative_bool_emit,
)
# Get the 'builtins.list' type object (borrowed reference to the
# static PyList_Type, so it can never fail).
name_ref_op(
    "builtins.list",
    result_type=object_rprimitive,
    error_kind=ERR_NEVER,
    emit=name_emit("&PyList_Type", target_type="PyObject *"),
    is_borrowed=True,
)
# list(obj) -- convert any sequence/iterable to a list.
to_list = func_op(
    name="builtins.list",
    arg_types=[object_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("PySequence_List"),
)
def emit_new(emitter: EmitterInterface, args: List[str], dest: str) -> None:
    """Emit C code that allocates a list and stores ``args`` into it.

    If allocation fails, ``dest`` is NULL and no items are stored.
    """
    # TODO: This would be better split into multiple smaller ops.
    emitter.emit_line('%s = PyList_New(%d); ' % (dest, len(args)))
    emitter.emit_line('if (likely(%s != NULL)) {' % dest)
    for index, item in enumerate(args):
        emitter.emit_line('PyList_SET_ITEM(%s, %s, %s);' % (dest, index, item))
    emitter.emit_line('}')
# Construct a list from values: [item1, item2, ....]
# (steals its arguments: the new list takes over the references)
new_list_op = custom_op(
    arg_types=[object_rprimitive],
    result_type=list_rprimitive,
    is_var_arg=True,
    error_kind=ERR_MAGIC,
    steals=True,
    format_str="{dest} = [{comma_args}]",
    emit=emit_new,
)
# list[index] (for an arbitrary-precision integer index)
list_get_item_op = method_op(
    name="__getitem__",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_GetItem"),
)
# Version with no int bounds check for when it is known to be short
# (higher priority so it is preferred when the index type allows it)
method_op(
    name="__getitem__",
    arg_types=[list_rprimitive, short_int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_GetItemShort"),
    priority=2,
)
# This is unsafe because it assumes that the index is a non-negative short integer
# that is in-bounds for the list.
list_get_item_unsafe_op = custom_op(
    name="__getitem__",
    arg_types=[list_rprimitive, short_int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_NEVER,
    format_str="{dest} = {args[0]}[{args[1]}] :: unsafe list",
    emit=call_emit("CPyList_GetItemUnsafe"),
)
# list[index] = obj (steals the stored object reference)
list_set_item_op = method_op(
    name="__setitem__",
    arg_types=[list_rprimitive, int_rprimitive, object_rprimitive],
    steals=[False, False, True],
    result_type=bool_rprimitive,
    error_kind=ERR_FALSE,
    emit=call_emit("CPyList_SetItem"),
)
# list.append(obj)
list_append_op = method_op(
    name="append",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=bool_rprimitive,
    error_kind=ERR_FALSE,
    emit=call_negative_bool_emit("PyList_Append"),
)
# list.extend(obj)
list_extend_op = method_op(
    name="extend",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Extend"),
)
# list.pop() -- remove and return the last item
list_pop_last = method_op(
    name="pop",
    arg_types=[list_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_PopLast"),
)
# list.pop(index)
list_pop = method_op(
    name="pop",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Pop"),
)
# list.count(obj)
method_op(
    name="count",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=short_int_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Count"),
)
# list * int
binary_op(
    op="*",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    format_str="{dest} = {args[0]} * {args[1]} :: list",
    emit=call_emit("CPySequence_Multiply"),
)
# int * list (reflected form)
binary_op(
    op="*",
    arg_types=[int_rprimitive, list_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    format_str="{dest} = {args[0]} * {args[1]} :: list",
    emit=call_emit("CPySequence_RMultiply"),
)
def emit_len(emitter: EmitterInterface, args: List[str], dest: str) -> None:
    """Emit C code computing len() of a list as a tagged short int."""
    size_var = emitter.temp_name()
    emitter.emit_declaration('Py_ssize_t %s;' % size_var)
    emitter.emit_line('%s = PyList_GET_SIZE(%s);' % (size_var, args[0]))
    emitter.emit_line('%s = CPyTagged_ShortFromSsize_t(%s);' % (dest, size_var))
# len(list) -- PyList_GET_SIZE never fails on a valid list.
list_len_op = func_op(
    name="builtins.len",
    arg_types=[list_rprimitive],
    result_type=short_int_rprimitive,
    error_kind=ERR_NEVER,
    emit=emit_len,
)
| 25.604278 | 82 | 0.701337 |
from typing import List
from mypyc.ir.ops import ERR_MAGIC, ERR_NEVER, ERR_FALSE, EmitterInterface
from mypyc.ir.rtypes import (
int_rprimitive,
short_int_rprimitive,
list_rprimitive,
object_rprimitive,
bool_rprimitive,
)
from mypyc.primitives.registry import (
name_ref_op,
binary_op,
func_op,
method_op,
custom_op,
name_emit,
call_emit,
call_negative_bool_emit,
)
# Get the 'builtins.list' type object (borrowed ref, cannot fail).
name_ref_op(
    "builtins.list",
    result_type=object_rprimitive,
    error_kind=ERR_NEVER,
    emit=name_emit("&PyList_Type", target_type="PyObject *"),
    is_borrowed=True,
)
# list(obj)
to_list = func_op(
    name="builtins.list",
    arg_types=[object_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("PySequence_List"),
)
def emit_new(emitter: EmitterInterface, args: List[str], dest: str) -> None:
    """Emit C code constructing a list display [a, b, ...] in dest.

    Allocates the list with PyList_New and, on success, stores each
    argument with PyList_SET_ITEM (which takes over the references --
    the op is registered with steals=True).
    """
    line = emitter.emit_line
    line("%s = PyList_New(%d); " % (dest, len(args)))
    line("if (likely(%s != NULL)) {" % dest)
    for index, item in enumerate(args):
        line("PyList_SET_ITEM(%s, %s, %s);" % (dest, index, item))
    line("}")
# List display, e.g. [x, y, z]; steals the item references (see emit_new).
new_list_op = custom_op(
    arg_types=[object_rprimitive],
    result_type=list_rprimitive,
    is_var_arg=True,
    error_kind=ERR_MAGIC,
    steals=True,
    format_str="{dest} = [{comma_args}]",
    emit=emit_new,
)
# list[index] with an arbitrary tagged int index
list_get_item_op = method_op(
    name="__getitem__",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_GetItem"),
)
# list[index] specialized for a short int index (preferred: priority=2)
method_op(
    name="__getitem__",
    arg_types=[list_rprimitive, short_int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_GetItemShort"),
    priority=2,
)
# list[index] with no bounds/error checking (ERR_NEVER)
list_get_item_unsafe_op = custom_op(
    name="__getitem__",
    arg_types=[list_rprimitive, short_int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_NEVER,
    format_str="{dest} = {args[0]}[{args[1]}] :: unsafe list",
    emit=call_emit("CPyList_GetItemUnsafe"),
)
# list[index] = value; steals the value reference
list_set_item_op = method_op(
    name="__setitem__",
    arg_types=[list_rprimitive, int_rprimitive, object_rprimitive],
    steals=[False, False, True],
    result_type=bool_rprimitive,
    error_kind=ERR_FALSE,
    emit=call_emit("CPyList_SetItem"),
)
# list.append(obj)
list_append_op = method_op(
    name="append",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=bool_rprimitive,
    error_kind=ERR_FALSE,
    emit=call_negative_bool_emit("PyList_Append"),
)
# list.extend(iterable)
list_extend_op = method_op(
    name="extend",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Extend"),
)
# list.pop() -- no index argument: remove and return the last item
list_pop_last = method_op(
    name="pop",
    arg_types=[list_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_PopLast"),
)
# list.pop(index)
list_pop = method_op(
    name="pop",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=object_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Pop"),
)
# list.count(obj)
method_op(
    name="count",
    arg_types=[list_rprimitive, object_rprimitive],
    result_type=short_int_rprimitive,
    error_kind=ERR_MAGIC,
    emit=call_emit("CPyList_Count"),
)
# list * int
binary_op(
    op="*",
    arg_types=[list_rprimitive, int_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    format_str="{dest} = {args[0]} * {args[1]} :: list",
    emit=call_emit("CPySequence_Multiply"),
)
# int * list
binary_op(
    op="*",
    arg_types=[int_rprimitive, list_rprimitive],
    result_type=list_rprimitive,
    error_kind=ERR_MAGIC,
    format_str="{dest} = {args[0]} * {args[1]} :: list",
    emit=call_emit("CPySequence_RMultiply"),
)
def emit_len(emitter: EmitterInterface, args: List[str], dest: str) -> None:
    """Generate C computing len() of the list in args[0] into dest.

    The raw Py_ssize_t size is converted to a tagged short int; this
    operation cannot fail, so callers register it with ERR_NEVER.
    """
    ssize = emitter.temp_name()
    emitter.emit_declaration("Py_ssize_t %s;" % ssize)
    emitter.emit_line("%s = PyList_GET_SIZE(%s);" % (ssize, args[0]))
    emitter.emit_line("%s = CPyTagged_ShortFromSsize_t(%s);" % (dest, ssize))
# len(list) -- implemented by emit_len; cannot fail (ERR_NEVER)
list_len_op = func_op(
    name="builtins.len",
    arg_types=[list_rprimitive],
    result_type=short_int_rprimitive,
    error_kind=ERR_NEVER,
    emit=emit_len,
)
| true | true |
f721f80c825ad0a5b4d65ff002d6c6ca07631547 | 33,858 | py | Python | tests/unit/test_notifications.py | klmitch/heyu | 9bdc552115bb22d1d01910b0b851eb3cbc3b08d1 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_notifications.py | klmitch/heyu | 9bdc552115bb22d1d01910b0b851eb3cbc3b08d1 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_notifications.py | klmitch/heyu | 9bdc552115bb22d1d01910b0b851eb3cbc3b08d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import signal
import sys
import unittest
import mock
from heyu import notifications
from heyu import protocol
from heyu import util
class TestException(Exception):
    """Sentinel exception raised from mocks to exercise error paths."""
    pass
class NotificationServerTest(unittest.TestCase):
    """Tests for notifications.NotificationServer.

    Most tests patch NotificationServer.__init__ to a no-op and build
    the instance attributes by hand, so each test exercises exactly one
    method in isolation.
    """
    def _signal_test(self, notifier_server, mock_signal):
        # Expected wiring: SIGINT/SIGTERM -> stop(), and SIGUSR1 ->
        # shutdown() where the platform provides that signal.
        signals = [
            mock.call(signal.SIGINT, notifier_server.stop),
            mock.call(signal.SIGTERM, notifier_server.stop),
        ]
        if hasattr(signal, 'SIGUSR1'):
            signals.append(mock.call(signal.SIGUSR1, notifier_server.shutdown))
        mock_signal.assert_has_calls(signals)
        self.assertEqual(len(signals), mock_signal.call_count)
    @mock.patch.object(sys, 'argv', ['/bin/notifier.py'])
    @mock.patch('tendril.get_manager', return_value='manager')
    @mock.patch('gevent.signal')
    @mock.patch('uuid.uuid4', return_value='some-uuid')
    @mock.patch('gevent.event.Event', return_value='event')
    @mock.patch.object(util, 'cert_wrapper', return_value='wrapper')
    @mock.patch.object(util, 'outgoing_endpoint', return_value='endpoint')
    def test_init_basic(self, mock_outgoing_endpoint, mock_cert_wrapper,
                        mock_Event, mock_uuid4, mock_signal, mock_get_manager):
        # Defaults: app name from sys.argv[0] basename, id from uuid4.
        result = notifications.NotificationServer('hub')
        self.assertEqual('hub', result._hub)
        self.assertEqual('manager', result._manager)
        self.assertEqual('wrapper', result._wrapper)
        self.assertEqual('notifier.py', result._app_name)
        self.assertEqual('some-uuid', result._app_id)
        self.assertEqual(None, result._hub_app)
        self.assertEqual([], result._notifications)
        self.assertEqual('event', result._notify_event)
        mock_outgoing_endpoint.assert_called_once_with('hub')
        mock_get_manager.assert_called_once_with('tcp', 'endpoint')
        mock_cert_wrapper.assert_called_once_with(
            None, 'notifier', secure=True)
        self._signal_test(result, mock_signal)
    @mock.patch.object(sys, 'argv', ['/bin/notifier.py'])
    @mock.patch('tendril.get_manager', return_value='manager')
    @mock.patch('gevent.signal')
    @mock.patch('uuid.uuid4', return_value='some-uuid')
    @mock.patch('gevent.event.Event', return_value='event')
    @mock.patch.object(util, 'cert_wrapper', return_value='wrapper')
    @mock.patch.object(util, 'outgoing_endpoint', return_value='endpoint')
    def test_init_alt(self, mock_outgoing_endpoint, mock_cert_wrapper,
                      mock_Event, mock_uuid4, mock_signal, mock_get_manager):
        # Explicit cert_conf/secure/app_name/app_id override the defaults.
        result = notifications.NotificationServer('hub', 'cert_conf', False,
                                                  'app', 'app-uuid')
        self.assertEqual('hub', result._hub)
        self.assertEqual('manager', result._manager)
        self.assertEqual('wrapper', result._wrapper)
        self.assertEqual('app', result._app_name)
        self.assertEqual('app-uuid', result._app_id)
        self.assertEqual(None, result._hub_app)
        self.assertEqual([], result._notifications)
        self.assertEqual('event', result._notify_event)
        mock_outgoing_endpoint.assert_called_once_with('hub')
        mock_get_manager.assert_called_once_with('tcp', 'endpoint')
        mock_cert_wrapper.assert_called_once_with(
            'cert_conf', 'notifier', secure=False)
        self._signal_test(result, mock_signal)
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationServer, 'start')
    def test_iter_running(self, mock_start, mock_init):
        # iter() on an already-running server must not call start().
        server = notifications.NotificationServer()
        server._hub_app = 'application'
        result = iter(server)
        self.assertEqual(server, result)
        self.assertFalse(mock_start.called)
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationServer, 'start')
    def test_iter_nonrunning(self, mock_start, mock_init):
        # iter() on a stopped server starts it implicitly.
        server = notifications.NotificationServer()
        server._hub_app = None
        result = iter(server)
        self.assertEqual(server, result)
        mock_start.assert_called_once_with()
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(sys, 'exit', side_effect=TestException())
    def test_next_notification(self, mock_exit, mock_init):
        # next() pops a queued notification without touching the event.
        server = notifications.NotificationServer()
        server._notifications = ['notification']
        server._notify_event = mock.Mock()
        server._hub_app = None
        result = server.next()
        self.assertEqual('notification', result)
        self.assertEqual(0, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(sys, 'exit', side_effect=TestException())
    def test_next_exit(self, mock_exit, mock_init):
        # A queued None sentinel makes next() call sys.exit().
        server = notifications.NotificationServer()
        server._notifications = [None]
        server._notify_event = mock.Mock()
        server._hub_app = None
        self.assertRaises(TestException, server.next)
        mock_exit.assert_called_once_with()
        self.assertEqual(0, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(sys, 'exit', side_effect=TestException())
    def test_next_empty_stop(self, mock_exit, mock_init):
        # Empty queue while stopped -> StopIteration after clearing event.
        server = notifications.NotificationServer()
        server._notifications = []
        server._notify_event = mock.Mock()
        server._hub_app = None
        self.assertRaises(StopIteration, server.next)
        server._notify_event.assert_has_calls([
            mock.call.clear(),
        ])
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(sys, 'exit', side_effect=TestException())
    def test_next_empty_loop(self, mock_exit, mock_init):
        # Empty queue while running -> clear + wait, then retry the pop.
        server = notifications.NotificationServer()
        server._notifications = []
        server._notify_event = mock.Mock()
        server._hub_app = 'app'
        def fake_wait():
            # Simulate a notification arriving during the wait.
            server._notifications.append('waited')
        server._notify_event.wait.side_effect = fake_wait
        result = server.next()
        self.assertEqual('waited', result)
        server._notify_event.assert_has_calls([
            mock.call.clear(),
            mock.call.wait(),
        ])
        self.assertEqual(2, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    @mock.patch.object(notifications, 'NotificationApplication',
                       return_value='app')
    def test_acceptor(self, mock_NotificationApplication, mock_init):
        # The acceptor wraps a tendril connection in the application.
        server = notifications.NotificationServer()
        server._hub_app = True
        server._app_name = 'app_name'
        server._app_id = 'app_id'
        result = server._acceptor('tendril')
        self.assertEqual('app', result)
        mock_NotificationApplication.assert_called_once_with(
            'tendril', server, 'app_name', 'app_id')
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_start_running(self, mock_init):
        # start() on a running server raises and leaves state untouched.
        server = notifications.NotificationServer()
        server._hub_app = 'running'
        server._manager = mock.Mock()
        server._hub = 'hub'
        server._wrapper = 'wrapper'
        self.assertRaises(ValueError, server.start)
        self.assertEqual('running', server._hub_app)
        self.assertEqual(0, len(server._manager.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_start_stopped(self, mock_init):
        # start() kicks the manager and begins connecting to the hub.
        server = notifications.NotificationServer()
        server._hub_app = None
        server._manager = mock.Mock()
        server._hub = 'hub'
        server._wrapper = 'wrapper'
        server.start()
        self.assertEqual(True, server._hub_app)
        server._manager.assert_has_calls([
            mock.call.start(),
            mock.call.connect('hub', server._acceptor, 'wrapper'),
        ])
        self.assertEqual(2, len(server._manager.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_stop_stopped(self, mock_init):
        # stop() on a stopped server is a no-op.
        server = notifications.NotificationServer()
        server._hub_app = None
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.stop()
        self.assertEqual(None, server._hub_app)
        self.assertEqual([], server._notifications)
        self.assertEqual(0, len(server._manager.method_calls))
        self.assertEqual(0, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_stop_simple(self, mock_init):
        # stop() disconnects the app, stops the manager, wakes waiters.
        app = mock.Mock()
        server = notifications.NotificationServer()
        server._hub_app = app
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.stop()
        self.assertEqual(None, server._hub_app)
        self.assertEqual([], server._notifications)
        server._manager.stop.assert_called_once_with()
        self.assertEqual(1, len(server._manager.method_calls))
        app.disconnect.assert_called_once_with()
        self.assertEqual(1, len(app.method_calls))
        server._notify_event.set.assert_called_once_with()
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_stop_connecting(self, mock_init):
        # _hub_app is True while still connecting: no app to disconnect.
        server = notifications.NotificationServer()
        server._hub_app = True
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.stop()
        self.assertEqual(None, server._hub_app)
        self.assertEqual([], server._notifications)
        server._manager.stop.assert_called_once_with()
        self.assertEqual(1, len(server._manager.method_calls))
        server._notify_event.set.assert_called_once_with()
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_stop_sentinel(self, mock_init):
        # When invoked as a signal handler (extra args), queue the None
        # sentinel so consumers exit via next().
        app = mock.Mock()
        server = notifications.NotificationServer()
        server._hub_app = app
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.stop('signal', 'arguments')
        self.assertEqual(None, server._hub_app)
        self.assertEqual([None], server._notifications)
        server._manager.stop.assert_called_once_with()
        self.assertEqual(1, len(server._manager.method_calls))
        app.disconnect.assert_called_once_with()
        self.assertEqual(1, len(app.method_calls))
        server._notify_event.set.assert_called_once_with()
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_shutdown_stopped(self, mock_init):
        # shutdown() on a stopped server is a no-op.
        server = notifications.NotificationServer()
        server._hub_app = None
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.shutdown()
        self.assertEqual(None, server._hub_app)
        self.assertEqual([], server._notifications)
        self.assertEqual(0, len(server._manager.method_calls))
        self.assertEqual(0, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_shutdown_running(self, mock_init):
        # shutdown() queues the None sentinel and shuts the manager down.
        server = notifications.NotificationServer()
        server._hub_app = 'running'
        server._manager = mock.Mock()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.shutdown()
        self.assertEqual(None, server._hub_app)
        self.assertEqual([None], server._notifications)
        server._manager.shutdown.assert_called_once_with()
        self.assertEqual(1, len(server._manager.method_calls))
        server._notify_event.set.assert_called_once_with()
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_notify(self, mock_init):
        # notify() appends to the queue and wakes any waiting consumer.
        server = notifications.NotificationServer()
        server._notifications = []
        server._notify_event = mock.Mock()
        server.notify('notification')
        self.assertEqual(['notification'], server._notifications)
        server._notify_event.set.assert_called_once_with()
        self.assertEqual(1, len(server._notify_event.method_calls))
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_app_name(self, mock_init):
        # app_name property exposes the private _app_name attribute.
        server = notifications.NotificationServer()
        server._app_name = 'app_name'
        self.assertEqual('app_name', server.app_name)
    @mock.patch.object(notifications.NotificationServer, '__init__',
                       return_value=None)
    def test_app_id(self, mock_init):
        # app_id property exposes the private _app_id attribute.
        server = notifications.NotificationServer()
        server._app_id = 'app_id'
        self.assertEqual('app_id', server.app_id)
class NotificationApplicationTest(unittest.TestCase):
    """Tests for notifications.NotificationApplication.

    The recv_frame tests each patch protocol.Message.from_frame to
    return one message type and verify the application's reaction.
    """
    @mock.patch('tendril.Application.__init__', return_value=None)
    @mock.patch('tendril.COBSFramer', return_value='framer')
    @mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
        'to_frame.return_value': 'some frame',
    }))
    @mock.patch.object(notifications.NotificationApplication, 'send_frame')
    def test_init(self, mock_send_frame, mock_Message,
                  mock_COBSFramer, mock_init):
        # Construction installs the COBS framer and sends "subscribe".
        parent = mock.Mock()
        result = notifications.NotificationApplication(parent, 'server',
                                                       'app_name', 'app_id')
        self.assertEqual('server', result.server)
        self.assertEqual('app_name', result.app_name)
        self.assertEqual('app_id', result.app_id)
        self.assertEqual('framer', parent.framers)
        mock_init.assert_called_once_with(parent)
        mock_COBSFramer.assert_called_once_with(True)
        mock_Message.assert_called_once_with('subscribe')
        mock_Message.return_value.to_frame.assert_called_once_with()
        mock_send_frame.assert_called_once_with('some frame')
    @mock.patch.object(protocol.Message, 'from_frame',
                       side_effect=ValueError('failed to decode'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_decodeerror(self, mock_closed, mock_disconnect,
                                    mock_notify, mock_init, mock_from_frame):
        # Undecodable frame -> error notification, disconnect, stop.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        mock_notify.assert_called_once_with(
            'Failed To Parse Server Message',
            'Unable to parse a message from the server: failed to decode',
            notifications.ERROR)
        mock_disconnect.assert_called_once_with()
        self.assertFalse(mock_closed.called)
        app.server.stop.assert_called_once_with()
        self.assertFalse(app.server.notify.called)
    @mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
        msg_type='unknown'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_unknownmsg(self, mock_closed, mock_disconnect,
                                   mock_notify, mock_init, mock_from_frame):
        # Unknown message type -> error notification, stay connected.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        mock_notify.assert_called_once_with(
            'Unknown Server Message',
            'An unrecognized server message of type "unknown" was received.',
            notifications.ERROR)
        self.assertFalse(mock_disconnect.called)
        self.assertFalse(mock_closed.called)
        self.assertFalse(app.server.stop.called)
        self.assertFalse(app.server.notify.called)
    @mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
        msg_type='error', reason='some error'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_error(self, mock_closed, mock_disconnect,
                              mock_notify, mock_init, mock_from_frame):
        # "error" message -> error notification, disconnect, stop.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        mock_notify.assert_called_once_with(
            'Communication Error',
            'An error occurred communicating with the HeyU hub: some error',
            notifications.ERROR)
        mock_disconnect.assert_called_once_with()
        self.assertFalse(mock_closed.called)
        app.server.stop.assert_called_once_with()
        self.assertFalse(app.server.notify.called)
    @mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
        msg_type='goodbye'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_goodbye(self, mock_closed, mock_disconnect,
                                mock_notify, mock_init, mock_from_frame):
        # "goodbye" -> disconnect and report the connection closed.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        self.assertFalse(mock_notify.called)
        mock_disconnect.assert_called_once_with()
        mock_closed.assert_called_once_with(None)
        self.assertFalse(app.server.stop.called)
        self.assertFalse(app.server.notify.called)
    @mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
        msg_type='subscribed'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_subscribed(self, mock_closed, mock_disconnect,
                                   mock_notify, mock_init, mock_from_frame):
        # "subscribed" -> emit the connection-established notification.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        mock_notify.assert_called_once_with(
            'Connection Established',
            'The connection to the HeyU hub has been established.',
            notifications.CONNECTED)
        self.assertFalse(mock_disconnect.called)
        self.assertFalse(mock_closed.called)
        self.assertFalse(app.server.stop.called)
        self.assertFalse(app.server.notify.called)
    @mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
        msg_type='notify'))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    @mock.patch.object(notifications.NotificationApplication, 'disconnect')
    @mock.patch.object(notifications.NotificationApplication, 'closed')
    def test_recv_frame_notify(self, mock_closed, mock_disconnect,
                               mock_notify, mock_init, mock_from_frame):
        # "notify" -> pass the message straight to the server.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.recv_frame('test')
        mock_from_frame.assert_called_once_with('test')
        self.assertFalse(mock_notify.called)
        self.assertFalse(mock_disconnect.called)
        self.assertFalse(mock_closed.called)
        self.assertFalse(app.server.stop.called)
        app.server.notify.assert_called_once_with(mock_from_frame.return_value)
    @mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
        'to_frame.return_value': 'frame',
    }))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'send_frame')
    @mock.patch.object(notifications.NotificationApplication, 'close')
    def test_disconnect_success(self, mock_close, mock_send_frame, mock_init,
                                mock_Message):
        # disconnect() sends a "goodbye" frame and then closes.
        app = notifications.NotificationApplication()
        app.disconnect()
        mock_Message.assert_called_once_with('goodbye')
        mock_Message.return_value.to_frame.assert_called_once_with()
        mock_send_frame.assert_called_once_with('frame')
        mock_close.assert_called_once_with()
    @mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
        'to_frame.return_value': 'frame',
    }))
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'send_frame',
                       side_effect=TestException('test'))
    @mock.patch.object(notifications.NotificationApplication, 'close')
    def test_disconnect_failure(self, mock_close, mock_send_frame, mock_init,
                                mock_Message):
        # Even if sending "goodbye" fails, close() is still called.
        app = notifications.NotificationApplication()
        app.disconnect()
        mock_Message.assert_called_once_with('goodbye')
        mock_Message.return_value.to_frame.assert_called_once_with()
        mock_send_frame.assert_called_once_with('frame')
        mock_close.assert_called_once_with()
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    @mock.patch.object(notifications.NotificationApplication, 'notify')
    def test_closed(self, mock_notify, mock_init):
        # closed() reports the disconnect and stops the server.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.closed(None)
        mock_notify.assert_called_once_with(
            'Connection Closed',
            'The connection to the HeyU hub has been closed.',
            notifications.DISCONNECTED)
        app.server.stop.assert_called_once_with()
    @mock.patch.object(protocol, 'Message', return_value='notification')
    @mock.patch.object(notifications.NotificationApplication, '__init__',
                       return_value=None)
    def test_notify(self, mock_init, mock_Message):
        # notify() builds a "notify" Message tagged with this app's
        # name/id and hands it to the server.
        app = notifications.NotificationApplication()
        app.server = mock.Mock()
        app.app_name = 'app_name'
        app.app_id = 'app_id'
        app.notify('summary', 'body', 'category')
        mock_Message.assert_called_once_with(
            'notify', summary='summary', body='body', category='category',
            app_name='app_name', id='app_id')
        app.server.notify.assert_called_once_with('notification')
class StdoutNotifierTest(unittest.TestCase):
    """Tests for notifications.stdout_notifier()'s formatted output."""
    @mock.patch.object(sys, 'stdout', io.BytesIO())
    @mock.patch.object(notifications, 'NotificationServer', return_value=[
        mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
                  app_name='application-1', summary='summary-1', body='body-1',
                  category='cat-1'),
        mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
                  app_name='application-2', summary='summary-2', body='body-2',
                  category=None),
        mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
                  app_name='application-3', summary='summary-3', body='body-3',
                  category='cat-3'),
    ])
    def test_output(self, mock_NotificationServer):
        # The server is replaced with a plain list of mock notifications;
        # verify each one is rendered, plus the trailing count summary.
        notifications.stdout_notifier('hub')
        mock_NotificationServer.assert_called_once_with('hub', None, True)
        self.assertEqual(
            'ID notify-1, urgency low\n'
            'Application: application-1\n'
            '    Summary: summary-1\n'
            '       Body: body-1\n'
            '   Category: cat-1\n'
            '\n'
            'ID notify-2, urgency normal\n'
            'Application: application-2\n'
            '    Summary: summary-2\n'
            '       Body: body-2\n'
            '   Category: None\n'
            '\n'
            'ID notify-3, urgency critical\n'
            'Application: application-3\n'
            '    Summary: summary-3\n'
            '       Body: body-3\n'
            '   Category: cat-3\n'
            '\n'
            'Notifications received: 3\n',
            sys.stdout.getvalue())
class MyBytesIO(io.BytesIO):
    """
    A BytesIO that snapshots its buffer into the ``contents`` attribute
    when closed, so tests can inspect what was written to a file-like
    object even after the code under test has closed it.
    """
    def close(self):
        # Capture the buffer before the base class discards it.
        self.contents = self.getvalue()
        io.BytesIO.close(self)
class FileNotifierTest(unittest.TestCase):
    """Tests for notifications.file_notifier() appending to a file."""
    @mock.patch('__builtin__.open', return_value=MyBytesIO())
    @mock.patch.object(notifications, 'NotificationServer', return_value=[
        mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
                  app_name='application-1', summary='summary-1', body='body-1',
                  category='cat-1'),
        mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
                  app_name='application-2', summary='summary-2', body='body-2',
                  category=None),
        mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
                  app_name='application-3', summary='summary-3', body='body-3',
                  category='cat-3'),
    ])
    def test_output(self, mock_NotificationServer, mock_open):
        # open() is patched to return a MyBytesIO, whose close() captures
        # the contents; the file must be opened in append mode.
        notifications.file_notifier('file', 'hub')
        mock_open.assert_called_once_with('file', 'a')
        mock_NotificationServer.assert_called_once_with('hub', None, True)
        self.assertEqual(
            'ID notify-1, urgency low\n'
            'Application: application-1\n'
            '    Summary: summary-1\n'
            '       Body: body-1\n'
            '   Category: cat-1\n'
            'ID notify-2, urgency normal\n'
            'Application: application-2\n'
            '    Summary: summary-2\n'
            '       Body: body-2\n'
            '   Category: None\n'
            'ID notify-3, urgency critical\n'
            'Application: application-3\n'
            '    Summary: summary-3\n'
            '       Body: body-3\n'
            '   Category: cat-3\n',
            mock_open.return_value.contents)
class ValidateSubsTest(unittest.TestCase):
    """Tests for notifications._validate_subs() template validation."""
    def test_no_substitutions(self):
        # Plain text with no fields passes through unchanged.
        exemplar = 'this is a test'
        result = notifications._validate_subs(exemplar)
        self.assertEqual(exemplar, result)
    def test_known_substitutions(self):
        # All recognized field names are accepted.
        exemplar = '{id} {application} {summary} {body} {category} {urgency}'
        result = notifications._validate_subs(exemplar)
        self.assertEqual(exemplar, result)
    def test_substitutions_passthrough(self):
        # Conversion and format specs on known fields are preserved.
        exemplar = '{id} {id!r} {id: ^23s}'
        result = notifications._validate_subs(exemplar)
        self.assertEqual(exemplar, result)
    def test_bad_char(self):
        # A stray '{' is a template error.
        self.assertRaises(ValueError, notifications._validate_subs,
                          'foo { bar')
    def test_bad_field(self):
        # Unrecognized field names are rejected.
        self.assertRaises(ValueError, notifications._validate_subs,
                          '{unknown}')
class ScriptNotifierTest(unittest.TestCase):
    """Tests for notifications.script_notifier() command invocation."""
    @mock.patch.object(sys, 'stderr', io.BytesIO())
    @mock.patch('subprocess.call')
    @mock.patch.object(notifications, 'NotificationServer', return_value=[
        mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
                  app_name='application-1', summary='summary-1', body='body-1',
                  category='cat-1'),
        mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
                  app_name='application-2', summary='summary-2', body='body-2',
                  category=None),
        mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
                  app_name='application-3', summary='summary-3', body='body-3',
                  category='cat-3'),
    ])
    def test_basic(self, mock_NotificationServer, mock_call):
        # Each notification triggers one subprocess.call with template
        # fields substituted; a None category renders as the empty string.
        notifications.script_notifier([
            'command', 'id={id}', 'application={application}',
            'summary={summary}', 'body={body}', 'category={category}',
            'urgency={urgency}',
        ], 'hub')
        mock_NotificationServer.assert_called_once_with('hub', None, True)
        self.assertEqual('', sys.stderr.getvalue())
        mock_call.assert_has_calls([
            mock.call([
                'command', 'id=notify-1', 'application=application-1',
                'summary=summary-1', 'body=body-1', 'category=cat-1',
                'urgency=low',
            ]),
            mock.call([
                'command', 'id=notify-2', 'application=application-2',
                'summary=summary-2', 'body=body-2', 'category=',
                'urgency=normal',
            ]),
            mock.call([
                'command', 'id=notify-3', 'application=application-3',
                'summary=summary-3', 'body=body-3', 'category=cat-3',
                'urgency=critical',
            ]),
        ])
    @mock.patch.object(sys, 'stderr', io.BytesIO())
    @mock.patch('subprocess.call', side_effect=TestException('bad command'))
    @mock.patch.object(notifications, 'NotificationServer', return_value=[
        mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
                  app_name='application-1', summary='summary-1', body='body-1',
                  category='cat-1'),
        mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
                  app_name='application-2', summary='summary-2', body='body-2',
                  category=None),
        mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
                  app_name='application-3', summary='summary-3', body='body-3',
                  category='cat-3'),
    ])
    def test_error(self, mock_NotificationServer, mock_call):
        # Failures of subprocess.call are reported to stderr but do not
        # abort processing of the remaining notifications.
        notifications.script_notifier([
            'command', 'id={id}', 'application={application}',
            'summary={summary}', 'body={body}', 'category={category}',
            'urgency={urgency}',
        ], 'hub')
        mock_NotificationServer.assert_called_once_with('hub', None, True)
        self.assertEqual('Failed to call command: bad command\n'
                         'Failed to call command: bad command\n'
                         'Failed to call command: bad command\n',
                         sys.stderr.getvalue())
        mock_call.assert_has_calls([
            mock.call([
                'command', 'id=notify-1', 'application=application-1',
                'summary=summary-1', 'body=body-1', 'category=cat-1',
                'urgency=low',
            ]),
            mock.call([
                'command', 'id=notify-2', 'application=application-2',
                'summary=summary-2', 'body=body-2', 'category=',
                'urgency=normal',
            ]),
            mock.call([
                'command', 'id=notify-3', 'application=application-3',
                'summary=summary-3', 'body=body-3', 'category=cat-3',
                'urgency=critical',
            ]),
        ])
| 42.3225 | 79 | 0.647528 |
import io
import signal
import sys
import unittest
import mock
from heyu import notifications
from heyu import protocol
from heyu import util
class TestException(Exception):
    """Sentinel exception raised from mocks to exercise error paths."""
    pass
class NotificationServerTest(unittest.TestCase):
def _signal_test(self, notifier_server, mock_signal):
signals = [
mock.call(signal.SIGINT, notifier_server.stop),
mock.call(signal.SIGTERM, notifier_server.stop),
]
if hasattr(signal, 'SIGUSR1'):
signals.append(mock.call(signal.SIGUSR1, notifier_server.shutdown))
mock_signal.assert_has_calls(signals)
self.assertEqual(len(signals), mock_signal.call_count)
@mock.patch.object(sys, 'argv', ['/bin/notifier.py'])
@mock.patch('tendril.get_manager', return_value='manager')
@mock.patch('gevent.signal')
@mock.patch('uuid.uuid4', return_value='some-uuid')
@mock.patch('gevent.event.Event', return_value='event')
@mock.patch.object(util, 'cert_wrapper', return_value='wrapper')
@mock.patch.object(util, 'outgoing_endpoint', return_value='endpoint')
def test_init_basic(self, mock_outgoing_endpoint, mock_cert_wrapper,
mock_Event, mock_uuid4, mock_signal, mock_get_manager):
result = notifications.NotificationServer('hub')
self.assertEqual('hub', result._hub)
self.assertEqual('manager', result._manager)
self.assertEqual('wrapper', result._wrapper)
self.assertEqual('notifier.py', result._app_name)
self.assertEqual('some-uuid', result._app_id)
self.assertEqual(None, result._hub_app)
self.assertEqual([], result._notifications)
self.assertEqual('event', result._notify_event)
mock_outgoing_endpoint.assert_called_once_with('hub')
mock_get_manager.assert_called_once_with('tcp', 'endpoint')
mock_cert_wrapper.assert_called_once_with(
None, 'notifier', secure=True)
self._signal_test(result, mock_signal)
@mock.patch.object(sys, 'argv', ['/bin/notifier.py'])
@mock.patch('tendril.get_manager', return_value='manager')
@mock.patch('gevent.signal')
@mock.patch('uuid.uuid4', return_value='some-uuid')
@mock.patch('gevent.event.Event', return_value='event')
@mock.patch.object(util, 'cert_wrapper', return_value='wrapper')
@mock.patch.object(util, 'outgoing_endpoint', return_value='endpoint')
def test_init_alt(self, mock_outgoing_endpoint, mock_cert_wrapper,
mock_Event, mock_uuid4, mock_signal, mock_get_manager):
result = notifications.NotificationServer('hub', 'cert_conf', False,
'app', 'app-uuid')
self.assertEqual('hub', result._hub)
self.assertEqual('manager', result._manager)
self.assertEqual('wrapper', result._wrapper)
self.assertEqual('app', result._app_name)
self.assertEqual('app-uuid', result._app_id)
self.assertEqual(None, result._hub_app)
self.assertEqual([], result._notifications)
self.assertEqual('event', result._notify_event)
mock_outgoing_endpoint.assert_called_once_with('hub')
mock_get_manager.assert_called_once_with('tcp', 'endpoint')
mock_cert_wrapper.assert_called_once_with(
'cert_conf', 'notifier', secure=False)
self._signal_test(result, mock_signal)
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationServer, 'start')
def test_iter_running(self, mock_start, mock_init):
server = notifications.NotificationServer()
server._hub_app = 'application'
result = iter(server)
self.assertEqual(server, result)
self.assertFalse(mock_start.called)
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationServer, 'start')
def test_iter_nonrunning(self, mock_start, mock_init):
server = notifications.NotificationServer()
server._hub_app = None
result = iter(server)
self.assertEqual(server, result)
mock_start.assert_called_once_with()
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(sys, 'exit', side_effect=TestException())
def test_next_notification(self, mock_exit, mock_init):
server = notifications.NotificationServer()
server._notifications = ['notification']
server._notify_event = mock.Mock()
server._hub_app = None
result = server.next()
self.assertEqual('notification', result)
self.assertEqual(0, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(sys, 'exit', side_effect=TestException())
def test_next_exit(self, mock_exit, mock_init):
server = notifications.NotificationServer()
server._notifications = [None]
server._notify_event = mock.Mock()
server._hub_app = None
self.assertRaises(TestException, server.next)
mock_exit.assert_called_once_with()
self.assertEqual(0, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(sys, 'exit', side_effect=TestException())
def test_next_empty_stop(self, mock_exit, mock_init):
server = notifications.NotificationServer()
server._notifications = []
server._notify_event = mock.Mock()
server._hub_app = None
self.assertRaises(StopIteration, server.next)
server._notify_event.assert_has_calls([
mock.call.clear(),
])
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(sys, 'exit', side_effect=TestException())
def test_next_empty_loop(self, mock_exit, mock_init):
server = notifications.NotificationServer()
server._notifications = []
server._notify_event = mock.Mock()
server._hub_app = 'app'
def fake_wait():
server._notifications.append('waited')
server._notify_event.wait.side_effect = fake_wait
result = server.next()
self.assertEqual('waited', result)
server._notify_event.assert_has_calls([
mock.call.clear(),
mock.call.wait(),
])
self.assertEqual(2, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
@mock.patch.object(notifications, 'NotificationApplication',
return_value='app')
def test_acceptor(self, mock_NotificationApplication, mock_init):
server = notifications.NotificationServer()
server._hub_app = True
server._app_name = 'app_name'
server._app_id = 'app_id'
result = server._acceptor('tendril')
self.assertEqual('app', result)
mock_NotificationApplication.assert_called_once_with(
'tendril', server, 'app_name', 'app_id')
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_start_running(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = 'running'
server._manager = mock.Mock()
server._hub = 'hub'
server._wrapper = 'wrapper'
self.assertRaises(ValueError, server.start)
self.assertEqual('running', server._hub_app)
self.assertEqual(0, len(server._manager.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_start_stopped(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = None
server._manager = mock.Mock()
server._hub = 'hub'
server._wrapper = 'wrapper'
server.start()
self.assertEqual(True, server._hub_app)
server._manager.assert_has_calls([
mock.call.start(),
mock.call.connect('hub', server._acceptor, 'wrapper'),
])
self.assertEqual(2, len(server._manager.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_stop_stopped(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = None
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.stop()
self.assertEqual(None, server._hub_app)
self.assertEqual([], server._notifications)
self.assertEqual(0, len(server._manager.method_calls))
self.assertEqual(0, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_stop_simple(self, mock_init):
app = mock.Mock()
server = notifications.NotificationServer()
server._hub_app = app
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.stop()
self.assertEqual(None, server._hub_app)
self.assertEqual([], server._notifications)
server._manager.stop.assert_called_once_with()
self.assertEqual(1, len(server._manager.method_calls))
app.disconnect.assert_called_once_with()
self.assertEqual(1, len(app.method_calls))
server._notify_event.set.assert_called_once_with()
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_stop_connecting(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = True
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.stop()
self.assertEqual(None, server._hub_app)
self.assertEqual([], server._notifications)
server._manager.stop.assert_called_once_with()
self.assertEqual(1, len(server._manager.method_calls))
server._notify_event.set.assert_called_once_with()
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_stop_sentinel(self, mock_init):
app = mock.Mock()
server = notifications.NotificationServer()
server._hub_app = app
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.stop('signal', 'arguments')
self.assertEqual(None, server._hub_app)
self.assertEqual([None], server._notifications)
server._manager.stop.assert_called_once_with()
self.assertEqual(1, len(server._manager.method_calls))
app.disconnect.assert_called_once_with()
self.assertEqual(1, len(app.method_calls))
server._notify_event.set.assert_called_once_with()
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_shutdown_stopped(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = None
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.shutdown()
self.assertEqual(None, server._hub_app)
self.assertEqual([], server._notifications)
self.assertEqual(0, len(server._manager.method_calls))
self.assertEqual(0, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_shutdown_running(self, mock_init):
server = notifications.NotificationServer()
server._hub_app = 'running'
server._manager = mock.Mock()
server._notifications = []
server._notify_event = mock.Mock()
server.shutdown()
self.assertEqual(None, server._hub_app)
self.assertEqual([None], server._notifications)
server._manager.shutdown.assert_called_once_with()
self.assertEqual(1, len(server._manager.method_calls))
server._notify_event.set.assert_called_once_with()
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_notify(self, mock_init):
server = notifications.NotificationServer()
server._notifications = []
server._notify_event = mock.Mock()
server.notify('notification')
self.assertEqual(['notification'], server._notifications)
server._notify_event.set.assert_called_once_with()
self.assertEqual(1, len(server._notify_event.method_calls))
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_app_name(self, mock_init):
server = notifications.NotificationServer()
server._app_name = 'app_name'
self.assertEqual('app_name', server.app_name)
@mock.patch.object(notifications.NotificationServer, '__init__',
return_value=None)
def test_app_id(self, mock_init):
server = notifications.NotificationServer()
server._app_id = 'app_id'
self.assertEqual('app_id', server.app_id)
class NotificationApplicationTest(unittest.TestCase):
@mock.patch('tendril.Application.__init__', return_value=None)
@mock.patch('tendril.COBSFramer', return_value='framer')
@mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
'to_frame.return_value': 'some frame',
}))
@mock.patch.object(notifications.NotificationApplication, 'send_frame')
def test_init(self, mock_send_frame, mock_Message,
mock_COBSFramer, mock_init):
parent = mock.Mock()
result = notifications.NotificationApplication(parent, 'server',
'app_name', 'app_id')
self.assertEqual('server', result.server)
self.assertEqual('app_name', result.app_name)
self.assertEqual('app_id', result.app_id)
self.assertEqual('framer', parent.framers)
mock_init.assert_called_once_with(parent)
mock_COBSFramer.assert_called_once_with(True)
mock_Message.assert_called_once_with('subscribe')
mock_Message.return_value.to_frame.assert_called_once_with()
mock_send_frame.assert_called_once_with('some frame')
@mock.patch.object(protocol.Message, 'from_frame',
side_effect=ValueError('failed to decode'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_decodeerror(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
mock_notify.assert_called_once_with(
'Failed To Parse Server Message',
'Unable to parse a message from the server: failed to decode',
notifications.ERROR)
mock_disconnect.assert_called_once_with()
self.assertFalse(mock_closed.called)
app.server.stop.assert_called_once_with()
self.assertFalse(app.server.notify.called)
@mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
msg_type='unknown'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_unknownmsg(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
mock_notify.assert_called_once_with(
'Unknown Server Message',
'An unrecognized server message of type "unknown" was received.',
notifications.ERROR)
self.assertFalse(mock_disconnect.called)
self.assertFalse(mock_closed.called)
self.assertFalse(app.server.stop.called)
self.assertFalse(app.server.notify.called)
@mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
msg_type='error', reason='some error'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_error(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
mock_notify.assert_called_once_with(
'Communication Error',
'An error occurred communicating with the HeyU hub: some error',
notifications.ERROR)
mock_disconnect.assert_called_once_with()
self.assertFalse(mock_closed.called)
app.server.stop.assert_called_once_with()
self.assertFalse(app.server.notify.called)
@mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
msg_type='goodbye'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_goodbye(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
self.assertFalse(mock_notify.called)
mock_disconnect.assert_called_once_with()
mock_closed.assert_called_once_with(None)
self.assertFalse(app.server.stop.called)
self.assertFalse(app.server.notify.called)
@mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
msg_type='subscribed'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_subscribed(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
mock_notify.assert_called_once_with(
'Connection Established',
'The connection to the HeyU hub has been established.',
notifications.CONNECTED)
self.assertFalse(mock_disconnect.called)
self.assertFalse(mock_closed.called)
self.assertFalse(app.server.stop.called)
self.assertFalse(app.server.notify.called)
@mock.patch.object(protocol.Message, 'from_frame', return_value=mock.Mock(
msg_type='notify'))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
@mock.patch.object(notifications.NotificationApplication, 'disconnect')
@mock.patch.object(notifications.NotificationApplication, 'closed')
def test_recv_frame_notify(self, mock_closed, mock_disconnect,
mock_notify, mock_init, mock_from_frame):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.recv_frame('test')
mock_from_frame.assert_called_once_with('test')
self.assertFalse(mock_notify.called)
self.assertFalse(mock_disconnect.called)
self.assertFalse(mock_closed.called)
self.assertFalse(app.server.stop.called)
app.server.notify.assert_called_once_with(mock_from_frame.return_value)
@mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
'to_frame.return_value': 'frame',
}))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'send_frame')
@mock.patch.object(notifications.NotificationApplication, 'close')
def test_disconnect_success(self, mock_close, mock_send_frame, mock_init,
mock_Message):
app = notifications.NotificationApplication()
app.disconnect()
mock_Message.assert_called_once_with('goodbye')
mock_Message.return_value.to_frame.assert_called_once_with()
mock_send_frame.assert_called_once_with('frame')
mock_close.assert_called_once_with()
@mock.patch.object(protocol, 'Message', return_value=mock.Mock(**{
'to_frame.return_value': 'frame',
}))
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'send_frame',
side_effect=TestException('test'))
@mock.patch.object(notifications.NotificationApplication, 'close')
def test_disconnect_failure(self, mock_close, mock_send_frame, mock_init,
mock_Message):
app = notifications.NotificationApplication()
app.disconnect()
mock_Message.assert_called_once_with('goodbye')
mock_Message.return_value.to_frame.assert_called_once_with()
mock_send_frame.assert_called_once_with('frame')
mock_close.assert_called_once_with()
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
@mock.patch.object(notifications.NotificationApplication, 'notify')
def test_closed(self, mock_notify, mock_init):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.closed(None)
mock_notify.assert_called_once_with(
'Connection Closed',
'The connection to the HeyU hub has been closed.',
notifications.DISCONNECTED)
app.server.stop.assert_called_once_with()
@mock.patch.object(protocol, 'Message', return_value='notification')
@mock.patch.object(notifications.NotificationApplication, '__init__',
return_value=None)
def test_notify(self, mock_init, mock_Message):
app = notifications.NotificationApplication()
app.server = mock.Mock()
app.app_name = 'app_name'
app.app_id = 'app_id'
app.notify('summary', 'body', 'category')
mock_Message.assert_called_once_with(
'notify', summary='summary', body='body', category='category',
app_name='app_name', id='app_id')
app.server.notify.assert_called_once_with('notification')
class StdoutNotifierTest(unittest.TestCase):
@mock.patch.object(sys, 'stdout', io.BytesIO())
@mock.patch.object(notifications, 'NotificationServer', return_value=[
mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
app_name='application-1', summary='summary-1', body='body-1',
category='cat-1'),
mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
app_name='application-2', summary='summary-2', body='body-2',
category=None),
mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
app_name='application-3', summary='summary-3', body='body-3',
category='cat-3'),
])
def test_output(self, mock_NotificationServer):
notifications.stdout_notifier('hub')
mock_NotificationServer.assert_called_once_with('hub', None, True)
self.assertEqual(
'ID notify-1, urgency low\n'
'Application: application-1\n'
' Summary: summary-1\n'
' Body: body-1\n'
' Category: cat-1\n'
'\n'
'ID notify-2, urgency normal\n'
'Application: application-2\n'
' Summary: summary-2\n'
' Body: body-2\n'
' Category: None\n'
'\n'
'ID notify-3, urgency critical\n'
'Application: application-3\n'
' Summary: summary-3\n'
' Body: body-3\n'
' Category: cat-3\n'
'\n'
'Notifications received: 3\n',
sys.stdout.getvalue())
class MyBytesIO(io.BytesIO):
def close(self):
self.contents = self.getvalue()
super(MyBytesIO, self).close()
class FileNotifierTest(unittest.TestCase):
@mock.patch('__builtin__.open', return_value=MyBytesIO())
@mock.patch.object(notifications, 'NotificationServer', return_value=[
mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
app_name='application-1', summary='summary-1', body='body-1',
category='cat-1'),
mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
app_name='application-2', summary='summary-2', body='body-2',
category=None),
mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
app_name='application-3', summary='summary-3', body='body-3',
category='cat-3'),
])
def test_output(self, mock_NotificationServer, mock_open):
notifications.file_notifier('file', 'hub')
mock_open.assert_called_once_with('file', 'a')
mock_NotificationServer.assert_called_once_with('hub', None, True)
self.assertEqual(
'ID notify-1, urgency low\n'
'Application: application-1\n'
' Summary: summary-1\n'
' Body: body-1\n'
' Category: cat-1\n'
'ID notify-2, urgency normal\n'
'Application: application-2\n'
' Summary: summary-2\n'
' Body: body-2\n'
' Category: None\n'
'ID notify-3, urgency critical\n'
'Application: application-3\n'
' Summary: summary-3\n'
' Body: body-3\n'
' Category: cat-3\n',
mock_open.return_value.contents)
class ValidateSubsTest(unittest.TestCase):
def test_no_substitutions(self):
exemplar = 'this is a test'
result = notifications._validate_subs(exemplar)
self.assertEqual(exemplar, result)
def test_known_substitutions(self):
exemplar = '{id} {application} {summary} {body} {category} {urgency}'
result = notifications._validate_subs(exemplar)
self.assertEqual(exemplar, result)
def test_substitutions_passthrough(self):
exemplar = '{id} {id!r} {id: ^23s}'
result = notifications._validate_subs(exemplar)
self.assertEqual(exemplar, result)
def test_bad_char(self):
self.assertRaises(ValueError, notifications._validate_subs,
'foo { bar')
def test_bad_field(self):
self.assertRaises(ValueError, notifications._validate_subs,
'{unknown}')
class ScriptNotifierTest(unittest.TestCase):
@mock.patch.object(sys, 'stderr', io.BytesIO())
@mock.patch('subprocess.call')
@mock.patch.object(notifications, 'NotificationServer', return_value=[
mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
app_name='application-1', summary='summary-1', body='body-1',
category='cat-1'),
mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
app_name='application-2', summary='summary-2', body='body-2',
category=None),
mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
app_name='application-3', summary='summary-3', body='body-3',
category='cat-3'),
])
def test_basic(self, mock_NotificationServer, mock_call):
notifications.script_notifier([
'command', 'id={id}', 'application={application}',
'summary={summary}', 'body={body}', 'category={category}',
'urgency={urgency}',
], 'hub')
mock_NotificationServer.assert_called_once_with('hub', None, True)
self.assertEqual('', sys.stderr.getvalue())
mock_call.assert_has_calls([
mock.call([
'command', 'id=notify-1', 'application=application-1',
'summary=summary-1', 'body=body-1', 'category=cat-1',
'urgency=low',
]),
mock.call([
'command', 'id=notify-2', 'application=application-2',
'summary=summary-2', 'body=body-2', 'category=',
'urgency=normal',
]),
mock.call([
'command', 'id=notify-3', 'application=application-3',
'summary=summary-3', 'body=body-3', 'category=cat-3',
'urgency=critical',
]),
])
@mock.patch.object(sys, 'stderr', io.BytesIO())
@mock.patch('subprocess.call', side_effect=TestException('bad command'))
@mock.patch.object(notifications, 'NotificationServer', return_value=[
mock.Mock(id='notify-1', urgency=protocol.URGENCY_LOW,
app_name='application-1', summary='summary-1', body='body-1',
category='cat-1'),
mock.Mock(id='notify-2', urgency=protocol.URGENCY_NORMAL,
app_name='application-2', summary='summary-2', body='body-2',
category=None),
mock.Mock(id='notify-3', urgency=protocol.URGENCY_CRITICAL,
app_name='application-3', summary='summary-3', body='body-3',
category='cat-3'),
])
def test_error(self, mock_NotificationServer, mock_call):
notifications.script_notifier([
'command', 'id={id}', 'application={application}',
'summary={summary}', 'body={body}', 'category={category}',
'urgency={urgency}',
], 'hub')
mock_NotificationServer.assert_called_once_with('hub', None, True)
self.assertEqual('Failed to call command: bad command\n'
'Failed to call command: bad command\n'
'Failed to call command: bad command\n',
sys.stderr.getvalue())
mock_call.assert_has_calls([
mock.call([
'command', 'id=notify-1', 'application=application-1',
'summary=summary-1', 'body=body-1', 'category=cat-1',
'urgency=low',
]),
mock.call([
'command', 'id=notify-2', 'application=application-2',
'summary=summary-2', 'body=body-2', 'category=',
'urgency=normal',
]),
mock.call([
'command', 'id=notify-3', 'application=application-3',
'summary=summary-3', 'body=body-3', 'category=cat-3',
'urgency=critical',
]),
])
| true | true |
f721f976f94d622fc2a6a2309a5483d8547b33ec | 4,780 | py | Python | RecoLocalCalo/HcalRecAlgos/test/test_RecHitReflagger_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoLocalCalo/HcalRecAlgos/test/test_RecHitReflagger_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoLocalCalo/HcalRecAlgos/test/test_RecHitReflagger_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
maxevents=10
isMC=False
#isMC=True
process = cms.Process('TEST')
#process.load('JetMETAnalysis.PromptAnalysis.ntuple_cff')
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration/StandardSequences/GeometryExtended_cff')
process.load('Configuration/StandardSequences/Reconstruction_cff')
process.load("Configuration/StandardSequences/FrontierConditions_GlobalTag_cff")
# GLOBAL TAGS: REPLACE WITH WHATEVER IS APPROPRIATE FOR YOUR WORK!
#MC (Summer09-V16D_900GeV-v1)
if (isMC):
process.GlobalTag.globaltag ='START3X_V16D::All'
#DATA (Feb9ReReco)
else:
process.GlobalTag.globaltag ='GR09_R_34X_V5::All'
#process.TFileService = cms.Service("TFileService",
#fileName = cms.string( THISROOTFILE ),
#closeFileFast = cms.untracked.bool(True)
#)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxevents) )
process.source = cms.Source (
"PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/BeamCommissioning09/ZeroBias/RECO/Feb9ReReco_v2/0027/F08E9178-7016-DF11-82B4-00163E0101D2.root',
),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
secondaryFileNames = cms.untracked.vstring()
)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
process.MessageLogger.cerr.default.limit = 100
# summary
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.load("hcalrechitreflagger_cfi")
#process.hcalrecoReflagged = cms.EDProducer("RecHitReflagger")
#process.hcalrechitReflagger.debug=4
process.towerMakerPET = process.towerMaker.clone()
process.towerMakerPET.hfInput = cms.InputTag("hcalrechitReflagger")
process.metPET = process.met.clone()
process.metPET.src = cms.InputTag("towerMakerPET")
process.ak5CaloJetsPET = process.ak5CaloJets.clone()
process.ak5CaloJetsPET.src = cms.InputTag("towerMakerPET")
# Output definition
process.output = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
#outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('output_file.root')
)
#New SeverityLevelComputer that forces RecHits with UserDefinedBit0 set to be excluded from new rechit collection
#process.hcalRecAlgos.SeverityLevels.append(cms.PSet(Level = cms.int32(2),
# RecHitFlags = cms.vstring('HFPET','HFS9S1'),
# ChannelStatus = cms.vstring('')))
print("STARTING SL:")
for i in process.hcalRecAlgos.SeverityLevels:
print(i)
severitylevels=[] # Store all severity levels
AddedFlag=False
NewSevLevel=10
for i in range(len(process.hcalRecAlgos.SeverityLevels)): # loop over each severity level
severitylevels.append(process.hcalRecAlgos.SeverityLevels[i].Level.value()) # store severity value
flagvec=process.hcalRecAlgos.SeverityLevels[i].RecHitFlags.value() # Get vector of rechit flags for this severity level
flaglevel=process.hcalRecAlgos.SeverityLevels[i].Level.value()
if "UserDefinedBit0" in flagvec and flaglevel!=10: # remove HFLongShort from its default position
flagvec.remove("UserDefinedBit0")
process.hcalRecAlgos.SeverityLevels[i].RecHitFlags=flagvec
print("Removed 'UserDefinedBit0' from severity level %i"%(process.hcalRecAlgos.SeverityLevels[i].Level.value()))
if (flaglevel==NewSevLevel): # Set UserDefinedBit0 severity to 10, which will exclude such rechits from CaloTower
print("FOUND LEVEL %i!"%NewSevLevel)
if "UserDefinedBit0" not in flagvec:
if (flagvec!=['']):
flagvec.append("UserDefinedBit0")
else:
flagvec=["UserDefinedBit0"]
process.hcalRecAlgos.SeverityLevels[i].RecHitFlags=flagvec
AddedFlag=True
if (AddedFlag==False):
print("Found no Severity Level = %i; Adding it now"%NewSevLevel)
process.hcalRecAlgos.SeverityLevels.append(cms.PSet(Level=cms.int32(NewSevLevel),
RecHitFlags=cms.vstring("UserDefinedBit0"),
ChannelStatus=cms.vstring("")))
print("New Severity Levels:")
for i in process.hcalRecAlgos.SeverityLevels:
print(i)
#print process.hbhereco.firstSample, " FIRST"
process.reflagging_step = cms.Path(process.hcalrechitReflagger)
process.reconstruction_step = cms.Path(process.towerMakerPET*(process.metPET+process.ak5CaloJetsPET))
process.out_step = cms.EndPath(process.output)
process.schedule = cms.Schedule(process.reflagging_step,process.reconstruction_step,process.out_step)
| 40.854701 | 124 | 0.730544 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
maxevents=10
isMC=False
process = cms.Process('TEST')
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration/StandardSequences/GeometryExtended_cff')
process.load('Configuration/StandardSequences/Reconstruction_cff')
process.load("Configuration/StandardSequences/FrontierConditions_GlobalTag_cff")
if (isMC):
process.GlobalTag.globaltag ='START3X_V16D::All'
else:
process.GlobalTag.globaltag ='GR09_R_34X_V5::All'
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxevents) )
process.source = cms.Source (
"PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/BeamCommissioning09/ZeroBias/RECO/Feb9ReReco_v2/0027/F08E9178-7016-DF11-82B4-00163E0101D2.root',
),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
secondaryFileNames = cms.untracked.vstring()
)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
process.MessageLogger.cerr.default.limit = 100
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.load("hcalrechitreflagger_cfi")
process.towerMakerPET = process.towerMaker.clone()
process.towerMakerPET.hfInput = cms.InputTag("hcalrechitReflagger")
process.metPET = process.met.clone()
process.metPET.src = cms.InputTag("towerMakerPET")
process.ak5CaloJetsPET = process.ak5CaloJets.clone()
process.ak5CaloJetsPET.src = cms.InputTag("towerMakerPET")
process.output = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
fileName = cms.untracked.string('output_file.root')
)
print("STARTING SL:")
for i in process.hcalRecAlgos.SeverityLevels:
print(i)
severitylevels=[]
AddedFlag=False
NewSevLevel=10
for i in range(len(process.hcalRecAlgos.SeverityLevels)):
severitylevels.append(process.hcalRecAlgos.SeverityLevels[i].Level.value())
flagvec=process.hcalRecAlgos.SeverityLevels[i].RecHitFlags.value()
flaglevel=process.hcalRecAlgos.SeverityLevels[i].Level.value()
if "UserDefinedBit0" in flagvec and flaglevel!=10:
flagvec.remove("UserDefinedBit0")
process.hcalRecAlgos.SeverityLevels[i].RecHitFlags=flagvec
print("Removed 'UserDefinedBit0' from severity level %i"%(process.hcalRecAlgos.SeverityLevels[i].Level.value()))
if (flaglevel==NewSevLevel):
print("FOUND LEVEL %i!"%NewSevLevel)
if "UserDefinedBit0" not in flagvec:
if (flagvec!=['']):
flagvec.append("UserDefinedBit0")
else:
flagvec=["UserDefinedBit0"]
process.hcalRecAlgos.SeverityLevels[i].RecHitFlags=flagvec
AddedFlag=True
if (AddedFlag==False):
print("Found no Severity Level = %i; Adding it now"%NewSevLevel)
process.hcalRecAlgos.SeverityLevels.append(cms.PSet(Level=cms.int32(NewSevLevel),
RecHitFlags=cms.vstring("UserDefinedBit0"),
ChannelStatus=cms.vstring("")))
print("New Severity Levels:")
for i in process.hcalRecAlgos.SeverityLevels:
print(i)
process.reflagging_step = cms.Path(process.hcalrechitReflagger)
process.reconstruction_step = cms.Path(process.towerMakerPET*(process.metPET+process.ak5CaloJetsPET))
process.out_step = cms.EndPath(process.output)
process.schedule = cms.Schedule(process.reflagging_step,process.reconstruction_step,process.out_step)
| true | true |
f721f9a0a04d2003639f19899e390e586887dfde | 1,478 | py | Python | tests/test_ctc1.py | madhurkashyap/boundary_detection | f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3 | [
"MIT"
] | null | null | null | tests/test_ctc1.py | madhurkashyap/boundary_detection | f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3 | [
"MIT"
] | null | null | null | tests/test_ctc1.py | madhurkashyap/boundary_detection | f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 15:15:55 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import os
import sys
import logging
from keras.optimizers import Adam
prog = os.path.basename(__file__)
codedir = os.path.join(os.path.dirname(__file__),"..","code")
sys.path.append(codedir)
from Utils import initlog
from SpeechCorpus import Timit
from AcousticModels import bidi_ctc_lstm2
from TrainUtils import train_model
from AcousticDataGenerator import AcousticDataGenerator
corpus = Timit(root="C:\\Users\\nxa17016\\ML\\pyml\\RNN\\assignment3\\dataset")
corpus.split_validation();
corpus.report_statistics(folder='report/images',reptext=False);
adg = AcousticDataGenerator(corpus=corpus,ctc_mode=True,mbatch_size=32,
output='sequence',mode='grapheme',
mfcc_win=0.0125, mfcc_step=0.005);
adg.fit_train(n_samples=100);
model = bidi_ctc_lstm2(
input_dim=adg.feature_dim,
units1=100,
units2=100,
output_dim=adg.n_classes,
gpu=False,
batchnorm=True,
after_dropout=0.0,
);
train_model(model,adg.train_generator(),adg.valid_generator(),
'bidi_lstm2_ctc',
steps_per_epoch=adg.nb_train,
validation_steps=adg.nb_valid,
loss='ctc',
optimizer=Adam(),
epochs=5,
save_period=5);
X,y = next(adg.valid_generator())
yp = model.predict(X); | 29.56 | 80 | 0.654263 |
import os
import sys
import logging
from keras.optimizers import Adam
prog = os.path.basename(__file__)
codedir = os.path.join(os.path.dirname(__file__),"..","code")
sys.path.append(codedir)
from Utils import initlog
from SpeechCorpus import Timit
from AcousticModels import bidi_ctc_lstm2
from TrainUtils import train_model
from AcousticDataGenerator import AcousticDataGenerator
corpus = Timit(root="C:\\Users\\nxa17016\\ML\\pyml\\RNN\\assignment3\\dataset")
corpus.split_validation();
corpus.report_statistics(folder='report/images',reptext=False);
adg = AcousticDataGenerator(corpus=corpus,ctc_mode=True,mbatch_size=32,
output='sequence',mode='grapheme',
mfcc_win=0.0125, mfcc_step=0.005);
adg.fit_train(n_samples=100);
model = bidi_ctc_lstm2(
input_dim=adg.feature_dim,
units1=100,
units2=100,
output_dim=adg.n_classes,
gpu=False,
batchnorm=True,
after_dropout=0.0,
);
train_model(model,adg.train_generator(),adg.valid_generator(),
'bidi_lstm2_ctc',
steps_per_epoch=adg.nb_train,
validation_steps=adg.nb_valid,
loss='ctc',
optimizer=Adam(),
epochs=5,
save_period=5);
X,y = next(adg.valid_generator())
yp = model.predict(X); | true | true |
f721f9a94d7bd0380939cb8f34a40ef3e45f8ffa | 2,106 | py | Python | tensorflow_federated/python/core/impl/computation_impl_test.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | 5 | 2020-06-04T20:10:25.000Z | 2020-07-22T02:15:38.000Z | tensorflow_federated/python/core/impl/computation_impl_test.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | 7 | 2020-04-03T05:32:28.000Z | 2020-05-15T01:28:25.000Z | tensorflow_federated/python/core/impl/computation_impl_test.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | 2 | 2020-04-28T17:46:13.000Z | 2022-02-10T02:40:40.000Z | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import type_serialization
class ComputationImplTest(absltest.TestCase):
def test_something(self):
# TODO(b/113112108): Revise these tests after a more complete implementation
# is in place.
# At the moment, this should succeed, as both the computation body and the
# type are well-formed.
computation_impl.ComputationImpl(
pb.Computation(
**{
'type':
type_serialization.serialize_type(
computation_types.FunctionType(tf.int32, tf.int32)),
'intrinsic':
pb.Intrinsic(uri='whatever')
}), context_stack_impl.context_stack)
# This should fail, as the proto is not well-formed.
self.assertRaises(TypeError, computation_impl.ComputationImpl,
pb.Computation(), context_stack_impl.context_stack)
# This should fail, as "10" is not an instance of pb.Computation.
self.assertRaises(TypeError, computation_impl.ComputationImpl, 10,
context_stack_impl.context_stack)
if __name__ == '__main__':
absltest.main()
| 39 | 82 | 0.723172 |
from absl.testing import absltest
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import type_serialization
class ComputationImplTest(absltest.TestCase):
def test_something(self):
computation_impl.ComputationImpl(
pb.Computation(
**{
'type':
type_serialization.serialize_type(
computation_types.FunctionType(tf.int32, tf.int32)),
'intrinsic':
pb.Intrinsic(uri='whatever')
}), context_stack_impl.context_stack)
self.assertRaises(TypeError, computation_impl.ComputationImpl,
pb.Computation(), context_stack_impl.context_stack)
self.assertRaises(TypeError, computation_impl.ComputationImpl, 10,
context_stack_impl.context_stack)
if __name__ == '__main__':
absltest.main()
| true | true |
f721fa2c57691ec9594ce9f452796ed5cddcfa12 | 2,996 | py | Python | ui/sentiment.py | minminfly68/Song-recommendation-Project-CAPP-30122- | 9f97d6accdfd33c5bac267980b6c10d6d5b93bc7 | [
"MIT"
] | null | null | null | ui/sentiment.py | minminfly68/Song-recommendation-Project-CAPP-30122- | 9f97d6accdfd33c5bac267980b6c10d6d5b93bc7 | [
"MIT"
] | 6 | 2021-03-19T03:18:44.000Z | 2021-09-22T19:00:52.000Z | ui/sentiment.py | minminfly68/Song-recommendation-Project-CAPP-30122- | 9f97d6accdfd33c5bac267980b6c10d6d5b93bc7 | [
"MIT"
] | null | null | null | '''
Conduct Sentiment Analysis
Chun Hu, Yimin Li, Tianyue Niu
'''
import os
import json
import re
import pandas as pd
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from textblob import TextBlob
# turn off warnings
pd.set_option('mode.chained_assignment', None)
cwd = os.path.dirname(__file__)
top_10s_path = os.path.join(cwd, 'top10s.csv')
def merge_two_df(top_songs, lyrics):
'''
Input:
top_songs (pandas data frame): kaggle data
lyrics (json file): lyrics scraped
Output:
a merged data containing lyrics (pandas data frame)
'''
# merge two df
top_songs['lyrics'] = ''
for index, row in top_songs.iterrows():
tit = top_songs.title[index]
if tit in lyrics:
top_songs['lyrics'][index] = lyrics[tit]
return top_songs
def process_words(words, stop):
'''
Input:
words (list): a list of words
stop (list): extra stop words we want to remove
Output:
new_words (list): a list of normalized words
'''
lemmatizer = WordNetLemmatizer()
new_words = []
for word in words:
new_word = re.sub(r'[^\w\s]', '', word)
if new_word != '':
new_word = new_word.lower()
if new_word not in stop and new_word not in stopwords.words('english'):
new_word = lemmatizer.lemmatize(new_word, pos='v')
new_words.append(new_word)
return new_words
def add_sentiment(top_songs):
'''
Input:
top_songs (pandas df): raw version
Output:
top_songs (pandas df): with sentiment analysis result
'''
# tokenize words
top_songs['tokenized'] = top_songs['lyrics'].apply(\
lambda x: [word_tokenize(s) for s in sent_tokenize(x)])
# normalize words
top_songs['normalized'] = top_songs['tokenized']
stop = ['chorus', 'verse', 'intro', 'pre', 'outro', 'interlude']
for index, row in top_songs['tokenized'].items():
new_sent = []
for sent in row:
new_sent += process_words(sent, stop)
top_songs['normalized'][index] = new_sent
# calculate sentiment
top_songs['sentiment'] = ''
for index, row in top_songs.iterrows():
obj = TextBlob(' '.join(top_songs['normalized'][index]))
sentiment = obj.sentiment.polarity
top_songs['sentiment'][index] = sentiment
return top_songs
def create_final_top_songs ():
'''
Input:
None
Output:
top_songs (pandas df): final cleaned & processed data frame
'''
top_songs = pd.read_csv(top_10s_path)
with open('lyrics_file.json') as f:
lyrics = json.load(f)
top_songs = merge_two_df(top_songs, lyrics)
df = add_sentiment(top_songs)
df.to_csv('top_songs.csv')
return
if __name__ == "__main__":
create_final_top_songs()
| 25.176471 | 83 | 0.636515 |
import os
import json
import re
import pandas as pd
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from textblob import TextBlob
pd.set_option('mode.chained_assignment', None)
cwd = os.path.dirname(__file__)
top_10s_path = os.path.join(cwd, 'top10s.csv')
def merge_two_df(top_songs, lyrics):
top_songs['lyrics'] = ''
for index, row in top_songs.iterrows():
tit = top_songs.title[index]
if tit in lyrics:
top_songs['lyrics'][index] = lyrics[tit]
return top_songs
def process_words(words, stop):
lemmatizer = WordNetLemmatizer()
new_words = []
for word in words:
new_word = re.sub(r'[^\w\s]', '', word)
if new_word != '':
new_word = new_word.lower()
if new_word not in stop and new_word not in stopwords.words('english'):
new_word = lemmatizer.lemmatize(new_word, pos='v')
new_words.append(new_word)
return new_words
def add_sentiment(top_songs):
top_songs['tokenized'] = top_songs['lyrics'].apply(\
lambda x: [word_tokenize(s) for s in sent_tokenize(x)])
top_songs['normalized'] = top_songs['tokenized']
stop = ['chorus', 'verse', 'intro', 'pre', 'outro', 'interlude']
for index, row in top_songs['tokenized'].items():
new_sent = []
for sent in row:
new_sent += process_words(sent, stop)
top_songs['normalized'][index] = new_sent
top_songs['sentiment'] = ''
for index, row in top_songs.iterrows():
obj = TextBlob(' '.join(top_songs['normalized'][index]))
sentiment = obj.sentiment.polarity
top_songs['sentiment'][index] = sentiment
return top_songs
def create_final_top_songs ():
top_songs = pd.read_csv(top_10s_path)
with open('lyrics_file.json') as f:
lyrics = json.load(f)
top_songs = merge_two_df(top_songs, lyrics)
df = add_sentiment(top_songs)
df.to_csv('top_songs.csv')
return
if __name__ == "__main__":
create_final_top_songs()
| true | true |
f721fa58cff2f8d988c4fa6e76255de74891ffd2 | 2,117 | py | Python | libs/EXTERNAL/capnproto/doc/_plugins/capnp_lexer.py | brycejh/vtr-verilog-to-routing | f61da5eb2d4e008728a01def827d55a0e9f285d0 | [
"MIT"
] | 4,518 | 2017-06-06T15:33:15.000Z | 2022-03-31T16:43:23.000Z | doc/_plugins/capnp_lexer.py | seanwallawalla-forks/capnproto | 8009588ff84cbdf233f6d23d1d507462b050b427 | [
"MIT"
] | 1,399 | 2015-07-24T22:09:09.000Z | 2022-03-29T06:22:48.000Z | doc/_plugins/capnp_lexer.py | seanwallawalla-forks/capnproto | 8009588ff84cbdf233f6d23d1d507462b050b427 | [
"MIT"
] | 492 | 2017-06-07T08:40:53.000Z | 2022-03-30T20:57:05.000Z | #! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed|bulk|realtime)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^#@=:$a-zA-Z0-9_]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '#pop')
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
(r'', Name.Class, '#pop')
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '#pop')
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
(r'', Literal, '#pop')
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '#pop')
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
(r'', Name.Attribute, '#pop')
],
}
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name = "CapnpPygmentsLexer",
version = "0.1",
packages = find_packages(),
py_modules = [ 'capnp_lexer' ],
entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
| 32.569231 | 128 | 0.421351 |
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
tokens = {
'root': [
(r'
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed|bulk|realtime)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '
(r'[])]', Name.Class, '
(r'', Name.Class, '
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '
(r'[])]', Literal, '
(r'', Literal, '
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '
(r'[])]', Name.Attribute, '
(r'', Name.Attribute, '
],
}
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name = "CapnpPygmentsLexer",
version = "0.1",
packages = find_packages(),
py_modules = [ 'capnp_lexer' ],
entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
| true | true |
f721fc1798358232695a6e8277cd43f94a436dc0 | 875 | gyp | Python | tools/android/memconsumer/memconsumer.gyp | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 5 | 2015-04-30T00:13:21.000Z | 2019-07-10T02:17:24.000Z | tools/android/memconsumer/memconsumer.gyp | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | null | null | null | tools/android/memconsumer/memconsumer.gyp | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 5 | 2016-12-23T04:21:10.000Z | 2020-06-18T13:52:33.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'memconsumer',
'type': 'none',
'dependencies': [
'memconsumer_apk',
],
},
{
'target_name': 'memconsumer_apk',
'type': 'none',
'variables': {
'apk_name': 'MemConsumer',
'java_in_dir': 'java',
'resource_dir': 'java/res',
'native_lib_target': 'libmemconsumer',
},
'dependencies': [
'libmemconsumer',
],
'includes': [ '../../../build/java_apk.gypi' ],
},
{
'target_name': 'libmemconsumer',
'type': 'shared_library',
'sources': [
'memconsumer_hook.cc',
],
'libraries': [
'-llog',
],
},
],
}
| 21.875 | 72 | 0.512 |
{
'targets': [
{
'target_name': 'memconsumer',
'type': 'none',
'dependencies': [
'memconsumer_apk',
],
},
{
'target_name': 'memconsumer_apk',
'type': 'none',
'variables': {
'apk_name': 'MemConsumer',
'java_in_dir': 'java',
'resource_dir': 'java/res',
'native_lib_target': 'libmemconsumer',
},
'dependencies': [
'libmemconsumer',
],
'includes': [ '../../../build/java_apk.gypi' ],
},
{
'target_name': 'libmemconsumer',
'type': 'shared_library',
'sources': [
'memconsumer_hook.cc',
],
'libraries': [
'-llog',
],
},
],
}
| true | true |
f721fc81e52074ea25597094fc8f790020e21816 | 803 | py | Python | ros2_sub/setup.py | ipa-rar/ros2_practice_workspace | 6329085df960eb4e2cc849c7ee527b99be11571e | [
"MIT"
] | null | null | null | ros2_sub/setup.py | ipa-rar/ros2_practice_workspace | 6329085df960eb4e2cc849c7ee527b99be11571e | [
"MIT"
] | null | null | null | ros2_sub/setup.py | ipa-rar/ros2_practice_workspace | 6329085df960eb4e2cc849c7ee527b99be11571e | [
"MIT"
] | null | null | null | import os
from glob import glob
from setuptools import setup
package_name = 'ros2_sub'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name), glob('launch/*.py')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='ragesh_ramachandran',
maintainer_email='ragesh.ramachandran@ipa.fraunhofer.de',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'number_subscriber = ros2_sub.main:main'
],
},
)
| 26.766667 | 67 | 0.638854 | import os
from glob import glob
from setuptools import setup
package_name = 'ros2_sub'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name), glob('launch/*.py')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='ragesh_ramachandran',
maintainer_email='ragesh.ramachandran@ipa.fraunhofer.de',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'number_subscriber = ros2_sub.main:main'
],
},
)
| true | true |
f721fd69cb07522a89aac4665560e173501eb593 | 1,527 | py | Python | tests/h/oauth/tokens_test.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | null | null | null | tests/h/oauth/tokens_test.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | null | null | null | tests/h/oauth/tokens_test.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | null | null | null | from unittest import mock
import pytest
from oauthlib.common import Request as OAuthRequest
from h.oauth.tokens import BearerToken
class TestBearerToken:
@pytest.mark.parametrize(
"attr",
[
"request_validator",
"token_generator",
"expires_in",
"refresh_token_generator",
"refresh_token_expires_in",
],
)
def test_init_sets_instance_vars(self, attr):
value = mock.Mock()
token = BearerToken(**{attr: value})
assert getattr(token, attr) == value
def test_create_token_sets_refresh_token_expires_in(self, oauth_request):
value = mock.Mock()
token = BearerToken(
request_validator=mock.Mock(), refresh_token_expires_in=value
)
assert oauth_request.extra_credentials is None
token.create_token(oauth_request)
assert oauth_request.extra_credentials.get("refresh_token_expires_in") == value
def test_create_token_does_not_override_extras(self, oauth_request):
value = mock.Mock()
token = BearerToken(
request_validator=mock.Mock(), refresh_token_expires_in=value
)
oauth_request.extra_credentials = {"foo": "bar"}
token.create_token(oauth_request)
assert oauth_request.extra_credentials.get("refresh_token_expires_in") == value
assert oauth_request.extra_credentials.get("foo") == "bar"
@pytest.fixture
def oauth_request(self):
return OAuthRequest("/")
| 31.163265 | 87 | 0.664702 | from unittest import mock
import pytest
from oauthlib.common import Request as OAuthRequest
from h.oauth.tokens import BearerToken
class TestBearerToken:
@pytest.mark.parametrize(
"attr",
[
"request_validator",
"token_generator",
"expires_in",
"refresh_token_generator",
"refresh_token_expires_in",
],
)
def test_init_sets_instance_vars(self, attr):
value = mock.Mock()
token = BearerToken(**{attr: value})
assert getattr(token, attr) == value
def test_create_token_sets_refresh_token_expires_in(self, oauth_request):
value = mock.Mock()
token = BearerToken(
request_validator=mock.Mock(), refresh_token_expires_in=value
)
assert oauth_request.extra_credentials is None
token.create_token(oauth_request)
assert oauth_request.extra_credentials.get("refresh_token_expires_in") == value
def test_create_token_does_not_override_extras(self, oauth_request):
value = mock.Mock()
token = BearerToken(
request_validator=mock.Mock(), refresh_token_expires_in=value
)
oauth_request.extra_credentials = {"foo": "bar"}
token.create_token(oauth_request)
assert oauth_request.extra_credentials.get("refresh_token_expires_in") == value
assert oauth_request.extra_credentials.get("foo") == "bar"
@pytest.fixture
def oauth_request(self):
return OAuthRequest("/")
| true | true |
f721ff9b0aca2610a6cdf14f769fcdcfe21e1189 | 1,242 | py | Python | Python/flask/l10n/app.py | saneravi/ML_Stuff | 74e1ed7ba9f4dccb555792315a14ba6071150304 | [
"MIT"
] | 209 | 2015-01-02T03:47:12.000Z | 2022-03-06T16:54:47.000Z | Python/flask/l10n/app.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | [
"MIT"
] | 3 | 2015-12-06T14:40:34.000Z | 2021-03-22T17:40:24.000Z | Python/flask/l10n/app.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | [
"MIT"
] | 114 | 2015-01-31T08:37:10.000Z | 2022-02-23T04:42:28.000Z | #!/usr/bin/env python
# core modules
import datetime
import flask_babel
# 3rd party modules
from flask import Flask, flash, render_template
from flask_babel import Babel, _
def format_datetime(value, format="medium"):
import flask_babel
if format == "full":
format = "EEEE, d. MMMM y 'at' HH:mm"
elif format == "medium":
format = "EE dd.MM.y HH:mm"
elif format == "date":
format = "dd.MM.y"
elif format == "isoformat":
return value.isoformat()
return flask_babel.dates.format_datetime(value, format)
app = Flask(__name__)
app.config["SECRET_KEY"] = "foo"
app.config["BABEL_TRANSLATION_DIRECTORIES"] = "./translations"
app.jinja_env.filters["datetime"] = format_datetime
babel = Babel(app)
@app.route("/")
def index():
print(_("Hello World!"))
flash(_("Hello World!"))
pubdate = datetime.datetime.now()
return render_template(
"main.html",
pubdate=pubdate,
author="John Smith",
date_localized=flask_babel.dates.format_date(pubdate),
)
@babel.localeselector
def get_locale():
print("foo")
return "de" # request.accept_languages.best_match(app.config["LANGUAGES"])
if __name__ == "__main__":
app.run(port=5000)
| 23 | 79 | 0.663446 |
import datetime
import flask_babel
from flask import Flask, flash, render_template
from flask_babel import Babel, _
def format_datetime(value, format="medium"):
import flask_babel
if format == "full":
format = "EEEE, d. MMMM y 'at' HH:mm"
elif format == "medium":
format = "EE dd.MM.y HH:mm"
elif format == "date":
format = "dd.MM.y"
elif format == "isoformat":
return value.isoformat()
return flask_babel.dates.format_datetime(value, format)
app = Flask(__name__)
app.config["SECRET_KEY"] = "foo"
app.config["BABEL_TRANSLATION_DIRECTORIES"] = "./translations"
app.jinja_env.filters["datetime"] = format_datetime
babel = Babel(app)
@app.route("/")
def index():
print(_("Hello World!"))
flash(_("Hello World!"))
pubdate = datetime.datetime.now()
return render_template(
"main.html",
pubdate=pubdate,
author="John Smith",
date_localized=flask_babel.dates.format_date(pubdate),
)
@babel.localeselector
def get_locale():
print("foo")
return "de"
if __name__ == "__main__":
app.run(port=5000)
| true | true |
f72200339a1f28f8cc91946cd3c16de8ee514ef5 | 1,306 | py | Python | test_models/data.py | OleguerCanal/transplanter | 854fa727747a484dedde9092eeee6884d7d1b44b | [
"MIT"
] | 1 | 2022-03-23T09:27:56.000Z | 2022-03-23T09:27:56.000Z | test_models/data.py | OleguerCanal/transplanter | 854fa727747a484dedde9092eeee6884d7d1b44b | [
"MIT"
] | null | null | null | test_models/data.py | OleguerCanal/transplanter | 854fa727747a484dedde9092eeee6884d7d1b44b | [
"MIT"
] | null | null | null | from typing import Optional
import pytorch_lightning as pl
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader, random_split
class CIFARDataModule(pl.LightningDataModule):
def __init__(self, data_dir: str = "./data", batch_size: int = 32):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
def setup(self, stage: Optional[str] = None):
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Train/val
cifar_full = CIFAR10(root=self.data_dir, train=True, download=True, transform=transform)
n_train, n_val = int(len(cifar_full)*0.9), int(len(cifar_full)*0.1)
self.cifar_train, self.cifar_val = random_split(cifar_full, [n_train, n_val])
# Test
self.cifar_test = CIFAR10(self.data_dir, train=False)
def train_dataloader(self):
return DataLoader(self.cifar_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.cifar_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
| 36.277778 | 96 | 0.69219 | from typing import Optional
import pytorch_lightning as pl
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader, random_split
class CIFARDataModule(pl.LightningDataModule):
def __init__(self, data_dir: str = "./data", batch_size: int = 32):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
def setup(self, stage: Optional[str] = None):
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
cifar_full = CIFAR10(root=self.data_dir, train=True, download=True, transform=transform)
n_train, n_val = int(len(cifar_full)*0.9), int(len(cifar_full)*0.1)
self.cifar_train, self.cifar_val = random_split(cifar_full, [n_train, n_val])
self.cifar_test = CIFAR10(self.data_dir, train=False)
def train_dataloader(self):
return DataLoader(self.cifar_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.cifar_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
| true | true |
f722004f53f62148f2709542630d070ec6adc442 | 560 | py | Python | outreach/migrations/0002_auto_20161020_1929.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 4 | 2017-10-02T17:44:14.000Z | 2020-02-14T17:13:57.000Z | outreach/migrations/0002_auto_20161020_1929.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 152 | 2015-01-04T00:08:44.000Z | 2022-01-13T00:43:03.000Z | outreach/migrations/0002_auto_20161020_1929.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 4 | 2015-04-16T04:27:05.000Z | 2021-03-21T20:45:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-20 23:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('outreach', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='outreacheventtype',
name='event_category',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='requirements.EventCategory'),
),
]
| 25.454545 | 117 | 0.664286 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alters OutreachEventType.event_category in the ``outreach`` app."""
    dependencies = [
        # Depends on the app's initial migration.
        ('outreach', '0001_initial'),
    ]
    operations = [
        # Spell out the delete behaviour of the one-to-one field:
        # CASCADE from requirements.EventCategory.
        migrations.AlterField(
            model_name='outreacheventtype',
            name='event_category',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='requirements.EventCategory'),
        ),
    ]
| true | true |
f722015ee354379fc8050d0825f9ed09fdd7e7d9 | 7,523 | py | Python | chainer/functions/pooling/max_pooling_nd_kernel.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 3,705 | 2017-06-01T07:36:12.000Z | 2022-03-30T10:46:15.000Z | chainer/functions/pooling/max_pooling_nd_kernel.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 5,998 | 2017-06-01T06:40:17.000Z | 2022-03-08T01:42:44.000Z | chainer/functions/pooling/max_pooling_nd_kernel.py | zaltoprofen/chainer | 3b03f9afc80fd67f65d5e0395ef199e9506b6ee1 | [
"MIT"
] | 1,150 | 2017-06-02T03:39:46.000Z | 2022-03-29T02:29:32.000Z | import six
from chainer.functions.pooling import pooling_nd_kernel
from chainer.utils import conv_nd_kernel
class MaxPoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):
    """Generates CUDA source for N-dimensional max pooling forward.

    Besides the max value it also emits ``indexes``: the argmax position
    flattened relative to the pooling window, which the backward kernel
    compares against.
    """
    def name(self):
        # max_pool_{N}d_fwd
        return 'max'
    def out_params(self):
        # T out, S indexes
        return ['S indexes']
    def before(self):
        """Emit initialisation: maxval = -inf plus one argmax per dim."""
        # 2D: T maxval = (T)-INFINITY;
        #     int argmax_0 = 0;
        #     int argmax_1 = 0;
        def aux(argmax):
            return 'int {} = 0;'.format(argmax)
        self.argmaxs = conv_nd_kernel.vars('argmax', self.ndim)
        argmax_decls = conv_nd_kernel.map_(aux, self.argmaxs)
        return '\n'.join(['T maxval = (T)-(1.0/0.0);'] + argmax_decls)
    def main(self, offset, xs):
        """Emit the window-scan body: track the running max and coords."""
        # 2D: T v = in[offset_1];
        #     if (maxval < v) {
        #       maxval = v;
        #       argmax_0 = x_0;
        #       argmax_1 = x_1;
        #     }
        w = conv_nd_kernel.Writer()
        w.write('T v = in[{}];'.format(offset))
        w.write('if (maxval < v) {', 'inc')
        w.write('maxval = v;')
        for argmax, x in six.moves.zip(self.argmaxs, xs):
            w.write('{} = {};'.format(argmax, x))
        w.write('}', 'dec')
        return w.get()
    def after(self, out_xs):
        """Emit the epilogue: store maxval and the flattened argmax index."""
        # 2D: out = maxval;
        #     int argmax_k_0 = argmax_0 + p_0 - out_x_0 * s_0;
        #     int argmax_k_1 = argmax_1 + p_1 - out_x_1 * s_1;
        #     indexes = (argmax_k_1 + k_1 * argmax_k_0);
        def aux(argmax_k, argmax, p, out_x, s):
            return 'int {} = {} + {} - {} * {};'.format(
                argmax_k, argmax, p, out_x, s)
        argmax_ks = conv_nd_kernel.vars('argmax_k', self.ndim)
        argmax_k_decls = conv_nd_kernel.map_(
            aux, argmax_ks, self.argmaxs, self.ps, out_xs, self.ss)
        indexes_set = 'indexes = {};'.format(
            conv_nd_kernel.muladdexp(self.ks[1:], argmax_ks[1:], argmax_ks[0]))
        return '\n'.join(['out = maxval;'] + argmax_k_decls + [indexes_set])
class MaxPoolingNDKernelBackward(pooling_nd_kernel.PoolingNDKernelBackward):
    """Generates CUDA source for N-dimensional max pooling backward.

    Each input element accumulates the output gradients of exactly those
    pooling windows in which it was the recorded argmax.
    """
    def name(self):
        # max_pool_{N}d_bwd
        return 'max'
    def in_params(self):
        # 2D: raw T gy, raw S indexes, int32 d_0, int32 d_1, int32 out_0,
        #     int32 out_1, int32 k_0, int32 k_1, int32 s_0, int32 s_1,
        #     int32 p_0, int32 p_1
        return (['raw S indexes'], [])
    def before(self):
        # Gradient accumulator for this input element.
        return 'T val = 0;'
    def main(self, offset, xs, out_xs):
        # Recompute this element's position relative to the window origin
        # and add gy only where it matches the recorded argmax index.
        def rel_pos(x, out_x, stride):
            return '{} - {} * {}'.format(x, out_x, stride)
        rel_terms = conv_nd_kernel.map_(rel_pos, xs, out_xs, self.ss)
        writer = conv_nd_kernel.Writer()
        writer.write('int kx = {};'.format(
            conv_nd_kernel.muladdexp(self.ks, rel_terms, '0')))
        writer.write('if (indexes[{}] == kx) {{'.format(offset), 'inc')
        writer.write('val = val + gy[{}];'.format(offset))
        writer.write('}', 'dec')
        return writer.get()
    def after(self, xs):
        # Store the accumulated gradient.
        return 'gx = val;'
class MaxPoolingNDKernelForwardWithIndexes(
        pooling_nd_kernel.PoolingNDKernelForward):
    """Forward kernel that skips the max search: it decodes a precomputed
    ``indexes`` array back into input coordinates and gathers ``in`` there.
    """
    def name(self):
        # max_index_pool_{N}d_fwd
        return 'max_index'
    def in_params(self):
        # 2D: raw T in, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
        #     int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0, int32 p_1,
        #     raw S indexes
        return ['raw S indexes']
    def out_params(self):
        # T out
        return []
    def _compile_max_x(self):
        """Emit one decoded argmax coordinate per spatial dimension."""
        def aux(max_val, out_val, stride_val, pad_val, ksize_vals):
            # head is this dimension's kernel size; tail the remaining ones
            # (used to peel the right digit off the flattened index).
            head = ksize_vals[0]
            tail = ksize_vals[1:]
            if tail:
                command = 'int {} = max(0, {} * {} - {} + index / ({}) % {});'
                return command.format(
                    max_val, out_val, stride_val, pad_val,
                    conv_nd_kernel.mulexp(tail), head)
            else:
                return 'int {} = max(0, {} * {} - {} + index % {});'.format(
                    max_val, out_val, stride_val, pad_val, head)
        max_vals = conv_nd_kernel.vars('max', self.ndim)
        out_vals = conv_nd_kernel.vars('out_x', self.ndim)
        stride_vals = conv_nd_kernel.vars('s', self.ndim)
        pad_vals = conv_nd_kernel.vars('p', self.ndim)
        ksize_vals = conv_nd_kernel.vars('k', self.ndim)
        offset_ks_decls = conv_nd_kernel.map_(
            aux, max_vals, out_vals, stride_vals, pad_vals,
            conv_nd_kernel.succ_sublists(ksize_vals))
        return offset_ks_decls
    def _compile_out(self):
        """Emit the flat-offset computation and the final gather."""
        def aux(offset, d_val, max_val, offset1):
            return 'int {} = {} * ({} + {});'.format(
                offset, d_val, max_val, offset1)
        d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
        max_vals = conv_nd_kernel.vars('max', self.ndim)
        offsets = conv_nd_kernel.vars('offset', self.ndim)
        offsets1 = ['d_0 * c0'] + offsets[:-1]
        offset_strs = conv_nd_kernel.map_(
            aux, offsets, d_vals, max_vals, offsets1)
        offset_strs.append('out = in[offset_{}];'.format(self.ndim - 1))
        return offset_strs
    def _operation(self):
        """Assemble the full kernel body from the helper fragments."""
        # In the case of 2D, the kernel is the following:
        #
        # // result by self._compile_c0()
        # int c0 = i / (out_0 * out_1);
        #
        # // result by self._compile_max_x()
        # int out_x_0 = i / (out_1) % out_0;
        # int out_x_1 = i % out_1;
        # int index = indexes[i];
        #
        # // result by self._compile_out()
        # int max_0 = max(0, out_x_0 * s_0 - p_0 + index / (k_1) % k_0);
        # int max_1 = max(0, out_x_1 * s_1 - p_1 + index % k_1);
        # int offset_0 = d_1 * (max_0 + d_0 * c0);
        # int offset_1 = 1 * (max_1 + offset_0);
        # out = in[offset_1];
        c0 = self._compile_c0()
        out_x, out_xs = self._compile_out_x()
        max_x = self._compile_max_x()
        index = ['int index = indexes[i];']
        out = self._compile_out()
        return '\n'.join(c0 + out_x + index + max_x + out)
class MaxPoolingNDKernelForwardWithIndexes1(MaxPoolingNDKernelForward):
    """Variant of the forward kernel that runs the max search itself and
    then gathers from ``ggx`` at the argmax position instead of writing the
    max value (presumably the double-backward path — confirm at call site).
    """
    def name(self):
        # max_index1_pool_{N}d_fwd
        return 'max_index1'
    def in_params(self):
        # 2D: raw T gy, raw S indexes, int32 d_0, int32 d_1, int32 out_0,
        #     int32 out_1, int32 k_0, int32 k_1, int32 s_0, int32 s_1,
        #     int32 p_0, int32 p_1
        return ['raw T ggx']
    def out_params(self):
        # T out
        return []
    def after(self, out_xs):
        """Emit the epilogue: flatten the argmax coords and load ggx."""
        # 2D: int offset_0 = d_1 * (argmax_0 + d_0 * c0);
        #     int offset_1 = 1 * (argmax_1 + offset_0);
        #     out = ggx[offset_1];
        def aux(offset, d_val, max_val, offset1):
            return 'int {} = {} * ({} + {});'.format(
                offset, d_val, max_val, offset1)
        d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
        max_vals = conv_nd_kernel.vars('argmax', self.ndim)
        offsets = conv_nd_kernel.vars('offset', self.ndim)
        offsets1 = ['d_0 * c0'] + offsets[:-1]
        offset_strs = conv_nd_kernel.map_(
            aux, offsets, d_vals, max_vals, offsets1)
        offset_strs.append('out = ggx[offset_{}];'.format(self.ndim - 1))
        return '\n'.join(offset_strs)
| 36.342995 | 79 | 0.541938 | import six
from chainer.functions.pooling import pooling_nd_kernel
from chainer.utils import conv_nd_kernel
class MaxPoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):
    """Generates CUDA source for N-D max pooling forward; outputs the max
    value plus the argmax index flattened relative to the pooling window."""
    def name(self):
        return 'max'
    def out_params(self):
        # Extra output next to "T out": the recorded argmax indexes.
        return ['S indexes']
    def before(self):
        # Initialise maxval to -inf and one argmax counter per dimension.
        def aux(argmax):
            return 'int {} = 0;'.format(argmax)
        self.argmaxs = conv_nd_kernel.vars('argmax', self.ndim)
        argmax_decls = conv_nd_kernel.map_(aux, self.argmaxs)
        return '\n'.join(['T maxval = (T)-(1.0/0.0);'] + argmax_decls)
    def main(self, offset, xs):
        # Window-scan body: track the running maximum and its coordinates.
        w = conv_nd_kernel.Writer()
        w.write('T v = in[{}];'.format(offset))
        w.write('if (maxval < v) {', 'inc')
        w.write('maxval = v;')
        for argmax, x in six.moves.zip(self.argmaxs, xs):
            w.write('{} = {};'.format(argmax, x))
        w.write('}', 'dec')
        return w.get()
    def after(self, out_xs):
        # Store maxval and encode the argmax position, relative to the
        # pooling window origin, as a single flattened index.
        def aux(argmax_k, argmax, p, out_x, s):
            return 'int {} = {} + {} - {} * {};'.format(
                argmax_k, argmax, p, out_x, s)
        argmax_ks = conv_nd_kernel.vars('argmax_k', self.ndim)
        argmax_k_decls = conv_nd_kernel.map_(
            aux, argmax_ks, self.argmaxs, self.ps, out_xs, self.ss)
        indexes_set = 'indexes = {};'.format(
            conv_nd_kernel.muladdexp(self.ks[1:], argmax_ks[1:], argmax_ks[0]))
        return '\n'.join(['out = maxval;'] + argmax_k_decls + [indexes_set])
class MaxPoolingNDKernelBackward(pooling_nd_kernel.PoolingNDKernelBackward):
    """Generates CUDA source for N-D max pooling backward: routes each
    output gradient to the element recorded as argmax in the forward."""
    def name(self):
        return 'max'
    def in_params(self):
        # Needs the forward pass's argmax indexes as an extra input.
        return (['raw S indexes'], [])
    def before(self):
        # Gradient accumulator for this input element.
        return 'T val = 0;'
    def main(self, offset, xs, out_xs):
        # Recompute this element's in-window index and accumulate gy only
        # where it equals the recorded argmax.
        def aux(x, out_x, s):
            return '{} - {} * {}'.format(x, out_x, s)
        w = conv_nd_kernel.Writer()
        w.write('int kx = {};'.format(
            conv_nd_kernel.muladdexp(self.ks, conv_nd_kernel.map_(
                aux, xs, out_xs, self.ss), '0')))
        w.write('if (indexes[{}] == kx) {{'.format(offset), 'inc')
        w.write('val = val + gy[{}];'.format(offset))
        w.write('}', 'dec')
        return w.get()
    def after(self, xs):
        # Store the accumulated gradient.
        return 'gx = val;'
class MaxPoolingNDKernelForwardWithIndexes(
        pooling_nd_kernel.PoolingNDKernelForward):
    """Forward kernel that decodes a precomputed ``indexes`` array back to
    input coordinates and gathers ``in`` there, skipping the max search."""
    def name(self):
        return 'max_index'
    def in_params(self):
        return ['raw S indexes']
    def out_params(self):
        return []
    def _compile_max_x(self):
        # Emit one decoded argmax coordinate per spatial dimension.
        def aux(max_val, out_val, stride_val, pad_val, ksize_vals):
            # head: this dimension's kernel size; tail: remaining ones,
            # used to peel the right digit off the flattened index.
            head = ksize_vals[0]
            tail = ksize_vals[1:]
            if tail:
                command = 'int {} = max(0, {} * {} - {} + index / ({}) % {});'
                return command.format(
                    max_val, out_val, stride_val, pad_val,
                    conv_nd_kernel.mulexp(tail), head)
            else:
                return 'int {} = max(0, {} * {} - {} + index % {});'.format(
                    max_val, out_val, stride_val, pad_val, head)
        max_vals = conv_nd_kernel.vars('max', self.ndim)
        out_vals = conv_nd_kernel.vars('out_x', self.ndim)
        stride_vals = conv_nd_kernel.vars('s', self.ndim)
        pad_vals = conv_nd_kernel.vars('p', self.ndim)
        ksize_vals = conv_nd_kernel.vars('k', self.ndim)
        offset_ks_decls = conv_nd_kernel.map_(
            aux, max_vals, out_vals, stride_vals, pad_vals,
            conv_nd_kernel.succ_sublists(ksize_vals))
        return offset_ks_decls
    def _compile_out(self):
        # Emit the flat-offset computation and the final gather from `in`.
        def aux(offset, d_val, max_val, offset1):
            return 'int {} = {} * ({} + {});'.format(
                offset, d_val, max_val, offset1)
        d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
        max_vals = conv_nd_kernel.vars('max', self.ndim)
        offsets = conv_nd_kernel.vars('offset', self.ndim)
        offsets1 = ['d_0 * c0'] + offsets[:-1]
        offset_strs = conv_nd_kernel.map_(
            aux, offsets, d_vals, max_vals, offsets1)
        offset_strs.append('out = in[offset_{}];'.format(self.ndim - 1))
        return offset_strs
    def _operation(self):
        # Assemble the full kernel body from the helper fragments.
        c0 = self._compile_c0()
        out_x, out_xs = self._compile_out_x()
        max_x = self._compile_max_x()
        index = ['int index = indexes[i];']
        out = self._compile_out()
        return '\n'.join(c0 + out_x + index + max_x + out)
class MaxPoolingNDKernelForwardWithIndexes1(MaxPoolingNDKernelForward):
    """Variant that performs the max search (inherited) and then gathers
    from ``ggx`` at the argmax position instead of storing the max value."""
    def name(self):
        return 'max_index1'
    def in_params(self):
        return ['raw T ggx']
    def out_params(self):
        return []
    def after(self, out_xs):
        # Flatten the per-dimension argmax coordinates into one offset and
        # load the corresponding ggx element.
        def aux(offset, d_val, max_val, offset1):
            return 'int {} = {} * ({} + {});'.format(
                offset, d_val, max_val, offset1)
        d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
        max_vals = conv_nd_kernel.vars('argmax', self.ndim)
        offsets = conv_nd_kernel.vars('offset', self.ndim)
        offsets1 = ['d_0 * c0'] + offsets[:-1]
        offset_strs = conv_nd_kernel.map_(
            aux, offsets, d_vals, max_vals, offsets1)
        offset_strs.append('out = ggx[offset_{}];'.format(self.ndim - 1))
        return '\n'.join(offset_strs)
| true | true |
f72201c4e1f81e14abadd52a79cbc75cada4ea22 | 870 | py | Python | setup.py | paluh/wrestler | f00b4956765d8897322f63310b2c3e6668e6725d | [
"BSD-3-Clause"
] | null | null | null | setup.py | paluh/wrestler | f00b4956765d8897322f63310b2c3e6668e6725d | [
"BSD-3-Clause"
] | null | null | null | setup.py | paluh/wrestler | f00b4956765d8897322f63310b2c3e6668e6725d | [
"BSD-3-Clause"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
CLASSIFIERS = [
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
]
REQUIREMENTS = [
'itsdangerous',
'requests',
'simplejson',
'Werkzeug',
]
setup(
name='wRESTler',
author='Tomasz Rybarczyk',
author_email='paluho@gmail.com',
classifiers=CLASSIFIERS,
description='Set of utilities for werkzeug REST services',
install_requires=REQUIREMENTS,
url='https://github.com/paluh/wrestler',
packages=['wrestler'],
zip_safe=False,
test_suite='wrestler.tests.test_suite',
version = '0.0.1',
)
# Packaging script for wRESTler. setuptools is preferred; the stdlib
# distutils is used as a fallback when setuptools is not installed.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# PyPI trove classifiers.
CLASSIFIERS = [
    'Development Status :: 1 - Planning',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
]

# Runtime dependencies.
REQUIREMENTS = [
    'itsdangerous',
    'requests',
    'simplejson',
    'Werkzeug',
]

setup(
    name='wRESTler',
    version='0.0.1',
    description='Set of utilities for werkzeug REST services',
    author='Tomasz Rybarczyk',
    author_email='paluho@gmail.com',
    url='https://github.com/paluh/wrestler',
    packages=['wrestler'],
    classifiers=CLASSIFIERS,
    install_requires=REQUIREMENTS,
    test_suite='wrestler.tests.test_suite',
    zip_safe=False,
)
| true | true |
f722029155968a27b82e4ff0a96fce5c6afb335d | 1,571 | py | Python | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | 1 | 2022-03-10T03:56:49.000Z | 2022-03-10T03:56:49.000Z | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
import re
import sys
import shutil
import siliconcompiler
####################################################################
# Make Docs
####################################################################
def make_docs():
    '''
    The OpenFPGALoader is a universal utility for programming
    FPGAs. Compatible with many boards, cables and FPGA from
    major manufacturers (Xilinx, Altera/Intel, Lattice, Gowin,
    Efinix, Anlogic). openFPGALoader works on Linux, Windows and
    macOS.
    Documentation: https://github.com/trabucayre/openFPGALoader
    Sources: https://github.com/trabucayre/openFPGALoader
    Installation: https://github.com/trabucayre/openFPGALoader
    Status: SC integration WIP
    '''
    # Build a placeholder Chip configured for the 'program' step so the
    # documentation generator can render this tool's default settings.
    chip = siliconcompiler.Chip()
    chip.set('arg','step','program')
    chip.set('arg','index','0')
    # '<design>' is a placeholder design name for docs output only.
    chip.set('design', '<design>')
    chip.set('design', '<design>') if False else None  # NOTE(review): remove if dead; kept original call above
    setup(chip)
    return chip
################################
# Setup Tool (pre executable)
################################
def setup(chip):
    '''Set up the openFPGALoader tool for the current step.

    Registers the executable name, the version switch and the required
    version, then adds the command-line option pointing at the bitstream
    produced by the previous step.

    :param chip: siliconcompiler Chip object carrying the run configuration
    '''
    tool = 'openfpgaloader'
    step = chip.get('arg','step')
    index = chip.get('arg','index')
    # tool setup (clobber=False keeps any user-provided overrides)
    chip.set('eda', tool, 'exe', tool, clobber=False)
    chip.set('eda', tool, 'vswitch', '--Version', clobber=False)
    chip.set('eda', tool, 'version', 'v0.5.0', clobber=False)
    options = []
    # Bug fix: the path was concatenated without a separator, yielding
    # "inputs<design>.bit" instead of the step input directory
    # "inputs/<design>.bit".
    options.append("inputs/" + chip.get('design') + ".bit")
    chip.add('eda', tool, 'option', step, index, options)
| 27.086207 | 68 | 0.574793 | import os
import subprocess
import re
import sys
import shutil
import siliconcompiler
| true | true |
f72203fb31e7832dd9f7c4b06f45103f15916b3e | 6,617 | py | Python | chillapi/abc.py | andrescevp/chillapi | 1c5f07600748eb65f413ad19f5e67653cce8b787 | [
"Apache-2.0"
] | 2 | 2021-05-05T10:54:34.000Z | 2021-05-05T12:45:18.000Z | chillapi/abc.py | andrescevp/chillapi | 1c5f07600748eb65f413ad19f5e67653cce8b787 | [
"Apache-2.0"
] | null | null | null | chillapi/abc.py | andrescevp/chillapi | 1c5f07600748eb65f413ad19f5e67653cce8b787 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List
from sqlalchemy.engine import CursorResult, Inspector
from sqlalchemy.orm.scoping import ScopedSession
from .database import _ALLOWED_DRIVERS
from .exceptions.api_manager import ColumnNotExist, ConfigError
class Repository(ABC):
    """Abstract persistence layer over a SQLAlchemy scoped session.

    The constructor validates that the session's DBAPI driver is one of
    the supported drivers and resolves the matching dialect helper.
    Concrete subclasses implement the raw-SQL and CRUD primitives below.
    """
    def __init__(self, db: ScopedSession):
        self.db = db
        driver = self.db.bind.dialect.dbapi.__name__
        if driver not in _ALLOWED_DRIVERS.keys():
            raise ConfigError(f"{driver} driver not allowed")
        self.db_dialect = _ALLOWED_DRIVERS[driver]
    @abstractmethod
    def execute(self, sql, params=None) -> CursorResult:
        """Run a raw SQL statement.

        :param sql: statement to execute
        :param params: optional bind parameters
        """
        pass
    @abstractmethod
    def execute_insert(self, sql, params=None) -> CursorResult:
        """Run a raw INSERT statement.

        :param sql: INSERT statement to execute
        :param params: optional bind parameters
        """
        pass
    @abstractmethod
    def fetch_by(self, table: str, columns: List[str], filters: dict, params=None):
        """Select ``columns`` from ``table`` matching ``filters``.

        :param table: table name
        :param columns: columns to select
        :param filters: column -> value equality filters
        :param params: optional extra bind parameters
        """
        pass
    @abstractmethod
    def insert(self, table: str, columns: List[str], params: dict, returning: bool = True, returning_field: str = "*") -> CursorResult:
        """Insert one row.

        :param table: table name
        :param columns: columns to populate
        :param params: column -> value mapping
        :param returning: whether to emit a RETURNING clause
        :param returning_field: field(s) returned by the RETURNING clause
        """
        pass
    @abstractmethod
    def insert_batch(self, table: str, columns: List[str], params: List, returning: bool = True, returning_field: str = "*") -> List:
        """Insert several rows at once; returns the RETURNING values.

        :param table: table name
        :param columns: columns to populate
        :param params: list of per-row value collections
        :param returning: whether to emit a RETURNING clause
        :param returning_field: field(s) returned by the RETURNING clause
        """
        pass
    @abstractmethod
    def update_batch(self, table: str, params: List, where_field: str = "id") -> List:
        """Update several rows, matching each on ``where_field``.

        :param table: table name
        :param params: list of per-row value mappings
        :param where_field: column used to locate each row
        """
        pass
    @abstractmethod
    def delete_batch(self, table: str, ids: List, where_field: str = "id") -> List:
        """Delete all rows whose ``where_field`` is in ``ids``.

        :param table: table name
        :param ids: key values identifying the rows to delete
        :param where_field: column matched against ``ids``
        """
        pass
    @abstractmethod
    def insert_record(self, table: str, columns: List[str], params: dict, returning: bool = True, returning_field: str = "*") -> int:
        """Insert one row and return its generated key.

        :param table: table name
        :param columns: columns to populate
        :param params: column -> value mapping
        :param returning: whether to emit a RETURNING clause
        :param returning_field: field returned by the RETURNING clause
        """
        pass
    @abstractmethod
    def update_record(self, table: str, where_field: str, where_value: str, params: dict) -> CursorResult:
        """Update the row where ``where_field`` equals ``where_value``.

        :param table: table name
        :param where_field: column used to locate the row
        :param where_value: value matched against ``where_field``
        :param params: column -> new value mapping
        """
        pass
    @abstractmethod
    def delete_record(self, table: str, where_field: str, where_field_id) -> CursorResult:
        """Delete the row where ``where_field`` equals ``where_field_id``.

        :param table: table name
        :param where_field: column used to locate the row
        :param where_field_id: value matched against ``where_field``
        """
        pass
class Extension(ABC, dict):
    """Base class for pluggable extensions that dispatch by method name."""
    def execute(self, *args):
        """Dispatch to one of this extension's methods by name.

        NOTE(review): ``args`` is a tuple, so ``getattr(args, "method")``
        always raises AttributeError — this looks broken. Presumably the
        intent was a single namespace-like argument carrying ``.method``
        and ``.args``; confirm against callers before relying on it.
        """
        method_name = getattr(args, "method")
        if method_name == "execute":
            raise Exception("You can not call myself")
        method = getattr(self, getattr(args, "method"))
        return method(getattr(args, "args"))
class TableExtension(Extension):
    """Extension bound to a single database table.

    Carries the table name, its column map, the repository used for data
    access and the per-extension config dict (must contain ``enable``).
    """
    # Class-level defaults; overwritten per instance in __init__.
    enabled: bool = False
    table: str = None
    config: dict = None
    repository: Repository = None
    inspector: Inspector
    def __init__(self, config: dict, columns: dict = None, repository: Repository = None, table: str = None, inspector: Inspector = None):
        super().__init__()
        self.inspector = inspector
        self.columns = columns
        self.repository = repository
        self.config = config
        self.table = table
        # The 'enable' key is mandatory; a missing key raises KeyError here.
        self.enabled = self.config["enable"]
    def validate(self):
        """Check that the configured default field exists on the table.

        Disabled extensions validate trivially. Raises ColumnNotExist when
        ``config["default_field"]`` is not among the table's columns.
        """
        if not self.enabled:
            return True
        _default_field = self.config["default_field"]
        if _default_field not in self.columns.keys():
            raise ColumnNotExist(f'{self.__class__.__name__}: "{_default_field}" not found on table "{self.table}" ')
        return True
class AuditLog(ABC, dict):
    """Base class for a single audit-log entry."""
    # Correlation id of the request that produced this entry.
    request_id: str = None
    # Correlation id of the preceding request, if any.
    prev_request_id: str = None
    # Timestamp of the entry (string form).
    date: str = None
    # Human-readable log message.
    message: str = None
    @abstractmethod
    def for_json(self) -> dict:
        """Return a JSON-serialisable dict representation of the entry."""
        pass
class AuditLogHandler(Extension):
    """Extension that persists AuditLog entries to some backend."""
    @abstractmethod
    def log(self, log: AuditLog):
        """Persist one audit-log entry.

        :param log: the AuditLog entry to record
        """
        pass
    def execute(self, *args):
        """Entry point used by the extension dispatcher.

        NOTE(review): ``args`` is a tuple, so ``getattr(args, "log")``
        always raises AttributeError — same suspicious pattern as
        Extension.execute; confirm the intended calling convention.
        """
        self.log(getattr(args, "log"))
class AttributeDict(dict):
    """A dict whose items can also be read and written as attributes.

    ``d.key`` is equivalent to ``d["key"]``. Missing attributes raise
    AttributeError (bug fix: the previous ``__getattr__ = dict.__getitem__``
    alias raised KeyError from attribute access, which broke ``hasattr()``,
    ``getattr(obj, name, default)`` and copy/pickle machinery that probes
    for optional attributes).
    """
    __slots__ = ()

    def __getattr__(self, name):
        # Only invoked after normal attribute lookup fails, i.e. for keys.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name) from None

    __setattr__ = dict.__setitem__
| 25.847656 | 138 | 0.569291 | from abc import ABC, abstractmethod
from typing import List
from sqlalchemy.engine import CursorResult, Inspector
from sqlalchemy.orm.scoping import ScopedSession
from .database import _ALLOWED_DRIVERS
from .exceptions.api_manager import ColumnNotExist, ConfigError
class Repository(ABC):
    """Abstract persistence layer over a SQLAlchemy scoped session;
    validates the DBAPI driver and resolves the matching dialect helper."""
    def __init__(self, db: ScopedSession):
        self.db = db
        driver = self.db.bind.dialect.dbapi.__name__
        if driver not in _ALLOWED_DRIVERS.keys():
            raise ConfigError(f"{driver} driver not allowed")
        self.db_dialect = _ALLOWED_DRIVERS[driver]
    @abstractmethod
    def execute(self, sql, params=None) -> CursorResult:
        """Run a raw SQL statement with optional bind parameters."""
        pass
    @abstractmethod
    def execute_insert(self, sql, params=None) -> CursorResult:
        """Run a raw INSERT statement with optional bind parameters."""
        pass
    @abstractmethod
    def fetch_by(self, table: str, columns: List[str], filters: dict, params=None):
        """Select ``columns`` from ``table`` matching the equality filters."""
        pass
    @abstractmethod
    def insert(self, table: str, columns: List[str], params: dict, returning: bool = True, returning_field: str = "*") -> CursorResult:
        """Insert one row, optionally with a RETURNING clause."""
        pass
    @abstractmethod
    def insert_batch(self, table: str, columns: List[str], params: List, returning: bool = True, returning_field: str = "*") -> List:
        """Insert several rows; returns the RETURNING values."""
        pass
    @abstractmethod
    def update_batch(self, table: str, params: List, where_field: str = "id") -> List:
        """Update several rows, matching each on ``where_field``."""
        pass
    @abstractmethod
    def delete_batch(self, table: str, ids: List, where_field: str = "id") -> List:
        """Delete all rows whose ``where_field`` value is in ``ids``."""
        pass
    @abstractmethod
    def insert_record(self, table: str, columns: List[str], params: dict, returning: bool = True, returning_field: str = "*") -> int:
        """Insert one row and return its generated key."""
        pass
    @abstractmethod
    def update_record(self, table: str, where_field: str, where_value: str, params: dict) -> CursorResult:
        """Update the row where ``where_field`` equals ``where_value``."""
        pass
    @abstractmethod
    def delete_record(self, table: str, where_field: str, where_field_id) -> CursorResult:
        """Delete the row where ``where_field`` equals ``where_field_id``."""
        pass
class Extension(ABC, dict):
    """Base class for pluggable extensions dispatched by method name."""
    def execute(self, *args):
        # NOTE(review): `args` is a tuple, so getattr(args, "method") will
        # raise AttributeError; the intended argument was presumably a
        # namespace-like object with .method/.args — confirm with callers.
        method_name = getattr(args, "method")
        if method_name == "execute":
            raise Exception("You can not call myself")
        method = getattr(self, getattr(args, "method"))
        return method(getattr(args, "args"))
class TableExtension(Extension):
    """Extension bound to one database table; carries the table name, its
    columns, the repository and the config dict (must contain 'enable')."""
    enabled: bool = False
    table: str = None
    config: dict = None
    repository: Repository = None
    inspector: Inspector
    def __init__(self, config: dict, columns: dict = None, repository: Repository = None, table: str = None, inspector: Inspector = None):
        super().__init__()
        self.inspector = inspector
        self.columns = columns
        self.repository = repository
        self.config = config
        self.table = table
        # 'enable' is mandatory; KeyError here means a malformed config.
        self.enabled = self.config["enable"]
    def validate(self):
        """Check config['default_field'] exists on the table; disabled
        extensions validate trivially. Raises ColumnNotExist otherwise."""
        if not self.enabled:
            return True
        _default_field = self.config["default_field"]
        if _default_field not in self.columns.keys():
            raise ColumnNotExist(f'{self.__class__.__name__}: "{_default_field}" not found on table "{self.table}" ')
        return True
class AuditLog(ABC, dict):
    """Base class for one audit-log entry."""
    request_id: str = None        # correlation id of the originating request
    prev_request_id: str = None   # correlation id of the preceding request
    date: str = None              # entry timestamp (string form)
    message: str = None           # human-readable message
    @abstractmethod
    def for_json(self) -> dict:
        """Return a JSON-serialisable dict representation of the entry."""
        pass
class AuditLogHandler(Extension):
    """Extension that persists AuditLog entries to some backend."""
    @abstractmethod
    def log(self, log: AuditLog):
        """Persist one audit-log entry."""
        pass
    def execute(self, *args):
        # NOTE(review): `args` is a tuple, so getattr(args, "log") raises
        # AttributeError; see the same pattern in Extension.execute.
        self.log(getattr(args, "log"))
class AttributeDict(dict):
    """Dict whose items are also readable/writable as attributes.

    NOTE(review): missing attributes raise KeyError (not AttributeError)
    because __getattr__ aliases dict.__getitem__; this breaks hasattr()
    and getattr(..., default) — consider wrapping to raise AttributeError.
    """
    __slots__ = ()
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
| true | true |
f722058a9fefec1defa9ea29827379f0c1c55fc1 | 6,172 | py | Python | tools/marvin/marvin/marvinInit.py | pkoistin/cloudstack | fd43cf151663c48fe29f97323490d53a7c0f9d5b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-10-29T04:14:14.000Z | 2020-10-29T04:14:14.000Z | tools/marvin/marvin/marvinInit.py | alexoughsg/Albatross.Backup | 050016dbf1d2d1f7e1a6e49b54cdeb3108a5d6b7 | [
"Apache-2.0"
] | 6 | 2020-11-16T20:46:14.000Z | 2022-02-01T01:06:16.000Z | tools/marvin/marvin/marvinInit.py | pkoistin/cloudstack | fd43cf151663c48fe29f97323490d53a7c0f9d5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
@Desc: Initializes the marvin and does required prerequisites
for starting it.
1. Parses the configuration file passed to marvin and creates a
parsed config
2. Initializes the logging required for marvin.All logs are
now made available under a single timestamped folder.
3. Deploys the Data Center based upon input
'''
from marvin import configGenerator
from marvin import cloudstackException
from marvin.marvinLog import MarvinLog
from marvin.deployDataCenter import deployDataCenters
from marvin.codes import(
YES,
NO,
SUCCESS,
FAILED
)
import sys
import time
import os
import logging
import string
import random
class MarvinInit:
def __init__(self, config_file, load_flag, log_folder_path=None):
self.__configFile = config_file
self.__loadFlag = load_flag
self.__parsedConfig = None
self.__logFolderPath = log_folder_path
self.__tcRunLogger = None
self.__testClient = None
self.__tcRunDebugFile = None
def __parseConfig(self):
'''
@Desc : Parses the configuration file passed and assigns
the parsed configuration
'''
try:
self.__parsedConfig = configGenerator.\
getSetupConfig(self.__configFile)
return SUCCESS
except Exception, e:
print "\n Exception Occurred Under __parseConfig : %s" % str(e)
return None
def getParsedConfig(self):
return self.__parsedConfig
def getLogFolderPath(self):
return self.__logFolderPath
def getTestClient(self):
return self.__testClient
def getLogger(self):
return self.__tcRunLogger
def getDebugFile(self):
return self.__tcRunDebugFile
def init(self):
'''
@Desc :Initializes the marvin by
1. Parsing the configuration and creating a parsed config
structure
2. Creates a timestamped log folder and provides all logs
to be dumped there
3. Creates the DataCenter based upon configuration provided
'''
try:
if ((self.__parseConfig() is not None) and
(self.__initLogging() is not None) and
(self.__deployDC() is not None)):
return SUCCESS
else:
return FAILED
except Exception, e:
print "\n Exception Occurred Under init %s" % str(e)
return FAILED
def __initLogging(self):
try:
'''
@Desc : 1. Initializes the logging for marvin and so provides
various log features for automation run.
2. Initializes all logs to be available under
given Folder Path,where all test run logs
are available for a given run.
3. All logging like exception log,results, run info etc
for a given test run are available under a given
timestamped folder
'''
temp_path = "".join(str(time.time()).split("."))
if self.__logFolderPath is None:
log_config = self.__parsedConfig.logger
if log_config is not None:
if log_config.LogFolderPath is not None:
self.logFolderPath = log_config.LogFolderPath + '/' \
+ temp_path
else:
self.logFolderPath = temp_path
else:
self.logFolderPath = temp_path
else:
self.logFolderPath = self.__logFolderPath + '/' + temp_path
if os.path.exists(self.logFolderPath):
self.logFolderPath += ''.join(random.choice(
string.ascii_uppercase +
string.digits for x in range(3)))
os.makedirs(self.logFolderPath)
'''
Log File Paths
'''
tc_failed_exceptionlog = self.logFolderPath + "/failed_" \
"plus_" \
"exceptions.txt"
tc_run_log = self.logFolderPath + "/runinfo.txt"
self.__tcRunDebugFile = open(self.logFolderPath +
"/results.txt", "w")
log_obj = MarvinLog("CSLog")
self.__tcRunLogger = log_obj.setLogHandler(tc_run_log)
log_obj.setLogHandler(tc_failed_exceptionlog,
log_level=logging.FATAL)
return SUCCESS
except Exception, e:
print "\n Exception Occurred Under __initLogging :%s" % str(e)
return None
def __deployDC(self):
try:
'''
Deploy the DataCenter and retrieves test client.
'''
deploy_obj = deployDataCenters(self.__parsedConfig,
self.__tcRunLogger)
if self.__loadFlag:
deploy_obj.loadCfg()
else:
deploy_obj.deploy()
self.__testClient = deploy_obj.testClient
return SUCCESS
except Exception, e:
print "\n Exception Occurred Under __deployDC : %s" % str(e)
return None
| 36.305882 | 77 | 0.586034 |
'''
@Desc: Initializes the marvin and does required prerequisites
for starting it.
1. Parses the configuration file passed to marvin and creates a
parsed config
2. Initializes the logging required for marvin.All logs are
now made available under a single timestamped folder.
3. Deploys the Data Center based upon input
'''
from marvin import configGenerator
from marvin import cloudstackException
from marvin.marvinLog import MarvinLog
from marvin.deployDataCenter import deployDataCenters
from marvin.codes import(
YES,
NO,
SUCCESS,
FAILED
)
import sys
import time
import os
import logging
import string
import random
class MarvinInit:
    # Bootstraps a marvin run: parses the config, prepares the timestamped
    # log folder and deploys the data center.
    def __init__(self, config_file, load_flag, log_folder_path=None):
        self.__configFile = config_file
        self.__loadFlag = load_flag
        self.__parsedConfig = None
        self.__logFolderPath = log_folder_path
        self.__tcRunLogger = None
        self.__testClient = None
        self.__tcRunDebugFile = None
    def __parseConfig(self):
        '''
        @Desc : Parses the configuration file passed and assigns
                the parsed configuration
        '''
        try:
            self.__parsedConfig = configGenerator.\
                getSetupConfig(self.__configFile)
            return SUCCESS
        except Exception, e:
            print "\n Exception Occurred Under __parseConfig : %s" % str(e)
            return None
    def getParsedConfig(self):
        return self.__parsedConfig
    def getLogFolderPath(self):
        # NOTE(review): __initLogging stores the created folder in
        # self.logFolderPath, not self.__logFolderPath, so this getter
        # returns the constructor argument rather than the folder actually
        # created — looks like a bug; confirm before relying on it.
        return self.__logFolderPath
    def getTestClient(self):
        return self.__testClient
    def getLogger(self):
        return self.__tcRunLogger
    def getDebugFile(self):
        return self.__tcRunDebugFile
    def init(self):
        '''
        @Desc :Initializes the marvin by
               1. Parsing the configuration and creating a parsed config
                  structure
               2. Creates a timestamped log folder and provides all logs
                  to be dumped there
               3. Creates the DataCenter based upon configuration provided
        '''
        try:
            if ((self.__parseConfig() is not None) and
                    (self.__initLogging() is not None) and
                    (self.__deployDC() is not None)):
                return SUCCESS
            else:
                return FAILED
        except Exception, e:
            print "\n Exception Occurred Under init %s" % str(e)
            return FAILED
    def __initLogging(self):
        try:
            '''
            @Desc : 1. Initializes the logging for marvin and so provides
                    various log features for automation run.
                    2. Initializes all logs to be available under
                    given Folder Path,where all test run logs
                    are available for a given run.
                    3. All logging like exception log,results, run info etc
                     for a given test run are available under a given
                     timestamped folder
            '''
            # Timestamp with the '.' removed, used as a unique folder name.
            temp_path = "".join(str(time.time()).split("."))
            if self.__logFolderPath is None:
                log_config = self.__parsedConfig.logger
                if log_config is not None:
                    if log_config.LogFolderPath is not None:
                        self.logFolderPath = log_config.LogFolderPath + '/' \
                            + temp_path
                    else:
                        self.logFolderPath = temp_path
                else:
                    self.logFolderPath = temp_path
            else:
                self.logFolderPath = self.__logFolderPath + '/' + temp_path
            if os.path.exists(self.logFolderPath):
                # NOTE(review): random.choice is applied to a generator
                # expression here, which raises TypeError when this
                # collision path runs; presumably meant
                # ''.join(random.choice(...) for x in range(3)).
                self.logFolderPath += ''.join(random.choice(
                    string.ascii_uppercase +
                    string.digits for x in range(3)))
            os.makedirs(self.logFolderPath)
            '''
            Log File Paths
            '''
            tc_failed_exceptionlog = self.logFolderPath + "/failed_" \
                                                          "plus_" \
                                                          "exceptions.txt"
            tc_run_log = self.logFolderPath + "/runinfo.txt"
            self.__tcRunDebugFile = open(self.logFolderPath +
                                         "/results.txt", "w")
            log_obj = MarvinLog("CSLog")
            self.__tcRunLogger = log_obj.setLogHandler(tc_run_log)
            # Failures/exceptions are mirrored into their own file.
            log_obj.setLogHandler(tc_failed_exceptionlog,
                                  log_level=logging.FATAL)
            return SUCCESS
        except Exception, e:
            print "\n Exception Occurred Under __initLogging :%s" % str(e)
            return None
    def __deployDC(self):
        try:
            '''
            Deploy the DataCenter and retrieves test client.
            '''
            deploy_obj = deployDataCenters(self.__parsedConfig,
                                           self.__tcRunLogger)
            if self.__loadFlag:
                # Load an existing configuration instead of deploying.
                deploy_obj.loadCfg()
            else:
                deploy_obj.deploy()
            self.__testClient = deploy_obj.testClient
            return SUCCESS
        except Exception, e:
            print "\n Exception Occurred Under __deployDC : %s" % str(e)
            return None
| false | true |
f72206e42403c11c1c1c1130d4e236efff455e1e | 5,448 | py | Python | pandas/tests/series/test_cumulative.py | mlline00/pandas | fd7db9819b8c7dba86b2887bee33f670b2715afc | [
"BSD-3-Clause"
] | 1 | 2021-04-01T11:19:53.000Z | 2021-04-01T11:19:53.000Z | pandas/tests/series/test_cumulative.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/series/test_cumulative.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | null | null | null | """
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(e, method)()
tm.assert_series_equal(res, expecteds[method])
| 31.859649 | 88 | 0.558554 | from itertools import product
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(e, method)()
tm.assert_series_equal(res, expecteds[method])
| true | true |
f7220728cd8301f75d31c36ed0264f74be8b8b0f | 187 | py | Python | month02/day03/buffer.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | 1 | 2021-02-02T02:17:37.000Z | 2021-02-02T02:17:37.000Z | month02/day03/buffer.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | null | null | null | month02/day03/buffer.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | null | null | null |
# Open "3.txt" for binary writing with a 10-byte buffer.
# NOTE(review): the original comment said 行缓冲打开 ("line-buffered open"),
# but buffering=1 enables line buffering only in *text* mode; in binary
# mode buffering=10 requests a 10-byte block buffer instead -- confirm
# which behavior was intended.
file=open("3.txt",'wb',buffering=10)
while True:
    data=input("<<")
    # an empty input line terminates the loop
    if not data:
        break
    file.write((data+"\n").encode())
    # file.flush()  # flush the buffer (original comment: 刷新缓冲)
file.close() | 14.384615 | 36 | 0.55615 |
file=open("3.txt",'wb',buffering=10)
while True:
data=input("<<")
if not data:
break
file.write((data+"\n").encode())
.close() | true | true |
f7220870e4ac64ce035175f7d9e02ed043831af2 | 1,987 | py | Python | xknx/knxip/disconnect_request.py | marvin-w/xknx | 9f8b80fa0906a345c7def519ac3c92c955990a26 | [
"MIT"
] | null | null | null | xknx/knxip/disconnect_request.py | marvin-w/xknx | 9f8b80fa0906a345c7def519ac3c92c955990a26 | [
"MIT"
] | 1 | 2019-01-06T11:40:16.000Z | 2019-01-06T11:40:16.000Z | xknx/knxip/disconnect_request.py | marvin-w/xknx | 9f8b80fa0906a345c7def519ac3c92c955990a26 | [
"MIT"
] | null | null | null | """
Module for Serialization and Deserialization of a KNX Disconnect Request information.
Connect requests are used to disconnect a tunnel from a KNX/IP device.
"""
from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
class DisconnectRequest(KNXIPBody):
    """Representation of a KNX Disconnect Request.

    Disconnect requests are used to tear down a tunnel connection to a
    KNX/IP device: a 1-byte communication channel id, a reserved byte, and
    the control endpoint HPAI.
    """

    # pylint: disable=too-many-instance-attributes

    service_type = KNXIPServiceType.DISCONNECT_REQUEST

    def __init__(self, xknx):
        """Initialize DisconnectRequest object."""
        super().__init__(xknx)
        self.communication_channel_id = 1
        self.control_endpoint = HPAI()

    def calculated_length(self):
        """Get length of KNX/IP body."""
        # channel id + reserved byte, followed by the control endpoint HPAI
        return 2 + HPAI.LENGTH

    def from_knx(self, raw):
        """Parse/deserialize from KNX/IP raw data."""
        if len(raw) < 2:
            raise CouldNotParseKNXIP("Disconnect info has wrong length")
        self.communication_channel_id = raw[0]
        # raw[1] is reserved
        position = 2
        position += self.control_endpoint.from_knx(raw[position:])
        return position

    def to_knx(self):
        """Serialize to KNX/IP raw data."""
        payload = [self.communication_channel_id, 0x00]  # 0x00 = reserved
        payload.extend(self.control_endpoint.to_knx())
        return payload

    def __str__(self):
        """Return object as readable string."""
        return (
            f'<DisconnectRequest CommunicationChannelID='
            f'"{self.communication_channel_id}" '
            f'control_endpoint="{self.control_endpoint}" />'
        )
| 28.797101 | 85 | 0.617514 | from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
class DisconnectRequest(KNXIPBody):
service_type = KNXIPServiceType.DISCONNECT_REQUEST
def __init__(self, xknx):
super().__init__(xknx)
self.communication_channel_id = 1
self.control_endpoint = HPAI()
def calculated_length(self):
return 2 + HPAI.LENGTH
def from_knx(self, raw):
def info_from_knx(info):
if len(info) < 2:
raise CouldNotParseKNXIP("Disconnect info has wrong length")
self.communication_channel_id = info[0]
return 2
pos = info_from_knx(raw)
pos += self.control_endpoint.from_knx(raw[pos:])
return pos
def to_knx(self):
def info_to_knx():
info = []
info.append(self.communication_channel_id)
info.append(0x00)
return info
data = []
data.extend(info_to_knx())
data.extend(self.control_endpoint.to_knx())
return data
def __str__(self):
return (
'<DisconnectRequest CommunicationChannelID="{}" '
'control_endpoint="{}" />'.format(
self.communication_channel_id, self.control_endpoint
)
)
| true | true |
f72209c4367259bfc0bdf1ad69eaae4ca6dd390a | 80,107 | py | Python | ibutsu_client/model_utils.py | rsnyman/ibutsu-client-python | 451bae383a8bd1a35c3cf917749614cfcbd94283 | [
"MIT"
] | null | null | null | ibutsu_client/model_utils.py | rsnyman/ibutsu-client-python | 451bae383a8bd1a35c3cf917749614cfcbd94283 | [
"MIT"
] | null | null | null | ibutsu_client/model_utils.py | rsnyman/ibutsu-client-python | 451bae383a8bd1a35c3cf917749614cfcbd94283 | [
"MIT"
] | null | null | null | """
Ibutsu API
A system to store and query test results # noqa: E501
The version of the OpenAPI document: 1.13.4
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from ibutsu_client.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
# Alias for ``NoneType``; used in generated type tuples to mark nullable values.
none_type = type(None)
# Base class for file-like objects accepted/returned by the API client.
file_type = io.IOBase
def convert_js_args_to_python_args(fn):
    """Decorator mapping spec-style (JS) kwargs to python names before calling ``fn``.

    An attribute named `self` received from the api would conflict with the
    reserved `self` parameter of a class method, so generated models map it
    to `_self`; the wrapper therefore names its first argument `_self`.
    The key conversion only runs when ``_spec_property_naming`` is truthy
    in the keyword arguments.
    """
    from functools import wraps

    @wraps(fn)
    def wrapper(_self, *args, **kwargs):
        if kwargs.get('_spec_property_naming', False):
            owner = _self if isinstance(_self, type) else _self.__class__
            kwargs = change_keys_js_to_python(kwargs, owner)
        return fn(_self, *args, **kwargs)

    return wrapper
class cached_property(object):
    """Descriptor that computes a zero-argument callable once and caches it.

    Use as a decorator on functions taking no inputs. Note that the cached
    value is stored on the *descriptor* object itself (under ``result_key``),
    so it is shared by every instance of the owning class.
    """

    result_key = '_results'

    def __init__(self, fn):
        self._fn = fn

    def __get__(self, instance, cls=None):
        try:
            # fast path: the value has already been computed and cached
            return vars(self)[self.result_key]
        except KeyError:
            computed = self._fn()
            setattr(self, self.result_key, computed)
            return computed
# Types that deserialize directly from primitive payload values.
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
    """Return True if ``cls`` or any descendant schema accepts a bare value.

    This is true for cases where oneOf contains items like:
          oneOf:
            - float
            - NumberWithValidation
            - StringEnum
            - ArrayModel
            - null
    TODO: lru_cache this
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return True
    if issubclass(cls, ModelComposed):
        one_of = cls._composed_schemas['oneOf']
        # an empty oneOf list means no single-value form exists
        return bool(one_of) and any(
            allows_single_value_input(candidate) for candidate in one_of
        )
    return False
def composed_model_input_classes(cls):
    """Return the list of model classes that can be accepted as inputs for ``cls``.

    TODO: lru_cache this
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    if issubclass(cls, ModelNormal):
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        return [cls]
    if issubclass(cls, ModelComposed):
        oneof_classes = cls._composed_schemas['oneOf']
        if not oneof_classes:
            return []
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        # no discriminator: recursively collect from every oneOf candidate
        collected = []
        for candidate in oneof_classes:
            collected.extend(composed_model_input_classes(candidate))
        return collected
    return []
class OpenApiModel(object):
    """The base class for all OpenAPIModels.

    Provides attribute storage/validation (set_attribute) and
    discriminator-aware instantiation (__new__ / _new_from_openapi_data).
    """

    def set_attribute(self, name, value):
        """Validate ``value`` against the schema for ``name`` and store it.

        Raises ApiAttributeError for unknown names (when no
        additionalProperties are allowed), ApiTypeError for non-string
        keys, and delegates type/enum/range checks to the module-level
        validators before writing into ``_data_store``.
        """
        # this is only used to set properties on self

        path_to_item = []
        if self._path_to_item:
            path_to_item.extend(self._path_to_item)
        path_to_item.append(name)

        if name in self.openapi_types:
            required_types_mixed = self.openapi_types[name]
        elif self.additional_properties_type is None:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                path_to_item
            )
        elif self.additional_properties_type is not None:
            required_types_mixed = self.additional_properties_type

        if get_simple_class(name) != str:
            error_msg = type_error_message(
                var_name=name,
                var_value=name,
                valid_classes=(str,),
                key_type=True
            )
            raise ApiTypeError(
                error_msg,
                path_to_item=path_to_item,
                valid_classes=(str,),
                key_type=True
            )

        if self._check_type:
            value = validate_and_convert_types(
                value, required_types_mixed, path_to_item, self._spec_property_naming,
                self._check_type, configuration=self._configuration)
        if (name,) in self.allowed_values:
            check_allowed_values(
                self.allowed_values,
                (name,),
                value
            )
        if (name,) in self.validations:
            check_validations(
                self.validations,
                (name,),
                value,
                self._configuration
            )
        self.__dict__['_data_store'][name] = value

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other

    def __setattr__(self, attr, value):
        """set the value of an attribute using dot notation: `instance.attr = val`"""
        self[attr] = value

    def __getattr__(self, attr):
        """get the value of an attribute using dot notation: `instance.attr`"""
        return self.__getitem__(attr)

    def __new__(cls, *args, **kwargs):
        """Instantiate the concrete schema class selected by the discriminator.

        When the schema defines a discriminator, the propertyName value in
        kwargs chooses which subclass to actually construct.
        """
        # this function uses the discriminator to
        # pick a new schema/class to instantiate because a discriminator
        # propertyName value was passed in

        if len(args) == 1:
            arg = args[0]
            if arg is None and is_type_nullable(cls):
                # The input data is the 'null' value and the type is nullable.
                return None

            if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
                model_kwargs = {}
                oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
                return oneof_instance

        visited_composed_classes = kwargs.get('_visited_composed_classes', ())
        if (
            cls.discriminator is None or
            cls in visited_composed_classes
        ):
            # Use case 1: this openapi schema (cls) does not have a discriminator
            # Use case 2: we have already visited this class before and are sure that we
            # want to instantiate it this time. We have visited this class deserializing
            # a payload with a discriminator. During that process we traveled through
            # this class but did not make an instance of it. Now we are making an
            # instance of a composed class which contains cls in it, so this time make an instance of cls.
            #
            # Here's an example of use case 2: If Animal has a discriminator
            # petType and we pass in "Dog", and the class Dog
            # allOf includes Animal, we move through Animal
            # once using the discriminator, and pick Dog.
            # Then in the composed schema dog Dog, we will make an instance of the
            # Animal class (because Dal has allOf: Animal) but this time we won't travel
            # through Animal's discriminator because we passed in
            # _visited_composed_classes = (Animal,)
            return super(OpenApiModel, cls).__new__(cls)

        # Get the name and value of the discriminator property.
        # The discriminator name is obtained from the discriminator meta-data
        # and the discriminator value is obtained from the input data.
        discr_propertyname_py = list(cls.discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if discr_propertyname_js in kwargs:
            discr_value = kwargs[discr_propertyname_js]
        elif discr_propertyname_py in kwargs:
            discr_value = kwargs[discr_propertyname_py]
        else:
            # The input data does not contain the discriminator property.
            path_to_item = kwargs.get('_path_to_item', ())
            raise ApiValueError(
                "Cannot deserialize input data due to missing discriminator. "
                "The discriminator property '%s' is missing at path: %s" %
                (discr_propertyname_js, path_to_item)
            )

        # Implementation note: the last argument to get_discriminator_class
        # is a list of visited classes. get_discriminator_class may recursively
        # call itself and update the list of visited classes, and the initial
        # value must be an empty list. Hence not using 'visited_composed_classes'
        new_cls = get_discriminator_class(
            cls, discr_propertyname_py, discr_value, [])
        if new_cls is None:
            path_to_item = kwargs.get('_path_to_item', ())
            disc_prop_value = kwargs.get(
                discr_propertyname_js, kwargs.get(discr_propertyname_py))
            raise ApiValueError(
                "Cannot deserialize input data due to invalid discriminator "
                "value. The OpenAPI document has no mapping for discriminator "
                "property '%s'='%s' at path: %s" %
                (discr_propertyname_js, disc_prop_value, path_to_item)
            )

        if new_cls in visited_composed_classes:
            # if we are making an instance of a composed schema Descendent
            # which allOf includes Ancestor, then Ancestor contains
            # a discriminator that includes Descendent.
            # So if we make an instance of Descendent, we have to make an
            # instance of Ancestor to hold the allOf properties.
            # This code detects that use case and makes the instance of Ancestor
            # For example:
            # When making an instance of Dog, _visited_composed_classes = (Dog,)
            # then we make an instance of Animal to include in dog._composed_instances
            # so when we are here, cls is Animal
            # cls.discriminator != None
            # cls not in _visited_composed_classes
            # new_cls = Dog
            # but we know we know that we already have Dog
            # because it is in visited_composed_classes
            # so make Animal here
            return super(OpenApiModel, cls).__new__(cls)

        # Build a list containing all oneOf and anyOf descendants.
        oneof_anyof_classes = None
        if cls._composed_schemas is not None:
            oneof_anyof_classes = (
                cls._composed_schemas.get('oneOf', ()) +
                cls._composed_schemas.get('anyOf', ()))
        oneof_anyof_child = new_cls in oneof_anyof_classes
        kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)

        if cls._composed_schemas.get('allOf') and oneof_anyof_child:
            # Validate that we can make self because when we make the
            # new_cls it will not include the allOf validations in self
            self_inst = super(OpenApiModel, cls).__new__(cls)
            self_inst.__init__(*args, **kwargs)

        new_inst = new_cls.__new__(new_cls, *args, **kwargs)
        new_inst.__init__(*args, **kwargs)
        return new_inst

    @classmethod
    @convert_js_args_to_python_args
    def _new_from_openapi_data(cls, *args, **kwargs):
        """Deserialization counterpart of ``__new__``.

        Same discriminator-driven class selection, but instances are built
        via ``_from_openapi_data`` instead of the public constructor.
        """
        # this function uses the discriminator to
        # pick a new schema/class to instantiate because a discriminator
        # propertyName value was passed in

        if len(args) == 1:
            arg = args[0]
            if arg is None and is_type_nullable(cls):
                # The input data is the 'null' value and the type is nullable.
                return None

            if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
                model_kwargs = {}
                oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
                return oneof_instance

        visited_composed_classes = kwargs.get('_visited_composed_classes', ())
        if (
            cls.discriminator is None or
            cls in visited_composed_classes
        ):
            # Use case 1: this openapi schema (cls) does not have a discriminator
            # Use case 2: we have already visited this class before and are sure that we
            # want to instantiate it this time. We have visited this class deserializing
            # a payload with a discriminator. During that process we traveled through
            # this class but did not make an instance of it. Now we are making an
            # instance of a composed class which contains cls in it, so this time make an instance of cls.
            #
            # Here's an example of use case 2: If Animal has a discriminator
            # petType and we pass in "Dog", and the class Dog
            # allOf includes Animal, we move through Animal
            # once using the discriminator, and pick Dog.
            # Then in the composed schema dog Dog, we will make an instance of the
            # Animal class (because Dal has allOf: Animal) but this time we won't travel
            # through Animal's discriminator because we passed in
            # _visited_composed_classes = (Animal,)
            return cls._from_openapi_data(*args, **kwargs)

        # Get the name and value of the discriminator property.
        # The discriminator name is obtained from the discriminator meta-data
        # and the discriminator value is obtained from the input data.
        discr_propertyname_py = list(cls.discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if discr_propertyname_js in kwargs:
            discr_value = kwargs[discr_propertyname_js]
        elif discr_propertyname_py in kwargs:
            discr_value = kwargs[discr_propertyname_py]
        else:
            # The input data does not contain the discriminator property.
            path_to_item = kwargs.get('_path_to_item', ())
            raise ApiValueError(
                "Cannot deserialize input data due to missing discriminator. "
                "The discriminator property '%s' is missing at path: %s" %
                (discr_propertyname_js, path_to_item)
            )

        # Implementation note: the last argument to get_discriminator_class
        # is a list of visited classes. get_discriminator_class may recursively
        # call itself and update the list of visited classes, and the initial
        # value must be an empty list. Hence not using 'visited_composed_classes'
        new_cls = get_discriminator_class(
            cls, discr_propertyname_py, discr_value, [])
        if new_cls is None:
            path_to_item = kwargs.get('_path_to_item', ())
            disc_prop_value = kwargs.get(
                discr_propertyname_js, kwargs.get(discr_propertyname_py))
            raise ApiValueError(
                "Cannot deserialize input data due to invalid discriminator "
                "value. The OpenAPI document has no mapping for discriminator "
                "property '%s'='%s' at path: %s" %
                (discr_propertyname_js, disc_prop_value, path_to_item)
            )

        if new_cls in visited_composed_classes:
            # if we are making an instance of a composed schema Descendent
            # which allOf includes Ancestor, then Ancestor contains
            # a discriminator that includes Descendent.
            # So if we make an instance of Descendent, we have to make an
            # instance of Ancestor to hold the allOf properties.
            # This code detects that use case and makes the instance of Ancestor
            # For example:
            # When making an instance of Dog, _visited_composed_classes = (Dog,)
            # then we make an instance of Animal to include in dog._composed_instances
            # so when we are here, cls is Animal
            # cls.discriminator != None
            # cls not in _visited_composed_classes
            # new_cls = Dog
            # but we know we know that we already have Dog
            # because it is in visited_composed_classes
            # so make Animal here
            return cls._from_openapi_data(*args, **kwargs)

        # Build a list containing all oneOf and anyOf descendants.
        oneof_anyof_classes = None
        if cls._composed_schemas is not None:
            oneof_anyof_classes = (
                cls._composed_schemas.get('oneOf', ()) +
                cls._composed_schemas.get('anyOf', ()))
        oneof_anyof_child = new_cls in oneof_anyof_classes
        kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)

        if cls._composed_schemas.get('allOf') and oneof_anyof_child:
            # Validate that we can make self because when we make the
            # new_cls it will not include the allOf validations in self
            self_inst = cls._from_openapi_data(*args, **kwargs)

        new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
        return new_inst
class ModelSimple(OpenApiModel):
    """Parent class of generated models whose OpenAPI type is not ``object``.

    A ModelSimple wraps a single primitive payload, stored under the
    ``value`` key of the instance's ``_data_store``.
    """

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            # bookkeeping attributes live directly on the instance dict
            self.__dict__[name] = value
        else:
            # everything else is validated and stored in _data_store
            self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name not in self:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [part for part in [self._path_to_item, name] if part]
            )
        return self.get(name)

    def __contains__(self, name):
        """used by `in` operator to check if an attrbute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_str(self):
        """Returns the string representation of the model"""
        return str(self.value)

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False
        # only the wrapped 'value' entries decide equality
        return self._data_store['value'] == other._data_store['value']
class ModelNormal(OpenApiModel):
    """Parent class of generated models whose OpenAPI type is ``object``."""

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            # bookkeeping attributes live directly on the instance dict
            self.__dict__[name] = value
        else:
            # everything else is validated and stored in _data_store
            self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name not in self:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [part for part in [self._path_to_item, name] if part]
            )
        return self.get(name)

    def __contains__(self, name):
        """used by `in` operator to check if an attrbute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False
        mine, theirs = self._data_store, other._data_store
        if set(mine) != set(theirs):
            return False
        # every shared property must compare equal
        return all(mine[key] == theirs[key] for key in mine)
class ModelComposed(OpenApiModel):
    """the parent class of models whose type == object in their
    swagger/openapi and have oneOf/allOf/anyOf

    When one sets a property we use var_name_to_model_instances to store the value in
    the correct class instances + run any type checking + validation code.
    When one gets a property we use var_name_to_model_instances to get the value
    from the correct class instances.
    This allows multiple composed schemas to contain the same property with additive
    constraints on the value.

    _composed_schemas (dict) stores the anyOf/allOf/oneOf classes
    key (str): allOf/oneOf/anyOf
    value (list): the classes in the XOf definition.
        Note: none_type can be included when the openapi document version >= 3.1.0
    _composed_instances (list): stores a list of instances of the composed schemas
    defined in _composed_schemas. When properties are accessed in the self instance,
    they are returned from the self._data_store or the data stores in the instances
    in self._composed_schemas
    _var_name_to_model_instances (dict): maps between a variable name on self and
    the composed instances (self included) which contain that data
    key (str): property name
    value (list): list of class instances, self or instances in _composed_instances
    which contain the value that the key is referring to.
    """

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return

        # Use cases:
        # 1. additional_properties_type is None (additionalProperties == False
        #    in the spec): the property must be declared in openapi_types,
        #    otherwise raise; when declared, set on self AND on every
        #    composed-schema instance.
        # 2. additional_properties_type exists: set the attribute on self and
        #    on every composed-schema instance unconditionally.
        if self.additional_properties_type is None:
            # For an attribute to exist on a composed schema it must:
            # - fulfill schema_requirements in the self composed schema, not
            #   considering oneOf/anyOf/allOf schemas, AND
            # - fulfill schema_requirements in each oneOf/anyOf/allOf schema.
            # schema_requirements: the attribute must either be present in
            # `properties`, or additionalProperties must be unset (defaults
            # to any type) or set.
            if name not in self.openapi_types:
                raise ApiAttributeError(
                    "{0} has no attribute '{1}'".format(
                        type(self).__name__, name),
                    [e for e in [self._path_to_item, name] if e]
                )
        # attribute must be set on self and composed instances
        self.set_attribute(name, value)
        for model_instance in self._composed_instances:
            setattr(model_instance, name, value)
        if name not in self._var_name_to_model_instances:
            # we assigned an additional property
            self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
        return None

    # Sentinel distinguishing "attribute was never set" from a stored None.
    __unset_attribute_value__ = object()

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]

        # get the attribute from the correct instance
        model_instances = self._var_name_to_model_instances.get(name)
        values = []
        # A composed model stores self and child (oneof/anyOf/allOf) models under
        # self._var_name_to_model_instances.
        # Any property must exist in self and all model instances.
        # The value stored in all model instances must be the same.
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    v = model_instance._data_store[name]
                    if v not in values:
                        values.append(v)
        len_values = len(values)
        if len_values == 0:
            return default
        elif len_values == 1:
            return values[0]
        elif len_values > 1:
            raise ApiValueError(
                "Values stored for property {0} in {1} differ when looking "
                "at self and self's composed instances. All values must be "
                "the same".format(name, type(self).__name__),
                [e for e in [self._path_to_item, name] if e]
            )

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        value = self.get(name, self.__unset_attribute_value__)
        if value is self.__unset_attribute_value__:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [e for e in [self._path_to_item, name] if e]
            )
        return value

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__

        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)

        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    return True

        return False

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False

        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            # Removed: a `types` set was built per property and never used
            # (dead code). Each property's own `==` decides equality.
            if not this_val == that_val:
                return False
        return True
# Priority rank used when ordering candidate target types for coercion:
# a lower index is tried earlier (most specific / model types first,
# primitives and file last). See order_response_types().
COERCION_INDEX_BY_TYPE = {
    ModelComposed: 0,
    ModelNormal: 1,
    ModelSimple: 2,
    none_type: 3,  # The type of 'None'.
    list: 4,
    dict: 5,
    float: 6,
    int: 7,
    bool: 8,
    datetime: 9,
    date: 10,
    str: 11,
    file_type: 12,  # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
# (fixed: `(list, ModelComposed)` was listed twice; the duplicate is
# removed — membership tests are unaffected)
UPCONVERSION_TYPE_PAIRS = (
    (str, datetime),
    (str, date),
    (int, float),  # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
    (list, ModelComposed),
    (dict, ModelComposed),
    (str, ModelComposed),
    (int, ModelComposed),
    (float, ModelComposed),
    (list, ModelNormal),
    (dict, ModelNormal),
    (str, ModelSimple),
    (int, ModelSimple),
    (float, ModelSimple),
    (list, ModelSimple),
)
# Type-conversion pairs permitted when the input is NOT already one of the
# valid types and must be coerced (see remove_uncoercible / attempt_convert_item).
# Keyed by spec_property_naming: False = client-side instantiation with python
# data, True = data received from the server with serialized names, where
# model/date/file coercions are allowed.
COERCIBLE_TYPE_PAIRS = {
    False: (  # client instantiation of a model with client data
        # (dict, ModelComposed),
        # (list, ModelComposed),
        # (dict, ModelNormal),
        # (list, ModelNormal),
        # (str, ModelSimple),
        # (int, ModelSimple),
        # (float, ModelSimple),
        # (list, ModelSimple),
        # (str, int),
        # (str, float),
        # (str, datetime),
        # (str, date),
        # (int, str),
        # (float, str),
    ),
    True: (  # server -> client data
        (dict, ModelComposed),
        (list, ModelComposed),
        (dict, ModelNormal),
        (list, ModelNormal),
        (str, ModelSimple),
        (int, ModelSimple),
        (float, ModelSimple),
        (list, ModelSimple),
        # (str, int),
        # (str, float),
        (str, datetime),
        (str, date),
        # (int, str),
        # (float, str),
        (str, file_type)
    ),
}
def get_simple_class(input_value):
    """Return the simplified class of *input_value* used for type checking.

    Classes are returned unchanged; instances are mapped to the most
    specific builtin/base class this module cares about.

    Args:
        input_value (class/class_instance): the item for which we will return
            the simple class
    """
    if isinstance(input_value, type):
        # input_value is already a class: use it as-is.
        return input_value
    # Ordered most-specific-first: bool must precede int and datetime must
    # precede date, because isinstance(True, int) and
    # isinstance(datetime_instance, date) are both True.
    for candidate in (tuple, list, dict, none_type, file_type, bool, int,
                      datetime, date, str):
        if isinstance(input_value, candidate):
            return candidate
    return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
    """Raises an exception if the input_values are not allowed

    Args:
        allowed_values (dict): the allowed_values dict
        input_variable_path (tuple): the path to the input variable
        input_values (list/str/int/float/date/datetime): the values that we
            are checking to see if they are in allowed_values

    Raises:
        ApiValueError: if any input value is not in the allowed set.
    """
    these_allowed_values = list(allowed_values[input_variable_path].values())
    if (isinstance(input_values, list)
            and not set(input_values).issubset(
                set(these_allowed_values))):
        # FIX: a stray trailing comma previously made invalid_values a
        # 1-tuple, garbling the formatted error message below.
        invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid values for `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (isinstance(input_values, dict)
            and not set(
                input_values.keys()).issubset(set(these_allowed_values))):
        invalid_values = ", ".join(
            map(str, set(input_values.keys()) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid keys in `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (not isinstance(input_values, (list, dict))
            and input_values not in these_allowed_values):
        raise ApiValueError(
            "Invalid value for `%s` (%s), must be one of %s" %
            (
                input_variable_path[0],
                input_values,
                these_allowed_values
            )
        )
def is_json_validation_enabled(schema_keyword, configuration=None):
    """Returns true if JSON schema validation is enabled for the specified
    validation keyword. This can be used to skip JSON schema structural validation
    as requested in the configuration.

    Args:
        schema_keyword (string): the name of a JSON schema validation keyword.
        configuration (Configuration): the configuration class.
    """
    # With no configuration, or one that declares no disabled validations,
    # every keyword is considered enabled.
    if configuration is None or not hasattr(
            configuration, '_disabled_client_side_validations'):
        return True
    return schema_keyword not in configuration._disabled_client_side_validations
def check_validations(
        validations, input_variable_path, input_values,
        configuration=None):
    """Raises an exception if the input_values are invalid

    Args:
        validations (dict): the validation dictionary.
        input_variable_path (tuple): the path to the input variable.
        input_values (list/str/int/float/date/datetime): the values that we
            are checking.
        configuration (Configuration): the configuration class.

    Raises:
        ApiValueError: if any enabled validation constraint is violated.
    """
    if input_values is None:
        return

    current_validations = validations[input_variable_path]
    if (is_json_validation_enabled('multipleOf', configuration) and
            'multiple_of' in current_validations and
            isinstance(input_values, (int, float)) and
            not (float(input_values) / current_validations['multiple_of']).is_integer()):
        # Note 'multipleOf' will be as good as the floating point arithmetic.
        raise ApiValueError(
            "Invalid value for `%s`, value must be a multiple of "
            "`%s`" % (
                input_variable_path[0],
                current_validations['multiple_of']
            )
        )

    if (is_json_validation_enabled('maxLength', configuration) and
            'max_length' in current_validations and
            len(input_values) > current_validations['max_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['max_length']
            )
        )

    if (is_json_validation_enabled('minLength', configuration) and
            'min_length' in current_validations and
            len(input_values) < current_validations['min_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be greater than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['min_length']
            )
        )

    if (is_json_validation_enabled('maxItems', configuration) and
            'max_items' in current_validations and
            len(input_values) > current_validations['max_items']):
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be less than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['max_items']
            )
        )

    if (is_json_validation_enabled('minItems', configuration) and
            'min_items' in current_validations and
            len(input_values) < current_validations['min_items']):
        # FIX: previously raised a bare ValueError, inconsistent with every
        # other constraint in this function. ApiValueError subclasses
        # ValueError, so callers catching ValueError are still satisfied.
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be greater than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['min_items']
            )
        )

    items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
             'inclusive_minimum')
    if (any(item in current_validations for item in items)):
        # Reduce collections to their extreme values so a single comparison
        # covers every element.
        if isinstance(input_values, list):
            max_val = max(input_values)
            min_val = min(input_values)
        elif isinstance(input_values, dict):
            max_val = max(input_values.values())
            min_val = min(input_values.values())
        else:
            max_val = input_values
            min_val = input_values

        if (is_json_validation_enabled('exclusiveMaximum', configuration) and
                'exclusive_maximum' in current_validations and
                max_val >= current_validations['exclusive_maximum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value less than `%s`" % (
                    input_variable_path[0],
                    current_validations['exclusive_maximum']
                )
            )

        if (is_json_validation_enabled('maximum', configuration) and
                'inclusive_maximum' in current_validations and
                max_val > current_validations['inclusive_maximum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value less than or equal to "
                "`%s`" % (
                    input_variable_path[0],
                    current_validations['inclusive_maximum']
                )
            )

        if (is_json_validation_enabled('exclusiveMinimum', configuration) and
                'exclusive_minimum' in current_validations and
                min_val <= current_validations['exclusive_minimum']):
            # FIX: the message previously printed
            # current_validations['exclusive_maximum'], reporting the wrong
            # bound to the user.
            raise ApiValueError(
                "Invalid value for `%s`, must be a value greater than `%s`" %
                (
                    input_variable_path[0],
                    current_validations['exclusive_minimum']
                )
            )

        if (is_json_validation_enabled('minimum', configuration) and
                'inclusive_minimum' in current_validations and
                min_val < current_validations['inclusive_minimum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value greater than or equal "
                "to `%s`" % (
                    input_variable_path[0],
                    current_validations['inclusive_minimum']
                )
            )
    flags = current_validations.get('regex', {}).get('flags', 0)
    if (is_json_validation_enabled('pattern', configuration) and
            'regex' in current_validations and
            not re.search(current_validations['regex']['pattern'],
                          input_values, flags=flags)):
        err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
            input_variable_path[0],
            current_validations['regex']['pattern']
        )
        if flags != 0:
            # Don't print the regex flags if the flags are not
            # specified in the OAS document.
            err_msg = r"%s with flags=`%s`" % (err_msg, flags)
        raise ApiValueError(err_msg)
def order_response_types(required_types):
    """Returns the required types sorted in coercion order

    Args:
        required_types (list/tuple): collection of classes or instance of
            list or dict with class information inside it.

    Returns:
        (list): coercion order sorted collection of classes or instance
            of list or dict with class information inside it.
    """

    def index_getter(class_or_instance):
        """Map a class (or list/dict instance) to its COERCION_INDEX_BY_TYPE rank."""
        if isinstance(class_or_instance, list):
            return COERCION_INDEX_BY_TYPE[list]
        elif isinstance(class_or_instance, dict):
            return COERCION_INDEX_BY_TYPE[dict]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelComposed)):
            return COERCION_INDEX_BY_TYPE[ModelComposed]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelNormal)):
            return COERCION_INDEX_BY_TYPE[ModelNormal]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelSimple)):
            return COERCION_INDEX_BY_TYPE[ModelSimple]
        elif class_or_instance in COERCION_INDEX_BY_TYPE:
            return COERCION_INDEX_BY_TYPE[class_or_instance]
        raise ApiValueError("Unsupported type: %s" % class_or_instance)

    # FIX: pass index_getter directly as the sort key; wrapping it in a
    # lambda added an extra call per element and no behavior.
    return sorted(required_types, key=index_getter)
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
                       must_convert=True):
    """Only keeps the type conversions that are possible

    Args:
        required_types_classes (tuple): tuple of classes that are required
                             these should be ordered by COERCION_INDEX_BY_TYPE
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        current_item (any): the current item (input data) to be converted

    Keyword Args:
        must_convert (bool): if True the item to convert is of the wrong
                             type and we want a big list of coercibles
                             if False, we want a limited list of coercibles

    Returns:
        (list): the remaining coercible required types, classes only
    """
    current_type_simple = get_simple_class(current_item)

    def _simplify(klass):
        # Collapse concrete generated model classes onto their
        # OpenApiModel base category for pair lookups.
        if isinstance(klass, type):
            for base in (ModelComposed, ModelNormal, ModelSimple):
                if issubclass(klass, base):
                    return base
        return klass

    results_classes = []
    for required_type_class in required_types_classes:
        simplified = _simplify(required_type_class)
        if simplified == current_type_simple:
            # don't consider converting to one's own class
            continue
        class_pair = (current_type_simple, simplified)
        coercible = (must_convert
                     and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming])
        if coercible or class_pair in UPCONVERSION_TYPE_PAIRS:
            results_classes.append(required_type_class)
    return results_classes
def get_discriminated_classes(cls):
    """
    Returns all the classes that a discriminator converts to
    TODO: lru_cache this
    """
    discr_property = list(cls.discriminator.keys())[0]
    # A nullable schema is itself a legal deserialization target.
    possible_classes = [cls] if is_type_nullable(cls) else []
    for mapped_cls in cls.discriminator[discr_property].values():
        if getattr(mapped_cls, 'discriminator', None) is not None:
            # Recurse through chained discriminators.
            possible_classes.extend(get_discriminated_classes(mapped_cls))
        else:
            possible_classes.append(mapped_cls)
    return possible_classes
def get_possible_classes(cls, from_server_context):
    """Return the classes that *cls* may deserialize to.

    Server-side data keeps only the declared class; client-side input also
    admits discriminator targets and composed-schema constituents.
    TODO: lru_cache this
    """
    if from_server_context:
        return [cls]
    if getattr(cls, 'discriminator', None) is not None:
        # Discriminated schema: only the mapped target classes apply.
        return get_discriminated_classes(cls)
    possible_classes = [cls]
    if issubclass(cls, ModelComposed):
        possible_classes.extend(composed_model_input_classes(cls))
    return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
    """Converts the tuple required_types into a tuple and a dict described
    below

    Args:
        required_types_mixed (tuple/list): will contain either classes or
            instance of list or dict
        spec_property_naming (bool): if True these values came from the
            server, and we use the data types in our endpoints.
            If False, we are client side and we need to include
            oneOf and discriminator classes inside the data types in our endpoints

    Returns:
        (valid_classes, dict_valid_class_to_child_types_mixed):
            valid_classes (tuple): the valid classes that the current item
                                   should be
            dict_valid_class_to_child_types_mixed (dict):
                valid_class (class): this is the key
                child_types_mixed (list/dict/tuple): describes the valid child
                    types
    """
    valid_classes = []
    child_req_types_by_current_type = {}
    for required_type in required_types_mixed:
        # Container instances carry their child-type spec; classes expand to
        # every class they may deserialize to.
        container_cls = None
        for candidate in (list, tuple, dict):
            if isinstance(required_type, candidate):
                container_cls = candidate
                break
        if container_cls is None:
            valid_classes.extend(
                get_possible_classes(required_type, spec_property_naming))
            continue
        valid_classes.append(container_cls)
        if container_cls is dict:
            # dict specs map str keys to the child type tuple.
            child_req_types_by_current_type[dict] = required_type[str]
        else:
            child_req_types_by_current_type[container_cls] = required_type
    return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
    """
    Converts from javascript_key keys in the input_dict to python_keys in
    the output dict using the mapping in model_class.
    If the input_dict contains a key which is not declared in the model_class,
    the key is added to the output dict as is. The assumption is the model_class
    may have undeclared properties (additionalProperties attribute in the OAS
    document).
    """
    attribute_map = getattr(model_class, 'attribute_map', None)
    if attribute_map is None:
        return input_dict
    # Invert python_key -> javascript_key into a javascript -> python lookup.
    js_to_python = {js_name: py_name
                    for py_name, js_name in attribute_map.items()}
    # Unknown keys pass through unchanged (possible additionalProperties).
    return {
        js_to_python.get(js_key, js_key): value
        for js_key, value in input_dict.items()
    }
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
    """Build an ApiTypeError describing a type mismatch at *path_to_item*."""
    # The last path element is the offending variable's name.
    message = type_error_message(
        var_name=path_to_item[-1],
        var_value=var_value,
        valid_classes=valid_classes,
        key_type=key_type
    )
    return ApiTypeError(
        message,
        path_to_item=path_to_item,
        valid_classes=valid_classes,
        key_type=key_type
    )
def deserialize_primitive(data, klass, path_to_item):
    """Deserializes string to primitive type.

    :param data: str/int/float
    :param klass: str/class the class to convert to

    :return: int, float, str, bool, date, datetime
    """
    additional_message = ""
    try:
        if klass in {datetime, date}:
            additional_message = (
                "If you need your parameter to have a fallback "
                "string value, please set its type as `type: {}` in your "
                "spec. That allows the value to be any type. "
            )
            if klass == datetime:
                if len(data) < 8:
                    raise ValueError("This is not a datetime")
                # The string should be in iso8601 datetime format.
                parsed_datetime = parse(data)
                # A short string parsing to midnight with no timezone is
                # really a date, not a datetime.
                looks_like_plain_date = (
                    8 <= len(data) <= 10
                    and parsed_datetime.hour == 0
                    and parsed_datetime.minute == 0
                    and parsed_datetime.second == 0
                    and parsed_datetime.tzinfo is None
                )
                if looks_like_plain_date:
                    raise ValueError("This is a date, not a datetime")
                return parsed_datetime
            if len(data) < 8:
                raise ValueError("This is not a date")
            return parse(data).date()
        converted_value = klass(data)
        if klass == float and isinstance(data, str):
            # Reject strings whose round-trip changes,
            # e.g. '7' -> 7.0 -> '7.0' != '7'
            if str(converted_value) != data:
                raise ValueError('This is not a float')
        return converted_value
    except (OverflowError, ValueError) as ex:
        # parse can raise OverflowError
        raise ApiValueError(
            "{0}Failed to parse {1} as {2}".format(
                additional_message, repr(data), klass.__name__
            ),
            path_to_item=path_to_item
        ) from ex
def get_discriminator_class(model_class,
                            discr_name,
                            discr_value, cls_visited):
    """Returns the child class specified by the discriminator.

    Args:
        model_class (OpenApiModel): the model class.
        discr_name (string): the name of the discriminator property.
        discr_value (any): the discriminator value.
        cls_visited (list): list of model classes that have been visited.
            Used to determine the discriminator class without
            visiting circular references indefinitely.

    Returns:
        used_model_class (class/None): the chosen child class that will be used
            to deserialize the data, for example dog.Dog.
            If a class is not found, None is returned.
    """
    if model_class in cls_visited:
        # The class has already been visited and no suitable class was found.
        return None
    cls_visited.append(model_class)

    if discr_name in model_class.discriminator:
        mapped = model_class.discriminator[discr_name].get(discr_value)
        if mapped is not None:
            return mapped
    # No direct mapping here; the discriminator mapping may instead live in
    # a descendant (anyOf, oneOf) or ancestor (allOf) schema.
    # Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
    # hierarchy, the discriminator mappings may be defined at any level.
    # Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig;
    # making BasquePig from mammal requires walking the oneOf descendants.
    composed = model_class._composed_schemas
    relatives = (composed.get('oneOf', ()) + composed.get('anyOf', ())
                 + composed.get('allOf', ()))
    for relative_cls in relatives:
        # Only schemas that themselves declare a discriminator can help.
        if getattr(relative_cls, 'discriminator', None) is None:
            continue
        found = get_discriminator_class(
            relative_cls, discr_name, discr_value, cls_visited)
        if found is not None:
            return found
    return None
def deserialize_model(model_data, model_class, path_to_item, check_type,
                      configuration, spec_property_naming):
    """Deserializes model_data to model instance.

    Args:
        model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
        model_class (OpenApiModel): the model class
        path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
            the model
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.

    Returns:
        model instance

    Raise:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    kw_args = dict(_check_type=check_type,
                   _path_to_item=path_to_item,
                   _configuration=configuration,
                   _spec_property_naming=spec_property_naming)

    if issubclass(model_class, ModelSimple):
        # A simple model wraps the raw value directly.
        return model_class._new_from_openapi_data(model_data, **kw_args)
    if isinstance(model_data, list):
        # Positional data: splat the list entries into the constructor.
        return model_class._new_from_openapi_data(*model_data, **kw_args)
    if isinstance(model_data, dict):
        kw_args.update(model_data)
        return model_class._new_from_openapi_data(**kw_args)
    if isinstance(model_data, PRIMITIVE_TYPES):
        return model_class._new_from_openapi_data(model_data, **kw_args)
    # NOTE: falls through (implicitly returning None) for unhandled input
    # kinds, e.g. None model_data on a non-ModelSimple class — this mirrors
    # the original behavior.
def deserialize_file(response_data, configuration, content_disposition=None):
    """Deserializes body to file

    Saves response body into a file in a temporary folder,
    using the filename from the `Content-Disposition` header if provided.

    Args:
        response_data (str/bytes): the file data to write
        configuration (Configuration): the instance to use to convert files

    Keyword Args:
        content_disposition (str): the value of the Content-Disposition
            header

    Returns:
        (file_type): the deserialized file which is open
            The user is responsible for closing and reading the file
    """
    fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
    os.close(fd)
    os.remove(path)

    if content_disposition:
        match = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                          content_disposition)
        # FIX: a header without a filename= token previously raised
        # AttributeError (.group on None); now we keep the random temp name.
        if match:
            # FIX: basename() guards against path traversal via an
            # attacker-controlled Content-Disposition header
            # (e.g. filename="../../etc/cron.d/x").
            filename = os.path.basename(match.group(1))
            if filename:
                path = os.path.join(os.path.dirname(path), filename)

    with open(path, "wb") as f:
        if isinstance(response_data, str):
            # change str to bytes so we can write it
            response_data = response_data.encode('utf-8')
        f.write(response_data)

    f = open(path, "rb")
    return f
def attempt_convert_item(input_value, valid_classes, path_to_item,
                         configuration, spec_property_naming, key_type=False,
                         must_convert=False, check_type=True):
    """Try to convert input_value into one of valid_classes, most-specific
    target first; return the converted instance, or the original input_value
    when no conversion succeeds and must_convert is False.

    Args:
        input_value (any): the data to convert
        valid_classes (any): the classes that are valid
        path_to_item (list): the path to the item to convert
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        key_type (bool): if True we need to convert a key type (not supported)
        must_convert (bool): if True we must convert
        check_type (bool): if True we check the type or the returned data in
            ModelComposed/ModelNormal/ModelSimple instances

    Returns:
        instance (any) the fixed item

    Raises:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    # Order candidates so model classes are tried before primitives, then
    # drop targets we cannot coerce this input into at all.
    valid_classes_ordered = order_response_types(valid_classes)
    valid_classes_coercible = remove_uncoercible(
        valid_classes_ordered, input_value, spec_property_naming)
    if not valid_classes_coercible or key_type:
        # we do not handle keytype errors, json will take care
        # of this for us
        if configuration is None or not configuration.discard_unknown_keys:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=key_type)
        # NOTE(review): when discard_unknown_keys is set we deliberately fall
        # through without raising; the loop below is then a no-op (empty
        # candidate list) and the original input_value is returned unchanged.
    for valid_class in valid_classes_coercible:
        try:
            # Dispatch on target kind: generated model, file, or primitive.
            if issubclass(valid_class, OpenApiModel):
                return deserialize_model(input_value, valid_class,
                                         path_to_item, check_type,
                                         configuration, spec_property_naming)
            elif valid_class == file_type:
                return deserialize_file(input_value, configuration)
            return deserialize_primitive(input_value, valid_class,
                                         path_to_item)
        except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
            if must_convert:
                raise conversion_exc
            # if we have conversion errors when must_convert == False
            # we ignore the exception and move on to the next class
            continue
    # we were unable to convert, must_convert == False
    return input_value
def is_type_nullable(input_type):
    """
    Returns true if None is an allowed value for the specified input_type.

    A type is nullable if at least one of the following conditions is true:
    1. The OAS 'nullable' attribute has been specified,
    1. The type is the 'null' type,
    1. The type is a anyOf/oneOf composed schema, and a child schema is
       the 'null' type.

    Args:
        input_type (type): the class of the input_value that we are
            checking

    Returns:
        bool
    """
    if input_type is none_type:
        return True
    if issubclass(input_type, OpenApiModel) and input_type._nullable:
        return True
    if issubclass(input_type, ModelComposed):
        # If oneOf/anyOf, check if the 'null' type is one of the allowed types.
        schemas = input_type._composed_schemas
        children = tuple(schemas.get('oneOf', ())) + tuple(schemas.get('anyOf', ()))
        return any(is_type_nullable(child) for child in children)
    return False
def is_valid_type(input_class_simple, valid_classes):
    """
    Args:
        input_class_simple (class): the class of the input_value that we are
            checking
        valid_classes (tuple): the valid classes that the current item
            should be

    Returns:
        bool
    """
    if input_class_simple in valid_classes:
        return True
    # Only model classes and the null type can still match indirectly
    # (via nullability or discriminator mappings).
    if (input_class_simple is not none_type
            and not issubclass(input_class_simple, OpenApiModel)):
        return False
    for valid_class in valid_classes:
        if input_class_simple is none_type and is_type_nullable(valid_class):
            # Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
            return True
        if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
            continue
        discr_propertyname_py = list(valid_class.discriminator.keys())[0]
        discriminator_classes = (
            valid_class.discriminator[discr_propertyname_py].values()
        )
        if is_valid_type(input_class_simple, discriminator_classes):
            return True
    return False
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Raises a TypeError is there is a problem, otherwise returns value

    Note: lists and dicts are converted IN PLACE (their elements are replaced
    with the converted values) and the same container object is returned.

    Args:
        input_value (any): the data to validate/convert
        required_types_mixed (list/dict/tuple): A list of
            valid classes, or a list tuples of valid classes, or a dict where
            the value is a tuple of value classes
        path_to_item: (list) the path to the data being validated
            this stores a list of keys or indices to get to the data being
            validated
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        _check_type: (boolean) if true, type will be checked and conversion
            will be attempted.
        configuration: (Configuration): the configuration class to use
            when converting file_type items.
            If passed, conversion will be attempted when possible
            If not passed, no conversions will be attempted and
            exceptions will be raised

    Returns:
        the correctly typed value

    Raises:
        ApiTypeError
    """
    results = get_required_type_classes(required_types_mixed, spec_property_naming)
    valid_classes, child_req_types_by_current_type = results
    input_class_simple = get_simple_class(input_value)
    valid_type = is_valid_type(input_class_simple, valid_classes)
    if not valid_type:
        if configuration:
            # if input_value is not valid_type try to convert it
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=True,
                check_type=_check_type
            )
            return converted_instance
        else:
            # without a configuration no conversion is attempted
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=False)
    # input_value's type is in valid_classes
    if len(valid_classes) > 1 and configuration:
        # there are valid classes which are not the current class
        valid_classes_coercible = remove_uncoercible(
            valid_classes, input_value, spec_property_naming, must_convert=False)
        if valid_classes_coercible:
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes_coercible,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=False,
                check_type=_check_type
            )
            return converted_instance
    if child_req_types_by_current_type == {}:
        # all types are of the required types and there are no more inner
        # variables left to look at
        return input_value
    inner_required_types = child_req_types_by_current_type.get(
        type(input_value)
    )
    if inner_required_types is None:
        # for this type, there are not more inner variables left to look at
        return input_value
    if isinstance(input_value, list):
        if input_value == []:
            # allow an empty list
            return input_value
        # recurse into each element; the list is mutated in place
        for index, inner_value in enumerate(input_value):
            inner_path = list(path_to_item)
            inner_path.append(index)
            input_value[index] = validate_and_convert_types(
                inner_value,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    elif isinstance(input_value, dict):
        if input_value == {}:
            # allow an empty dict
            return input_value
        # recurse into each value; keys must be strings, dict mutated in place
        for inner_key, inner_val in input_value.items():
            inner_path = list(path_to_item)
            inner_path.append(inner_key)
            if get_simple_class(inner_key) != str:
                raise get_type_error(inner_key, inner_path, valid_classes,
                                     key_type=True)
            input_value[inner_key] = validate_and_convert_types(
                inner_val,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    return input_value
def model_to_dict(model_instance, serialize=True):
    """Returns the model properties as a dict

    Args:
        model_instance (one of your model instances): the model instance that
            will be converted to a dict.

    Keyword Args:
        serialize (bool): if True, the keys in the dict will be values from
            attribute_map (the serialized JSON names); if False, python
            attribute names are used.
    """
    result = {}
    # A composed model stores data on itself and on its composed instances;
    # walk all of them so every assigned property is captured.
    model_instances = [model_instance]
    if model_instance._composed_schemas:
        model_instances.extend(model_instance._composed_instances)
    seen_json_attribute_names = set()
    used_fallback_python_attribute_names = set()
    py_to_json_map = {}
    for model_instance in model_instances:
        for attr, value in model_instance._data_store.items():
            if serialize:
                # we use get here because additional property key names do not
                # exist in attribute_map
                try:
                    attr = model_instance.attribute_map[attr]
                    py_to_json_map.update(model_instance.attribute_map)
                    seen_json_attribute_names.add(attr)
                except KeyError:
                    used_fallback_python_attribute_names.add(attr)
            if isinstance(value, list):
                if not value:
                    # empty list or None
                    result[attr] = value
                else:
                    res = []
                    for v in value:
                        if isinstance(v, PRIMITIVE_TYPES) or v is None:
                            res.append(v)
                        elif isinstance(v, ModelSimple):
                            res.append(v.value)
                        else:
                            res.append(model_to_dict(v, serialize=serialize))
                    result[attr] = res
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0],
                                  model_to_dict(item[1], serialize=serialize))
                    if hasattr(item[1], '_data_store') else item,
                    value.items()
                ))
            elif isinstance(value, ModelSimple):
                result[attr] = value.value
            elif hasattr(value, '_data_store'):
                result[attr] = model_to_dict(value, serialize=serialize)
            else:
                result[attr] = value
    if serialize:
        # If a python-named entry had no attribute_map mapping at emit time
        # but its JSON-named twin was also emitted, drop the redundant
        # python-named entry.
        for python_key in used_fallback_python_attribute_names:
            json_key = py_to_json_map.get(python_key)
            if json_key is None:
                continue
            if python_key == json_key:
                continue
            json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
            if json_key_assigned_no_need_for_python_key:
                del result[python_key]
    return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
                       key_type=None):
    """Build the human-readable message for a type error.

    Keyword Args:
        var_value (any): the variable which has the type_error
        var_name (str): the name of the variable which has the typ error
        valid_classes (tuple): the accepted classes for current_item's
            value
        key_type (bool): True if the offending item is a dict key,
            False if it is a dict value or a list item
    """
    key_or_value = 'key' if key_type else 'value'
    return (
        "Invalid type for variable '{0}'. Required {1} type {2} and "
        "passed type was {3}".format(
            var_name,
            key_or_value,
            get_valid_classes_phrase(valid_classes),
            type(var_value).__name__,
        )
    )


def get_valid_classes_phrase(input_classes):
    """Returns a string phrase describing what types are allowed
    """
    # Sorting the names directly is equivalent to sorting the classes by name.
    names = sorted(cls.__name__ for cls in input_classes)
    if len(names) == 1:
        return 'is {0}'.format(names[0])
    return "is one of [{0}]".format(", ".join(names))
def get_allof_instances(self, model_args, constant_args):
    """
    Instantiate every allOf child schema with the given args.

    Unlike oneOf/anyOf, every allOf schema MUST accept the input data, so
    any construction failure is re-raised as ApiValueError.

    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            used to make instances
        constant_args (dict):
            metadata arguments:
            _check_type
            _path_to_item
            _spec_property_naming
            _configuration
            _visited_composed_classes

    Returns
        composed_instances (list)
    """
    composed_instances = []
    for allof_class in self._composed_schemas['allOf']:
        try:
            allof_instance = allof_class(**model_args, **constant_args)
            composed_instances.append(allof_instance)
        except Exception as ex:
            # every allOf schema must validate; surface the failure with context
            raise ApiValueError(
                "Invalid inputs given to generate an instance of '%s'. The "
                "input data was invalid for the allOf schema '%s' in the composed "
                "schema '%s'. Error=%s" % (
                    allof_class.__name__,
                    allof_class.__name__,
                    self.__class__.__name__,
                    str(ex)
                )
            ) from ex
    return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
    """
    Find the oneOf schema that matches the input data (e.g. payload).
    If exactly one schema matches the input data, an instance of that schema
    is returned.
    If zero or more than one schema match the input data, an exception is raised.
    In OAS 3.x, the payload MUST, by validation, match exactly one of the
    schemas described by oneOf.

    Args:
        cls: the class we are handling
        model_kwargs (dict): var_name to var_value
            The input data, e.g. the payload that must match a oneOf schema
            in the OpenAPI document.
        constant_kwargs (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Kwargs:
        model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
            the value to assign to a primitive class or ModelSimple class
            Notes:
            - this is only passed in when oneOf includes types which are not object
            - None is used to suppress handling of model_arg, nullable models are handled in __new__

    Returns
        oneof_instance (instance)
    """
    if len(cls._composed_schemas['oneOf']) == 0:
        return None
    oneof_instances = []
    # Iterate over each oneOf schema and determine if the input data
    # matches the oneOf schemas.
    for oneof_class in cls._composed_schemas['oneOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if oneof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        single_value_input = allows_single_value_input(oneof_class)
        try:
            if not single_value_input:
                # object-like schema: construct from the keyword payload
                oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
            else:
                if issubclass(oneof_class, ModelSimple):
                    oneof_instance = oneof_class(model_arg, **constant_kwargs)
                elif oneof_class in PRIMITIVE_TYPES:
                    oneof_instance = validate_and_convert_types(
                        model_arg,
                        (oneof_class,),
                        constant_kwargs['_path_to_item'],
                        constant_kwargs['_spec_property_naming'],
                        constant_kwargs['_check_type'],
                        configuration=constant_kwargs['_configuration']
                    )
            oneof_instances.append(oneof_instance)
        except Exception:
            # a non-matching schema is expected; keep trying the others
            pass
    if len(oneof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None "
            "of the oneOf schemas matched the input data." %
            cls.__name__
        )
    elif len(oneof_instances) > 1:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. Multiple "
            "oneOf schemas matched the inputs, but a max of one is allowed." %
            cls.__name__
        )
    return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
    """
    Instantiate every anyOf child schema that accepts the input data.

    At least one anyOf schema must match; otherwise ApiValueError is raised.

    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            The input data, e.g. the payload that must match at least one
            anyOf child schema in the OpenAPI document.
        constant_args (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Returns
        anyof_instances (list)
    """
    anyof_instances = []
    if len(self._composed_schemas['anyOf']) == 0:
        return anyof_instances
    for anyof_class in self._composed_schemas['anyOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if anyof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        try:
            anyof_instance = anyof_class(**model_args, **constant_args)
            anyof_instances.append(anyof_instance)
        except Exception:
            # a non-matching schema is expected; keep trying the others
            pass
    if len(anyof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None of the "
            "anyOf schemas matched the inputs." %
            self.__class__.__name__
        )
    return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
    """
    Gathers the args that were discarded by configuration.discard_unknown_keys
    """
    model_arg_keys = model_args.keys()
    discarded_args = set()
    # arguments passed to self were already converted to python names
    # before __init__ was called
    for instance in composed_instances:
        if instance.__class__ in self._composed_schemas['allOf']:
            try:
                keys = instance.to_dict().keys()
                # NOTE(review): `model_args` is a dict, and dict - dict_keys
                # raises TypeError, which the except below swallows. This
                # likely should be `model_arg_keys - keys` as in the else
                # branch -- verify against upstream before changing.
                discarded_keys = model_args - keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
        else:
            try:
                # consider both python and serialized (JSON) key names as known
                all_keys = set(model_to_dict(instance, serialize=False).keys())
                js_keys = model_to_dict(instance, serialize=True).keys()
                all_keys.update(js_keys)
                discarded_keys = model_arg_keys - all_keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
    return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
    """
    For composed schemas, generate schema instances for
    all schemas in the oneOf/anyOf/allOf definition. If additional
    properties are allowed, also assign those properties on
    all matched schemas that contain additionalProperties.
    Openapi schemas are python classes.

    Exceptions are raised if:
    - 0 or > 1 oneOf schema matches the model_args input data
    - no anyOf schema matches the model_args input data
    - any of the allOf schemas do not match the model_args input data

    Args:
        constant_args (dict): these are the args that every model requires
        model_args (dict): these are the required and optional spec args that
            were passed in to make this model
        self (class): the class that we are instantiating
            This class contains self._composed_schemas

    Returns:
        composed_info (list): length three
            composed_instances (list): the composed instances which are not
                self
            var_name_to_model_instances (dict): a dict going from var_name
                to the model_instance which holds that var_name
                the model_instance may be self or an instance of one of the
                classes in self.composed_instances()
            additional_properties_model_instances (list): a list of the
                model instances which have the property
                additional_properties_type. This list can include self
    """
    # create composed_instances
    composed_instances = []
    allof_instances = get_allof_instances(self, model_args, constant_args)
    composed_instances.extend(allof_instances)
    oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
    if oneof_instance is not None:
        composed_instances.append(oneof_instance)
    anyof_instances = get_anyof_instances(self, model_args, constant_args)
    composed_instances.extend(anyof_instances)
    """
    set additional_properties_model_instances
    additional properties must be evaluated at the schema level
    so self's additional properties are most important
    If self is a composed schema with:
    - no properties defined in self
    - additionalProperties: False
    Then for object payloads every property is an additional property
    and they are not allowed, so only empty dict is allowed
    Properties must be set on all matching schemas
    so when a property is assigned to a composed instance, it must be set on all
    composed instances regardless of additionalProperties presence
    keeping it to prevent breaking changes in v5.0.1
    TODO remove cls._additional_properties_model_instances in 6.0.0
    """
    additional_properties_model_instances = []
    if self.additional_properties_type is not None:
        additional_properties_model_instances = [self]
    """
    no need to set properties on self in here, they will be set in __init__
    By here all composed schema oneOf/anyOf/allOf instances have their properties set using
    model_args
    """
    discarded_args = get_discarded_args(self, composed_instances, model_args)
    # map variable names to composed_instances
    var_name_to_model_instances = {}
    for prop_name in model_args:
        if prop_name not in discarded_args:
            var_name_to_model_instances[prop_name] = [self] + composed_instances
    return [
        composed_instances,
        var_name_to_model_instances,
        additional_properties_model_instances,
        discarded_args
    ]
| 40.19418 | 115 | 0.628859 |
from datetime import date, datetime
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from ibutsu_client.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
none_type = type(None)
file_type = io.IOBase
def convert_js_args_to_python_args(fn):
    """Decorator translating OpenAPI (JSON) kwarg names to python names.

    The wrapped callable's kwargs are rewritten via change_keys_js_to_python
    only when the '_spec_property_naming' kwarg is truthy; otherwise they are
    passed through untouched.
    """
    from functools import wraps
    @wraps(fn)
    def wrapped_init(_self, *args, **kwargs):
        # Only serialized (spec) names need mapping to python attribute names.
        if kwargs.get('_spec_property_naming', False):
            owner = _self if isinstance(_self, type) else _self.__class__
            kwargs = change_keys_js_to_python(kwargs, owner)
        return fn(_self, *args, **kwargs)
    return wrapped_init
class cached_property(object):
    """Descriptor that calls a zero-argument callable once and memoizes it.

    The result is cached on the descriptor object itself (NOT on the owner
    instance), so every access through the same class attribute shares one
    value and the wrapped callable runs at most once.
    """
    # attribute name under which the memoized result is stored
    result_key = '_results'

    def __init__(self, fn):
        self._fn = fn

    def __get__(self, instance, cls=None):
        try:
            # cache hit: the result was stored on this descriptor earlier
            return vars(self)[self.result_key]
        except KeyError:
            computed = self._fn()
            setattr(self, self.result_key, computed)
            return computed
# All simple (non-model) python types that a spec value may deserialize to.
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
    """
    Returns True if the class (or, for a composed schema, any of its oneOf
    children, recursively) can be constructed from a single primitive value
    rather than a keyword payload.
    """
    if (
        issubclass(cls, ModelSimple) or
        cls in PRIMITIVE_TYPES
    ):
        return True
    elif issubclass(cls, ModelComposed):
        if not cls._composed_schemas['oneOf']:
            return False
        return any(allows_single_value_input(c) for c in cls._composed_schemas['oneOf'])
    return False
def composed_model_input_classes(cls):
    """
    Returns the list of concrete classes that a value for ``cls`` may be an
    instance of, expanding discriminated models and oneOf compositions.
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    elif issubclass(cls, ModelNormal):
        if cls.discriminator is None:
            return [cls]
        else:
            return get_discriminated_classes(cls)
    elif issubclass(cls, ModelComposed):
        if not cls._composed_schemas['oneOf']:
            return []
        if cls.discriminator is None:
            # flatten the input classes of every oneOf child
            input_classes = []
            for c in cls._composed_schemas['oneOf']:
                input_classes.extend(composed_model_input_classes(c))
            return input_classes
        else:
            return get_discriminated_classes(cls)
    return []
class OpenApiModel(object):
    """The base class for all OpenAPI model classes.

    Handles attribute validation/coercion (set_attribute) and, in __new__ /
    _new_from_openapi_data, discriminator-based selection of the concrete
    subclass to instantiate.
    """

    def set_attribute(self, name, value):
        # Validate, convert and store a single property in _data_store.
        path_to_item = []
        if self._path_to_item:
            path_to_item.extend(self._path_to_item)
        path_to_item.append(name)
        if name in self.openapi_types:
            required_types_mixed = self.openapi_types[name]
        elif self.additional_properties_type is None:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                path_to_item
            )
        elif self.additional_properties_type is not None:
            required_types_mixed = self.additional_properties_type
        if get_simple_class(name) != str:
            # property names must be strings
            error_msg = type_error_message(
                var_name=name,
                var_value=name,
                valid_classes=(str,),
                key_type=True
            )
            raise ApiTypeError(
                error_msg,
                path_to_item=path_to_item,
                valid_classes=(str,),
                key_type=True
            )
        if self._check_type:
            value = validate_and_convert_types(
                value, required_types_mixed, path_to_item, self._spec_property_naming,
                self._check_type, configuration=self._configuration)
        if (name,) in self.allowed_values:
            check_allowed_values(
                self.allowed_values,
                (name,),
                value
            )
        if (name,) in self.validations:
            check_validations(
                self.validations,
                (name,),
                value,
                self._configuration
            )
        # bypass __setattr__ to avoid recursion
        self.__dict__['_data_store'][name] = value

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other

    def __setattr__(self, attr, value):
        """set the value of an attribute"""
        self[attr] = value

    def __getattr__(self, attr):
        """returns the value of an attribute or raises ApiAttributeError"""
        return self.__getitem__(attr)

    def __new__(cls, *args, **kwargs):
        # this function uses the discriminator to
        # pick a new schema/class to instantiate because a discriminator
        # propertyName value was passed in
        if len(args) == 1:
            arg = args[0]
            if arg is None and is_type_nullable(cls):
                # The input data is the 'null' value and the type is nullable.
                return None
            if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
                model_kwargs = {}
                oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
                return oneof_instance
        visited_composed_classes = kwargs.get('_visited_composed_classes', ())
        if (
            cls.discriminator is None or
            cls in visited_composed_classes
        ):
            # No discriminator, or we already travelled through cls while
            # resolving a discriminator (e.g. Dog allOf-includes Animal: we
            # move through Animal once to pick Dog, then when building Dog's
            # composed instances we must actually instantiate Animal), so
            # instantiate cls itself.
            return super(OpenApiModel, cls).__new__(cls)
        # Get the name and value of the discriminator property.
        # The discriminator name is obtained from the discriminator meta-data
        # and the discriminator value is obtained from the input data.
        discr_propertyname_py = list(cls.discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if discr_propertyname_js in kwargs:
            discr_value = kwargs[discr_propertyname_js]
        elif discr_propertyname_py in kwargs:
            discr_value = kwargs[discr_propertyname_py]
        else:
            # The input data does not contain the discriminator property.
            path_to_item = kwargs.get('_path_to_item', ())
            raise ApiValueError(
                "Cannot deserialize input data due to missing discriminator. "
                "The discriminator property '%s' is missing at path: %s" %
                (discr_propertyname_js, path_to_item)
            )
        # Implementation note: the last argument to get_discriminator_class
        # is a list of visited classes. get_discriminator_class may recursively
        # call itself and update the list of visited classes, and the initial
        # value must be an empty list. Hence not using 'visited_composed_classes'
        new_cls = get_discriminator_class(
            cls, discr_propertyname_py, discr_value, [])
        if new_cls is None:
            path_to_item = kwargs.get('_path_to_item', ())
            disc_prop_value = kwargs.get(
                discr_propertyname_js, kwargs.get(discr_propertyname_py))
            raise ApiValueError(
                "Cannot deserialize input data due to invalid discriminator "
                "value. The OpenAPI document has no mapping for discriminator "
                "property '%s'='%s' at path: %s" %
                (discr_propertyname_js, disc_prop_value, path_to_item)
            )
        if new_cls in visited_composed_classes:
            # if we are making an instance of a composed schema Descendent
            # which allOf includes Ancestor, then Ancestor contains
            # a discriminator that includes Descendent.
            # So if we make an instance of Descendent, we have to make an
            # instance of Ancestor to hold the allOf properties.
            # This code detects that use case and makes the instance of Ancestor
            # For example:
            # When making an instance of Dog, _visited_composed_classes = (Dog,)
            # then we make an instance of Animal to include in dog._composed_instances
            # so when we are here, cls is Animal
            # cls.discriminator != None
            # cls not in _visited_composed_classes
            # new_cls = Dog
            # but we know we know that we already have Dog
            # because it is in visited_composed_classes
            # so make Animal here
            return super(OpenApiModel, cls).__new__(cls)
        # Build a list containing all oneOf and anyOf descendants.
        oneof_anyof_classes = None
        if cls._composed_schemas is not None:
            oneof_anyof_classes = (
                cls._composed_schemas.get('oneOf', ()) +
                cls._composed_schemas.get('anyOf', ()))
        oneof_anyof_child = new_cls in oneof_anyof_classes
        kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
        if cls._composed_schemas.get('allOf') and oneof_anyof_child:
            # Validate that we can make self because when we make the
            # new_cls it will not include the allOf validations in self
            self_inst = super(OpenApiModel, cls).__new__(cls)
            self_inst.__init__(*args, **kwargs)
        new_inst = new_cls.__new__(new_cls, *args, **kwargs)
        new_inst.__init__(*args, **kwargs)
        return new_inst

    @classmethod
    @convert_js_args_to_python_args
    def _new_from_openapi_data(cls, *args, **kwargs):
        # this function uses the discriminator to
        # pick a new schema/class to instantiate because a discriminator
        # propertyName value was passed in
        if len(args) == 1:
            arg = args[0]
            if arg is None and is_type_nullable(cls):
                # The input data is the 'null' value and the type is nullable.
                return None
            if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
                model_kwargs = {}
                oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
                return oneof_instance
        visited_composed_classes = kwargs.get('_visited_composed_classes', ())
        if (
            cls.discriminator is None or
            cls in visited_composed_classes
        ):
            # Use case 1: this openapi schema (cls) does not have a discriminator
            # Use case 2: we have already visited this class before and are sure that we
            # want to instantiate it this time. We have visited this class deserializing
            # a payload with a discriminator. During that process we traveled through
            # this class but did not make an instance of it. Now we are making an
            # instance of a composed class which contains cls in it, so this time make an instance of cls.
            return cls._from_openapi_data(*args, **kwargs)
        discr_propertyname_py = list(cls.discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if discr_propertyname_js in kwargs:
            discr_value = kwargs[discr_propertyname_js]
        elif discr_propertyname_py in kwargs:
            discr_value = kwargs[discr_propertyname_py]
        else:
            path_to_item = kwargs.get('_path_to_item', ())
            raise ApiValueError(
                "Cannot deserialize input data due to missing discriminator. "
                "The discriminator property '%s' is missing at path: %s" %
                (discr_propertyname_js, path_to_item)
            )
        new_cls = get_discriminator_class(
            cls, discr_propertyname_py, discr_value, [])
        if new_cls is None:
            path_to_item = kwargs.get('_path_to_item', ())
            disc_prop_value = kwargs.get(
                discr_propertyname_js, kwargs.get(discr_propertyname_py))
            raise ApiValueError(
                "Cannot deserialize input data due to invalid discriminator "
                "value. The OpenAPI document has no mapping for discriminator "
                "property '%s'='%s' at path: %s" %
                (discr_propertyname_js, disc_prop_value, path_to_item)
            )
        if new_cls in visited_composed_classes:
            return cls._from_openapi_data(*args, **kwargs)
        oneof_anyof_classes = None
        if cls._composed_schemas is not None:
            oneof_anyof_classes = (
                cls._composed_schemas.get('oneOf', ()) +
                cls._composed_schemas.get('anyOf', ()))
        oneof_anyof_child = new_cls in oneof_anyof_classes
        kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
        if cls._composed_schemas.get('allOf') and oneof_anyof_child:
            # Validate that we can make self because when we make the
            # new_cls it will not include the allOf validations in self
            self_inst = cls._from_openapi_data(*args, **kwargs)
        # BUG FIX: this assignment was indented under the `if` above, which
        # left `new_inst` unbound (UnboundLocalError on the return below)
        # whenever the allOf/oneOf-anyOf condition was false. It must always
        # run, mirroring __new__ above.
        new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
        return new_inst
class ModelSimple(OpenApiModel):
    """the parent class of models whose type != object in their
    swagger/openapi; the single payload is stored under the 'value' key."""

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or default if unset"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation"""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_str(self):
        """Returns the string representation of the model"""
        return str(self.value)

    def __eq__(self, other):
        """Returns true if both objects hold an equal 'value'"""
        if not isinstance(other, self.__class__):
            return False
        this_val = self._data_store['value']
        that_val = other._data_store['value']
        types = set()
        types.add(this_val.__class__)
        types.add(that_val.__class__)
        vals_equal = this_val == that_val
        return vals_equal
class ModelNormal(OpenApiModel):
    """the parent class of models whose type == object in their
    swagger/openapi"""

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or default if unset"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation"""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects store equal properties"""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            types = set()
            types.add(this_val.__class__)
            types.add(that_val.__class__)
            vals_equal = this_val == that_val
            if not vals_equal:
                return False
        return True
class ModelComposed(OpenApiModel):
    """the parent class of models whose type == object in their
    swagger/openapi and have oneOf/allOf/anyOf; property reads and writes
    are mirrored across self and all composed instances"""

    def __setitem__(self, name, value):
        """set the value of an attribute on self and all composed instances"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        if self.additional_properties_type is None:
            # no additional properties allowed; the name must be a known one
            if name not in self.openapi_types:
                raise ApiAttributeError(
                    "{0} has no attribute '{1}'".format(
                        type(self).__name__, name),
                    [e for e in [self._path_to_item, name] if e]
                )
        self.set_attribute(name, value)
        # keep composed instances in sync with self
        for model_instance in self._composed_instances:
            setattr(model_instance, name, value)
        if name not in self._var_name_to_model_instances:
            # we assigned an additional property; register its holders
            self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
        return None

    # sentinel used by __getitem__ to distinguish "unset" from None
    __unset_attribute_value__ = object()

    def get(self, name, default=None):
        """returns the attribute value, requiring all holders to agree"""
        if name in self.required_properties:
            return self.__dict__[name]
        # get the attribute from the correct instance
        model_instances = self._var_name_to_model_instances.get(name)
        values = []
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    v = model_instance._data_store[name]
                    if v not in values:
                        values.append(v)
        len_values = len(values)
        if len_values == 0:
            return default
        elif len_values == 1:
            return values[0]
        elif len_values > 1:
            # conflicting values across composed instances is a logic error
            raise ApiValueError(
                "Values stored for property {0} in {1} differ when looking "
                "at self and self's composed instances. All values must be "
                "the same".format(name, type(self).__name__),
                [e for e in [self._path_to_item, name] if e]
            )

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation"""
        value = self.get(name, self.__unset_attribute_value__)
        if value is self.__unset_attribute_value__:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [e for e in [self._path_to_item, name] if e]
            )
        return value

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set"""
        if name in self.required_properties:
            return name in self.__dict__
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    return True
        return False

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects store equal properties"""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            types = set()
            types.add(this_val.__class__)
            types.add(that_val.__class__)
            vals_equal = this_val == that_val
            if not vals_equal:
                return False
        return True
# Ordering used when sorting candidate classes for coercion attempts:
# model classes first, then containers, then primitives.
COERCION_INDEX_BY_TYPE = {
    ModelComposed: 0,
    ModelNormal: 1,
    ModelSimple: 2,
    none_type: 3,    # The type of 'None'.
    list: 4,
    dict: 5,
    float: 6,
    int: 7,
    bool: 8,
    datetime: 9,
    date: 10,
    str: 11,
    file_type: 12,   # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}

# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
UPCONVERSION_TYPE_PAIRS = (
    (str, datetime),
    (str, date),
    (int, float),             # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
    (list, ModelComposed),
    (dict, ModelComposed),
    (str, ModelComposed),
    (int, ModelComposed),
    (float, ModelComposed),
    (list, ModelComposed),
    (list, ModelNormal),
    (dict, ModelNormal),
    (str, ModelSimple),
    (int, ModelSimple),
    (float, ModelSimple),
    (list, ModelSimple),
)

# (from_type, to_type) pairs that we are willing to coerce, keyed by
# spec_property_naming (False: client-provided data; True: server response).
COERCIBLE_TYPE_PAIRS = {
    False: (  # client instantiation of a model with client data
        # (dict, ModelComposed),
        # (list, ModelComposed),
        # (dict, ModelNormal),
        # (list, ModelNormal),
        # (str, ModelSimple),
        # (int, ModelSimple),
        # (float, ModelSimple),
        # (list, ModelSimple),
        # (str, int),
        # (str, float),
        # (str, datetime),
        # (str, date),
        # (int, str),
        # (float, str),
    ),
    True: (  # server -> client data
        (dict, ModelComposed),
        (list, ModelComposed),
        (dict, ModelNormal),
        (list, ModelNormal),
        (str, ModelSimple),
        (int, ModelSimple),
        (float, ModelSimple),
        (list, ModelSimple),
        # (str, int),
        # (str, float),
        (str, datetime),
        (str, date),
        # (int, str),
        # (float, str),
        (str, file_type)
    ),
}
def get_simple_class(input_value):
    """Returns an input_value's simple class that we will use for type checking

    The isinstance checks are ordered deliberately: bool before int and
    datetime before date, because bool/datetime are subclasses of int/date.

    Args:
        input_value (class/class_instance): the item for which we will return
            the simple class
    """
    if isinstance(input_value, type):
        # input_value is a class
        return input_value
    elif isinstance(input_value, tuple):
        return tuple
    elif isinstance(input_value, list):
        return list
    elif isinstance(input_value, dict):
        return dict
    elif isinstance(input_value, none_type):
        return none_type
    elif isinstance(input_value, file_type):
        return file_type
    elif isinstance(input_value, bool):
        # this must be higher than the int check because
        # isinstance(True, int) == True
        return bool
    elif isinstance(input_value, int):
        return int
    elif isinstance(input_value, datetime):
        # this must be higher than the date check because
        # isinstance(datetime_instance, date) == True
        return datetime
    elif isinstance(input_value, date):
        return date
    elif isinstance(input_value, str):
        return str
    return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
    """Raises ApiValueError if input_values are not in allowed_values.

    Args:
        allowed_values (dict): dict of allowed-value dicts, keyed by the
            tuple path to the variable
        input_variable_path (tuple): path to the variable being checked;
            element 0 is used as the variable name in error messages
        input_values (list/dict/str/int/float/...): the value(s) to check;
            a list must be a subset of the allowed values, a dict's keys
            must be a subset, any other value must be a member

    Raises:
        ApiValueError: if the check fails
    """
    these_allowed_values = list(allowed_values[input_variable_path].values())
    if (isinstance(input_values, list)
            and not set(input_values).issubset(
                set(these_allowed_values))):
        # fixed: a stray trailing comma previously made invalid_values a
        # 1-tuple, so the message interpolated a tuple repr
        invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid values for `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (isinstance(input_values, dict)
            and not set(
                input_values.keys()).issubset(set(these_allowed_values))):
        invalid_values = ", ".join(
            map(str, set(input_values.keys()) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid keys in `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (not isinstance(input_values, (list, dict))
            and input_values not in these_allowed_values):
        raise ApiValueError(
            "Invalid value for `%s` (%s), must be one of %s" %
            (
                input_variable_path[0],
                input_values,
                these_allowed_values
            )
        )
def is_json_validation_enabled(schema_keyword, configuration=None):
    """Returns True if the given JSON-schema keyword's client-side
    validation is enabled; validation is disabled only when the
    configuration lists the keyword in
    `_disabled_client_side_validations`.
    """
    if configuration is None:
        return True
    _missing = object()
    disabled = getattr(
        configuration, '_disabled_client_side_validations', _missing)
    if disabled is _missing:
        return True
    return schema_keyword not in disabled
def check_validations(
        validations, input_variable_path, input_values,
        configuration=None):
    """Raises ApiValueError if input_values fail any enabled validation.

    Args:
        validations (dict): mapping of variable path tuples to a dict of
            validation rules ('max_length', 'multiple_of', 'regex', ...)
        input_variable_path (tuple): path to the variable being checked;
            element 0 is used as the variable name in error messages
        input_values: the value(s) to validate; for list/dict inputs the
            min/max bounds apply to the contained values
        configuration (Configuration|None): consulted to see whether a
            given JSON-schema keyword's client-side validation is disabled

    Raises:
        ApiValueError: when an enabled validation fails
    """
    if input_values is None:
        # nothing to validate; null acceptance is handled by type checking
        return

    current_validations = validations[input_variable_path]
    if (is_json_validation_enabled('multipleOf', configuration) and
            'multiple_of' in current_validations and
            isinstance(input_values, (int, float)) and
            not (float(input_values) / current_validations['multiple_of']).is_integer()):
        # Note 'multipleOf' will be as good as the floating point arithmetic.
        raise ApiValueError(
            "Invalid value for `%s`, value must be a multiple of "
            "`%s`" % (
                input_variable_path[0],
                current_validations['multiple_of']
            )
        )

    if (is_json_validation_enabled('maxLength', configuration) and
            'max_length' in current_validations and
            len(input_values) > current_validations['max_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['max_length']
            )
        )

    if (is_json_validation_enabled('minLength', configuration) and
            'min_length' in current_validations and
            len(input_values) < current_validations['min_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be greater than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['min_length']
            )
        )

    if (is_json_validation_enabled('maxItems', configuration) and
            'max_items' in current_validations and
            len(input_values) > current_validations['max_items']):
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be less than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['max_items']
            )
        )

    if (is_json_validation_enabled('minItems', configuration) and
            'min_items' in current_validations and
            len(input_values) < current_validations['min_items']):
        # fixed: this branch previously raised a bare ValueError, unlike
        # every other failed validation which raises ApiValueError
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be greater than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['min_items']
            )
        )

    items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
             'inclusive_minimum')
    if (any(item in current_validations for item in items)):
        # for containers the bounds apply to the contained values
        if isinstance(input_values, list):
            max_val = max(input_values)
            min_val = min(input_values)
        elif isinstance(input_values, dict):
            max_val = max(input_values.values())
            min_val = min(input_values.values())
        else:
            max_val = input_values
            min_val = input_values

    if (is_json_validation_enabled('exclusiveMaximum', configuration) and
            'exclusive_maximum' in current_validations and
            max_val >= current_validations['exclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than `%s`" % (
                input_variable_path[0],
                current_validations['exclusive_maximum']
            )
        )

    if (is_json_validation_enabled('maximum', configuration) and
            'inclusive_maximum' in current_validations and
            max_val > current_validations['inclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['inclusive_maximum']
            )
        )

    if (is_json_validation_enabled('exclusiveMinimum', configuration) and
            'exclusive_minimum' in current_validations and
            min_val <= current_validations['exclusive_minimum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than `%s`" %
            (
                input_variable_path[0],
                # fixed: the message previously printed the
                # 'exclusive_maximum' bound here (copy/paste slip)
                current_validations['exclusive_minimum']
            )
        )

    if (is_json_validation_enabled('minimum', configuration) and
            'inclusive_minimum' in current_validations and
            min_val < current_validations['inclusive_minimum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than or equal "
            "to `%s`" % (
                input_variable_path[0],
                current_validations['inclusive_minimum']
            )
        )
    flags = current_validations.get('regex', {}).get('flags', 0)
    if (is_json_validation_enabled('pattern', configuration) and
            'regex' in current_validations and
            not re.search(current_validations['regex']['pattern'],
                          input_values, flags=flags)):
        err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
            input_variable_path[0],
            current_validations['regex']['pattern']
        )
        if flags != 0:
            # include the regex flags in the message only when they are set
            err_msg = r"%s with flags=`%s`" % (err_msg, flags)
        raise ApiValueError(err_msg)
def order_response_types(required_types):
    """Returns the required types sorted in coercion priority order.

    Args:
        required_types (list/tuple): collection of classes or instances
            (list/dict instances stand in for container specs)

    Returns:
        (list): required_types sorted by COERCION_INDEX_BY_TYPE

    Raises:
        ApiValueError: if an entry has no coercion index
    """
    def index_getter(class_or_instance):
        # container specs come in as instances; model classes collapse to
        # their ModelComposed/ModelNormal/ModelSimple base index
        if isinstance(class_or_instance, list):
            return COERCION_INDEX_BY_TYPE[list]
        elif isinstance(class_or_instance, dict):
            return COERCION_INDEX_BY_TYPE[dict]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelComposed)):
            return COERCION_INDEX_BY_TYPE[ModelComposed]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelNormal)):
            return COERCION_INDEX_BY_TYPE[ModelNormal]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelSimple)):
            return COERCION_INDEX_BY_TYPE[ModelSimple]
        elif class_or_instance in COERCION_INDEX_BY_TYPE:
            return COERCION_INDEX_BY_TYPE[class_or_instance]
        raise ApiValueError("Unsupported type: %s" % class_or_instance)
    sorted_types = sorted(
        required_types,
        key=lambda class_or_instance: index_getter(class_or_instance)
    )
    return sorted_types
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
                       must_convert=True):
    """Filters required_types_classes down to those current_item can
    actually be coerced/upconverted into.

    Args:
        required_types_classes (tuple): candidate target classes
        current_item (any): the input data being converted
        spec_property_naming (bool): True when the data comes from the
            server (serialized names); selects which coercion table applies
        must_convert (bool): if True, COERCIBLE_TYPE_PAIRS conversions are
            allowed in addition to UPCONVERSION_TYPE_PAIRS

    Returns:
        (list): the remaining coercible target classes
    """
    current_type_simple = get_simple_class(current_item)
    results_classes = []
    for required_type_class in required_types_classes:
        # collapse model subclasses to their base for pair lookups
        required_type_class_simplified = required_type_class
        if isinstance(required_type_class_simplified, type):
            if issubclass(required_type_class_simplified, ModelComposed):
                required_type_class_simplified = ModelComposed
            elif issubclass(required_type_class_simplified, ModelNormal):
                required_type_class_simplified = ModelNormal
            elif issubclass(required_type_class_simplified, ModelSimple):
                required_type_class_simplified = ModelSimple
        if required_type_class_simplified == current_type_simple:
            # converting to the same type is never necessary
            continue
        class_pair = (current_type_simple, required_type_class_simplified)
        if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
            results_classes.append(required_type_class)
        elif class_pair in UPCONVERSION_TYPE_PAIRS:
            results_classes.append(required_type_class)
    return results_classes
def get_discriminated_classes(cls):
    """Returns all the classes that cls's discriminator can map a payload
    to, recursing through child classes that themselves declare a
    discriminator. cls itself is included only when it is nullable.
    """
    possible_classes = []
    # the discriminator dict is keyed by the (single) discriminator
    # property name
    key = list(cls.discriminator.keys())[0]
    if is_type_nullable(cls):
        possible_classes.append(cls)
    for discr_cls in cls.discriminator[key].values():
        if hasattr(discr_cls, 'discriminator') and discr_cls.discriminator is not None:
            possible_classes.extend(get_discriminated_classes(discr_cls))
        else:
            possible_classes.append(discr_cls)
    return possible_classes
def get_possible_classes(cls, from_server_context):
    """Returns the classes that an input for cls may deserialize into.

    In a from-server context the declared class is used as-is; on the
    client side, discriminator mappings and composed-model input classes
    widen the candidate set.
    """
    possible_classes = [cls]
    if from_server_context:
        return possible_classes
    if hasattr(cls, 'discriminator') and cls.discriminator is not None:
        # the discriminator decides the concrete class, not cls itself
        possible_classes = []
        possible_classes.extend(get_discriminated_classes(cls))
    elif issubclass(cls, ModelComposed):
        possible_classes.extend(composed_model_input_classes(cls))
    return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
    """Splits a mixed required-types spec into valid classes and the child
    type requirements of any container entries.

    Args:
        required_types_mixed (tuple/list): classes, or list/dict/tuple
            instances acting as container specs
        spec_property_naming (bool): True when mapping server data to a
            model (serialized names)

    Returns:
        (tuple): (valid_classes, child_req_types_by_current_type) where
            valid_classes (tuple) are the classes the current item may be,
            and child_req_types_by_current_type (dict) maps a container
            class to the required types of its contained items
    """
    valid_classes = []
    child_req_types_by_current_type = {}
    for required_type in required_types_mixed:
        if isinstance(required_type, list):
            valid_classes.append(list)
            child_req_types_by_current_type[list] = required_type
        elif isinstance(required_type, tuple):
            valid_classes.append(tuple)
            child_req_types_by_current_type[tuple] = required_type
        elif isinstance(required_type, dict):
            valid_classes.append(dict)
            # dict specs are keyed by str; the value lists the child types
            child_req_types_by_current_type[dict] = required_type[str]
        else:
            valid_classes.extend(get_possible_classes(required_type, spec_property_naming))
    return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
    """Converts serialized (javascript/JSON) keys to python attribute keys
    using model_class.attribute_map; keys without a mapping are passed
    through unchanged. Returns input_dict itself when the model has no
    attribute_map.
    """
    attribute_map = getattr(model_class, 'attribute_map', None)
    if attribute_map is None:
        return input_dict
    # attribute_map is python_key -> json_key; invert it for the lookup
    json_to_python = {json_key: python_key
                      for python_key, json_key in attribute_map.items()}
    return {json_to_python.get(json_key, json_key): value
            for json_key, value in input_dict.items()}
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
    """Builds and returns (does not raise) an ApiTypeError describing a
    value whose type is not one of valid_classes.

    Args:
        var_value: the offending value
        path_to_item (tuple): path to the value; its last element is used
            as the variable name in the message
        valid_classes (tuple): the classes that would have been accepted
        key_type (bool): True when the error concerns a dict key rather
            than a value
    """
    error_msg = type_error_message(
        var_name=path_to_item[-1],
        var_value=var_value,
        valid_classes=valid_classes,
        key_type=key_type
    )
    return ApiTypeError(
        error_msg,
        path_to_item=path_to_item,
        valid_classes=valid_classes,
        key_type=key_type
    )
def deserialize_primitive(data, klass, path_to_item):
    """Deserializes string/number data into a primitive instance.

    Args:
        data (str/int/float): the value to deserialize
        klass: the target primitive class (str/int/float/bool/date/datetime)
        path_to_item (tuple): path to this value in the payload, used for
            error reporting

    Returns:
        (str/int/float/bool/date/datetime): the deserialized value

    Raises:
        ApiValueError: if data cannot be parsed as klass
    """
    additional_message = ""
    try:
        if klass in {datetime, date}:
            additional_message = (
                "If you need your parameter to have a fallback "
                "string value, please set its type as `type: {}` in your "
                "spec. That allows the value to be any type. "
            )
            if klass == datetime:
                if len(data) < 8:
                    # too short to be even the minimal YYYYMMDD form
                    raise ValueError("This is not a datetime")
                parsed_datetime = parse(data)
                date_only = (
                    parsed_datetime.hour == 0 and
                    parsed_datetime.minute == 0 and
                    parsed_datetime.second == 0 and
                    parsed_datetime.tzinfo is None and
                    8 <= len(data) <= 10
                )
                if date_only:
                    # a bare date must not silently satisfy a datetime field
                    raise ValueError("This is a date, not a datetime")
                return parsed_datetime
            elif klass == date:
                if len(data) < 8:
                    raise ValueError("This is not a date")
                return parse(data).date()
        else:
            converted_value = klass(data)
            if isinstance(data, str) and klass == float:
                # reject strings like '3' whose float form does not
                # round-trip, so int vs float upconversion stays meaningful
                if str(converted_value) != data:
                    raise ValueError('This is not a float')
            return converted_value
    except (OverflowError, ValueError) as ex:
        raise ApiValueError(
            "{0}Failed to parse {1} as {2}".format(
                additional_message, repr(data), klass.__name__
            ),
            path_to_item=path_to_item
        ) from ex
def get_discriminator_class(model_class,
                            discr_name,
                            discr_value, cls_visited):
    """Returns the child class selected by the discriminator value, or
    None if none is found.

    Args:
        model_class: a model class that declares a discriminator
        discr_name (str): python name of the discriminator property
        discr_value (str): the discriminator value from the payload
        cls_visited (list): in/out list of classes already visited, used
            to break cycles in the composed-schema graph
    """
    if model_class in cls_visited:
        # already searched this class; avoid infinite recursion
        return None
    cls_visited.append(model_class)
    used_model_class = None
    if discr_name in model_class.discriminator:
        class_name_to_discr_class = model_class.discriminator[discr_name]
        used_model_class = class_name_to_discr_class.get(discr_value)
    if used_model_class is None:
        # So look in the ancestor or descendant discriminators
        # The discriminator mapping may exist in a descendant (anyOf, oneOf)
        # or ancestor (allOf).
        # Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
        #   hierarchy, the discriminator mappings may be defined at any level
        #   in the hierarchy.
        # Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
        #   if we try to make BasquePig from mammal, we need to travel through
        #   the oneOf descendant discriminators to find BasquePig
        descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
            model_class._composed_schemas.get('anyOf', ())
        ancestor_classes = model_class._composed_schemas.get('allOf', ())
        possible_classes = descendant_classes + ancestor_classes
        for cls in possible_classes:
            # Check if the schema has inherited discriminators.
            if hasattr(cls, 'discriminator') and cls.discriminator is not None:
                used_model_class = get_discriminator_class(
                    cls, discr_name, discr_value, cls_visited)
                if used_model_class is not None:
                    return used_model_class
    return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
                      configuration, spec_property_naming):
    """Deserializes model_data into an instance of model_class.

    Args:
        model_data (int/str/float/bool/none_type/list/dict): data to
            deserialize; a list is splatted as positional args, a dict as
            keyword args
        model_class: the OpenApiModel subclass to instantiate
        path_to_item (tuple): path to this value in the larger payload
        check_type (bool): whether to type-check while instantiating
        configuration (Configuration|None): the client configuration
        spec_property_naming (bool): True when keys use serialized names

    Returns:
        a model_class instance.
        NOTE(review): implicitly returns None when model_data is neither
        list, dict, nor a primitive — confirm callers tolerate that.
    """
    kw_args = dict(_check_type=check_type,
                   _path_to_item=path_to_item,
                   _configuration=configuration,
                   _spec_property_naming=spec_property_naming)
    if issubclass(model_class, ModelSimple):
        return model_class._new_from_openapi_data(model_data, **kw_args)
    elif isinstance(model_data, list):
        return model_class._new_from_openapi_data(*model_data, **kw_args)
    if isinstance(model_data, dict):
        kw_args.update(model_data)
        return model_class._new_from_openapi_data(**kw_args)
    elif isinstance(model_data, PRIMITIVE_TYPES):
        return model_class._new_from_openapi_data(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
    """Saves response body into a file in configuration.temp_folder_path
    and returns an open binary read handle on it. The caller is
    responsible for closing the handle and deleting the file.

    Args:
        response_data (str/bytes): the body to write; str is encoded UTF-8
        configuration (Configuration): provides temp_folder_path
        content_disposition (str|None): the Content-Disposition header; if
            it carries a parsable filename, that name is used instead of
            the mkstemp-generated one

    Returns:
        (file): an open binary file handle positioned at the start
    """
    fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
    os.close(fd)
    os.remove(path)

    if content_disposition:
        # fixed: a header without a parsable filename previously raised
        # AttributeError (None.group); now we fall back to the temp name
        match = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                          content_disposition)
        if match is not None:
            path = os.path.join(os.path.dirname(path), match.group(1))

    with open(path, "wb") as f:
        if isinstance(response_data, str):
            # change str to bytes so we can write it
            response_data = response_data.encode('utf-8')
        f.write(response_data)

    f = open(path, "rb")
    return f
def attempt_convert_item(input_value, valid_classes, path_to_item,
                         configuration, spec_property_naming, key_type=False,
                         must_convert=False, check_type=True):
    """Tries to coerce input_value into one of valid_classes.

    Args:
        input_value (any): the data to convert
        valid_classes (tuple): candidate target classes
        path_to_item (tuple): path to the item, for error reporting
        configuration (Configuration|None): needed for model/file
            deserialization; also controls discard_unknown_keys
        spec_property_naming (bool): True when keys use serialized names
        key_type (bool): True when converting a dict key
        must_convert (bool): if True, a failed conversion raises; if
            False, input_value is returned unchanged on failure
        check_type (bool): passed through to model deserialization

    Returns:
        an instance of one of valid_classes, or input_value when optional
        conversion failed

    Raises:
        ApiTypeError: when nothing is coercible and errors are not
            suppressed by discard_unknown_keys
    """
    valid_classes_ordered = order_response_types(valid_classes)
    valid_classes_coercible = remove_uncoercible(
        valid_classes_ordered, input_value, spec_property_naming)
    if not valid_classes_coercible or key_type:
        # we do not handle keytype errors, json will take care
        # of this for us
        if configuration is None or not configuration.discard_unknown_keys:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=key_type)
    for valid_class in valid_classes_coercible:
        try:
            if issubclass(valid_class, OpenApiModel):
                return deserialize_model(input_value, valid_class,
                                         path_to_item, check_type,
                                         configuration, spec_property_naming)
            elif valid_class == file_type:
                return deserialize_file(input_value, configuration)
            return deserialize_primitive(input_value, valid_class,
                                         path_to_item)
        except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
            if must_convert:
                raise conversion_exc
            # if we have conversion errors when must_convert == False
            # we ignore the exception and move on to the next class
            continue
    # we were unable to convert, must_convert == False
    return input_value
def is_type_nullable(input_type):
    """Returns True if None is an allowed value for the given type.

    Args:
        input_type (class): the type to check

    Returns:
        bool
    """
    if input_type is none_type:
        return True
    if issubclass(input_type, OpenApiModel) and input_type._nullable:
        return True
    if issubclass(input_type, ModelComposed):
        # If oneOf/anyOf, check if the 'null' type is one of the allowed types.
        for t in input_type._composed_schemas.get('oneOf', ()):
            if is_type_nullable(t): return True
        for t in input_type._composed_schemas.get('anyOf', ()):
            if is_type_nullable(t): return True
    return False
def is_valid_type(input_class_simple, valid_classes):
    """Returns True if input_class_simple (see get_simple_class) is
    acceptable for valid_classes, following discriminator mappings and
    nullability of composed schemas.

    Args:
        input_class_simple (class): the simple class of the input data
        valid_classes (tuple): the classes the input may be

    Returns:
        bool
    """
    valid_type = input_class_simple in valid_classes
    if not valid_type and (
            issubclass(input_class_simple, OpenApiModel) or
            input_class_simple is none_type):
        for valid_class in valid_classes:
            if input_class_simple is none_type and is_type_nullable(valid_class):
                # Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
                return True
            if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
                continue
            # search the discriminator mapping for a matching child class
            discr_propertyname_py = list(valid_class.discriminator.keys())[0]
            discriminator_classes = (
                valid_class.discriminator[discr_propertyname_py].values()
            )
            valid_type = is_valid_type(input_class_simple, discriminator_classes)
            if valid_type:
                return True
    return valid_type
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Validates input_value against the required types and, when a
    configuration is given, coerces it (and, recursively, the items of
    list/dict inputs — which are converted in place).

    Args:
        input_value (any): the data to validate/convert
        required_types_mixed (list/dict/tuple): classes and/or container
            specs describing the allowed types
        path_to_item (tuple): path to the data, for error reporting
        spec_property_naming (bool): True when keys use serialized names
        _check_type (bool): whether type checking is enabled
        configuration (Configuration|None): when set, coercion is
            attempted instead of raising immediately

    Returns:
        the correctly typed value

    Raises:
        ApiTypeError: when the type is invalid and cannot be converted
    """
    results = get_required_type_classes(required_types_mixed, spec_property_naming)
    valid_classes, child_req_types_by_current_type = results
    input_class_simple = get_simple_class(input_value)
    valid_type = is_valid_type(input_class_simple, valid_classes)
    if not valid_type:
        if configuration:
            # if input_value is not valid_type try to convert it
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=True,
                check_type=_check_type
            )
            return converted_instance
        else:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=False)
    # input_value's type is in valid_classes
    if len(valid_classes) > 1 and configuration:
        # there are valid classes which are not the current class
        valid_classes_coercible = remove_uncoercible(
            valid_classes, input_value, spec_property_naming, must_convert=False)
        if valid_classes_coercible:
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes_coercible,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=False,
                check_type=_check_type
            )
            return converted_instance
    if child_req_types_by_current_type == {}:
        # all types are of the required types and there are no more inner
        # variables left to look at
        return input_value
    inner_required_types = child_req_types_by_current_type.get(
        type(input_value)
    )
    if inner_required_types is None:
        # for this type, there are not more inner variables left to look at
        return input_value
    if isinstance(input_value, list):
        if input_value == []:
            # allow an empty list
            return input_value
        for index, inner_value in enumerate(input_value):
            inner_path = list(path_to_item)
            inner_path.append(index)
            # children are validated/converted in place
            input_value[index] = validate_and_convert_types(
                inner_value,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    elif isinstance(input_value, dict):
        if input_value == {}:
            # allow an empty dict
            return input_value
        for inner_key, inner_val in input_value.items():
            inner_path = list(path_to_item)
            inner_path.append(inner_key)
            if get_simple_class(inner_key) != str:
                raise get_type_error(inner_key, inner_path, valid_classes,
                                     key_type=True)
            input_value[inner_key] = validate_and_convert_types(
                inner_val,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    return input_value
def model_to_dict(model_instance, serialize=True):
    """Returns the model's properties as a dict, recursing into nested
    models, lists and dicts.

    Args:
        model_instance: a model instance (its _data_store is read)
        serialize (bool): if True, keys use the serialized
            (attribute_map) names; if False, the python attribute names

    Returns:
        dict
    """
    result = {}
    model_instances = [model_instance]
    if model_instance._composed_schemas:
        model_instances.extend(model_instance._composed_instances)
    seen_json_attribute_names = set()
    used_fallback_python_attribute_names = set()
    py_to_json_map = {}
    # NOTE: the loop variable deliberately reuses (shadows) the
    # model_instance parameter name
    for model_instance in model_instances:
        for attr, value in model_instance._data_store.items():
            if serialize:
                # we use get here because additional property key names do not
                # exist in attribute_map
                try:
                    attr = model_instance.attribute_map[attr]
                    py_to_json_map.update(model_instance.attribute_map)
                    seen_json_attribute_names.add(attr)
                except KeyError:
                    used_fallback_python_attribute_names.add(attr)
            if isinstance(value, list):
                if not value:
                    # empty list or None
                    result[attr] = value
                else:
                    res = []
                    for v in value:
                        if isinstance(v, PRIMITIVE_TYPES) or v is None:
                            res.append(v)
                        elif isinstance(v, ModelSimple):
                            res.append(v.value)
                        else:
                            res.append(model_to_dict(v, serialize=serialize))
                    result[attr] = res
            elif isinstance(value, dict):
                # recurse into model values; other values pass through
                result[attr] = dict(map(
                    lambda item: (item[0],
                                  model_to_dict(item[1], serialize=serialize))
                    if hasattr(item[1], '_data_store') else item,
                    value.items()
                ))
            elif isinstance(value, ModelSimple):
                result[attr] = value.value
            elif hasattr(value, '_data_store'):
                result[attr] = model_to_dict(value, serialize=serialize)
            else:
                result[attr] = value
    if serialize:
        # drop python-named fallback entries whose mapped json name was
        # already emitted by another composed instance
        for python_key in used_fallback_python_attribute_names:
            json_key = py_to_json_map.get(python_key)
            if json_key is None:
                continue
            if python_key == json_key:
                continue
            json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
            if json_key_assigned_no_need_for_python_key:
                del result[python_key]
    return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
                       key_type=None):
    """Formats the message used by ApiTypeError for a value (or dict key)
    whose type is not one of valid_classes.

    Args:
        var_value: the offending value
        var_name: the variable name used in the message
        valid_classes (tuple): the accepted classes
        key_type (bool): True when the error concerns a dict key
    """
    kind = 'key' if key_type else 'value'
    return (
        "Invalid type for variable '{0}'. Required {1} type {2} and "
        "passed type was {3}".format(
            var_name,
            kind,
            get_valid_classes_phrase(valid_classes),
            type(var_value).__name__,
        )
    )
def get_valid_classes_phrase(input_classes):
    """Returns a phrase describing the accepted class(es), e.g. 'is str'
    for a single class or 'is one of [int, str]' for several, with the
    names sorted alphabetically.
    """
    names = sorted(cls.__name__ for cls in input_classes)
    if len(names) == 1:
        return 'is {0}'.format(names[0])
    return "is one of [{0}]".format(", ".join(names))
def get_allof_instances(self, model_args, constant_args):
    """Instantiates every allOf component schema of a composed model.

    Args:
        self: the composed model instance being built
        model_args (dict): var_name to var_value, used to make instances
        constant_args (dict): metadata kwargs (_check_type, _path_to_item,
            _spec_property_naming, _configuration, ...)

    Returns:
        composed_instances (list): one instance per allOf class

    Raises:
        ApiValueError: if any allOf component rejects the inputs
    """
    composed_instances = []
    for allof_class in self._composed_schemas['allOf']:
        try:
            allof_instance = allof_class(**model_args, **constant_args)
            composed_instances.append(allof_instance)
        except Exception as ex:
            raise ApiValueError(
                "Invalid inputs given to generate an instance of '%s'. The "
                "input data was invalid for the allOf schema '%s' in the composed "
                "schema '%s'. Error=%s" % (
                    allof_class.__name__,
                    allof_class.__name__,
                    self.__class__.__name__,
                    str(ex)
                )
            ) from ex
    return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
    """Finds and returns the exactly-one oneOf instance that matches the
    input data.

    Args:
        cls: the class holding the oneOf composed schemas
        model_kwargs (dict): var_name to var_value for keyword inputs
        constant_kwargs (dict): _check_type, _path_to_item,
            _spec_property_naming, _configuration
        model_arg: the single input value, for ModelSimple/primitive
            oneOf candidates (None when the input is keyword data)

    Returns:
        the single matching oneOf instance, or None when cls declares no
        oneOf schemas

    Raises:
        ApiValueError: if zero, or more than one, oneOf schema matches
    """
    if len(cls._composed_schemas['oneOf']) == 0:
        return None
    oneof_instances = []
    # Iterate over each oneOf schema and determine if the input data
    # matches the oneOf schemas.
    for oneof_class in cls._composed_schemas['oneOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if oneof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        single_value_input = allows_single_value_input(oneof_class)
        try:
            if not single_value_input:
                oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
            else:
                if issubclass(oneof_class, ModelSimple):
                    oneof_instance = oneof_class(model_arg, **constant_kwargs)
                elif oneof_class in PRIMITIVE_TYPES:
                    oneof_instance = validate_and_convert_types(
                        model_arg,
                        (oneof_class,),
                        constant_kwargs['_path_to_item'],
                        constant_kwargs['_spec_property_naming'],
                        constant_kwargs['_check_type'],
                        configuration=constant_kwargs['_configuration']
                    )
            oneof_instances.append(oneof_instance)
        except Exception:
            # a non-matching candidate schema is expected; try the others
            pass
    if len(oneof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None "
            "of the oneOf schemas matched the input data." %
            cls.__name__
        )
    elif len(oneof_instances) > 1:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. Multiple "
            "oneOf schemas matched the inputs, but a max of one is allowed." %
            cls.__name__
        )
    return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
    """Instantiates every anyOf component schema that accepts the input
    data; at least one must match.

    Args:
        self: the composed model instance being built
        model_args (dict): var_name to var_value, used to make instances
        constant_args (dict): metadata kwargs (_check_type, _path_to_item,
            _spec_property_naming, _configuration, ...)

    Returns:
        anyof_instances (list): possibly empty only when the class has no
            anyOf schemas

    Raises:
        ApiValueError: if no anyOf schema matched the inputs
    """
    anyof_instances = []
    if len(self._composed_schemas['anyOf']) == 0:
        return anyof_instances
    for anyof_class in self._composed_schemas['anyOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if anyof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        try:
            anyof_instance = anyof_class(**model_args, **constant_args)
            anyof_instances.append(anyof_instance)
        except Exception:
            # a non-matching candidate schema is expected; try the others
            pass
    if len(anyof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None of the "
            "anyOf schemas matched the inputs." %
            self.__class__.__name__
        )
    return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
    """Returns the set of argument names from model_args that were not
    consumed by any composed instance, so the caller can discard them.
    """
    model_arg_keys = model_args.keys()
    discarded_args = set()
    # arguments passed to self were already converted to python names
    # before __init__ was called
    for instance in composed_instances:
        if instance.__class__ in self._composed_schemas['allOf']:
            try:
                # NOTE(review): model_args is a dict, and `dict - keys`
                # raises TypeError, which the except below swallows — so
                # this allOf branch may never record discarded args;
                # confirm whether `model_arg_keys - keys` was intended.
                keys = instance.to_dict().keys()
                discarded_keys = model_args - keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
        else:
            try:
                all_keys = set(model_to_dict(instance, serialize=False).keys())
                js_keys = model_to_dict(instance, serialize=True).keys()
                all_keys.update(js_keys)
                discarded_keys = model_arg_keys - all_keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
    return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
    """Validates the inputs against self's composed (allOf/oneOf/anyOf)
    schemas and builds the component instances.

    Args:
        constant_args (dict): metadata arguments (_check_type,
            _path_to_item, _spec_property_naming, _configuration, ...)
        model_args (dict): var_name to var_value used to make instances
        self: the composed model instance under construction

    Returns:
        list: [composed_instances, var_name_to_model_instances,
               additional_properties_model_instances, discarded_args]
    """
    # create composed_instances
    composed_instances = []
    allof_instances = get_allof_instances(self, model_args, constant_args)
    composed_instances.extend(allof_instances)
    oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
    if oneof_instance is not None:
        composed_instances.append(oneof_instance)
    anyof_instances = get_anyof_instances(self, model_args, constant_args)
    composed_instances.extend(anyof_instances)
    additional_properties_model_instances = []
    if self.additional_properties_type is not None:
        # self may absorb extra properties as additional properties
        additional_properties_model_instances = [self]
    discarded_args = get_discarded_args(self, composed_instances, model_args)
    # map variable names to composed_instances
    var_name_to_model_instances = {}
    for prop_name in model_args:
        if prop_name not in discarded_args:
            var_name_to_model_instances[prop_name] = [self] + composed_instances
    return [
        composed_instances,
        var_name_to_model_instances,
        additional_properties_model_instances,
        discarded_args
    ]
| true | true |
f7220aec41876620567ec4f5a2fa2a87f3c48aab | 4,306 | py | Python | server.py | jsagurton/homekit-neopixel-rpi | 0a0725bf5bb4b3c81c25d4909e2225b6c03411c0 | [
"MIT"
] | 2 | 2020-05-13T17:43:25.000Z | 2020-06-01T04:13:28.000Z | server.py | jsagurton/homekit-neopixel-rpi | 0a0725bf5bb4b3c81c25d4909e2225b6c03411c0 | [
"MIT"
] | null | null | null | server.py | jsagurton/homekit-neopixel-rpi | 0a0725bf5bb4b3c81c25d4909e2225b6c03411c0 | [
"MIT"
] | null | null | null | import time
import board
import neopixel
import threading
from flask import Flask
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D21
# The number of NeoPixels
num_pixels = 137
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1.0, auto_write=False,
pixel_order=ORDER)
app = Flask(__name__)
rgb=(255,255,255)
status = 0
enableRainbow = False
# I'm not entirely sure what to do with the ratio yet. Repeated brightness adjustments cause problems. Maybe max this until >=1 of the component values is 255?
rgbRatio=(255, 255, 255)
brightness = 1
def wheel(pos):
    """Map pos in [0, 255] onto the colour wheel (r -> g -> b -> r),
    scaled by the global brightness. Out-of-range positions yield black.
    Returns a 3-tuple for RGB/GRB strips, a 4-tuple otherwise.
    """
    global brightness
    if not 0 <= pos <= 255:
        r, g, b = 0, 0, 0
    elif pos < 85:
        # red ramps up while green ramps down
        r, g, b = int(pos * 3), int(255 - pos * 3), 0
    elif pos < 170:
        # green -> blue segment
        pos -= 85
        r, g, b = int(255 - pos * 3), 0, int(pos * 3)
    else:
        # blue -> red segment
        pos -= 170
        r, g, b = 0, int(pos * 3), int(255 - pos * 3)
    r, g, b = int(brightness * r), int(brightness * g), int(brightness * b)
    if ORDER == neopixel.RGB or ORDER == neopixel.GRB:
        return (r, g, b)
    return (r, g, b, 0)
def rgb_to_hex(rgb):
    """Return '#rrggbb' (lowercase hex) for an (r, g, b) tuple of 0-255 ints."""
    r, g, b = rgb
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def hex_to_rgb(value):
    """Return (red, green, blue) for the color given as #rrggbb (the
    leading '#' is optional)."""
    digits = value.lstrip('#')
    step = len(digits) // 3
    return tuple(int(digits[start:start + step], 16)
                 for start in range(0, len(digits), step))
def rainbow_cycle():
    """Worker loop for the /rainbow animation: sweeps the colour wheel
    across the strip until `enableRainbow` is cleared, then turns the
    strip off. Runs on the background thread started by /rainbow."""
    global enableRainbow
    while enableRainbow:
        for j in range(255):
            # This is necessary because with longer strands this nested loop just takes foreverrrrrr, so breaking will force a re-eval. It's hacky, and could
            # be done more cleanly probably. Consider refactoring in the future to move the thread object to be global, making it stoppable and then implementing
            # more consistent checks instead of having random globals flying all over the place. Blame the wine.
            if not enableRainbow:
                break
            for i in range(num_pixels):
                # spread one full wheel revolution across the strip,
                # offset by the animation step j
                pixel_index = (i * 256 // num_pixels) + j
                pixels[i] = wheel(pixel_index & 255)
            pixels.show()
    off()
    return
@app.route("/status")
def status():
global status
return str(status)
@app.route("/bright")
def bright():
global rgb
print(str(int(brightness*100)))
return str(int(brightness*100))
@app.route("/color")
def color():
global rgb
value = rgb_to_hex(rgb)
return str(value)
@app.route("/rainbow")
def rainbow():
global enableRainbow
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
if(enableRainbow==False):
enableRainbow=True
t = threading.Thread(target = rainbow_cycle)
t.start()
return "on"
# TODO: Test this actually works. Can this be condensed in to the other /bright route? Is it easier to just have one with no args and one with args?
# TODO: Handle case where brightness is 0.
# More Info on setBrightness() call: https://forums.adafruit.com/viewtopic.php?t=41143
@app.route("/setbright/<value>")
def setbright(value):
    """Set the brightness to `value` percent (0-100) by rescaling the
    current colour from rgbRatio; returns the applied percentage.

    NOTE(review): value=0 zeroes `rgb` but leaves `rgbRatio` intact, so a
    later non-zero brightness restores the colour — confirm this is the
    intended behaviour (see the TODO above).
    """
    global rgb
    global brightness
    brightness = int(value) / 100
    # rgbRatio holds the full-brightness colour; scale each channel down
    rgb = tuple(int(brightness * v) for v in rgbRatio)
    return str(int(brightness*100))
@app.route("/on")
def on():
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
return "on"
@app.route("/off")
def off():
global status
status = 0
global enableRainbow
enableRainbow=False
pixels.fill((0,0,0))
pixels.show()
return "off"
@app.route("/set/<values>")
def set(values):
global enableRainbow
enableRainbow=False
h = values
#h = values.replace("NA","0").replace("N","1")
global rgb
global rgbRatio
#rgb=hex_to_rgb(h)
rgb=tuple(int(h[i:i+2], 16) for i in (0, 2 ,4))
# Figure out which of these is the highest value, and how far it needs to scale to get to 255
rgbRatio = tuple(int(v*255/max(rgb)) for v in rgb)
pixels.fill(rgb)
pixels.show()
return "ok"
| 27.602564 | 159 | 0.657687 | import time
import board
import neopixel
import threading
from flask import Flask
pixel_pin = board.D21
num_pixels = 137
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1.0, auto_write=False,
pixel_order=ORDER)
app = Flask(__name__)
rgb=(255,255,255)
status = 0
enableRainbow = False
rgbRatio=(255, 255, 255)
brightness = 1
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
global brightness
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos * 3)
g = int(255 - pos*3)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos*3)
g = 0
b = int(pos*3)
else:
pos -= 170
r = 0
g = int(pos*3)
b = int(255 - pos*3)
r, g, b = int(brightness * r), int(brightness * g), int(brightness * b)
return (r, g, b) if ORDER == neopixel.RGB or ORDER == neopixel.GRB else (r, g, b, 0)
def rgb_to_hex(rgb):
return '
def hex_to_rgb(value):
value = value.lstrip('
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rainbow_cycle():
global enableRainbow
while enableRainbow:
for j in range(255):
# This is necessary because with longer strands this nested loop just takes foreverrrrrr, so breaking will force a re-eval. It's hacky, and could
if not enableRainbow:
break
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
off()
return
@app.route("/status")
def status():
global status
return str(status)
@app.route("/bright")
def bright():
global rgb
print(str(int(brightness*100)))
return str(int(brightness*100))
@app.route("/color")
def color():
global rgb
value = rgb_to_hex(rgb)
return str(value)
@app.route("/rainbow")
def rainbow():
global enableRainbow
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
if(enableRainbow==False):
enableRainbow=True
t = threading.Thread(target = rainbow_cycle)
t.start()
return "on"
@app.route("/setbright/<value>")
def setbright(value):
global rgb
global brightness
brightness = int(value) / 100
rgb = tuple(int(brightness * v) for v in rgbRatio)
return str(int(brightness*100))
@app.route("/on")
def on():
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
return "on"
@app.route("/off")
def off():
global status
status = 0
global enableRainbow
enableRainbow=False
pixels.fill((0,0,0))
pixels.show()
return "off"
@app.route("/set/<values>")
def set(values):
global enableRainbow
enableRainbow=False
h = values
global rgb
global rgbRatio
rgb=tuple(int(h[i:i+2], 16) for i in (0, 2 ,4))
rgbRatio = tuple(int(v*255/max(rgb)) for v in rgb)
pixels.fill(rgb)
pixels.show()
return "ok"
| true | true |
f7220b27f22bbf7fe83a49aeb06fc164fc8a0b60 | 3,903 | py | Python | test/flags.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | test/flags.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | test/flags.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | '''
Several testcases around <Flags> and <Flag>.
'''
import sys
sys.path.append("c:/peach")
from Peach.Generators.dictionary import *
from Peach.Generators.static import *
import unittest
import utils
import struct
def suite():
suite = unittest.TestSuite()
suite.addTest(FlagsInputTestCase())
suite.addTest(FlagsOutputTestCase())
#suite.addTest(Flags1TestCase())
#suite.addTest(Flags2TestCase())
#suite.addTest(Flags3TestCase())
#suite.addTest(Flags4TestCase())
#suite.addTest(Flags5TestCase())
#suite.addTest(Flags6TestCase())
suite.addTest(Flags7TestCase())
suite.addTest(Flags8TestCase())
return suite
class FlagsInputTestCase(utils.PeachSendAndRecvTestCase):
def runTest(self):
# Test
gen = Flags2(None, 8, [ [0, 1, Static(1)], [1, 2, Static(2)], [3, 2, Static(3)], [5, 3, Static(4)] ])
value = struct.pack("B", int(str(gen.getValue())))
self.peachUtils.SetSendAndReceiveData(value)
self.peachUtils.RunPeachXml("flagsInput.xml")
ret = str(self.peachUtils.GetListenerData())
assert ret == '4', 'flagsInput.xml failed, instead [%s]' % repr(ret)
class FlagsOutputTestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
gen = Flags2(None, 8, [ [0, 1, Static(1)], [1, 2, Static(2)], [3, 2, Static(3)], [5, 3, Static(4)] ])
value = struct.pack("B", int(str(gen.getValue())))
self.peachUtils.RunPeachXml("flagsOutput.xml")
ret = struct.unpack("B", str(self.peachUtils.GetListenerData()))[0]
assert ret == 157, 'flagsOutput.xml failed, instead [%s]' % repr(ret)
#class Flags1TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags1.xml")
# ret = struct.unpack("B", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 163, 'flags1.xml failed, instead [%s]' % repr(ret)
#
#class Flags2TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags2.xml")
# ret = struct.unpack("B", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 131, 'flags2.xml failed, instead [%s]' % repr(ret)
#
#class Flags3TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags3.xml")
# ret = struct.unpack("!H", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 65411, 'flags3.xml failed, instead [%s]' % repr(ret)
#
#class Flags4TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags4.xml")
# ret = struct.unpack("L", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 2214560767, 'flags4.xml failed, instead [%s]' % repr(ret)
#
#class Flags5TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags5.xml")
# ret = struct.unpack("L", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 33554432, 'flags5.xml failed, instead [%s]' % repr(ret)
#
#class Flags6TestCase(utils.PeachTcpTestCase):
#
# def runTest(self):
# # Test
#
# self.peachUtils.RunPeachXml("flags6.xml")
# ret = struct.unpack("B", str(self.peachUtils.GetListenerData()))[0]
#
# assert ret == 2, 'flags6.xml failed, instead [%s]' % repr(ret)
class Flags7TestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
self.peachUtils.RunPeachXml("flags7.xml")
ret = self.peachUtils.GetListenerData()
assert ret == "\x28\x00\x28\x05\x8e\x01", 'flags7.xml failed, instead [%s]' % repr(ret)
class Flags8TestCase(utils.PeachTcpTestCase):
def runTest(self):
# Test
self.peachUtils.RunPeachXml("flags8.xml")
ret = self.peachUtils.GetListenerData()
assert ret == "\x0a\x00\x0a\x50\x63\x10", 'flags8.xml failed, instead [%s]' % repr(ret)
if __name__ == "__main__":
unittest.main()
# end
| 27.878571 | 104 | 0.658724 |
import sys
sys.path.append("c:/peach")
from Peach.Generators.dictionary import *
from Peach.Generators.static import *
import unittest
import utils
import struct
def suite():
suite = unittest.TestSuite()
suite.addTest(FlagsInputTestCase())
suite.addTest(FlagsOutputTestCase())
suite.addTest(Flags7TestCase())
suite.addTest(Flags8TestCase())
return suite
class FlagsInputTestCase(utils.PeachSendAndRecvTestCase):
def runTest(self):
gen = Flags2(None, 8, [ [0, 1, Static(1)], [1, 2, Static(2)], [3, 2, Static(3)], [5, 3, Static(4)] ])
value = struct.pack("B", int(str(gen.getValue())))
self.peachUtils.SetSendAndReceiveData(value)
self.peachUtils.RunPeachXml("flagsInput.xml")
ret = str(self.peachUtils.GetListenerData())
assert ret == '4', 'flagsInput.xml failed, instead [%s]' % repr(ret)
class FlagsOutputTestCase(utils.PeachTcpTestCase):
def runTest(self):
gen = Flags2(None, 8, [ [0, 1, Static(1)], [1, 2, Static(2)], [3, 2, Static(3)], [5, 3, Static(4)] ])
value = struct.pack("B", int(str(gen.getValue())))
self.peachUtils.RunPeachXml("flagsOutput.xml")
ret = struct.unpack("B", str(self.peachUtils.GetListenerData()))[0]
assert ret == 157, 'flagsOutput.xml failed, instead [%s]' % repr(ret)
class Flags7TestCase(utils.PeachTcpTestCase):
def runTest(self):
self.peachUtils.RunPeachXml("flags7.xml")
ret = self.peachUtils.GetListenerData()
assert ret == "\x28\x00\x28\x05\x8e\x01", 'flags7.xml failed, instead [%s]' % repr(ret)
class Flags8TestCase(utils.PeachTcpTestCase):
def runTest(self):
self.peachUtils.RunPeachXml("flags8.xml")
ret = self.peachUtils.GetListenerData()
assert ret == "\x0a\x00\x0a\x50\x63\x10", 'flags8.xml failed, instead [%s]' % repr(ret)
if __name__ == "__main__":
unittest.main()
| true | true |
f7220b6826ac6e6db12ecb47cf44face19cbbe4d | 644 | py | Python | src/python/pants/backend/experimental/docker/register.py | yoav-orca/pants | 995448e9add343975844c7a43d5d64618fc4e4d9 | [
"Apache-2.0"
] | 1,806 | 2015-01-05T07:31:00.000Z | 2022-03-31T11:35:41.000Z | src/python/pants/backend/experimental/docker/register.py | yoav-orca/pants | 995448e9add343975844c7a43d5d64618fc4e4d9 | [
"Apache-2.0"
] | 9,565 | 2015-01-02T19:01:59.000Z | 2022-03-31T23:25:16.000Z | src/python/pants/backend/experimental/docker/register.py | riisi/pants | b33327389fab67c47b919710ea32f20ca284b1a6 | [
"Apache-2.0"
] | 443 | 2015-01-06T20:17:57.000Z | 2022-03-31T05:28:17.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen import export_codegen_goal
from pants.backend.docker.goals.tailor import rules as tailor_rules
from pants.backend.docker.rules import rules as docker_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.python.util_rules.pex import rules as pex_rules
def rules():
return (
*docker_rules(),
*export_codegen_goal.rules(),
*pex_rules(),
*tailor_rules(),
)
def target_types():
return (DockerImageTarget,)
| 29.272727 | 67 | 0.75 |
from pants.backend.codegen import export_codegen_goal
from pants.backend.docker.goals.tailor import rules as tailor_rules
from pants.backend.docker.rules import rules as docker_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.python.util_rules.pex import rules as pex_rules
def rules():
return (
*docker_rules(),
*export_codegen_goal.rules(),
*pex_rules(),
*tailor_rules(),
)
def target_types():
return (DockerImageTarget,)
| true | true |
f7220b72397b2f715a8c642f041b2601f7a9bea8 | 1,148 | py | Python | 15CSL76_Machine_Learning/lab/8-em-kmeans.py | Shivanisen16/vtulabs | 2bc41d856612840cf035b570e6256ffc5bcbab5d | [
"MIT"
] | 7 | 2019-04-30T13:25:01.000Z | 2021-01-04T13:02:20.000Z | 15CSL76_Machine_Learning/lab/8-em-kmeans.py | Shivanisen16/vtulabs | 2bc41d856612840cf035b570e6256ffc5bcbab5d | [
"MIT"
] | 4 | 2018-12-17T08:46:55.000Z | 2019-11-12T11:47:24.000Z | 15CSL76_Machine_Learning/lab/8-em-kmeans.py | cseas/vtulabs | 2bc41d856612840cf035b570e6256ffc5bcbab5d | [
"MIT"
] | 14 | 2018-12-03T06:55:18.000Z | 2021-01-04T02:49:12.000Z | import copy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
# Importing the dataset
data = pd.read_csv("ex.csv")
print("Input Data and Shape:")
print(data.head(3))
print("Shape:", data.shape)
# Getting the values and plotting it
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
print('Graph for whole dataset')
plt.scatter(f1, f2, c='black', s=50)
plt.show()
##########################################
kmeans = KMeans(2, random_state=0)
labels = kmeans.fit(X).predict(X)
centroids = kmeans.cluster_centers_
print("Labels KMeans:", labels)
print('Graph using Kmeans Algorithm')
# plot all points, color the labels
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50)
# mark centroids
plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=200, c='black')
plt.show()
# gmm demo
gmm = GaussianMixture(n_components=2)
labels = gmm.fit(X).predict(X)
print("\nLabels EM:", labels)
print('Graph using EM Algorithm')
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40)
plt.show()
| 26.697674 | 76 | 0.655052 | import copy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
data = pd.read_csv("ex.csv")
print("Input Data and Shape:")
print(data.head(3))
print("Shape:", data.shape)
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
print('Graph for whole dataset')
plt.scatter(f1, f2, c='black', s=50)
plt.show()
| true | true |
f7220c584b0c5c0a472d032b76d41c1b9f1c37f5 | 63,818 | py | Python | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# Please remember to run "make -C docs html" after update "desc" attributes.
import argparse
import copy
import grp
import inspect
import os
import pwd
import re
import shlex
import ssl
import sys
import textwrap
from gunicorn import __version__, util
from gunicorn.errors import ConfigError
from gunicorn.reloader import reloader_engines
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def make_settings(ignore=None):
    """Instantiate every registered Setting and return them keyed by name.

    Settings whose name appears in *ignore* are left out.  Each stored
    entry is a copy, so callers may mutate values freely without touching
    the freshly created prototypes.
    """
    skipped = ignore or ()
    return {
        setting.name: setting.copy()
        for setting in (cls() for cls in KNOWN_SETTINGS)
        if setting.name not in skipped
    }
def auto_int(_, x):
    """Parse *x* as an integer with base auto-detection.

    Python 3 dropped the bare ``0NNN`` octal notation, so a literal with a
    leading zero followed by a digit is rewritten to ``0oNNN`` before being
    handed to ``int(x, 0)``.  The first argument is ignored (argparse-style
    two-argument type callback).
    """
    looks_octal = re.match(r'0(\d)', x, re.IGNORECASE)
    if looks_octal:
        x = x.replace('0', '0o', 1)
    return int(x, 0)
class Config(object):
    """Runtime container for all gunicorn settings.

    Holds one instance of every registered ``Setting`` (keyed by name) and
    exposes each value both as a plain attribute (``cfg.workers``) and via
    derived convenience properties (``address``, ``worker_class``, ...).
    """

    def __init__(self, usage=None, prog=None):
        self.settings = make_settings()
        self.usage = usage
        self.prog = prog or os.path.basename(sys.argv[0])
        # snapshot of the environment at startup; used by
        # get_cmd_args_from_env() so later mutations don't interfere
        self.env_orig = os.environ.copy()

    def __str__(self):
        """Render all settings as aligned ``name = value`` lines."""
        lines = []
        kmax = max(len(k) for k in self.settings)
        for k in sorted(self.settings):
            v = self.settings[k].value
            if callable(v):
                v = "<{}()>".format(v.__qualname__)
            lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax))
        return "\n".join(lines)

    def __getattr__(self, name):
        # unknown attributes fall through to the named setting's value
        if name not in self.settings:
            raise AttributeError("No configuration setting for: %s" % name)
        return self.settings[name].get()

    def __setattr__(self, name, value):
        # settings must be changed via set() so validators always run
        if name != "settings" and name in self.settings:
            raise AttributeError("Invalid access!")
        super().__setattr__(name, value)

    def set(self, name, value):
        """Validate *value* and store it on the named setting."""
        if name not in self.settings:
            raise AttributeError("No configuration setting for: %s" % name)
        self.settings[name].set(value)

    def get_cmd_args_from_env(self):
        """Extra CLI arguments taken from $GUNICORN_CMD_ARGS (shell-split)."""
        if 'GUNICORN_CMD_ARGS' in self.env_orig:
            return shlex.split(self.env_orig['GUNICORN_CMD_ARGS'])
        return []

    def parser(self):
        """Build an argparse parser exposing every setting that has a cli spec."""
        kwargs = {
            "usage": self.usage,
            "prog": self.prog
        }
        parser = argparse.ArgumentParser(**kwargs)
        parser.add_argument("-v", "--version",
                            action="version", default=argparse.SUPPRESS,
                            version="%(prog)s (version " + __version__ + ")\n",
                            help="show program's version number and exit")
        parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)

        # Setting.__lt__ orders by registration index within a section
        keys = sorted(self.settings, key=self.settings.__getitem__)
        for k in keys:
            self.settings[k].add_option(parser)

        return parser

    @property
    def worker_class_str(self):
        """Worker class URI, normalized to "gthread" for threaded sync workers."""
        uri = self.settings['worker_class'].get()

        # are we using a threaded worker?
        is_sync = uri.endswith('SyncWorker') or uri == 'sync'
        if is_sync and self.threads > 1:
            return "gthread"
        return uri

    @property
    def worker_class(self):
        """Resolved worker class object.

        A sync worker combined with ``threads > 1`` is silently upgraded to
        the gthread worker.  The class's optional ``setup()`` hook runs once
        here.
        """
        uri = self.settings['worker_class'].get()

        # are we using a threaded worker?
        is_sync = uri.endswith('SyncWorker') or uri == 'sync'
        if is_sync and self.threads > 1:
            uri = "gunicorn.workers.gthread.ThreadWorker"

        worker_class = util.load_class(uri)
        if hasattr(worker_class, "setup"):
            worker_class.setup()
        return worker_class

    @property
    def address(self):
        """List of parsed bind addresses (one per --bind entry)."""
        s = self.settings['bind'].get()
        return [util.parse_address(util.bytes_to_str(bind)) for bind in s]

    @property
    def uid(self):
        return self.settings['user'].get()

    @property
    def gid(self):
        return self.settings['group'].get()

    @property
    def proc_name(self):
        # fall back to the auto-detected name when not set explicitly
        pn = self.settings['proc_name'].get()
        if pn is not None:
            return pn
        else:
            return self.settings['default_proc_name'].get()

    @property
    def logger_class(self):
        """Resolved logger class; swaps in the statsd logger automatically
        when the default logger is used and statsd_host is configured."""
        uri = self.settings['logger_class'].get()
        if uri == "simple":
            # support the default
            uri = LoggerClass.default

        # if default logger is in use, and statsd is on, automagically switch
        # to the statsd logger
        if uri == LoggerClass.default:
            if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None:
                uri = "gunicorn.instrument.statsd.Statsd"

        logger_class = util.load_class(
            uri,
            default="gunicorn.glogging.Logger",
            section="gunicorn.loggers")

        if hasattr(logger_class, "install"):
            logger_class.install()
        return logger_class

    @property
    def is_ssl(self):
        # SSL is considered enabled as soon as either file is configured
        return self.certfile or self.keyfile

    @property
    def ssl_options(self):
        """Dict of every setting belonging to the SSL section."""
        opts = {}
        for name, value in self.settings.items():
            if value.section == 'SSL':
                opts[name] = value.get()
        return opts

    @property
    def env(self):
        """Dict parsed from the raw_env ``KEY=VALUE`` entries."""
        raw_env = self.settings['raw_env'].get()
        env = {}

        if not raw_env:
            return env

        for e in raw_env:
            s = util.bytes_to_str(e)
            try:
                k, v = s.split('=', 1)
            except ValueError:
                raise RuntimeError("environment setting %r invalid" % s)

            env[k] = v

        return env

    @property
    def sendfile(self):
        # NOTE(review): returns False whenever the setting holds any
        # non-None value; the $SENDFILE env var is only consulted when the
        # setting was never configured -- confirm this is intended.
        if self.settings['sendfile'].get() is not None:
            return False

        if 'SENDFILE' in os.environ:
            sendfile = os.environ['SENDFILE'].lower()
            return sendfile in ['y', '1', 'yes', 'true']

        return True

    @property
    def reuse_port(self):
        return self.settings['reuse_port'].get()

    @property
    def paste_global_conf(self):
        """Dict parsed from raw_paste_global_conf entries; ``\\=`` escapes ``=``."""
        raw_global_conf = self.settings['raw_paste_global_conf'].get()
        if raw_global_conf is None:
            return None

        global_conf = {}
        for e in raw_global_conf:
            s = util.bytes_to_str(e)
            try:
                # split on the first unescaped "="
                # NOTE(review): positional maxsplit is deprecated since
                # Python 3.13; consider re.split(..., maxsplit=1)
                k, v = re.split(r'(?<!\\)=', s, 1)
            except ValueError:
                raise RuntimeError("environment setting %r invalid" % s)
            k = k.replace('\\=', '=')
            v = v.replace('\\=', '=')
            global_conf[k] = v

        return global_conf
class SettingMeta(type):
    """Metaclass that registers every concrete Setting subclass.

    Each subclass is appended to the module-level KNOWN_SETTINGS list,
    given an ``order`` (its registration index, used to keep CLI/help
    ordering stable), has its ``validator`` wrapped as a staticmethod, and
    gets its ``desc`` normalized into ``desc``/``short`` by fmt_desc().
    """

    def __new__(cls, name, bases, attrs):
        super_new = super().__new__
        parents = [b for b in bases if isinstance(b, SettingMeta)]
        if not parents:
            # the Setting base class itself is not registered
            return super_new(cls, name, bases, attrs)

        attrs["order"] = len(KNOWN_SETTINGS)
        attrs["validator"] = staticmethod(attrs["validator"])

        new_class = super_new(cls, name, bases, attrs)
        new_class.fmt_desc(attrs.get("desc", ""))
        KNOWN_SETTINGS.append(new_class)
        return new_class

    def fmt_desc(cls, desc):
        # dedent the triple-quoted description and keep its first line as
        # the short help text (assumes registered settings always provide a
        # non-empty desc -- splitlines()[0] would raise IndexError otherwise)
        desc = textwrap.dedent(desc).strip()
        setattr(cls, "desc", desc)
        setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
    """Base class for a single configuration knob.

    Concrete subclasses declare class attributes only; SettingMeta (bound
    below) registers them and wires up validation.
    """
    name = None        # setting key used in Config.settings
    value = None       # current validated value
    section = None     # documentation / help grouping
    cli = None         # list of command-line flags, or None for no CLI
    validator = None   # callable run on every set()
    type = None        # argparse "type" callback
    meta = None        # argparse metavar
    action = None      # argparse action (defaults to "store")
    default = None     # initial value, validated at construction
    short = None       # first line of desc (filled in by SettingMeta)
    desc = None        # full description (dedented by SettingMeta)
    nargs = None       # argparse nargs
    const = None       # argparse const

    def __init__(self):
        if self.default is not None:
            self.set(self.default)

    def add_option(self, parser):
        """Register this setting's CLI flags on *parser* (no-op without cli)."""
        if not self.cli:
            return
        args = tuple(self.cli)

        help_txt = "%s [%s]" % (self.short, self.default)
        # argparse interpolates "%" in help strings, so escape literal ones
        help_txt = help_txt.replace("%", "%%")

        kwargs = {
            "dest": self.name,
            "action": self.action or "store",
            "type": self.type or str,
            "default": None,
            "help": help_txt
        }

        if self.meta is not None:
            kwargs['metavar'] = self.meta

        # non-"store" actions (store_true, append, ...) reject "type"
        if kwargs["action"] != "store":
            kwargs.pop("type")

        if self.nargs is not None:
            kwargs["nargs"] = self.nargs

        if self.const is not None:
            kwargs["const"] = self.const

        parser.add_argument(*args, **kwargs)

    def copy(self):
        # shallow copy is enough: value is replaced wholesale on set()
        return copy.copy(self)

    def get(self):
        return self.value

    def set(self, val):
        if not callable(self.validator):
            raise TypeError('Invalid validator: %s' % self.name)
        self.value = self.validator(val)

    def __lt__(self, other):
        # orders settings within a section by registration order; used by
        # Config.parser() when sorting
        return (self.section == other.section and
                self.order < other.order)
    __cmp__ = __lt__

    def __repr__(self):
        return "<%s.%s object at %x with value %r>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            id(self),
            self.value,
        )


# rebind Setting through the metaclass so that all subclasses defined
# below are registered in KNOWN_SETTINGS
Setting = SettingMeta('Setting', (Setting,), {})
def validate_bool(val):
    """Coerce *val* to a bool.

    ``None`` passes through, real bools are returned unchanged, and the
    strings ``"true"``/``"false"`` (any case, surrounding whitespace
    ignored) are converted.  Any other string raises ValueError; any other
    type raises TypeError.
    """
    if val is None:
        return
    if isinstance(val, bool):
        return val
    if not isinstance(val, str):
        raise TypeError("Invalid type for casting: %s" % val)
    text = val.strip().lower()
    if text == "true":
        return True
    if text == "false":
        return False
    raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
    """Return *val* unchanged if it is a dict, otherwise raise TypeError."""
    if isinstance(val, dict):
        return val
    raise TypeError("Value is not a dictionary: %s " % val)
def validate_pos_int(val):
    """Coerce *val* to a non-negative int.

    Strings are parsed with base auto-detection (``0x...``, ``0o...``,
    ...); real ints -- including bools -- are normalized through ``int()``.
    Negative results raise ValueError.
    """
    number = int(val) if isinstance(val, int) else int(val, 0)
    if number < 0:
        raise ValueError("Value must be positive: %s" % number)
    return number
def validate_ssl_version(val):
    """Resolve *val* to one of the ``ssl.PROTOCOL_*`` constants.

    Accepts either a bare protocol name (e.g. ``"TLS_CLIENT"`` for
    ``ssl.PROTOCOL_TLS_CLIENT``) or a non-negative integer matching one of
    the protocol constants.
    """
    by_name = {
        attr[9:]: getattr(ssl, attr)
        for attr in dir(ssl) if attr.startswith("PROTOCOL_")
    }
    if val in by_name:
        # string matching PROTOCOL_...
        return by_name[val]
    try:
        # same coercion validate_pos_int applies, inlined here
        intval = int(val) if isinstance(val, int) else int(val, 0)
        if intval < 0:
            raise ValueError
        if intval in by_name.values():
            # positive int matching a protocol int constant
            return intval
    except (ValueError, TypeError):
        # negative integer or not an integer
        # drop this in favour of the more descriptive ValueError below
        pass
    raise ValueError("Invalid ssl_version: %s. Valid options: %s"
                     % (val, ', '.join(by_name)))
def validate_string(val):
    """Strip and return *val*; ``None`` passes through unchanged."""
    if val is None:
        return None
    if isinstance(val, str):
        return val.strip()
    raise TypeError("Not a string: %s" % val)
def validate_file_exists(val):
    """Return *val* unchanged after checking that the path exists.

    ``None`` passes through untouched.

    Raises:
        ValueError: if *val* names a path that does not exist.
    """
    if val is None:
        return None
    if not os.path.exists(val):
        # message grammar fixed: previously read "does not exists"
        raise ValueError("File %s does not exist." % val)
    return val
def validate_list_string(val):
    """Normalize *val* to a list of stripped strings.

    A bare string is treated as a one-element list (legacy syntax); a
    falsy value yields an empty list.
    """
    if not val:
        return []

    if isinstance(val, str):
        # legacy syntax
        val = [val]

    result = []
    for item in val:
        result.append(validate_string(item))
    return result
def validate_list_of_existing_files(val):
    # Normalize to a list of strings, then require each entry to name an
    # existing path (validate_file_exists raises ValueError otherwise).
    return [validate_file_exists(v) for v in validate_list_string(val)]
def validate_string_to_list(val):
    """Split a comma-separated string into a list of stripped, non-empty items."""
    text = validate_string(val)
    if not text:
        return []
    parts = []
    for piece in text.split(","):
        if piece:
            parts.append(piece.strip())
    return parts
def validate_class(val):
    """Accept a class, a factory callable producing one, or a string path.

    A plain function/method is invoked first so config files can supply
    factories; classes pass through unchanged; anything else must be a
    string (validated and stripped).
    """
    if inspect.isfunction(val) or inspect.ismethod(val):
        val = val()
    return val if inspect.isclass(val) else validate_string(val)
def validate_callable(arity):
    """Return a validator enforcing that a value is callable with *arity* args.

    The value may also be given as a dotted import string
    (``module[.submodules...].object``), which is imported and resolved
    first.  An *arity* of -1 disables the arity check.
    """
    def _validate_callable(val):
        if isinstance(val, str):
            # resolve "pkg.mod.attr" style references before checking
            try:
                mod_name, obj_name = val.rsplit(".", 1)
            except ValueError:
                raise TypeError("Value '%s' is not import string. "
                                "Format: module[.submodules...].object" % val)
            try:
                mod = __import__(mod_name, fromlist=[obj_name])
                val = getattr(mod, obj_name)
            except ImportError as e:
                raise TypeError(str(e))
            except AttributeError:
                raise TypeError("Can not load '%s' from '%s'"
                                "" % (obj_name, mod_name))
        if not callable(val):
            raise TypeError("Value is not callable: %s" % val)
        if arity != -1 and arity != util.get_arity(val):
            raise TypeError("Value must have an arity of: %s" % arity)
        return val
    return _validate_callable
def validate_user(val):
    """Resolve *val* to a numeric uid.

    ``None`` means the current effective uid; ints and digit strings are
    used verbatim; any other string is looked up as a user name via the
    passwd database.
    """
    if val is None:
        return os.geteuid()
    if isinstance(val, int):
        return val
    if val.isdigit():
        return int(val)
    try:
        return pwd.getpwnam(val).pw_uid
    except KeyError:
        raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
    """Resolve *val* to a numeric gid.

    ``None`` means the current effective gid; ints and digit strings are
    used verbatim; any other string is looked up as a group name via the
    group database.
    """
    if val is None:
        return os.getegid()
    if isinstance(val, int):
        return val
    if val.isdigit():
        return int(val)
    try:
        return grp.getgrnam(val).gr_gid
    except KeyError:
        raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
    # post_request hooks historically took 2 or 3 arguments; wrap older
    # signatures so callers can always invoke the hook with the current
    # 4-argument form (worker, req, environ, resp).
    val = validate_callable(-1)(val)

    largs = util.get_arity(val)

    if largs == 4:
        return val
    elif largs == 3:
        # legacy (worker, req, environ) signature: drop the response
        return lambda worker, req, env, _r: val(worker, req, env)
    elif largs == 2:
        # legacy (worker, req) signature: drop environ and response
        return lambda worker, req, _e, _r: val(worker, req)
    else:
        raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
    """Validate a working-directory setting and return it as an absolute path."""
    # valid if the value is a string
    val = validate_string(val)

    # transform relative paths
    path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))

    # test if the path exists
    if not os.path.exists(path):
        raise ConfigError("can't chdir to %r" % val)

    return path
def validate_hostport(val):
    """Parse ``"host:port"`` into a ``(host, int_port)`` tuple.

    ``None`` passes through.  Note: exactly one colon is required, so a
    bare IPv6 literal such as ``::1`` is rejected.
    """
    text = validate_string(val)
    if text is None:
        return None
    parts = text.split(":")
    if len(parts) != 2:
        raise TypeError("Value must consist of: hostname:port")
    return (parts[0], int(parts[1]))
def validate_reload_engine(val):
    # must name an engine registered in gunicorn.reloader.reloader_engines
    if val not in reloader_engines:
        raise ConfigError("Invalid reload_engine: %r" % val)
    return val
def get_default_config_file():
    """Return the absolute path of ``./gunicorn.conf.py`` if present, else None."""
    candidate = os.path.join(os.path.abspath(os.getcwd()),
                             'gunicorn.conf.py')
    return candidate if os.path.exists(candidate) else None
class ConfigFile(Setting):
    # -c/--config: where to load the Gunicorn config file from
    name = "config"
    section = "Config File"
    cli = ["-c", "--config"]
    meta = "CONFIG"
    validator = validate_string
    default = "./gunicorn.conf.py"
    desc = """\
        The Gunicorn config file.

        A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``.

        Only has an effect when specified on the command line or as part of an
        application specific configuration.

        By default, a file named ``gunicorn.conf.py`` will be read from the same
        directory where gunicorn is being run.

        .. versionchanged:: 19.4
            Loading the config from a Python module requires the ``python:``
            prefix.
        """
class WSGIApp(Setting):
    # WSGI application path; settable from config files (no CLI flag)
    name = "wsgi_app"
    section = "Config File"
    meta = "STRING"
    validator = validate_string
    default = None
    desc = """\
        A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``.

        .. versionadded:: 20.1.0
        """
class Bind(Setting):
    # -b/--bind: one or more addresses to listen on ("append" allows repeats)
    name = "bind"
    action = "append"
    section = "Server Socket"
    cli = ["-b", "--bind"]
    meta = "ADDRESS"
    validator = validate_list_string

    # honor Platform-as-a-Service $PORT convention when present
    if 'PORT' in os.environ:
        default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
    else:
        default = ['127.0.0.1:8000']

    desc = """\
        The socket to bind.

        A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``,
        ``fd://FD``. An IP is a valid ``HOST``.

        .. versionchanged:: 20.0
            Support for ``fd://FD`` got added.

        Multiple addresses can be bound. ex.::

            $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app

        will bind the `test:app` application on localhost both on ipv6
        and ipv4 interfaces.

        If the ``PORT`` environment variable is defined, the default
        is ``['0.0.0.0:$PORT']``. If it is not defined, the default
        is ``['127.0.0.1:8000']``.
        """
class Backlog(Setting):
    # --backlog: listen(2) queue size for the server socket
    name = "backlog"
    section = "Server Socket"
    cli = ["--backlog"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 2048
    desc = """\
        The maximum number of pending connections.

        This refers to the number of clients that can be waiting to be served.
        Exceeding this number results in the client getting an error when
        attempting to connect. It should only affect servers under significant
        load.

        Must be a positive integer. Generally set in the 64-2048 range.
        """
class Workers(Setting):
    # -w/--workers: number of worker processes; $WEB_CONCURRENCY-aware default
    name = "workers"
    section = "Worker Processes"
    cli = ["-w", "--workers"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = int(os.environ.get("WEB_CONCURRENCY", 1))
    desc = """\
        The number of worker processes for handling requests.

        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
        You'll want to vary this a bit to find the best for your particular
        application's work load.

        By default, the value of the ``WEB_CONCURRENCY`` environment variable,
        which is set by some Platform-as-a-Service providers such as Heroku. If
        it is not defined, the default is ``1``.
        """
class WorkerClass(Setting):
    # -k/--worker-class: worker implementation (alias or dotted class path)
    name = "worker_class"
    section = "Worker Processes"
    cli = ["-k", "--worker-class"]
    meta = "STRING"
    validator = validate_class
    default = "sync"
    desc = """\
        The type of workers to use.

        The default class (``sync``) should handle most "normal" types of
        workloads. You'll want to read :doc:`design` for information on when
        you might want to choose one of the other worker classes. Required
        libraries may be installed using setuptools' ``extras_require`` feature.

        A string referring to one of the following bundled classes:

        * ``sync``
        * ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via
          ``pip install gunicorn[eventlet]``)
        * ``gevent`` - Requires gevent >= 1.4 (or install it via
          ``pip install gunicorn[gevent]``)
        * ``tornado`` - Requires tornado >= 0.2 (or install it via
          ``pip install gunicorn[tornado]``)
        * ``gthread`` - Python 2 requires the futures package to be installed
          (or install it via ``pip install gunicorn[gthread]``)

        Optionally, you can provide your own worker by giving Gunicorn a
        Python path to a subclass of ``gunicorn.workers.base.Worker``.
        This alternative syntax will load the gevent class:
        ``gunicorn.workers.ggevent.GeventWorker``.
        """
class WorkerThreads(Setting):
    # --threads: threads per worker; >1 silently promotes sync -> gthread
    name = "threads"
    section = "Worker Processes"
    cli = ["--threads"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 1
    desc = """\
        The number of worker threads for handling requests.

        Run each worker with the specified number of threads.

        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
        You'll want to vary this a bit to find the best for your particular
        application's work load.

        If it is not defined, the default is ``1``.

        This setting only affects the Gthread worker type.

        .. note::
           If you try to use the ``sync`` worker type and set the ``threads``
           setting to more than 1, the ``gthread`` worker type will be used
           instead.
        """
class WorkerConnections(Setting):
    # --worker-connections: concurrent client cap for async workers
    name = "worker_connections"
    section = "Worker Processes"
    cli = ["--worker-connections"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 1000
    desc = """\
        The maximum number of simultaneous clients.

        This setting only affects the Eventlet and Gevent worker types.
        """
class MaxRequests(Setting):
    # --max-requests: recycle a worker after N requests (0 disables)
    name = "max_requests"
    section = "Worker Processes"
    cli = ["--max-requests"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum number of requests a worker will process before restarting.

        Any value greater than zero will limit the number of requests a worker
        will process before automatically restarting. This is a simple method
        to help limit the damage of memory leaks.

        If this is set to zero (the default) then the automatic worker
        restarts are disabled.
        """
class MaxRequestsJitter(Setting):
    # --max-requests-jitter: randomize max_requests to stagger restarts
    name = "max_requests_jitter"
    section = "Worker Processes"
    cli = ["--max-requests-jitter"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum jitter to add to the *max_requests* setting.

        The jitter causes the restart per worker to be randomized by
        ``randint(0, max_requests_jitter)``. This is intended to stagger worker
        restarts to avoid all workers restarting at the same time.

        .. versionadded:: 19.2
        """
class WaitForNewWorkers(Setting):
    # --wait-for-new-workers: non-upstream rolling-restart flag
    name = "wait_for_new_workers"
    section = "Worker Processes"
    cli = ["--wait-for-new-workers"]
    validator = validate_bool
    action = 'store_true'
    default = False
    desc = """\
        Wait for a new worker to become ready before killing an old worker.
        """
class MaxRestartingWorkers(Setting):
    # --max-restarting-workers: non-upstream cap on simultaneous restarts
    name = "max_restarting_workers"
    section = "Worker Processes"
    cli = ["--max-restarting-workers"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum number of workers which can be restarted at the same time.
        """
class WarmupRequests(Setting):
    # --warmup-requests: non-upstream warm-up threshold for rolling restarts
    name = "warmup_requests"
    section = "Worker Processes"
    cli = ["--warmup-requests"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The number of requests a new worker needs to handle until the old worker can be killed.
        """
class Timeout(Setting):
    # -t/--timeout: kill workers silent for longer than this (0 = disabled)
    name = "timeout"
    section = "Worker Processes"
    cli = ["-t", "--timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Workers silent for more than this many seconds are killed and restarted.

        Value is a positive number or 0. Setting it to 0 has the effect of
        infinite timeouts by disabling timeouts for all workers entirely.

        Generally, the default of thirty seconds should suffice. Only set this
        noticeably higher if you're sure of the repercussions for sync workers.
        For the non sync workers it just means that the worker process is still
        communicating and is not tied to the length of time required to handle a
        single request.
        """
class GracefulTimeout(Setting):
    # --graceful-timeout: grace period before force-killing restarting workers
    name = "graceful_timeout"
    section = "Worker Processes"
    cli = ["--graceful-timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Timeout for graceful workers restart.

        After receiving a restart signal, workers have this much time to finish
        serving requests. Workers still alive after the timeout (starting from
        the receipt of the restart signal) are force killed.
        """
class Keepalive(Setting):
    # --keep-alive: seconds to hold an idle Keep-Alive connection open
    name = "keepalive"
    section = "Worker Processes"
    cli = ["--keep-alive"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 2
    desc = """\
        The number of seconds to wait for requests on a Keep-Alive connection.

        Generally set in the 1-5 seconds range for servers with direct connection
        to the client (e.g. when you don't have separate load balancer). When
        Gunicorn is deployed behind a load balancer, it often makes sense to
        set this to a higher value.

        .. note::
           ``sync`` worker does not support persistent connections and will
           ignore this option.
        """
class LimitRequestLine(Setting):
    # --limit-request-line: cap on the HTTP request-line size (DoS guard)
    name = "limit_request_line"
    section = "Security"
    cli = ["--limit-request-line"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 4094
    desc = """\
        The maximum size of HTTP request line in bytes.

        This parameter is used to limit the allowed size of a client's
        HTTP request-line. Since the request-line consists of the HTTP
        method, URI, and protocol version, this directive places a
        restriction on the length of a request-URI allowed for a request
        on the server. A server needs this value to be large enough to
        hold any of its resource names, including any information that
        might be passed in the query part of a GET request. Value is a number
        from 0 (unlimited) to 8190.

        This parameter can be used to prevent any DDOS attack.
        """
class LimitRequestFields(Setting):
    # --limit-request-fields: cap on the number of request headers (DoS guard)
    name = "limit_request_fields"
    section = "Security"
    cli = ["--limit-request-fields"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 100
    desc = """\
        Limit the number of HTTP headers fields in a request.

        This parameter is used to limit the number of headers in a request to
        prevent DDOS attack. Used with the *limit_request_field_size* it allows
        more safety. By default this value is 100 and can't be larger than
        32768.
        """
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
Value is a positive number or 0. Setting it to 0 will allow unlimited
header field sizes.
.. warning::
Setting this parameter to a very high or unlimited value can open
up for DDOS attacks.
"""
class EnrichResponse(Setting):
name = "enrich_response"
section = 'Debugging'
cli = ['--enrich-response']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Add extra information in the http response body. Works only for sync worker type.
While handling a request, a few timestamps are taken (in microseconds, since 1st of January, 1970):
* ``spawning time`` - when worker object is initialized (this is before forking the new process)
* ``time 1`` - immediately after entering "handle_request"
* ``time 2`` - just before getting the response
* ``time 3`` - immediately after getting the response
The following information is inserted into the response body:
* ``spawn``: spawning time
* ``t1``: time1
* ``d1``: time2 - time1
* ``d2``: time3 - time2
* ``pid``: the pid of the worker handling the request
* ``nr``: number of requests handled by this worker so far
* ``max``: number of requests planned for this worker (this can be exceeded a little bit because of the rolling restarting strategy)
The new response is a json with two keys:
"res" contains the original response
"info" contains the extra information
'''
class Reload(Setting):
name = "reload"
section = 'Debugging'
cli = ['--reload']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Restart workers when code changes.
This setting is intended for development. It will cause workers to be
restarted whenever application code changes.
The reloader is incompatible with application preloading. When using a
paste configuration be sure that the server block does not import any
application code or the reload will not work as designed.
The default behavior is to attempt inotify with a fallback to file
system polling. Generally, inotify should be preferred if available
because it consumes less system resources.
.. note::
In order to use the inotify reloader, you must have the ``inotify``
package installed.
'''
class ReloadEngine(Setting):
name = "reload_engine"
section = "Debugging"
cli = ["--reload-engine"]
meta = "STRING"
validator = validate_reload_engine
default = "auto"
desc = """\
The implementation that should be used to power :ref:`reload`.
Valid engines are:
* ``'auto'``
* ``'poll'``
* ``'inotify'`` (requires inotify)
.. versionadded:: 19.7
"""
class ReloadExtraFiles(Setting):
name = "reload_extra_files"
action = "append"
section = "Debugging"
cli = ["--reload-extra-file"]
meta = "FILES"
validator = validate_list_of_existing_files
default = []
desc = """\
Extends :ref:`reload` option to also watch and reload on additional files
(e.g., templates, configurations, specifications, etc.).
.. versionadded:: 19.8
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Check the configuration and exit. The exit status is 0 if the
configuration is correct, and 1 if the configuration is incorrect.
"""
class PrintConfig(Setting):
name = "print_config"
section = "Debugging"
cli = ["--print-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Print the configuration settings as fully resolved. Implies :ref:`check-config`.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
speed up server boot times. Although, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Sendfile(Setting):
name = "sendfile"
section = "Server Mechanics"
cli = ["--no-sendfile"]
validator = validate_bool
action = "store_const"
const = False
desc = """\
Disables the use of ``sendfile()``.
If not set, the value of the ``SENDFILE`` environment variable is used
to enable or disable its usage.
.. versionadded:: 19.2
.. versionchanged:: 19.4
Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow
disabling.
.. versionchanged:: 19.6
added support for the ``SENDFILE`` environment variable
"""
class ReusePort(Setting):
name = "reuse_port"
section = "Server Mechanics"
cli = ["--reuse-port"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Set the ``SO_REUSEPORT`` flag on the listening socket.
.. versionadded:: 19.8
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
Change directory to specified directory before loading apps.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanics"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variables in the execution environment.
Should be a list of strings in the ``key=value`` format.
For example on the command line:
.. code-block:: console
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
Or in the configuration file:
.. code-block:: python
raw_env = ["FOO=1"]
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class WorkerTmpDir(Setting):
name = "worker_tmp_dir"
section = "Server Mechanics"
cli = ["--worker-tmp-dir"]
meta = "DIR"
validator = validate_string
default = None
desc = """\
A directory to use for the worker heartbeat temporary file.
If not set, the default temporary directory will be used.
.. note::
The current heartbeat system involves calling ``os.fchmod`` on
temporary file handlers and may block a worker for arbitrary time
if the directory is on a disk-backed filesystem.
See :ref:`blocking-os-fchmod` for more detailed information
and a solution for avoiding this problem.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not
change the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
Switch worker process to run as this group.
A valid group id (as an integer) or the name of a user that can be
retrieved with a call to ``pwd.getgrnam(value)`` or ``None`` to not
change the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = auto_int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the ``os.umask(mode)`` call or a string compatible
with ``int(value, 0)`` (``0`` means Python guesses the base, so values
like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal
representations)
"""
class Initgroups(Setting):
name = "initgroups"
section = "Server Mechanics"
cli = ["--initgroups"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
If true, set the worker process's group access list with all of the
groups of which the specified username is a member, plus the specified
group id.
.. versionadded:: 19.7
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. If the source IP is permitted by
``forwarded-allow-ips`` (below), *and* at least one request header matches
a key-value pair listed in this dictionary, then Gunicorn will set
``wsgi.url_scheme`` to ``https``, so your application can tell that the
request is secure.
If the other headers listed in this dictionary are not present in the request, they will be ignored,
but if the other headers are present and do not match the provided values, then
the request will fail to parse. See the note below for more detailed examples of this behaviour.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
cli = ["--forwarded-allow-ips"]
meta = "STRING"
validator = validate_string_to_list
default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
desc = """\
Front-end's IPs from which allowed to handle set secure headers.
(comma separate).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment).
By default, the value of the ``FORWARDED_ALLOW_IPS`` environment
variable. If it is not defined, the default is ``"127.0.0.1"``.
.. note::
The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of
``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we
have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``:
.. code::
secure_scheme_headers = {
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on'
}
.. list-table::
:header-rows: 1
:align: center
:widths: auto
* - ``forwarded-allow-ips``
- Secure Request Headers
- Result
- Explanation
* - .. code::
["127.0.0.1"]
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "http"
- IP address was not allowed
* - .. code::
"*"
- <none>
- .. code::
wsgi.url_scheme = "http"
- IP address allowed, but no secure headers provided
* - .. code::
"*"
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "https"
- IP address allowed, one request header matched
* - .. code::
["134.213.44.18"]
- .. code::
X-Forwarded-Ssl: on
X-Forwarded-Proto: http
- ``InvalidSchemeHeaders()`` raised
- IP address allowed, but the two secure headers disagreed on if HTTPS was used
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
``'-'`` means log to stdout.
"""
class DisableRedirectAccessToSyslog(Setting):
name = "disable_redirect_access_to_syslog"
section = "Logging"
cli = ["--disable-redirect-access-to-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Disable redirect access logs to syslog.
.. versionadded:: 19.8
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
The access log format.
=========== ===========
Identifier Description
=========== ===========
h remote address
l ``'-'``
u user name
t date of the request
r status line (e.g. ``GET / HTTP/1.1``)
m request method
U URL path without query string
q query string
H protocol
s status
B response length
b response length or ``'-'`` (CLF format)
f referer
a user agent
T request time in seconds
M request time in milliseconds
D request time in microseconds
L request time in decimal seconds
p process ID
{header}i request header
{header}o response header
{variable}e environment variable
=========== ===========
Use lowercase for header and environment variable names, and put
``{...}x`` names inside ``%(...)s``. For example::
%({x-forwarded-for}i)s
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = '-'
desc = """\
The Error log file to write to.
Using ``'-'`` for FILE makes gunicorn log to stderr.
.. versionchanged:: 19.2
Log to stderr by default.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* ``'debug'``
* ``'info'``
* ``'warning'``
* ``'error'``
* ``'critical'``
"""
class CaptureOutput(Setting):
name = "capture_output"
section = "Logging"
cli = ["--capture-output"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Redirect stdout/stderr to specified file in :ref:`errorlog`.
.. versionadded:: 19.6
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "gunicorn.glogging.Logger"
desc = """\
The logger you want to use to log events in Gunicorn.
The default class (``gunicorn.glogging.Logger``) handles most
normal usages in logging. It provides error and access logging.
You can provide your own logger by giving Gunicorn a Python path to a
class that quacks like ``gunicorn.glogging.Logger``.
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class LogConfigDict(Setting):
name = "logconfig_dict"
section = "Logging"
validator = validate_dict
default = {}
desc = """\
The log config dictionary to use, using the standard Python
logging module's dictionary configuration format. This option
takes precedence over the :ref:`logconfig` option, which uses the
older file configuration format.
Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
.. versionadded:: 19.8
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
Address to send syslog messages.
Address is a string of the form:
* ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream``
for the stream driver or ``dgram`` for the dgram driver.
``stream`` is the default.
* ``udp://HOST:PORT`` : for UDP sockets
* ``tcp://HOST:PORT`` : for TCP sockets
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Send *Gunicorn* logs to syslog.
.. versionchanged:: 19.8
You can now disable sending access logs by using the
:ref:`disable-redirect-access-to-syslog` setting.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
Makes Gunicorn use the parameter as program-name in the syslog entries.
All entries will be prefixed by ``gunicorn.<prefix>``. By default the
program name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance.
Enable inheritance for stdio file descriptors in daemon mode.
Note: To disable the Python stdout buffering, you can to set the user
environment variable ``PYTHONUNBUFFERED`` .
"""
# statsD monitoring
class StatsdHost(Setting):
    # Address of a statsd server that Gunicorn's logger should emit
    # metrics to; statsd reporting is disabled when left unset.
    name = "statsd_host"
    section = "Logging"
    cli = ["--statsd-host"]
    meta = "STATSD_ADDR"
    default = None                  # disabled by default
    validator = validate_hostport   # expects a "host:port" pair
    desc = """\
        ``host:port`` of the statsd server to log to.
        .. versionadded:: 19.1
        """
# Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/
class DogstatsdTags(Setting):
name = "dogstatsd_tags"
section = "Logging"
cli = ["--dogstatsd-tags"]
meta = "DOGSTATSD_TAGS"
default = ""
validator = validate_string
desc = """\
A comma-delimited list of datadog statsd (dogstatsd) tags to append to
statsd metrics.
.. versionadded:: 20
"""
class StatsdPrefix(Setting):
name = "statsd_prefix"
section = "Logging"
cli = ["--statsd-prefix"]
meta = "STATSD_PREFIX"
default = ""
validator = validate_string
desc = """\
Prefix to use when emitting statsd metrics (a trailing ``.`` is added,
if not provided).
.. versionadded:: 19.2
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
If not set, the *default_proc_name* setting will be used.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A comma-separated list of directories to add to the Python path.
e.g.
``'/home/djangoprojects/myproject,/home/python/mylibrary'``.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paste", "--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a PasteDeploy config file. The argument may contain a ``#``
symbol followed by the name of an app section from the config file,
e.g. ``production.ini#admin``.
At this time, using alternate server blocks is not supported. Use the
command line arguments to control server configuration instead.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerInt(Setting):
name = "worker_int"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_int(worker):
pass
default = staticmethod(worker_int)
desc = """\
Called just after a worker exited on SIGINT or SIGQUIT.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerAbort(Setting):
name = "worker_abort"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_abort(worker):
pass
default = staticmethod(worker_abort)
desc = """\
Called when a worker received the SIGABRT signal.
This call generally happens on timeout.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
    # Server hook invoked by the worker after a request has been handled.
    name = "post_request"
    section = "Server Hooks"
    validator = validate_post_request
    type = callable

    def post_request(worker, req, environ, resp):
        # Default hook: no-op.
        pass
    default = staticmethod(post_request)
    # Fix: the help text previously said the callable "needs to accept two
    # instance variables" although the default implementation (and the
    # worker call site) use four arguments.
    desc = """\
        Called after a worker processes the request.
        The callable needs to accept four instance variables for the Worker,
        the Request, the WSGI environ and the Response.
        """
class ChildExit(Setting):
name = "child_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def child_exit(server, worker):
pass
default = staticmethod(child_exit)
desc = """\
Called just after a worker has been exited, in the master process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
.. versionadded:: 19.7
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has been exited, in the worker process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after *num_workers* has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers of number of workers after and before change.
If the number of workers is set for the first time, *old_value* would
be ``None``.
"""
class OnExit(Setting):
    # Server hook invoked just before the arbiter (master) process exits.
    name = "on_exit"
    section = "Server Hooks"
    validator = validate_callable(1)
    # Consistency fix: every other Server Hooks setting declares
    # ``type = callable``; OnExit was the only one missing it.
    type = callable

    def on_exit(server):
        # Default hook: no-op.
        pass
    default = staticmethod(on_exit)
    desc = """\
        Called just before exiting Gunicorn.
        The callable needs to accept a single instance variable for the Arbiter.
        """
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable detect PROXY protocol (PROXY mode).
Allow using HTTP and Proxy together. It may be useful for work with
stunnel as HTTPS frontend and Gunicorn as HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end's IPs from which allowed accept proxy requests (comma separate).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment)
"""
class KeyFile(Setting):
name = "keyfile"
section = "SSL"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "SSL"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
class SSLVersion(Setting):
    # SSL/TLS protocol version used for HTTPS listeners.
    name = "ssl_version"
    section = "SSL"
    cli = ["--ssl-version"]
    validator = validate_ssl_version

    # Prefer ssl.PROTOCOL_TLS, which auto-negotiates the highest version
    # both peers support; fall back to the deprecated PROTOCOL_SSLv23 alias
    # on Python builds that predate PROTOCOL_TLS.
    #
    # Bug fix: a later unconditional ``default = ssl.PROTOCOL_SSLv23``
    # assignment (left over from a merge of two versions of this class)
    # clobbered the conditional above, and ``desc`` was assigned twice.
    # The stray assignment has been removed and the two desc strings merged.
    if hasattr(ssl, "PROTOCOL_TLS"):
        default = ssl.PROTOCOL_TLS
    else:
        default = ssl.PROTOCOL_SSLv23
    desc = """\
        SSL version to use.
        ============= ============
        --ssl-version Description
        ============= ============
        SSLv3         SSLv3 is not-secure and is strongly discouraged.
        SSLv23        Alias for TLS. Deprecated in Python 3.6, use TLS.
        TLS           Negotiate highest possible version between client/server.
                      Can yield SSL. (Python 3.6+)
        TLSv1         TLS 1.0
        TLSv1_1       TLS 1.1 (Python 3.4+)
        TLSv1_2       TLS 1.2 (Python 3.4+)
        TLS_SERVER    Auto-negotiate the highest protocol version like TLS,
                      but only support server-side SSLSocket connections.
                      (Python 3.6+)
        ============= ============
        .. versionchanged:: 19.7
           The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to
           ``ssl.PROTOCOL_SSLv23``.
        .. versionchanged:: 20.0
           This setting now accepts string names based on ``ssl.PROTOCOL_``
           constants.
        .. versionchanged:: 20.0.1
           The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to
           ``ssl.PROTOCOL_TLS`` when Python >= 3.6 .
        """
class CertReqs(Setting):
name = "cert_reqs"
section = "SSL"
cli = ["--cert-reqs"]
validator = validate_pos_int
default = ssl.CERT_NONE
desc = """\
Whether client certificate is required (see stdlib ssl module's)
"""
class CACerts(Setting):
name = "ca_certs"
section = "SSL"
cli = ["--ca-certs"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
CA certificates file
"""
class SuppressRaggedEOFs(Setting):
name = "suppress_ragged_eofs"
section = "SSL"
cli = ["--suppress-ragged-eofs"]
action = "store_true"
default = True
validator = validate_bool
desc = """\
Suppress ragged EOFs (see stdlib ssl module's)
"""
class DoHandshakeOnConnect(Setting):
name = "do_handshake_on_connect"
section = "SSL"
cli = ["--do-handshake-on-connect"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Whether to perform SSL handshake on socket connect (see stdlib ssl module's)
"""
class Ciphers(Setting):
name = "ciphers"
section = "SSL"
cli = ["--ciphers"]
validator = validate_string
default = None
desc = """\
SSL Cipher suite to use, in the format of an OpenSSL cipher list.
By default we use the default cipher list from Python's ``ssl`` module,
which contains ciphers considered strong at the time of each Python
release.
As a recommended alternative, the Open Web App Security Project (OWASP)
offers `a vetted set of strong cipher strings rated A+ to C-
<https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_.
OWASP provides details on user-agent compatibility at each security level.
See the `OpenSSL Cipher List Format Documentation
<https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_
for details on the format of an OpenSSL cipher list.
"""
class PasteGlobalConf(Setting):
name = "raw_paste_global_conf"
action = "append"
section = "Server Mechanics"
cli = ["--paste-global"]
meta = "CONF"
validator = validate_list_string
default = []
desc = """\
Set a PasteDeploy global config variable in ``key=value`` form.
The option can be specified multiple times.
The variables are passed to the the PasteDeploy entrypoint. Example::
$ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2
.. versionadded:: 19.7
"""
class StripHeaderSpaces(Setting):
name = "strip_header_spaces"
section = "Server Mechanics"
cli = ["--strip-header-spaces"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Strip spaces present between the header name and the the ``:``.
This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard.
See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
Use with care and only if necessary.
"""
| 28.225564 | 144 | 0.597684 |
import argparse
import copy
import grp
import inspect
import os
import pwd
import re
import shlex
import ssl
import sys
import textwrap
from gunicorn import __version__, util
from gunicorn.errors import ConfigError
from gunicorn.reloader import reloader_engines
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def make_settings(ignore=None):
    """Instantiate every registered Setting and return them keyed by name.

    Settings whose ``name`` appears in *ignore* are skipped.  Each returned
    value is a copy of the freshly created Setting instance.
    """
    skipped = ignore or ()
    return {
        setting.name: setting.copy()
        for setting in (cls() for cls in KNOWN_SETTINGS)
        if setting.name not in skipped
    }
def auto_int(_, x):
    """Parse *x* as an integer with automatic base detection.

    A legacy octal literal (a leading ``0`` immediately followed by a
    digit, e.g. ``"0777"``) is rewritten to the Python 3 ``0o`` prefix so
    that ``int(x, 0)`` accepts it; hex (``0x``) and decimal forms pass
    through unchanged.
    """
    if re.match(r'0(\d)', x, re.IGNORECASE):
        return int(x.replace('0', '0o', 1), 0)
    return int(x, 0)
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
    def __str__(self):
        """Render every setting as aligned ``name = value`` lines, sorted by name."""
        lines = []
        kmax = max(len(k) for k in self.settings)  # width of the longest name, for column alignment
        for k in sorted(self.settings):
            v = self.settings[k].value
            if callable(v):
                # Show a short readable placeholder instead of the default repr.
                v = "<{}()>".format(v.__qualname__)
            lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax))
        return "\n".join(lines)
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super().__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def get_cmd_args_from_env(self):
if 'GUNICORN_CMD_ARGS' in self.env_orig:
return shlex.split(self.env_orig['GUNICORN_CMD_ARGS'])
return []
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
keys = sorted(self.settings, key=self.settings.__getitem__)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class_str(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
return "gthread"
return uri
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
uri = "gunicorn.workers.gthread.ThreadWorker"
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(util.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
if uri == "simple":
# support the default
uri = LoggerClass.default
# if default logger is in use, and statsd is on, automagically switch
# to the statsd logger
if uri == LoggerClass.default:
if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None:
uri = "gunicorn.instrument.statsd.Statsd"
logger_class = util.load_class(
uri,
default="gunicorn.glogging.Logger",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
for name, value in self.settings.items():
if value.section == 'SSL':
opts[name] = value.get()
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = util.bytes_to_str(e)
try:
k, v = s.split('=', 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
env[k] = v
return env
@property
def sendfile(self):
if self.settings['sendfile'].get() is not None:
return False
if 'SENDFILE' in os.environ:
sendfile = os.environ['SENDFILE'].lower()
return sendfile in ['y', '1', 'yes', 'true']
return True
@property
def reuse_port(self):
return self.settings['reuse_port'].get()
@property
def paste_global_conf(self):
raw_global_conf = self.settings['raw_paste_global_conf'].get()
if raw_global_conf is None:
return None
global_conf = {}
for e in raw_global_conf:
s = util.bytes_to_str(e)
try:
k, v = re.split(r'(?<!\\)=', s, 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
k = k.replace('\\=', '=')
v = v.replace('\\=', '=')
global_conf[k] = v
return global_conf
class SettingMeta(type):
    """Metaclass that auto-registers every Setting subclass.

    Each new subclass gets an ``order`` (its position in KNOWN_SETTINGS),
    a staticmethod-wrapped ``validator``, and a dedented ``desc``/``short``
    pair, and is appended to KNOWN_SETTINGS.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super().__new__
        parents = [b for b in bases if isinstance(b, SettingMeta)]
        if not parents:
            # The Setting base itself: create normally, don't register.
            return super_new(cls, name, bases, attrs)
        attrs["order"] = len(KNOWN_SETTINGS)
        attrs["validator"] = staticmethod(attrs["validator"])
        new_class = super_new(cls, name, bases, attrs)
        new_class.fmt_desc(attrs.get("desc", ""))
        KNOWN_SETTINGS.append(new_class)
        return new_class
    def fmt_desc(cls, desc):
        # Normalize the class-level desc: dedent, strip, and keep the
        # first line as the short (CLI help) description.
        desc = textwrap.dedent(desc).strip()
        setattr(cls, "desc", desc)
        setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
    """Base class for a single configuration knob.

    Subclasses override the class attributes below; SettingMeta (applied
    just after this definition) registers each subclass and fills in
    ``order``, ``desc`` and ``short``.
    """
    name = None         # setting name, also the attribute on Config
    value = None        # current (validated) value
    section = None      # documentation/CLI grouping
    cli = None          # list of command-line flags, or None for no flag
    validator = None    # callable applied by set()
    type = None         # argparse type= converter
    meta = None         # argparse metavar
    action = None       # argparse action (default "store")
    default = None      # default value, validated in __init__
    short = None        # one-line help (derived from desc by SettingMeta)
    desc = None         # full reST description
    nargs = None        # argparse nargs
    const = None        # argparse const (for store_const actions)
    def __init__(self):
        # Run the default through the validator so self.value is normalized.
        if self.default is not None:
            self.set(self.default)
    def add_option(self, parser):
        """Register this setting's CLI flag(s) on an argparse parser."""
        if not self.cli:
            return
        args = tuple(self.cli)
        help_txt = "%s [%s]" % (self.short, self.default)
        # Escape % so argparse's help formatter doesn't interpolate it.
        help_txt = help_txt.replace("%", "%%")
        kwargs = {
            "dest": self.name,
            "action": self.action or "store",
            "type": self.type or str,
            "default": None,
            "help": help_txt
        }
        if self.meta is not None:
            kwargs['metavar'] = self.meta
        # argparse rejects type= for non-store actions.
        if kwargs["action"] != "store":
            kwargs.pop("type")
        if self.nargs is not None:
            kwargs["nargs"] = self.nargs
        if self.const is not None:
            kwargs["const"] = self.const
        parser.add_argument(*args, **kwargs)
    def copy(self):
        return copy.copy(self)
    def get(self):
        return self.value
    def set(self, val):
        """Validate *val* and store the normalized result."""
        if not callable(self.validator):
            raise TypeError('Invalid validator: %s' % self.name)
        self.value = self.validator(val)
    def __lt__(self, other):
        # Orders settings within the same section by definition order;
        # used when sorting settings for the CLI parser.
        return (self.section == other.section and
                self.order < other.order)
    __cmp__ = __lt__
    def __repr__(self):
        return "<%s.%s object at %x with value %r>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            id(self),
            self.value,
        )
# Rebind Setting through the metaclass so that every subsequent subclass
# is auto-registered in KNOWN_SETTINGS by SettingMeta.__new__.
Setting = SettingMeta('Setting', (Setting,), {})
def validate_bool(val):
    """Coerce *val* to a bool.

    None passes through; bools are returned as-is; strings must read
    "true" or "false" (case-insensitive, surrounding whitespace ignored).
    Anything else raises TypeError/ValueError.
    """
    if val is None:
        return
    if isinstance(val, bool):
        return val
    if not isinstance(val, str):
        raise TypeError("Invalid type for casting: %s" % val)
    normalized = val.lower().strip()
    if normalized == "true":
        return True
    if normalized == "false":
        return False
    raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
    """Return *val* unchanged if it is a dict, else raise TypeError."""
    if isinstance(val, dict):
        return val
    raise TypeError("Value is not a dictionary: %s " % val)
def validate_pos_int(val):
    """Coerce *val* to a non-negative int.

    Strings are parsed with auto-detected base (so "0x10" and "0o644"
    work); despite the name, 0 is accepted.
    """
    if isinstance(val, int):
        # Booleans are ints!
        val = int(val)
    else:
        val = int(val, 0)
    if val < 0:
        raise ValueError("Value must be positive: %s" % val)
    return val
def validate_ssl_version(val):
    """Map *val* to an ``ssl`` protocol constant.

    Accepts either the suffix of a ``ssl.PROTOCOL_*`` name (e.g.
    ``"TLS_CLIENT"``) or the matching integer constant itself.
    """
    ssl_versions = {
        proto_name[9:]: getattr(ssl, proto_name)
        for proto_name in dir(ssl)
        if proto_name.startswith("PROTOCOL_")
    }
    if val in ssl_versions:
        # string matching PROTOCOL_...
        return ssl_versions[val]
    try:
        intval = validate_pos_int(val)
        if intval in ssl_versions.values():
            # positive int matching a protocol int constant
            return intval
    except (ValueError, TypeError):
        # negative integer or not an integer
        # drop this in favour of the more descriptive ValueError below
        pass
    raise ValueError("Invalid ssl_version: %s. Valid options: %s"
                     % (val, ', '.join(ssl_versions)))
def validate_string(val):
    """Return *val* stripped of surrounding whitespace; None passes through."""
    if val is None:
        return None
    if isinstance(val, str):
        return val.strip()
    raise TypeError("Not a string: %s" % val)
def validate_file_exists(val):
    """Return *val* unchanged if it names an existing path.

    None passes through; a non-existent path raises ValueError.
    """
    if val is None:
        return None
    if not os.path.exists(val):
        # Fixed grammar of the user-facing error ("does not exists" -> "does not exist").
        raise ValueError("File %s does not exist." % val)
    return val
def validate_list_string(val):
    """Normalize *val* into a list of validated strings.

    Falsy input yields []; a bare string is wrapped in a one-element list.
    """
    if not val:
        return []
    # legacy syntax
    if isinstance(val, str):
        val = [val]
    return [validate_string(item) for item in val]
def validate_list_of_existing_files(val):
    """Normalize *val* to a list of strings, each naming an existing file."""
    paths = validate_list_string(val)
    return [validate_file_exists(path) for path in paths]
def validate_string_to_list(val):
    """Split a comma-separated string into a list of stripped, non-empty items."""
    val = validate_string(val)
    if not val:
        return []
    return [part.strip() for part in val.split(",") if part]
def validate_class(val):
    """Accept a class, a zero-arg callable producing one, or an import string.

    Functions/methods are called to obtain the class; strings are merely
    whitespace-validated here, not imported.
    """
    if inspect.isfunction(val) or inspect.ismethod(val):
        val = val()
    return val if inspect.isclass(val) else validate_string(val)
def validate_callable(arity):
    """Build a validator enforcing that a value is callable.

    *arity* is the required number of parameters (-1 disables the check).
    Strings are treated as ``module[.submodules...].object`` import paths
    and resolved before validation.
    """
    def _resolve(path):
        # "pkg.mod.obj" -> the attribute obj of module pkg.mod
        try:
            mod_name, obj_name = path.rsplit(".", 1)
        except ValueError:
            raise TypeError("Value '%s' is not import string. "
                            "Format: module[.submodules...].object" % path)
        try:
            mod = __import__(mod_name, fromlist=[obj_name])
            return getattr(mod, obj_name)
        except ImportError as e:
            raise TypeError(str(e))
        except AttributeError:
            raise TypeError("Can not load '%s' from '%s'"
                            "" % (obj_name, mod_name))
    def _validate_callable(val):
        if isinstance(val, str):
            val = _resolve(val)
        if not callable(val):
            raise TypeError("Value is not callable: %s" % val)
        if arity != -1 and arity != util.get_arity(val):
            raise TypeError("Value must have an arity of: %s" % arity)
        return val
    return _validate_callable
def validate_user(val):
    """Resolve *val* to a numeric uid.

    None means the current effective uid; ints and digit-strings pass
    through numerically; other strings are looked up via pwd.
    """
    if val is None:
        return os.geteuid()
    if isinstance(val, int):
        return val
    if val.isdigit():
        return int(val)
    try:
        return pwd.getpwnam(val).pw_uid
    except KeyError:
        raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
    """Resolve *val* to a numeric gid.

    None means the current effective gid; ints and digit-strings pass
    through numerically; other strings are looked up via grp.
    """
    if val is None:
        return os.getegid()
    if isinstance(val, int):
        return val
    if val.isdigit():
        return int(val)
    try:
        return grp.getgrnam(val).gr_gid
    except KeyError:
        raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
    """Validate a post_request hook, adapting short signatures.

    2- and 3-argument hooks are wrapped so callers can always invoke the
    canonical 4-argument ``(worker, req, environ, resp)`` form.
    """
    val = validate_callable(-1)(val)
    nargs = util.get_arity(val)
    if nargs == 4:
        return val
    if nargs == 3:
        return lambda worker, req, env, _r: val(worker, req, env)
    if nargs == 2:
        return lambda worker, req, _e, _r: val(worker, req)
    raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
    """Validate *val* as an existing directory path.

    Relative paths are resolved against the server's original working
    directory (util.getcwd()); the absolute, normalized path is returned.
    """
    val = validate_string(val)
    # Resolve (possibly relative) path against the original cwd.
    resolved = os.path.join(util.getcwd(), val)
    resolved = os.path.abspath(os.path.normpath(resolved))
    if not os.path.exists(resolved):
        raise ConfigError("can't chdir to %r" % val)
    return resolved
def validate_hostport(val):
    """Parse "host:port" into a ``(host, int(port))`` tuple; None passes through."""
    val = validate_string(val)
    if val is None:
        return None
    host, sep, port = val.partition(":")
    # Exactly one colon is required (mirrors split(":") yielding 2 parts).
    if sep and ":" not in port:
        return (host, int(port))
    raise TypeError("Value must consist of: hostname:port")
def validate_reload_engine(val):
    """Ensure *val* names a registered reloader implementation."""
    if val in reloader_engines:
        return val
    raise ConfigError("Invalid reload_engine: %r" % val)
def get_default_config_file():
    """Return ./gunicorn.conf.py (absolute) if it exists in the cwd, else None."""
    candidate = os.path.join(os.path.abspath(os.getcwd()),
                             'gunicorn.conf.py')
    return candidate if os.path.exists(candidate) else None
# Path (or file:/python: URI) of the Gunicorn configuration file.
class ConfigFile(Setting):
    name = "config"
    section = "Config File"
    cli = ["-c", "--config"]
    meta = "CONFIG"
    validator = validate_string
    default = "./gunicorn.conf.py"
    desc = """\
        The Gunicorn config file.
        A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``.
        Only has an effect when specified on the command line or as part of an
        application specific configuration.
        By default, a file named ``gunicorn.conf.py`` will be read from the same
        directory where gunicorn is being run.
        .. versionchanged:: 19.4
            Loading the config from a Python module requires the ``python:``
            prefix.
        """
# WSGI application import string ("module:variable"); no CLI flag.
class WSGIApp(Setting):
    name = "wsgi_app"
    section = "Config File"
    meta = "STRING"
    validator = validate_string
    default = None
    desc = """\
        A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``.
        .. versionadded:: 20.1.0
        """
# Listening address(es); repeatable flag, default depends on $PORT.
class Bind(Setting):
    name = "bind"
    action = "append"
    section = "Server Socket"
    cli = ["-b", "--bind"]
    meta = "ADDRESS"
    validator = validate_list_string
    if 'PORT' in os.environ:
        default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
    else:
        default = ['127.0.0.1:8000']
    desc = """\
        The socket to bind.
        A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``,
        ``fd://FD``. An IP is a valid ``HOST``.
        .. versionchanged:: 20.0
            Support for ``fd://FD`` got added.
        Multiple addresses can be bound. ex.::
            $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
        will bind the `test:app` application on localhost both on ipv6
        and ipv4 interfaces.
        If the ``PORT`` environment variable is defined, the default
        is ``['0.0.0.0:$PORT']``. If it is not defined, the default
        is ``['127.0.0.1:8000']``.
        """
# Listen-socket backlog (maximum number of pending connections).
class Backlog(Setting):
    name = "backlog"
    section = "Server Socket"
    cli = ["--backlog"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 2048
    desc = """\
        The maximum number of pending connections.
        This refers to the number of clients that can be waiting to be served.
        Exceeding this number results in the client getting an error when
        attempting to connect. It should only affect servers under significant
        load.
        Must be a positive integer. Generally set in the 64-2048 range.
        """
# Number of worker processes; default comes from $WEB_CONCURRENCY.
class Workers(Setting):
    name = "workers"
    section = "Worker Processes"
    cli = ["-w", "--workers"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = int(os.environ.get("WEB_CONCURRENCY", 1))
    desc = """\
        The number of worker processes for handling requests.
        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
        You'll want to vary this a bit to find the best for your particular
        application's work load.
        By default, the value of the ``WEB_CONCURRENCY`` environment variable,
        which is set by some Platform-as-a-Service providers such as Heroku. If
        it is not defined, the default is ``1``.
        """
# Worker implementation to use (sync/eventlet/gevent/tornado/gthread or a dotted path).
class WorkerClass(Setting):
    name = "worker_class"
    section = "Worker Processes"
    cli = ["-k", "--worker-class"]
    meta = "STRING"
    validator = validate_class
    default = "sync"
    desc = """\
        The type of workers to use.
        The default class (``sync``) should handle most "normal" types of
        workloads. You'll want to read :doc:`design` for information on when
        you might want to choose one of the other worker classes. Required
        libraries may be installed using setuptools' ``extras_require`` feature.
        A string referring to one of the following bundled classes:
        * ``sync``
        * ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via
          ``pip install gunicorn[eventlet]``)
        * ``gevent`` - Requires gevent >= 1.4 (or install it via
          ``pip install gunicorn[gevent]``)
        * ``tornado`` - Requires tornado >= 0.2 (or install it via
          ``pip install gunicorn[tornado]``)
        * ``gthread`` - Python 2 requires the futures package to be installed
          (or install it via ``pip install gunicorn[gthread]``)
        Optionally, you can provide your own worker by giving Gunicorn a
        Python path to a subclass of ``gunicorn.workers.base.Worker``.
        This alternative syntax will load the gevent class:
        ``gunicorn.workers.ggevent.GeventWorker``.
        """
# Threads per worker; >1 with the sync worker silently selects gthread.
class WorkerThreads(Setting):
    name = "threads"
    section = "Worker Processes"
    cli = ["--threads"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 1
    desc = """\
        The number of worker threads for handling requests.
        Run each worker with the specified number of threads.
        A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
        You'll want to vary this a bit to find the best for your particular
        application's work load.
        If it is not defined, the default is ``1``.
        This setting only affects the Gthread worker type.
        .. note::
           If you try to use the ``sync`` worker type and set the ``threads``
           setting to more than 1, the ``gthread`` worker type will be used
           instead.
        """
# Max simultaneous clients (eventlet/gevent workers only).
class WorkerConnections(Setting):
    name = "worker_connections"
    section = "Worker Processes"
    cli = ["--worker-connections"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 1000
    desc = """\
        The maximum number of simultaneous clients.
        This setting only affects the Eventlet and Gevent worker types.
        """
# Recycle a worker after this many requests (0 disables recycling).
class MaxRequests(Setting):
    name = "max_requests"
    section = "Worker Processes"
    cli = ["--max-requests"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum number of requests a worker will process before restarting.
        Any value greater than zero will limit the number of requests a worker
        will process before automatically restarting. This is a simple method
        to help limit the damage of memory leaks.
        If this is set to zero (the default) then the automatic worker
        restarts are disabled.
        """
# Random jitter added to max_requests so workers don't restart in lockstep.
class MaxRequestsJitter(Setting):
    name = "max_requests_jitter"
    section = "Worker Processes"
    cli = ["--max-requests-jitter"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum jitter to add to the *max_requests* setting.
        The jitter causes the restart per worker to be randomized by
        ``randint(0, max_requests_jitter)``. This is intended to stagger worker
        restarts to avoid all workers restarting at the same time.
        .. versionadded:: 19.2
        """
# Non-upstream extension: delay killing old workers until replacements are ready.
class WaitForNewWorkers(Setting):
    name = "wait_for_new_workers"
    section = "Worker Processes"
    cli = ["--wait-for-new-workers"]
    validator = validate_bool
    action = 'store_true'
    default = False
    desc = """\
        Wait for a new worker to become ready before killing an old worker.
        """
# Non-upstream extension: cap on concurrently restarting workers (0 = no cap).
class MaxRestartingWorkers(Setting):
    name = "max_restarting_workers"
    section = "Worker Processes"
    cli = ["--max-restarting-workers"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum number of workers which can be restarted at the same time.
        """
# Non-upstream extension: requests a new worker must serve before the old one dies.
class WarmupRequests(Setting):
    name = "warmup_requests"
    section = "Worker Processes"
    cli = ["--warmup-requests"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The number of requests a new worker needs to handle until the old worker can be killed.
        """
# Seconds of worker silence before the arbiter kills and restarts it.
class Timeout(Setting):
    name = "timeout"
    section = "Worker Processes"
    cli = ["-t", "--timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Workers silent for more than this many seconds are killed and restarted.
        Value is a positive number or 0. Setting it to 0 has the effect of
        infinite timeouts by disabling timeouts for all workers entirely.
        Generally, the default of thirty seconds should suffice. Only set this
        noticeably higher if you're sure of the repercussions for sync workers.
        For the non sync workers it just means that the worker process is still
        communicating and is not tied to the length of time required to handle a
        single request.
        """
# Grace period for workers to finish in-flight requests on restart.
class GracefulTimeout(Setting):
    name = "graceful_timeout"
    section = "Worker Processes"
    cli = ["--graceful-timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Timeout for graceful workers restart.
        After receiving a restart signal, workers have this much time to finish
        serving requests. Workers still alive after the timeout (starting from
        the receipt of the restart signal) are force killed.
        """
# Keep-Alive idle timeout in seconds (ignored by the sync worker).
class Keepalive(Setting):
    name = "keepalive"
    section = "Worker Processes"
    cli = ["--keep-alive"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 2
    desc = """\
        The number of seconds to wait for requests on a Keep-Alive connection.
        Generally set in the 1-5 seconds range for servers with direct connection
        to the client (e.g. when you don't have separate load balancer). When
        Gunicorn is deployed behind a load balancer, it often makes sense to
        set this to a higher value.
        .. note::
           ``sync`` worker does not support persistent connections and will
           ignore this option.
        """
# Max HTTP request-line size in bytes (0 = unlimited, max 8190).
class LimitRequestLine(Setting):
    name = "limit_request_line"
    section = "Security"
    cli = ["--limit-request-line"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 4094
    desc = """\
        The maximum size of HTTP request line in bytes.
        This parameter is used to limit the allowed size of a client's
        HTTP request-line. Since the request-line consists of the HTTP
        method, URI, and protocol version, this directive places a
        restriction on the length of a request-URI allowed for a request
        on the server. A server needs this value to be large enough to
        hold any of its resource names, including any information that
        might be passed in the query part of a GET request. Value is a number
        from 0 (unlimited) to 8190.
        This parameter can be used to prevent any DDOS attack.
        """
# Max number of header fields per request.
class LimitRequestFields(Setting):
    name = "limit_request_fields"
    section = "Security"
    cli = ["--limit-request-fields"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 100
    desc = """\
        Limit the number of HTTP headers fields in a request.
        This parameter is used to limit the number of headers in a request to
        prevent DDOS attack. Used with the *limit_request_field_size* it allows
        more safety. By default this value is 100 and can't be larger than
        32768.
        """
# Max size in bytes of a single header field (0 = unlimited).
class LimitRequestFieldSize(Setting):
    name = "limit_request_field_size"
    section = "Security"
    cli = ["--limit-request-field_size"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 8190
    desc = """\
        Limit the allowed size of an HTTP request header field.
        Value is a positive number or 0. Setting it to 0 will allow unlimited
        header field sizes.
        .. warning::
           Setting this parameter to a very high or unlimited value can open
           up for DDOS attacks.
        """
# Non-upstream extension: wrap responses with timing/worker metadata (sync worker only).
class EnrichResponse(Setting):
    name = "enrich_response"
    section = 'Debugging'
    cli = ['--enrich-response']
    validator = validate_bool
    action = 'store_true'
    default = False
    desc = '''\
        Add extra information in the http response body. Works only for sync worker type.
        While handling a request, a few timestamps are taken (in microseconds, since 1st of January, 1970):
        * ``spawning time`` - when worker object is initialized (this is before forking the new process)
        * ``time 1`` - immediately after entering "handle_request"
        * ``time 2`` - just before getting the response
        * ``time 3`` - immediately after getting the response
        The following information is inserted into the response body:
        * ``spawn``: spawning time
        * ``t1``: time1
        * ``d1``: time2 - time1
        * ``d2``: time3 - time2
        * ``pid``: the pid of the worker handling the request
        * ``nr``: number of requests handled by this worker so far
        * ``max``: number of requests planned for this worker (this can be exceeded a little bit because of the rolling restarting strategy)
        The new response is a json with two keys:
        "res" contains the original response
        "info" contains the extra information
        '''
# Development mode: restart workers whenever application code changes.
class Reload(Setting):
    name = "reload"
    section = 'Debugging'
    cli = ['--reload']
    validator = validate_bool
    action = 'store_true'
    default = False
    desc = '''\
        Restart workers when code changes.
        This setting is intended for development. It will cause workers to be
        restarted whenever application code changes.
        The reloader is incompatible with application preloading. When using a
        paste configuration be sure that the server block does not import any
        application code or the reload will not work as designed.
        The default behavior is to attempt inotify with a fallback to file
        system polling. Generally, inotify should be preferred if available
        because it consumes less system resources.
        .. note::
           In order to use the inotify reloader, you must have the ``inotify``
           package installed.
        '''
# Which reloader backend powers --reload (validated against reloader_engines).
class ReloadEngine(Setting):
    name = "reload_engine"
    section = "Debugging"
    cli = ["--reload-engine"]
    meta = "STRING"
    validator = validate_reload_engine
    default = "auto"
    desc = """\
        The implementation that should be used to power :ref:`reload`.
        Valid engines are:
        * ``'auto'``
        * ``'poll'``
        * ``'inotify'`` (requires inotify)
        .. versionadded:: 19.7
        """
# Additional (existing) files watched by the reloader.
class ReloadExtraFiles(Setting):
    name = "reload_extra_files"
    action = "append"
    section = "Debugging"
    cli = ["--reload-extra-file"]
    meta = "FILES"
    validator = validate_list_of_existing_files
    default = []
    desc = """\
        Extends :ref:`reload` option to also watch and reload on additional files
        (e.g., templates, configurations, specifications, etc.).
        .. versionadded:: 19.8
        """
# Install a trace function printing every executed line (extremely verbose).
class Spew(Setting):
    name = "spew"
    section = "Debugging"
    cli = ["--spew"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Install a trace function that spews every line executed by the server.
        This is the nuclear option.
        """
# Validate the configuration and exit (0 = valid, 1 = invalid).
class ConfigCheck(Setting):
    name = "check_config"
    section = "Debugging"
    cli = ["--check-config"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Check the configuration and exit. The exit status is 0 if the
        configuration is correct, and 1 if the configuration is incorrect.
        """
# Dump resolved settings and exit (implies check_config).
class PrintConfig(Setting):
    name = "print_config"
    section = "Debugging"
    cli = ["--print-config"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Print the configuration settings as fully resolved. Implies :ref:`check-config`.
        """
# Load the application in the master before forking workers.
class PreloadApp(Setting):
    name = "preload_app"
    section = "Server Mechanics"
    cli = ["--preload"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Load application code before the worker processes are forked.
        By preloading an application you can save some RAM resources as well as
        speed up server boot times. Although, if you defer application loading
        to each worker process, you can reload your application code easily by
        restarting workers.
        """
# --no-sendfile stores False; an unset value (None) lets $SENDFILE decide
# (see Config.sendfile).
class Sendfile(Setting):
    name = "sendfile"
    section = "Server Mechanics"
    cli = ["--no-sendfile"]
    validator = validate_bool
    action = "store_const"
    const = False
    desc = """\
        Disables the use of ``sendfile()``.
        If not set, the value of the ``SENDFILE`` environment variable is used
        to enable or disable its usage.
        .. versionadded:: 19.2
        .. versionchanged:: 19.4
            Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow
            disabling.
        .. versionchanged:: 19.6
            added support for the ``SENDFILE`` environment variable
        """
# Set SO_REUSEPORT on the listening socket.
class ReusePort(Setting):
    name = "reuse_port"
    section = "Server Mechanics"
    cli = ["--reuse-port"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Set the ``SO_REUSEPORT`` flag on the listening socket.
        .. versionadded:: 19.8
        """
# Working directory to chdir into before loading the app.
class Chdir(Setting):
    name = "chdir"
    section = "Server Mechanics"
    cli = ["--chdir"]
    validator = validate_chdir
    default = util.getcwd()
    desc = """\
        Change directory to specified directory before loading apps.
        """
# Detach from the controlling terminal and run in the background.
class Daemon(Setting):
    name = "daemon"
    section = "Server Mechanics"
    cli = ["-D", "--daemon"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Daemonize the Gunicorn process.
        Detaches the server from the controlling terminal and enters the
        background.
        """
# Extra "KEY=value" environment entries (parsed by Config.env).
class Env(Setting):
    name = "raw_env"
    action = "append"
    section = "Server Mechanics"
    cli = ["-e", "--env"]
    meta = "ENV"
    validator = validate_list_string
    default = []
    desc = """\
        Set environment variables in the execution environment.
        Should be a list of strings in the ``key=value`` format.
        For example on the command line:
        .. code-block:: console
            $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
        Or in the configuration file:
        .. code-block:: python
            raw_env = ["FOO=1"]
        """
# Optional PID file path (None = don't write one).
class Pidfile(Setting):
    name = "pidfile"
    section = "Server Mechanics"
    cli = ["-p", "--pid"]
    meta = "FILE"
    validator = validate_string
    default = None
    desc = """\
        A filename to use for the PID file.
        If not set, no PID file will be written.
        """
# Directory for the worker heartbeat temp file (None = system default).
class WorkerTmpDir(Setting):
    name = "worker_tmp_dir"
    section = "Server Mechanics"
    cli = ["--worker-tmp-dir"]
    meta = "DIR"
    validator = validate_string
    default = None
    desc = """\
        A directory to use for the worker heartbeat temporary file.
        If not set, the default temporary directory will be used.
        .. note::
           The current heartbeat system involves calling ``os.fchmod`` on
           temporary file handlers and may block a worker for arbitrary time
           if the directory is on a disk-backed filesystem.
           See :ref:`blocking-os-fchmod` for more detailed information
           and a solution for avoiding this problem.
        """
# User (uid or name) the workers run as; resolved by validate_user.
class User(Setting):
    name = "user"
    section = "Server Mechanics"
    cli = ["-u", "--user"]
    meta = "USER"
    validator = validate_user
    default = os.geteuid()
    desc = """\
        Switch worker processes to run as this user.
        A valid user id (as an integer) or the name of a user that can be
        retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not
        change the worker process user.
        """
# Group (gid or name) the workers run as; resolved by validate_group.
class Group(Setting):
    name = "group"
    section = "Server Mechanics"
    cli = ["-g", "--group"]
    meta = "GROUP"
    validator = validate_group
    default = os.getegid()
    # Help text corrected: group names are resolved via grp.getgrnam
    # (see validate_group), not pwd.getgrnam as previously stated.
    desc = """\
        Switch worker process to run as this group.
        A valid group id (as an integer) or the name of a group that can be
        retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not
        change the worker processes group.
        """
# File-mode mask for files Gunicorn writes (also affects unix sockets).
class Umask(Setting):
    name = "umask"
    section = "Server Mechanics"
    cli = ["-m", "--umask"]
    meta = "INT"
    validator = validate_pos_int
    type = auto_int
    default = 0
    desc = """\
        A bit mask for the file mode on files written by Gunicorn.
        Note that this affects unix socket permissions.
        A valid value for the ``os.umask(mode)`` call or a string compatible
        with ``int(value, 0)`` (``0`` means Python guesses the base, so values
        like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal
        representations)
        """
# Populate the worker's supplementary group list from the configured user.
class Initgroups(Setting):
    name = "initgroups"
    section = "Server Mechanics"
    cli = ["--initgroups"]
    validator = validate_bool
    action = 'store_true'
    default = False
    desc = """\
        If true, set the worker process's group access list with all of the
        groups of which the specified username is a member, plus the specified
        group id.
        .. versionadded:: 19.7
        """
# Directory for buffering request bodies while they are read (no CLI flag).
class TmpUploadDir(Setting):
    name = "tmp_upload_dir"
    section = "Server Mechanics"
    meta = "DIR"
    validator = validate_string
    default = None
    desc = """\
        Directory to store temporary request data as they are read.
        This may disappear in the near future.
        This path should be writable by the process permissions set for Gunicorn
        workers. If not specified, Gunicorn will choose a system generated
        temporary directory.
        """
# Proxy headers whose exact values mark a request as HTTPS.
class SecureSchemeHeader(Setting):
    name = "secure_scheme_headers"
    section = "Server Mechanics"
    validator = validate_dict
    default = {
        "X-FORWARDED-PROTOCOL": "ssl",
        "X-FORWARDED-PROTO": "https",
        "X-FORWARDED-SSL": "on"
    }
    desc = """\
        A dictionary containing headers and values that the front-end proxy
        uses to indicate HTTPS requests. If the source IP is permitted by
        ``forwarded-allow-ips`` (below), *and* at least one request header matches
        a key-value pair listed in this dictionary, then Gunicorn will set
        ``wsgi.url_scheme`` to ``https``, so your application can tell that the
        request is secure.
        If the other headers listed in this dictionary are not present in the request, they will be ignored,
        but if the other headers are present and do not match the provided values, then
        the request will fail to parse. See the note below for more detailed examples of this behaviour.
        The dictionary should map upper-case header names to exact string
        values. The value comparisons are case-sensitive, unlike the header
        names, so make sure they're exactly what your front-end proxy sends
        when handling HTTPS requests.
        It is important that your front-end proxy configuration ensures that
        the headers defined here can not be passed directly from the client.
        """
# Front-end IPs trusted to set the secure-scheme headers above.
class ForwardedAllowIPS(Setting):
    name = "forwarded_allow_ips"
    section = "Server Mechanics"
    cli = ["--forwarded-allow-ips"]
    meta = "STRING"
    validator = validate_string_to_list
    default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
    desc = """\
        Front-end's IPs from which allowed to handle set secure headers.
        (comma separate).
        Set to ``*`` to disable checking of Front-end IPs (useful for setups
        where you don't know in advance the IP address of Front-end, but
        you still trust the environment).
        By default, the value of the ``FORWARDED_ALLOW_IPS`` environment
        variable. If it is not defined, the default is ``"127.0.0.1"``.
        .. note::
            The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of
            ``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we
            have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``:
            .. code::
                secure_scheme_headers = {
                    'X-FORWARDED-PROTOCOL': 'ssl',
                    'X-FORWARDED-PROTO': 'https',
                    'X-FORWARDED-SSL': 'on'
                }
            .. list-table::
                :header-rows: 1
                :align: center
                :widths: auto
                * - ``forwarded-allow-ips``
                  - Secure Request Headers
                  - Result
                  - Explanation
                * - .. code::
                        ["127.0.0.1"]
                  - .. code::
                        X-Forwarded-Proto: https
                  - .. code::
                        wsgi.url_scheme = "http"
                  - IP address was not allowed
                * - .. code::
                        "*"
                  - <none>
                  - .. code::
                        wsgi.url_scheme = "http"
                  - IP address allowed, but no secure headers provided
                * - .. code::
                        "*"
                  - .. code::
                        X-Forwarded-Proto: https
                  - .. code::
                        wsgi.url_scheme = "https"
                  - IP address allowed, one request header matched
                * - .. code::
                        ["134.213.44.18"]
                  - .. code::
                        X-Forwarded-Ssl: on
                        X-Forwarded-Proto: http
                  - ``InvalidSchemeHeaders()`` raised
                  - IP address allowed, but the two secure headers disagreed on if HTTPS was used
        """
# Access log destination ('-' = stdout, None = disabled).
class AccessLog(Setting):
    name = "accesslog"
    section = "Logging"
    cli = ["--access-logfile"]
    meta = "FILE"
    validator = validate_string
    default = None
    desc = """\
        The Access log file to write to.
        ``'-'`` means log to stdout.
        """
class DisableRedirectAccessToSyslog(Setting):
name = "disable_redirect_access_to_syslog"
section = "Logging"
cli = ["--disable-redirect-access-to-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Disable redirect access logs to syslog.
.. versionadded:: 19.8
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
The access log format.
=========== ===========
Identifier Description
=========== ===========
h remote address
l ``'-'``
u user name
t date of the request
r status line (e.g. ``GET / HTTP/1.1``)
m request method
U URL path without query string
q query string
H protocol
s status
B response length
b response length or ``'-'`` (CLF format)
f referer
a user agent
T request time in seconds
M request time in milliseconds
D request time in microseconds
L request time in decimal seconds
p process ID
{header}i request header
{header}o response header
{variable}e environment variable
=========== ===========
Use lowercase for header and environment variable names, and put
``{...}x`` names inside ``%(...)s``. For example::
%({x-forwarded-for}i)s
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = '-'
desc = """\
The Error log file to write to.
Using ``'-'`` for FILE makes gunicorn log to stderr.
.. versionchanged:: 19.2
Log to stderr by default.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* ``'debug'``
* ``'info'``
* ``'warning'``
* ``'error'``
* ``'critical'``
"""
class CaptureOutput(Setting):
name = "capture_output"
section = "Logging"
cli = ["--capture-output"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Redirect stdout/stderr to specified file in :ref:`errorlog`.
.. versionadded:: 19.6
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "gunicorn.glogging.Logger"
desc = """\
The logger you want to use to log events in Gunicorn.
The default class (``gunicorn.glogging.Logger``) handles most
normal usages in logging. It provides error and access logging.
You can provide your own logger by giving Gunicorn a Python path to a
class that quacks like ``gunicorn.glogging.Logger``.
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class LogConfigDict(Setting):
name = "logconfig_dict"
section = "Logging"
validator = validate_dict
default = {}
desc = """\
The log config dictionary to use, using the standard Python
logging module's dictionary configuration format. This option
takes precedence over the :ref:`logconfig` option, which uses the
older file configuration format.
Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
.. versionadded:: 19.8
"""
class SyslogTo(Setting):
    # Destination address for syslog messages. The default is the local
    # syslog socket path on BSD-family platforms and macOS, falling back to
    # UDP on localhost everywhere else (including Linux).
    name = "syslog_addr"
    section = "Logging"
    cli = ["--log-syslog-to"]
    meta = "SYSLOG_ADDR"
    validator = validate_string
    # PLATFORM is a module-level constant resolved at import time, so the
    # default below is fixed per host, not per request.
    if PLATFORM == "darwin":
        default = "unix:///var/run/syslog"
    elif PLATFORM in ('freebsd', 'dragonfly', ):
        default = "unix:///var/run/log"
    elif PLATFORM == "openbsd":
        default = "unix:///dev/log"
    else:
        default = "udp://localhost:514"
    desc = """\
        Address to send syslog messages.
        Address is a string of the form:
        * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream``
        for the stream driver or ``dgram`` for the dgram driver.
        ``stream`` is the default.
        * ``udp://HOST:PORT`` : for UDP sockets
        * ``tcp://HOST:PORT`` : for TCP sockets
        """
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Send *Gunicorn* logs to syslog.
.. versionchanged:: 19.8
You can now disable sending access logs by using the
:ref:`disable-redirect-access-to-syslog` setting.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
Makes Gunicorn use the parameter as program-name in the syslog entries.
All entries will be prefixed by ``gunicorn.<prefix>``. By default the
program name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance.
Enable inheritance for stdio file descriptors in daemon mode.
Note: To disable the Python stdout buffering, you can to set the user
environment variable ``PYTHONUNBUFFERED`` .
"""
class StatsdHost(Setting):
    # ``host:port`` pair of a statsd daemon to emit metrics to; ``None``
    # (the default) disables statsd reporting.
    name = "statsd_host"
    section = "Logging"
    cli = ["--statsd-host"]
    meta = "STATSD_ADDR"
    default = None
    validator = validate_hostport
    desc = """\
        ``host:port`` of the statsd server to log to.
        .. versionadded:: 19.1
        """
class DogstatsdTags(Setting):
name = "dogstatsd_tags"
section = "Logging"
cli = ["--dogstatsd-tags"]
meta = "DOGSTATSD_TAGS"
default = ""
validator = validate_string
desc = """\
A comma-delimited list of datadog statsd (dogstatsd) tags to append to
statsd metrics.
.. versionadded:: 20
"""
class StatsdPrefix(Setting):
name = "statsd_prefix"
section = "Logging"
cli = ["--statsd-prefix"]
meta = "STATSD_PREFIX"
default = ""
validator = validate_string
desc = """\
Prefix to use when emitting statsd metrics (a trailing ``.`` is added,
if not provided).
.. versionadded:: 19.2
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
If not set, the *default_proc_name* setting will be used.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A comma-separated list of directories to add to the Python path.
e.g.
``'/home/djangoprojects/myproject,/home/python/mylibrary'``.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paste", "--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a PasteDeploy config file. The argument may contain a ``#``
symbol followed by the name of an app section from the config file,
e.g. ``production.ini#admin``.
At this time, using alternate server blocks is not supported. Use the
command line arguments to control server configuration instead.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerInt(Setting):
name = "worker_int"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_int(worker):
pass
default = staticmethod(worker_int)
desc = """\
Called just after a worker exited on SIGINT or SIGQUIT.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerAbort(Setting):
name = "worker_abort"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_abort(worker):
pass
default = staticmethod(worker_abort)
desc = """\
Called when a worker received the SIGABRT signal.
This call generally happens on timeout.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
    # Server hook invoked right before a worker processes each request.
    # The default implementation logs the request line at DEBUG level.
    name = "pre_request"
    section = "Server Hooks"
    validator = validate_callable(2)
    type = callable

    def pre_request(worker, req):
        # Use logging's lazy %-style arguments instead of eager string
        # interpolation: the message is only formatted when the DEBUG
        # level is actually enabled, avoiding per-request work otherwise.
        worker.log.debug("%s %s", req.method, req.path)
    default = staticmethod(pre_request)
    desc = """\
        Called just before a worker processes the request.
        The callable needs to accept two instance variables for the Worker and
        the Request.
        """
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class ChildExit(Setting):
name = "child_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def child_exit(server, worker):
pass
default = staticmethod(child_exit)
desc = """\
Called just after a worker has been exited, in the master process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
.. versionadded:: 19.7
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has been exited, in the worker process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after *num_workers* has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers of number of workers after and before change.
If the number of workers is set for the first time, *old_value* would
be ``None``.
"""
class OnExit(Setting):
name = "on_exit"
section = "Server Hooks"
validator = validate_callable(1)
def on_exit(server):
pass
default = staticmethod(on_exit)
desc = """\
Called just before exiting Gunicorn.
The callable needs to accept a single instance variable for the Arbiter.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable detect PROXY protocol (PROXY mode).
Allow using HTTP and Proxy together. It may be useful for work with
stunnel as HTTPS frontend and Gunicorn as HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end's IPs from which allowed accept proxy requests (comma separate).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment)
"""
class KeyFile(Setting):
name = "keyfile"
section = "SSL"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "SSL"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
class SSLVersion(Setting):
    # SSL/TLS protocol version passed to the stdlib ``ssl`` module.
    name = "ssl_version"
    section = "SSL"
    cli = ["--ssl-version"]
    validator = validate_ssl_version
    # Prefer the auto-negotiating ``PROTOCOL_TLS`` constant when the running
    # Python exposes it (3.6+); fall back to its legacy spelling otherwise.
    # NOTE: the original block re-assigned ``default = ssl.PROTOCOL_SSLv23``
    # unconditionally *after* this branch, which made the branch dead code
    # and silently pinned the deprecated constant on every Python version.
    # That override (and the first, shadowed ``desc`` it accompanied) has
    # been removed so the documented 20.0.1 behavior actually takes effect.
    if hasattr(ssl, "PROTOCOL_TLS"):
        default = ssl.PROTOCOL_TLS
    else:
        default = ssl.PROTOCOL_SSLv23
    desc = """\
        SSL version to use.
        ============= ============
        --ssl-version Description
        ============= ============
        SSLv3 SSLv3 is not-secure and is strongly discouraged.
        SSLv23 Alias for TLS. Deprecated in Python 3.6, use TLS.
        TLS Negotiate highest possible version between client/server.
        Can yield SSL. (Python 3.6+)
        TLSv1 TLS 1.0
        TLSv1_1 TLS 1.1 (Python 3.4+)
        TLSv1_2 TLS 1.2 (Python 3.4+)
        TLS_SERVER Auto-negotiate the highest protocol version like TLS,
        but only support server-side SSLSocket connections.
        (Python 3.6+)
        ============= ============
        .. versionchanged:: 19.7
        The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to
        ``ssl.PROTOCOL_SSLv23``.
        .. versionchanged:: 20.0
        This setting now accepts string names based on ``ssl.PROTOCOL_``
        constants.
        .. versionchanged:: 20.0.1
        The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to
        ``ssl.PROTOCOL_TLS`` when Python >= 3.6 .
        """
class CertReqs(Setting):
name = "cert_reqs"
section = "SSL"
cli = ["--cert-reqs"]
validator = validate_pos_int
default = ssl.CERT_NONE
desc = """\
Whether client certificate is required (see stdlib ssl module's)
"""
class CACerts(Setting):
name = "ca_certs"
section = "SSL"
cli = ["--ca-certs"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
CA certificates file
"""
class SuppressRaggedEOFs(Setting):
name = "suppress_ragged_eofs"
section = "SSL"
cli = ["--suppress-ragged-eofs"]
action = "store_true"
default = True
validator = validate_bool
desc = """\
Suppress ragged EOFs (see stdlib ssl module's)
"""
class DoHandshakeOnConnect(Setting):
name = "do_handshake_on_connect"
section = "SSL"
cli = ["--do-handshake-on-connect"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Whether to perform SSL handshake on socket connect (see stdlib ssl module's)
"""
class Ciphers(Setting):
name = "ciphers"
section = "SSL"
cli = ["--ciphers"]
validator = validate_string
default = None
desc = """\
SSL Cipher suite to use, in the format of an OpenSSL cipher list.
By default we use the default cipher list from Python's ``ssl`` module,
which contains ciphers considered strong at the time of each Python
release.
As a recommended alternative, the Open Web App Security Project (OWASP)
offers `a vetted set of strong cipher strings rated A+ to C-
<https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_.
OWASP provides details on user-agent compatibility at each security level.
See the `OpenSSL Cipher List Format Documentation
<https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_
for details on the format of an OpenSSL cipher list.
"""
class PasteGlobalConf(Setting):
name = "raw_paste_global_conf"
action = "append"
section = "Server Mechanics"
cli = ["--paste-global"]
meta = "CONF"
validator = validate_list_string
default = []
desc = """\
Set a PasteDeploy global config variable in ``key=value`` form.
The option can be specified multiple times.
The variables are passed to the the PasteDeploy entrypoint. Example::
$ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2
.. versionadded:: 19.7
"""
class StripHeaderSpaces(Setting):
    # Opt-in compatibility shim for clients that send ``Header : value``
    # with whitespace before the colon. Disabled by default because such
    # leniency enables request-smuggling attacks (see desc below).
    name = "strip_header_spaces"
    section = "Server Mechanics"
    cli = ["--strip-header-spaces"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Strip spaces present between the header name and the the ``:``.
        This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard.
        See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
        Use with care and only if necessary.
        """
| true | true |
f7220def58979cdc9b193f290962e4ce04a90b11 | 4,857 | py | Python | tests/functional/test_echeck_credit.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 4 | 2017-11-16T16:02:06.000Z | 2021-05-04T14:40:08.000Z | tests/functional/test_echeck_credit.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 7 | 2017-08-23T15:04:38.000Z | 2020-04-07T20:20:15.000Z | tests/functional/test_echeck_credit.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 13 | 2017-10-17T22:07:57.000Z | 2022-03-29T17:33:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the 'Software'), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
package_root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
sys.path.insert(0, package_root)
from vantivsdk import *
conf = utils.Configuration()
class TestEcheckCredit(unittest.TestCase):
    """Functional tests for the Vantiv ``echeckCredit`` transaction type.

    ``assertEquals`` (a deprecated alias removed in Python 3.12) has been
    replaced with ``assertEqual`` throughout; assertions are otherwise
    unchanged.
    """

    def test_simple_echeck_credit(self):
        """Credit referencing a prior transaction by ``cnpTxnId`` only."""
        transaction = fields.echeckCredit()
        transaction.cnpTxnId = 123456789101112
        transaction.amount = 12
        transaction.secondaryAmount = 50
        transaction.id = 'ThisIsID'
        response = online.request(transaction, conf)
        self.assertEqual('000', response['echeckCreditResponse']['response'])
        self.assertEqual('sandbox', response['echeckCreditResponse']['location'])

    def test_echeck_credit_with_echeck(self):
        """Credit built from raw echeck account details."""
        transaction = fields.echeckCredit()
        transaction.reportGroup = 'Planets'
        transaction.orderId = '12344'
        transaction.amount = 106
        transaction.orderSource = 'ecommerce'
        transaction.id = 'ThisIsID'
        echeck = fields.echeckType()
        echeck.accNum = '12345657890'
        echeck.routingNum = '123456789'
        echeck.checkNum = '123455'
        echeck.accType = 'Checking'
        transaction.echeck = echeck
        billtoaddress = fields.contact()
        billtoaddress.firstName = 'Peter'
        billtoaddress.lastName = 'Green'
        billtoaddress.companyName = 'Green Co'
        billtoaddress.phone = '999-999-9999'
        transaction.billToAddress = billtoaddress
        # Request/assert intentionally disabled: the sandbox endpoint does
        # not handle this shape correctly ("Sandbox issue" upstream).
        # response = online.request(transaction, conf)
        # self.assertEqual('000', response['echeckCreditResponse']['response'])

    def test_echeck_credit_with_token(self):
        """Credit using a previously-registered echeck token."""
        transaction = fields.echeckCredit()
        transaction.reportGroup = 'Planets'
        transaction.orderId = '12344'
        transaction.amount = 106
        transaction.orderSource = 'ecommerce'
        transaction.id = 'ThisIsID'
        token = fields.echeckTokenType()
        token.cnpToken = '1234565789012'
        token.routingNum = '123456789'
        token.accType = 'Checking'
        transaction.echeckToken = token
        billtoaddress = fields.contact()
        billtoaddress.firstName = 'Peter'
        billtoaddress.lastName = 'Green'
        billtoaddress.companyName = 'Green Co'
        billtoaddress.phone = '999-999-9999'
        transaction.billToAddress = billtoaddress
        response = online.request(transaction, conf)
        self.assertEqual('000', response['echeckCreditResponse']['response'])
        self.assertEqual('sandbox', response['echeckCreditResponse']['location'])

    def test_echeck_credit_with_echeck_and_secondary_amount(self):
        """Credit with raw echeck details plus a secondary amount."""
        transaction = fields.echeckCredit()
        transaction.reportGroup = 'Planets'
        transaction.orderId = '12344'
        transaction.amount = 106
        transaction.secondaryAmount = 50
        transaction.orderSource = 'ecommerce'
        transaction.id = 'ThisIsID'
        echeck = fields.echeckType()
        echeck.accNum = '12345657890'
        echeck.routingNum = '123456789'
        echeck.checkNum = '123455'
        echeck.accType = 'Checking'
        transaction.echeck = echeck
        billtoaddress = fields.contact()
        billtoaddress.firstName = 'Peter'
        billtoaddress.lastName = 'Green'
        billtoaddress.companyName = 'Green Co'
        billtoaddress.phone = '999-999-9999'
        transaction.billToAddress = billtoaddress
        response = online.request(transaction, conf)
        self.assertEqual('000', response['echeckCreditResponse']['response'])
        self.assertEqual('sandbox', response['echeckCreditResponse']['location'])
# Allow running this test module directly (python test_echeck_credit.py).
if __name__ == '__main__':
    unittest.main()
| 37.651163 | 91 | 0.690138 |
import os
import sys
import unittest
package_root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
sys.path.insert(0, package_root)
from vantivsdk import *
conf = utils.Configuration()
class TestEcheckCredit(unittest.TestCase):
def test_simple_echeck_credit(self):
transaction = fields.echeckCredit()
transaction.cnpTxnId = 123456789101112
transaction.amount = 12
transaction.secondaryAmount = 50
transaction.id = 'ThisIsID'
response = online.request(transaction, conf)
self.assertEquals('000', response['echeckCreditResponse']['response'])
self.assertEquals('sandbox', response['echeckCreditResponse']['location'])
def test_echeck_credit_with_echeck(self):
transaction = fields.echeckCredit()
transaction.reportGroup = 'Planets'
transaction.orderId = '12344'
transaction.amount = 106
transaction.orderSource = 'ecommerce'
transaction.id = 'ThisIsID'
echeck = fields.echeckType()
echeck.accNum = '12345657890'
echeck.routingNum = '123456789'
echeck.checkNum = '123455'
echeck.accType = 'Checking'
transaction.echeck = echeck
billtoaddress = fields.contact()
billtoaddress.firstName = 'Peter'
billtoaddress.lastName = 'Green'
billtoaddress.companyName = 'Green Co'
billtoaddress.phone = '999-999-9999'
transaction.billToAddress = billtoaddress
def test_echeck_credit_with_token(self):
transaction = fields.echeckCredit()
transaction.reportGroup = 'Planets'
transaction.orderId = '12344'
transaction.amount = 106
transaction.orderSource = 'ecommerce'
transaction.id = 'ThisIsID'
token = fields.echeckTokenType()
token.cnpToken = '1234565789012'
token.routingNum = '123456789'
token.accType = 'Checking'
transaction.echeckToken = token
billtoaddress = fields.contact()
billtoaddress.firstName = 'Peter'
billtoaddress.lastName = 'Green'
billtoaddress.companyName = 'Green Co'
billtoaddress.phone = '999-999-9999'
transaction.billToAddress = billtoaddress
response = online.request(transaction, conf)
self.assertEquals('000', response['echeckCreditResponse']['response'])
self.assertEquals('sandbox', response['echeckCreditResponse']['location'])
def test_echeck_credit_with_echeck_and_secondary_amount(self):
transaction = fields.echeckCredit()
transaction.reportGroup = 'Planets'
transaction.orderId = '12344'
transaction.amount = 106
transaction.secondaryAmount = 50
transaction.orderSource = 'ecommerce'
transaction.id = 'ThisIsID'
echeck = fields.echeckType()
echeck.accNum = '12345657890'
echeck.routingNum = '123456789'
echeck.checkNum = '123455'
echeck.accType = 'Checking'
transaction.echeck = echeck
billtoaddress = fields.contact()
billtoaddress.firstName = 'Peter'
billtoaddress.lastName = 'Green'
billtoaddress.companyName = 'Green Co'
billtoaddress.phone = '999-999-9999'
transaction.billToAddress = billtoaddress
response = online.request(transaction, conf)
self.assertEquals('000', response['echeckCreditResponse']['response'])
self.assertEquals('sandbox', response['echeckCreditResponse']['location'])
if __name__ == '__main__':
unittest.main()
| true | true |
f7220e84c9b8d8afe3e8a2e6bda57cd5127f2c56 | 8,471 | py | Python | tests/notifier/notifiers/gcs_violations_test.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | 1 | 2018-10-06T23:16:59.000Z | 2018-10-06T23:16:59.000Z | tests/notifier/notifiers/gcs_violations_test.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | null | null | null | tests/notifier/notifiers/gcs_violations_test.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the GCS Violations upload notifier."""
import mock
import unittest
from datetime import datetime
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import base_notification
from google.cloud.forseti.notifier.notifiers import gcs_violations
from tests.notifier.notifiers.test_data import fake_violations
from tests.unittest_utils import ForsetiTestCase
class GcsViolationsnotifierTest(ForsetiTestCase):
"""Tests for gcs_violations_notifier."""
def setUp(self):
"""Setup."""
self.fake_utcnow = datetime(
year=1900, month=1, day=1, hour=0, minute=0, second=0,
microsecond=0)
self.fake_global_conf = {
'db_host': 'x',
'db_name': 'y',
'db_user': 'z',
}
self.fake_notifier_conf = {
'gcs_path': 'gs://blah'
}
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename(self, mock_date_time):
"""Test_get_output_filename()."""
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_CSV_FMT)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename_with_json(self, mock_date_time):
"""Test _get_output_filename()."""
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_JSON_FMT)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.os')
def test_run(self, mock_os, mock_tempfile, mock_storage):
"""Test run()."""
fake_tmpname = 'tmp_name'
fake_output_name = 'abc'
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
gvp._get_output_filename = mock.MagicMock(return_value=fake_output_name)
gcs_path = '{}/{}'.format(
gvp.notification_config['gcs_path'], fake_output_name)
mock_tmp_csv = mock.MagicMock()
mock_tempfile.return_value = mock_tmp_csv
mock_tmp_csv.name = fake_tmpname
mock_tmp_csv.write = mock.MagicMock()
gvp.run()
mock_tmp_csv.write.assert_called()
mock_storage.return_value.put_text_file.assert_called_once_with(
fake_tmpname, gcs_path)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_json(self, mock_write_csv, mock_json_stringify,
mock_storage):
"""Test run() with json file format."""
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_JSON
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
mock_json_stringify.return_value = 'test123'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertFalse(mock_write_csv.called)
self.assertTrue(mock_json_stringify.called)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_csv(self, mock_csv_writer, mock_parser, mock_storage):
"""Test run() with default file format (CSV)."""
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_DEFAULT
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertTrue(mock_csv_writer.called)
self.assertFalse(mock_parser.called)
    @mock.patch(
        'google.cloud.forseti.common.util.file_uploader.StorageClient',
        autospec=True)
    @mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
    def test_run_with_invalid_data_format(self, mock_write_csv,
                                          mock_json_stringify, mock_storage):
        """Test run() with an unrecognized data format.

        run() must raise InvalidDataFormatError before producing any
        output: no filename is generated and neither serializer runs.
        """
        notifier_config = (
            fake_violations.NOTIFIER_CONFIGS_GCS_INVALID_DATA_FORMAT)
        # Per-notifier GCS settings: first resource, first notifier.
        notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
        resource = 'policy_violations'
        cycle_timestamp = '2018-03-24T00:49:02.891287'
        mock_json_stringify.return_value = 'test123'
        gvp = gcs_violations.GcsViolations(
            resource,
            cycle_timestamp,
            fake_violations.VIOLATIONS,
            fake_violations.GLOBAL_CONFIGS,
            notifier_config,
            notification_config)
        gvp._get_output_filename = mock.MagicMock()
        with self.assertRaises(base_notification.InvalidDataFormatError):
            gvp.run()
        # The failure must occur before any output work is attempted.
        self.assertFalse(gvp._get_output_filename.called)
        self.assertFalse(mock_write_csv.called)
        self.assertFalse(mock_json_stringify.called)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.330317 | 94 | 0.673356 |
import mock
import unittest
from datetime import datetime
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import base_notification
from google.cloud.forseti.notifier.notifiers import gcs_violations
from tests.notifier.notifiers.test_data import fake_violations
from tests.unittest_utils import ForsetiTestCase
class GcsViolationsnotifierTest(ForsetiTestCase):
def setUp(self):
self.fake_utcnow = datetime(
year=1900, month=1, day=1, hour=0, minute=0, second=0,
microsecond=0)
self.fake_global_conf = {
'db_host': 'x',
'db_name': 'y',
'db_user': 'z',
}
self.fake_notifier_conf = {
'gcs_path': 'gs://blah'
}
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename(self, mock_date_time):
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_CSV_FMT)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename_with_json(self, mock_date_time):
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_JSON_FMT)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.os')
def test_run(self, mock_os, mock_tempfile, mock_storage):
fake_tmpname = 'tmp_name'
fake_output_name = 'abc'
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
gvp._get_output_filename = mock.MagicMock(return_value=fake_output_name)
gcs_path = '{}/{}'.format(
gvp.notification_config['gcs_path'], fake_output_name)
mock_tmp_csv = mock.MagicMock()
mock_tempfile.return_value = mock_tmp_csv
mock_tmp_csv.name = fake_tmpname
mock_tmp_csv.write = mock.MagicMock()
gvp.run()
mock_tmp_csv.write.assert_called()
mock_storage.return_value.put_text_file.assert_called_once_with(
fake_tmpname, gcs_path)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_json(self, mock_write_csv, mock_json_stringify,
mock_storage):
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_JSON
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
mock_json_stringify.return_value = 'test123'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertFalse(mock_write_csv.called)
self.assertTrue(mock_json_stringify.called)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_csv(self, mock_csv_writer, mock_parser, mock_storage):
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_DEFAULT
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertTrue(mock_csv_writer.called)
self.assertFalse(mock_parser.called)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_invalid_data_format(self, mock_write_csv,
mock_json_stringify, mock_storage):
notifier_config = (
fake_violations.NOTIFIER_CONFIGS_GCS_INVALID_DATA_FORMAT)
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
mock_json_stringify.return_value = 'test123'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
with self.assertRaises(base_notification.InvalidDataFormatError):
gvp.run()
self.assertFalse(gvp._get_output_filename.called)
self.assertFalse(mock_write_csv.called)
self.assertFalse(mock_json_stringify.called)
if __name__ == '__main__':
unittest.main()
| true | true |
f7220ea8c6b758df8275d973e8e49958e05eaf25 | 8,061 | py | Python | srcgen/tests/test_c.py | maedoc/inducer-f2py | fce51e6603f7474632d452437dbb8b194d6c879d | [
"BSD-3-Clause"
] | 57 | 2015-10-20T15:58:29.000Z | 2022-03-19T08:52:04.000Z | srcgen/tests/test_c.py | maedoc/inducer-f2py | fce51e6603f7474632d452437dbb8b194d6c879d | [
"BSD-3-Clause"
] | 12 | 2016-03-14T16:02:42.000Z | 2022-03-23T11:15:50.000Z | srcgen/tests/test_c.py | maedoc/inducer-f2py | fce51e6603f7474632d452437dbb8b194d6c879d | [
"BSD-3-Clause"
] | 12 | 2015-12-30T15:41:23.000Z | 2022-03-01T17:14:42.000Z | from __future__ import absolute_import
import sys
import difflib
from six.moves import range
def equal_strings(text1, text2, _cache=set()):
    """Return True when the two texts match; otherwise dump a context diff.

    On a mismatch, up to two lines of context around every changed diff
    line are written to stderr and False is returned.  The diff is printed
    only once per distinct (text1, text2) pair, tracked in the shared
    ``_cache`` default (intentional mutable default acting as memo).
    """
    if text1 == text2:
        return True
    diff_lines = list(difflib.Differ().compare(
        text1.splitlines(True), text2.splitlines(True)))
    # Map line index -> formatted diff line, keeping two lines of context
    # on either side of each changed line; setdefault dedups overlaps.
    context = {}
    for idx, line in enumerate(diff_lines):
        if line[0] == ' ':
            continue
        for ctx in range(max(idx - 2, 0), min(idx + 3, len(diff_lines))):
            context.setdefault(ctx, '%03d: %r\n' % (ctx, diff_lines[ctx]))
    if (text1, text2) in _cache:
        return False
    _cache.add((text1, text2))
    sys.stdout.flush()
    sys.stderr.write('\n')
    sys.stderr.writelines(line for _, line in sorted(context.items()))
    sys.stderr.flush()
    return False
from srcgen.basic import Line, Block
from srcgen.c import *
def test_include():
    """A plain Include is double-quoted; a std header uses angle brackets."""
    local_header = Include('hey.h')
    assert equal_strings(local_header.realize(), '#include "hey.h"')
    std_header = Include('hey.h', is_std_header=True)
    assert std_header.realize() == '#include <hey.h>'
def test_typedef_enum():
    """TypeDefEnum renders enumerators, with optional explicit values."""
    s = TypeDefEnum('foo')
    # An empty enum: just braces and the typedef name.
    assert equal_strings(s.realize(), '''typedef enum {\n} foo''')
    s.add('A')
    assert equal_strings(s.realize(), '''\
typedef enum {
  A
} foo''')
    # A Declarator carries an explicit enumerator value; plain strings don't.
    s.add(Declarator('B', '2'))
    s.add('C')
    assert equal_strings(s.realize(), '''\
typedef enum {
  A,
  B = 2,
  C
} foo''')
def test_typedef_struct():
    """TypeDefStruct renders members and derives names/short type specs."""
    s = TypeDefStruct('foo')
    assert equal_strings(s.realize(), '''typedef struct {\n} foo''')
    s.add(Declaration('a', TypeSpec('long')))
    assert equal_strings(s.realize(), '''\
typedef struct {
  long a;
} foo''')
    # A bare string member is rendered as an int member.
    s.add('b')
    assert equal_strings(s.realize(), '''\
typedef struct {
  long a;
  int b;
} foo''')
    assert equal_strings(s.get_view('type_spec'),'foo')
    # Short spec encodes struct-of-(long, int): 'S' + 'l' + 'i' + '_'.
    assert equal_strings(s.get_view('short_type_spec'),'Sli_')
    # An anonymous struct gets a name generated from its short type spec.
    s = TypeDefStruct(None)
    assert equal_strings(s.realize(), '''typedef struct {\n} S__type''')
    s.add(Declaration('a', TypeSpec('long')))
    assert equal_strings(s.realize(), '''\
typedef struct {
  long a;
} Sl__type''')
def test_typedef_function():
    """TypeDefFunction: the first add() sets the return type, later adds
    append argument types; anonymous typedefs are named after the short
    type spec."""

    def check(td, rendering, short_spec):
        # Verify both the C rendering and the encoded short type spec.
        assert equal_strings(td.realize(), rendering)
        assert equal_strings(td.get_view('short_type_spec'), short_spec)

    named = TypeDefFunction('foo')
    check(named, 'typedef void (*foo)(void)', 'F_')
    named.add('int')
    check(named, 'typedef int (*foo)(void)', 'Fi_')
    named.add('long')
    check(named, 'typedef int (*foo)(long)', 'Fil_')
    named.add('float')
    check(named, 'typedef int (*foo)(long, float)', 'Filf_')

    anonymous = TypeDefFunction(None)
    assert equal_strings(anonymous.realize(), 'typedef void (*F__type)(void)')
    anonymous.add('int')
    assert equal_strings(anonymous.realize(), 'typedef int (*Fi__type)(void)')
def test_typedef_typespec():
    """TypeDefTypeSpec aliases a type; save()/restore() snapshot its state."""
    s = TypeDefTypeSpec('foo')
    # The default aliased type is int.
    assert equal_strings(s.realize(), 'typedef int foo')
    assert equal_strings(s.get_view('short_type_spec'), 'Ti_')
    s.save()
    s.add('long')
    assert equal_strings(s.realize(), 'typedef long foo')
    assert equal_strings(s.get_view('short_type_spec'), 'Tl_')
    # restore() rewinds to the state captured by save() above.
    s.restore()
    s.add('long**')
    assert equal_strings(s.realize(), 'typedef long** foo')
    # Each pointer level contributes a leading 'p' to the short spec.
    assert equal_strings(s.get_view('short_type_spec'), 'Tppl_')
    s.restore()
    s.add('double[2][3]')
    assert equal_strings(s.realize(), 'typedef double[2][3] foo')
    # Array dimensions are appended, joined by 'j'.
    assert equal_strings(s.get_view('short_type_spec'), 'Td2j3_')
    # An anonymous typedef is named after its short type spec.
    s = TypeDefTypeSpec(None)
    assert equal_strings(s.realize(), 'typedef int Ti__type')
    assert equal_strings(s.get_view('short_type_spec'), 'Ti_')
    s.save()
    s.add('long*')
    assert equal_strings(s.realize(), 'typedef long* Tpl__type')
def test_scalar_declarator():
    """A scalar Declarator renders its name, then `name = value` once an
    initializer is added."""
    decl = Declarator('a')
    assert decl.realize() == 'a'
    decl.add('2')
    assert decl.realize() == 'a = 2'
def test_string_declarator():
    """A string Declarator renders as a char-array initializer and keeps a
    live reference to added Line objects."""
    decl = Declarator('a', is_string=True)
    assert decl.realize() == 'a'
    line = Line('2')
    decl.add(line)
    assert decl.realize() == 'a[] = "2"'
    # Mutating the Line after add() is reflected in the rendering.
    line.add('b')
    assert decl.realize() == 'a[] = "2b"'
    # A second add() starts a new, newline-separated string literal.
    decl.add('hey')
    assert decl.realize() == 'a[] = "2b\\n"\n"hey"'
def test_seq_declarator():
    """A non-scalar Declarator collects values into a brace initializer."""
    decl = Declarator('a', is_scalar=False)
    assert decl.realize() == 'a'
    for value in ('2', '3'):
        decl.add(value)
    assert decl.realize() == 'a = {2, 3}'
def test_type_spec():
    """TypeSpec renders verbatim and encodes a short spec for name mangling."""
    t = TypeSpec('int')
    assert t.realize()=='int'
    # Base types map to single letters (int -> 'i', long -> 'l', ...).
    assert equal_strings(t.short_type_spec,'i')
    # Each pointer level prepends a 'p'.
    assert equal_strings(TypeSpec('long**').short_type_spec,'ppl')
    # Array extents are appended after the base letter, joined by 'j'.
    assert equal_strings(TypeSpec('float[2]').short_type_spec,'f2')
    assert equal_strings(TypeSpec('float[2][34]').short_type_spec,'f2j34')
def test_declaration():
    """A Declaration defaults to type int and accepts extra declarators."""
    decl = Declaration('a')
    assert decl.realize() == 'int a'
    # Both plain names and Declarator objects may be appended.
    decl.add('b')
    assert decl.realize() == 'int a, b'
    decl.add(Declarator('c', '3'))
    assert equal_strings(decl.realize(), 'int a, b, c = 3')
def test_function():
    """Function accumulates arguments, variable declarations and body
    statements, rendering a complete C definition after every add()."""
    f = Function('foo')
    assert equal_strings(f.realize(),"""\
void
foo(void) {
}""")
    # Arguments default to int unless an explicit TypeSpec is given.
    f.add(Argument('a'))
    assert equal_strings(f.realize(),"""\
void
foo(int a) {
}""")
    f.add(Argument('b', TypeSpec('long')))
    assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
}""")
    # Variables of one type are grouped into a single declaration line.
    f.add(Variable('c', TypeSpec('double'), 'd'))
    assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
  double c, d;
}""")
    f.add('c = 2;')
    assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
  double c, d;
  c = 2;
}""")
    # A multi-line statement block is re-indented to the body's depth.
    f.add('{\n d = 3;\n ;\n}')
    assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
  double c, d;
  c = 2;
  {
    d = 3;
    ;
  }
}""")
def test_csource():
    """SourceFile assembles includes, prototypes, typedefs and definitions
    into labelled sections wrapped in extern "C" guards; save()/restore()
    snapshot and rewind its content."""
    s = SourceFile('foo.c')
    s.save()
    # An empty source file renders only the C++ extern "C" guards.
    assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
''')
    # Duplicate includes collapse to one line; std headers use <...>.
    s.add(Include('foo.h'))
    s.add(Include('foo.h'))
    s.add(Include('string.h'))
    assert equal_strings(s.get_view('string'), '''\
#ifdef __cplusplus
extern "C" {
#endif
/* Includes */
#include "foo.h"
#include <string.h>
#ifdef __cplusplus
}
#endif
''')
    # restore() drops everything added since the save() above.
    s.restore()
    f = Function('foo')
    s.add(f)
    # A Function contributes both a prototype and a definition.
    assert equal_strings(s.get_view('string'), '''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
/* Definitions */
void
foo(void) {
}
#ifdef __cplusplus
}
#endif
''')
    # Re-adding the same Function must not duplicate it.
    s.add(f)
    assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
/* Definitions */
void
foo(void) {
}
#ifdef __cplusplus
}
#endif
''')
    s.restore()
    s.add(f)
    f2 = Function('bar',TypeSpec('int'), Argument('a', TypeSpec('float')))
    f2.add(Variable(Declarator('c','2'), TypeSpec('double')))
    s.add(f2)
    assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
int bar(float);
/* Definitions */
void
foo(void) {
}
int
bar(float a) {
  double c = 2;
}
#ifdef __cplusplus
}
#endif
''')
    s.restore()
    foo = TypeDefStruct('foo',
                        Declaration('a', TypeSpec('double')),
                        Declaration('b', TypeSpec('double')))
    s.add(foo)
    bar = TypeDefStruct('bar',
                        Declaration('c', TypeSpec('foo*')),
                        Declaration('d', TypeSpec('int')),
                        )
    s.add(bar)
    assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* TypeDefs */
typedef struct {
  double a;
  double b;
} foo;
typedef struct {
  foo* c;
  int d;
} bar;
#ifdef __cplusplus
}
#endif
''')
    assert equal_strings(bar.get_view('type_spec'), 'bar')
    # Passing the SourceFile lets the short spec resolve nested typedefs:
    # bar -> S(truct) of p(ointer-to)Sdd_ and i(nt).
    assert equal_strings(bar.get_view('short_type_spec', s), 'SpSdd_i_')
    s.restore()
    # Duplicate keywords collapse: only one 'static' is emitted.
    s.add(Declaration('a', Keyword('static'), TypeSpec('double*'), Keyword('static')))
    assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* Definitions */
static double* a;
#ifdef __cplusplus
}
#endif
''')
| 22.835694 | 86 | 0.602283 | from __future__ import absolute_import
import sys
import difflib
from six.moves import range
def equal_strings(text1, text2, _cache=set()):
if text1==text2:
return True
d = difflib.Differ()
l = list(d.compare(text1.splitlines(1), text2.splitlines(1)))
d = {}
for i in range(len(l)):
if l[i][0]!=' ':
for j in range(i-2, i+3):
if not 0<=j<len(l):
continue
if j in d:
continue
d[j] = '%03d: %r\n' % (j, l[j])
if (text1, text2) in _cache:
return False
_cache.add((text1, text2))
l = [v for i,v in sorted(d.items())]
sys.stdout.flush()
sys.stderr.write('\n')
sys.stderr.writelines(l)
sys.stderr.flush()
return False
from srcgen.basic import Line, Block
from srcgen.c import *
def test_include():
h = Include('hey.h')
h1 = Include('hey.h', is_std_header=True)
assert equal_strings(h.realize(), '#include "hey.h"')
assert h1.realize()=='#include <hey.h>'
def test_typedef_enum():
s = TypeDefEnum('foo')
assert equal_strings(s.realize(), '''typedef enum {\n} foo''')
s.add('A')
assert equal_strings(s.realize(), '''\
typedef enum {
A
} foo''')
s.add(Declarator('B', '2'))
s.add('C')
assert equal_strings(s.realize(), '''\
typedef enum {
A,
B = 2,
C
} foo''')
def test_typedef_struct():
s = TypeDefStruct('foo')
assert equal_strings(s.realize(), '''typedef struct {\n} foo''')
s.add(Declaration('a', TypeSpec('long')))
assert equal_strings(s.realize(), '''\
typedef struct {
long a;
} foo''')
s.add('b')
assert equal_strings(s.realize(), '''\
typedef struct {
long a;
int b;
} foo''')
assert equal_strings(s.get_view('type_spec'),'foo')
assert equal_strings(s.get_view('short_type_spec'),'Sli_')
s = TypeDefStruct(None)
assert equal_strings(s.realize(), '''typedef struct {\n} S__type''')
s.add(Declaration('a', TypeSpec('long')))
assert equal_strings(s.realize(), '''\
typedef struct {
long a;
} Sl__type''')
def test_typedef_function():
s = TypeDefFunction('foo')
assert equal_strings(s.realize(), 'typedef void (*foo)(void)')
assert equal_strings(s.get_view('short_type_spec'), 'F_')
s.add('int')
assert equal_strings(s.realize(), 'typedef int (*foo)(void)')
assert equal_strings(s.get_view('short_type_spec'), 'Fi_')
s.add('long')
assert equal_strings(s.realize(), 'typedef int (*foo)(long)')
assert equal_strings(s.get_view('short_type_spec'), 'Fil_')
s.add('float')
assert equal_strings(s.realize(), 'typedef int (*foo)(long, float)')
assert equal_strings(s.get_view('short_type_spec'), 'Filf_')
s = TypeDefFunction(None)
assert equal_strings(s.realize(), 'typedef void (*F__type)(void)')
s.add('int')
assert equal_strings(s.realize(), 'typedef int (*Fi__type)(void)')
def test_typedef_typespec():
s = TypeDefTypeSpec('foo')
assert equal_strings(s.realize(), 'typedef int foo')
assert equal_strings(s.get_view('short_type_spec'), 'Ti_')
s.save()
s.add('long')
assert equal_strings(s.realize(), 'typedef long foo')
assert equal_strings(s.get_view('short_type_spec'), 'Tl_')
s.restore()
s.add('long**')
assert equal_strings(s.realize(), 'typedef long** foo')
assert equal_strings(s.get_view('short_type_spec'), 'Tppl_')
s.restore()
s.add('double[2][3]')
assert equal_strings(s.realize(), 'typedef double[2][3] foo')
assert equal_strings(s.get_view('short_type_spec'), 'Td2j3_')
s = TypeDefTypeSpec(None)
assert equal_strings(s.realize(), 'typedef int Ti__type')
assert equal_strings(s.get_view('short_type_spec'), 'Ti_')
s.save()
s.add('long*')
assert equal_strings(s.realize(), 'typedef long* Tpl__type')
def test_scalar_declarator():
d = Declarator('a')
assert d.realize()=='a'
d.add('2')
assert d.realize()=='a = 2'
def test_string_declarator():
d = Declarator('a', is_string=True)
l = Line('2')
assert d.realize()=='a'
d.add(l)
assert d.realize()=='a[] = "2"'
l.add('b')
assert d.realize()=='a[] = "2b"'
d.add('hey')
assert d.realize()=='a[] = "2b\\n"\n"hey"'
def test_seq_declarator():
d = Declarator('a', is_scalar=False)
assert d.realize()=='a'
d.add('2')
d.add('3')
assert d.realize()=='a = {2, 3}'
def test_type_spec():
t = TypeSpec('int')
assert t.realize()=='int'
assert equal_strings(t.short_type_spec,'i')
assert equal_strings(TypeSpec('long**').short_type_spec,'ppl')
assert equal_strings(TypeSpec('float[2]').short_type_spec,'f2')
assert equal_strings(TypeSpec('float[2][34]').short_type_spec,'f2j34')
def test_declaration():
d = Declaration('a')
assert d.realize()=='int a'
d.add('b')
assert d.realize()=='int a, b'
d.add(Declarator('c', '3'))
assert equal_strings(d.realize(), 'int a, b, c = 3')
def test_function():
f = Function('foo')
assert equal_strings(f.realize(),"""\
void
foo(void) {
}""")
f.add(Argument('a'))
assert equal_strings(f.realize(),"""\
void
foo(int a) {
}""")
f.add(Argument('b', TypeSpec('long')))
assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
}""")
f.add(Variable('c', TypeSpec('double'), 'd'))
assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
double c, d;
}""")
f.add('c = 2;')
assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
double c, d;
c = 2;
}""")
f.add('{\n d = 3;\n ;\n}')
assert equal_strings(f.realize(),"""\
void
foo(int a, long b) {
double c, d;
c = 2;
{
d = 3;
;
}
}""")
def test_csource():
s = SourceFile('foo.c')
s.save()
assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
''')
s.add(Include('foo.h'))
s.add(Include('foo.h'))
s.add(Include('string.h'))
assert equal_strings(s.get_view('string'), '''\
#ifdef __cplusplus
extern "C" {
#endif
/* Includes */
#include "foo.h"
#include <string.h>
#ifdef __cplusplus
}
#endif
''')
s.restore()
f = Function('foo')
s.add(f)
assert equal_strings(s.get_view('string'), '''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
/* Definitions */
void
foo(void) {
}
#ifdef __cplusplus
}
#endif
''')
s.add(f)
assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
/* Definitions */
void
foo(void) {
}
#ifdef __cplusplus
}
#endif
''')
s.restore()
s.add(f)
f2 = Function('bar',TypeSpec('int'), Argument('a', TypeSpec('float')))
f2.add(Variable(Declarator('c','2'), TypeSpec('double')))
s.add(f2)
assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* ProtoTypes */
void foo(void);
int bar(float);
/* Definitions */
void
foo(void) {
}
int
bar(float a) {
double c = 2;
}
#ifdef __cplusplus
}
#endif
''')
s.restore()
foo = TypeDefStruct('foo',
Declaration('a', TypeSpec('double')),
Declaration('b', TypeSpec('double')))
s.add(foo)
bar = TypeDefStruct('bar',
Declaration('c', TypeSpec('foo*')),
Declaration('d', TypeSpec('int')),
)
s.add(bar)
assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* TypeDefs */
typedef struct {
double a;
double b;
} foo;
typedef struct {
foo* c;
int d;
} bar;
#ifdef __cplusplus
}
#endif
''')
assert equal_strings(bar.get_view('type_spec'), 'bar')
assert equal_strings(bar.get_view('short_type_spec', s), 'SpSdd_i_')
s.restore()
s.add(Declaration('a', Keyword('static'), TypeSpec('double*'), Keyword('static')))
assert equal_strings(s.get_view('string'),'''\
#ifdef __cplusplus
extern "C" {
#endif
/* Definitions */
static double* a;
#ifdef __cplusplus
}
#endif
''')
| true | true |
f7220f35f4559a6f1844d9fe02d22a0e119ad531 | 11,625 | py | Python | tests/charmhelpers/contrib/openstack/amulet/deployment.py | ChrisMacNaughton/layer-ceph-mon | b21bc310d76f093c8496a2bf0340133c07a72270 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/charmhelpers/contrib/openstack/amulet/deployment.py | ChrisMacNaughton/layer-ceph-mon | b21bc310d76f093c8496a2bf0340133c07a72270 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/charmhelpers/contrib/openstack/amulet/deployment.py | ChrisMacNaughton/layer-ceph-mon | b21bc310d76f093c8496a2bf0340133c07a72270 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresonding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the ~openstack-charmers
base_charms = {
'mysql': ['precise', 'trusty'],
'mongodb': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty'],
}
for svc in other_services:
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjuction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjuction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty, self.trusty_mitaka,
self.xenial_mitaka) = range(14)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty,
('xenial', None): self.xenial_mitaka}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
pools = [
'data',
'metadata',
'rbd',
'cinder',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools
| 39.273649 | 79 | 0.578409 |
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    Inherits from AmuletDeployment and adds support that is specific to
    deploying and configuring OpenStack charms under test.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that logs to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        If a location has not been explicitly set on a service, derive one
        from the series being deployed and whether the stable charm
        branches were requested."""
        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms only published for a subset of Ubuntu series.
        base_charms = {
            'mysql': ['precise', 'trusty'],
            'mongodb': ['precise', 'trusty'],
            'nrpe': ['precise', 'trusty'],
        }

        for svc in other_services:
            # If a location has been explicitly set, use it as-is.
            if svc.get('location'):
                continue
            if svc['name'] in base_charms:
                # Fall back to the newest series the charm supports.
                target_series = self.series
                if self.series not in base_charms[svc['name']]:
                    target_series = base_charms[svc['name']][-1]
                svc['location'] = 'cs:{}/{}'.format(target_series,
                                                    svc['name'])
            elif self.stable:
                svc['location'] = 'cs:{}/{}'.format(self.series,
                                                    svc['name'])
            else:
                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
                    self.series,
                    svc['name']
                )

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set
        openstack-origin/source where applicable."""
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)

        # Charms which should use the 'source' config option instead of
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']

        # Charms which can not use openstack-origin at all.
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
                     'cinder-backup', 'nexentaedge-data',
                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
                     'cinder-nexentaedge', 'nexentaedge-mgmt']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for extended status to settle on the deployed units.

        :param message: Expected status match, a plain string or compiled
            regex; defaults to any status containing 'ready'
            (case-insensitive).
        :param exclude_services: Service names to exclude from the match
        :param include_only: Restrict the match to these service names
        :param timeout: Maximum time (seconds) to wait for the match
        :raises ValueError: if exclude_services and include_only are combined
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # FIX: re._pattern_type was a private alias removed in
            # Python 3.7; derive the compiled-pattern type portably.
            if isinstance(message, type(re.compile(''))):
                match = message.pattern
            else:
                match = message
            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty, self.trusty_mitaka,
         self.xenial_mitaka) = range(14)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty,
            ('xenial', None): self.xenial_mitaka}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
            ('xenial', 'mitaka'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""
        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
| true | true |
f722121f430d09ff4c11053e6f33d6dc7fe709f8 | 2,694 | py | Python | keras/integration_test/preprocessing_applied_in_dataset_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/integration_test/preprocessing_applied_in_dataset_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/integration_test/preprocessing_applied_in_dataset_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.integration_test import preprocessing_test_utils as utils
# Short aliases for TF-internal distribution/test combination helpers.
ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations
# Note: Strategy combinations are not (yet) public APIs, so they are subject
# to API changes and backward-compatibility is not guaranteed. Note that we
# skip parameter server strategy here, as parameter server strategy requires
# a DatasetCreator when training on a tf.data.Dataset.
# Distribution strategies the test class below is parameterized over.
STRATEGIES = [
    ds_combinations.default_strategy,
    ds_combinations.mirrored_strategy_with_cpu_1_and_2,
    ds_combinations.mirrored_strategy_with_two_gpus,
    ds_combinations.tpu_strategy,
    ds_combinations.cloud_tpu_strategy,
    ds_combinations.multi_worker_mirrored_2x1_cpu,
    ds_combinations.multi_worker_mirrored_2x2_gpu,
    ds_combinations.central_storage_strategy_with_two_gpus,
]
@ds_combinations.generate(
    test_combinations.combine(strategy=STRATEGIES, mode="eager")
)
class PreprocessingAppliedInDatasetTest(tf.test.TestCase):
    """Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""
    def testDistributedModelFit(self, strategy):
        """Fit under ``strategy`` with preprocessing done in Dataset.map."""
        with strategy.scope():
            # Both models are created inside the strategy scope; only the
            # training model is compiled and fit.
            preprocessing_model = utils.make_preprocessing_model(
                self.get_temp_dir()
            )
            training_model = utils.make_training_model()
            training_model.compile(optimizer="sgd", loss="binary_crossentropy")
        # Preprocessing happens in the input pipeline, not inside the model.
        dataset = utils.make_dataset()
        dataset = dataset.batch(utils.BATCH_SIZE)
        dataset = dataset.map(lambda x, y: (preprocessing_model(x), y))
        training_model.fit(dataset, epochs=2)
if __name__ == "__main__":
multi_process_runner.test_main()
| 40.818182 | 80 | 0.749814 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.integration_test import preprocessing_test_utils as utils
# Short aliases for TF-internal distribution/test combination helpers.
ds_combinations = tf.__internal__.distribute.combinations
multi_process_runner = tf.__internal__.distribute.multi_process_runner
test_combinations = tf.__internal__.test.combinations
# Distribution strategies the test class below is parameterized over.
# NOTE: strategy combinations are TF-internal (not public API) and may change.
STRATEGIES = [
    ds_combinations.default_strategy,
    ds_combinations.mirrored_strategy_with_cpu_1_and_2,
    ds_combinations.mirrored_strategy_with_two_gpus,
    ds_combinations.tpu_strategy,
    ds_combinations.cloud_tpu_strategy,
    ds_combinations.multi_worker_mirrored_2x1_cpu,
    ds_combinations.multi_worker_mirrored_2x2_gpu,
    ds_combinations.central_storage_strategy_with_two_gpus,
]
@ds_combinations.generate(
    test_combinations.combine(strategy=STRATEGIES, mode="eager")
)
class PreprocessingAppliedInDatasetTest(tf.test.TestCase):
    """Demonstrate Keras preprocessing layers applied in tf.data.Dataset.map."""
    def testDistributedModelFit(self, strategy):
        """Fit under ``strategy`` with preprocessing done in Dataset.map."""
        with strategy.scope():
            # Both models are created inside the strategy scope; only the
            # training model is compiled and fit.
            preprocessing_model = utils.make_preprocessing_model(
                self.get_temp_dir()
            )
            training_model = utils.make_training_model()
            training_model.compile(optimizer="sgd", loss="binary_crossentropy")
        # Preprocessing happens in the input pipeline, not inside the model.
        dataset = utils.make_dataset()
        dataset = dataset.batch(utils.BATCH_SIZE)
        dataset = dataset.map(lambda x, y: (preprocessing_model(x), y))
        training_model.fit(dataset, epochs=2)
if __name__ == "__main__":
multi_process_runner.test_main()
| true | true |
f72212bba8f6ba0a4c14cf6dd8b08907a1f2e61c | 3,187 | py | Python | pentest-tool/pentest/wordlist.py | Micr067/pentestdb | 6aa06e1406589567d51ab63a88bfe47416e906e9 | [
"Apache-2.0"
] | 686 | 2016-02-06T15:11:12.000Z | 2022-03-30T10:55:29.000Z | pentest-tool/pentest/wordlist.py | WinDyXuu/pentestdb | 6aa06e1406589567d51ab63a88bfe47416e906e9 | [
"Apache-2.0"
] | 6 | 2016-08-14T15:13:31.000Z | 2020-03-03T14:01:28.000Z | pentest-tool/pentest/wordlist.py | WinDyXuu/pentestdb | 6aa06e1406589567d51ab63a88bfe47416e906e9 | [
"Apache-2.0"
] | 284 | 2015-12-19T07:42:05.000Z | 2022-03-13T11:58:38.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Pentestdb, a database for penetration test.
Copyright (c) 2015 alpha1e0
=====================================================================
字典管理。将字典加入到数据库中去重、对重复项加权打分;从数据库导出字典文件
'''
import os
import argparse
from libs.commons import Output
from libs.commons import WordList
from libs.commons import PenError
from libs.orm import Model, DBManage
from libs.orm import StringField
from libs.orm import IntegerField
class WordListModel(Model):
    """ORM model for one wordlist entry: a word and its usage score."""
    _table = "wordlist"
    # Database file path; bound at runtime by WordListManage.__init__.
    _database = ""
    # The word itself: primary key, at most 64 characters.
    word = StringField(primarykey=True,notnull=True,ddl="vchar(64)",vrange="1-64")
    # Import count for the word; exports are ranked by this score.
    score = IntegerField(notnull=True,ddl="integer")
class WordListManage(object):
    """Manage the wordlist database.

    Imported words are de-duplicated: re-importing a word bumps its score,
    and exports are ordered by that score (highest first).
    """

    def __init__(self, dbfile):
        self.dbfile = dbfile
        # Bind the model to the database file managed by this instance.
        WordListModel._database = self.dbfile

    def dump(self, size, outFile):
        """Export the top ``size`` words (highest score first) to ``outFile``.

        :raises PenError: if the database file does not exist.
        """
        if not os.path.exists(self.dbfile):
            # FIX: corrected the misspelled message ("dose not exists").
            raise PenError("Wordlist file '{0}' does not exist".format(self.dbfile))

        rows = WordListModel.orderby("score", desc=True).limit(int(size)).getsraw("word")
        with open(outFile, "w") as output:
            for row in rows:
                try:
                    output.write(row['word'] + "\n")
                except UnicodeEncodeError:
                    # Skip words that cannot be encoded by the default codec.
                    continue

    def _insertLine(self, line):
        """Insert one word; bump its score when it is already present."""
        existing = WordListModel.where(word=line).getsraw()
        if existing:
            WordListModel.where(word=line).update(score=existing[0]['score'] + 1)
        else:
            WordListModel.insert(word=line, score=1)

    def load(self, dictFile):
        """Import every line of ``dictFile`` into the database.

        :raises PenError: if the database file does not exist.
        """
        if not os.path.exists(self.dbfile):
            raise PenError("Wordlist file '{0}' does not exist".format(self.dbfile))

        for line in WordList(dictFile):
            self._insertLine(line.strip())

    def createDB(self):
        """Create the backing wordlist table."""
        WordListModel.create()
def main():
    """Command-line entry point: import/export/create the wordlist database."""
    parser = argparse.ArgumentParser(description=u"字典数据库处理: 字典导入到数据库,数据库导出字典")
    parser.add_argument("database", help=u"指定数据库文件")
    parser.add_argument("-d", "--dump", help=u"从数据库导出字典文件")
    parser.add_argument("-s", "--size", type=int, help=u"指定导出字典文件的大小")
    parser.add_argument("-l", "--load", help=u"将指定的字典文件导入数据库")
    parser.add_argument("--create", action="store_true", help=u"创建数据库")
    args = parser.parse_args()

    try:
        manager = WordListManage(args.database)
        with Output(u"字典管理") as out:
            if args.dump:
                dump_size = args.size or 1000
                manager.dump(dump_size, args.dump)
                out.yellow(u"生成字典文件'{0}'成功".format(args.dump))
            elif args.load:
                manager.load(args.load)
                out.yellow(u"字典数据库'{0}'更新成功".format(args.database))
            if args.create:
                manager.createDB()
                out.yellow(u"创建数据库'{0}'成功".format(manager.dbfile))
    except PenError as error:
        Output.error(str(error))
    except Exception as error:
        Output.error(u"未知错误,{0}".format(str(error)))
if __name__ == "__main__":
main()
| 28.20354 | 90 | 0.595858 |
import os
import argparse
from libs.commons import Output
from libs.commons import WordList
from libs.commons import PenError
from libs.orm import Model, DBManage
from libs.orm import StringField
from libs.orm import IntegerField
class WordListModel(Model):
    """ORM model for one wordlist entry: a word and its usage score."""
    _table = "wordlist"
    # Database file path; bound at runtime by WordListManage.__init__.
    _database = ""
    # The word itself: primary key, at most 64 characters.
    word = StringField(primarykey=True,notnull=True,ddl="vchar(64)",vrange="1-64")
    # Import count for the word; exports are ranked by this score.
    score = IntegerField(notnull=True,ddl="integer")
class WordListManage(object):
    """Manage the wordlist database.

    Imported words are de-duplicated: re-importing a word bumps its score,
    and exports are ordered by that score (highest first).
    """

    def __init__(self, dbfile):
        self.dbfile = dbfile
        # Bind the model to the database file managed by this instance.
        WordListModel._database = self.dbfile

    def dump(self, size, outFile):
        """Export the top ``size`` words (highest score first) to ``outFile``.

        :raises PenError: if the database file does not exist.
        """
        if not os.path.exists(self.dbfile):
            # FIX: corrected the misspelled message ("dose not exists").
            raise PenError("Wordlist file '{0}' does not exist".format(self.dbfile))

        rows = WordListModel.orderby("score", desc=True).limit(int(size)).getsraw("word")
        with open(outFile, "w") as output:
            for row in rows:
                try:
                    output.write(row['word'] + "\n")
                except UnicodeEncodeError:
                    # Skip words that cannot be encoded by the default codec.
                    continue

    def _insertLine(self, line):
        """Insert one word; bump its score when it is already present."""
        existing = WordListModel.where(word=line).getsraw()
        if existing:
            WordListModel.where(word=line).update(score=existing[0]['score'] + 1)
        else:
            WordListModel.insert(word=line, score=1)

    def load(self, dictFile):
        """Import every line of ``dictFile`` into the database.

        :raises PenError: if the database file does not exist.
        """
        if not os.path.exists(self.dbfile):
            raise PenError("Wordlist file '{0}' does not exist".format(self.dbfile))

        for line in WordList(dictFile):
            self._insertLine(line.strip())

    def createDB(self):
        """Create the backing wordlist table."""
        WordListModel.create()
def main():
    """Command-line entry point: import/export/create the wordlist database."""
    parser = argparse.ArgumentParser(description=u"字典数据库处理: 字典导入到数据库,数据库导出字典")
    parser.add_argument("database", help=u"指定数据库文件")
    parser.add_argument("-d", "--dump", help=u"从数据库导出字典文件")
    parser.add_argument("-s", "--size", type=int, help=u"指定导出字典文件的大小")
    parser.add_argument("-l", "--load", help=u"将指定的字典文件导入数据库")
    parser.add_argument("--create", action="store_true", help=u"创建数据库")
    args = parser.parse_args()

    try:
        manager = WordListManage(args.database)
        with Output(u"字典管理") as out:
            if args.dump:
                dump_size = args.size or 1000
                manager.dump(dump_size, args.dump)
                out.yellow(u"生成字典文件'{0}'成功".format(args.dump))
            elif args.load:
                manager.load(args.load)
                out.yellow(u"字典数据库'{0}'更新成功".format(args.database))
            if args.create:
                manager.createDB()
                out.yellow(u"创建数据库'{0}'成功".format(manager.dbfile))
    except PenError as error:
        Output.error(str(error))
    except Exception as error:
        Output.error(u"未知错误,{0}".format(str(error)))
main()
| true | true |
f7221587402cacb28eaffcdbd2393c5f8fe1d6ff | 714 | py | Python | sesame/train_model.py | moha31x/SESAME | 0956cbf081d1a033855173e989da6e21f0a13215 | [
"MIT"
] | null | null | null | sesame/train_model.py | moha31x/SESAME | 0956cbf081d1a033855173e989da6e21f0a13215 | [
"MIT"
] | null | null | null | sesame/train_model.py | moha31x/SESAME | 0956cbf081d1a033855173e989da6e21f0a13215 | [
"MIT"
] | null | null | null | '''
This is a script to train a model with a variety of estimators
'''
import pickle
import pandas as pd
from sklearn.neural_network import MLPRegressor
from config import Config
# Ensure the output directory for trained models exists.
Config.models_path.mkdir(parents=True, exist_ok=True)
# Load the training features and target into pandas DataFrames.
x_train = pd.read_csv(str(Config.features_path / 'train_features.csv'))
y_train = pd.read_csv(str(Config.features_path / 'train_target.csv'))
# Instantiate and fit the regressor; ravel() flattens the single-column
# target frame into the 1-D array sklearn expects.
model = MLPRegressor(max_iter=800, alpha=0.4371)
model = model.fit(x_train, y_train.to_numpy().ravel())
# FIX: use a context manager so the file handle is closed (the original
# passed an unclosed file object straight to pickle.dump).
# NOTE(review): the model is written to the CWD even though models_path
# was just created above — confirm whether Config.models_path /
# 'model.pickle' was intended.
with open('model.pickle', 'wb') as model_file:
    pickle.dump(model, model_file)
| 32.454545 | 71 | 0.777311 | import pickle
import pandas as pd
from sklearn.neural_network import MLPRegressor
from config import Config
# Ensure the output directory for trained models exists.
Config.models_path.mkdir(parents=True, exist_ok=True)
# Load the training features and target into pandas DataFrames.
x_train = pd.read_csv(str(Config.features_path / 'train_features.csv'))
y_train = pd.read_csv(str(Config.features_path / 'train_target.csv'))
# Instantiate and fit the regressor; ravel() flattens the single-column
# target frame into the 1-D array sklearn expects.
model = MLPRegressor(max_iter=800, alpha=0.4371)
model = model.fit(x_train, y_train.to_numpy().ravel())
# FIX: use a context manager so the file handle is closed (the original
# passed an unclosed file object straight to pickle.dump).
with open('model.pickle', 'wb') as model_file:
    pickle.dump(model, model_file)
| true | true |
f72215a4786136431026f7459d3643ac23aae05c | 898 | py | Python | src/match_pattern/pattern_matcher/test/interface.py | elsid/master | b3624a6fb3a007fff005c0811f05c5802344b46b | [
"MIT"
] | null | null | null | src/match_pattern/pattern_matcher/test/interface.py | elsid/master | b3624a6fb3a007fff005c0811f05c5802344b46b | [
"MIT"
] | null | null | null | src/match_pattern/pattern_matcher/test/interface.py | elsid/master | b3624a6fb3a007fff005c0811f05c5802344b46b | [
"MIT"
] | null | null | null | # coding: utf-8
import yaml
from unittest import TestCase, main
from hamcrest import assert_that, equal_to, starts_with
from pattern_matcher.interface import Interface
class MakeInterface(TestCase):
    """Unit tests for pattern_matcher's Interface class."""

    def test_str_should_succeed(self):
        """str() uses the given name, or an 'anonymous_*' placeholder."""
        assert_that(str(Interface()), starts_with('interface anonymous_'))
        assert_that(str(Interface('A')), equal_to('interface A'))

    def test_repr_should_succeed(self):
        """repr() is a constructor-style expression."""
        assert_that(repr(Interface('A')), equal_to("Interface('A')"))

    def test_dump_and_load_yaml_recursive_interface_should_succeed(self):
        """A self-referential Interface survives a YAML dump/load cycle."""
        interface = Interface('a')
        interface.suppliers = [interface]
        # The expected YAML uses an anchor/alias pair for the self reference.
        expected_yaml = '\n'.join((
            '&id001 !Interface',
            'name: a',
            'suppliers:',
            '- *id001',
        )) + '\n'
        assert_that(yaml.dump(interface), equal_to(expected_yaml))
        assert_that(yaml.load(expected_yaml), equal_to(interface))
| 32.071429 | 74 | 0.665924 |
import yaml
from unittest import TestCase, main
from hamcrest import assert_that, equal_to, starts_with
from pattern_matcher.interface import Interface
class MakeInterface(TestCase):
    """Unit tests for pattern_matcher's Interface class."""
    def test_str_should_succeed(self):
        """str() uses the given name, or an 'anonymous_*' placeholder."""
        assert_that(str(Interface()), starts_with('interface anonymous_'))
        assert_that(str(Interface('A')), equal_to('interface A'))
    def test_repr_should_succeed(self):
        """repr() is a constructor-style expression."""
        assert_that(repr(Interface('A')), equal_to("Interface('A')"))
    def test_dump_and_load_yaml_recursive_interface_should_succeed(self):
        """A self-referential Interface survives a YAML dump/load cycle."""
        interface = Interface('a')
        interface.suppliers = [interface]
        # The expected YAML uses an anchor/alias pair for the self reference.
        data = (
            "&id001 !Interface\n"
            "name: a\n"
            "suppliers:\n"
            "- *id001\n"
        )
        assert_that(yaml.dump(interface), equal_to(data))
        assert_that(yaml.load(data), equal_to(interface))
| true | true |
f72216636d7f71629b089edbadf65ae6c85a69fd | 2,180 | py | Python | ComputerVision_HW/3B-L3/match_two_strips.py | darynarr/CV_UKMA | 2d0e5fe42c539b441e45c4281f7dc5d42939dd89 | [
"MIT"
] | null | null | null | ComputerVision_HW/3B-L3/match_two_strips.py | darynarr/CV_UKMA | 2d0e5fe42c539b441e45c4281f7dc5d42939dd89 | [
"MIT"
] | null | null | null | ComputerVision_HW/3B-L3/match_two_strips.py | darynarr/CV_UKMA | 2d0e5fe42c539b441e45c4281f7dc5d42939dd89 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
# We will use the function implemented in the last quiz
# Find best match
def find_best_match(patch, strip):
    """Find ``patch`` in ``strip`` and return the column index (x value)
    of the best-matching top-left corner, minimizing the sum of squared
    differences (SSD).

    Returns None when the strip is narrower than the patch.
    """
    best_x = None
    min_ssd = np.inf
    strip_width, patch_width = strip.shape[1], patch.shape[1]
    # FIX: include the final valid offset — range(strip_width - patch_width)
    # silently skipped the last position (off-by-one).
    for x in range(strip_width - patch_width + 1):
        window = strip[:, x: x + patch_width]
        ssd = np.sum(np.power(window - patch, 2))
        if ssd < min_ssd:
            best_x, min_ssd = x, ssd
    return best_x


def match_strips(strip_left, strip_right, b):
    """For each non-overlapping patch/block of width ``b`` in the left
    strip, find the best matching X position in the right strip.

    Returns a 1 x num_blocks array of disparities
    (left X-position - right X-position).

    FIX: only whole blocks that fit within the image bounds are considered.
    The original stop value (width + 1) also produced partial and — when
    the width is a multiple of b — empty blocks at the right edge.
    """
    disparities = []
    for x_left in range(0, strip_left.shape[1] - b + 1, b):
        patch_left = strip_left[:, x_left: x_left + b]
        x_right = find_best_match(patch_left, strip_right)
        disparities.append(x_left - x_right)
    return np.array([disparities])
# Test code:
# Load images
left = cv2.imread('images/flowers-left.png')
right = cv2.imread('images/flowers-right.png')
# NOTE(review): cv2.imshow windows usually need a cv2.waitKey() call to
# render/stay open — confirm against the execution environment.
cv2.imshow('Left', left)
cv2.imshow('Right', right)
# Convert to grayscale, double, [0, 1] range for easier computation
left_gray = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY) / 255.
right_gray = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY) / 255.
# Define strip row (y) and square block size (b)
y = 120
b = 100
# Extract strip from left image
strip_left = left_gray[y: y + b, :]
cv2.imshow('Strip Left', strip_left)
# Extract strip from right image
strip_right = right_gray[y: y + b, :]
cv2.imshow('Strip Right', strip_right)
# Now match these two strips to compute disparity values
# (disparity has shape (1, num_blocks); row 0 holds the values)
disparity = match_strips(strip_left, strip_right, b)
print( disparity)
# Finally we plot the disparity values. Note that there may be some differences
# in the results shown in the quiz because we had to adapt the index values.
plt.plot(range(disparity.shape[1]), disparity[0])
plt.show()
plt.close('all')
| 31.594203 | 83 | 0.703211 | import cv2
import numpy as np
import matplotlib.pyplot as plt
def find_best_match(patch, strip):
    """Find ``patch`` in ``strip`` and return the column index (x value)
    of the best-matching top-left corner, minimizing the sum of squared
    differences (SSD).

    Returns None when the strip is narrower than the patch.
    """
    best_x = None
    min_ssd = np.inf
    strip_width, patch_width = strip.shape[1], patch.shape[1]
    # FIX: include the final valid offset — range(strip_width - patch_width)
    # silently skipped the last position (off-by-one).
    for x in range(strip_width - patch_width + 1):
        window = strip[:, x: x + patch_width]
        ssd = np.sum(np.power(window - patch, 2))
        if ssd < min_ssd:
            best_x, min_ssd = x, ssd
    return best_x


def match_strips(strip_left, strip_right, b):
    """For each non-overlapping patch/block of width ``b`` in the left
    strip, find the best matching X position in the right strip.

    Returns a 1 x num_blocks array of disparities
    (left X-position - right X-position).

    FIX: only whole blocks that fit within the image bounds are considered.
    The original stop value (width + 1) also produced partial and — when
    the width is a multiple of b — empty blocks at the right edge.
    """
    disparities = []
    for x_left in range(0, strip_left.shape[1] - b + 1, b):
        patch_left = strip_left[:, x_left: x_left + b]
        x_right = find_best_match(patch_left, strip_right)
        disparities.append(x_left - x_right)
    return np.array([disparities])
# Load the stereo image pair.
left = cv2.imread('images/flowers-left.png')
right = cv2.imread('images/flowers-right.png')
# NOTE(review): cv2.imshow windows usually need a cv2.waitKey() call to
# render/stay open — confirm against the execution environment.
cv2.imshow('Left', left)
cv2.imshow('Right', right)
# Convert to grayscale floats in the [0, 1] range for easier computation.
left_gray = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY) / 255.
right_gray = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY) / 255.
# Strip row (y) and square block size (b).
y = 120
b = 100
# Extract the matching strips from both images.
strip_left = left_gray[y: y + b, :]
cv2.imshow('Strip Left', strip_left)
strip_right = right_gray[y: y + b, :]
cv2.imshow('Strip Right', strip_right)
# Match the two strips to compute disparity values; the result has shape
# (1, num_blocks) with the values in row 0.
disparity = match_strips(strip_left, strip_right, b)
print( disparity)
# Plot the per-block disparity values.
plt.plot(range(disparity.shape[1]), disparity[0])
plt.show()
plt.close('all')
| true | true |
f722173bc0475354d47f306795749e2b99e3551a | 6,214 | py | Python | exmail/apis/user.py | fatelei/exmail-py | 42c358fca0b0802387f691b47d2655f64c2d3f01 | [
"Apache-2.0"
] | 2 | 2017-07-24T04:40:25.000Z | 2018-10-23T10:02:06.000Z | exmail/apis/user.py | fatelei/exmail-py | 42c358fca0b0802387f691b47d2655f64c2d3f01 | [
"Apache-2.0"
] | null | null | null | exmail/apis/user.py | fatelei/exmail-py | 42c358fca0b0802387f691b47d2655f64c2d3f01 | [
"Apache-2.0"
] | 1 | 2019-05-19T08:02:29.000Z | 2019-05-19T08:02:29.000Z | # -*- coding: utf8 -*-
"""
exmail.apis.contact.
~~~~~~~~~~~~~~~~~~~~~~~
Contact apis.
"""
from exmail import exceptions
from exmail.apis.base import ExmailClient
from exmail.helpers import required_params
class UserApi(ExmailClient):
    """User management apis."""

    @staticmethod
    def _validate_member_lists(department, slaves):
        """Validate the ``department`` and ``slaves`` sequence parameters.

        Both parameters are documented as lists, so lists and tuples are
        accepted. (FIX: the previous per-method checks only accepted
        tuples, which rejected the documented list type — and, in
        create_user, even the method's own ``[]`` default.)

        :param department: Department ids, at most 20 entries
        :param slaves: Alias addresses, at most 5 entries
        :raises exceptions.ParamsError: wrong container type or too long
        """
        if len(department) > 20:
            raise exceptions.ParamsError('Departments must be within 20')
        if department and not isinstance(department, (list, tuple)):
            raise exceptions.ParamsError(
                'department should be a list or tuple')
        if len(slaves) > 5:
            raise exceptions.ParamsError('Slaves must be within 5')
        if slaves and not isinstance(slaves, (list, tuple)):
            raise exceptions.ParamsError('slaves should be a list or tuple')

    @required_params('userid', 'name', 'department', 'password')
    def create_user(self,
                    userid=None,
                    name=None,
                    department=[],
                    position=None,
                    mobile=None,
                    tel=None,
                    extid=None,
                    gender=None,
                    slaves=[],
                    password=None,
                    cpwd_login=0):
        """Create a new user.

        :param str userid: User's email
        :param str name: User's name
        :param list department: User's department list (at most 20)
        :param str position: Position name
        :param str mobile: Mobile phone number
        :param str tel: Telephone number
        :param str extid: External id
        :param int gender: 1 is man, 2 is woman
        :param list slaves: Alias addresses (at most 5)
        :param str password: Login password
        :param int cpwd_login: Change password after login
        :raises exceptions.ParamsError: invalid department/slaves value
        """
        # NOTE: the mutable [] defaults are never mutated here, so sharing
        # them between calls is safe; kept for signature compatibility.
        self._validate_member_lists(department, slaves)

        body = {
            'userid': userid,
            'name': name,
            'department': department,
            'position': position,
            'mobile': mobile,
            'tel': tel,
            'extid': extid,
            'gender': gender,
            'slaves': slaves,
            'password': password,
            'cpwd_login': cpwd_login
        }
        self.transport.perform_request(
            api='/user/create?access_token=%s' % self.access_token,
            body=body,
            method='POST'
        )

    @required_params('userid')
    def update_user(self,
                    userid=None,
                    name=None,
                    department=[],
                    position=None,
                    mobile=None,
                    tel=None,
                    extid=None,
                    gender=None,
                    slaves=[],
                    password=None,
                    cpwd_login=0):
        """Update an existing user.

        :param str userid: User's email
        :param str name: User's name
        :param list department: User's department list (at most 20)
        :param str position: Position name
        :param str mobile: Mobile phone number
        :param str tel: Telephone number
        :param str extid: External id
        :param int gender: 1 is man, 2 is woman
        :param list slaves: Alias addresses (at most 5)
        :param str password: Login password
        :param int cpwd_login: Change password after login
        :raises exceptions.ParamsError: invalid department/slaves value
        """
        self._validate_member_lists(department, slaves)

        body = {
            'userid': userid,
            'name': name,
            'department': department,
            'position': position,
            'mobile': mobile,
            'tel': tel,
            'extid': extid,
            'gender': gender,
            'slaves': slaves,
            'password': password,
            'cpwd_login': cpwd_login
        }
        self.transport.perform_request(
            api='/user/update?access_token=%s' % self.access_token,
            body=body,
            method='POST'
        )

    @required_params('userid')
    def delete_user(self,
                    userid=None):
        """Delete a user.

        :param str userid: User's email
        :raises exceptions.ParamsError: if userid is empty
        """
        # Kept in addition to @required_params as an explicit guard.
        if not userid:
            raise exceptions.ParamsError('userid should be set')
        self.transport.perform_request(
            api='/user/delete',
            body={'userid': userid, 'access_token': self.access_token}
        )

    @required_params('userid')
    def get_user(self,
                 userid=None):
        """Get a user.

        :param str userid: User's email
        :return: The user record returned by the service.
        :raises exceptions.ParamsError: if userid is empty
        """
        if not userid:
            raise exceptions.ParamsError('userid should be set')
        data = self.transport.perform_request(
            api='/user/get',
            body={'userid': userid, 'access_token': self.access_token}
        )
        return data

    @required_params('userlist')
    def check_users(self, userlist=[]):
        """Check whether each address in ``userlist`` is valid.

        :param list userlist: A list of user emails
        :return: The per-address check results (empty for empty input).
        """
        if not userlist:
            return []
        data = self.transport.perform_request(
            api='/user/batchcheck?access_token=%s' % self.access_token,
            body={'userlist': userlist},
            method='POST'
        )
        return data['list']

    @required_params('department_id')
    def list_users(self, department_id=None, is_simple=True):
        """Get the users of a department.

        :param int department_id: Department id
        :param bool is_simple: Use the simple (reduced) user listing
        :return: A list of users (empty when no department_id is given).
        """
        if not department_id:
            return []
        api = '/user/simplelist' if is_simple else '/user/list'
        body = {
            'access_token': self.access_token,
            'department_id': department_id,
        }
        data = self.transport.perform_request(
            api=api,
            body=body
        )
        return data['userlist']
| 29.590476 | 75 | 0.52607 |
from exmail import exceptions
from exmail.apis.base import ExmailClient
from exmail.helpers import required_params
class UserApi(ExmailClient):
    """User management apis."""

    @staticmethod
    def _validate_member_lists(department, slaves):
        """Validate the ``department`` and ``slaves`` sequence parameters.

        Both parameters are documented as lists, so lists and tuples are
        accepted. (FIX: the previous per-method checks only accepted
        tuples, which rejected the documented list type — and, in
        create_user, even the method's own ``[]`` default.)

        :param department: Department ids, at most 20 entries
        :param slaves: Alias addresses, at most 5 entries
        :raises exceptions.ParamsError: wrong container type or too long
        """
        if len(department) > 20:
            raise exceptions.ParamsError('Departments must be within 20')
        if department and not isinstance(department, (list, tuple)):
            raise exceptions.ParamsError(
                'department should be a list or tuple')
        if len(slaves) > 5:
            raise exceptions.ParamsError('Slaves must be within 5')
        if slaves and not isinstance(slaves, (list, tuple)):
            raise exceptions.ParamsError('slaves should be a list or tuple')

    @required_params('userid', 'name', 'department', 'password')
    def create_user(self,
                    userid=None,
                    name=None,
                    department=[],
                    position=None,
                    mobile=None,
                    tel=None,
                    extid=None,
                    gender=None,
                    slaves=[],
                    password=None,
                    cpwd_login=0):
        """Create a new user.

        :param str userid: User's email
        :param str name: User's name
        :param list department: User's department list (at most 20)
        :param str position: Position name
        :param str mobile: Mobile phone number
        :param str tel: Telephone number
        :param str extid: External id
        :param int gender: 1 is man, 2 is woman
        :param list slaves: Alias addresses (at most 5)
        :param str password: Login password
        :param int cpwd_login: Change password after login
        :raises exceptions.ParamsError: invalid department/slaves value
        """
        # NOTE: the mutable [] defaults are never mutated here, so sharing
        # them between calls is safe; kept for signature compatibility.
        self._validate_member_lists(department, slaves)

        body = {
            'userid': userid,
            'name': name,
            'department': department,
            'position': position,
            'mobile': mobile,
            'tel': tel,
            'extid': extid,
            'gender': gender,
            'slaves': slaves,
            'password': password,
            'cpwd_login': cpwd_login
        }
        self.transport.perform_request(
            api='/user/create?access_token=%s' % self.access_token,
            body=body,
            method='POST'
        )

    @required_params('userid')
    def update_user(self,
                    userid=None,
                    name=None,
                    department=[],
                    position=None,
                    mobile=None,
                    tel=None,
                    extid=None,
                    gender=None,
                    slaves=[],
                    password=None,
                    cpwd_login=0):
        """Update an existing user.

        :param str userid: User's email
        :param str name: User's name
        :param list department: User's department list (at most 20)
        :param str position: Position name
        :param str mobile: Mobile phone number
        :param str tel: Telephone number
        :param str extid: External id
        :param int gender: 1 is man, 2 is woman
        :param list slaves: Alias addresses (at most 5)
        :param str password: Login password
        :param int cpwd_login: Change password after login
        :raises exceptions.ParamsError: invalid department/slaves value
        """
        self._validate_member_lists(department, slaves)

        body = {
            'userid': userid,
            'name': name,
            'department': department,
            'position': position,
            'mobile': mobile,
            'tel': tel,
            'extid': extid,
            'gender': gender,
            'slaves': slaves,
            'password': password,
            'cpwd_login': cpwd_login
        }
        self.transport.perform_request(
            api='/user/update?access_token=%s' % self.access_token,
            body=body,
            method='POST'
        )

    @required_params('userid')
    def delete_user(self,
                    userid=None):
        """Delete a user.

        :param str userid: User's email
        :raises exceptions.ParamsError: if userid is empty
        """
        # Kept in addition to @required_params as an explicit guard.
        if not userid:
            raise exceptions.ParamsError('userid should be set')
        self.transport.perform_request(
            api='/user/delete',
            body={'userid': userid, 'access_token': self.access_token}
        )

    @required_params('userid')
    def get_user(self,
                 userid=None):
        """Get a user.

        :param str userid: User's email
        :return: The user record returned by the service.
        :raises exceptions.ParamsError: if userid is empty
        """
        if not userid:
            raise exceptions.ParamsError('userid should be set')
        data = self.transport.perform_request(
            api='/user/get',
            body={'userid': userid, 'access_token': self.access_token}
        )
        return data

    @required_params('userlist')
    def check_users(self, userlist=[]):
        """Check whether each address in ``userlist`` is valid.

        :param list userlist: A list of user emails
        :return: The per-address check results (empty for empty input).
        """
        if not userlist:
            return []
        data = self.transport.perform_request(
            api='/user/batchcheck?access_token=%s' % self.access_token,
            body={'userlist': userlist},
            method='POST'
        )
        return data['list']

    @required_params('department_id')
    def list_users(self, department_id=None, is_simple=True):
        """Get the users of a department.

        :param int department_id: Department id
        :param bool is_simple: Use the simple (reduced) user listing
        :return: A list of users (empty when no department_id is given).
        """
        if not department_id:
            return []
        api = '/user/simplelist' if is_simple else '/user/list'
        body = {
            'access_token': self.access_token,
            'department_id': department_id,
        }
        data = self.transport.perform_request(
            api=api,
            body=body
        )
        return data['userlist']
| true | true |
f7221873d9ecbf77edfe45ec72ecc412e695cb21 | 11,169 | py | Python | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG Website - A Django-powered website for Reaction Mechanism Generator
#
# Copyright (c) 2011 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from django import forms
from django.forms.util import ErrorList
from django.utils.safestring import mark_safe
from rmgpy.molecule.molecule import Molecule
import rmgpy
import copy
import sys
class DivErrorList(ErrorList):
    """
    Error-list renderer that emits each form error as a
    ``<div class="error">`` element, preceded by an empty ``<label>``
    so the errors line up with the form's label/field grid.
    """
    def __unicode__(self):
        # Django renders the error list by coercing it to unicode.
        return self.as_divs()
    def as_divs(self):
        if not self:
            return u''
        rendered = ''.join([u'<div class="error">%s</div>' % err for err in self])
        return mark_safe(u'<label> </label>%s' % rendered)
class ThermoSearchForm(forms.Form):
    """
    This form provides a means of specifying a species to get thermodynamic
    data for.
    """
    species = forms.CharField(widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
    def clean_species(self):
        """
        Custom validation for the species field to ensure that a valid
        adjacency list has been provided.
        """
        try:
            molecule = Molecule()
            molecule.fromAdjacencyList(str(self.cleaned_data['species']))
        except Exception:
            import traceback
            # print_exc() takes an optional traceback *limit*, not the
            # exception object; the previous code passed the exception here.
            traceback.print_exc()
            raise forms.ValidationError('Invalid adjacency list.')
        return str(self.cleaned_data['species'])
    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        # These HTML fragments are relied upon by the site templates and
        # must stay exactly as-is.
        return self._html_output(
            normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
            error_row = u'<tr><td colspan="2">%s</td></tr>',
            row_ender = u'</td></tr>',
            help_text_html = u'<br />%s',
            errors_on_separate_row = False)
class KineticsSearchForm(forms.Form):
    """
    This form provides a means of specifying a set of reactants to get
    kinetic data for.
    """
    reactant1_identifier = forms.CharField(label="Reactant #1 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("reactant1");','class':'identifier'}), required=False)
    reactant1 = forms.CharField(label="Reactant #1", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
    reactant2_identifier = forms.CharField(label="Reactant #2 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("reactant2");','class':'identifier'}), required=False)
    reactant2 = forms.CharField(label="Reactant #2", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
    product1_identifier = forms.CharField(label="Product #1 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("product1");','class':'identifier'}), required=False)
    product1 = forms.CharField(label="Product #1", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
    product2_identifier = forms.CharField(label="Product #2 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("product2");','class':'identifier'}), required=False)
    product2 = forms.CharField(label="Product #2", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
    def _clean_adjacency_list(self, field, required):
        """
        Shared validator for the four species fields: parse the field's
        text as an adjacency list and return it unchanged on success.
        Optional fields return '' when left blank.
        """
        adjlist = str(self.cleaned_data[field])
        if not required and adjlist.strip() == '':
            return ''
        try:
            molecule = Molecule()
            molecule.fromAdjacencyList(adjlist)
        except Exception:
            import traceback
            # print_exc() takes an optional traceback *limit*, not the
            # exception object; the previous code passed the exception here.
            traceback.print_exc()
            raise forms.ValidationError('Invalid adjacency list.')
        return adjlist
    def clean_reactant1(self):
        """Validate the required reactant1 adjacency list."""
        return self._clean_adjacency_list('reactant1', required=True)
    def clean_reactant2(self):
        """Validate the optional reactant2 adjacency list ('' if blank)."""
        return self._clean_adjacency_list('reactant2', required=False)
    def clean_product1(self):
        """Validate the optional product1 adjacency list ('' if blank)."""
        return self._clean_adjacency_list('product1', required=False)
    def clean_product2(self):
        """Validate the optional product2 adjacency list ('' if blank)."""
        return self._clean_adjacency_list('product2', required=False)
class MoleculeSearchForm(forms.Form):
    """
    Form for drawing molecule from adjacency list
    """
    species_identifier = forms.CharField(label="Species Identifier", widget=forms.TextInput(attrs={'onchange':'resolve();', 'style':'width:100%;'}), required=False)
    species = forms.CharField(label ="Adjacency List", widget = forms.Textarea(attrs={'cols': 50, 'rows': 20, 'onchange':"$('.result').hide();" }), required=True)
    def clean_species(self):
        """
        Custom validation for the species field to ensure that a valid
        adjacency list has been provided.  Returns the raw adjacency-list
        text, or '' when the field is blank.
        """
        # Bind adjlist before the try so the handler and the final return
        # can never see an unbound name.
        adjlist = str(self.cleaned_data['species'])
        if adjlist == '':
            return ''
        try:
            molecule = Molecule()
            molecule.fromAdjacencyList(adjlist)
        except Exception:
            import traceback
            # print_exc() takes an optional traceback *limit*, not the
            # exception object; the previous code passed the exception here.
            traceback.print_exc()
            raise forms.ValidationError('Invalid adjacency list.')
        return adjlist
class EniSearchForm(forms.Form):
    """
    Form for specifying a detergent/deposit structure pair for the Eni
    search (each entered as adjacency-list text).
    """
    detergent_identifier = forms.CharField(label="Detergent Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("detergent");','class':'identifier'}), required=False)
    detergent = forms.CharField(label="Detergent", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
    deposit_identifier = forms.CharField(label="Deposit Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("deposit");','class':'identifier'}), required=False)
    deposit = forms.CharField(label="Deposit", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
    def clean_detergent(self):
        """
        Validate and return the detergent structure text.
        """
        try:
            detergent = Molecule()
            detergent.fromAdjacencyList(str(self.cleaned_data['detergent']))
        except Exception:
            import traceback
            # print_exc() takes an optional traceback *limit*, not the
            # exception object; the previous code passed the exception here.
            traceback.print_exc()
            # NOTE(review): the field is parsed as an adjacency list, yet
            # the message mentions SMILES -- confirm which format the UI
            # actually expects before changing the wording.
            raise forms.ValidationError('Invalid SMILES entry.')
        return str(self.cleaned_data['detergent'])
    def clean_deposit(self):
        """
        Validate and return the deposit structure text.
        """
        try:
            deposit = Molecule()
            deposit.fromAdjacencyList(str(self.cleaned_data['deposit']))
        except Exception:
            import traceback
            traceback.print_exc()
            raise forms.ValidationError('Invalid SMILES entry.')
        return str(self.cleaned_data['deposit'])
class KineticsEntryEditForm(forms.Form):
"""
Form for editing kinetics database entries
"""
entry = forms.CharField(label="Database Entry", widget = forms.Textarea(attrs={'cols': 80, 'rows': 40, 'class':'data_entry'}), required=True)
change = forms.CharField(label="Summary of changes", widget=forms.TextInput(attrs={'class':'change_summary'}), required=True)
def clean_entry(self):
"""
Custom validation for the entry field to ensure that a valid
entry has been provided.
"""
new_database = rmgpy.data.kinetics.KineticsDatabase()
new_depository = rmgpy.data.kinetics.KineticsDepository()
global_context = {'__builtins__': None} # disable even builtins
local_context = copy.copy(new_database.local_context)
local_context['entry'] = new_depository.loadEntry
for key,value in rmgpy.data.base.Database.local_context.iteritems():
local_context[key]=value
print local_context
try:
entry_string = str(self.cleaned_data['entry'])
entry = eval("entry( index=-1, {0})".format(entry_string), global_context, local_context)
except Exception, e:
print "Invalid entry from KineticsEntryEditForm."
print repr(entry_string)
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid entry.'+ str(sys.exc_info()[1]))
return entry
class TemperatureForm(forms.Form):
    """
    Prompt the user for a single temperature at which the rate
    coefficients of a collection of kinetics search results are
    displayed.
    """
    # Temperature in kelvin at which each rate expression is evaluated.
    temperature = forms.FloatField(label="Specify Temperature (K)")
adjlist
class EniSearchForm(forms.Form):
"""
Form for drawing molecule from adjacency list
"""
detergent_identifier = forms.CharField(label="Detergent Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("detergent");','class':'identifier'}), required=False)
detergent = forms.CharField(label="Detergent", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
deposit_identifier = forms.CharField(label="Deposit Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("deposit");','class':'identifier'}), required=False)
deposit = forms.CharField(label="Deposit", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
def clean_detergent(self):
"""
Return molecular representation of input detergent structure """
try:
detergent = Molecule()
detergent.fromAdjacencyList(str(self.cleaned_data['detergent']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid SMILES entry.')
return str(self.cleaned_data['detergent'])
def clean_deposit(self):
"""
Return molecular representation of input deposit structure
"""
try:
deposit = Molecule()
deposit.fromAdjacencyList(str(self.cleaned_data['deposit']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid SMILES entry.')
return str(self.cleaned_data['deposit'])
class KineticsEntryEditForm(forms.Form):
"""
Form for editing kinetics database entries
"""
entry = forms.CharField(label="Database Entry", widget = forms.Textarea(attrs={'cols': 80, 'rows': 40, 'class':'data_entry'}), required=True)
change = forms.CharField(label="Summary of changes", widget=forms.TextInput(attrs={'class':'change_summary'}), required=True)
def clean_entry(self):
"""
Custom validation for the entry field to ensure that a valid
entry has been provided.
"""
new_database = rmgpy.data.kinetics.KineticsDatabase()
new_depository = rmgpy.data.kinetics.KineticsDepository()
global_context = {'__builtins__': None}
local_context = copy.copy(new_database.local_context)
local_context['entry'] = new_depository.loadEntry
for key,value in rmgpy.data.base.Database.local_context.iteritems():
local_context[key]=value
print local_context
try:
entry_string = str(self.cleaned_data['entry'])
entry = eval("entry( index=-1, {0})".format(entry_string), global_context, local_context)
except Exception, e:
print "Invalid entry from KineticsEntryEditForm."
print repr(entry_string)
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid entry.'+ str(sys.exc_info()[1]))
return entry
class TemperatureForm(forms.Form):
"""
This form allows the user to enter a specific temperature and display the resulting rates
on a collection of kinetics search results
"""
temperature = forms.FloatField(label="Specify Temperature (K)") | false | true |
f722193cd5e32f4fa56c0d5adf757148f05a634e | 1,578 | py | Python | sdk/python/pulumi_azure_native/machinelearningexperimentation/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/machinelearningexperimentation/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/machinelearningexperimentation/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'StorageAccountPropertiesResponse',
]
@pulumi.output_type
class StorageAccountPropertiesResponse(dict):
    """
    The properties of a storage account for a machine learning team account.
    """
    def __init__(__self__, *,
                 access_key: str,
                 storage_account_id: str):
        """
        The properties of a storage account for a machine learning team account.
        :param str access_key: The access key to the storage account.
        :param str storage_account_id: The fully qualified arm Id of the storage account.
        """
        # Values are stored via pulumi.set so the SDK tracks them as
        # output properties on the underlying dict.
        pulumi.set(__self__, "access_key", access_key)
        pulumi.set(__self__, "storage_account_id", storage_account_id)
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> str:
        """
        The access key to the storage account.
        """
        return pulumi.get(self, "access_key")
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> str:
        """
        The fully qualified arm Id of the storage account.
        """
        return pulumi.get(self, "storage_account_id")
    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case Python attribute
        # names; fall through to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 30.941176 | 89 | 0.665399 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'StorageAccountPropertiesResponse',
]
@pulumi.output_type
class StorageAccountPropertiesResponse(dict):
def __init__(__self__, *,
access_key: str,
storage_account_id: str):
pulumi.set(__self__, "access_key", access_key)
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="accessKey")
def access_key(self) -> str:
return pulumi.get(self, "access_key")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> str:
return pulumi.get(self, "storage_account_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| true | true |
f7221975b001f6b137480353ce2f10a7afacc290 | 5,481 | py | Python | mmdet/core/evaluation/class_names.py | Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient | cc8386b6cc6cbe823983647119511aa87e121f23 | [
"Apache-2.0"
] | 2 | 2021-02-23T04:42:07.000Z | 2021-02-23T07:22:42.000Z | mmdet/core/evaluation/class_names.py | Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient | cc8386b6cc6cbe823983647119511aa87e121f23 | [
"Apache-2.0"
] | null | null | null | mmdet/core/evaluation/class_names.py | Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient | cc8386b6cc6cbe823983647119511aa87e121f23 | [
"Apache-2.0"
] | 1 | 2021-02-23T04:42:08.000Z | 2021-02-23T04:42:08.000Z | import mmcv
def wider_face_classes():
    """Return the class names of the WIDER FACE dataset (one class)."""
    return ['face']
def voc_classes():
    """Return the 20 class names of the PASCAL VOC dataset."""
    names = ('aeroplane bicycle bird boat bottle bus car cat chair cow '
             'diningtable dog horse motorbike person pottedplant sheep '
             'sofa train tvmonitor')
    return names.split()
def imagenet_det_classes():
    """Return the class names of the ImageNet DET (ILSVRC detection)
    dataset, in the canonical order used by the annotations."""
    return [
        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
        'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
        'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
        'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
        'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
        'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
        'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
        'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
        'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
        'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
        'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
        'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
        'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
        'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
        'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
        'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
        'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
        'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
        'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
        'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
        'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
        'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
        'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
        'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
        'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
        'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
        'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        'whale', 'wine_bottle', 'zebra'
    ]
def imagenet_vid_classes():
    """Return the 30 class names of the ImageNet VID dataset."""
    names = ('airplane antelope bear bicycle bird bus car cattle dog '
             'domestic_cat elephant fox giant_panda hamster horse lion '
             'lizard monkey motorcycle rabbit red_panda sheep snake '
             'squirrel tiger train turtle watercraft whale zebra')
    return names.split()
def coco_classes():
    """Return the class names of this customised two-class cell dataset
    (the stock 80-class COCO list is kept, commented out, below)."""
    return ['Cell1', 'Cell2']
"""
def coco_classes():
return [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
]
"""
def cityscapes_classes():
    """Return the 8 instance class names of the Cityscapes dataset."""
    return 'person rider car truck bus train motorcycle bicycle'.split()
# Maps each canonical dataset key to the alias strings accepted by
# get_classes(); the key (plus '_classes') names the generator function.
dataset_aliases = {
    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
    'coco': ['coco', 'mscoco', 'ms_coco'],
    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
    'cityscapes': ['cityscapes']
}
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): dataset name or any alias listed in
            ``dataset_aliases``.

    Returns:
        list[str]: the class names.

    Raises:
        ValueError: if the string is not a known dataset alias.
        TypeError: if ``dataset`` is not a string.
    """
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Look the '<name>_classes' generator up directly instead of
            # eval()-ing a synthesised expression string.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
| 46.058824 | 79 | 0.580551 | import mmcv
def wider_face_classes():
return ['face']
def voc_classes():
return [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
def imagenet_det_classes():
return [
'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
'whale', 'wine_bottle', 'zebra'
]
def imagenet_vid_classes():
return [
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
'watercraft', 'whale', 'zebra'
]
def coco_classes():
return ['Cell1','Cell2']
def cityscapes_classes():
return [
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
dataset_aliases = {
'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
'cityscapes': ['cityscapes']
}
def get_classes(dataset):
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
| true | true |
f7221b06cd36fb3a6c04d9d0d0365009fef93286 | 5,629 | py | Python | python/dask_cudf/dask_cudf/io/parquet.py | CZZLEGEND/cudf | 5d2465d6738d00628673fffdc1fac51fad7ef9a7 | [
"Apache-2.0"
] | 1 | 2020-01-14T01:44:35.000Z | 2020-01-14T01:44:35.000Z | python/dask_cudf/dask_cudf/io/parquet.py | CZZLEGEND/cudf | 5d2465d6738d00628673fffdc1fac51fad7ef9a7 | [
"Apache-2.0"
] | null | null | null | python/dask_cudf/dask_cudf/io/parquet.py | CZZLEGEND/cudf | 5d2465d6738d00628673fffdc1fac51fad7ef9a7 | [
"Apache-2.0"
] | null | null | null | from functools import partial
import pyarrow.parquet as pq
import dask.dataframe as dd
from dask.dataframe.io.parquet.arrow import ArrowEngine
import cudf
from cudf.core.column import build_categorical_column
class CudfEngine(ArrowEngine):
    """Dask-parquet engine backed by cudf (GPU).

    Dataset discovery and metadata handling are inherited from the Arrow
    engine (CPU side); per-partition reads go through ``cudf.read_parquet``
    and per-partition writes through ``pyarrow.parquet``.
    """
    @staticmethod
    def read_metadata(*args, **kwargs):
        """Collect dataset metadata via the Arrow engine, then convert the
        (pandas) meta object to a ``cudf.DataFrame`` so downstream graph
        construction sees GPU-backed metadata."""
        meta, stats, parts = ArrowEngine.read_metadata(*args, **kwargs)
        # If `strings_to_categorical==True`, convert objects to int32
        strings_to_cats = kwargs.get("strings_to_categorical", False)
        dtypes = {}
        for col in meta.columns:
            if meta[col].dtype == "O":
                dtypes[col] = "int32" if strings_to_cats else "object"
        meta = cudf.DataFrame.from_pandas(meta)
        for col, dtype in dtypes.items():
            meta[col] = meta[col].astype(dtype)
        return (meta, stats, parts)
    @staticmethod
    def read_partition(
        fs, piece, columns, index, categories=(), partitions=(), **kwargs
    ):
        """Read one parquet piece into a ``cudf.DataFrame``.

        ``piece`` is either a plain path string or a
        ``(path, row_group, partition_keys)`` tuple; hive-style partition
        columns are rebuilt as categorical columns at the end.
        """
        if columns is not None:
            # Copy so the caller's list is never mutated by the index append.
            columns = [c for c in columns]
        if isinstance(index, list):
            columns += index
        if isinstance(piece, str):
            # `piece` is a file-path string
            piece = pq.ParquetDatasetPiece(
                piece, open_file_func=partial(fs.open, mode="rb")
            )
        else:
            # `piece` = (path, row_group, partition_keys)
            (path, row_group, partition_keys) = piece
            piece = pq.ParquetDatasetPiece(
                path,
                row_group=row_group,
                partition_keys=partition_keys,
                open_file_func=partial(fs.open, mode="rb"),
            )
        strings_to_cats = kwargs.get("strings_to_categorical", False)
        if cudf.utils.ioutils._is_local_filesystem(fs):
            # Local files: let cudf open the path directly.
            df = cudf.read_parquet(
                piece.path,
                engine="cudf",
                columns=columns,
                row_group=piece.row_group,
                strings_to_categorical=strings_to_cats,
                **kwargs.get("read", {}),
            )
        else:
            # Remote storage: hand cudf an open file-like object instead.
            with fs.open(piece.path, mode="rb") as f:
                df = cudf.read_parquet(
                    f,
                    engine="cudf",
                    columns=columns,
                    row_group=piece.row_group,
                    strings_to_categorical=strings_to_cats,
                    **kwargs.get("read", {}),
                )
        if index and index[0] in df.columns:
            df = df.set_index(index[0])
        if len(piece.partition_keys) > 0:
            if partitions is None:
                raise ValueError("Must pass partition sets")
            # Rebuild each hive-partition column as a categorical column
            # whose single repeated code points at the partition value.
            for i, (name, index2) in enumerate(piece.partition_keys):
                categories = [
                    val.as_py() for val in partitions.levels[i].dictionary
                ]
                sr = cudf.Series(index2).astype(type(index2)).repeat(len(df))
                df[name] = build_categorical_column(
                    categories=categories, codes=sr._column, ordered=False
                )
        return df
    @staticmethod
    def write_partition(
        df,
        path,
        fs,
        filename,
        partition_on,
        return_metadata,
        fmd=None,
        compression=None,
        index_cols=None,
        **kwargs,
    ):
        """Write one partition ``df`` to parquet under ``path``.

        With ``partition_on`` the partition is written as a hive-style
        dataset; otherwise as a single file named ``filename``.  When
        ``return_metadata`` is True, returns the schema/metadata needed
        to assemble the global ``_metadata`` file.
        """
        # TODO: Replace `pq.write_table` with gpu-accelerated
        # write after cudf.io.to_parquet is supported.
        md_list = []
        preserve_index = False
        if index_cols:
            df = df.set_index(index_cols)
            preserve_index = True
        # NOTE: `to_arrow` does not accept `schema` argument
        t = df.to_arrow(preserve_index=preserve_index)
        if partition_on:
            pq.write_to_dataset(
                t,
                path,
                partition_cols=partition_on,
                filesystem=fs,
                metadata_collector=md_list,
                **kwargs,
            )
        else:
            with fs.open(fs.sep.join([path, filename]), "wb") as fil:
                pq.write_table(
                    t,
                    fil,
                    compression=compression,
                    metadata_collector=md_list,
                    **kwargs,
                )
            if md_list:
                md_list[0].set_file_path(filename)
        # Return the schema needed to write the metadata
        if return_metadata:
            return [{"schema": t.schema, "meta": md_list[0]}]
        else:
            return []
def read_parquet(
    path, columns=None, split_row_groups=True, gather_statistics=None, **kwargs
):
    """Read parquet files into a Dask DataFrame backed by cudf.

    Thin wrapper that delegates to ``dask.dataframe.read_parquet`` with
    the GPU-backed ``CudfEngine`` plugged in, so each partition is read
    via ``cudf.read_parquet``.  See ``cudf.read_parquet`` and the Dask
    documentation for the supported keyword arguments.

    Examples
    --------
    >>> import dask_cudf
    >>> df = dask_cudf.read_parquet("/path/to/dataset/")  # doctest: +SKIP

    See Also
    --------
    cudf.read_parquet
    """
    if isinstance(columns, str):
        columns = [columns]
    # Splitting by row group requires parquet statistics to be gathered.
    gather_statistics = True if split_row_groups else gather_statistics
    return dd.read_parquet(
        path,
        columns=columns,
        split_row_groups=split_row_groups,
        gather_statistics=gather_statistics,
        engine=CudfEngine,
        **kwargs,
    )
# GPU-backed variant of ``dd.to_parquet``: identical signature, but
# partitions are written through ``CudfEngine`` instead of the CPU engine.
to_parquet = partial(dd.to_parquet, engine=CudfEngine)
| 31.623596 | 79 | 0.554983 | from functools import partial
import pyarrow.parquet as pq
import dask.dataframe as dd
from dask.dataframe.io.parquet.arrow import ArrowEngine
import cudf
from cudf.core.column import build_categorical_column
class CudfEngine(ArrowEngine):
    """Dask parquet engine backed by cudf (GPU DataFrames).

    Reuses ArrowEngine's metadata/statistics discovery but materialises
    partitions as ``cudf.DataFrame`` objects instead of pandas ones.
    """
    @staticmethod
    def read_metadata(*args, **kwargs):
        """Collect (meta, stats, parts) via ArrowEngine, converting meta to cudf."""
        meta, stats, parts = ArrowEngine.read_metadata(*args, **kwargs)
        strings_to_cats = kwargs.get("strings_to_categorical", False)
        # Object (string) columns become int32 category codes when
        # strings_to_categorical is requested, plain object otherwise.
        dtypes = {}
        for col in meta.columns:
            if meta[col].dtype == "O":
                dtypes[col] = "int32" if strings_to_cats else "object"
        meta = cudf.DataFrame.from_pandas(meta)
        for col, dtype in dtypes.items():
            meta[col] = meta[col].astype(dtype)
        return (meta, stats, parts)
    @staticmethod
    def read_partition(
        fs, piece, columns, index, categories=(), partitions=(), **kwargs
    ):
        """Read one dataset piece into a cudf DataFrame.

        ``piece`` is either a bare path string or a
        ``(path, row_group, partition_keys)`` tuple.
        """
        if columns is not None:
            # Copy so the caller's list is not mutated by the index append below.
            columns = [c for c in columns]
        if isinstance(index, list):
            columns += index
        # Normalise both accepted piece forms into a ParquetDatasetPiece.
        if isinstance(piece, str):
            piece = pq.ParquetDatasetPiece(
                piece, open_file_func=partial(fs.open, mode="rb")
            )
        else:
            (path, row_group, partition_keys) = piece
            piece = pq.ParquetDatasetPiece(
                path,
                row_group=row_group,
                partition_keys=partition_keys,
                open_file_func=partial(fs.open, mode="rb"),
            )
        strings_to_cats = kwargs.get("strings_to_categorical", False)
        # Local files can be handed to cudf by path; remote ones must be
        # opened through the filesystem object first.
        if cudf.utils.ioutils._is_local_filesystem(fs):
            df = cudf.read_parquet(
                piece.path,
                engine="cudf",
                columns=columns,
                row_group=piece.row_group,
                strings_to_categorical=strings_to_cats,
                **kwargs.get("read", {}),
            )
        else:
            with fs.open(piece.path, mode="rb") as f:
                df = cudf.read_parquet(
                    f,
                    engine="cudf",
                    columns=columns,
                    row_group=piece.row_group,
                    strings_to_categorical=strings_to_cats,
                    **kwargs.get("read", {}),
                )
        if index and index[0] in df.columns:
            df = df.set_index(index[0])
        # Re-attach hive-partition key columns as categorical columns.
        if len(piece.partition_keys) > 0:
            if partitions is None:
                raise ValueError("Must pass partition sets")
            for i, (name, index2) in enumerate(piece.partition_keys):
                # NOTE(review): assumes partitions.levels[i].dictionary yields
                # pyarrow scalars (hence val.as_py()) — confirm against the
                # partition-metadata structure produced by read_metadata.
                categories = [
                    val.as_py() for val in partitions.levels[i].dictionary
                ]
                sr = cudf.Series(index2).astype(type(index2)).repeat(len(df))
                df[name] = build_categorical_column(
                    categories=categories, codes=sr._column, ordered=False
                )
        return df
    @staticmethod
    def write_partition(
        df,
        path,
        fs,
        filename,
        partition_on,
        return_metadata,
        fmd=None,
        compression=None,
        index_cols=None,
        **kwargs,
    ):
        """Write one cudf partition to parquet via its Arrow representation.

        Returns ``[{"schema": ..., "meta": ...}]`` when ``return_metadata``
        is truthy, else an empty list.
        """
        md_list = []
        preserve_index = False
        if index_cols:
            # Move the requested columns into the index so they round-trip.
            df = df.set_index(index_cols)
            preserve_index = True
        t = df.to_arrow(preserve_index=preserve_index)
        if partition_on:
            # Hive-style partitioned layout handled entirely by pyarrow.
            pq.write_to_dataset(
                t,
                path,
                partition_cols=partition_on,
                filesystem=fs,
                metadata_collector=md_list,
                **kwargs,
            )
        else:
            with fs.open(fs.sep.join([path, filename]), "wb") as fil:
                pq.write_table(
                    t,
                    fil,
                    compression=compression,
                    metadata_collector=md_list,
                    **kwargs,
                )
            if md_list:
                # Record the file's relative path in the collected footer metadata.
                md_list[0].set_file_path(filename)
        if return_metadata:
            # NOTE(review): indexing md_list[0] presumes the metadata collector
            # was populated; in the partition_on branch no file path is set.
            return [{"schema": t.schema, "meta": md_list[0]}]
        else:
            return []
def read_parquet(
    path, columns=None, split_row_groups=True, gather_statistics=None, **kwargs
):
    """Read a Parquet dataset through the GPU-backed CudfEngine.

    A single column may be given as a plain string; statistics gathering
    is forced on whenever row groups are split.
    """
    # Normalise a lone column name into the list form expected downstream.
    columns = [columns] if isinstance(columns, str) else columns
    # Splitting by row group requires per-row-group statistics.
    gather_statistics = True if split_row_groups else gather_statistics
    return dd.read_parquet(
        path,
        columns=columns,
        split_row_groups=split_row_groups,
        gather_statistics=gather_statistics,
        engine=CudfEngine,
        **kwargs,
    )
# Write a dask DataFrame to parquet using the GPU-backed CudfEngine.
to_parquet = partial(dd.to_parquet, engine=CudfEngine)
| true | true |
f7221b1daebb09fb2c2136eca3ac9bf27cb12285 | 1,992 | py | Python | scaffold/infer-distribution.py | ryought/3d-dna | caa187a272c2c3d5720727d846489d628092b680 | [
"MIT"
] | null | null | null | scaffold/infer-distribution.py | ryought/3d-dna | caa187a272c2c3d5720727d846489d628092b680 | [
"MIT"
] | null | null | null | scaffold/infer-distribution.py | ryought/3d-dna | caa187a272c2c3d5720727d846489d628092b680 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import dask.dataframe as ddf
import pandas as pd
import time
from sklearn.neighbors.kde import KernelDensity
from scipy.optimize import curve_fit
import numpy as np
def infer_distribution_from_contig(contacts, K, K0):
    """Fit a log-linear model of contact log-density vs. distance.

    Uses the longest contig (the one containing the largest ``P1``
    position) as reference: collects |P1 - P2| for all contacts whose
    both ends map to that contig, estimates their density with a
    Gaussian KDE (bandwidth 200), and fits

        log p(x) ~= a + b * log(x)

    over a log-spaced grid of distances in [K0, K].

    Parameters
    ----------
    contacts : DataFrame with columns N1, P1, N2, P2 (name/position of
        each contact end).
    K : upper distance bound of the fitted range.
    K0 : lower distance bound of the fitted range.

    Returns
    -------
    (a, b) : intercept and slope of the fitted line in log-distance space.
    """
    # Reference contig = the one holding the maximal P1 coordinate.
    longest_contig_name = contacts.loc[contacts.P1.idxmax()].N1
    # Keep only contacts whose both ends lie on the reference contig
    # (these are intra-contig contacts; the original misleadingly
    # called them "inter").
    intra_contacts = contacts[
        (contacts.N1 == longest_contig_name)
        & (contacts.N2 == longest_contig_name)]
    distances = np.abs(intra_contacts.P1.values - intra_contacts.P2.values)
    kde = KernelDensity(kernel='gaussian', bandwidth=200).fit(distances.reshape(-1, 1))
    log_density = lambda x: kde.score_samples(x.reshape(-1, 1))
    # Sample the KDE on a log-spaced grid over [K0, K] and fit a straight
    # line in log-distance space.
    x1 = np.logspace(np.log10(K0), np.log10(K), 500)
    model = lambda x, a, b: a + b * np.log(x)
    params, _cov = curve_fit(model, x1, log_density(x1))
    return params[0], params[1]
def main():
    """Command-line entry point.

    Usage: ``python infer-distribution.py foo.mnd K K0``

    Reads a space-separated mnd contact file, fits the contact-distance
    distribution on the longest contig, and prints the two fitted
    parameters (intercept, then slope), one per line.

    Returns -1 on bad arguments (kept for backward compatibility).
    """
    import sys
    if len(sys.argv) != 4:
        print('not enough arguments')
        print('usage: python infer-distribution.py foo.mnd K K0')
        return -1
    mnd_filename = sys.argv[1]
    K = int(sys.argv[2])
    K0 = int(sys.argv[3])
    # Only columns 1/2 (name/pos of end 1) and 5/6 (name/pos of end 2)
    # of the space-separated mnd file are needed.
    df = ddf.read_csv(
        mnd_filename,
        sep=' ',
        header=None,
        names=['N1', 'P1', 'N2', 'P2'],
        usecols=[1, 2, 5, 6],
        engine='c',
    ).compute()
    # dask leaves partition-local indices; rebuild a clean RangeIndex.
    df = df.reset_index(drop=True)
    p = infer_distribution_from_contig(df, K=K, K0=K0)
    print(p[0])
    print(p[1])


# Guard the entry point so the module can be imported without running
# (the original called main() unconditionally at import time).
if __name__ == '__main__':
    main()
| 27.666667 | 83 | 0.568775 |
import dask.dataframe as ddf
import pandas as pd
import time
from sklearn.neighbors.kde import KernelDensity
from scipy.optimize import curve_fit
import numpy as np
def infer_distribution_from_contig(contacts, K, K0):
    """Fit log-density of contact distances on the longest contig.

    Returns the intercept and slope of a line fitted to
    log p(distance) over a log-spaced grid of distances in [K0, K].
    """
    # Reference contig = the one containing the maximal P1 coordinate.
    longest_contig_name = contacts.loc[contacts.P1.idxmax()].N1
    # Contacts with both ends on the reference contig.
    inter_contacts = contacts[
        (contacts.N1 == longest_contig_name)
        & (contacts.N2 == longest_contig_name)]
    inter = np.abs(inter_contacts.P1.values - inter_contacts.P2.values)
    # Gaussian KDE of the distance distribution (bandwidth 200).
    kde = KernelDensity(kernel='gaussian', bandwidth=200).fit(inter.reshape(-1, 1))
    f = lambda x: kde.score_samples(x.reshape(-1, 1))  # log-density
    x1 = np.logspace(np.log10(K0), np.log10(K), 500)
    p = lambda x, a, b: a + b * np.log(x)
    param1, cov = curve_fit(p, x1, f(x1))
    return param1[0], param1[1]
def main():
    """CLI: read an mnd contact file and print the two fitted parameters."""
    import sys
    if len(sys.argv) != 4:
        print('not enough arguments')
        print('usage: python infer-distribution.py foo.mnd K K0')
        return -1
    mnd_filename = sys.argv[1]
    K = int(sys.argv[2])
    K0 = int(sys.argv[3])
    # Columns 1/2 and 5/6 of the space-separated file hold the
    # name/position of each contact end.
    df = ddf.read_csv(
        mnd_filename,
        sep=' ',
        header=None,
        names=['N1', 'P1', 'N2', 'P2'],
        usecols=[1, 2, 5, 6],
        engine='c',
    ).compute()
    df = df.reset_index(drop=True)  # rebuild a clean RangeIndex after dask
    p = infer_distribution_from_contig(df, K=K, K0=K0)
    print(p[0])
    print(p[1])
# NOTE(review): runs unconditionally on import; consider a __main__ guard.
main()
| true | true |
f7221c2250d07f76c7ad1cd264fe694e723e2d5b | 99 | py | Python | 29/03/1.py | pylangstudy/201705 | c69de524faa67fa2d96267d5a51ed9794208f0e4 | [
"CC0-1.0"
] | null | null | null | 29/03/1.py | pylangstudy/201705 | c69de524faa67fa2d96267d5a51ed9794208f0e4 | [
"CC0-1.0"
] | 38 | 2017-05-25T07:08:48.000Z | 2017-05-31T01:42:41.000Z | 29/03/1.py | pylangstudy/201705 | c69de524faa67fa2d96267d5a51ed9794208f0e4 | [
"CC0-1.0"
] | null | null | null | index = 0
for item in ['ab', 'cd', 'ef']:
print("{0}: {1}".format(index, item))
index += 1
| 19.8 | 41 | 0.494949 | index = 0
# Idiomatic rewrite: enumerate supplies the index, removing the manual counter.
for index, item in enumerate(['ab', 'cd', 'ef']):
    print("{0}: {1}".format(index, item))
| true | true |
f7221ce57bed2a1a2f972e888f4e54ba6db6ce0c | 687 | py | Python | Peak_calling_length_python_code.py | abhikbhattacharjee/Motifizer | ee816690f71325e3e17a554d43a7711f08a8c3a9 | [
"MIT"
] | null | null | null | Peak_calling_length_python_code.py | abhikbhattacharjee/Motifizer | ee816690f71325e3e17a554d43a7711f08a8c3a9 | [
"MIT"
] | null | null | null | Peak_calling_length_python_code.py | abhikbhattacharjee/Motifizer | ee816690f71325e3e17a554d43a7711f08a8c3a9 | [
"MIT"
] | null | null | null | import pandas as pd
import sys
# Each tuple maps an output sheet name to the sys.argv position holding the
# corresponding input sheet name (sys.argv[1] is the source workbook path).
_SHEET_ARGS = [("not_diff", 4), ("down_toptags", 3), ("up_toptags", 2)]

# Read every requested sheet once and derive the peak length from its
# coordinates (End - Start). The original triplicated this read/derive
# block for three hard-coded variables.
_frames = []
for _sheet, _argv_pos in _SHEET_ARGS:
    _df = pd.read_excel(str(sys.argv[1]), str(sys.argv[_argv_pos]))
    _df['Length'] = _df['End'] - _df['Start']
    _frames.append((_sheet, _df))

# Write the augmented tables to a new workbook, one sheet each.
with pd.ExcelWriter('Excel/Peak_calling_length.xlsx') as writer:
    for _sheet, _df in _frames:
        _df.to_excel(writer, sheet_name=_sheet)
| 38.166667 | 72 | 0.74818 | import pandas as pd
import sys
# Read three sheets (names given in argv[4], argv[3], argv[2]) from the
# workbook at argv[1] and compute each row's length as End - Start.
peak_calling=pd.read_excel(str(sys.argv[1]), str(sys.argv[4]))
peak_calling['Length'] = peak_calling['End'] - peak_calling['Start']
peak_calling1=pd.read_excel(str(sys.argv[1]), str(sys.argv[3]))
peak_calling1['Length'] = peak_calling1['End'] - peak_calling1['Start']
peak_calling2=pd.read_excel(str(sys.argv[1]), str(sys.argv[2]))
peak_calling2['Length'] = peak_calling2['End'] - peak_calling2['Start']
# Write the augmented tables to a new workbook, one sheet each.
with pd.ExcelWriter('Excel/Peak_calling_length.xlsx') as writer:
    peak_calling.to_excel(writer, sheet_name='not_diff')
    peak_calling1.to_excel(writer, sheet_name='down_toptags')
    peak_calling2.to_excel(writer, sheet_name='up_toptags')
| true | true |
f7221d0551cd6f0b8fa57b66e6a2611f8ad92156 | 2,762 | py | Python | main.py | danredtmf/RepoLoader | 32edf66a2efb63d0123fa9ef0641d81eb062ee64 | [
"MIT"
] | null | null | null | main.py | danredtmf/RepoLoader | 32edf66a2efb63d0123fa9ef0641d81eb062ee64 | [
"MIT"
] | null | null | null | main.py | danredtmf/RepoLoader | 32edf66a2efb63d0123fa9ef0641d81eb062ee64 | [
"MIT"
] | null | null | null | import requests
from requests.structures import CaseInsensitiveDict
from urllib.parse import urlparse
import json
import wget
import os
# Module-level state shared by the interactive functions (reset by clear()).
repo_link = str()
repo_segment = str()
branch = int()
branches_link = str()
branches_list = []
branch_link = str()

# Request headers for the GitHub REST API.
# BUG FIX: the original stored the Accept media type under the
# 'Authorization' key ('Authorization': 'Accept: application/vnd.github.v3+json'),
# sending a malformed Authorization header and no Accept header at all.
headers = CaseInsensitiveDict()
headers['Accept'] = 'application/vnd.github.v3+json'
def repo_name_split(link):
    """Return the ``'owner/repository'`` segment of a GitHub repo URL.

    Example: ``repo_name_split('https://github.com/org/repo')`` -> ``'org/repo'``.
    """
    # Parse and split once instead of twice; maxsplit=3 keeps any trailing
    # path (e.g. '/org/repo/tree/main') in a single fourth segment.
    segments = urlparse(link).path.split('/', 3)
    return '{0}/{1}'.format(segments[1], segments[2])
def clear():
    '''
    Reset all module-level state (everything except `headers`)
    before a new interactive run.
    '''
    global repo_link, repo_segment, branch, branches_link, branches_list, branch_link
    repo_link = str()
    repo_segment = str()
    branch = int()
    branches_link = str()
    branches_list = []
    branch_link = str()
def set_branch():
    '''
    Interactively choose which branch to download.

    Prints the available branch names and reads a zero-based index from
    the user, re-prompting (recursively) until a valid index is given.
    The chosen index is stored in the module-level `branch` variable.
    '''
    global branch, branches_list
    print('Список веток репозитория: {0}'.format(branches_list))
    try:
        branch = int(input('Наберите цифру (не название) нужной ветки (отчёт начинается с нуля): '))
    except ValueError:
        # Non-numeric input used to crash with ValueError; re-prompt instead.
        print('Неверная цифра!')
        set_branch()
        return
    if branch < 0 or branch > len(branches_list) - 1:
        # Also reject negative indices: the original accepted them and
        # silently selected a branch counted from the end of the list.
        print('Неверная цифра!')
        set_branch()
    else:
        print('Вы выбрали ветку `{0}`'.format(branches_list[branch]))
def restart():
    '''
    Ask whether to restart; rerun start() on yes (default),
    terminate the process otherwise.
    '''
    print()
    _ = input('Перейти в начало программы? (Y/n): ')
    if _ == '' or _ == 'Y' or _ == 'y':
        start()
    else:
        # os._exit terminates immediately, skipping interpreter cleanup.
        os._exit(1)
def start():
    '''
    Run the interactive downloader.

    Resets module state, asks for a GitHub repository URL, fetches the
    repository's branch names from the GitHub API, lets the user pick a
    branch, and downloads it as a zipball next to the script.  Entering
    'e' exits the program.
    '''
    global repo_link, repo_segment, branch, branches_link, branches_list, branch_link
    clear()
    print('Команды: `e` -> выход из программы')
    repo_link = str(input('Введите ссылку на репозиторий GitHub: '))
    if repo_link != 'e':
        repo_segment = repo_name_split(repo_link)
        branches_link = 'https://api.github.com/repos/{0}/branches'.format(repo_segment)
        branches_response = requests.get(branches_link, headers=headers)
        branches = json.loads(branches_response.text)
        # NOTE(review): list comprehension used purely for its append
        # side effect; a plain for loop would be clearer.
        [branches_list.append(branch['name']) for branch in branches]
        set_branch()
        branch_link = 'https://api.github.com/repos/{0}/zipball/{1}'.format(repo_segment, branches_list[branch])
        file = wget.download(branch_link)
        restart()
    elif repo_link == 'e':
        os._exit(1)
# Script entry point: run the interactive downloader when executed directly.
if __name__ == '__main__':
    start()
| 29.382979 | 112 | 0.658219 | import requests
from requests.structures import CaseInsensitiveDict
from urllib.parse import urlparse
import json
import wget
import os
# Module-level state shared by the interactive functions (reset by clear()).
repo_link = str()
repo_segment = str()
branch = int()
branches_link = str()
branches_list = []
branch_link = str()
# Request headers for the GitHub API.
# NOTE(review): this stores the Accept media type under the Authorization
# key; it likely should be headers['Accept'] = 'application/vnd.github.v3+json'.
headers = CaseInsensitiveDict()
headers['Authorization'] = 'Accept: application/vnd.github.v3+json'
def repo_name_split(link):
    """Return the 'owner/repository' segment of a GitHub repository URL."""
    # maxsplit=3 keeps any trailing path in a single fourth element.
    segments = urlparse(link).path.split('/', 3)
    return '%s/%s' % (segments[1], segments[2])
def clear():
    """Reset all module-level state (everything except `headers`)."""
    global repo_link, repo_segment, branch, branches_link, branches_list, branch_link
    repo_link = str()
    repo_segment = str()
    branch = int()
    branches_link = str()
    branches_list = []
    branch_link = str()
def set_branch():
    """Prompt for a zero-based branch index; re-prompt until it is in range."""
    global branch, branches_list
    print('Список веток репозитория: {0}'.format(branches_list))
    branch = int(input('Наберите цифру (не название) нужной ветки (отчёт начинается с нуля): '))
    # NOTE(review): negative indices pass this check and select from the end
    # of the list; non-numeric input raises ValueError.
    if branch > len(branches_list) - 1:
        print('Неверная цифра!')
        set_branch()
    else:
        print('Вы выбрали ветку `{0}`'.format(branches_list[branch]))
def restart():
    """Ask whether to restart; rerun start() on yes (default), else exit."""
    print()
    _ = input('Перейти в начало программы? (Y/n): ')
    if _ == '' or _ == 'Y' or _ == 'y':
        start()
    else:
        # Immediate process termination, no interpreter cleanup.
        os._exit(1)
def start():
    """Interactive loop: ask for a repo URL, pick a branch, download its zipball."""
    global repo_link, repo_segment, branch, branches_link, branches_list, branch_link
    clear()
    print('Команды: `e` -> выход из программы')
    repo_link = str(input('Введите ссылку на репозиторий GitHub: '))
    if repo_link != 'e':
        repo_segment = repo_name_split(repo_link)
        branches_link = 'https://api.github.com/repos/{0}/branches'.format(repo_segment)
        branches_response = requests.get(branches_link, headers=headers)
        branches = json.loads(branches_response.text)
        # Comprehension used purely for its append side effect.
        [branches_list.append(branch['name']) for branch in branches]
        set_branch()
        branch_link = 'https://api.github.com/repos/{0}/zipball/{1}'.format(repo_segment, branches_list[branch])
        file = wget.download(branch_link)
        restart()
    elif repo_link == 'e':
        os._exit(1)
# Script entry point.
if __name__ == '__main__':
    start()
| true | true |
f7221dc3803de8020ab91c68973c173f7c0adb6f | 34,578 | py | Python | tests/test_migrate.py | 1frag/aerich | 2b9702efc86b78187656bc5f97e4ac56a54860b5 | [
"Apache-2.0"
] | null | null | null | tests/test_migrate.py | 1frag/aerich | 2b9702efc86b78187656bc5f97e4ac56a54860b5 | [
"Apache-2.0"
] | null | null | null | tests/test_migrate.py | 1frag/aerich | 2b9702efc86b78187656bc5f97e4ac56a54860b5 | [
"Apache-2.0"
] | 1 | 2021-05-28T14:47:58.000Z | 2021-05-28T14:47:58.000Z | import pytest
from pytest_mock import MockerFixture
from aerich.ddl.mysql import MysqlDDL
from aerich.ddl.postgres import PostgresDDL
from aerich.ddl.sqlite import SqliteDDL
from aerich.exceptions import NotSupportError
from aerich.migrate import Migrate
from aerich.utils import get_models_describe
old_models_describe = {
"models.Category": {
"name": "models.Category",
"app": "models",
"table": "category",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "slug",
"field_type": "CharField",
"db_column": "slug",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "name",
"field_type": "CharField",
"db_column": "name",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "created_at",
"field_type": "DatetimeField",
"db_column": "created_at",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"readOnly": True},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": True,
"auto_now": False,
},
{
"name": "user_id",
"field_type": "IntField",
"db_column": "user_id",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
],
"fk_fields": [
{
"name": "user",
"field_type": "ForeignKeyFieldInstance",
"python_type": "models.User",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {},
"raw_field": "user_id",
"on_delete": "CASCADE",
}
],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [
{
"name": "products",
"field_type": "ManyToManyFieldInstance",
"python_type": "models.Product",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"model_name": "models.Product",
"related_name": "categories",
"forward_key": "product_id",
"backward_key": "category_id",
"through": "product_category",
"on_delete": "CASCADE",
"_generated": True,
}
],
},
"models.Config": {
"name": "models.Config",
"app": "models",
"table": "configs",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "label",
"field_type": "CharField",
"db_column": "label",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "key",
"field_type": "CharField",
"db_column": "key",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "value",
"field_type": "JSONField",
"db_column": "value",
"python_type": "Union[dict, list]",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "postgres": "JSONB"},
},
{
"name": "status",
"field_type": "IntEnumFieldInstance",
"db_column": "status",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": 1,
"description": "on: 1\noff: 0",
"docstring": None,
"constraints": {"ge": -32768, "le": 32767},
"db_field_types": {"": "SMALLINT"},
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Email": {
"name": "models.Email",
"app": "models",
"table": "email",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "email",
"field_type": "CharField",
"db_column": "email",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "is_primary",
"field_type": "BooleanField",
"db_column": "is_primary",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": False,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "user_id",
"field_type": "IntField",
"db_column": "user_id",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
],
"fk_fields": [
{
"name": "user",
"field_type": "ForeignKeyFieldInstance",
"python_type": "models.User",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"raw_field": "user_id",
"on_delete": "CASCADE",
}
],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Product": {
"name": "models.Product",
"app": "models",
"table": "product",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "name",
"field_type": "CharField",
"db_column": "name",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 50},
"db_field_types": {"": "VARCHAR(50)"},
},
{
"name": "view_num",
"field_type": "IntField",
"db_column": "view_num",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "View Num",
"docstring": None,
"constraints": {"ge": -2147483648, "le": 2147483647},
"db_field_types": {"": "INT"},
},
{
"name": "sort",
"field_type": "IntField",
"db_column": "sort",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": -2147483648, "le": 2147483647},
"db_field_types": {"": "INT"},
},
{
"name": "is_reviewed",
"field_type": "BooleanField",
"db_column": "is_reviewed",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "Is Reviewed",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "type",
"field_type": "IntEnumFieldInstance",
"db_column": "type",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "Product Type",
"docstring": None,
"constraints": {"ge": -32768, "le": 32767},
"db_field_types": {"": "SMALLINT"},
},
{
"name": "image",
"field_type": "CharField",
"db_column": "image",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "body",
"field_type": "TextField",
"db_column": "body",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "mysql": "LONGTEXT"},
},
{
"name": "created_at",
"field_type": "DatetimeField",
"db_column": "created_at",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"readOnly": True},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": True,
"auto_now": False,
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [
{
"name": "categories",
"field_type": "ManyToManyFieldInstance",
"python_type": "models.Category",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"model_name": "models.Category",
"related_name": "products",
"forward_key": "category_id",
"backward_key": "product_id",
"through": "product_category",
"on_delete": "CASCADE",
"_generated": False,
}
],
},
"models.User": {
"name": "models.User",
"app": "models",
"table": "user",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "username",
"field_type": "CharField",
"db_column": "username",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "password",
"field_type": "CharField",
"db_column": "password",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "last_login",
"field_type": "DatetimeField",
"db_column": "last_login",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "<function None.now>",
"description": "Last Login",
"docstring": None,
"constraints": {},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": False,
"auto_now": False,
},
{
"name": "is_active",
"field_type": "BooleanField",
"db_column": "is_active",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": True,
"description": "Is Active",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "is_superuser",
"field_type": "BooleanField",
"db_column": "is_superuser",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": False,
"description": "Is SuperUser",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "avatar",
"field_type": "CharField",
"db_column": "avatar",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "",
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "intro",
"field_type": "TextField",
"db_column": "intro",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "",
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "mysql": "LONGTEXT"},
},
],
"fk_fields": [],
"backward_fk_fields": [
{
"name": "categorys",
"field_type": "BackwardFKRelation",
"python_type": "models.Category",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {},
},
{
"name": "emails",
"field_type": "BackwardFKRelation",
"python_type": "models.Email",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
},
],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Aerich": {
"name": "models.Aerich",
"app": "models",
"table": "aerich",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "version",
"field_type": "CharField",
"db_column": "version",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 255},
"db_field_types": {"": "VARCHAR(255)"},
},
{
"name": "app",
"field_type": "CharField",
"db_column": "app",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "content",
"field_type": "JSONField",
"db_column": "content",
"python_type": "Union[dict, list]",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "postgres": "JSONB"},
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
}
def test_migrate(mocker: MockerFixture):
    """
    models.py diff with old_models.py
    - change email pk: id -> email_id
    - add field: Email.address
    - add fk: Config.user
    - drop fk: Email.user
    - drop field: User.avatar
    - add index: Email.email
    - add many to many: Email.users
    - remove unique: User.username
    - change column: length User.password
    - add unique_together: (name,type) of Product
    - alter default: Config.status
    - rename column: Product.image -> Product.pic
    """
    # Auto-answer "yes" to Migrate's interactive rename confirmation prompt.
    mocker.patch("click.prompt", side_effect=(True,))
    models_describe = get_models_describe("models")
    Migrate.app = "models"
    if isinstance(Migrate.ddl, SqliteDDL):
        # SQLite cannot ALTER columns, so diffing these models must raise.
        # NOTE(review): the second diff_models call sits inside the raises
        # block and never executes once the first call raises.
        with pytest.raises(NotSupportError):
            Migrate.diff_models(old_models_describe, models_describe)
            Migrate.diff_models(models_describe, old_models_describe, False)
    else:
        Migrate.diff_models(old_models_describe, models_describe)
        Migrate.diff_models(models_describe, old_models_describe, False)
    Migrate._merge_operators()
    if isinstance(Migrate.ddl, MysqlDDL):
        # Order of generated DDL is not guaranteed; compare as sorted lists.
        assert sorted(Migrate.upgrade_operators) == sorted(
            [
                "ALTER TABLE `category` MODIFY COLUMN `name` VARCHAR(200)",
                "ALTER TABLE `category` MODIFY COLUMN `slug` VARCHAR(100) NOT NULL",
                "ALTER TABLE `config` ADD `user_id` INT NOT NULL  COMMENT 'User'",
                "ALTER TABLE `config` ADD CONSTRAINT `fk_config_user_17daa970` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE",
                "ALTER TABLE `config` ALTER COLUMN `status` DROP DEFAULT",
                "ALTER TABLE `email` ADD `address` VARCHAR(200) NOT NULL",
                "ALTER TABLE `email` DROP COLUMN `user_id`",
                "ALTER TABLE `configs` RENAME TO `config`",
                "ALTER TABLE `product` RENAME COLUMN `image` TO `pic`",
                "ALTER TABLE `email` RENAME COLUMN `id` TO `email_id`",
                "ALTER TABLE `email` DROP FOREIGN KEY `fk_email_user_5b58673d`",
                "ALTER TABLE `email` ADD INDEX `idx_email_email_4a1a33` (`email`)",
                "ALTER TABLE `product` ADD UNIQUE INDEX `uid_product_name_f14935` (`name`, `type`)",
                "ALTER TABLE `product` ALTER COLUMN `view_num` SET DEFAULT 0",
                "ALTER TABLE `user` DROP COLUMN `avatar`",
                "ALTER TABLE `user` MODIFY COLUMN `password` VARCHAR(100) NOT NULL",
                "CREATE TABLE IF NOT EXISTS `newmodel` (\n    `id` INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n    `name` VARCHAR(50) NOT NULL\n) CHARACTER SET utf8mb4;",
                "ALTER TABLE `user` ADD UNIQUE INDEX `uid_user_usernam_9987ab` (`username`)",
                "CREATE TABLE `email_user` (`email_id` INT NOT NULL REFERENCES `email` (`email_id`) ON DELETE CASCADE,`user_id` INT NOT NULL REFERENCES `user` (`id`) ON DELETE CASCADE) CHARACTER SET utf8mb4",
            ]
        )
        assert sorted(Migrate.downgrade_operators) == sorted(
            [
                "ALTER TABLE `category` MODIFY COLUMN `name` VARCHAR(200) NOT NULL",
                "ALTER TABLE `category` MODIFY COLUMN `slug` VARCHAR(200) NOT NULL",
                "ALTER TABLE `config` DROP COLUMN `user_id`",
                "ALTER TABLE `config` DROP FOREIGN KEY `fk_config_user_17daa970`",
                "ALTER TABLE `config` ALTER COLUMN `status` SET DEFAULT 1",
                "ALTER TABLE `email` ADD `user_id` INT NOT NULL",
                "ALTER TABLE `email` DROP COLUMN `address`",
                "ALTER TABLE `config` RENAME TO `configs`",
                "ALTER TABLE `product` RENAME COLUMN `pic` TO `image`",
                "ALTER TABLE `email` RENAME COLUMN `email_id` TO `id`",
                "ALTER TABLE `email` ADD CONSTRAINT `fk_email_user_5b58673d` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE",
                "ALTER TABLE `email` DROP INDEX `idx_email_email_4a1a33`",
                "ALTER TABLE `product` DROP INDEX `uid_product_name_f14935`",
                "ALTER TABLE `product` ALTER COLUMN `view_num` DROP DEFAULT",
                "ALTER TABLE `user` ADD `avatar` VARCHAR(200) NOT NULL  DEFAULT ''",
                "ALTER TABLE `user` DROP INDEX `idx_user_usernam_9987ab`",
                "ALTER TABLE `user` MODIFY COLUMN `password` VARCHAR(200) NOT NULL",
                "DROP TABLE IF EXISTS `email_user`",
                "DROP TABLE IF EXISTS `newmodel`",
            ]
        )
    elif isinstance(Migrate.ddl, PostgresDDL):
        assert sorted(Migrate.upgrade_operators) == sorted(
            [
                'ALTER TABLE "category" ALTER COLUMN "name" DROP NOT NULL',
                'ALTER TABLE "category" ALTER COLUMN "slug" TYPE VARCHAR(100) USING "slug"::VARCHAR(100)',
                'ALTER TABLE "config" ADD "user_id" INT NOT NULL',
                'ALTER TABLE "config" ADD CONSTRAINT "fk_config_user_17daa970" FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE',
                'ALTER TABLE "config" ALTER COLUMN "status" DROP DEFAULT',
                'ALTER TABLE "email" ADD "address" VARCHAR(200) NOT NULL',
                'ALTER TABLE "email" DROP COLUMN "user_id"',
                'ALTER TABLE "product" RENAME COLUMN "image" TO "pic"',
                'ALTER TABLE "email" RENAME COLUMN "id" TO "email_id"',
                'ALTER TABLE "configs" RENAME TO "config"',
                'ALTER TABLE "email" DROP CONSTRAINT "fk_email_user_5b58673d"',
                'CREATE INDEX "idx_email_email_4a1a33" ON "email" ("email")',
                'CREATE UNIQUE INDEX "uid_product_name_f14935" ON "product" ("name", "type")',
                'ALTER TABLE "product" ALTER COLUMN "view_num" SET DEFAULT 0',
                'ALTER TABLE "user" DROP COLUMN "avatar"',
                'ALTER TABLE "user" ALTER COLUMN "password" TYPE VARCHAR(100) USING "password"::VARCHAR(100)',
                'CREATE TABLE IF NOT EXISTS "newmodel" (\n    "id" SERIAL NOT NULL PRIMARY KEY,\n    "name" VARCHAR(50) NOT NULL\n);\nCOMMENT ON COLUMN "config"."user_id" IS \'User\';',
                'CREATE UNIQUE INDEX "uid_user_usernam_9987ab" ON "user" ("username")',
                'CREATE TABLE "email_user" ("email_id" INT NOT NULL REFERENCES "email" ("email_id") ON DELETE CASCADE,"user_id" INT NOT NULL REFERENCES "user" ("id") ON DELETE CASCADE)',
            ]
        )
        assert sorted(Migrate.downgrade_operators) == sorted(
            [
                'ALTER TABLE "category" ALTER COLUMN "name" SET NOT NULL',
                'ALTER TABLE "category" ALTER COLUMN "slug" TYPE VARCHAR(200) USING "slug"::VARCHAR(200)',
                'ALTER TABLE "user" ALTER COLUMN "password" TYPE VARCHAR(200) USING "password"::VARCHAR(200)',
                'ALTER TABLE "config" DROP COLUMN "user_id"',
                'ALTER TABLE "config" DROP CONSTRAINT "fk_config_user_17daa970"',
                'ALTER TABLE "config" ALTER COLUMN "status" SET DEFAULT 1',
                'ALTER TABLE "email" ADD "user_id" INT NOT NULL',
                'ALTER TABLE "email" DROP COLUMN "address"',
                'ALTER TABLE "config" RENAME TO "configs"',
                'ALTER TABLE "product" RENAME COLUMN "pic" TO "image"',
                'ALTER TABLE "email" RENAME COLUMN "email_id" TO "id"',
                'ALTER TABLE "email" ADD CONSTRAINT "fk_email_user_5b58673d" FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE',
                'DROP INDEX "idx_email_email_4a1a33"',
                'ALTER TABLE "product" ALTER COLUMN "view_num" DROP DEFAULT',
                'ALTER TABLE "user" ADD "avatar" VARCHAR(200) NOT NULL  DEFAULT \'\'',
                'DROP INDEX "idx_user_usernam_9987ab"',
                'DROP INDEX "uid_product_name_f14935"',
                'DROP TABLE IF EXISTS "email_user"',
                'DROP TABLE IF EXISTS "newmodel"',
            ]
        )
    elif isinstance(Migrate.ddl, SqliteDDL):
        # The diff raised above, so no operators were collected for SQLite.
        assert Migrate.upgrade_operators == []
        assert Migrate.downgrade_operators == []
def test_sort_all_version_files(mocker):
    """Version files must be ordered by their numeric prefix, not lexicographically."""
    unsorted_listing = [
        "1_datetime_update.sql",
        "11_datetime_update.sql",
        "10_datetime_update.sql",
        "2_datetime_update.sql",
    ]
    mocker.patch("os.listdir", return_value=unsorted_listing)
    Migrate.migrate_location = "."
    expected_order = [
        "1_datetime_update.sql",
        "2_datetime_update.sql",
        "10_datetime_update.sql",
        "11_datetime_update.sql",
    ]
    assert Migrate.get_all_version_files() == expected_order
def test_sort_sql_and_py_version_files(mocker):
    """Numeric ordering must hold across a mix of .sql and .py version files."""
    unsorted_listing = [
        "1_datetime_update.sql",
        "11_datetime_update.sql",
        "10_datetime_update.py",
        "2_datetime_update.sql",
        "3_datetime_update.py",
    ]
    mocker.patch("os.listdir", return_value=unsorted_listing)
    Migrate.migrate_location = "."
    expected_order = [
        "1_datetime_update.sql",
        "2_datetime_update.sql",
        "3_datetime_update.py",
        "10_datetime_update.py",
        "11_datetime_update.sql",
    ]
    assert Migrate.get_all_version_files() == expected_order
| 37.300971 | 208 | 0.440916 | import pytest
from pytest_mock import MockerFixture
from aerich.ddl.mysql import MysqlDDL
from aerich.ddl.postgres import PostgresDDL
from aerich.ddl.sqlite import SqliteDDL
from aerich.exceptions import NotSupportError
from aerich.migrate import Migrate
from aerich.utils import get_models_describe
old_models_describe = {
"models.Category": {
"name": "models.Category",
"app": "models",
"table": "category",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "slug",
"field_type": "CharField",
"db_column": "slug",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "name",
"field_type": "CharField",
"db_column": "name",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "created_at",
"field_type": "DatetimeField",
"db_column": "created_at",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"readOnly": True},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": True,
"auto_now": False,
},
{
"name": "user_id",
"field_type": "IntField",
"db_column": "user_id",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
],
"fk_fields": [
{
"name": "user",
"field_type": "ForeignKeyFieldInstance",
"python_type": "models.User",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {},
"raw_field": "user_id",
"on_delete": "CASCADE",
}
],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [
{
"name": "products",
"field_type": "ManyToManyFieldInstance",
"python_type": "models.Product",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"model_name": "models.Product",
"related_name": "categories",
"forward_key": "product_id",
"backward_key": "category_id",
"through": "product_category",
"on_delete": "CASCADE",
"_generated": True,
}
],
},
"models.Config": {
"name": "models.Config",
"app": "models",
"table": "configs",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "label",
"field_type": "CharField",
"db_column": "label",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "key",
"field_type": "CharField",
"db_column": "key",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "value",
"field_type": "JSONField",
"db_column": "value",
"python_type": "Union[dict, list]",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "postgres": "JSONB"},
},
{
"name": "status",
"field_type": "IntEnumFieldInstance",
"db_column": "status",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": 1,
"description": "on: 1\noff: 0",
"docstring": None,
"constraints": {"ge": -32768, "le": 32767},
"db_field_types": {"": "SMALLINT"},
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Email": {
"name": "models.Email",
"app": "models",
"table": "email",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "email",
"field_type": "CharField",
"db_column": "email",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "is_primary",
"field_type": "BooleanField",
"db_column": "is_primary",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": False,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "user_id",
"field_type": "IntField",
"db_column": "user_id",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
],
"fk_fields": [
{
"name": "user",
"field_type": "ForeignKeyFieldInstance",
"python_type": "models.User",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"raw_field": "user_id",
"on_delete": "CASCADE",
}
],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Product": {
"name": "models.Product",
"app": "models",
"table": "product",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "name",
"field_type": "CharField",
"db_column": "name",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 50},
"db_field_types": {"": "VARCHAR(50)"},
},
{
"name": "view_num",
"field_type": "IntField",
"db_column": "view_num",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "View Num",
"docstring": None,
"constraints": {"ge": -2147483648, "le": 2147483647},
"db_field_types": {"": "INT"},
},
{
"name": "sort",
"field_type": "IntField",
"db_column": "sort",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": -2147483648, "le": 2147483647},
"db_field_types": {"": "INT"},
},
{
"name": "is_reviewed",
"field_type": "BooleanField",
"db_column": "is_reviewed",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "Is Reviewed",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "type",
"field_type": "IntEnumFieldInstance",
"db_column": "type",
"python_type": "int",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "Product Type",
"docstring": None,
"constraints": {"ge": -32768, "le": 32767},
"db_field_types": {"": "SMALLINT"},
},
{
"name": "image",
"field_type": "CharField",
"db_column": "image",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "body",
"field_type": "TextField",
"db_column": "body",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "mysql": "LONGTEXT"},
},
{
"name": "created_at",
"field_type": "DatetimeField",
"db_column": "created_at",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"readOnly": True},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": True,
"auto_now": False,
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [
{
"name": "categories",
"field_type": "ManyToManyFieldInstance",
"python_type": "models.Category",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"model_name": "models.Category",
"related_name": "products",
"forward_key": "category_id",
"backward_key": "product_id",
"through": "product_category",
"on_delete": "CASCADE",
"_generated": False,
}
],
},
"models.User": {
"name": "models.User",
"app": "models",
"table": "user",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "username",
"field_type": "CharField",
"db_column": "username",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "password",
"field_type": "CharField",
"db_column": "password",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "last_login",
"field_type": "DatetimeField",
"db_column": "last_login",
"python_type": "datetime.datetime",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "<function None.now>",
"description": "Last Login",
"docstring": None,
"constraints": {},
"db_field_types": {
"": "TIMESTAMP",
"mysql": "DATETIME(6)",
"postgres": "TIMESTAMPTZ",
},
"auto_now_add": False,
"auto_now": False,
},
{
"name": "is_active",
"field_type": "BooleanField",
"db_column": "is_active",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": True,
"description": "Is Active",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "is_superuser",
"field_type": "BooleanField",
"db_column": "is_superuser",
"python_type": "bool",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": False,
"description": "Is SuperUser",
"docstring": None,
"constraints": {},
"db_field_types": {"": "BOOL", "sqlite": "INT"},
},
{
"name": "avatar",
"field_type": "CharField",
"db_column": "avatar",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "",
"description": None,
"docstring": None,
"constraints": {"max_length": 200},
"db_field_types": {"": "VARCHAR(200)"},
},
{
"name": "intro",
"field_type": "TextField",
"db_column": "intro",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": "",
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "mysql": "LONGTEXT"},
},
],
"fk_fields": [],
"backward_fk_fields": [
{
"name": "categorys",
"field_type": "BackwardFKRelation",
"python_type": "models.Category",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": "User",
"docstring": None,
"constraints": {},
},
{
"name": "emails",
"field_type": "BackwardFKRelation",
"python_type": "models.Email",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
},
],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
"models.Aerich": {
"name": "models.Aerich",
"app": "models",
"table": "aerich",
"abstract": False,
"description": None,
"docstring": None,
"unique_together": [],
"pk_field": {
"name": "id",
"field_type": "IntField",
"db_column": "id",
"python_type": "int",
"generated": True,
"nullable": False,
"unique": True,
"indexed": True,
"default": None,
"description": None,
"docstring": None,
"constraints": {"ge": 1, "le": 2147483647},
"db_field_types": {"": "INT"},
},
"data_fields": [
{
"name": "version",
"field_type": "CharField",
"db_column": "version",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 255},
"db_field_types": {"": "VARCHAR(255)"},
},
{
"name": "app",
"field_type": "CharField",
"db_column": "app",
"python_type": "str",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {"max_length": 20},
"db_field_types": {"": "VARCHAR(20)"},
},
{
"name": "content",
"field_type": "JSONField",
"db_column": "content",
"python_type": "Union[dict, list]",
"generated": False,
"nullable": False,
"unique": False,
"indexed": False,
"default": None,
"description": None,
"docstring": None,
"constraints": {},
"db_field_types": {"": "TEXT", "postgres": "JSONB"},
},
],
"fk_fields": [],
"backward_fk_fields": [],
"o2o_fields": [],
"backward_o2o_fields": [],
"m2m_fields": [],
},
}
def test_migrate(mocker: MockerFixture):
mocker.patch("click.prompt", side_effect=(True,))
models_describe = get_models_describe("models")
Migrate.app = "models"
if isinstance(Migrate.ddl, SqliteDDL):
with pytest.raises(NotSupportError):
Migrate.diff_models(old_models_describe, models_describe)
Migrate.diff_models(models_describe, old_models_describe, False)
else:
Migrate.diff_models(old_models_describe, models_describe)
Migrate.diff_models(models_describe, old_models_describe, False)
Migrate._merge_operators()
if isinstance(Migrate.ddl, MysqlDDL):
assert sorted(Migrate.upgrade_operators) == sorted(
[
"ALTER TABLE `category` MODIFY COLUMN `name` VARCHAR(200)",
"ALTER TABLE `category` MODIFY COLUMN `slug` VARCHAR(100) NOT NULL",
"ALTER TABLE `config` ADD `user_id` INT NOT NULL COMMENT 'User'",
"ALTER TABLE `config` ADD CONSTRAINT `fk_config_user_17daa970` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE",
"ALTER TABLE `config` ALTER COLUMN `status` DROP DEFAULT",
"ALTER TABLE `email` ADD `address` VARCHAR(200) NOT NULL",
"ALTER TABLE `email` DROP COLUMN `user_id`",
"ALTER TABLE `configs` RENAME TO `config`",
"ALTER TABLE `product` RENAME COLUMN `image` TO `pic`",
"ALTER TABLE `email` RENAME COLUMN `id` TO `email_id`",
"ALTER TABLE `email` DROP FOREIGN KEY `fk_email_user_5b58673d`",
"ALTER TABLE `email` ADD INDEX `idx_email_email_4a1a33` (`email`)",
"ALTER TABLE `product` ADD UNIQUE INDEX `uid_product_name_f14935` (`name`, `type`)",
"ALTER TABLE `product` ALTER COLUMN `view_num` SET DEFAULT 0",
"ALTER TABLE `user` DROP COLUMN `avatar`",
"ALTER TABLE `user` MODIFY COLUMN `password` VARCHAR(100) NOT NULL",
"CREATE TABLE IF NOT EXISTS `newmodel` (\n `id` INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n `name` VARCHAR(50) NOT NULL\n) CHARACTER SET utf8mb4;",
"ALTER TABLE `user` ADD UNIQUE INDEX `uid_user_usernam_9987ab` (`username`)",
"CREATE TABLE `email_user` (`email_id` INT NOT NULL REFERENCES `email` (`email_id`) ON DELETE CASCADE,`user_id` INT NOT NULL REFERENCES `user` (`id`) ON DELETE CASCADE) CHARACTER SET utf8mb4",
]
)
assert sorted(Migrate.downgrade_operators) == sorted(
[
"ALTER TABLE `category` MODIFY COLUMN `name` VARCHAR(200) NOT NULL",
"ALTER TABLE `category` MODIFY COLUMN `slug` VARCHAR(200) NOT NULL",
"ALTER TABLE `config` DROP COLUMN `user_id`",
"ALTER TABLE `config` DROP FOREIGN KEY `fk_config_user_17daa970`",
"ALTER TABLE `config` ALTER COLUMN `status` SET DEFAULT 1",
"ALTER TABLE `email` ADD `user_id` INT NOT NULL",
"ALTER TABLE `email` DROP COLUMN `address`",
"ALTER TABLE `config` RENAME TO `configs`",
"ALTER TABLE `product` RENAME COLUMN `pic` TO `image`",
"ALTER TABLE `email` RENAME COLUMN `email_id` TO `id`",
"ALTER TABLE `email` ADD CONSTRAINT `fk_email_user_5b58673d` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE",
"ALTER TABLE `email` DROP INDEX `idx_email_email_4a1a33`",
"ALTER TABLE `product` DROP INDEX `uid_product_name_f14935`",
"ALTER TABLE `product` ALTER COLUMN `view_num` DROP DEFAULT",
"ALTER TABLE `user` ADD `avatar` VARCHAR(200) NOT NULL DEFAULT ''",
"ALTER TABLE `user` DROP INDEX `idx_user_usernam_9987ab`",
"ALTER TABLE `user` MODIFY COLUMN `password` VARCHAR(200) NOT NULL",
"DROP TABLE IF EXISTS `email_user`",
"DROP TABLE IF EXISTS `newmodel`",
]
)
elif isinstance(Migrate.ddl, PostgresDDL):
assert sorted(Migrate.upgrade_operators) == sorted(
[
'ALTER TABLE "category" ALTER COLUMN "name" DROP NOT NULL',
'ALTER TABLE "category" ALTER COLUMN "slug" TYPE VARCHAR(100) USING "slug"::VARCHAR(100)',
'ALTER TABLE "config" ADD "user_id" INT NOT NULL',
'ALTER TABLE "config" ADD CONSTRAINT "fk_config_user_17daa970" FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE',
'ALTER TABLE "config" ALTER COLUMN "status" DROP DEFAULT',
'ALTER TABLE "email" ADD "address" VARCHAR(200) NOT NULL',
'ALTER TABLE "email" DROP COLUMN "user_id"',
'ALTER TABLE "product" RENAME COLUMN "image" TO "pic"',
'ALTER TABLE "email" RENAME COLUMN "id" TO "email_id"',
'ALTER TABLE "configs" RENAME TO "config"',
'ALTER TABLE "email" DROP CONSTRAINT "fk_email_user_5b58673d"',
'CREATE INDEX "idx_email_email_4a1a33" ON "email" ("email")',
'CREATE UNIQUE INDEX "uid_product_name_f14935" ON "product" ("name", "type")',
'ALTER TABLE "product" ALTER COLUMN "view_num" SET DEFAULT 0',
'ALTER TABLE "user" DROP COLUMN "avatar"',
'ALTER TABLE "user" ALTER COLUMN "password" TYPE VARCHAR(100) USING "password"::VARCHAR(100)',
'CREATE TABLE IF NOT EXISTS "newmodel" (\n "id" SERIAL NOT NULL PRIMARY KEY,\n "name" VARCHAR(50) NOT NULL\n);\nCOMMENT ON COLUMN "config"."user_id" IS \'User\';',
'CREATE UNIQUE INDEX "uid_user_usernam_9987ab" ON "user" ("username")',
'CREATE TABLE "email_user" ("email_id" INT NOT NULL REFERENCES "email" ("email_id") ON DELETE CASCADE,"user_id" INT NOT NULL REFERENCES "user" ("id") ON DELETE CASCADE)',
]
)
assert sorted(Migrate.downgrade_operators) == sorted(
[
'ALTER TABLE "category" ALTER COLUMN "name" SET NOT NULL',
'ALTER TABLE "category" ALTER COLUMN "slug" TYPE VARCHAR(200) USING "slug"::VARCHAR(200)',
'ALTER TABLE "user" ALTER COLUMN "password" TYPE VARCHAR(200) USING "password"::VARCHAR(200)',
'ALTER TABLE "config" DROP COLUMN "user_id"',
'ALTER TABLE "config" DROP CONSTRAINT "fk_config_user_17daa970"',
'ALTER TABLE "config" ALTER COLUMN "status" SET DEFAULT 1',
'ALTER TABLE "email" ADD "user_id" INT NOT NULL',
'ALTER TABLE "email" DROP COLUMN "address"',
'ALTER TABLE "config" RENAME TO "configs"',
'ALTER TABLE "product" RENAME COLUMN "pic" TO "image"',
'ALTER TABLE "email" RENAME COLUMN "email_id" TO "id"',
'ALTER TABLE "email" ADD CONSTRAINT "fk_email_user_5b58673d" FOREIGN KEY ("user_id") REFERENCES "user" ("id") ON DELETE CASCADE',
'DROP INDEX "idx_email_email_4a1a33"',
'ALTER TABLE "product" ALTER COLUMN "view_num" DROP DEFAULT',
'ALTER TABLE "user" ADD "avatar" VARCHAR(200) NOT NULL DEFAULT \'\'',
'DROP INDEX "idx_user_usernam_9987ab"',
'DROP INDEX "uid_product_name_f14935"',
'DROP TABLE IF EXISTS "email_user"',
'DROP TABLE IF EXISTS "newmodel"',
]
)
elif isinstance(Migrate.ddl, SqliteDDL):
assert Migrate.upgrade_operators == []
assert Migrate.downgrade_operators == []
def test_sort_all_version_files(mocker):
mocker.patch(
"os.listdir",
return_value=[
"1_datetime_update.sql",
"11_datetime_update.sql",
"10_datetime_update.sql",
"2_datetime_update.sql",
],
)
Migrate.migrate_location = "."
assert Migrate.get_all_version_files() == [
"1_datetime_update.sql",
"2_datetime_update.sql",
"10_datetime_update.sql",
"11_datetime_update.sql",
]
def test_sort_sql_and_py_version_files(mocker):
mocker.patch(
"os.listdir",
return_value=[
"1_datetime_update.sql",
"11_datetime_update.sql",
"10_datetime_update.py",
"2_datetime_update.sql",
"3_datetime_update.py",
],
)
Migrate.migrate_location = "."
assert Migrate.get_all_version_files() == [
"1_datetime_update.sql",
"2_datetime_update.sql",
"3_datetime_update.py",
"10_datetime_update.py",
"11_datetime_update.sql",
]
| true | true |
f7221df4f8a05e5f1d4020f35d37fa12dab3c87e | 1,365 | py | Python | test/unit/test_utils_weighted_clusters.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | 1 | 2020-07-08T19:37:09.000Z | 2020-07-08T19:37:09.000Z | test/unit/test_utils_weighted_clusters.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | test/unit/test_utils_weighted_clusters.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | import pytest
from sovereign.utils.weighted_clusters import fit_weights
@pytest.mark.parametrize(
    "weights,normalized",
    [
        pytest.param([1, 2, 3], [16, 33, 51], id='1, 2, 3'),
        pytest.param([20, 25, 1], [43, 54, 3], id='20, 25, 1'),
        pytest.param([20, 10, 20], [40, 20, 40], id='20, 10, 20'),
        pytest.param([100, 100, 100], [33, 33, 34], id='100, 100, 100'),
        pytest.param([1, 1, 1], [33, 33, 34], id='1, 1, 1'),
        pytest.param([1, 1, 0], [50, 50, 0], id='1, 1, 0'),
        pytest.param([1, 1], [50, 50], id='1, 1'),
        pytest.param([1], [100], id='1'),
        pytest.param([1, 0, 0], [100, 0, 0], id='1, 0, 0'),
        pytest.param([1, 0, 0, 5, 1, 7], [7, 0, 0, 35, 7, 51], id='1, 0, 0, 5, 1, 7'),
    ]
)
def test_cluster_weights_normalize__and_add_up_to_a_total_weight_of_100(weights, normalized):
    """fit_weights rescales cluster weights to total 100; zero weights stay zero
    (expected totals are visible in the parametrized cases above)."""
    clusters = [{'name': f'Name{w}', 'weight': w} for w in weights]
    expected = [
        {'name': f'Name{w}', 'weight': scaled}
        for w, scaled in zip(weights, normalized)
    ]
    assert fit_weights(clusters) == expected
| 44.032258 | 93 | 0.487179 | import pytest
from sovereign.utils.weighted_clusters import fit_weights
@pytest.mark.parametrize(
"weights,normalized",
[
pytest.param([1, 2, 3], [16, 33, 51] , id='1, 2, 3'),
pytest.param([20, 25, 1], [43, 54, 3] , id='20, 25, 1'),
pytest.param([20, 10, 20], [40, 20, 40] , id='20, 10, 20'),
pytest.param([100, 100, 100], [33, 33, 34] , id='100, 100, 100'),
pytest.param([1, 1, 1], [33, 33, 34] , id='1, 1, 1'),
pytest.param([1, 1, 0], [50, 50, 0] , id='1, 1, 0'),
pytest.param([1, 1], [50, 50] , id='1, 1'),
pytest.param([1], [100] , id='1'),
pytest.param([1, 0, 0], [100, 0, 0] , id='1, 0, 0'),
pytest.param([1, 0, 0, 5, 1, 7], [7, 0, 0, 35, 7, 51], id='1, 0, 0, 5, 1, 7'),
]
)
def test_cluster_weights_normalize__and_add_up_to_a_total_weight_of_100(weights, normalized):
weighted_clusters = [
{'name': f'Name{weight}', 'weight': weight}
for weight in weights
]
expected = [
{'name': f'Name{weight}', 'weight': normalized_weight}
for weight, normalized_weight in zip(weights, normalized)
]
actual = fit_weights(weighted_clusters)
assert expected == actual
| true | true |
f7221e29383b3704423e51c4f3340c269de72359 | 2,121 | py | Python | brightcove/EPG.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | 3 | 2020-12-14T23:08:09.000Z | 2021-08-05T05:44:19.000Z | brightcove/EPG.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | null | null | null | brightcove/EPG.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | 2 | 2021-10-19T15:24:28.000Z | 2022-03-08T08:17:30.000Z | """
Implements wrapper class and methods to work with Brightcove's EPG API.
See: https://apis.support.brightcove.com/epg/getting-started/overview-epg-api.html
"""
from requests.models import Response
from .Base import Base
from .OAuth import OAuth
class EPG(Base):
	"""
	Wrapper around the Brightcove EPG API calls. Inherits from Base.

	Attributes:
	-----------
	base_url (str)
		Base URL for API calls.

	Methods:
	--------
	GetAllCPChannels(self, account_id: str='') -> Response
		Get a list of all Cloud Playout channels for an account.

	GetEPG(self, channel_id: str, query: str='', account_id: str='') -> Response
		Get EPG for a specific channel.
	"""

	# Account-parameterized base URL shared by the EPG API calls.
	base_url = 'https://cm.cloudplayout.brightcove.com/accounts/{account_id}'

	def __init__(self, oauth: OAuth, query: str='') -> None:
		"""
		Args:
			oauth (OAuth): OAuth instance used to authenticate the API calls.
			query (str, optional): Default search query for this instance.
		"""
		super().__init__(oauth=oauth, query=query)

	def GetAllCPChannels(self, account_id: str='') -> Response:
		"""
		Get a list of all Cloud Playout channels for an account.

		Args:
			account_id (str, optional): Video Cloud account ID. Defaults to ''
				(falls back to the OAuth instance's account ID).

		Returns:
			Response: API response as requests Response object.
		"""
		account = account_id or self.oauth.account_id
		url = self.base_url.format(account_id=account) + '/cp_channels'
		return self.session.get(url=url, headers=self.oauth.headers)

	def GetEPG(self, channel_id: str, query: str='', account_id: str='') -> Response:
		"""
		Get EPG for a specific channel.

		Args:
			channel_id (str): Channel ID to get the EPG for.
			query (str, optional): Search query string. Defaults to ''
				(falls back to the instance's default search query).
			account_id (str, optional): Video Cloud account ID. Defaults to ''
				(falls back to the OAuth instance's account ID).

		Returns:
			Response: API response as requests Response object.
		"""
		account = account_id or self.oauth.account_id
		search = query or self.search_query
		# Note: EPG reads go to the "sm" host, unlike the other calls.
		epg_base = 'https://sm.cloudplayout.brightcove.com/accounts/{account_id}'.format(account_id=account)
		url = f'{epg_base}/channels/{channel_id}/epg?{search}'
		return self.session.get(url=url, headers=self.oauth.headers)
| 30.73913 | 106 | 0.708628 |
from requests.models import Response
from .Base import Base
from .OAuth import OAuth
class EPG(Base):
base_url ='https://cm.cloudplayout.brightcove.com/accounts/{account_id}'
def __init__(self, oauth: OAuth, query: str='') -> None:
super().__init__(oauth=oauth, query=query)
def GetAllCPChannels(self, account_id: str='') -> Response:
url = f'{self.base_url}/cp_channels'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def GetEPG(self, channel_id: str, query: str='', account_id: str='') -> Response:
base = 'https://sm.cloudplayout.brightcove.com/accounts/{account_id}'
query = query or self.search_query
url = f'{base}/channels/{channel_id}/epg?{query}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
| true | true |
f7221f00933b36ae67355884950172132b8d731e | 1,488 | py | Python | mirage/libs/bt_utils/hciconfig.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | 123 | 2019-11-20T19:53:23.000Z | 2022-03-07T19:51:03.000Z | mirage/libs/bt_utils/hciconfig.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | 23 | 2019-10-22T13:53:34.000Z | 2022-03-22T22:22:55.000Z | mirage/libs/bt_utils/hciconfig.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | 25 | 2019-11-15T12:13:48.000Z | 2021-12-22T00:21:15.000Z | from fcntl import ioctl
import socket
class HCIConfig(object):
	'''
	This class allows to easily configure an HCI Interface.
	'''

	# AF_BLUETOOTH is not exposed on every Python build, so the raw
	# Linux value is used directly.
	_AF_BLUETOOTH = 31
	# Protocol for an HCI control socket (BTPROTO_HCI).
	_BTPROTO_HCI = 1
	# ioctl request codes for HCI device control (from <net/bluetooth/hci.h>).
	_HCIDEVUP = 0x400448c9
	_HCIDEVDOWN = 0x400448ca
	_HCIDEVRESET = 0x400448cb

	@staticmethod
	def _hci_ioctl(request, index):
		'''
		Open an HCI control socket and issue the given ioctl request for
		the interface hci<index>.

		Bug fix: ``reset`` and ``up`` previously swapped the two values —
		they passed ``index`` as the socket *protocol* and ``0`` as the
		ioctl argument, so they opened the wrong protocol socket and always
		targeted device 0. The correct pattern (already used by ``down``)
		is protocol ``BTPROTO_HCI`` with the device index as the ioctl
		argument.

		:param request: ioctl request code (e.g. ``_HCIDEVUP``)
		:type request: integer
		:param index: index of the HCI interface to act on
		:type index: integer
		:return: True on success, False on failure
		:rtype: boolean
		'''
		try:
			sock = socket.socket(HCIConfig._AF_BLUETOOTH, socket.SOCK_RAW, HCIConfig._BTPROTO_HCI)
			ioctl(sock.fileno(), request, index)
			sock.close()
		except IOError:
			# Missing privileges, unsupported platform or nonexistent device.
			return False
		return True

	@staticmethod
	def down(index):
		'''
		This class method stops an HCI interface.
		Its role is equivalent to the following command : ``hciconfig hci<index> down``

		:param index: index of the HCI interface to stop
		:type index: integer

		:Example:

		>>> HCIConfig.down(0)

		'''
		return HCIConfig._hci_ioctl(HCIConfig._HCIDEVDOWN, index)

	@staticmethod
	def reset(index):
		'''
		This class method resets an HCI interface.
		Its role is equivalent to the following command : ``hciconfig hci<index> reset``

		:param index: index of the HCI interface to reset
		:type index: integer

		:Example:

		>>> HCIConfig.reset(0)

		'''
		return HCIConfig._hci_ioctl(HCIConfig._HCIDEVRESET, index)

	@staticmethod
	def up(index):
		'''
		This class method starts an HCI interface.
		Its role is equivalent to the following command : ``hciconfig hci<index> up``

		:param index: index of the HCI interface to start
		:type index: integer

		:Example:

		>>> HCIConfig.up(0)

		'''
		return HCIConfig._hci_ioctl(HCIConfig._HCIDEVUP, index)
| 19.84 | 82 | 0.677419 | from fcntl import ioctl
import socket
class HCIConfig(object):
@staticmethod
def down(index):
try:
sock = socket.socket(31, socket.SOCK_RAW, 1)
ioctl(sock.fileno(), 0x400448ca, index)
sock.close()
except IOError:
return False
return True
@staticmethod
def reset(index):
try:
sock = socket.socket(31, socket.SOCK_RAW, index)
ioctl(sock.fileno(), 0x400448cb, 0)
sock.close()
except IOError:
return False
return True
@staticmethod
def up(index):
try:
sock = socket.socket(31, socket.SOCK_RAW, index)
ioctl(sock.fileno(), 0x400448c9, 0)
sock.close()
except IOError:
return False
return True
| true | true |
f7221f016309edf12b6b55dbb2027c830de5a1e1 | 1,581 | py | Python | vue_backend/webapi/migrations/0002_auto_20201117_0251.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | [
"MIT"
] | 1 | 2021-03-18T17:04:52.000Z | 2021-03-18T17:04:52.000Z | vue_backend/webapi/migrations/0002_auto_20201117_0251.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | [
"MIT"
] | 11 | 2020-04-03T04:16:24.000Z | 2022-03-26T10:36:49.000Z | vue_backend/webapi/migrations/0002_auto_20201117_0251.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-11-17 02:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapi', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='products',
name='capacities',
field=models.CharField(default='custom', max_length=128, verbose_name='容量'),
),
migrations.AddField(
model_name='products',
name='imprint_location',
field=models.CharField(default='custom', max_length=64, verbose_name='logo位置'),
),
migrations.AddField(
model_name='products',
name='imprint_methods',
field=models.CharField(default='custom', max_length=128, verbose_name='logo工艺'),
),
migrations.AddField(
model_name='products',
name='imprint_size',
field=models.CharField(default='custom', max_length=128, verbose_name='logo尺寸'),
),
migrations.AddField(
model_name='products',
name='material',
field=models.CharField(default='custom', max_length=64, verbose_name='材质'),
),
migrations.AddField(
model_name='products',
name='moq',
field=models.IntegerField(default=100, verbose_name='起订量'),
),
migrations.AlterField(
model_name='products',
name='pro_color',
field=models.CharField(blank=True, max_length=1024, verbose_name='产品颜色'),
),
]
| 32.265306 | 92 | 0.577483 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapi', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='products',
name='capacities',
field=models.CharField(default='custom', max_length=128, verbose_name='容量'),
),
migrations.AddField(
model_name='products',
name='imprint_location',
field=models.CharField(default='custom', max_length=64, verbose_name='logo位置'),
),
migrations.AddField(
model_name='products',
name='imprint_methods',
field=models.CharField(default='custom', max_length=128, verbose_name='logo工艺'),
),
migrations.AddField(
model_name='products',
name='imprint_size',
field=models.CharField(default='custom', max_length=128, verbose_name='logo尺寸'),
),
migrations.AddField(
model_name='products',
name='material',
field=models.CharField(default='custom', max_length=64, verbose_name='材质'),
),
migrations.AddField(
model_name='products',
name='moq',
field=models.IntegerField(default=100, verbose_name='起订量'),
),
migrations.AlterField(
model_name='products',
name='pro_color',
field=models.CharField(blank=True, max_length=1024, verbose_name='产品颜色'),
),
]
| true | true |
f7221f4199a21ad48a1e78d734c001ee35792cf4 | 9,286 | py | Python | TrafficMan/admin.py | mizunashi-sh/TrafficMan-DB | d02098dc86f7d126a79a67fc5bbf2140544fb187 | [
"BSD-3-Clause"
] | 5 | 2021-06-22T08:56:19.000Z | 2022-01-14T13:18:58.000Z | TrafficMan/admin.py | mizunashi-sh/TrafficMan-DB | d02098dc86f7d126a79a67fc5bbf2140544fb187 | [
"BSD-3-Clause"
] | null | null | null | TrafficMan/admin.py | mizunashi-sh/TrafficMan-DB | d02098dc86f7d126a79a67fc5bbf2140544fb187 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User, Group
from .models import Vehicle
from .models import VehicleLicense
from .models import Violation
from .models import UserProfile
from .models import DriverLicense
from .models import Education
from .models import OwnerVehicleView
from .models import ExceededViolationView
from .models import UnprocessedViolationView
from .models import ViolationProcessRecordView
admin.site.site_header = '道路交通违章信息管理系统控制台'
admin.site.site_title = "道路交通违章信息管理系统控制台"
class UserInline(admin.StackedInline):
model = UserProfile
can_delete = False
class UserAdmin(BaseUserAdmin):
inlines = (UserInline,)
class EducationInline(admin.TabularInline):
model = Education
can_delete = False
readonly_fields = ('driver', 'create_time')
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return False
class VehicleLicenseInline(admin.StackedInline):
model = VehicleLicense
def has_add_permission(self, request, obj):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class ViolationInlineForVehicle(admin.TabularInline):
model = Violation
can_delete = False
show_change_link = True
fields = ('driver', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
readonly_fields = ('driver', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class VehicleAdmin(admin.ModelAdmin):
list_display = (
'engine_id', 'plate_number', 'brand', 'manufacture_model', 'owner', 'vehicle_type', 'manufacture_date',
'status')
search_fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model', 'owner__name', 'owner__identity')
list_filter = ('vehicle_type', 'status')
autocomplete_fields = ('owner',)
inlines = (VehicleLicenseInline, ViolationInlineForVehicle)
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
class DriverLicenseInline(admin.StackedInline):
model = DriverLicense
can_delete = False
def has_add_permission(self, request, obj):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class VehicleInline(admin.TabularInline):
model = Vehicle
can_delete = False
show_change_link = True
fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model')
readonly_fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ViolationInlineForUser(admin.TabularInline):
model = Violation
can_delete = False
show_change_link = True
fields = ('vehicle', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
readonly_fields = ('vehicle', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ('name', 'identity')
list_display = ('name', 'identity', 'gender', 'mobile')
autocomplete_fields = ('user',)
inlines = [DriverLicenseInline, EducationInline, VehicleInline, ViolationInlineForUser]
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
class ViolationAdmin(admin.ModelAdmin):
list_filter = ('is_processed',)
list_display = ('id', 'vehicle', 'driver', 'date', 'type', 'deadline', 'is_processed')
search_fields = (
'id', 'type', 'area', 'location', 'vehicle__plate_number', 'driver__name', 'driver__identity',
'vehicle__owner__name', 'vehicle__owner__identity')
autocomplete_fields = ('vehicle', 'driver',)
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class OwnerVehicleAdmin(admin.ModelAdmin):
list_display = ('plate_number', 'name', 'identity', 'brand', 'manufacture_model', 'status')
search_fields = (
'identity', 'name', 'brand', 'manufacture_model', 'vehicle_type', 'plate_number')
list_filter = ('status', 'vehicle_type')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ExceededViolationAdmin(admin.ModelAdmin):
search_fields = (
'id', 'type', 'location', 'plate_number', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
list_display = (
'id', 'plate_number', 'date', 'type', 'fine', 'deadline', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
readonly_fields = (
'id', 'date', 'location', 'type', 'deadline', 'fine', 'plate_number', 'driver_id', 'driver_name', 'owner_id',
'owner_name')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class UnprocessedViolationAdmin(admin.ModelAdmin):
search_fields = (
'id', 'type', 'area', 'plate_number', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
list_display = (
'id', 'plate_number', 'date', 'type', 'fine', 'deadline', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
readonly_fields = (
'id', 'date', 'area', 'type', 'deadline', 'fine', 'plate_number', 'driver_id', 'driver_name', 'owner_id',
'owner_name')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ViolationProcessRecordAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'type', 'plate_number', 'driver_id', 'deadline', 'process_time')
search_fields = ('id', 'type', 'area', 'location', 'plate_number', 'driver_id')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_view_permission(self, request, obj=None):
return True
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(OwnerVehicleView, OwnerVehicleAdmin)
admin.site.register(ExceededViolationView, ExceededViolationAdmin)
admin.site.register(UnprocessedViolationView, UnprocessedViolationAdmin)
admin.site.register(ViolationProcessRecordView, ViolationProcessRecordAdmin)
admin.site.register(Vehicle, VehicleAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Violation, ViolationAdmin)
| 32.355401 | 119 | 0.682317 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User, Group
from .models import Vehicle
from .models import VehicleLicense
from .models import Violation
from .models import UserProfile
from .models import DriverLicense
from .models import Education
from .models import OwnerVehicleView
from .models import ExceededViolationView
from .models import UnprocessedViolationView
from .models import ViolationProcessRecordView
admin.site.site_header = '道路交通违章信息管理系统控制台'
admin.site.site_title = "道路交通违章信息管理系统控制台"
class UserInline(admin.StackedInline):
model = UserProfile
can_delete = False
class UserAdmin(BaseUserAdmin):
inlines = (UserInline,)
class EducationInline(admin.TabularInline):
model = Education
can_delete = False
readonly_fields = ('driver', 'create_time')
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return False
class VehicleLicenseInline(admin.StackedInline):
model = VehicleLicense
def has_add_permission(self, request, obj):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class ViolationInlineForVehicle(admin.TabularInline):
model = Violation
can_delete = False
show_change_link = True
fields = ('driver', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
readonly_fields = ('driver', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class VehicleAdmin(admin.ModelAdmin):
list_display = (
'engine_id', 'plate_number', 'brand', 'manufacture_model', 'owner', 'vehicle_type', 'manufacture_date',
'status')
search_fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model', 'owner__name', 'owner__identity')
list_filter = ('vehicle_type', 'status')
autocomplete_fields = ('owner',)
inlines = (VehicleLicenseInline, ViolationInlineForVehicle)
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
class DriverLicenseInline(admin.StackedInline):
model = DriverLicense
can_delete = False
def has_add_permission(self, request, obj):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class VehicleInline(admin.TabularInline):
model = Vehicle
can_delete = False
show_change_link = True
fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model')
readonly_fields = ('plate_number', 'engine_id', 'brand', 'manufacture_model')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ViolationInlineForUser(admin.TabularInline):
model = Violation
can_delete = False
show_change_link = True
fields = ('vehicle', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
readonly_fields = ('vehicle', 'date', 'location', 'point_minus', 'fine', 'deadline', 'is_processed')
extra = 0
def has_add_permission(self, request, obj):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ('name', 'identity')
list_display = ('name', 'identity', 'gender', 'mobile')
autocomplete_fields = ('user',)
inlines = [DriverLicenseInline, EducationInline, VehicleInline, ViolationInlineForUser]
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
class ViolationAdmin(admin.ModelAdmin):
list_filter = ('is_processed',)
list_display = ('id', 'vehicle', 'driver', 'date', 'type', 'deadline', 'is_processed')
search_fields = (
'id', 'type', 'area', 'location', 'vehicle__plate_number', 'driver__name', 'driver__identity',
'vehicle__owner__name', 'vehicle__owner__identity')
autocomplete_fields = ('vehicle', 'driver',)
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser:
return True
user = User.objects.get(id=request.user.id)
groups = user.groups.all().values_list('name', flat=True)
if 'superior_staff' in groups:
return True
return False
class OwnerVehicleAdmin(admin.ModelAdmin):
list_display = ('plate_number', 'name', 'identity', 'brand', 'manufacture_model', 'status')
search_fields = (
'identity', 'name', 'brand', 'manufacture_model', 'vehicle_type', 'plate_number')
list_filter = ('status', 'vehicle_type')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ExceededViolationAdmin(admin.ModelAdmin):
search_fields = (
'id', 'type', 'location', 'plate_number', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
list_display = (
'id', 'plate_number', 'date', 'type', 'fine', 'deadline', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
readonly_fields = (
'id', 'date', 'location', 'type', 'deadline', 'fine', 'plate_number', 'driver_id', 'driver_name', 'owner_id',
'owner_name')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class UnprocessedViolationAdmin(admin.ModelAdmin):
search_fields = (
'id', 'type', 'area', 'plate_number', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
list_display = (
'id', 'plate_number', 'date', 'type', 'fine', 'deadline', 'driver_name', 'driver_id', 'owner_name', 'owner_id')
readonly_fields = (
'id', 'date', 'area', 'type', 'deadline', 'fine', 'plate_number', 'driver_id', 'driver_name', 'owner_id',
'owner_name')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class ViolationProcessRecordAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'type', 'plate_number', 'driver_id', 'deadline', 'process_time')
search_fields = ('id', 'type', 'area', 'location', 'plate_number', 'driver_id')
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_view_permission(self, request, obj=None):
return True
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(OwnerVehicleView, OwnerVehicleAdmin)
admin.site.register(ExceededViolationView, ExceededViolationAdmin)
admin.site.register(UnprocessedViolationView, UnprocessedViolationAdmin)
admin.site.register(ViolationProcessRecordView, ViolationProcessRecordAdmin)
admin.site.register(Vehicle, VehicleAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Violation, ViolationAdmin)
| true | true |
f722228558d4f889610e571fcd4e2f7ff014bf26 | 2,803 | py | Python | test/functional/keypool-topup.py | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 18 | 2021-02-11T16:36:38.000Z | 2021-12-15T11:33:14.000Z | test/functional/keypool-topup.py | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 10 | 2021-01-17T05:57:32.000Z | 2022-03-03T12:49:32.000Z | test/functional/keypool-topup.py | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 3 | 2021-08-23T05:29:30.000Z | 2022-03-25T20:18:00.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
from test_framework.qtumconfig import COINBASE_MATURITY
class KeypoolRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=100', '-keypoolmin=20']]
def run_test(self):
self.tmpdir = self.options.tmpdir
self.nodes[0].generate(COINBASE_MATURITY+1)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Generate keys for wallet")
for _ in range(90):
addr_oldpool = self.nodes[1].getnewaddress()
for _ in range(20):
addr_extpool = self.nodes[1].getnewaddress()
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("Restart node with wallet backup")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
self.log.info("Verify keypool is restored and balance is correct")
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
assert_equal(self.nodes[1].getbalance(), 15)
assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/88'/0'/111'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| 36.881579 | 164 | 0.686051 |
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
from test_framework.qtumconfig import COINBASE_MATURITY
class KeypoolRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=100', '-keypoolmin=20']]
def run_test(self):
self.tmpdir = self.options.tmpdir
self.nodes[0].generate(COINBASE_MATURITY+1)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Generate keys for wallet")
for _ in range(90):
addr_oldpool = self.nodes[1].getnewaddress()
for _ in range(20):
addr_extpool = self.nodes[1].getnewaddress()
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("Restart node with wallet backup")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
self.log.info("Verify keypool is restored and balance is correct")
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
assert_equal(self.nodes[1].getbalance(), 15)
assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/88'/0'/111'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| true | true |
f7222360e95da6f77246679240f78ac6c5f9ef4b | 105,292 | py | Python | bin/cqlsh.py | shalinigr/CASSANDRA-13123 | 0f215e119fe25c8eec725edff7cf711b6f1395fe | [
"Apache-2.0"
] | null | null | null | bin/cqlsh.py | shalinigr/CASSANDRA-13123 | 0f215e119fe25c8eec725edff7cf711b6f1395fe | [
"Apache-2.0"
] | null | null | null | bin/cqlsh.py | shalinigr/CASSANDRA-13123 | 0f215e119fe25c8eec725edff7cf711b6f1395fe | [
"Apache-2.0"
] | 1 | 2020-06-14T20:57:52.000Z | 2020-06-14T20:57:52.000Z | #!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement
import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID
# Hard requirement: this cqlsh targets the Python 2.7 line only; fail fast
# with a clear message on any other interpreter rather than breaking later.
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
    sys.exit("\nCQL Shell supports only Python 2.7\n")
# Canonical codec names used for configuring terminal/file I/O below.
UTF8 = 'utf-8'
CP65001 = 'cp65001'  # Windows code page 65001, Microsoft's UTF-8 variant
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
# Line editing/history support is optional and only wanted for interactive use.
readline = None
try:
    # Check tty-ness before importing: readline itself only consults $TERM,
    # so importing it for a piped/redirected session would leak terminal
    # escape sequences into non-tty output.
    if sys.stdin.isatty():
        import readline
except ImportError:
    # No readline module on this platform; cqlsh still works without it.
    pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
# cqlsh lives in <cassandra>/bin/, so the install root is one directory up.
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.0.html'
# Choose the CQL reference document used for displaying CQL help, preferring
# local copies over the online fallback.
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
    # default location of local CQL.html (source tree / unpacked tarball)
    CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
    # fallback to the copy installed by an OS package
    CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
    # fallback to online version
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
    # No browser was detected at all; point CQL help at the online docs.
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
    # xdg-open is preferred but there is no desktop session (XDG_DATA_DIRS
    # unset, e.g. a plain ssh login): demote xdg-open to the end of the
    # preference list so other browsers are tried first.
    webbrowser._tryorder.remove('xdg-open')
    webbrowser._tryorder.append('xdg-open')
# use bundled libs for python-cql and thrift, if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3
# (https://bugs.python.org/issue13216): alias Windows' cp65001 codec name
# to the standard utf-8 codec.
if is_win and sys.version_info < (3, 3):
    codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
    # Location where OS packages typically install the bundled zips.
    ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
# Escape hatch: setting CQLSH_NO_BUNDLED forces use of system-installed libs.
if os.environ.get('CQLSH_NO_BUNDLED', ''):
    ZIPLIB_DIRS = ()
def find_zip(libprefix, ziplib_dirs=None):
    """Locate a bundled zip whose file name starts with *libprefix*.

    Directories in *ziplib_dirs* (default: the module-level ZIPLIB_DIRS)
    are searched in order; the first directory containing any match wins,
    and the lexicographically greatest matching path is returned -- with
    version-suffixed names this is usually, though not strictly always,
    the highest version.  Returns None when no directory has a match.
    """
    if ziplib_dirs is None:
        ziplib_dirs = ZIPLIB_DIRS
    for ziplibdir in ziplib_dirs:
        zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
        if zips:
            return max(zips)  # probably the highest version, if multiple
# Put the bundled python driver (when present) at the front of sys.path so
# it is preferred over any system-installed copy.
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
    # Derive the driver version from the zip's file name, e.g.
    # "cassandra-driver-internal-only-3.0.0.zip" -> "3.0.0".
    ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
    sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
# Bundled copies of the driver's third-party dependencies, if shipped.
third_parties = ('futures-', 'six-')
for lib in third_parties:
    lib_zip = find_zip(lib)
    if lib_zip:
        sys.path.insert(0, lib_zip)
# Silence warnings mentioning 'blist' (presumably the driver's notice about
# its optional blist dependency being absent -- noise for cqlsh users).
warnings.filterwarnings("ignore", r".*blist.*")
# The python driver is mandatory; exit with enough context (interpreter
# binary and module search path) to diagnose which environment lacks it.
try:
    import cassandra
except ImportError as e:
    # "as" form: valid since Python 2.6 and the only form accepted by
    # Python 3 (the old "except E, e" comma syntax is deprecated).
    sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
             'You might try "pip install cassandra-driver".\n\n'
             'Python: %s\n'
             'Module load path: %r\n\n'
             'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
# In the source-tree / tarball cases, cqlshlib lives under pylib/ next to
# this script, so add it to sys.path before importing from it.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
    sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
                                 RED, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
                                 DEFAULT_TIMESTAMP_FORMAT, DateTimeFormat,
                                 format_by_type, format_value_utype,
                                 formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
# Connection and display defaults; host/port can be overridden by
# $CQLSH_HOST / $CQLSH_PORT or on the command line.
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_CQLVER = '3.4.0'
DEFAULT_PROTOCOL_VERSION = 4
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
# libedit-based readline (macOS) wants the literal tab character for
# completion binding; GNU readline wants the name 'tab'.
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
    DEFAULT_COMPLETEKEY = '\t'
else:
    DEFAULT_COMPLETEKEY = 'tab'
# Filled in later once the CQL version is known (module-level registries
# for the help topics and the active CQL grammar ruleset).
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
# Command-line interface definition (optparse; this is Python 2 code).
parser = optparse.OptionParser(description=description, epilog=epilog,
                               usage="Usage: %prog [options] [host [port]]",
                               version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
                  help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
                  help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
                  help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
                  " (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=DEFAULT_CQLVER,
                  help='Specify a particular CQL version (default: %default).'
                       ' Examples: "3.0.3", "3.1.0"')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
                  help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
                  help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
                  help='Force tty mode (command prompt).')
# Parse into a bare Values() so that only options actually given on the
# command line appear as attributes (hasattr() checks below rely on this).
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
# Resolve the cqlshrc config path: an explicit --cqlshrc wins if it exists,
# otherwise fall back to ~/.cassandra/cqlshrc.
if hasattr(options, 'cqlshrc'):
    CONFIG_FILE = options.cqlshrc
    if not os.path.exists(CONFIG_FILE):
        print '\nWarning: Specified cqlshrc location `%s` does not exist.  Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
        CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
    CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
    try:
        os.mkdir(HISTORY_DIR)
    except OSError:
        # Best effort: history just won't be persisted for this session.
        print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR
# Migrate from the pre-2.1 dotfile locations (~/.cqlshrc, ~/.cqlsh_history)
# unless a file already exists at the new location.
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
    if os.path.exists(CONFIG_FILE):
        print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
            % (OLD_CONFIG_FILE, CONFIG_FILE)
    else:
        os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
    os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
# Driver exception types that should be reported as query errors rather
# than crashing the shell (see cmdloop's except clause).
CQL_ERRORS = (
    cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
    cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
    cassandra.cluster.NoHostAvailable,
    cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
    cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
# we want the cql parser to understand our cqlsh-specific commands too
my_commands_ending_with_newline = (
    'help',
    '?',
    'consistency',
    'serial',
    'describe',
    'desc',
    'show',
    'source',
    'capture',
    'login',
    'debug',
    'tracing',
    'expand',
    'paging',
    'exit',
    'quit',
    'clear',
    'cls'
)
# Registry of (rulename, termname, completer-function) triples consumed when
# the cqlsh grammar is compiled.
cqlsh_syntax_completers = []


def cqlsh_syntax_completer(rulename, termname):
    """Decorator factory: register the decorated function as the completion
    provider for term `termname` within grammar rule `rulename`."""
    def register(func):
        cqlsh_syntax_completers.append((rulename, termname, func))
        return func
    return register
cqlsh_extra_syntax_rules = r'''
<cqlshCommand> ::= <CQL_Statement>
| <specialCommand> ( ";" | "\n" )
;
<specialCommand> ::= <describeCommand>
| <consistencyCommand>
| <serialConsistencyCommand>
| <showCommand>
| <sourceCommand>
| <captureCommand>
| <copyCommand>
| <loginCommand>
| <debugCommand>
| <helpCommand>
| <tracingCommand>
| <expandCommand>
| <exitCommand>
| <pagingCommand>
| <clearCommand>
;
<describeCommand> ::= ( "DESCRIBE" | "DESC" )
( "FUNCTIONS"
| "FUNCTION" udf=<anyFunctionName>
| "AGGREGATES"
| "AGGREGATE" uda=<userAggregateName>
| "KEYSPACES"
| "KEYSPACE" ksname=<keyspaceName>?
| ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
| "INDEX" idx=<indexName>
| "MATERIALIZED" "VIEW" mv=<materializedViewName>
| ( "COLUMNFAMILIES" | "TABLES" )
| "FULL"? "SCHEMA"
| "CLUSTER"
| "TYPES"
| "TYPE" ut=<userTypeName>
| (ksname=<keyspaceName> | cf=<columnFamilyName> | idx=<indexName> | mv=<materializedViewName>))
;
<consistencyCommand> ::= "CONSISTENCY" ( level=<consistencyLevel> )?
;
<consistencyLevel> ::= "ANY"
| "ONE"
| "TWO"
| "THREE"
| "QUORUM"
| "ALL"
| "LOCAL_QUORUM"
| "EACH_QUORUM"
| "SERIAL"
| "LOCAL_SERIAL"
| "LOCAL_ONE"
;
<serialConsistencyCommand> ::= "SERIAL" "CONSISTENCY" ( level=<serialConsistencyLevel> )?
;
<serialConsistencyLevel> ::= "SERIAL"
| "LOCAL_SERIAL"
;
<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
;
<sourceCommand> ::= "SOURCE" fname=<stringLiteral>
;
<captureCommand> ::= "CAPTURE" ( fname=( <stringLiteral> | "OFF" ) )?
;
<copyCommand> ::= "COPY" cf=<columnFamilyName>
( "(" [colnames]=<colname> ( "," [colnames]=<colname> )* ")" )?
( dir="FROM" ( fname=<stringLiteral> | "STDIN" )
| dir="TO" ( fname=<stringLiteral> | "STDOUT" ) )
( "WITH" <copyOption> ( "AND" <copyOption> )* )?
;
<copyOption> ::= [optnames]=(<identifier>|<reserved_identifier>) "=" [optvals]=<copyOptionVal>
;
<copyOptionVal> ::= <identifier>
| <reserved_identifier>
| <term>
;
# avoiding just "DEBUG" so that this rule doesn't get treated as a terminal
<debugCommand> ::= "DEBUG" "THINGS"?
;
<helpCommand> ::= ( "HELP" | "?" ) [topic]=( /[a-z_]*/ )*
;
<tracingCommand> ::= "TRACING" ( switch=( "ON" | "OFF" ) )?
;
<expandCommand> ::= "EXPAND" ( switch=( "ON" | "OFF" ) )?
;
<pagingCommand> ::= "PAGING" ( switch=( "ON" | "OFF" | /[0-9]+/) )?
;
<loginCommand> ::= "LOGIN" username=<username> (password=<stringLiteral>)?
;
<exitCommand> ::= "exit" | "quit"
;
<clearCommand> ::= "CLEAR" | "CLS"
;
<qmark> ::= "?" ;
'''
@cqlsh_syntax_completer('helpCommand', 'topic')
def complete_help(ctxt, cqlsh):
    """Complete HELP topics from the CQL docs plus cqlsh's own topics,
    upper-cased and sorted."""
    topics = cqldocs.get_help_topics() + cqlsh.get_help_topics()
    return sorted(topic.upper() for topic in topics)
def complete_source_quoted_filename(ctxt, cqlsh):
    """Filename completion for quoted paths (used by SOURCE and CAPTURE).

    Returns the entries of the partial path's directory that extend the
    typed prefix; directories get a trailing slash.  An unreadable
    directory yields no completions."""
    partial = ctxt.get_binding('partial', '')
    dirname, prefix = os.path.split(partial)
    expanded = os.path.expanduser(dirname)
    try:
        entries = os.listdir(expanded or '.')
    except OSError:
        return ()
    completions = []
    for entry in entries:
        if not entry.startswith(prefix):
            continue
        candidate = os.path.join(dirname, entry)
        if os.path.isdir(os.path.join(expanded, entry)):
            candidate += '/'
        completions.append(candidate)
    return completions
# SOURCE and CAPTURE share plain filename completion; register the helper
# for both grammar terms (decorator applied manually so one function can
# serve two terms).
cqlsh_syntax_completer('sourceCommand', 'fname')(complete_source_quoted_filename)
cqlsh_syntax_completer('captureCommand', 'fname')(complete_source_quoted_filename)
@cqlsh_syntax_completer('copyCommand', 'fname')
def copy_fname_completer(ctxt, cqlsh):
    """Complete the filename argument of COPY: inside an open string
    literal, defer to plain filename completion; with nothing typed yet,
    suggest the opening quote; otherwise offer nothing."""
    if ctxt.get_binding('*LASTTYPE*') == 'unclosedString':
        return complete_source_quoted_filename(ctxt, cqlsh)
    if ctxt.get_binding('partial') == '':
        return ["'"]
    return ()
@cqlsh_syntax_completer('copyCommand', 'colnames')
def complete_copy_column_names(ctxt, cqlsh):
    # Complete the parenthesized column list of COPY: offer the first
    # (partition key) column first, then the remaining columns that have
    # not already been listed.
    existcols = map(cqlsh.cql_unprotect_name, ctxt.get_binding('colnames', ()))
    ks = cqlsh.cql_unprotect_name(ctxt.get_binding('ksname', None))
    cf = cqlsh.cql_unprotect_name(ctxt.get_binding('cfname'))
    colnames = cqlsh.get_column_names(ks, cf)
    if len(existcols) == 0:
        return [colnames[0]]
    return set(colnames[1:]) - set(existcols)
# COPY options valid in both directions, only for COPY FROM, and only for
# COPY TO, respectively; used by the option-name completer below.
COPY_COMMON_OPTIONS = ['DELIMITER', 'QUOTE', 'ESCAPE', 'HEADER', 'NULL', 'DATETIMEFORMAT',
                       'MAXATTEMPTS', 'REPORTFREQUENCY', 'DECIMALSEP', 'THOUSANDSSEP', 'BOOLSTYLE',
                       'NUMPROCESSES', 'CONFIGFILE', 'RATEFILE']
COPY_FROM_OPTIONS = ['CHUNKSIZE', 'INGESTRATE', 'MAXBATCHSIZE', 'MINBATCHSIZE', 'MAXROWS',
                     'SKIPROWS', 'SKIPCOLS', 'MAXPARSEERRORS', 'MAXINSERTERRORS', 'ERRFILE', 'PREPAREDSTATEMENTS']
COPY_TO_OPTIONS = ['ENCODING', 'PAGESIZE', 'PAGETIMEOUT', 'BEGINTOKEN', 'ENDTOKEN', 'MAXOUTPUTSIZE', 'MAXREQUESTS']
@cqlsh_syntax_completer('copyOption', 'optnames')
def complete_copy_options(ctxt, cqlsh):
    """Complete COPY option names, offering only the options that are valid
    for the statement's direction (FROM/TO) and not already used."""
    optnames = map(str.upper, ctxt.get_binding('optnames', ()))
    direction = ctxt.get_binding('dir').upper()
    # Fix: `opts` was previously unbound (NameError) when the direction was
    # neither FROM nor TO; default to no suggestions in that case.
    opts = set()
    if direction == 'FROM':
        opts = set(COPY_COMMON_OPTIONS + COPY_FROM_OPTIONS) - set(optnames)
    elif direction == 'TO':
        opts = set(COPY_COMMON_OPTIONS + COPY_TO_OPTIONS) - set(optnames)
    return opts
@cqlsh_syntax_completer('copyOption', 'optvals')
def complete_copy_opt_values(ctxt, cqlsh):
    # Complete the value side of a COPY option: HEADER is boolean, every
    # other option just gets a generic hint.
    optnames = ctxt.get_binding('optnames', ())
    lastopt = optnames[-1].lower()
    if lastopt == 'header':
        return ['true', 'false']
    return [cqlhandling.Hint('<single_character_string>')]
class NoKeyspaceError(Exception):
    """No keyspace is currently USEd and none was specified."""


class KeyspaceNotFound(Exception):
    """The named keyspace does not exist in cluster metadata."""


class ColumnFamilyNotFound(Exception):
    """The named table (column family) does not exist."""


class IndexNotFound(Exception):
    """The named secondary index does not exist."""


class MaterializedViewNotFound(Exception):
    """The named materialized view does not exist."""


class ObjectNotFound(Exception):
    """No table/index/view with the given name exists in the keyspace."""


class VersionNotSupported(Exception):
    """The requested CQL version is not supported by the server."""


class UserTypeNotFound(Exception):
    """The named user-defined type does not exist."""


class FunctionNotFound(Exception):
    """The named user-defined function does not exist."""


class AggregateNotFound(Exception):
    """The named user-defined aggregate does not exist."""
class DecodeError(Exception):
    """Collected (not raised) when a column value cannot be decoded for
    display; format_value renders instances as the raw-bytes repr."""
    verb = 'decode'

    def __init__(self, thebytes, err, colname=None):
        self.thebytes = thebytes
        self.err = err
        self.colname = colname

    def __str__(self):
        return str(self.thebytes)

    def message(self):
        if self.colname is None:
            what = 'value %r' % (self.thebytes,)
        else:
            what = 'value %r (for column %r)' % (self.thebytes, self.colname)
        return 'Failed to %s %s : %s' \
               % (self.verb, what, self.err)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
    """DecodeError variant used when a value decodes but fails to *format*
    for display (see Shell.myformat_value)."""
    verb = 'format'
def full_cql_version(ver):
    """Expand a possibly-abbreviated CQL version string.

    Pads the numeric part to major.minor.patch and returns a pair
    (normalized_version_string, version_tuple) where version_tuple is
    (major, minor, patch, vendor_suffix); the suffix is '' when absent.

    Fixes vs. the previous version: the numeric padding is applied before
    splitting off a '-vendor' suffix (so '3.4-beta1' -> '3.4.0-beta1'
    instead of the mangled '3.4-beta1.0'), and list() around map() keeps
    the concatenation working on both Python 2 and 3.
    """
    base, _, vendor = ver.partition('-')
    while base.count('.') < 2:
        base += '.0'
    normalized = base + ('-' + vendor if vendor else '')
    vertuple = tuple(list(map(int, base.split('.'))) + [vendor])
    return normalized, vertuple
def format_value(val, output_encoding, addcolor=False, date_time_format=None,
                 float_precision=None, colormap=None, nullval=None):
    """Render a single value for display.

    DecodeError placeholders are shown as the repr of their raw bytes
    (colorized as an error when color output is on); everything else is
    dispatched to the type-specific formatter."""
    if isinstance(val, DecodeError):
        raw = repr(val.thebytes)
        if addcolor:
            return colorme(raw, colormap, 'error')
        return FormattedValue(raw)
    return format_by_type(type(val), val, output_encoding, colormap=colormap,
                          addcolor=addcolor, nullval=nullval,
                          date_time_format=date_time_format,
                          float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement: suppress echoing the offending
    source line (by passing line='') and swallow I/O errors on the target
    stream so a broken stderr can't crash the shell."""
    out = sys.stderr if file is None else file
    try:
        out.write(warnings.formatwarning(message, category, filename, lineno, line=''))
    except IOError:
        pass
# Install the quieter warning printer and always surface unexpected table
# structure warnings from the CQL handling layer.
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
    # Monkeypatch the python driver: tweak blob/timestamp deserialization
    # and auto-register display formatting for user defined types.
    extend_cql_deserialization()
    auto_format_udts()
def extend_cql_deserialization():
    """
    The python driver returns BLOBs as string, but we expect them as bytearrays
    the implementation of cassandra.cqltypes.BytesType.deserialize.

    The deserializers package exists only when the driver has been compiled with cython extensions and
    cassandra.deserializers.DesBytesType replaces cassandra.cqltypes.BytesType.deserialize.

    DesBytesTypeByteArray is a fast deserializer that converts blobs into bytearrays but it was
    only introduced recently (3.1.0). If it is available we use it, otherwise we remove
    cassandra.deserializers.DesBytesType so that we fall back onto cassandra.cqltypes.BytesType.deserialize
    just like in the case where no cython extensions are present.
    """
    if hasattr(cassandra, 'deserializers'):
        if hasattr(cassandra.deserializers, 'DesBytesTypeByteArray'):
            cassandra.deserializers.DesBytesType = cassandra.deserializers.DesBytesTypeByteArray
        else:
            del cassandra.deserializers.DesBytesType
    # Pure-python fallback: always hand blobs back as bytearrays.
    cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))

    class DateOverFlowWarning(RuntimeWarning):
        pass

    # Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
    def deserialize_date_fallback_int(byts, protocol_version):
        timestamp_ms = int64_unpack(byts)
        try:
            return datetime_from_timestamp(timestamp_ms / 1000.0)
        except OverflowError:
            # Out-of-range timestamp: warn once and show the raw epoch-millis.
            warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. Timestamps are displayed in milliseconds from epoch."))
            return timestamp_ms

    cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)

    if hasattr(cassandra, 'deserializers'):
        # Drop the cython date deserializer so the fallback above is used.
        del cassandra.deserializers.DesDateType

    # Return cassandra.cqltypes.EMPTY instead of None for empty values
    cassandra.cqltypes.CassandraType.support_empty_values = True
def auto_format_udts():
    # when we see a new user defined type, set up the shell formatting for it
    udt_apply_params = cassandra.cqltypes.UserType.apply_parameters

    def new_apply_params(cls, *args, **kwargs):
        udt_class = udt_apply_params(*args, **kwargs)
        formatter_for(udt_class.typename)(format_value_utype)
        return udt_class

    # NOTE(review): the wrapper is stored under `udt_apply_parameters`, not
    # the `apply_parameters` attribute captured above, so the driver would
    # keep calling its original method -- verify this name is intentional.
    cassandra.cqltypes.UserType.udt_apply_parameters = classmethod(new_apply_params)

    make_udt_class = cassandra.cqltypes.UserType.make_udt_class

    def new_make_udt_class(cls, *args, **kwargs):
        # Wrap make_udt_class so every newly created UDT tuple type gets a
        # display formatter registered for it.
        udt_class = make_udt_class(*args, **kwargs)
        formatter_for(udt_class.tuple_type.__name__)(format_value_utype)
        return udt_class

    cassandra.cqltypes.UserType.make_udt_class = classmethod(new_make_udt_class)
class FrozenType(cassandra.cqltypes._ParameterizedType):
    """
    Needed until the bundled python driver adds FrozenType.
    """
    typename = "frozen"
    num_subtypes = 1

    @classmethod
    def deserialize_safe(cls, byts, protocol_version):
        # frozen<T> is transparent on the wire: delegate to the single subtype.
        subtype, = cls.subtypes
        return subtype.from_binary(byts)

    @classmethod
    def serialize_safe(cls, val, protocol_version):
        subtype, = cls.subtypes
        return subtype.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
    """Interactive cqlsh interpreter (REPL) built on cmd.Cmd."""
    custom_prompt = os.getenv('CQLSH_PROMPT', '')
    # Fix: use '!=' instead of 'is not' -- identity comparison against a
    # string literal only worked by accident of CPython interning.
    if custom_prompt != '':
        custom_prompt += "\n"
    default_prompt = custom_prompt + "cqlsh> "
    continue_prompt = "   ... "
    keyspace_prompt = custom_prompt + "cqlsh:%s> "
    keyspace_continue_prompt = "%s    ... "
    show_line_nums = False          # prefix error reports with line numbers (non-tty input)
    debug = False                   # print tracebacks instead of short errors
    stop = False                    # set to exit the cmdloop
    last_hist = None                # last entry written to readline history
    shunted_query_out = None        # saved stdout while CAPTURE is active
    use_paging = True
    default_page_size = 100
    def __init__(self, hostname, port, color=False,
                 username=None, password=None, encoding=None, stdin=None, tty=True,
                 completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
                 cqlver=DEFAULT_CQLVER, keyspace=None,
                 tracing_enabled=False, expand_enabled=False,
                 display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
                 display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
                 display_date_format=DEFAULT_DATE_FORMAT,
                 display_float_precision=DEFAULT_FLOAT_PRECISION,
                 display_timezone=None,
                 max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
                 ssl=False,
                 single_statement=None,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
                 protocol_version=DEFAULT_PROTOCOL_VERSION,
                 connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS):
        """Connect to a cluster (or adopt `use_conn`) and set up shell state.

        `tty` selects interactive behavior (prompts, banner); `single_statement`
        makes cmdloop run exactly one statement and stop."""
        cmd.Cmd.__init__(self, completekey=completekey)
        self.hostname = hostname
        self.port = port
        self.auth_provider = None
        # Prompt for a password interactively if a username was given alone.
        if username:
            if not password:
                password = getpass.getpass()
            self.auth_provider = PlainTextAuthProvider(username=username, password=password)
        self.username = username
        self.keyspace = keyspace
        self.ssl = ssl
        self.tracing_enabled = tracing_enabled
        self.page_size = self.default_page_size
        self.expand_enabled = expand_enabled
        # Either reuse an injected connection (tests, SOURCE) or build a new
        # Cluster pinned to the single contact host.
        if use_conn:
            self.conn = use_conn
        else:
            self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
                                protocol_version=protocol_version,
                                auth_provider=self.auth_provider,
                                ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
                                load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                                control_connection_timeout=connect_timeout,
                                connect_timeout=connect_timeout)
        # Only shut the cluster down on exit if we created it ourselves.
        self.owns_connection = not use_conn
        self.set_expanded_cql_version(cqlver)

        if keyspace:
            self.session = self.conn.connect(keyspace)
        else:
            self.session = self.conn.connect()

        if browser == "":
            browser = None
        self.browser = browser
        self.color = color

        self.display_nanotime_format = display_nanotime_format
        self.display_timestamp_format = display_timestamp_format
        self.display_date_format = display_date_format

        self.display_float_precision = display_float_precision

        self.display_timezone = display_timezone

        self.session.default_timeout = request_timeout
        self.session.row_factory = ordered_dict_factory
        self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
        self.get_connection_versions()

        self.current_keyspace = keyspace

        # NOTE(review): the three display_* formats below were already
        # assigned a few lines up; these re-assignments are redundant.
        self.display_timestamp_format = display_timestamp_format
        self.display_nanotime_format = display_nanotime_format
        self.display_date_format = display_date_format

        self.max_trace_wait = max_trace_wait
        self.session.max_trace_wait = max_trace_wait

        self.tty = tty
        self.encoding = encoding
        self.check_windows_encoding()

        self.output_codec = codecs.lookup(encoding)

        self.statement = StringIO()
        self.lineno = 1
        self.in_comment = False

        self.prompt = ''
        if stdin is None:
            stdin = sys.stdin

        if tty:
            self.reset_prompt()
            self.report_connection()
            print 'Use HELP for help.'
        else:
            # Non-interactive input: report line numbers in error messages.
            self.show_line_nums = True
        self.stdin = stdin
        self.query_out = sys.stdout
        self.consistency_level = cassandra.ConsistencyLevel.ONE
        self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL

        self.empty_lines = 0
        self.statement_error = False
        self.single_statement = single_statement
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
    def myformat_value(self, val, **kwargs):
        # Format one value using the shell's display settings, collecting
        # decode/format failures in self.decoding_errors instead of raising.
        if isinstance(val, DecodeError):
            self.decoding_errors.append(val)
        try:
            dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
                                       date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
                                       timezone=self.display_timezone)
            return format_value(val, self.output_codec.name,
                                addcolor=self.color, date_time_format=dtformats,
                                float_precision=self.display_float_precision, **kwargs)
        except Exception, e:
            # Formatting failed: record a FormatError and render that instead.
            err = FormatError(val, e)
            self.decoding_errors.append(err)
            return format_value(err, self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
return self.myformat_value(name, colormap=column_colors)
    def report_connection(self):
        # Startup banner: "Connected to ..." line followed by version line.
        self.show_host()
        self.show_version()
    def show_host(self):
        # Print "Connected to <cluster> at host:port" (cluster name in blue).
        print "Connected to %s at %s:%d." % \
            (self.applycolor(self.get_cluster_name(), BLUE),
              self.hostname,
              self.port)
    def show_version(self):
        # Print the cqlsh / Cassandra / CQL spec / protocol version banner.
        vers = self.connection_versions.copy()
        vers['shver'] = version
        # system.Versions['cql'] apparently does not reflect changes with
        # set_cql_version.
        vers['cql'] = self.cql_version
        print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers
    def show_session(self, sessionid, partial_session=False):
        # Delegate to the tracing helper to pretty-print a stored trace session.
        print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': result['native_protocol_version'],
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return map(str, self.conn.metadata.keyspaces.keys())
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).tables.keys())
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).views.keys())
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).indexes.keys())
    def get_column_names(self, ksname, cfname):
        # Column names of a table, as unicode, in metadata order.
        if ksname is None:
            ksname = self.current_keyspace
        layout = self.get_table_meta(ksname, cfname)
        return [unicode(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return self.get_keyspace_meta(ksname).user_types.keys()
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
return [(field_name, field_type.cql_parameterized_type())
for field_name, field_type in zip(user_type.field_names, user_type.field_types)]
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())
    def get_cluster_name(self):
        # Cluster name as reported by driver metadata.
        return self.conn.metadata.cluster_name
    def get_partitioner(self):
        # Partitioner class name as reported by driver metadata.
        return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname not in self.conn.metadata.keyspaces:
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
return self.conn.metadata.keyspaces[ksname]
    def get_keyspaces(self):
        # All KeyspaceMetadata objects known to the driver.
        return self.conn.metadata.keyspaces.values()
    def get_ring(self, ks):
        # Force a token-map rebuild for the keyspace, then return its
        # token -> replica-hosts mapping.
        self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
        return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using external auth implementation so internal tables
# aren't actually defined in schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index %r not found" % idxname)
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view %r not found" % viewname)
return ksmeta.views[viewname]
    def get_object_meta(self, ks, name):
        # Resolve DESCRIBE-style references: with one argument it may name a
        # keyspace or an object in the current keyspace; with two it names
        # an object (table/index/view) inside keyspace `ks`.
        if name is None:
            if ks and ks in self.conn.metadata.keyspaces:
                return self.conn.metadata.keyspaces[ks]
            elif self.current_keyspace is None:
                raise ObjectNotFound("%r not found in keyspaces" % (ks))
            else:
                # `ks` was actually the object name; look in the current keyspace.
                name = ks
                ks = self.current_keyspace

        if ks is None:
            ks = self.current_keyspace

        ksmeta = self.get_keyspace_meta(ks)

        # Search tables, then indexes, then materialized views.
        if name in ksmeta.tables:
            return ksmeta.tables[name]
        elif name in ksmeta.indexes:
            return ksmeta.indexes[name]
        elif name in ksmeta.views:
            return ksmeta.views[name]

        raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in self.get_keyspace_meta(ksname).tables.values()
for trigger in table.triggers.values()]
    def reset_statement(self):
        # Discard any partially-entered statement and restore the main prompt.
        self.reset_prompt()
        self.statement.truncate(0)
        self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)
    def set_continue_prompt(self):
        # Switch to the continuation prompt; after three consecutive empty
        # lines, show a hint about ';' termination instead.
        if self.empty_lines >= 3:
            self.set_prompt("Statements are terminated with a ';'.  You can press CTRL-C to cancel an incomplete statement.")
            self.empty_lines = 0
            return
        if self.current_keyspace is None:
            self.set_prompt(self.continue_prompt)
        else:
            # Pad with spaces so the continuation dots line up under the
            # keyspace-bearing primary prompt.
            spaces = ' ' * len(str(self.current_keyspace))
            self.set_prompt(self.keyspace_continue_prompt % spaces)
        # Count consecutive empty input lines (reset on any real input).
        self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
    @contextmanager
    def prepare_loop(self):
        # Context manager around cmdloop: install our readline completer on
        # entry and restore the previous one on exit.
        readline = None
        if self.tty and self.completekey:
            try:
                import readline
            except ImportError:
                if is_win:
                    print "WARNING: pyreadline dependency missing.  Install to enable tab completion."
                pass
            else:
                old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                # libedit (macOS) uses different binding syntax than GNU readline.
                if readline.__doc__ is not None and 'libedit' in readline.__doc__:
                    readline.parse_and_bind("bind -e")
                    readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
                    readline.parse_and_bind("bind ^R em-inc-search-prev")
                else:
                    readline.parse_and_bind(self.completekey + ": complete")
        try:
            yield
        finally:
            if readline is not None:
                readline.set_completer(old_completer)
    def get_input_line(self, prompt=''):
        # Read one line of input (tty: via raw_input with prompt; otherwise
        # from self.stdin), record it in self.lastcmd, bump the line counter,
        # and raise EOFError at end of input.
        if self.tty:
            try:
                self.lastcmd = raw_input(prompt).decode(self.encoding)
            except UnicodeDecodeError:
                # Undecodable input: drop the line and re-check the console
                # encoding (Windows codepage warning).
                self.lastcmd = ''
                traceback.print_exc()
                self.check_windows_encoding()
            line = self.lastcmd + '\n'
        else:
            self.lastcmd = self.stdin.readline()
            line = self.lastcmd
            if not len(line):
                raise EOFError
        self.lineno += 1
        return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
    def cmdloop(self):
        """
        Adapted from cmd.Cmd's version, because there is literally no way with
        cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
        input and an actual EOF.
        """
        with self.prepare_loop():
            while not self.stop:
                try:
                    if self.single_statement:
                        # -e / --execute mode: run exactly one statement.
                        line = self.single_statement
                        self.stop = True
                    else:
                        line = self.get_input_line(self.prompt)
                    # Accumulate lines until onecmd reports a complete statement.
                    self.statement.write(line)
                    if self.onecmd(self.statement.getvalue()):
                        self.reset_statement()
                except EOFError:
                    self.handle_eof()
                except CQL_ERRORS, cqlerr:
                    # Driver-level errors are reported, not fatal.
                    self.printerr(cqlerr.message.decode(encoding='utf-8'))
                except KeyboardInterrupt:
                    # Ctrl-C cancels the in-progress statement.
                    self.reset_statement()
                    print
    def onecmd(self, statementtext):
        """
        Returns true if the statement is complete and was handled (meaning it
        can be reset).
        """
        try:
            statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
        except pylexotron.LexingError, e:
            # Report the error location; with line numbers when reading a file.
            if self.show_line_nums:
                self.printerr('Invalid syntax at char %d' % (e.charnum,))
            else:
                self.printerr('Invalid syntax at line %d, char %d'
                              % (e.linenum, e.charnum))
                statementline = statementtext.split('\n')[e.linenum - 1]
                self.printerr('  %s' % statementline)
                self.printerr(' %s^' % (' ' * e.charnum))
            return True

        # Drop trailing empty statements produced by the splitter.
        while statements and not statements[-1]:
            statements = statements[:-1]
        if not statements:
            return True

        # Statement not yet terminated with ';': keep accumulating input.
        if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
            self.set_continue_prompt()
            return

        for st in statements:
            try:
                self.handle_statement(st, statementtext)
            except Exception, e:
                if self.debug:
                    traceback.print_exc()
                else:
                    self.printerr(e)
        return True
    def handle_eof(self):
        # End of input: try to run any pending partial statement, warn if it
        # is incomplete, then exit the shell.
        if self.tty:
            print
        statement = self.statement.getvalue()
        if statement.strip():
            if not self.onecmd(statement):
                self.printerr('Incomplete statement at end of file')
        self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist.encode(self.encoding))
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
    """Execute USE and, on success, remember the new current keyspace."""
    ksname = parsed.get_binding('ksname')
    success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
    if not success:
        return
    # Quoted names keep their exact case; unquoted names fold to lowercase.
    if ksname[0] == '"' and ksname[-1] == '"':
        self.current_keyspace = self.cql_unprotect_name(ksname)
    else:
        self.current_keyspace = ksname.lower()
def do_select(self, parsed):
    """Run a SELECT, suppressing tracing for queries against system_traces."""
    saved_tracing = self.tracing_enabled
    ksname = parsed.get_binding('ksname')
    # Tracing a query on the trace tables themselves would be self-referential.
    reading_traces = (ksname == 'system_traces' or
                      (ksname is None and self.current_keyspace == 'system_traces'))
    if reading_traces:
        self.tracing_enabled = False
    self.perform_statement(parsed.extract_orig())
    self.tracing_enabled = saved_tracing
def perform_statement(self, statement):
    """Execute a raw CQL string using the shell's consistency, serial
    consistency and paging settings; print warnings and trace output.

    Returns True on success, False otherwise.
    """
    stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
    success, future = self.perform_simple_statement(stmt)

    if future:
        if future.warnings:
            self.print_warnings(future.warnings)

        if self.tracing_enabled:
            try:
                for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
                    print_trace(self, trace)
            except TraceUnavailable:
                msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
                self.writeresult(msg, color=RED)
                # Show whatever partial trace sessions are available anyway.
                for trace_id in future.get_query_trace_ids():
                    self.show_session(trace_id, partial_session=True)
            except Exception, err:
                self.printerr("Unable to fetch query trace: %s" % (str(err),))

    return success
def parse_for_select_meta(self, query_string):
    """Parse a SELECT and return metadata for the table or view it reads.

    Returns None when the statement cannot be parsed at all.
    """
    try:
        parse_tree = cqlruleset.cql_parse(query_string)[1]
    except IndexError:
        return None
    keyspace = self.cql_unprotect_name(parse_tree.get_binding('ksname', None))
    relation = self.cql_unprotect_name(parse_tree.get_binding('cfname', None))
    try:
        return self.get_table_meta(keyspace, relation)
    except ColumnFamilyNotFound:
        # Not a table; perhaps it is a materialized view.
        try:
            return self.get_view_meta(keyspace, relation)
        except MaterializedViewNotFound:
            raise ObjectNotFound("%r not found in keyspace %r" % (relation, keyspace))
def parse_for_update_meta(self, query_string):
    """Parse a write statement and return metadata for its target table.

    Returns None when the statement cannot be parsed at all.
    """
    try:
        parse_tree = cqlruleset.cql_parse(query_string)[1]
    except IndexError:
        return None
    keyspace = self.cql_unprotect_name(parse_tree.get_binding('ksname', None))
    table = self.cql_unprotect_name(parse_tree.get_binding('cfname'))
    return self.get_table_meta(keyspace, table)
def perform_simple_statement(self, statement):
    """Execute one driver statement object and print its result.

    Returns a (success, response_future) pair; on failure the error is
    printed and (False, None) is returned.
    """
    if not statement:
        return False, None

    future = self.session.execute_async(statement, trace=self.tracing_enabled)
    result = None
    try:
        result = future.result()
    except CQL_ERRORS, err:
        self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
    except Exception:
        import traceback
        self.printerr(traceback.format_exc())

    # Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
    if not future.is_schema_agreed:
        try:
            self.conn.refresh_schema_metadata(5)  # will throw exception if there is a schema mismatch
        except Exception:
            self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
                          "nodes in system.local and system.peers.")
            # -1 waits indefinitely for agreement.
            self.conn.refresh_schema_metadata(-1)

    if result is None:
        return False, None

    if statement.query_string[:6].lower() == 'select':
        self.print_result(result, self.parse_for_select_meta(statement.query_string))
    elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
        # LIST USERS / LIST ROLES read from system_auth.roles.
        self.print_result(result, self.get_table_meta('system_auth', 'roles'))
    elif statement.query_string.lower().startswith("list"):
        # Remaining LIST commands show permissions.
        self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
    elif result:
        # CAS INSERT/UPDATE
        self.writeresult("")
        self.print_static_result(result.column_names, list(result), self.parse_for_update_meta(statement.query_string))
    self.flush_output()
    return True, future
def print_result(self, result, table_meta):
    """Print a full query result, paging interactively on a tty.

    Decoding errors collected while formatting values are summarized
    after the row count (at most two shown explicitly).
    """
    self.decoding_errors = []

    self.writeresult("")
    if result.has_more_pages and self.tty:
        # Interactive paging: show one page, wait for a keypress, fetch next.
        num_rows = 0
        while True:
            page = result.current_rows
            if page:
                num_rows += len(page)
                self.print_static_result(result.column_names, page, table_meta)
            if result.has_more_pages:
                raw_input("---MORE---")
                result.fetch_next_page()
            else:
                break
    else:
        # Non-interactive or single page: materialize everything at once.
        rows = list(result)
        num_rows = len(rows)
        self.print_static_result(result.column_names, rows, table_meta)
    self.writeresult("(%d rows)" % num_rows)

    if self.decoding_errors:
        for err in self.decoding_errors[:2]:
            self.writeresult(err.message(), color=RED)
        if len(self.decoding_errors) > 2:
            self.writeresult('%d more decoding errors suppressed.'
                             % (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, column_names, rows, table_meta):
    """Render one already-fetched page of rows (header only when empty)."""
    if not (column_names or table_meta):
        return

    names = column_names or table_meta.columns.keys()
    headers = [self.myformat_colname(name, table_meta) for name in names]
    if not rows:
        # print header only
        self.print_formatted_result(headers, None)
        return

    cells = [map(self.myformat_value, row.values()) for row in rows]
    if self.expand_enabled:
        self.print_formatted_result_vertically(headers, cells)
    else:
        self.print_formatted_result(headers, cells)
def print_formatted_result(self, formatted_names, formatted_values):
    """Print rows as an aligned table; header only when values is None."""
    # determine column widths
    widths = [name.displaywidth for name in formatted_names]
    if formatted_values is not None:
        for row in formatted_values:
            for idx, cell in enumerate(row):
                widths[idx] = max(widths[idx], cell.displaywidth)

    # print header
    header_cells = [hdr.ljust(w, color=self.color) for hdr, w in zip(formatted_names, widths)]
    self.writeresult(' ' + ' | '.join(header_cells).rstrip())
    self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))

    # stop if there are no rows
    if formatted_values is None:
        self.writeresult("")
        return

    # print row data
    for row in formatted_values:
        row_cells = [cell.rjust(w, color=self.color) for cell, w in zip(row, widths)]
        self.writeresult(' ' + ' | '.join(row_cells))
    self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
    """Print each row as its own column/value listing (EXPAND ON output)."""
    name_width = max([name.displaywidth for name in formatted_names])
    value_width = max([cell.displaywidth for row in formatted_values for cell in row])

    # for each row returned, list all the column-value pairs
    for row_number, row in enumerate(formatted_values):
        self.writeresult("@ Row %d" % (row_number + 1))
        self.writeresult('-%s-' % '-+-'.join(['-' * name_width, '-' * value_width]))
        for col_index, cell in enumerate(row):
            label = formatted_names[col_index].ljust(name_width, color=self.color)
            body = cell.ljust(cell.displaywidth, color=self.color)
            self.writeresult(' ' + " | ".join([label, body]))
        self.writeresult('')
def print_warnings(self, warnings):
    """Print server-generated warnings, one per line, framed by blank lines.

    Does nothing when *warnings* is None or empty.
    """
    # Idiomatic emptiness test covering both None and an empty sequence
    # (equivalent to the former explicit None / len() == 0 check).
    if not warnings:
        return
    self.writeresult('')
    self.writeresult('Warnings :')
    for warning in warnings:
        self.writeresult(warning)
    self.writeresult('')
def emptyline(self):
    # A blank input line is a no-op (overrides cmd.Cmd's default of
    # repeating the previous command).
    pass
def parseline(self, line):
    # cmd.Cmd's line parsing is bypassed entirely by our onecmd override,
    # so reaching this method indicates a programming error.
    # this shouldn't be needed
    raise NotImplementedError
def complete(self, text, state):
    """readline completion entry point: return the state-th candidate
    for *text*, or None when candidates are exhausted."""
    if readline is None:
        return
    if state == 0:
        try:
            self.completion_matches = self.find_completions(text)
        except Exception:
            # Print the traceback only when completion debugging is on;
            # otherwise re-raise.
            if debug_completion:
                import traceback
                traceback.print_exc()
            else:
                raise
    try:
        return self.completion_matches[state]
    except IndexError:
        return None
def find_completions(self, text):
    """Return completion candidates for *text* at the current cursor spot."""
    current_line = readline.get_line_buffer()
    buffered = self.statement.getvalue()
    whole_statement = buffered + current_line
    # Context for completion is everything typed before the word being
    # completed, including lines already buffered for this statement.
    begin = readline.get_begidx() + len(buffered)
    prefix = whole_statement[:begin]
    return cqlruleset.cql_complete(prefix, text, cassandra_conn=self,
                                   debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
    """Set the interactive prompt, optionally prefixed with the login name."""
    if prepend_user and self.username:
        self.prompt = "%s@%s" % (self.username, prompt)
    else:
        self.prompt = prompt
def cql_unprotect_name(self, namestr):
    """Strip CQL double-quote protection from a name; None passes through."""
    return cqlruleset.dequote_name(namestr) if namestr is not None else None
def cql_unprotect_value(self, valstr):
    """Strip CQL single-quote protection from a value; None passes through."""
    return cqlruleset.dequote_value(valstr) if valstr is not None else None
def print_recreate_keyspace(self, ksdef, out):
    # Write the keyspace's full recreation CQL, newline-terminated, to *out*.
    out.write(ksdef.export_as_string())
    out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
    """Write to *out* the CQL commands that recreate the given table.

    The output is intended to be pasteable back into a CQL session.
    """
    table_meta = self.get_table_meta(ksname, cfname)
    out.write(table_meta.export_as_string())
    out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
    """Write to *out* the CQL command that recreates the given index.

    The output is intended to be pasteable back into a CQL session.
    """
    index_meta = self.get_index_meta(ksname, idxname)
    out.write(index_meta.export_as_string())
    out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
    """Write to *out* the CQL command that recreates the given view.

    The output is intended to be pasteable back into a CQL session.
    """
    view_meta = self.get_view_meta(ksname, viewname)
    out.write(view_meta.export_as_string())
    out.write("\n")
def print_recreate_object(self, ks, name, out):
    """Write to *out* the CQL that recreates the named object — a keyspace,
    table or index, whichever the name resolves to.

    The output is intended to be pasteable back into a CQL session.
    """
    obj_meta = self.get_object_meta(ks, name)
    out.write(obj_meta.export_as_string())
    out.write("\n")
def describe_keyspaces(self):
    """Print the names of all keyspaces, columnized."""
    print
    # Call the base columnize directly: our override sorts and uppercases.
    names = protect_names(self.get_keyspace_names())
    cmd.Cmd.columnize(self, names)
    print
def describe_keyspace(self, ksname):
    # Dump the recreation CQL for one keyspace to stdout, framed by blank lines.
    print
    self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
    print
def describe_columnfamily(self, ksname, cfname):
    """Dump recreation CQL for a table, defaulting to the current keyspace."""
    if ksname is None:
        ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
    print
    self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
    print
def describe_index(self, ksname, idxname):
    # Dump the recreation CQL for one index to stdout, framed by blank lines.
    print
    self.print_recreate_index(ksname, idxname, sys.stdout)
    print
def describe_materialized_view(self, ksname, viewname):
    # Dump the recreation CQL for one materialized view to stdout.
    # Falls back to the current keyspace when none is given.
    if ksname is None:
        ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
    print
    self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
    print
def describe_object(self, ks, name):
    # Dump the recreation CQL for a name of unknown kind (keyspace, table
    # or index) to stdout, framed by blank lines.
    print
    self.print_recreate_object(ks, name, sys.stdout)
    print
def describe_columnfamilies(self, ksname):
print
if ksname is None:
for k in self.get_keyspaces():
name = protect_name(k.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
print
else:
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
print
def describe_functions(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
print
else:
ksmeta = self.get_keyspace_meta(ksname)
cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
print
def describe_function(self, ksname, functionname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
if len(functions) == 0:
raise FunctionNotFound("User defined function %r not found" % functionname)
print "\n\n".join(func.export_as_string() for func in functions)
print
def describe_aggregates(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
print
else:
ksmeta = self.get_keyspace_meta(ksname)
cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
print
def describe_aggregate(self, ksname, aggregatename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
if len(aggregates) == 0:
raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
print
def describe_usertypes(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
print
else:
ksmeta = self.get_keyspace_meta(ksname)
cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
print
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
print usertype.export_as_string()
print
def describe_cluster(self):
print '\nCluster: %s' % self.get_cluster_name()
p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
print 'Partitioner: %s\n' % p
# TODO: snitch?
# snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
# print 'Snitch: %s\n' % snitch
if self.current_keyspace is not None and self.current_keyspace != 'system':
print "Range ownership:"
ring = self.get_ring(self.current_keyspace)
for entry in ring.items():
print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
print
def describe_schema(self, include_system=False):
    """Dump recreation CQL for every keyspace (system ones only on request)."""
    print
    for ksmeta in self.get_keyspaces():
        if include_system or ksmeta.name not in cql3handling.SYSTEM_KEYSPACES:
            self.print_recreate_keyspace(ksmeta, sys.stdout)
            print
def do_describe(self, parsed):
    """
    DESCRIBE [cqlsh only]

    (DESC may be used as a shorthand.)

      Outputs information about the connected Cassandra cluster, or about
      the data objects stored in the cluster. Use in one of the following ways:

    DESCRIBE KEYSPACES

      Output the names of all keyspaces.

    DESCRIBE KEYSPACE [<keyspacename>]

      Output CQL commands that could be used to recreate the given keyspace,
      and the objects in it (such as tables, types, functions, etc.).
      In some cases, as the CQL interface matures, there will be some metadata
      about a keyspace that is not representable with CQL. That metadata will not be shown.

      The '<keyspacename>' argument may be omitted, in which case the current
      keyspace will be described.

    DESCRIBE TABLES

      Output the names of all tables in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE TABLE [<keyspace>.]<tablename>

      Output CQL commands that could be used to recreate the given table.
      In some cases, as above, there may be table metadata which is not
      representable and which will not be shown.

    DESCRIBE INDEX <indexname>

      Output the CQL command that could be used to recreate the given index.
      In some cases, there may be index metadata which is not representable
      and which will not be shown.

    DESCRIBE MATERIALIZED VIEW <viewname>

      Output the CQL command that could be used to recreate the given materialized view.
      In some cases, there may be materialized view metadata which is not representable
      and which will not be shown.

    DESCRIBE CLUSTER

      Output information about the connected Cassandra cluster, such as the
      cluster name, and the partitioner and snitch in use. When you are
      connected to a non-system keyspace, also shows endpoint-range
      ownership information for the Cassandra ring.

    DESCRIBE [FULL] SCHEMA

      Output CQL commands that could be used to recreate the entire (non-system) schema.
      Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
      k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.

    DESCRIBE TYPES

      Output the names of all user-defined-types in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE TYPE [<keyspace>.]<type>

      Output the CQL command that could be used to recreate the given user-defined-type.

    DESCRIBE FUNCTIONS

      Output the names of all user-defined-functions in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE FUNCTION [<keyspace>.]<function>

      Output the CQL command that could be used to recreate the given user-defined-function.

    DESCRIBE AGGREGATES

      Output the names of all user-defined-aggregates in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE AGGREGATE [<keyspace>.]<aggregate>

      Output the CQL command that could be used to recreate the given user-defined-aggregate.

    DESCRIBE <objname>

      Output CQL commands that could be used to recreate the entire object schema,
      where object can be either a keyspace or a table or an index or a materialized
      view (in this order).
    """
    # Dispatch on the first keyword after DESCRIBE.
    what = parsed.matched[1][1].lower()
    if what == 'functions':
        self.describe_functions(self.current_keyspace)
    elif what == 'function':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
        self.describe_function(ksname, functionname)
    elif what == 'aggregates':
        self.describe_aggregates(self.current_keyspace)
    elif what == 'aggregate':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
        self.describe_aggregate(ksname, aggregatename)
    elif what == 'keyspaces':
        self.describe_keyspaces()
    elif what == 'keyspace':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
        if not ksname:
            ksname = self.current_keyspace
            if ksname is None:
                self.printerr('Not in any keyspace.')
                return
        self.describe_keyspace(ksname)
    elif what in ('columnfamily', 'table'):
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
        self.describe_columnfamily(ks, cf)
    elif what == 'index':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
        self.describe_index(ks, idx)
    elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
        self.describe_materialized_view(ks, mv)
    elif what in ('columnfamilies', 'tables'):
        self.describe_columnfamilies(self.current_keyspace)
    elif what == 'types':
        self.describe_usertypes(self.current_keyspace)
    elif what == 'type':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        ut = self.cql_unprotect_name(parsed.get_binding('utname'))
        self.describe_usertype(ks, ut)
    elif what == 'cluster':
        self.describe_cluster()
    elif what == 'schema':
        self.describe_schema(False)
    elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
        self.describe_schema(True)
    elif what:
        # Bare DESCRIBE <name>: try table, then index, then materialized view.
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        name = self.cql_unprotect_name(parsed.get_binding('cfname'))
        if not name:
            name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
        if not name:
            name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
        self.describe_object(ks, name)

do_desc = do_describe
def do_copy(self, parsed):
    r"""
    COPY [cqlsh only]

      COPY x FROM: Imports CSV data into a Cassandra table
      COPY x TO: Exports data from a Cassandra table in CSV format.

    COPY <table_name> [ ( column [, ...] ) ]
         FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
         [ WITH <option>='value' [AND ...] ];

    File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.

    COPY <table_name> [ ( column [, ...] ) ]
         TO ( '<filename>' | STDOUT )
         [ WITH <option>='value' [AND ...] ];

    Available common COPY options and defaults:

      DELIMITER=','           - character that appears between records
      QUOTE='"'               - quoting character to be used to quote fields
      ESCAPE='\'              - character to appear before the QUOTE char when quoted
      HEADER=false            - whether to ignore the first line
      NULL=''                 - string that represents a null value
      DATETIMEFORMAT=         - timestamp strftime format
        '%Y-%m-%d %H:%M:%S%z'   defaults to time_format value in cqlshrc
      MAXATTEMPTS=5           - the maximum number of attempts per batch or range
      REPORTFREQUENCY=0.25    - the frequency with which we display status updates in seconds
      DECIMALSEP='.'          - the separator for decimal values
      THOUSANDSSEP=''         - the separator for thousands digit groups
      BOOLSTYLE='True,False'  - the representation for booleans, case insensitive, specify true followed by false,
                                for example yes,no or 1,0
      NUMPROCESSES=n          - the number of worker processes, by default the number of cores minus one
                                capped at 16
      CONFIGFILE=''           - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
                                documentation) where you can specify WITH options under the following optional
                                sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
                                [copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
                                name. Options are read from these sections, in the order specified
                                above, and command line options always override options in configuration files.
                                Depending on the COPY direction, only the relevant copy-from or copy-to sections
                                are used. If no configfile is specified then .cqlshrc is searched instead.
      RATEFILE=''             - an optional file where to print the output statistics

    Available COPY FROM options and defaults:

      CHUNKSIZE=5000          - the size of chunks passed to worker processes
      INGESTRATE=100000       - an approximate ingest rate in rows per second
      MINBATCHSIZE=10         - the minimum size of an import batch
      MAXBATCHSIZE=20         - the maximum size of an import batch
      MAXROWS=-1              - the maximum number of rows, -1 means no maximum
      SKIPROWS=0              - the number of rows to skip
      SKIPCOLS=''             - a comma separated list of column names to skip
      MAXPARSEERRORS=-1       - the maximum global number of parsing errors, -1 means no maximum
      MAXINSERTERRORS=1000    - the maximum global number of insert errors, -1 means no maximum
      ERRFILE=''              - a file where to store all rows that could not be imported, by default this is
                                import_ks_table.err where <ks> is your keyspace and <table> is your table name.
      PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
                                False if you don't mind shifting data parsing to the cluster. The cluster will also
                                have to compile every batch statement. For large and oversized clusters
                                this will result in a faster import but for smaller clusters it may generate
                                timeouts.

    Available COPY TO options and defaults:

      ENCODING='utf8'          - encoding for CSV output
      PAGESIZE='1000'          - the page size for fetching results
      PAGETIMEOUT=10           - the page timeout in seconds for fetching results
      BEGINTOKEN=''            - the minimum token string to consider when exporting data
      ENDTOKEN=''              - the maximum token string to consider when exporting data
      MAXREQUESTS=6            - the maximum number of requests each worker process can work on in parallel
      MAXOUTPUTSIZE='-1'       - the maximum size of the output file measured in number of lines,
                                 beyond this maximum the output file will be split into segments,
                                 -1 means unlimited.

    When entering CSV data on STDIN, you can use the sequence "\."
    on a line by itself to end the data input.
    """
    # Resolve the target keyspace, falling back to the current one.
    ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
    if ks is None:
        ks = self.current_keyspace
        if ks is None:
            raise NoKeyspaceError("Not in any keyspace.")
    table = self.cql_unprotect_name(parsed.get_binding('cfname'))
    columns = parsed.get_binding('colnames', None)
    if columns is not None:
        columns = map(self.cql_unprotect_name, columns)
    else:
        # default to all known columns
        columns = self.get_column_names(ks, table)

    fname = parsed.get_binding('fname', None)
    if fname is not None:
        fname = self.cql_unprotect_value(fname)

    # Collect WITH options into a plain dict of lowercase names.
    copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
    copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
    opts = dict(zip(copyoptnames, copyoptvals))

    direction = parsed.get_binding('dir').upper()
    if direction == 'FROM':
        task = ImportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
    elif direction == 'TO':
        task = ExportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
    else:
        raise SyntaxError("Unknown direction %s" % direction)

    task.run()
def do_show(self, parsed):
    """
    SHOW [cqlsh only]

      Displays information about the current cqlsh session. Can be called in
      the following ways:

    SHOW VERSION

      Shows the version and build of the connected Cassandra instance, as
      well as the versions of the CQL spec and the Thrift protocol that
      the connected Cassandra instance understands.

    SHOW HOST

      Shows where cqlsh is currently connected.

    SHOW SESSION <sessionid>

      Pretty-prints the requested tracing session.
    """
    topic = parsed.get_binding('what').lower()
    if topic == 'version':
        self.get_connection_versions()
        self.show_version()
    elif topic == 'host':
        self.show_host()
    elif topic.startswith('session'):
        session_id = parsed.get_binding('sessionid').lower()
        self.show_session(UUID(session_id))
    else:
        self.printerr('Wait, how do I show %r?' % (topic,))
def do_source(self, parsed):
    """
    SOURCE [cqlsh only]

    Executes a file containing CQL statements. Gives the output for each
    statement in turn, if any, or any errors that occur along the way.

    Errors do NOT abort execution of the CQL source file.

    Usage:

      SOURCE '<file>';

    That is, the path to the file to be executed must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.

    See also the --file option to cqlsh.
    """
    fname = parsed.get_binding('fname')
    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        # Honor a byte-order mark if the file carries one.
        encoding, bom_size = get_file_encoding_bomsize(fname)
        f = codecs.open(fname, 'r', encoding)
        f.seek(bom_size)
    except IOError, e:
        self.printerr('Could not open %r: %s' % (fname, e))
        return
    username = self.auth_provider.username if self.auth_provider else None
    password = self.auth_provider.password if self.auth_provider else None
    # Run the file through a nested, non-tty Shell sharing this connection
    # and all display settings.
    subshell = Shell(self.hostname, self.port, color=self.color,
                     username=username, password=password,
                     encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
                     cqlver=self.cql_version, keyspace=self.current_keyspace,
                     tracing_enabled=self.tracing_enabled,
                     display_nanotime_format=self.display_nanotime_format,
                     display_timestamp_format=self.display_timestamp_format,
                     display_date_format=self.display_date_format,
                     display_float_precision=self.display_float_precision,
                     display_timezone=self.display_timezone,
                     max_trace_wait=self.max_trace_wait, ssl=self.ssl,
                     request_timeout=self.session.default_timeout,
                     connect_timeout=self.conn.connect_timeout)
    subshell.cmdloop()
    f.close()
def do_capture(self, parsed):
    """
    CAPTURE [cqlsh only]

    Begins capturing command output and appending it to a specified file.
    Output will not be shown at the console while it is captured.

    Usage:

      CAPTURE '<file>';
      CAPTURE OFF;
      CAPTURE;

    That is, the path to the file to be appended to must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.

    Only query result output is captured. Errors and output from cqlsh-only
    commands will still be shown in the cqlsh session.

    To stop capturing output and show it in the cqlsh session again, use
    CAPTURE OFF.

    To inspect the current capture configuration, use CAPTURE with no
    arguments.
    """
    fname = parsed.get_binding('fname')
    if fname is None:
        # Bare CAPTURE: report current state.
        if self.shunted_query_out is not None:
            print "Currently capturing query output to %r." % (self.query_out.name,)
        else:
            print "Currently not capturing query output."
        return

    if fname.upper() == 'OFF':
        if self.shunted_query_out is None:
            self.printerr('Not currently capturing output.')
            return
        # Restore the original output stream and color setting.
        self.query_out.close()
        self.query_out = self.shunted_query_out
        self.color = self.shunted_color
        self.shunted_query_out = None
        del self.shunted_color
        return

    if self.shunted_query_out is not None:
        self.printerr('Already capturing output to %s. Use CAPTURE OFF'
                      ' to disable.' % (self.query_out.name,))
        return

    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        f = open(fname, 'a')
    except IOError, e:
        self.printerr('Could not open %r for append: %s' % (fname, e))
        return
    # Stash the live stream/color so CAPTURE OFF can restore them; disable
    # color while capturing so the file gets no escape codes.
    self.shunted_query_out = self.query_out
    self.shunted_color = self.color
    self.query_out = f
    self.color = False
    print 'Now capturing query output to %r.' % (fname,)
def do_tracing(self, parsed):
    """
    TRACING [cqlsh]

      Enables or disables request tracing.

    TRACING ON

      Enables tracing for all further requests.

    TRACING OFF

      Disables tracing.

    TRACING

      TRACING with no arguments shows the current tracing status.
    """
    # SwitchCommand handles ON/OFF/status parsing and reporting.
    self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
    """
    EXPAND [cqlsh]

      Enables or disables expanded (vertical) output.

    EXPAND ON

      Enables expanded (vertical) output.

    EXPAND OFF

      Disables expanded (vertical) output.

    EXPAND

      EXPAND with no arguments shows the current value of expand setting.
    """
    # SwitchCommand handles ON/OFF/status parsing and reporting.
    self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Consistency level set to %s.' % (level.upper(),)
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Serial consistency level set to %s.' % (level.upper(),)
def do_login(self, parsed):
    """
    LOGIN [cqlsh only]

       Changes login information without requiring restart.

    LOGIN <username> (<password>)

       Login using the specified username. If password is specified, it will be used
       otherwise, you will be prompted to enter.
    """
    username = parsed.get_binding('username')
    password = parsed.get_binding('password')
    if password is None:
        password = getpass.getpass()
    else:
        # Strip the surrounding quote characters from the parsed literal.
        password = password[1:-1]

    auth_provider = PlainTextAuthProvider(username=username, password=password)

    # Build a fresh cluster connection pinned to the same single host,
    # reusing the current connection's protocol/SSL settings.
    conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
                   protocol_version=self.conn.protocol_version,
                   auth_provider=auth_provider,
                   ssl_options=self.conn.ssl_options,
                   load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                   control_connection_timeout=self.conn.connect_timeout,
                   connect_timeout=self.conn.connect_timeout)

    if self.current_keyspace:
        session = conn.connect(self.current_keyspace)
    else:
        session = conn.connect()

    # Update after we've connected in case we fail to authenticate
    self.conn = conn
    self.auth_provider = auth_provider
    self.username = username
    self.session = session
    def do_exit(self, parsed=None):
        """
        EXIT/QUIT [cqlsh only]
        Exits cqlsh.
        """
        # Signal the cmdloop to stop after this command.
        self.stop = True
        # NOTE(review): owns_connection presumably distinguishes a connection
        # this shell created from one handed in by a parent shell -- confirm
        # in Shell.__init__ before relying on it.
        if self.owns_connection:
            self.conn.shutdown()
    # QUIT is an exact alias for EXIT.
    do_quit = do_exit
    def do_clear(self, parsed):
        """
        CLEAR/CLS [cqlsh only]
        Clears the console.
        """
        import subprocess
        # 'clear' on POSIX terminals, 'cls' on Windows. shell=True is safe
        # here because the command is a fixed literal, never user input.
        subprocess.call(['clear', 'cls'][is_win], shell=True)
    # CLS is an exact alias for CLEAR.
    do_cls = do_clear
    def do_debug(self, parsed):
        # Drop into the Python debugger. Deliberately has no docstring: a
        # docstring would make DEBUG appear in the HELP topic list, since
        # get_help_topics() keys off do_* methods that have a __doc__.
        import pdb
        pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
    def columnize(self, slist, *a, **kw):
        # Help-topic listing: show topics uppercased and alphabetized to
        # match CQL keyword style.
        return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
    def do_help(self, parsed):
        """
        HELP [cqlsh only]
        Gives information about cqlsh commands. To see available topics,
        enter "HELP" without any arguments. To see help on a topic,
        use "HELP <topic>".
        """
        topics = parsed.get_binding('topic', ())
        if not topics:
            # No topic given: list everything we know about.
            shell_topics = [t.upper() for t in self.get_help_topics()]
            self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
            cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
            self.print_topics("CQL help topics:", cql_topics, 15, 80)
            return
        for t in topics:
            if t.lower() in self.get_help_topics():
                # Shell commands document themselves via their docstrings.
                doc = getattr(self, 'do_' + t.lower()).__doc__
                self.stdout.write(doc + "\n")
            elif t.lower() in cqldocs.get_help_topics():
                # CQL topics live in the HTML documentation; open a browser,
                # or print the URL when no browser is available.
                urlpart = cqldocs.get_help_topic(t)
                if urlpart is not None:
                    url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
                    if len(webbrowser._tryorder) == 0:
                        self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
                    elif self.browser is not None:
                        webbrowser.get(self.browser).open_new_tab(url)
                    else:
                        webbrowser.open_new_tab(url)
            else:
                self.printerr("*** No help on %s" % (t,))
    def do_unicode(self, parsed):
        """
        Textual input/output
        When control characters, or other characters which can't be encoded
        in your current locale, are found in values of 'text' or 'ascii'
        types, it will be shown as a backslash escape. If color is enabled,
        any such backslash escapes will be shown in a different color from
        the surrounding text.
        Unicode code points in your data will be output intact, if the
        encoding for your locale is capable of decoding them. If you prefer
        that non-ascii characters be shown with Python-style "\\uABCD"
        escape sequences, invoke cqlsh with an ASCII locale (for example,
        by setting the $LANG environment variable to "C").
        """
        # Help-only topic: the docstring above is the entire payload and the
        # command itself is intentionally a no-op.
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print("Page size: {}".format(self.page_size))
else:
self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
    def writeresult(self, text, color=None, newline=True, out=None):
        # Write text to `out` (default: the current query output stream),
        # optionally colorized and newline-terminated.
        if out is None:
            out = self.query_out
        # convert Exceptions, etc to text
        if not isinstance(text, (unicode, str)):
            text = unicode(text)
        # Python 2 byte streams expect encoded bytes, so encode unicode with
        # the configured output encoding before writing.
        if isinstance(text, unicode):
            text = text.encode(self.encoding)
        to_write = self.applycolor(text, color) + ('\n' if newline else '')
        out.write(to_write)
    def flush_output(self):
        # Flush the active query output stream (stdout, or a CAPTURE target).
        self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
    # Generic handler for two-state toggle commands (TRACING, EXPAND, ...):
    # with no argument it reports the state, with ON/OFF it changes it.
    command = None
    description = None

    def __init__(self, command, desc):
        self.command = command
        self.description = desc

    def execute(self, state, parsed, printerr):
        # Returns the (possibly updated) boolean state.
        switch = parsed.get_binding('switch')
        if switch is None:
            # No argument: just report the current state.
            if state:
                print "%s is currently enabled. Use %s OFF to disable" \
                    % (self.description, self.command)
            else:
                print "%s is currently disabled. Use %s ON to enable." \
                    % (self.description, self.command)
            return state
        if switch.upper() == 'ON':
            if state:
                printerr('%s is already enabled. Use %s OFF to disable.'
                         % (self.description, self.command))
                return state
            print 'Now %s is enabled' % (self.description,)
            return True
        if switch.upper() == 'OFF':
            if not state:
                printerr('%s is not enabled.' % (self.description,))
                return state
            print 'Disabled %s.' % (self.description,)
            return False
        # NOTE(review): falls through (returns None) for any other token;
        # presumably the grammar only ever binds ON/OFF here -- confirm.
class SwitchCommandWithValue(SwitchCommand):
    """A SwitchCommand that also accepts a value in place of ON.

    execute() returns (SWITCH_VALUE, PASSED_VALUE) tuples, e.g.:
        PAGING 50   -> (True, 50)
        PAGING OFF  -> (False, None)
        PAGING ON   -> (True, None)
    If the token does not convert via value_type, PASSED_VALUE is None.
    """
    def __init__(self, command, desc, value_type=int):
        SwitchCommand.__init__(self, command, desc)
        self.value_type = value_type

    def execute(self, state, parsed, printerr):
        # Run the plain ON/OFF handling first; it also covers the
        # no-argument "report state" case.
        enabled = SwitchCommand.execute(self, state, parsed, printerr)
        raw_token = parsed.get_binding('switch')
        try:
            passed_value = self.value_type(raw_token)
            # A bare value (e.g. "PAGING 50") implies switching on.
            enabled = True
        except (ValueError, TypeError):
            passed_value = None
        return (enabled, passed_value)
def option_with_default(cparser_getter, section, option, default=None):
    """Read section/option through the given ConfigParser getter, returning
    `default` when the section or option is missing (or otherwise invalid)."""
    try:
        value = cparser_getter(section, option)
    except ConfigParser.Error:
        return default
    return value
def raw_option_with_default(configs, section, option, default=None):
    """
    Same (almost) as option_with_default() but won't do any string interpolation.
    Useful for config values that include '%' symbol, e.g. time format string.
    """
    try:
        # raw=True disables ConfigParser's '%' interpolation.
        return configs.get(section, option, raw=True)
    except ConfigParser.Error:
        return default
def should_use_color():
    """Auto-detect whether stdout can sensibly display ANSI colors."""
    if not sys.stdout.isatty():
        return False
    term = os.environ.get('TERM', '')
    if term in ('', 'dumb'):
        return False
    try:
        # Ask the terminal how many colors it supports.
        import subprocess
        proc = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        return int(out.strip()) >= 8
    except (OSError, ImportError, ValueError):
        # oh well, we tried. at least we know there's a $TERM and it's
        # not "dumb".
        return True
def read_options(cmdlineargs, environment):
    """Combine cqlshrc settings, environment variables and command-line
    arguments -- in increasing order of precedence -- and return the tuple
    (options, hostname, port)."""
    configs = ConfigParser.SafeConfigParser()
    configs.read(CONFIG_FILE)

    # A raw (non-interpolating) parser for values that legitimately
    # contain '%', such as passwords and time-format strings.
    rawconfigs = ConfigParser.RawConfigParser()
    rawconfigs.read(CONFIG_FILE)

    # Seed the option values from cqlshrc so the command line overrides them.
    optvalues = optparse.Values()
    optvalues.username = option_with_default(configs.get, 'authentication', 'username')
    optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
    optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
    optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
    optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
                                               DEFAULT_COMPLETEKEY)
    optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
    optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
                                                    DEFAULT_TIMESTAMP_FORMAT)
    optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
                                                        DEFAULT_NANOTIME_FORMAT)
    optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
                                                    DEFAULT_DATE_FORMAT)
    optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
                                                    DEFAULT_FLOAT_PRECISION)
    optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
    optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
                                                   DEFAULT_MAX_TRACE_WAIT)
    optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
    optvalues.debug = False
    optvalues.file = None
    optvalues.ssl = False
    optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
    optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
    optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', DEFAULT_CQLVER)
    optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
    optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
    optvalues.execute = None

    (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)

    hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
    port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)

    # Timeouts may come from cqlshrc (int) or the command line (string);
    # normalize to int either way. parser.error() exits, so the fallback
    # assignments are only reached if error handling is overridden.
    try:
        options.connect_timeout = int(options.connect_timeout)
    except ValueError:
        parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
        options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS

    try:
        options.request_timeout = int(options.request_timeout)
    except ValueError:
        parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
        options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS

    # Environment beats cqlshrc; positional arguments beat both.
    hostname = environment.get('CQLSH_HOST', hostname)
    port = environment.get('CQLSH_PORT', port)

    if len(arguments) > 0:
        hostname = arguments[0]
    if len(arguments) > 1:
        port = arguments[1]

    # Batch mode (file or -e) implies non-interactive behavior.
    if options.file or options.execute:
        options.tty = False

    if options.execute and not options.execute.endswith(';'):
        options.execute += ';'

    if optvalues.color in (True, False):
        options.color = optvalues.color
    else:
        # Color not forced: default off for files, auto-detect otherwise.
        if options.file is not None:
            options.color = False
        else:
            options.color = should_use_color()

    options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
    if cqlvertup[0] < 3:
        parser.error('%r is not a supported CQL version.' % options.cqlversion)
    else:
        options.cqlmodule = cql3handling

    try:
        port = int(port)
    except ValueError:
        parser.error('%r is not a valid port number.' % port)
    return options, hostname, port
def setup_cqlruleset(cqlmodule):
    """Install the CQL-version-specific rule set, extended with cqlsh's own
    command syntax and completers."""
    global cqlruleset
    cqlruleset = cqlmodule.CqlRuleSet
    cqlruleset.append_rules(cqlsh_extra_syntax_rules)
    for rulename, termname, func in cqlsh_syntax_completers:
        cqlruleset.completer_for(rulename, termname)(func)
    cqlruleset.commands_end_with_newline.update(my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
    """Point the global help-topic registry at the active CQL module's docs."""
    global cqldocs
    cqldocs = cqlmodule.cqldocs
def init_history():
    """Load the readline history file and configure completer delimiters."""
    if readline is not None:
        try:
            readline.read_history_file(HISTORY)
        except IOError:
            pass
        delims = readline.get_completer_delims()
        # str.replace returns a new string; the original code discarded the
        # result, so the quote character was never actually removed from the
        # delimiter set.
        delims = delims.replace("'", "")
        delims += '.'
        readline.set_completer_delims(delims)
def save_history():
    """Persist the readline history, silently ignoring filesystem errors."""
    if readline is None:
        return
    try:
        readline.write_history_file(HISTORY)
    except IOError:
        pass
def main(options, hostname, port):
    """Wire everything up from parsed options, build a Shell and run its
    command loop; exits the process on connection errors."""
    setup_cqlruleset(options.cqlmodule)
    setup_cqldocs(options.cqlmodule)
    init_history()
    csv.field_size_limit(options.field_size_limit)

    if options.file is None:
        stdin = None
    else:
        try:
            encoding, bom_size = get_file_encoding_bomsize(options.file)
            stdin = codecs.open(options.file, 'r', encoding)
            # Skip past any byte-order mark so it is not parsed as CQL.
            stdin.seek(bom_size)
        except IOError, e:
            sys.exit("Can't open %r: %s" % (options.file, e))

    if options.debug:
        sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
        sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
        sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))

    # create timezone based on settings, environment or auto-detection
    timezone = None
    if options.timezone or 'TZ' in os.environ:
        try:
            import pytz
            if options.timezone:
                try:
                    timezone = pytz.timezone(options.timezone)
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
            # $TZ wins over the cqlshrc setting when both are present.
            if 'TZ' in os.environ:
                try:
                    timezone = pytz.timezone(os.environ['TZ'])
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
        except ImportError:
            sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")

    # try auto-detect timezone if tzlocal is installed
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()
        except ImportError:
            # we silently ignore and fallback to UTC unless a custom timestamp format (which likely
            # does contain a TZ part) was specified
            if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
                sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
                                 "Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")

    try:
        shell = Shell(hostname,
                      port,
                      color=options.color,
                      username=options.username,
                      password=options.password,
                      stdin=stdin,
                      tty=options.tty,
                      completekey=options.completekey,
                      browser=options.browser,
                      cqlver=options.cqlversion,
                      keyspace=options.keyspace,
                      display_timestamp_format=options.time_format,
                      display_nanotime_format=options.nanotime_format,
                      display_date_format=options.date_format,
                      display_float_precision=options.float_precision,
                      display_timezone=timezone,
                      max_trace_wait=options.max_trace_wait,
                      ssl=options.ssl,
                      single_statement=options.execute,
                      request_timeout=options.request_timeout,
                      connect_timeout=options.connect_timeout,
                      encoding=options.encoding)
    except KeyboardInterrupt:
        sys.exit('Connection aborted.')
    except CQL_ERRORS, e:
        sys.exit('Connection error: %s' % (e,))
    except VersionNotSupported, e:
        sys.exit('Unsupported CQL version: %s' % (e,))
    if options.debug:
        shell.debug = True

    shell.cmdloop()
    save_history()

    # In batch mode (file or -e), report statement failures via exit status.
    batch_mode = options.file or options.execute
    if batch_mode and shell.statement_error:
        sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
| 39.435206 | 195 | 0.619126 |
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement
import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
sys.exit("\nCQL Shell supports only Python 2.7\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001'
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
readline = None
try:
# about $TERM. we don't want the funky escape code stuff to be
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.0.html'
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
# use bundled libs for python-cql and thrift, if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
    """Return the bundled zip matching *libprefix* from the first lib dir
    that has one, or None if nothing matches."""
    for ziplibdir in ZIPLIB_DIRS:
        candidates = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
        if candidates:
            return max(candidates)  # probably the highest version, if multiple
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError, e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, DateTimeFormat,
format_by_type, format_value_utype,
formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_CQLVER = '3.4.0'
DEFAULT_PROTOCOL_VERSION = 4
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
" (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=DEFAULT_CQLVER,
help='Specify a particular CQL version (default: %default).'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
% (OLD_CONFIG_FILE, CONFIG_FILE)
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
# we want the cql parser to understand our cqlsh-specific commands too
my_commands_ending_with_newline = (
'help',
'?',
'consistency',
'serial',
'describe',
'desc',
'show',
'source',
'capture',
'login',
'debug',
'tracing',
'expand',
'paging',
'exit',
'quit',
'clear',
'cls'
)
# Registry of (rulename, termname, completer_function) triples; consumed by
# setup_cqlruleset() when the rule set is installed.
cqlsh_syntax_completers = []


def cqlsh_syntax_completer(rulename, termname):
    """Decorator factory: register the decorated function as the completion
    handler for the given grammar rule/term, returning it unchanged."""
    def registrator(f):
        cqlsh_syntax_completers.append((rulename, termname, f))
        return f
    return registrator
cqlsh_extra_syntax_rules = r'''
<cqlshCommand> ::= <CQL_Statement>
| <specialCommand> ( ";" | "\n" )
;
<specialCommand> ::= <describeCommand>
| <consistencyCommand>
| <serialConsistencyCommand>
| <showCommand>
| <sourceCommand>
| <captureCommand>
| <copyCommand>
| <loginCommand>
| <debugCommand>
| <helpCommand>
| <tracingCommand>
| <expandCommand>
| <exitCommand>
| <pagingCommand>
| <clearCommand>
;
<describeCommand> ::= ( "DESCRIBE" | "DESC" )
( "FUNCTIONS"
| "FUNCTION" udf=<anyFunctionName>
| "AGGREGATES"
| "AGGREGATE" uda=<userAggregateName>
| "KEYSPACES"
| "KEYSPACE" ksname=<keyspaceName>?
| ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
| "INDEX" idx=<indexName>
| "MATERIALIZED" "VIEW" mv=<materializedViewName>
| ( "COLUMNFAMILIES" | "TABLES" )
| "FULL"? "SCHEMA"
| "CLUSTER"
| "TYPES"
| "TYPE" ut=<userTypeName>
| (ksname=<keyspaceName> | cf=<columnFamilyName> | idx=<indexName> | mv=<materializedViewName>))
;
<consistencyCommand> ::= "CONSISTENCY" ( level=<consistencyLevel> )?
;
<consistencyLevel> ::= "ANY"
| "ONE"
| "TWO"
| "THREE"
| "QUORUM"
| "ALL"
| "LOCAL_QUORUM"
| "EACH_QUORUM"
| "SERIAL"
| "LOCAL_SERIAL"
| "LOCAL_ONE"
;
<serialConsistencyCommand> ::= "SERIAL" "CONSISTENCY" ( level=<serialConsistencyLevel> )?
;
<serialConsistencyLevel> ::= "SERIAL"
| "LOCAL_SERIAL"
;
<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
;
<sourceCommand> ::= "SOURCE" fname=<stringLiteral>
;
<captureCommand> ::= "CAPTURE" ( fname=( <stringLiteral> | "OFF" ) )?
;
<copyCommand> ::= "COPY" cf=<columnFamilyName>
( "(" [colnames]=<colname> ( "," [colnames]=<colname> )* ")" )?
( dir="FROM" ( fname=<stringLiteral> | "STDIN" )
| dir="TO" ( fname=<stringLiteral> | "STDOUT" ) )
( "WITH" <copyOption> ( "AND" <copyOption> )* )?
;
<copyOption> ::= [optnames]=(<identifier>|<reserved_identifier>) "=" [optvals]=<copyOptionVal>
;
<copyOptionVal> ::= <identifier>
| <reserved_identifier>
| <term>
;
# avoiding just "DEBUG" so that this rule doesn't get treated as a terminal
<debugCommand> ::= "DEBUG" "THINGS"?
;
<helpCommand> ::= ( "HELP" | "?" ) [topic]=( /[a-z_]*/ )*
;
<tracingCommand> ::= "TRACING" ( switch=( "ON" | "OFF" ) )?
;
<expandCommand> ::= "EXPAND" ( switch=( "ON" | "OFF" ) )?
;
<pagingCommand> ::= "PAGING" ( switch=( "ON" | "OFF" | /[0-9]+/) )?
;
<loginCommand> ::= "LOGIN" username=<username> (password=<stringLiteral>)?
;
<exitCommand> ::= "exit" | "quit"
;
<clearCommand> ::= "CLEAR" | "CLS"
;
<qmark> ::= "?" ;
'''
@cqlsh_syntax_completer('helpCommand', 'topic')
def complete_help(ctxt, cqlsh):
    # Offer every known help topic (CQL docs plus shell commands), uppercased.
    return sorted([t.upper() for t in cqldocs.get_help_topics() + cqlsh.get_help_topics()])
def complete_source_quoted_filename(ctxt, cqlsh):
    """Complete a partially-typed filename for SOURCE/CAPTURE, appending '/'
    to directory matches; returns () when the directory can't be listed."""
    partial_path = ctxt.get_binding('partial', '')
    head, tail = os.path.split(partial_path)
    expanded_head = os.path.expanduser(head)
    try:
        entries = os.listdir(expanded_head or '.')
    except OSError:
        return ()
    completions = []
    for entry in entries:
        if not entry.startswith(tail):
            continue
        candidate = os.path.join(head, entry)
        if os.path.isdir(os.path.join(expanded_head, entry)):
            candidate += '/'
        completions.append(candidate)
    return completions
cqlsh_syntax_completer('sourceCommand', 'fname')(complete_source_quoted_filename)
cqlsh_syntax_completer('captureCommand', 'fname')(complete_source_quoted_filename)
@cqlsh_syntax_completer('copyCommand', 'fname')
def copy_fname_completer(ctxt, cqlsh):
    # Filename completion for COPY: once inside an unclosed string literal,
    # complete paths; on an empty binding, offer the opening quote itself.
    if ctxt.get_binding('*LASTTYPE*') == 'unclosedString':
        return complete_source_quoted_filename(ctxt, cqlsh)
    if ctxt.get_binding('partial') == '':
        return ["'"]
    return ()
@cqlsh_syntax_completer('copyCommand', 'colnames')
def complete_copy_column_names(ctxt, cqlsh):
    # Complete column names for COPY's optional column list, excluding the
    # columns that were already typed.
    existcols = map(cqlsh.cql_unprotect_name, ctxt.get_binding('colnames', ()))
    ks = cqlsh.cql_unprotect_name(ctxt.get_binding('ksname', None))
    cf = cqlsh.cql_unprotect_name(ctxt.get_binding('cfname'))
    colnames = cqlsh.get_column_names(ks, cf)
    if len(existcols) == 0:
        # NOTE(review): only the first column is offered initially --
        # presumably so the partition key column comes first; confirm.
        return [colnames[0]]
    return set(colnames[1:]) - set(existcols)
COPY_COMMON_OPTIONS = ['DELIMITER', 'QUOTE', 'ESCAPE', 'HEADER', 'NULL', 'DATETIMEFORMAT',
'MAXATTEMPTS', 'REPORTFREQUENCY', 'DECIMALSEP', 'THOUSANDSSEP', 'BOOLSTYLE',
'NUMPROCESSES', 'CONFIGFILE', 'RATEFILE']
COPY_FROM_OPTIONS = ['CHUNKSIZE', 'INGESTRATE', 'MAXBATCHSIZE', 'MINBATCHSIZE', 'MAXROWS',
'SKIPROWS', 'SKIPCOLS', 'MAXPARSEERRORS', 'MAXINSERTERRORS', 'ERRFILE', 'PREPAREDSTATEMENTS']
COPY_TO_OPTIONS = ['ENCODING', 'PAGESIZE', 'PAGETIMEOUT', 'BEGINTOKEN', 'ENDTOKEN', 'MAXOUTPUTSIZE', 'MAXREQUESTS']
@cqlsh_syntax_completer('copyOption', 'optnames')
def complete_copy_options(ctxt, cqlsh):
    """Complete WITH-option names for COPY, excluding options already given.

    The valid option set depends on the COPY direction (FROM vs TO).
    """
    # Use .upper() in a comprehension rather than map(str.upper, ...):
    # str.upper as an unbound method fails on unicode bindings, and the
    # comprehension also works unchanged on Python 3.
    optnames = [name.upper() for name in ctxt.get_binding('optnames', ())]
    direction = ctxt.get_binding('dir').upper()
    if direction == 'FROM':
        opts = set(COPY_COMMON_OPTIONS + COPY_FROM_OPTIONS) - set(optnames)
    elif direction == 'TO':
        opts = set(COPY_COMMON_OPTIONS + COPY_TO_OPTIONS) - set(optnames)
    else:
        # Defensive: the grammar only produces FROM/TO, but don't crash with
        # an UnboundLocalError if that ever changes.
        opts = set()
    return opts
@cqlsh_syntax_completer('copyOption', 'optvals')
def complete_copy_opt_values(ctxt, cqlsh):
    # Value completion for COPY options: HEADER is boolean; everything else
    # only gets a generic hint.
    optnames = ctxt.get_binding('optnames', ())
    if optnames[-1].lower() == 'header':
        return ['true', 'false']
    return [cqlhandling.Hint('<single_character_string>')]
# Shell-level error types; most signal that a schema object named by the
# user (keyspace, table, index, type, ...) could not be found.
class NoKeyspaceError(Exception):
    pass


class KeyspaceNotFound(Exception):
    pass


class ColumnFamilyNotFound(Exception):
    pass


class IndexNotFound(Exception):
    pass


class MaterializedViewNotFound(Exception):
    pass


class ObjectNotFound(Exception):
    pass


class VersionNotSupported(Exception):
    pass


class UserTypeNotFound(Exception):
    pass


class FunctionNotFound(Exception):
    pass


class AggregateNotFound(Exception):
    pass
class DecodeError(Exception):
    """Carries a value that could not be decoded for display, along with the
    underlying error and (optionally) the column it came from."""
    verb = 'decode'

    def __init__(self, thebytes, err, colname=None):
        self.thebytes = thebytes
        self.err = err
        self.colname = colname

    def __str__(self):
        return str(self.thebytes)

    def message(self):
        if self.colname is None:
            what = 'value %r' % (self.thebytes,)
        else:
            what = 'value %r (for column %r)' % (self.thebytes, self.colname)
        return 'Failed to %s %s : %s' \
               % (self.verb, what, self.err)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
    # Same payload as DecodeError, but for values that decoded fine and then
    # failed during display formatting ('format' in the message).
    verb = 'format'
def full_cql_version(ver):
    """Expand a possibly-short CQL version string ('3', '3.4') to x.y.z form.

    Returns (version_string, (major, minor, patch, prerelease)) where
    prerelease is '' unless the version carried a '-suffix'.
    """
    while ver.count('.') < 2:
        ver += '.0'
    ver_parts = ver.split('-', 1) + ['']
    # List comprehension rather than map(): map() returns an iterator on
    # Python 3, which cannot be concatenated with the prerelease list.
    vertuple = tuple([int(part) for part in ver_parts[0].split('.')] + [ver_parts[1]])
    return ver, vertuple
def format_value(val, output_encoding, addcolor=False, date_time_format=None,
                 float_precision=None, colormap=None, nullval=None):
    """Render a single result value for display; DecodeError placeholders are
    shown as their raw bytes (error-colored when color is on)."""
    if isinstance(val, DecodeError):
        if addcolor:
            return colorme(repr(val.thebytes), colormap, 'error')
        return FormattedValue(repr(val.thebytes))
    return format_by_type(type(val), val, output_encoding, colormap=colormap,
                          addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
                          float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that omits the echoed source line."""
    out = sys.stderr if file is None else file
    try:
        # Passing line='' stops formatwarning from re-reading and quoting
        # the offending source line.
        out.write(warnings.formatwarning(message, category, filename, lineno, line=''))
    except IOError:
        pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
    """Install cqlsh's monkey patches into the Cassandra driver."""
    # Make BLOBs deserialize to bytearrays.
    extend_cql_deserialization()
    # Register display formatters whenever a new UDT class is created.
    auto_format_udts()
def extend_cql_deserialization():
    """
    The python driver returns BLOBs as string, but we expect them as bytearrays;
    this patches the implementation of cassandra.cqltypes.BytesType.deserialize.
    The deserializers package exists only when the driver has been compiled with cython extensions and
    cassandra.deserializers.DesBytesType replaces cassandra.cqltypes.BytesType.deserialize.
    DesBytesTypeByteArray is a fast deserializer that converts blobs into bytearrays but it was
    only introduced recently (3.1.0). If it is available we use it, otherwise we remove
    cassandra.deserializers.DesBytesType so that we fall back onto cassandra.cqltypes.BytesType.deserialize
    just like in the case where no cython extensions are present.
    """
    if hasattr(cassandra, 'deserializers'):
        if hasattr(cassandra.deserializers, 'DesBytesTypeByteArray'):
            # Fast cython path that already yields bytearrays.
            cassandra.deserializers.DesBytesType = cassandra.deserializers.DesBytesTypeByteArray
        else:
            # Remove the cython deserializer so the pure-python fallback
            # (patched below) is used instead.
            del cassandra.deserializers.DesBytesType
    cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
class DateOverFlowWarning(RuntimeWarning):
    # Issued when a timestamp exceeds datetime's range and is therefore
    # displayed as raw milliseconds-from-epoch instead.
    pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
    """Deserialize a timestamp column; values outside datetime's range are
    returned as the raw epoch-millisecond integer (with a warning)."""
    epoch_millis = int64_unpack(byts)
    try:
        return datetime_from_timestamp(epoch_millis / 1000.0)
    except OverflowError:
        warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. Timestamps are displayed in milliseconds from epoch."))
        return epoch_millis
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
def auto_format_udts():
    """Register shell formatters for user-defined types as they are discovered."""
    # when we see a new user defined type, set up the shell formatting for it
    udt_apply_params = cassandra.cqltypes.UserType.apply_parameters

    def new_apply_params(cls, *args, **kwargs):
        udt_class = udt_apply_params(*args, **kwargs)
        formatter_for(udt_class.typename)(format_value_utype)
        return udt_class

    # NOTE(review): this assigns a *new* attribute name (udt_apply_parameters)
    # rather than replacing apply_parameters, so the hook above may never be
    # invoked through that path; the make_udt_class patch below is what takes
    # effect in practice — confirm against the installed driver version.
    cassandra.cqltypes.UserType.udt_apply_parameters = classmethod(new_apply_params)

    make_udt_class = cassandra.cqltypes.UserType.make_udt_class

    def new_make_udt_class(cls, *args, **kwargs):
        udt_class = make_udt_class(*args, **kwargs)
        formatter_for(udt_class.tuple_type.__name__)(format_value_utype)
        return udt_class

    cassandra.cqltypes.UserType.make_udt_class = classmethod(new_make_udt_class)
class FrozenType(cassandra.cqltypes._ParameterizedType):
    """
    Needed until the bundled python driver adds FrozenType.

    frozen<T> has exactly one parameter; (de)serialization simply delegates
    to that inner type.
    """
    typename = "frozen"
    num_subtypes = 1

    @classmethod
    def deserialize_safe(cls, byts, protocol_version):
        (inner,) = cls.subtypes
        return inner.from_binary(byts)

    @classmethod
    def serialize_safe(cls, val, protocol_version):
        (inner,) = cls.subtypes
        return inner.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
    # Optional user-supplied prompt prefix (CQLSH_PROMPT env var); shown on
    # its own line above the built-in prompt when non-empty.
    custom_prompt = os.getenv('CQLSH_PROMPT', '')
    # BUGFIX: compare string *values* with != rather than identity with
    # `is not` — the old form only worked by accident of CPython small-string
    # interning and raises a SyntaxWarning on Python 3.8+.
    if custom_prompt != '':
        custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:%s> "
keyspace_continue_prompt = "%s ... "
show_line_nums = False
debug = False
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
default_page_size = 100
    def __init__(self, hostname, port, color=False,
                 username=None, password=None, encoding=None, stdin=None, tty=True,
                 completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
                 cqlver=DEFAULT_CQLVER, keyspace=None,
                 tracing_enabled=False, expand_enabled=False,
                 display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
                 display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
                 display_date_format=DEFAULT_DATE_FORMAT,
                 display_float_precision=DEFAULT_FLOAT_PRECISION,
                 display_timezone=None,
                 max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
                 ssl=False,
                 single_statement=None,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
                 protocol_version=DEFAULT_PROTOCOL_VERSION,
                 connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS):
        """Connect (or adopt `use_conn`) and set up shell state.

        If `username` is given without `password`, the password is prompted
        for interactively. With `tty` false the shell runs in batch mode
        (line numbers in errors, no banner). `single_statement` makes
        cmdloop run exactly one statement and stop.
        """
        cmd.Cmd.__init__(self, completekey=completekey)
        self.hostname = hostname
        self.port = port
        self.auth_provider = None
        if username:
            if not password:
                password = getpass.getpass()
            self.auth_provider = PlainTextAuthProvider(username=username, password=password)
        self.username = username
        self.keyspace = keyspace
        self.ssl = ssl
        self.tracing_enabled = tracing_enabled
        self.page_size = self.default_page_size
        self.expand_enabled = expand_enabled
        if use_conn:
            self.conn = use_conn
        else:
            # pin the load balancer to the single requested host
            self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
                                protocol_version=protocol_version,
                                auth_provider=self.auth_provider,
                                ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
                                load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                                control_connection_timeout=connect_timeout,
                                connect_timeout=connect_timeout)
        # only shut the cluster down on exit if we created it ourselves
        self.owns_connection = not use_conn
        self.set_expanded_cql_version(cqlver)
        if keyspace:
            self.session = self.conn.connect(keyspace)
        else:
            self.session = self.conn.connect()
        if browser == "":
            browser = None
        self.browser = browser
        self.color = color
        self.display_nanotime_format = display_nanotime_format
        self.display_timestamp_format = display_timestamp_format
        self.display_date_format = display_date_format
        self.display_float_precision = display_float_precision
        self.display_timezone = display_timezone
        self.session.default_timeout = request_timeout
        self.session.row_factory = ordered_dict_factory
        self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
        self.get_connection_versions()
        self.current_keyspace = keyspace
        # NOTE(review): the three display_* assignments below repeat the ones
        # above; harmless but redundant.
        self.display_timestamp_format = display_timestamp_format
        self.display_nanotime_format = display_nanotime_format
        self.display_date_format = display_date_format
        self.max_trace_wait = max_trace_wait
        self.session.max_trace_wait = max_trace_wait
        self.tty = tty
        self.encoding = encoding
        self.check_windows_encoding()
        self.output_codec = codecs.lookup(encoding)
        # buffer accumulating a (possibly multi-line) statement in progress
        self.statement = StringIO()
        self.lineno = 1
        self.in_comment = False
        self.prompt = ''
        if stdin is None:
            stdin = sys.stdin
        if tty:
            self.reset_prompt()
            self.report_connection()
            print 'Use HELP for help.'
        else:
            self.show_line_nums = True
        self.stdin = stdin
        self.query_out = sys.stdout
        self.consistency_level = cassandra.ConsistencyLevel.ONE
        self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
        self.empty_lines = 0
        self.statement_error = False
        self.single_statement = single_statement
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
    def myformat_value(self, val, **kwargs):
        """Format one value for display using the shell's display settings.

        Formatting failures never abort output: they are recorded in
        self.decoding_errors and rendered as a FormatError placeholder.
        """
        if isinstance(val, DecodeError):
            self.decoding_errors.append(val)
        try:
            dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
                                       date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
                                       timezone=self.display_timezone)
            return format_value(val, self.output_codec.name,
                                addcolor=self.color, date_time_format=dtformats,
                                float_precision=self.display_float_precision, **kwargs)
        except Exception, e:
            err = FormatError(val, e)
            self.decoding_errors.append(err)
            return format_value(err, self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
return self.myformat_value(name, colormap=column_colors)
    def report_connection(self):
        """Print the connected-host banner followed by the version banner."""
        self.show_host()
        self.show_version()
    def show_host(self):
        """Print the cluster name and the endpoint we are connected to."""
        print "Connected to %s at %s:%d." % \
            (self.applycolor(self.get_cluster_name(), BLUE),
             self.hostname,
             self.port)
    def show_version(self):
        """Print the cqlsh / Cassandra / CQL spec / protocol version banner."""
        vers = self.connection_versions.copy()
        vers['shver'] = version
        # system.Versions['cql'] apparently does not reflect changes with
        # set_cql_version.
        vers['cql'] = self.cql_version
        print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers
    def show_session(self, sessionid, partial_session=False):
        """Display the stored query trace for the given tracing session id."""
        print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': result['native_protocol_version'],
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return map(str, self.conn.metadata.keyspaces.keys())
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).tables.keys())
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).views.keys())
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).indexes.keys())
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [unicode(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return self.get_keyspace_meta(ksname).user_types.keys()
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
return [(field_name, field_type.cql_parameterized_type())
for field_name, field_type in zip(user_type.field_names, user_type.field_types)]
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())
    def get_cluster_name(self):
        """Cluster name as reported by driver metadata."""
        return self.conn.metadata.cluster_name
    def get_partitioner(self):
        """Fully-qualified partitioner class name from driver metadata."""
        return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname not in self.conn.metadata.keyspaces:
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
return self.conn.metadata.keyspaces[ksname]
    def get_keyspaces(self):
        """All KeyspaceMetadata objects known to the driver."""
        return self.conn.metadata.keyspaces.values()
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using external auth implementation so internal tables
# aren't actually defined in schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index %r not found" % idxname)
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view %r not found" % viewname)
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("%r not found in keyspaces" % (ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in self.get_keyspace_meta(ksname).tables.values()
for trigger in table.triggers.values()]
    def reset_statement(self):
        """Discard any partially-entered statement and restore the main prompt."""
        self.reset_prompt()
        self.statement.truncate(0)
        self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt % spaces)
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
    @contextmanager
    def prepare_loop(self):
        """Wrap cmdloop: install our readline completer (with libedit-compatible
        bindings where needed) and restore the previous completer on exit."""
        readline = None
        if self.tty and self.completekey:
            try:
                import readline
            except ImportError:
                if is_win:
                    print "WARNING: pyreadline dependency missing. Install to enable tab completion."
                pass
            else:
                old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                if readline.__doc__ is not None and 'libedit' in readline.__doc__:
                    # macOS libedit emulation needs its own binding syntax
                    readline.parse_and_bind("bind -e")
                    readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
                    readline.parse_and_bind("bind ^R em-inc-search-prev")
                else:
                    readline.parse_and_bind(self.completekey + ": complete")
        try:
            yield
        finally:
            if readline is not None:
                readline.set_completer(old_completer)
def get_input_line(self, prompt=''):
if self.tty:
try:
self.lastcmd = raw_input(prompt).decode(self.encoding)
except UnicodeDecodeError:
self.lastcmd = ''
traceback.print_exc()
self.check_windows_encoding()
line = self.lastcmd + '\n'
else:
self.lastcmd = self.stdin.readline()
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
    def cmdloop(self):
        """
        Adapted from cmd.Cmd's version, because there is literally no way with
        cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
        input and an actual EOF.
        """
        with self.prepare_loop():
            while not self.stop:
                try:
                    if self.single_statement:
                        # -e/--execute mode: run the one statement and stop
                        line = self.single_statement
                        self.stop = True
                    else:
                        line = self.get_input_line(self.prompt)
                    self.statement.write(line)
                    if self.onecmd(self.statement.getvalue()):
                        self.reset_statement()
                except EOFError:
                    self.handle_eof()
                except CQL_ERRORS, cqlerr:
                    self.printerr(cqlerr.message.decode(encoding='utf-8'))
                except KeyboardInterrupt:
                    # ^C abandons the current (possibly multi-line) statement
                    self.reset_statement()
                    print
    def onecmd(self, statementtext):
        """
        Returns true if the statement is complete and was handled (meaning it
        can be reset).
        """
        try:
            statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
        except pylexotron.LexingError, e:
            if self.show_line_nums:
                self.printerr('Invalid syntax at char %d' % (e.charnum,))
            else:
                self.printerr('Invalid syntax at line %d, char %d'
                              % (e.linenum, e.charnum))
                statementline = statementtext.split('\n')[e.linenum - 1]
                self.printerr(' %s' % statementline)
                self.printerr(' %s^' % (' ' * e.charnum))
            return True
        # drop trailing empty statements (e.g. after a final ';')
        while statements and not statements[-1]:
            statements = statements[:-1]
        if not statements:
            return True
        # unterminated statement: keep accumulating input
        if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
            self.set_continue_prompt()
            return
        for st in statements:
            try:
                self.handle_statement(st, statementtext)
            except Exception, e:
                if self.debug:
                    traceback.print_exc()
                else:
                    self.printerr(e)
        return True
    def handle_eof(self):
        """Flush any pending statement at end of input, then exit the shell."""
        if self.tty:
            print
        statement = self.statement.getvalue()
        if statement.strip():
            if not self.onecmd(statement):
                self.printerr('Incomplete statement at end of file')
        self.do_exit()
def handle_statement(self, tokens, srcstr):
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist.encode(self.encoding))
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
    def perform_statement(self, statement):
        """Run one CQL statement with the shell's consistency/paging settings.

        Returns True when the statement succeeded. Server-side warnings and,
        when tracing is enabled, the collected query traces are printed.
        """
        stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
        success, future = self.perform_simple_statement(stmt)
        if future:
            if future.warnings:
                self.print_warnings(future.warnings)
            if self.tracing_enabled:
                try:
                    for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
                        print_trace(self, trace)
                except TraceUnavailable:
                    # trace rows not written yet; show whatever partial data exists
                    msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
                    self.writeresult(msg, color=RED)
                    for trace_id in future.get_query_trace_ids():
                        self.show_session(trace_id, partial_session=True)
                except Exception, err:
                    self.printerr("Unable to fetch query trace: %s" % (str(err),))
        return success
    def parse_for_select_meta(self, query_string):
        """Table or materialized-view metadata for the target of a SELECT.

        Returns None when the query cannot be parsed; raises ObjectNotFound
        when the target is neither a table nor a view.
        """
        try:
            parsed = cqlruleset.cql_parse(query_string)[1]
        except IndexError:
            return None
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
        try:
            return self.get_table_meta(ks, name)
        except ColumnFamilyNotFound:
            # not a table; maybe a materialized view
            try:
                return self.get_view_meta(ks, name)
            except MaterializedViewNotFound:
                raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
    def perform_simple_statement(self, statement):
        """Execute one statement asynchronously and render its result.

        Returns (success, future). Query errors are printed rather than
        raised; schema disagreement triggers a metadata refresh.
        """
        if not statement:
            return False, None
        future = self.session.execute_async(statement, trace=self.tracing_enabled)
        result = None
        try:
            result = future.result()
        except CQL_ERRORS, err:
            self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
        except Exception:
            import traceback
            self.printerr(traceback.format_exc())
        # warn (and wait indefinitely) when the cluster disagrees on schema
        if not future.is_schema_agreed:
            try:
                self.conn.refresh_schema_metadata(5)
            except Exception:
                self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
                              "nodes in system.local and system.peers.")
                self.conn.refresh_schema_metadata(-1)
        if result is None:
            return False, None
        # SELECT and LIST get tabular output with column metadata; other
        # statements only print rows when the server returned any (e.g. LWT)
        if statement.query_string[:6].lower() == 'select':
            self.print_result(result, self.parse_for_select_meta(statement.query_string))
        elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
            self.print_result(result, self.get_table_meta('system_auth', 'roles'))
        elif statement.query_string.lower().startswith("list"):
            self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
        elif result:
            self.writeresult("")
            self.print_static_result(result.column_names, list(result), self.parse_for_update_meta(statement.query_string))
        self.flush_output()
        return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
if result.has_more_pages and self.tty:
num_rows = 0
while True:
page = result.current_rows
if page:
num_rows += len(page)
self.print_static_result(result.column_names, page, table_meta)
if result.has_more_pages:
raw_input("---MORE---")
result.fetch_next_page()
else:
break
else:
rows = list(result)
num_rows = len(rows)
self.print_static_result(result.column_names, rows, table_meta)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, column_names, rows, table_meta):
if not column_names and not table_meta:
return
column_names = column_names or table_meta.columns.keys()
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not rows:
self.print_formatted_result(formatted_names, None)
return
formatted_values = [map(self.myformat_value, row.values()) for row in rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
if formatted_values is None:
self.writeresult("")
return
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
for row_id, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_id + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
    def emptyline(self):
        # override cmd.Cmd: a blank line must not repeat the previous command
        pass
    def parseline(self, line):
        # cqlsh dispatches via handle_statement, never via cmd.Cmd's parser
        raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
import traceback
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "%s@%s" % (self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
    def print_recreate_keyspace(self, ksdef, out):
        """Write CQL that would recreate the given keyspace to `out`."""
        out.write(ksdef.export_as_string())
        out.write("\n")
    def print_recreate_columnfamily(self, ksname, cfname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given table.

        Writes output to the given out stream.
        """
        out.write(self.get_table_meta(ksname, cfname).export_as_string())
        out.write("\n")
    def print_recreate_index(self, ksname, idxname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given index.

        Writes output to the given out stream.
        """
        out.write(self.get_index_meta(ksname, idxname).export_as_string())
        out.write("\n")
    def print_recreate_materialized_view(self, ksname, viewname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given materialized view.

        Writes output to the given out stream.
        """
        out.write(self.get_view_meta(ksname, viewname).export_as_string())
        out.write("\n")
    def print_recreate_object(self, ks, name, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given object (ks, table or index).

        Writes output to the given out stream.
        """
        out.write(self.get_object_meta(ks, name).export_as_string())
        out.write("\n")
    def describe_keyspaces(self):
        """Print the names of all keyspaces, columnized."""
        print
        cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
        print
    def describe_keyspace(self, ksname):
        """Print CQL that would recreate the given keyspace and its contents."""
        print
        self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
        print
    def describe_columnfamily(self, ksname, cfname):
        """Print CQL that would recreate the given table."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
        print
    def describe_index(self, ksname, idxname):
        """Print CQL that would recreate the given index."""
        print
        self.print_recreate_index(ksname, idxname, sys.stdout)
        print
    def describe_materialized_view(self, ksname, viewname):
        """Print CQL that would recreate the given materialized view."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
        print
    def describe_object(self, ks, name):
        """Print CQL that would recreate the named object (table, index, or view)."""
        print
        self.print_recreate_object(ks, name, sys.stdout)
        print
    def describe_columnfamilies(self, ksname):
        """Print table names — per-keyspace sections when no keyspace is given."""
        print
        if ksname is None:
            for k in self.get_keyspaces():
                name = protect_name(k.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
                print
        else:
            cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
            print
    def describe_functions(self, ksname):
        """Print user-defined function names — per-keyspace when no keyspace is given."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
            print
    def describe_function(self, ksname, functionname):
        """Print recreate-CQL for every overload of the named function."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        # a function name may have several overloads; show them all
        functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
        if len(functions) == 0:
            raise FunctionNotFound("User defined function %r not found" % functionname)
        print "\n\n".join(func.export_as_string() for func in functions)
        print
    def describe_aggregates(self, ksname):
        """Print user-defined aggregate names — per-keyspace when no keyspace is given."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
            print
    def describe_aggregate(self, ksname, aggregatename):
        """Print recreate-CQL for every overload of the named aggregate."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        # an aggregate name may have several overloads; show them all
        aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
        if len(aggregates) == 0:
            raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
        print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
        print
    def describe_usertypes(self, ksname):
        """Print user-defined type names — per-keyspace when no keyspace is given."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
            print
    def describe_usertype(self, ksname, typename):
        """Print recreate-CQL for the named user-defined type."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        try:
            usertype = ksmeta.user_types[typename]
        except KeyError:
            raise UserTypeNotFound("User type %r not found" % typename)
        print usertype.export_as_string()
        print
    def describe_cluster(self):
        """Print cluster name, partitioner, and (outside system ks) ring ownership."""
        print '\nCluster: %s' % self.get_cluster_name()
        p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
        print 'Partitioner: %s\n' % p
        # TODO: snitch?
        # snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
        # print 'Snitch: %s\n' % snitch
        if self.current_keyspace is not None and self.current_keyspace != 'system':
            print "Range ownership:"
            ring = self.get_ring(self.current_keyspace)
            for entry in ring.items():
                print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
            print
    def describe_schema(self, include_system=False):
        """Print recreate-CQL for every keyspace (system keyspaces only on request)."""
        print
        for k in self.get_keyspaces():
            if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
                self.print_recreate_keyspace(k, sys.stdout)
                print
def do_describe(self, parsed):
    # NOTE: this docstring doubles as the user-visible HELP DESCRIBE text.
    """
    DESCRIBE [cqlsh only]

    (DESC may be used as a shorthand.)

      Outputs information about the connected Cassandra cluster, or about
      the data objects stored in the cluster. Use in one of the following ways:

    DESCRIBE KEYSPACES

      Output the names of all keyspaces.

    DESCRIBE KEYSPACE [<keyspacename>]

      Output CQL commands that could be used to recreate the given keyspace,
      and the objects in it (such as tables, types, functions, etc.).
      In some cases, as the CQL interface matures, there will be some metadata
      about a keyspace that is not representable with CQL. That metadata will not be shown.
      The '<keyspacename>' argument may be omitted, in which case the current
      keyspace will be described.

    DESCRIBE TABLES

      Output the names of all tables in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE TABLE [<keyspace>.]<tablename>

      Output CQL commands that could be used to recreate the given table.
      In some cases, as above, there may be table metadata which is not
      representable and which will not be shown.

    DESCRIBE INDEX <indexname>

      Output the CQL command that could be used to recreate the given index.
      In some cases, there may be index metadata which is not representable
      and which will not be shown.

    DESCRIBE MATERIALIZED VIEW <viewname>

      Output the CQL command that could be used to recreate the given materialized view.
      In some cases, there may be materialized view metadata which is not representable
      and which will not be shown.

    DESCRIBE CLUSTER

      Output information about the connected Cassandra cluster, such as the
      cluster name, and the partitioner and snitch in use. When you are
      connected to a non-system keyspace, also shows endpoint-range
      ownership information for the Cassandra ring.

    DESCRIBE [FULL] SCHEMA

      Output CQL commands that could be used to recreate the entire (non-system) schema.
      Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
      k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.

    DESCRIBE TYPES

      Output the names of all user-defined-types in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE TYPE [<keyspace>.]<type>

      Output the CQL command that could be used to recreate the given user-defined-type.

    DESCRIBE FUNCTIONS

      Output the names of all user-defined-functions in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE FUNCTION [<keyspace>.]<function>

      Output the CQL command that could be used to recreate the given user-defined-function.

    DESCRIBE AGGREGATES

      Output the names of all user-defined-aggregates in the current keyspace, or in all
      keyspaces if there is no current keyspace.

    DESCRIBE AGGREGATE [<keyspace>.]<aggregate>

      Output the CQL command that could be used to recreate the given user-defined-aggregate.

    DESCRIBE <objname>

      Output CQL commands that could be used to recreate the entire object schema,
      where object can be either a keyspace or a table or an index or a materialized
      view (in this order).
    """
    # Dispatch on the token following DESCRIBE; parsed.matched[1] is that
    # (token-type, text) pair.
    what = parsed.matched[1][1].lower()
    if what == 'functions':
        self.describe_functions(self.current_keyspace)
    elif what == 'function':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
        self.describe_function(ksname, functionname)
    elif what == 'aggregates':
        self.describe_aggregates(self.current_keyspace)
    elif what == 'aggregate':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
        self.describe_aggregate(ksname, aggregatename)
    elif what == 'keyspaces':
        self.describe_keyspaces()
    elif what == 'keyspace':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
        if not ksname:
            # No keyspace named on the command line: fall back to the
            # current one, or bail out with an error if there is none.
            ksname = self.current_keyspace
            if ksname is None:
                self.printerr('Not in any keyspace.')
                return
        self.describe_keyspace(ksname)
    elif what in ('columnfamily', 'table'):
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
        self.describe_columnfamily(ks, cf)
    elif what == 'index':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
        self.describe_index(ks, idx)
    elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
        self.describe_materialized_view(ks, mv)
    elif what in ('columnfamilies', 'tables'):
        self.describe_columnfamilies(self.current_keyspace)
    elif what == 'types':
        self.describe_usertypes(self.current_keyspace)
    elif what == 'type':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        ut = self.cql_unprotect_name(parsed.get_binding('utname'))
        self.describe_usertype(ks, ut)
    elif what == 'cluster':
        self.describe_cluster()
    elif what == 'schema':
        self.describe_schema(False)
    elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
        self.describe_schema(True)
    elif what:
        # DESCRIBE <objname>: try table name, then index, then
        # materialized view, and let describe_object resolve it.
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        name = self.cql_unprotect_name(parsed.get_binding('cfname'))
        if not name:
            name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
            if not name:
                name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
        self.describe_object(ks, name)

do_desc = do_describe
def do_copy(self, parsed):
    # Raw docstring: it contains backslash sequences (ESCAPE='\', "\.")
    # that must reach the user verbatim via HELP COPY.
    r"""
    COPY [cqlsh only]

      COPY x FROM: Imports CSV data into a Cassandra table
      COPY x TO: Exports data from a Cassandra table in CSV format.

    COPY <table_name> [ ( column [, ...] ) ]
         FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
         [ WITH <option>='value' [AND ...] ];

    File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.

    COPY <table_name> [ ( column [, ...] ) ]
         TO ( '<filename>' | STDOUT )
         [ WITH <option>='value' [AND ...] ];

    Available common COPY options and defaults:

      DELIMITER=','           - character that appears between records
      QUOTE='"'               - quoting character to be used to quote fields
      ESCAPE='\'              - character to appear before the QUOTE char when quoted
      HEADER=false            - whether to ignore the first line
      NULL=''                 - string that represents a null value
      DATETIMEFORMAT=         - timestamp strftime format
        '%Y-%m-%d %H:%M:%S%z'   defaults to time_format value in cqlshrc
      MAXATTEMPTS=5           - the maximum number of attempts per batch or range
      REPORTFREQUENCY=0.25    - the frequency with which we display status updates in seconds
      DECIMALSEP='.'          - the separator for decimal values
      THOUSANDSSEP=''         - the separator for thousands digit groups
      BOOLSTYLE='True,False'  - the representation for booleans, case insensitive, specify true followed by false,
                                for example yes,no or 1,0
      NUMPROCESSES=n          - the number of worker processes, by default the number of cores minus one
                                capped at 16
      CONFIGFILE=''           - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
                                documentation) where you can specify WITH options under the following optional
                                sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
                                [copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
                                name. Options are read from these sections, in the order specified
                                above, and command line options always override options in configuration files.
                                Depending on the COPY direction, only the relevant copy-from or copy-to sections
                                are used. If no configfile is specified then .cqlshrc is searched instead.
      RATEFILE=''             - an optional file where to print the output statistics

    Available COPY FROM options and defaults:

      CHUNKSIZE=5000          - the size of chunks passed to worker processes
      INGESTRATE=100000       - an approximate ingest rate in rows per second
      MINBATCHSIZE=10         - the minimum size of an import batch
      MAXBATCHSIZE=20         - the maximum size of an import batch
      MAXROWS=-1              - the maximum number of rows, -1 means no maximum
      SKIPROWS=0              - the number of rows to skip
      SKIPCOLS=''             - a comma separated list of column names to skip
      MAXPARSEERRORS=-1       - the maximum global number of parsing errors, -1 means no maximum
      MAXINSERTERRORS=1000    - the maximum global number of insert errors, -1 means no maximum
      ERRFILE=''              - a file where to store all rows that could not be imported, by default this is
                                import_ks_table.err where <ks> is your keyspace and <table> is your table name.
      PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
                                False if you don't mind shifting data parsing to the cluster. The cluster will also
                                have to compile every batch statement. For large and oversized clusters
                                this will result in a faster import but for smaller clusters it may generate
                                timeouts.

    Available COPY TO options and defaults:

      ENCODING='utf8'         - encoding for CSV output
      PAGESIZE='1000'         - the page size for fetching results
      PAGETIMEOUT=10          - the page timeout in seconds for fetching results
      BEGINTOKEN=''           - the minimum token string to consider when exporting data
      ENDTOKEN=''             - the maximum token string to consider when exporting data
      MAXREQUESTS=6           - the maximum number of requests each worker process can work on in parallel
      MAXOUTPUTSIZE='-1'      - the maximum size of the output file measured in number of lines,
                                beyond this maximum the output file will be split into segments,
                                -1 means unlimited.

    When entering CSV data on STDIN, you can use the sequence "\."
    on a line by itself to end the data input.
    """
    # Resolve the target keyspace: explicit, else current, else error.
    ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
    if ks is None:
        ks = self.current_keyspace
        if ks is None:
            raise NoKeyspaceError("Not in any keyspace.")
    table = self.cql_unprotect_name(parsed.get_binding('cfname'))
    columns = parsed.get_binding('colnames', None)
    if columns is not None:
        columns = map(self.cql_unprotect_name, columns)
    else:
        # default to all known columns
        columns = self.get_column_names(ks, table)
    fname = parsed.get_binding('fname', None)
    if fname is not None:
        fname = self.cql_unprotect_value(fname)
    # WITH options: lower-cased names mapped to unquoted values.
    copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
    copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
    opts = dict(zip(copyoptnames, copyoptvals))
    direction = parsed.get_binding('dir').upper()
    if direction == 'FROM':
        task = ImportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
    elif direction == 'TO':
        task = ExportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
    else:
        raise SyntaxError("Unknown direction %s" % direction)
    task.run()
def do_show(self, parsed):
    """
    SHOW [cqlsh only]

      Displays information about the current cqlsh session. Can be called in
      the following ways:

    SHOW VERSION

      Shows the version and build of the connected Cassandra instance, as
      well as the versions of the CQL spec and the Thrift protocol that
      the connected Cassandra instance understands.

    SHOW HOST

      Shows where cqlsh is currently connected.

    SHOW SESSION <sessionid>

      Pretty-prints the requested tracing session.
    """
    showwhat = parsed.get_binding('what').lower()
    if showwhat == 'version':
        # Refresh cached server version info before printing it.
        self.get_connection_versions()
        self.show_version()
    elif showwhat == 'host':
        self.show_host()
    elif showwhat.startswith('session'):
        session_id = parsed.get_binding('sessionid').lower()
        self.show_session(UUID(session_id))
    else:
        self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
    """
    SOURCE [cqlsh only]

    Executes a file containing CQL statements. Gives the output for each
    statement in turn, if any, or any errors that occur along the way.

    Errors do NOT abort execution of the CQL source file.

    Usage:

      SOURCE '<file>';

    That is, the path to the file to be executed must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.

    See also the --file option to cqlsh.
    """
    fname = parsed.get_binding('fname')
    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        # Honor a BOM if present and open with the detected encoding.
        encoding, bom_size = get_file_encoding_bomsize(fname)
        f = codecs.open(fname, 'r', encoding)
        f.seek(bom_size)
    except IOError, e:
        self.printerr('Could not open %r: %s' % (fname, e))
        return
    username = self.auth_provider.username if self.auth_provider else None
    password = self.auth_provider.password if self.auth_provider else None
    # Run the file through a non-interactive subshell that reuses this
    # shell's connection (use_conn=self.conn) and display settings.
    subshell = Shell(self.hostname, self.port, color=self.color,
                     username=username, password=password,
                     encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
                     cqlver=self.cql_version, keyspace=self.current_keyspace,
                     tracing_enabled=self.tracing_enabled,
                     display_nanotime_format=self.display_nanotime_format,
                     display_timestamp_format=self.display_timestamp_format,
                     display_date_format=self.display_date_format,
                     display_float_precision=self.display_float_precision,
                     display_timezone=self.display_timezone,
                     max_trace_wait=self.max_trace_wait, ssl=self.ssl,
                     request_timeout=self.session.default_timeout,
                     connect_timeout=self.conn.connect_timeout)
    subshell.cmdloop()
    f.close()
def do_capture(self, parsed):
    """
    CAPTURE [cqlsh only]

    Begins capturing command output and appending it to a specified file.
    Output will not be shown at the console while it is captured.

    Usage:

      CAPTURE '<file>';
      CAPTURE OFF;
      CAPTURE;

    That is, the path to the file to be appended to must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.

    Only query result output is captured. Errors and output from cqlsh-only
    commands will still be shown in the cqlsh session.

    To stop capturing output and show it in the cqlsh session again, use
    CAPTURE OFF.

    To inspect the current capture configuration, use CAPTURE with no
    arguments.
    """
    # Capture state is held in self.shunted_query_out: while capturing,
    # it stores the original output stream and self.query_out points at
    # the capture file (with color disabled).
    fname = parsed.get_binding('fname')
    if fname is None:
        # Bare CAPTURE: report current state only.
        if self.shunted_query_out is not None:
            print "Currently capturing query output to %r." % (self.query_out.name,)
        else:
            print "Currently not capturing query output."
        return

    if fname.upper() == 'OFF':
        if self.shunted_query_out is None:
            self.printerr('Not currently capturing output.')
            return
        # Restore the shunted stream and color setting.
        self.query_out.close()
        self.query_out = self.shunted_query_out
        self.color = self.shunted_color
        self.shunted_query_out = None
        del self.shunted_color
        return

    if self.shunted_query_out is not None:
        self.printerr('Already capturing output to %s. Use CAPTURE OFF'
                      ' to disable.' % (self.query_out.name,))
        return

    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        f = open(fname, 'a')
    except IOError, e:
        self.printerr('Could not open %r for append: %s' % (fname, e))
        return
    self.shunted_query_out = self.query_out
    self.shunted_color = self.color
    self.query_out = f
    # Disable color: ANSI escapes would pollute the capture file.
    self.color = False
    print 'Now capturing query output to %r.' % (fname,)
def do_tracing(self, parsed):
    """
    TRACING [cqlsh]

      Enables or disables request tracing.

    TRACING ON

      Enables tracing for all further requests.

    TRACING OFF

      Disables tracing.

    TRACING

      TRACING with no arguments shows the current tracing status.
    """
    # Toggle handling is shared with EXPAND via SwitchCommand.
    self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
    """
    EXPAND [cqlsh]

      Enables or disables expanded (vertical) output.

    EXPAND ON

      Enables expanded (vertical) output.

    EXPAND OFF

      Disables expanded (vertical) output.

    EXPAND

      EXPAND with no arguments shows the current value of expand setting.
    """
    # Toggle handling is shared with TRACING via SwitchCommand.
    self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
    """
    CONSISTENCY [cqlsh only]

       Overrides default consistency level (default level is ONE).

    CONSISTENCY <level>

       Sets consistency level for future requests.

       Valid consistency levels:

       ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.

       SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.

    CONSISTENCY

       CONSISTENCY with no arguments shows the current consistency level.
    """
    level = parsed.get_binding('level')
    if level is None:
        # Bare CONSISTENCY: report the current level by name.
        print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
        return

    # Unknown names raise KeyError here rather than being silently ignored.
    self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
    print 'Consistency level set to %s.' % (level.upper(),)
def do_serial(self, parsed):
    """
    SERIAL CONSISTENCY [cqlsh only]

       Overrides serial consistency level (default level is SERIAL).

    SERIAL CONSISTENCY <level>

       Sets consistency level for future conditional updates.

       Valid consistency levels:

       SERIAL, LOCAL_SERIAL.

    SERIAL CONSISTENCY

       SERIAL CONSISTENCY with no arguments shows the current consistency level.
    """
    level = parsed.get_binding('level')
    if level is None:
        # Bare SERIAL CONSISTENCY: report the current level by name.
        print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
        return

    self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
    print 'Serial consistency level set to %s.' % (level.upper(),)
def do_login(self, parsed):
    """
    LOGIN [cqlsh only]

       Changes login information without requiring restart.

    LOGIN <username> (<password>)

       Login using the specified username. If password is specified, it will be used
       otherwise, you will be prompted to enter.
    """
    username = parsed.get_binding('username')
    password = parsed.get_binding('password')
    if password is None:
        password = getpass.getpass()
    else:
        # Strip the surrounding quote characters from the CQL literal.
        password = password[1:-1]

    auth_provider = PlainTextAuthProvider(username=username, password=password)

    # Build a brand-new cluster connection with the new credentials,
    # mirroring the current connection's settings.
    conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
                   protocol_version=self.conn.protocol_version,
                   auth_provider=auth_provider,
                   ssl_options=self.conn.ssl_options,
                   load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                   control_connection_timeout=self.conn.connect_timeout,
                   connect_timeout=self.conn.connect_timeout)

    if self.current_keyspace:
        session = conn.connect(self.current_keyspace)
    else:
        session = conn.connect()

    # Update after we've connected in case we fail to authenticate
    self.conn = conn
    self.auth_provider = auth_provider
    self.username = username
    self.session = session
def do_exit(self, parsed=None):
    """
    EXIT/QUIT [cqlsh only]

    Exits cqlsh.
    """
    self.stop = True
    # Only shut the cluster connection down if this shell created it;
    # a SOURCE'd subshell shares its parent's connection (use_conn).
    if self.owns_connection:
        self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
    """
    CLEAR/CLS [cqlsh only]

    Clears the console.
    """
    import subprocess
    # 'clear' on POSIX, 'cls' on Windows; is_win selects the index.
    subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
    # Undocumented developer command: drop into the Python debugger.
    # Deliberately has no docstring so get_help_topics() excludes it
    # (that method filters on do_* methods having a __doc__).
    import pdb
    pdb.set_trace()
def get_help_topics(self):
    """Return help-topic names: one per documented do_* method, with the
    'do_' prefix stripped and a few aliases hidden from the listing."""
    names = []
    for attr in dir(self):
        if not attr.startswith('do_'):
            continue
        if getattr(self, attr, None).__doc__:
            names.append(attr[3:])
    # Aliases that should not clutter the help output.
    for hidden in ('quit',):
        names.remove(hidden)
    return names
def columnize(self, slist, *a, **kw):
    """Display `slist` in columns; cqlsh shows entries upper-cased and
    sorted, unlike the base cmd.Cmd behavior."""
    upper_sorted = sorted(entry.upper() for entry in slist)
    return cmd.Cmd.columnize(self, upper_sorted, *a, **kw)
def do_help(self, parsed):
    """
    HELP [cqlsh only]

    Gives information about cqlsh commands. To see available topics,
    enter "HELP" without any arguments. To see help on a topic,
    use "HELP <topic>".
    """
    topics = parsed.get_binding('topic', ())
    if not topics:
        # Bare HELP: list shell topics, then CQL topics.
        shell_topics = [t.upper() for t in self.get_help_topics()]
        self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
        cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
        self.print_topics("CQL help topics:", cql_topics, 15, 80)
        return
    for t in topics:
        if t.lower() in self.get_help_topics():
            # Shell command: its help text is the do_* method's docstring.
            doc = getattr(self, 'do_' + t.lower()).__doc__
            self.stdout.write(doc + "\n")
        elif t.lower() in cqldocs.get_help_topics():
            # CQL topic: open the online CQL reference in a browser.
            urlpart = cqldocs.get_help_topic(t)
            if urlpart is not None:
                url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
                if len(webbrowser._tryorder) == 0:
                    # No usable browser registered at all.
                    self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
                elif self.browser is not None:
                    webbrowser.get(self.browser).open_new_tab(url)
                else:
                    webbrowser.open_new_tab(url)
        else:
            self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
    """
    Textual input/output

    When control characters, or other characters which can't be encoded
    in your current locale, are found in values of 'text' or 'ascii'
    types, it will be shown as a backslash escape. If color is enabled,
    any such backslash escapes will be shown in a different color from
    the surrounding text.

    Unicode code points in your data will be output intact, if the
    encoding for your locale is capable of decoding them. If you prefer
    that non-ascii characters be shown with Python-style "\\uABCD"
    escape sequences, invoke cqlsh with an ASCII locale (for example,
    by setting the $LANG environment variable to "C").
    """
    # Help-only stub: exists solely so "HELP UNICODE" can find this
    # docstring; the command itself performs no action.
def do_paging(self, parsed):
    """
    PAGING [cqlsh]

      Enables or disables query paging.

    PAGING ON

      Enables query paging for all further queries.

    PAGING OFF

      Disables paging.

    PAGING

      PAGING with no arguments shows the current query paging status.
    """
    # PAGING also accepts a numeric page size in place of ON
    # (e.g. "PAGING 100"), handled by SwitchCommandWithValue.
    (self.use_paging, requested_page_size) = SwitchCommandWithValue(
        "PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
    if self.use_paging and requested_page_size is not None:
        self.page_size = requested_page_size
    if self.use_paging:
        print("Page size: {}".format(self.page_size))
    else:
        # Turning paging off resets the page size to the default.
        self.page_size = self.default_page_size
def applycolor(self, text, color=None):
    """Wrap `text` in the given ANSI color code (terminated with
    ANSI_RESET), or return it untouched when no color was requested
    or color output is disabled for this shell."""
    if color and self.color:
        return color + text + ANSI_RESET
    return text
def writeresult(self, text, color=None, newline=True, out=None):
    """Write `text` (any object) to `out` — defaulting to the current
    query-output stream, which CAPTURE may have redirected — applying
    color when enabled and appending a newline unless told not to."""
    if out is None:
        out = self.query_out
    # convert Exceptions, etc to text
    if not isinstance(text, (unicode, str)):
        text = unicode(text)
    if isinstance(text, unicode):
        # Encode to the shell's configured output encoding before writing
        # (Python 2: raw byte streams).
        text = text.encode(self.encoding)
    to_write = self.applycolor(text, color) + ('\n' if newline else '')
    out.write(to_write)
def flush_output(self):
    # Flush the query-output stream (stdout, or the CAPTURE file).
    self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
    """Write an error message to stderr and mark the current statement as
    failed (main() uses statement_error for the batch-mode exit status).

    When `shownum` — defaulting to the shell's show_line_nums setting —
    is true, the message is prefixed with the input file name and line.
    """
    self.statement_error = True
    if shownum is None:
        shownum = self.show_line_nums
    if shownum:
        text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
    self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
    """Shared ON/OFF toggle logic for commands like TRACING and EXPAND.

    `command` is the literal command name and `description` the
    human-readable feature name; both appear in user-facing messages.
    """
    command = None
    description = None

    def __init__(self, command, desc):
        self.command = command
        self.description = desc

    def execute(self, state, parsed, printerr):
        """Apply a parsed ON/OFF argument to boolean `state` and return the
        new state.  With no argument, just report the current state.
        Redundant transitions are reported via `printerr` and leave the
        state unchanged.  Any other argument falls through, implicitly
        returning None (callers such as PAGING interpret the value)."""
        switch = parsed.get_binding('switch')
        if switch is None:
            # Bare command: report only, no state change.
            if state:
                print "%s is currently enabled. Use %s OFF to disable" \
                      % (self.description, self.command)
            else:
                print "%s is currently disabled. Use %s ON to enable." \
                      % (self.description, self.command)
            return state

        if switch.upper() == 'ON':
            if state:
                printerr('%s is already enabled. Use %s OFF to disable.'
                         % (self.description, self.command))
                return state
            print 'Now %s is enabled' % (self.description,)
            return True

        if switch.upper() == 'OFF':
            if not state:
                printerr('%s is not enabled.' % (self.description,))
                return state
            print 'Disabled %s.' % (self.description,)
            return False
class SwitchCommandWithValue(SwitchCommand):
    """The same as SwitchCommand except it also accepts a value in place of ON.

    This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
    eg: PAGING 50 returns (True, 50)
        PAGING OFF returns (False, None)
        PAGING ON returns (True, None)
    The value_type must match for the PASSED_VALUE, otherwise it will return None.
    """

    def __init__(self, command, desc, value_type=int):
        SwitchCommand.__init__(self, command, desc)
        self.value_type = value_type

    def execute(self, state, parsed, printerr):
        # Let the plain ON/OFF machinery run first (it also handles the
        # bare-command report case).
        on_off = SwitchCommand.execute(self, state, parsed, printerr)
        raw = parsed.get_binding('switch')
        try:
            passed_value = self.value_type(raw)
        except (ValueError, TypeError):
            # Argument was ON/OFF/None/garbage — no explicit value given.
            passed_value = None
        else:
            # A parseable value means "enable with this value".
            on_off = True
        return (on_off, passed_value)
def option_with_default(cparser_getter, section, option, default=None):
    """Read a config value through `cparser_getter(section, option)`,
    falling back to `default` when the lookup fails with any
    ConfigParser.Error (missing section, missing option, ...)."""
    try:
        value = cparser_getter(section, option)
    except ConfigParser.Error:
        return default
    return value
def raw_option_with_default(configs, section, option, default=None):
    """
    Same (almost) as option_with_default() but won't do any string interpolation.
    Useful for config values that include '%' symbol, e.g. time format string.
    """
    try:
        value = configs.get(section, option, raw=True)
    except ConfigParser.Error:
        value = default
    return value
def should_use_color():
    """Decide whether colored output is appropriate: stdout must be a
    tty, $TERM must be set to something other than 'dumb', and — best
    effort — `tput colors` must report at least 8 colors."""
    if not sys.stdout.isatty():
        return False
    if os.environ.get('TERM', '') in ('dumb', ''):
        return False
    try:
        import subprocess
        tput = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
        out, _ = tput.communicate()
        if int(out.strip()) < 8:
            return False
    except (OSError, ImportError, ValueError):
        # Probe failed, but we already know there's a non-dumb $TERM,
        # so optimistically assume color works.
        pass
    return True
def read_options(cmdlineargs, environment):
    """Build the effective cqlsh options from, in increasing precedence:
    cqlshrc config file, environment (CQLSH_HOST/CQLSH_PORT), and the
    command line.  Returns (options, hostname, port)."""
    configs = ConfigParser.SafeConfigParser()
    configs.read(CONFIG_FILE)

    # A second, raw (non-interpolating) parser for values that may
    # legitimately contain '%' (e.g. the password).
    rawconfigs = ConfigParser.RawConfigParser()
    rawconfigs.read(CONFIG_FILE)

    # Seed optparse defaults from the config file so command-line flags
    # override cqlshrc settings.
    optvalues = optparse.Values()
    optvalues.username = option_with_default(configs.get, 'authentication', 'username')
    optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
    optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
    optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
    optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
                                               DEFAULT_COMPLETEKEY)
    optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
    # Time/date formats are strftime strings containing '%' — read raw.
    optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
                                                    DEFAULT_TIMESTAMP_FORMAT)
    optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
                                                        DEFAULT_NANOTIME_FORMAT)
    optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
                                                    DEFAULT_DATE_FORMAT)
    optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
                                                    DEFAULT_FLOAT_PRECISION)
    optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
    optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
                                                   DEFAULT_MAX_TRACE_WAIT)
    optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)

    optvalues.debug = False
    optvalues.file = None
    optvalues.ssl = False
    optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)

    optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
    optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', DEFAULT_CQLVER)
    optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
    optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
    optvalues.execute = None

    (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)

    hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
    port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)

    # Timeouts from the config file arrive as strings; validate them.
    # parser.error() exits, so the assignment after it is defensive only.
    try:
        options.connect_timeout = int(options.connect_timeout)
    except ValueError:
        parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
        options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS

    try:
        options.request_timeout = int(options.request_timeout)
    except ValueError:
        parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
        options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS

    # Environment overrides config; positional args override everything.
    hostname = environment.get('CQLSH_HOST', hostname)
    port = environment.get('CQLSH_PORT', port)

    if len(arguments) > 0:
        hostname = arguments[0]
    if len(arguments) > 1:
        port = arguments[1]

    if options.file or options.execute:
        options.tty = False

    if options.execute and not options.execute.endswith(';'):
        options.execute += ';'

    if optvalues.color in (True, False):
        options.color = optvalues.color
    else:
        # Color was not explicitly configured: disable it for file input,
        # otherwise autodetect terminal capability.
        if options.file is not None:
            options.color = False
        else:
            options.color = should_use_color()

    options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
    if cqlvertup[0] < 3:
        parser.error('%r is not a supported CQL version.' % options.cqlversion)
    else:
        options.cqlmodule = cql3handling

    try:
        port = int(port)
    except ValueError:
        parser.error('%r is not a valid port number.' % port)
    return options, hostname, port
def setup_cqlruleset(cqlmodule):
    """Install the given CQL module's rule set as the global `cqlruleset`
    and extend it with cqlsh-specific syntax rules, completers, and the
    set of commands terminated by a newline."""
    global cqlruleset
    cqlruleset = cqlmodule.CqlRuleSet
    cqlruleset.append_rules(cqlsh_extra_syntax_rules)
    for rulename, termname, completer_func in cqlsh_syntax_completers:
        register = cqlruleset.completer_for(rulename, termname)
        register(completer_func)
    cqlruleset.commands_end_with_newline.update(my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
    # Publish the selected CQL module's help-topic table globally.
    global cqldocs
    cqldocs = cqlmodule.cqldocs
def init_history():
    """Load the readline history file and configure completer delimiters.

    Bug fix: ``str.replace`` returns a new string (strings are immutable);
    the original code called ``delims.replace("'", "")`` and discarded the
    result, so the single quote was never actually removed from readline's
    delimiter set and completion inside quoted identifiers was broken.
    """
    if readline is not None:
        try:
            readline.read_history_file(HISTORY)
        except IOError:
            # Missing/unreadable history file is fine on first run.
            pass
        delims = readline.get_completer_delims()
        # Drop "'" so quoted names complete as a single token, and add '.'
        # so keyspace.table splits into separately-completable parts.
        delims = delims.replace("'", "")
        delims += '.'
        readline.set_completer_delims(delims)
def save_history():
    """Persist the readline history to HISTORY; I/O failures (e.g. an
    unwritable home directory) are silently ignored."""
    if readline is None:
        return
    try:
        readline.write_history_file(HISTORY)
    except IOError:
        pass
def main(options, hostname, port):
    """Entry point: wire up globals from the chosen CQL module, open the
    input source, resolve the display timezone, run the interactive or
    batch shell loop, and exit 2 if a batch run hit a statement error."""
    setup_cqlruleset(options.cqlmodule)
    setup_cqldocs(options.cqlmodule)
    init_history()
    csv.field_size_limit(options.field_size_limit)

    if options.file is None:
        stdin = None
    else:
        try:
            # Respect a BOM in the input file, if any.
            encoding, bom_size = get_file_encoding_bomsize(options.file)
            stdin = codecs.open(options.file, 'r', encoding)
            stdin.seek(bom_size)
        except IOError, e:
            sys.exit("Can't open %r: %s" % (options.file, e))

    if options.debug:
        sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
        sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
        sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))

    # create timezone based on settings, environment or auto-detection
    timezone = None
    if options.timezone or 'TZ' in os.environ:
        try:
            import pytz
            if options.timezone:
                try:
                    timezone = pytz.timezone(options.timezone)
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
            if 'TZ' in os.environ:
                # $TZ wins over the cqlshrc setting.
                try:
                    timezone = pytz.timezone(os.environ['TZ'])
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
        except ImportError:
            sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")

    # try auto-detect timezone if tzlocal is installed
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()
        except ImportError:
            # we silently ignore and fallback to UTC unless a custom timestamp format (which likely
            # does contain a TZ part) was specified
            if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
                sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
                                 "Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")

    try:
        shell = Shell(hostname,
                      port,
                      color=options.color,
                      username=options.username,
                      password=options.password,
                      stdin=stdin,
                      tty=options.tty,
                      completekey=options.completekey,
                      browser=options.browser,
                      cqlver=options.cqlversion,
                      keyspace=options.keyspace,
                      display_timestamp_format=options.time_format,
                      display_nanotime_format=options.nanotime_format,
                      display_date_format=options.date_format,
                      display_float_precision=options.float_precision,
                      display_timezone=timezone,
                      max_trace_wait=options.max_trace_wait,
                      ssl=options.ssl,
                      single_statement=options.execute,
                      request_timeout=options.request_timeout,
                      connect_timeout=options.connect_timeout,
                      encoding=options.encoding)
    except KeyboardInterrupt:
        sys.exit('Connection aborted.')
    except CQL_ERRORS, e:
        sys.exit('Connection error: %s' % (e,))
    except VersionNotSupported, e:
        sys.exit('Unsupported CQL version: %s' % (e,))
    if options.debug:
        shell.debug = True

    shell.cmdloop()
    save_history()

    # In batch mode (-f/-e) a failed statement yields a non-zero exit code.
    batch_mode = options.file or options.execute
    if batch_mode and shell.statement_error:
        sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()

# Script entry point: parse cqlshrc/env/argv, then run the shell loop.
if __name__ == '__main__':
    main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
# (removed: trailing non-source text accidentally appended during extraction —
#  dataset/web-page residue, not part of cqlsh)