"""Init and utils."""
from zope.i18nmessageid import MessageFactory
MessageFactory = MessageFactory('sng.sitecontent')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
|
"""
Problem 11
Data Structures : Trie, Bloom Filters
"""
import unittest
class UrlEncodingUsingTrie:
def __init__(self):
self._visited_paths = {}
@property
def visited(self):
return self._visited_paths
@visited.setter
def visited(self, link):
"""
:param link:
:type link: str
:return:
"""
        trie_node = self._visited_paths
        for character in link:
            trie_node = trie_node.setdefault(character, {})
        # Mark the end of a complete link so prefixes are not mistaken for it.
        trie_node['*'] = True
class MyTestCase(unittest.TestCase):
    def test_UrlEncodingUsingTrie(self):
        encoding = UrlEncodingUsingTrie()
        encoding.visited = 'www.a.co'
        encoding.visited = 'www.alpha.com'
        # Walk the trie for the first link and check its end-of-path marker.
        node = encoding.visited
        for character in 'www.a.co':
            node = node[character]
        self.assertTrue(node['*'])
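# A minimal lookup sketch (an addition, not part of the original problem):
# walk the same nested-dict trie that UrlEncodingUsingTrie builds and check
# the '*' end-of-path marker, so prefixes of a stored link don't count.
def is_visited(trie, link):
    node = trie
    for character in link:
        if character not in node:
            return False
        node = node[character]
    return node.get('*', False)
# e.g. after inserting 'www.a.co', is_visited(trie, 'www.a.co') is True
# while is_visited(trie, 'www.a') is False.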
|
import requests
import re
import urllib.parse
import logging
logger = logging.getLogger('pycanvas.BaseCanvasAPI')
class BaseCanvasAPI(object):
def __init__(self, instance_address, access_token, **kwargs):
self.instance_address = instance_address
self.access_token = access_token
        logger.debug('Created new CanvasAPI client for instance: {}.'.format(self.instance_address))
self.session = requests.Session()
self.session.headers.update({'Authorization': 'Bearer {}'.format(self.access_token)})
logger.debug('Using Authorization Token authentication method. Added token to headers: {}'.format('Authorization: Bearer {}'.format(self.access_token)))
self.rel_matcher = re.compile(r' ?rel="([a-z]+)"')
    def uri_for(self, path):
        return self.instance_address + path
def extract_data_from_response(self, response, data_key=None):
"""Given a response and an optional data_key should return a dictionary of data returned as part of the response."""
response_json_data = response.json()
        # There seem to be two response shapes: a dict of keyed data, or a flat list with no key.
        if isinstance(response_json_data, list):
# Return the data
return response_json_data
        elif isinstance(response_json_data, dict):
if data_key is None:
return response_json_data
else:
return response_json_data[data_key]
else:
raise CanvasAPIError(response)
def extract_pagination_links(self, response):
        '''Given a response from a Canvas API endpoint,
        extract the pagination links from the response headers.'''
try:
link_header = response.headers['Link']
except KeyError:
            logger.warning('Unable to find the Link header. Unable to continue with pagination.')
return None
split_header = link_header.split(',')
exploded_split_header = [i.split(';') for i in split_header]
pagination_links = {}
for h in exploded_split_header:
link = h[0]
rel = h[1]
# Check that the link format is what we expect
if link.startswith('<') and link.endswith('>'):
link = link[1:-1]
else:
continue
# Extract the rel argument
m = self.rel_matcher.match(rel)
try:
rel = m.groups()[0]
except AttributeError:
# Match returned None, just skip.
continue
except IndexError:
# Matched but no groups returned
continue
pagination_links[rel] = link
return pagination_links
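    # Illustrative example (not from the library): a Canvas-style header such as
    #   Link: <https://canvas.example.com/api/v1/courses?page=2>; rel="next",
    #         <https://canvas.example.com/api/v1/courses?page=1>; rel="first"
    # parses to
    #   {'next': 'https://canvas.example.com/api/v1/courses?page=2',
    #    'first': 'https://canvas.example.com/api/v1/courses?page=1'}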
def has_pagination_links(self, response):
return 'Link' in response.headers
    def depaginate(self, response, data_key=None):
        logger.debug('Attempting to depaginate response from {}'.format(response.url))
        all_data = []
        this_data = self.extract_data_from_response(response, data_key=data_key)
        if this_data is not None:
            if isinstance(this_data, list):
                all_data += this_data
            else:
                all_data.append(this_data)
        if self.has_pagination_links(response):
            pagination_links = self.extract_pagination_links(response)
            while pagination_links and 'next' in pagination_links:
                response = self.session.get(pagination_links['next'])
                pagination_links = self.extract_pagination_links(response)
                this_data = self.extract_data_from_response(response, data_key=data_key)
                if this_data is not None:
                    if isinstance(this_data, list):
                        all_data += this_data
                    else:
                        all_data.append(this_data)
        else:
            logger.warning('Response from {} has no pagination links.'.format(response.url))
        return all_data
def generic_request(self, method, uri,
all_pages=False,
data_key=None,
no_data=False,
do_not_process=False,
force_urlencode_data=False,
data=None,
params=None,
files=None,
single_item=False):
"""Generic Canvas Request Method."""
if not uri.startswith('http'):
uri = self.uri_for(uri)
if force_urlencode_data is True:
            uri += '?' + urllib.parse.urlencode(data)
assert method in ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']
if method == 'GET':
response = self.session.get(uri, params=params)
elif method == 'POST':
response = self.session.post(uri, data=data, files=files)
elif method == 'PUT':
response = self.session.put(uri, data=data)
elif method == 'DELETE':
response = self.session.delete(uri, params=params)
elif method == 'HEAD':
response = self.session.head(uri, params=params)
elif method == 'OPTIONS':
response = self.session.options(uri, params=params)
response.raise_for_status()
if do_not_process is True:
return response
if no_data:
return response.status_code
if all_pages:
return self.depaginate(response, data_key)
if single_item:
r = response.json()
if data_key:
return r[data_key]
else:
return r
return response.json()
def _validate_enum(self, value, acceptable_values):
if not hasattr(value, '__iter__'):
if value not in acceptable_values:
raise ValueError('{} not in {}'.format(value, str(acceptable_values)))
else:
for v in value:
if v not in acceptable_values:
raise ValueError('{} not in {}'.format(value, str(acceptable_values)))
return value
def _validate_iso8601_string(self, value):
"""Return the value or raise a ValueError if it is not a string in ISO8601 format."""
        ISO8601_REGEX = r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})([+-](\d{2}):(\d{2})|Z)'
if re.match(ISO8601_REGEX, value):
return value
else:
raise ValueError('{} must be in ISO8601 format.'.format(value))
class CanvasAPIError(Exception):
def __init__(self, response):
self.response = response
def __unicode__(self):
return u'API Request Failed. Status: {} Content: {}'.format(self.response.status_code, self.response.content)
def __str__(self):
return 'API Request Failed. Status: {} Content: {}'.format(self.response.status_code, self.response.content)
class BaseModel(object):
pass
HTTPError = requests.HTTPError
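# A hedged usage sketch (the instance address and token are placeholders, and
# '/api/v1/courses' is just an illustrative Canvas endpoint):
#
#   api = BaseCanvasAPI('https://canvas.example.com', 'my-access-token')
#   courses = api.generic_request('GET', '/api/v1/courses', all_pages=True)
#
# With all_pages=True, generic_request follows the rel="next" pagination links
# via depaginate() and returns the concatenated JSON results.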
|
import numpy as np
import os
import weight
from fpa import post_process_operation
from fpa.FPA import Wing
def main(wing, velocity, temperature, aoaarray):
    """Compute the lift and drag polars of a wing.
    :param wing: Wing object built from a planform CSV file
    :param velocity: design cruise speed
    :param temperature: ambient temperature
    :param aoaarray: angles of attack [deg] to evaluate
    :return: arrays of CL and CD, one entry per angle of attack
    """
wing.set_temperature(temperature)
wing.set_velocity(velocity)
wing.calc_reynolds()
wing.set_aerodynamcis_data()
"""機体の特性を出す"""
CLarray = []
CDarray = []
for i in aoaarray:
print "calculating aerodynamics at alpha = {} [deg]".format(str(i))
wing.calc_lift_slope_and_zero_lift_array()
wing.calc_CL_Cdi_CD(i)
CLarray.append(wing.CL)
CDarray.append(wing.CD)
return np.array(CLarray), np.array(CDarray)
# wing.CLarray = CLarray
# wing.CDarray = CDarray
# """maxL/Dの線を引くためのリスト生成"""
# # j = 0
# maxslope = None
# for i in np.array(CLarray)/np.array(CDarray):
# if i == max(np.array(CLarray)/np.array(CDarray)):
# maxslope = i
# # j += 1
# wing.maxslope = maxslope
#
# xlist = list(CDarray)
# ylist = list(np.array(wing.CDarray)*wing.maxslope)
# xlist.insert(0, 0)
# ylist.insert(0, 0)
# wing.xmaxLDline = xlist
# wing.ymaxLDline = ylist
if __name__ == '__main__':
number_cell = 40
velocity = 8.5
temperature = 30
aspect_ratio = 21.0
surface_area = 24.0
aoa_array = np.arange(-5, 15, 0.5)
testWing = Wing('testplane.csv', number_cell, aspect_ratio, surface_area, optflag=0)
cl, cd = main(testWing, velocity, temperature, aoa_array)
data_for_record = np.array([aoa_array, cl, cd]).transpose()
if not os.path.isdir("./results/{}".format(testWing.dirname)):
os.mkdir("./results/{}".format(testWing.dirname))
np.savetxt("./results/{}/aerodynamics_data.csv".format(testWing.dirname), data_for_record,
delimiter=",", header="alpha, CL, CD")
# testWing.calc_variedaoa(velocity, temperature, range(0, 10))
# testWing.calc_planform()
# ww = weight.calc_weight(testWing.span, "FX76-MP140")
# post_process_operation.draw_spandirdata(testWing.yy,
# testWing.dL,
# testWing.clDist,
# testWing.circDist,
# testWing.ellipse,
# testWing.inducedAoa,
# testWing.planx,
# testWing.plany,
# testWing.dirname)
#
|
'''
render.py
changelog
2013-12-01[00:32:46]:created
2013-12-14[23:52:33]:define TokenRender
2013-12-17[12:13:55]:move removeCssDepsDeclaration out of class
@info yinyong,osx-x64,UTF-8,192.168.1.101,py,/Users/yinyong/work/ursa2/src
@author yanni4night@gmail.com
@version 0.0.1
@since 0.0.1
'''
from conf import C,log,BASE_DIR
import utils
import os
import re
import json
from timestamp import html_link,html_script,html_img,all_url,all as allt
from deps import DepsFinder
from replace import replace
from jinja2 import Template,Environment,FileSystemLoader,TemplateNotFound,TemplateSyntaxError
_template_dir = C('template_dir')
jinjaenv = Environment(loader = FileSystemLoader(utils.abspath(_template_dir), C('encoding') ), extensions = ["jinja2.ext.do"] , autoescape = True )
build_jinjaenv = Environment( loader = FileSystemLoader( os.path.join( os.getcwd() , C('build_dir'), _template_dir) , C('encoding') ))
mgr_jinjaenv = Environment( loader = FileSystemLoader( os.path.join(BASE_DIR,'tpl') , C('encoding') ))
def render_file(filename,data = None,noEnvironment = False,build = False):
    '''
    Render a template file.
    '''
if noEnvironment:
        body = mgr_jinjaenv.get_template(filename)  # Template(utils.readfile(filename)) -- this should be an absolute path
else:
if build:
body = build_jinjaenv.get_template(filename)
else:
body = jinjaenv.get_template(filename)
return body.render(data or {})
def removeCssDepsDeclaration(html):
    '''
    Remove CSS dependency declarations (<!-- @require ... -->) from the HTML.
    '''
    return re.sub(r'<!--[\s\S]*?@require[\s\S]*?-->', '', html)
class TokenRender(object):
    '''
    Render the template identified by a token, resolving its data and CSS dependencies.
    '''
    def __init__(self, token):
self.__token = utils.filterPath(token)
df = DepsFinder(token)
self.__deps = df.find()
self.__include_deps = df.findIncludes()
self.__html = None
    def getData(self, including_deps=True):
data = {}
if C('disable_deps_search') or not including_deps:
deps = [self.__token+'.'+C('template_ext')]
else:
            # copy
deps = self.__deps[0:]
deps.reverse()
deps.insert(len(deps),self.__token+".json")
deps.insert(0,"_ursa.json")
for dep in deps:
try:
json_filepath = utils.abspath(os.path.join(C('data_dir'),re.sub(r'\.%s$'%C('template_ext'),'.json',dep)))
content = utils.readfile(json_filepath)
                content = re.sub(r'/\*[\s\S]*?\*/', '', content)  # strip /* */ comments
json_data = json.loads(content)
data.update(json_data)
            except Exception as e:
                pass  # log.warn('[getdata]%s:%s' % (json_filepath, e))
return data
    def render(self, build=False):
        '''
        Find data-file dependencies and render the template.
        '''
#remove '/'s at start
if self.__html is None:
data = self.getData()
multoken = self.__token.split('/')
data.update({'_token': self.__token.replace('/','_')})
data.update({'_folder':multoken[0]})
data.update({'_subtoken':multoken[1] if len(multoken)>1 else ""})
tpl_path = self.__token + "." + C('template_ext')
html = render_file( tpl_path,data,False,build)
if C('server_add_timestamp'):
#html = html_script(html)
#html = html_link(html)
#html = html_img(html)
#html = all_url(html)
html = allt(html)
html = replace(html)
if not build and not re.match(r'[\s\S]*?<html[\s\S]+?<body',html,re.I):
#sub template
css_deps = self.__getDepsCss(html)
for tpl in self.__include_deps:
css = os.path.join('.',C('static_dir'),C('css_dir'),re.sub(r"\.%s"%C('template_ext'),".css",tpl))
css_deps.append(css)
subparent = 'subparent.tpl'# os.path.join(BASE_DIR,"tpl",'subparent.tpl')
html = render_file(subparent,{'name': self.__token,'content': html,'required_css': css_deps},noEnvironment = True)
html = removeCssDepsDeclaration(html)
self.__html = html
return self.__html
    def __getDepsCss(self, html):
        '''
        Parse "@require xxx.css" declarations and collect the CSS dependencies.
        '''
ret = []
iters = re.finditer(r'@require\s+?([/\w\-]+?\.css)',html,re.I)
for it in reversed(list(iters)):
css = it.group(1)
css = utils.filterRelPath(css)
ret.append( os.path.join('.',C('static_dir'),C('css_dir'),css) )
        return list(dict.fromkeys(ret))  # de-duplicate while keeping a list
if __name__ == '__main__':
tr = TokenRender('index')
    print(tr.render())
|
"""Python version compatibility code."""
import enum
import functools
import inspect
import os
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
from typing import Any
from typing import Callable
from typing import Generic
from typing import Optional
from typing import overload as overload
from typing import Tuple
from typing import TypeVar
from typing import Union
import attr
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
if sys.version_info < (3, 5, 2):
TYPE_CHECKING = False # type: bool
else:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import NoReturn
from typing import Type
from typing_extensions import Final
_T = TypeVar("_T")
_S = TypeVar("_S")
class NotSetType(enum.Enum):
token = 0
NOTSET = NotSetType.token # type: Final # noqa: E305
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata # noqa: F401
def _format_args(func: Callable[..., Any]) -> str:
return str(signature(func))
REGEX_TYPE = type(re.compile(""))
if sys.version_info < (3, 6):
def fspath(p):
"""os.fspath replacement, useful to point out when we should replace it by the
real function once we drop py35."""
return str(p)
else:
fspath = os.fspath
def is_generator(func: object) -> bool:
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func: object) -> bool:
"""Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
    importing asyncio directly, which in turn also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def is_async_function(func: object) -> bool:
"""Return True if the given function seems to be an async function or
an async generator."""
return iscoroutinefunction(func) or (
sys.version_info >= (3, 6) and inspect.isasyncgenfunction(func)
)
def getlocation(function, curdir: Optional[str] = None) -> str:
from _pytest.pathlib import Path
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
try:
relfn = fn.relative_to(curdir)
except ValueError:
pass
else:
return "%s:%d" % (relfn, lineno + 1)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function) -> int:
"""Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(
function: Callable[..., Any],
*,
name: str = "",
is_method: bool = False,
cls: Optional[type] = None
) -> Tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
Should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
"""
# TODO(RonnyPfannschmidt): This function should be refactored when we
# revisit fixtures. The fixture mechanism should ask the node for
# the fixture names, and not try to obtain directly from the
# function object well after collection has occurred.
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
# TODO: Remove type ignore after https://github.com/python/typeshed/pull/4383
p.kind is Parameter.POSITIONAL_OR_KEYWORD # type: ignore[unreachable]
or p.kind is Parameter.KEYWORD_ONLY # type: ignore[unreachable]
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext as nullcontext # noqa: F401
def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
# Note: this code intentionally mirrors the code at the beginning of
# getfuncargnames, to get the arguments which were excluded from its result
# because they had default values.
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s: str) -> str:
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val: bytes) -> str:
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val: Union[bytes, str]) -> str:
r"""If val is pure ASCII, return it as an str, otherwise, escape
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
Note:
The obvious "v.decode('unicode-escape')" will return
valid UTF-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a UTF-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object when we are
creating fixtures, because we wrap the function object ourselves with a
decorator to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
"""Get the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial."""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""Attempt to obtain the real function object that might be wrapping
``obj``, while at the same time returning a bound method to ``holder`` if
the original object was a bound method."""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object: Any, name: str, default: Any) -> Any:
"""Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes
are derived from BaseException instead of Exception (for more details
check #2707).
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj: object) -> bool:
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
if sys.version_info < (3, 5, 2):
def overload(f): # noqa: F811
return f
if getattr(attr, "__version_info__", ()) >= (19, 2):
ATTRS_EQ_FIELD = "eq"
else:
ATTRS_EQ_FIELD = "cmp"
if sys.version_info >= (3, 8):
from functools import cached_property as cached_property
else:
class cached_property(Generic[_S, _T]):
__slots__ = ("func", "__doc__")
def __init__(self, func: Callable[[_S], _T]) -> None:
self.func = func
self.__doc__ = func.__doc__
@overload
def __get__(
self, instance: None, owner: Optional["Type[_S]"] = ...
) -> "cached_property[_S, _T]":
...
@overload # noqa: F811
def __get__( # noqa: F811
self, instance: _S, owner: Optional["Type[_S]"] = ...
) -> _T:
...
def __get__(self, instance, owner=None): # noqa: F811
if instance is None:
return self
value = instance.__dict__[self.func.__name__] = self.func(instance)
return value
if sys.version_info >= (3, 7):
order_preserving_dict = dict
else:
from collections import OrderedDict
order_preserving_dict = OrderedDict
def assert_never(value: "NoReturn") -> "NoReturn":
assert False, "Unhandled value: {} ({})".format(value, type(value).__name__)
|
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
from statsmodels.tsa.stattools import coint
import sys
import matplotlib.pyplot as plt
class Market_Maker:
def __init__(self):
self._stocks = ["BOND", "GS", "MS", "WFC", "XLF", "VALBZ", "VALE"]
self._limits = {"BOND": 100, "GS": 100, "MS": 100, "WFC": 100, \
"XLF": 100, "VALBZ": 10, "VALE": 10}
self._inventory = {"XLF": 0, "VALE": 0, "VALBZ": 0, "GS": 0, \
"BOND": 0, "MS": 0, "WFC": 0}
self._buy = {"XLF": "", "GS": "", "MS": "", "WFC": "", "BOND": "", \
"VALE": "", "VALBZ": ""}
self._sell = {"XLF": "", "GS": "", "MS": "", "WFC": "", "BOND": "", \
"VALE": "", "VALBZ": ""}
self._buyprices = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0, \
"VALE":0, "VALBZ":0}
self._sellprices = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0, \
"VALE":0, "VALBZ":0}
self._quant = 5
self._cancels = {"XLF": [], "GS": [], "MS": [], "WFC": [], \
"BOND": [], "VALE": [], "VALBZ": []}
self._buyidx = 1
self._sellidx = 100000
self._curr_bids = {"XLF": [], "GS": [], "MS": [], "WFC": [], \
"BOND": [], "VALE": [], "VALBZ": []}
self._curr_asks = {"XLF": [], "GS": [], "MS": [], "WFC": [], \
"BOND": [], "VALE": [], "VALBZ": []}
    def update_inventory(self, fill_strings):
        ## go over the fill strings, delete the corresponding outstanding bids
        for _ in fill_strings:
            name, order_id, stock, action = fill_strings[_].split()[0:4]
            order_id = int(order_id)
            if name == 'FILL' and action == 'BUY':
                # drop the filled order from the outstanding bids
                self._curr_bids[stock] = [bid for bid in self._curr_bids[stock]
                                          if bid[0] != order_id]
                self._inventory[stock] += self._quant
            if name == 'FILL' and action == 'SELL':
                # drop the filled order from the outstanding asks
                self._curr_asks[stock] = [ask for ask in self._curr_asks[stock]
                                          if ask[0] != order_id]
                self._inventory[stock] -= self._quant
def get_avg_prices(self, books):
# "book" is the book dictionary for this collection of stocks
        for _ in books:
            key = books[_]['symbol']
            self._buyprices[key] = 0.0
            length = len(books[_]['buy'])
            for idx in range(length):
                self._buyprices[key] += books[_]['buy'][idx][0]
            self._buyprices[key] //= length
            self._sellprices[key] = 0.0
            length = len(books[_]['sell'])
            for idx in range(length):
                self._sellprices[key] += books[_]['sell'][idx][0]
            self._sellprices[key] //= length
def purge(self):
for _ in self._stocks:
self._cancels[_] = []
# check if there are any existing stocks with outstanding
# bids or asks and cancel them
for order in self._curr_bids[_]:
self.cancels[_].append("CANCEL " + str(order[0]))
for order in self._curr_asks[_]:
self.cancels[_].append("CANCEL " + str(order[0]))
self._curr_bids = {"XLF": [], "GS": [], "MS": [], "WFC": [], \
"BOND": [], "VALE": [], "VALBZ": []}
self._curr_asks = {"XLF": [], "GS": [], "MS": [], "WFC": [], \
"BOND": [], "VALE": [], "VALBZ": []}
return self._cancels
    def update_orders(self):  # build order strings from the current prices
        for _ in self._stocks:  # stock key
            self._buyidx += 1
            self._sellidx += 1
            # update buy and sell orders
            self._buy[_] = "ADD {} {} BUY {} {}".format(
                self._buyidx, _, self._buyprices[_], self._quant)
            self._sell[_] = "ADD {} {} SELL {} {}".format(
                self._sellidx, _, self._sellprices[_], self._quant)
            if self._inventory[_] < self._limits[_] - self._quant:
                self._curr_bids[_].append([self._buyidx, self._buyprices[_],
                                           self._quant])
            if self._inventory[_] > -self._limits[_] + self._quant:
                self._curr_asks[_].append([self._sellidx, self._sellprices[_],
                                           self._quant])
        return self._buy, self._sell
|
import os
import sys
from setuptools import setup, find_packages
name = "spectate"
here = os.path.abspath(os.path.dirname(__file__))
pkg_root = os.path.join(here, name)
package = dict(
name=name,
license="MIT",
packages=find_packages(exclude=["tests*"]),
python_requires=">=3.6",
description="Track changes to mutable data types.",
classifiers=["Intended Audience :: Developers"],
author="Ryan Morshead",
author_email="ryan.morshead@gmail.com",
url="https://github.com/rmorshea/spectate",
keywords=["eventful", "callbacks", "mutable", "MVC", "model", "view", "controller"],
platforms="Linux, Mac OS X, Windows",
include_package_data=True,
)
with open(os.path.join(pkg_root, "__init__.py")) as f:
for line in f.read().split("\n"):
if line.startswith("__version__ = "):
package["version"] = eval(line.split("=", 1)[1])
break
else:
print("No version found in %s/__init__.py" % pkg_root)
sys.exit(1)
package["long_description_content_type"] = "text/markdown"
with open(os.path.join(here, "README.md")) as f:
package["long_description"] = f.read()
if __name__ == "__main__":
setup(**package)
|
import pytest
import os
from csv import Sniffer
from natural_bm import callbacks
from natural_bm import optimizers
from natural_bm import training
from natural_bm.models import Model
from natural_bm.datasets import random
from natural_bm.utils_testing import nnet_for_testing
@pytest.mark.parametrize('sep', [',', '\t'], ids=['csv', 'tsv'])
def test_CSVLogger(sep):
"""
This test is a slight modification of test_CSVLogger from
https://github.com/fchollet/keras/blob/master/tests/keras/test_callbacks.py
"""
nnet = nnet_for_testing('rbm')
data = random.Random('probability')
batch_size = 6
n_epoch = 1
if sep == '\t':
filepath = 'log.tsv'
elif sep == ',':
filepath = 'log.csv'
def make_model(dbm, data):
optimizer = optimizers.SGD()
trainer = training.CD(dbm)
model = Model(dbm, optimizer, trainer)
return model
# case 1, create new file with defined separator
model = make_model(nnet, data)
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model(nnet, data)
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
# case 3, reuse of CSVLogger object
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
import re
with open(filepath) as csvfile:
output = " ".join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
if __name__ == '__main__':
pytest.main([__file__])
|
import argparse
import srilm.vocab
import srilm.stats
import srilm.ngram
import srilm.discount
import srilm.maxent
gtmin = [1, 1, 1, 2, 2, 2, 2, 2, 2, 2]
gtmax = [5, 1, 7, 7, 7, 7, 7, 7, 7, 7]
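# Per-order discounting cutoffs: the Good-Turing and Witten-Bell models below
# use gtmin[i + 1] / gtmax[i + 1] as min/max counts for the (i + 1)-gram level,
# so index 0 is unused.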
def ngramLmWithGoodTuring(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.ngram.Lm(vocab, order)
for i in range(order):
lm.set_discount(
i + 1,
srilm.discount.Discount(
method="good-turing", min_count=gtmin[i + 1], max_count=gtmax[i + 1]
),
)
lm.train(tr)
return lm.test(test)
def ngramLmWithWittenBell(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.ngram.Lm(vocab, order)
for i in range(order):
lm.set_discount(
i + 1, srilm.discount.Discount(method="witten-bell", min_count=gtmin[i + 1])
)
lm.train(tr)
return lm.test(test)
def ngramLmWithKneserNey(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.ngram.Lm(vocab, order)
for i in range(order):
lm.set_discount(
i + 1, srilm.discount.Discount(method="kneser-ney", interpolate=True)
)
lm.train(tr)
return lm.test(test)
def ngramLmWithChenGoodman(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.ngram.Lm(vocab, order)
for i in range(order):
lm.set_discount(
i + 1, srilm.discount.Discount(method="chen-goodman", interpolate=True)
)
lm.train(tr)
return lm.test(test)
def ngramCountLm(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.ngram.CountLm(vocab, order)
lm.train(tr, heldout)
return lm.test(test)
def maxentLm(order, vocab, train, heldout, test):
tr = srilm.stats.Stats(vocab, order)
tr.count_file(train)
lm = srilm.maxent.Lm(vocab, order)
lm.train(tr)
return lm.test(test)
def main(args):
vocab = srilm.vocab.Vocab()
vocab.read(args.vocab)
heldout = srilm.stats.Stats(vocab, args.order)
heldout.count_file(args.heldout)
test = srilm.stats.Stats(vocab, args.order)
test.count_file(args.test)
test.make_test()
# we don't make a shared train stats because some model will change train stats during model estimation
prob, denom, ppl = ngramLmWithGoodTuring(
args.order, vocab, args.train, heldout, test
)
print(
"Ngram LM with Good-Turing discount: logprob =",
prob,
"denom =",
denom,
"ppl =",
ppl,
)
prob, denom, ppl = ngramLmWithWittenBell(
args.order, vocab, args.train, heldout, test
)
print(
"Ngram LM with Witten-Bell discount: logprob =",
prob,
"denom =",
denom,
"ppl =",
ppl,
)
prob, denom, ppl = ngramLmWithKneserNey(
args.order, vocab, args.train, heldout, test
)
print(
"Ngram LM with Kneser-Ney discount: logprob =",
prob,
"denom =",
denom,
"ppl =",
ppl,
)
prob, denom, ppl = ngramLmWithChenGoodman(
args.order, vocab, args.train, heldout, test
)
print(
"Ngram LM with Chen-Goodman discount: logprob =",
prob,
"denom =",
denom,
"ppl =",
ppl,
)
prob, denom, ppl = ngramCountLm(args.order, vocab, args.train, heldout, test)
print(
"Ngram LM with Jelinek-Mercer smoothing: logprob =",
prob,
"denom =",
denom,
"ppl =",
ppl,
)
prob, denom, ppl = maxentLm(args.order, vocab, args.train, heldout, test)
print("MaxEnt LM: logprob =", prob, "denom =", denom, "ppl =", ppl)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train various types of language models on the same train/heldout/test data"
)
parser.add_argument("--order", type=int, default=3, help="Order of the model")
parser.add_argument("--vocab", required=True, help="Vocabulary file")
parser.add_argument("--train", required=True, help="Training text file")
parser.add_argument("--heldout", required=True, help="Heldout text file")
parser.add_argument("--test", required=True, help="Test text file")
args = parser.parse_args()
main(args)
|
"""FileBackend functional tests."""
import unittest
import os
import os.path
import shutil
HERE = os.path.dirname(__file__)
DATA_DIR = os.path.join(HERE, 'data')
TMP_DIR = os.path.join(HERE, 'tmp')
class FileBackendBugs(unittest.TestCase):
def setUp(self):
if not os.path.isdir(TMP_DIR):
os.mkdir(TMP_DIR)
def tearDown(self):
shutil.rmtree(TMP_DIR)
def test_BZ1(self):
"""
If the file backend creates a directory into which to unpack TIFFs, but
then fails for some reason (e.g. bfconvert is not in the path), it has
created a directory, but not a manifest. At this point, re-running the
unpack fails because the directory already exists, and:
os.mkdir(self.directory) throws an error.
"""
from jicbioimage.core.io import DataManager, FileBackend
backend = FileBackend(directory=TMP_DIR)
data_manager = DataManager(backend=backend)
fname = 'single-channel.ome.tif'
# The directory already exists.
os.mkdir(os.path.join(TMP_DIR, fname))
# The below throws if the bug is present.
data_manager.load(os.path.join(DATA_DIR, fname))
if __name__ == '__main__':
unittest.main()
|
"""
Random supporting methods.
(c) May 2017 by Daniel Seita
"""
import numpy as np
import sys
import tensorflow as tf
def compute_ranks(x):
""" Returns ranks in [0, len(x))
Note: This is different from scipy.stats.rankdata, which returns
ranks in [1, len(x)].
Note: this is from OpenAI's code.
"""
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
def compute_centered_ranks(x):
""" This is OpenAI's rank transformation code.
They call this with x.shape = (n,2). The first column indicates the return
for the +eps_i case, the second for the -eps_i case (mirrored sampling).
Each time a roll-out happens, they append [rews_pos, rews_neg] to a list,
which they then vertically concatenate to get to (n,2), so n must indicate
the npop parameter (or maybe half of it).
This will make the maximum score have a rank of 0.5, the smallest score have
a rank of -0.5, and all other values get ranks uniformly distributed in
(-0.5, 0.5), with ties broken based on the order from np.argsort().
    In the OpenAI code, for each generated +eps_i noise vector, the weight for that
vector is actually (F_i-F_i') where F_i' is the result from negating that
vector. So when they do the update, they don't "use" the -eps_i vector. It's
just the +eps_i vector with a *weight* that takes into account the negative
case. Actually that seems right to me, if the weight is negative then we
would have wanted -eps_i and that would be encouraged.
See their `batched_weighted_sum` method, which takes as its first argument a
vector of length (n,) where n is presumably npop. Each component in that
vector represents an F_i-F_i' term. They then do a (1,n)*(n,numparams)
matrix multiply to get a final (1,numparam) weight update.
Finally, they *further* divide that vector by (n*2) before feeding it to the
update. That should represent the (1/npop) which I've been doing.
"""
y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)
y /= (x.size - 1)
y -= .5
return y
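# Worked example (illustrative, not from the original file):
#   x = np.array([[10., 1.], [2., 8.], [5., 5.]])
#   x.ravel() = [10, 1, 2, 8, 5, 5] has ranks [5, 0, 1, 4, 2, 3]
#   (the tie between the two 5s is broken by argsort order), so dividing by
#   (x.size - 1) and subtracting 0.5 gives
#   [[ 0.5, -0.5], [-0.3, 0.3], [-0.1, 0.1]]
# i.e. the largest return maps to +0.5 and the smallest to -0.5.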
def get_tf_session():
    """ Returning a session. Set options here (e.g. for GPUs) if desired. """
    tf.reset_default_graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1,
                               gpu_options=gpu_options)
    session = tf.Session(config=tf_config)
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
print("AVAILABLE GPUS: ", get_available_gpus())
return session
def normc_initializer(std=1.0):
""" Initialize array with normalized columns """
def _initializer(shape, dtype=None, partition_info=None): #pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20151024_1544'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(max_length=30, blank=True),
preserve_default=True,
),
]
|
import django
if django.VERSION > (2,):
from django.urls import reverse, include, reverse_lazy
from django.urls import re_path as url
else:
from django.core.urlresolvers import reverse, reverse_lazy
from django.conf.urls import url, include
__all__ = ['reverse', 'include', 'reverse_lazy', 'url']
|
from django import forms
class SignInForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(min_length=6)
|
from _external import *
java = HeaderChecker('java', 'jni.h', 'c')
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="surface.contours.y", **kwargs):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 16),
min=kwargs.pop("min", 1),
**kwargs
)
|
import ConfigParser
import datetime as dt
import numpy as np
from fisher import pvalue
import os
import elasticsearch as es
from es_query_generator import es_query_generator
from itertools import izip
import json
class DictParser(ConfigParser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
def get_or( obj, field, default ):
return obj[field] if field in obj else default
class EventDetector:
cfg = None
@staticmethod
def get_counts( start, end, baseline_filters, target_filters,
keylist = None, index = None, time_field = None, constant_baseline = False ):
esconfig = EventDetector.cfg['ElasticSearch']
        if index is None: index = get_or(esconfig, 'default_index', '')
        if time_field is None: time_field = get_or(esconfig, 'time_field', 'date')
try:
protocol = get_or(esconfig, 'protocol', 'https')
usernm = get_or(esconfig, 'username', '')
passwd = get_or(esconfig, 'password', '')
if len(passwd) > 0: passwd = ':' + passwd + '@'
elif len(usernm) > 0: usernm = usernm + '@'
host = get_or(esconfig, 'host', 'localhost')
port = get_or(esconfig, 'port', '9200')
esi = es.Elasticsearch('{}://{}{}{}:{}'.format(
protocol, usernm, passwd, host, port))
if not esi.ping():
raise Exception('ERROR: Elasticsearch server did not respond to ping.')
n = (end - start).days + 1
ts_baseline = np.empty(n)
ts_target = np.empty(n)
if constant_baseline: ts_baseline.fill(1)
else:
qb = es_query_generator(start, end, baseline_filters, keylist, time_field)
rb = esi.search(index = index, body = qb)
for idx, val in enumerate(rb['aggregations']['counts']['buckets']):
ts_baseline[idx] = int(val['doc_count'])
qt = es_query_generator(start, end, target_filters , keylist, time_field)
rt = esi.search(index = index, body = qt)
for idx, val in enumerate(rt['aggregations']['counts']['buckets']):
ts_target[idx] = int(val['doc_count'])
return {'baseline': ts_baseline, 'target': ts_target}
except Exception as e:
print('ERROR: Could not query elastic search.')
print(str(e))
raise
@staticmethod
def load_configuration( filename ):
if os.path.isfile(filename):
config = DictParser()
config.read(filename)
EventDetector.cfg = config.as_dict()
else:
print('ERROR: Could not find configuration file "{}"!'.format(filename))
EventDetector.cfg = []
@staticmethod
def temporal_scan( \
baseline_filters, target_filters, analysis_start, analysis_end,
keylist = None, cur_window = 7, ref_window = 91, lag = 0, constant_baseline = False,
index = None, time_field = None):
start = None
end = None
        if EventDetector.cfg is None:
EventDetector.load_configuration('config/tad.cfg')
if start is None:
start = analysis_start - dt.timedelta(days = cur_window + lag + ref_window - 1)
if end is None:
end = analysis_end
counts = EventDetector.get_counts(
start, end, baseline_filters, target_filters,
keylist, index, time_field, constant_baseline)
if isinstance(counts, str):
raise Exception(counts)
elif len(counts) == 0:
raise Exception('ERROR: No results returned. Valid analysis range specified?')
kernel_ref = np.ones(ref_window)
kernel_cur = np.ones(cur_window)
n_days = (analysis_end - analysis_start).days + 1
baseline_ref = np.correlate(counts['baseline'], kernel_ref)[:n_days]
target_ref = np.correlate(counts['target'] , kernel_ref)[:n_days]
baseline_cur = np.correlate(counts['baseline'], kernel_cur)[-n_days:]
target_cur = np.correlate(counts['target'] , kernel_cur)[-n_days:]
on_date = analysis_start
results = []
for si in xrange(n_days):
p = pvalue(baseline_ref[si], target_ref[si], baseline_cur[si], target_cur[si])
results.append([
on_date, baseline_ref[si], target_ref[si], baseline_cur[si],
target_cur[si], p.left_tail, p.two_tail, p.right_tail])
on_date += dt.timedelta(days = 1)
return results
if __name__ == '__main__':
result = EventDetector.temporal_scan(
baseline_filters = {},
target_filters = {'Location': 'MINOT_NORTH_DAKOTA'},
analysis_start = dt.datetime.strptime('2013-07-25', '%Y-%m-%d').date(),
analysis_end = dt.datetime.strptime('2013-07-27', '%Y-%m-%d').date(),
cur_window = 1,
ref_window = 1,
lag = 0,
index = 'trafficking',
time_field = 'Date')
for r in result:
print(r)
|
"""Edit a genome
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
if "FABRIC_API_PASSWORD" not in os.environ:
sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def put_genome(genome_id, name=None, external_id=None, project_id=None):
"""Use the Omicia API to edit an existing genome or move it to a different project.
"""
# Construct request
url = "{}/genomes/{}"
url = url.format(FABRIC_API_URL, genome_id)
url_payload = json.dumps({"name": name,
"external_id": external_id,
"project_id": project_id
})
result = requests.put(url, auth=auth, data=url_payload)
return result.json()
def main():
"""Main function. Edit a genome.
"""
parser = argparse.ArgumentParser(
description='Edit a genome or move it to another project.')
parser.add_argument('g', metavar='genome_id')
parser.add_argument('--n', metavar='name')
parser.add_argument('--e', metavar='external_id')
parser.add_argument('--p', metavar='project_id')
args = parser.parse_args()
genome_id = args.g
name = args.n
external_id = args.e
project_id = args.p
json_response = put_genome(genome_id,
name=name,
external_id=external_id,
project_id=project_id)
try:
sys.stdout.write(json.dumps(json_response, indent=4))
sys.stdout.write('\n')
except TypeError:
sys.stdout.write("Unexpected error. Perhaps the genome you specified no longer exists?\n\n")
if __name__ == "__main__":
main()
|
"""Attempt #3 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
a Rate neuron, we can pick the first on on the list that matches
- Configuration of parameters is done via descriptors
- NeuronPools use multiple inheritence off neuron types
- build() step is delayed until after constructor, as we don't want that
to happen until build time
"""
import numpy as np
import weakref
"""
Neuron type specifications
"""
class FloatParameter(object):
def __init__(self, default, min=None, max=None):
self.default = float(default)
self.min = min
self.max = max
self.data = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
return self.data.get(instance, self.default)
def __set__(self, instance, value):
if self.min is not None and value < self.min:
raise AttributeError('parameter value must be >=%g' % self.min)
if self.max is not None and value > self.max:
raise AttributeError('parameter value must be <=%g' % self.max)
self.data[instance] = float(value)
class Neuron(object):
def __init__(self, **kwargs):
self._allow_new_attributes = False
for key, value in kwargs.items():
setattr(self, key, value)
def __setattr__(self, key, value):
if (not key.startswith('_') and not self._allow_new_attributes
and key not in dir(self)):
raise AttributeError('Unknown parameter "%s"' % key)
super(Neuron, self).__setattr__(key, value)
class LIF(Neuron):
tau_rc = FloatParameter(0.02, min=0)
tau_ref = FloatParameter(0.002, min=0)
class Rate(Neuron):
pass
class Spiking(Neuron):
pass
class Fixed(Neuron):
pass
class Izhikevich(Neuron):
a = FloatParameter(0.02)
b = FloatParameter(0.2)
c = FloatParameter(-65)
d = FloatParameter(8)
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool(Neuron):
def __init__(self, neuron_types=None):
self._allow_new_attributes = False
for n in neuron_types:
for key in dir(n):
if not key.startswith('_'):
setattr(self, key, getattr(n, key))
self._allow_new_attributes = True
def build(self, n_neurons):
raise NotImplementedError('NeuronPools must provide "make"')
def step(self, dt, J):
raise NotImplementedError('NeuronPools must provide "step"')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool, LIF, Rate):
def build(self, n_neurons):
pass
def step(self, dt, J):
old = np.seterr(divide='ignore', invalid='ignore')
try:
r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
r[J <= 1] = 0
finally:
np.seterr(**old)
return r * dt # multiply by dt to do rate per timestep
class LIFSpikingPool(NeuronPool, LIF, Spiking):
def build(self, n_neurons):
self.voltage = np.zeros(n_neurons)
self.refractory_time = np.zeros(n_neurons)
def step(self, dt, J):
dv = (dt / self.tau_rc) * (J - self.voltage)
self.voltage += dv
self.voltage[self.voltage < 0] = 0
self.refractory_time -= dt
self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
spiked = self.voltage > 1
overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
spiketime = dt * (1 - overshoot)
self.voltage[spiked > 0] = 0
self.refractory_time[spiked > 0] = self.tau_ref + spiketime
return spiked
class LIFFixedPool(NeuronPool, LIF, Spiking, Fixed):
def build(self, n_neurons):
        self.voltage = np.zeros(n_neurons, dtype='int32')
self.refractory_time = np.zeros(n_neurons, dtype='u8')
self.dt = None
self.lfsr = 1
def step(self, dt, J):
if self.dt != dt:
self.dt = dt
self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
dv[self.refractory_time > 0] = 0
self.refractory_time[self.refractory_time > 0] -= 1
self.voltage += dv
self.voltage[self.voltage < 0] = 0
spiked = self.voltage > 0x10000
self.refractory_time[spiked > 0] = self.ref_steps
# randomly adjust the refractory period to account for overshoot
for i in np.where(spiked > 0)[0]:
p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
if self.lfsr < p:
self.refractory_time[i] -= 1
self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
self.voltage[spiked > 0] = 0
return spiked
class IzhikevichPool(NeuronPool, Izhikevich, Spiking):
def build(self, n_neurons):
self.v = np.zeros(n_neurons) + self.c
self.u = self.b * self.v
def step(self, dt, J):
dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
du = (self.a * (self.b * self.v - self.u)) * 1000
self.v += dv * dt
self.u += du * dt
spiked = self.v >= 30
self.v[spiked > 0] = self.c
self.u[spiked > 0] = self.u[spiked > 0] + self.d
return spiked
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
]
"""
Create a pool of neurons, given the required type specifications
"""
import inspect
def create(n_neurons, neuron_type):
# make sure it's a list
try:
len(neuron_type)
except TypeError:
neuron_type = [neuron_type]
    # make sure elements in the list are instances, not classes
    for i, ntype in enumerate(neuron_type):
        if inspect.isclass(ntype):
            neuron_type[i] = ntype()
    # look through the list of neuron models to see if we can
    # find a match
    for model in neuron_models:
        for ntype in neuron_type:
            if not issubclass(model, ntype.__class__):
break
else:
n = model(neuron_type)
n.build(n_neurons)
return n
raise Exception('Could not find suitable neuron model')
if __name__ == '__main__':
default = create(100, [])
spiking = create(100, [LIF, Spiking])
rate = create(100, [LIF, Rate])
fixed = create(100, [LIF, Fixed])
iz = create(100, [Izhikevich])
#iz = create(100, [Izhikevich(a=0.02, b=0.2, c=-50, d=2)])
J = np.linspace(-2, 10, 100)
dt = 0.001
T = 1
default_data = []
spiking_data = []
rate_data = []
iz_data = []
fixed_data = []
v = []
for i in range(int(T/dt)):
default_data.append(default.step(dt, J))
spiking_data.append(spiking.step(dt, J))
rate_data.append(rate.step(dt, J))
iz_data.append(iz.step(dt, J))
fixed_data.append(fixed.step(dt, J))
v.append(fixed.voltage[-1])
default_tuning = np.sum(default_data, axis=0)/T
spiking_tuning = np.sum(spiking_data, axis=0)/T
rate_tuning = np.sum(rate_data, axis=0)/T
iz_tuning = np.sum(iz_data, axis=0)/T
fixed_tuning = np.sum(fixed_data, axis=0)/T
import pylab
pylab.subplot(2, 1, 1)
pylab.plot(J, default_tuning, label='default')
pylab.plot(J, spiking_tuning, label='spiking LIF')
pylab.plot(J, rate_tuning, label='rate LIF')
pylab.plot(J, iz_tuning, label='Iz')
pylab.plot(J, fixed_tuning, label='fixed LIF')
pylab.legend(loc='best')
pylab.subplot(2, 1, 2)
pylab.plot(v)
#pylab.plot(np.array(fixed_data)[:,-1])
pylab.show()
|
import struct
def read_fmt(handle, fmt):
size = struct.calcsize(fmt)
read = handle.read(size)
if len(read) != size:
raise EOFError()
return struct.unpack(fmt, read)[0]
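# A small usage sketch: read a little-endian uint32 from an in-memory stream
# (io.BytesIO stands in for any file-like object opened in binary mode).
if __name__ == '__main__':
    import io
    handle = io.BytesIO(struct.pack('<I', 42))
    assert read_fmt(handle, '<I') == 42
    # Note: read_fmt returns only the first unpacked field, so a multi-field
    # format such as '<IH' would drop everything after the first value.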
|
from setuptools import setup
setup(
name='wildpath',
version='0.3.1',
description='easy data structure access utility',
long_description='see <https://github.com/gemerden/wildpath>',
author='Lars van Gemerden',
author_email='gemerden@gmail.com',
license='MIT License',
packages=['wildpath'],
install_requires=['boolean.py'],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
],
python_requires='>=3.3',
keywords='access data structure getter setter deleter iterator utility tool path wildcard slice',
)
|
import socket
host = ''
port = 7000
addr = (host, port)
serv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_socket.bind(addr)
serv_socket.listen(10)
print('waiting for connection')
con, cliente = serv_socket.accept()
print('connected')
print('waiting for message')
while True:
    recebe = con.recv(1024)
    if not recebe:
        break  # client closed the connection
    print('message received: ' + recebe.decode())
|
from .key_value_pair import KeyValuePair
from .tag import Tag
from .frame import Frame
from .frames import Frames
from .classification import Classification
from .status import Status
from .email import Email
from .ipa import IPA
from .phone import Phone
from .address import Address
from .pii import PII
from .detected_terms import DetectedTerms
from .screen import Screen
from .face import Face
from .found_faces import FoundFaces
from .candidate import Candidate
from .ocr import OCR
from .evaluate import Evaluate
from .match import Match
from .match_response import MatchResponse
from .detected_language import DetectedLanguage
from .image_list_metadata import ImageListMetadata
from .image_list import ImageList
from .term_list_metadata import TermListMetadata
from .term_list import TermList
from .refresh_index_advanced_info_item import RefreshIndexAdvancedInfoItem
from .refresh_index import RefreshIndex
from .image_additional_info_item import ImageAdditionalInfoItem
from .image import Image
from .image_ids import ImageIds
from .terms_in_list import TermsInList
from .terms_data import TermsData
from .terms_paging import TermsPaging
from .terms import Terms
from .review import Review
from .job_execution_report_details import JobExecutionReportDetails
from .job import Job
from .job_list_result import JobListResult
from .job_id import JobId
from .error import Error
from .api_error import APIError, APIErrorException
from .body_metadata import BodyMetadata
from .body import Body
from .create_review_body_item_metadata_item import CreateReviewBodyItemMetadataItem
from .create_review_body_item import CreateReviewBodyItem
from .content import Content
from .transcript_moderation_body_item_terms_item import TranscriptModerationBodyItemTermsItem
from .transcript_moderation_body_item import TranscriptModerationBodyItem
from .body_model import BodyModel
from .create_video_reviews_body_item_video_frames_item_reviewer_result_tags_item import CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem
from .create_video_reviews_body_item_video_frames_item_metadata_item import CreateVideoReviewsBodyItemVideoFramesItemMetadataItem
from .create_video_reviews_body_item_video_frames_item import CreateVideoReviewsBodyItemVideoFramesItem
from .create_video_reviews_body_item_metadata_item import CreateVideoReviewsBodyItemMetadataItem
from .create_video_reviews_body_item import CreateVideoReviewsBodyItem
from .video_frame_body_item_reviewer_result_tags_item import VideoFrameBodyItemReviewerResultTagsItem
from .video_frame_body_item_metadata_item import VideoFrameBodyItemMetadataItem
from .video_frame_body_item import VideoFrameBodyItem
from .content_moderator_client_enums import (
AzureRegionBaseUrl,
)
__all__ = [
'KeyValuePair',
'Tag',
'Frame',
'Frames',
'Classification',
'Status',
'Email',
'IPA',
'Phone',
'Address',
'PII',
'DetectedTerms',
'Screen',
'Face',
'FoundFaces',
'Candidate',
'OCR',
'Evaluate',
'Match',
'MatchResponse',
'DetectedLanguage',
'ImageListMetadata',
'ImageList',
'TermListMetadata',
'TermList',
'RefreshIndexAdvancedInfoItem',
'RefreshIndex',
'ImageAdditionalInfoItem',
'Image',
'ImageIds',
'TermsInList',
'TermsData',
'TermsPaging',
'Terms',
'Review',
'JobExecutionReportDetails',
'Job',
'JobListResult',
'JobId',
'Error',
'APIError', 'APIErrorException',
'BodyMetadata',
'Body',
'CreateReviewBodyItemMetadataItem',
'CreateReviewBodyItem',
'Content',
'TranscriptModerationBodyItemTermsItem',
'TranscriptModerationBodyItem',
'BodyModel',
'CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem',
'CreateVideoReviewsBodyItemVideoFramesItemMetadataItem',
'CreateVideoReviewsBodyItemVideoFramesItem',
'CreateVideoReviewsBodyItemMetadataItem',
'CreateVideoReviewsBodyItem',
'VideoFrameBodyItemReviewerResultTagsItem',
'VideoFrameBodyItemMetadataItem',
'VideoFrameBodyItem',
'AzureRegionBaseUrl',
]
|
KOI8R_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, # 80
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, # 90
223, 224, 225, 68, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, # a0
238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
239, 240, 241, 242, 243, 244, 245, 246, 68, 247, 248, 249, 250, 251, 252, 253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255,
)
macCyrillic_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 255,
)
IBM855_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
191, 192, 193, 194, 68, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46, 218, 219,
220, 221, 222, 223, 224, 26, 55, 4, 42, 225, 226, 227, 228, 23, 60, 229,
230, 231, 232, 233, 234, 235, 11, 36, 236, 237, 238, 239, 240, 241, 242, 243,
8, 49, 12, 38, 5, 31, 1, 34, 15, 244, 245, 246, 247, 35, 16, 248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61, 249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50, 251, 252, 255,
)
IBM866_CharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50
253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60
67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255,
)
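# Pairwise language model for Russian: entry (i, j) scores how typical it is
# for a letter of frequency order i to be followed by one of order j
# (3 = very frequent, 2 = frequent, 1 = rare, 0 = effectively never). The
# mTypicalPositiveRatio value in the model dicts below is, roughly, the share
# of character pairs in genuine Russian text that fall in the top category.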
RussianLangModel = (
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 1, 3, 3, 3, 2, 3, 2, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 2, 2, 2, 2, 0, 0, 2,
3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1,
0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 1, 3, 3, 1, 3, 3, 3, 3, 2, 2, 3, 0, 2, 2, 2, 3, 3, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 1, 2, 2, 0, 1, 2, 2, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 0, 2, 2, 3, 3, 2, 1, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 2, 3, 2, 3, 3, 3, 3, 2, 2, 3, 0, 3, 2, 2, 3, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 0, 3, 3, 3, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3, 2, 2, 0, 1, 3, 2, 1, 2, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 3, 0, 1, 1, 1, 1, 2, 1, 1, 0, 2, 2, 2, 1, 2, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 2, 2, 2, 2, 1, 3, 2, 3, 2, 3, 2, 1, 2, 2, 0, 1, 1, 2, 1, 2, 1, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, 2, 2, 0, 2, 2, 2, 2, 3, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 2, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 0, 0, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 3, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 3, 3, 3, 3, 2, 2, 3, 3, 0, 2, 1, 0, 3, 2, 3, 2, 3, 0, 0, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 3, 0, 2, 3, 3, 3, 3, 2, 3, 3, 3, 3, 1, 2, 2, 0, 0, 2, 3, 2, 2, 2, 3, 2, 3, 2, 2, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 2, 3, 0, 2, 3, 2, 3, 0, 1, 2, 3, 3, 2, 0, 2, 3, 0, 0, 2, 3, 2, 2, 0, 1, 3, 1, 3, 2, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 3, 0, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 2, 0, 0, 2, 2, 3, 3, 3, 2, 3, 3, 0, 2, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 3, 3, 0, 0, 1, 1, 1, 1, 1, 2, 0, 0, 1, 1, 1, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 3, 3, 2, 3, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 1, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 2, 3, 3, 3, 3, 3, 1, 2, 2, 1, 3, 1, 0, 3, 0, 0, 3, 0, 0, 0, 1, 1, 0, 1, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 2, 2, 1, 1, 3, 3, 3, 2, 2, 1, 2, 2, 3, 1, 1, 2, 0, 0, 2, 2, 1, 3, 0, 0, 2, 1, 1, 2, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 2, 3, 3, 3, 3, 1, 2, 2, 2, 1, 2, 1, 3, 3, 1, 1, 2, 1, 2, 1, 2, 2, 0, 2, 0, 0, 1, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 3, 3, 3, 3, 2, 1, 3, 2, 2, 3, 2, 0, 3, 2, 0, 3, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 1, 2, 1, 2, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 1, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 3, 0, 2, 1, 0, 0, 2, 2, 3, 2, 1, 2, 2, 2, 2, 2, 3, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 1, 1, 0, 1, 1, 2, 2, 1, 1, 3, 0, 0, 1, 3, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 1, 3, 3, 3, 2, 0, 0, 0, 2, 1, 0, 1, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 0, 1, 0, 0, 2, 3, 2, 2, 2, 1, 2, 2, 2, 1, 2, 1, 0, 0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 0, 0, 1, 1,
1, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 1, 2, 0, 0, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0,
2, 2, 3, 2, 2, 2, 3, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 2, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,
3, 3, 3, 2, 2, 2, 2, 3, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 3, 1, 2, 1, 2, 0, 0, 1, 1, 0, 1, 0, 2, 1,
1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0,
2, 0, 0, 1, 0, 3, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 1, 2, 2, 1, 1, 2, 2, 0, 1, 1, 0, 2,
1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 0, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1,
1, 3, 2, 2, 2, 1, 1, 1, 2, 3, 0, 0, 0, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1,
1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 2, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
2, 3, 2, 3, 2, 1, 2, 2, 2, 2, 1, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 2, 1,
1, 1, 2, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
3, 0, 0, 1, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 1, 1, 2, 2, 0, 0, 0, 1, 2,
1, 1, 1, 1, 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1,
2, 3, 2, 3, 3, 2, 0, 1, 1, 1, 0, 0, 1, 0, 2, 0, 1, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1,
1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0,
2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 0, 1, 1, 0, 2, 1, 1, 1, 2, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 2, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 3, 3, 2, 0, 0, 1, 1, 2, 2, 1, 0, 0, 2, 0, 1, 1, 3, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 2, 1,
1, 1, 2, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0,
1, 3, 2, 3, 2, 1, 0, 0, 2, 2, 2, 0, 1, 0, 2, 0, 1, 1, 1, 0, 1, 0, 0, 0, 3, 0, 1, 1, 0, 0, 2, 1,
1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0,
3, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1,
3, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1,
1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
1, 3, 3, 2, 2, 0, 0, 0, 2, 2, 0, 0, 0, 1, 2, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1,
0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
2, 3, 2, 3, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1,
1, 1, 2, 0, 1, 2, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,
1, 3, 2, 2, 2, 1, 0, 0, 2, 2, 1, 0, 1, 2, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 0, 2, 3, 1, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 0, 1, 0, 2, 1, 1, 1, 0, 0, 0, 0, 1,
1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
2, 0, 2, 0, 0, 1, 0, 3, 2, 1, 2, 1, 2, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1, 1, 0, 2, 0, 2,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1,
1, 2, 2, 2, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 2, 0, 0, 2, 0,
1, 0, 1, 1, 1, 2, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
2, 1, 2, 2, 2, 0, 3, 0, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
1, 2, 2, 3, 2, 2, 0, 0, 1, 1, 2, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
2, 2, 1, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1,
1, 2, 2, 2, 2, 0, 1, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0,
0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 2, 2, 1, 1, 2, 0, 2, 1, 1, 1, 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1,
0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 2, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
1, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,
0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
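# Usage sketch (hypothetical; mirrors how chardet's single-byte prober group
# consumes these dicts -- exact method names vary between chardet versions):
#
#   from chardet.sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(Koi8rModel)
#   prober.feed(some_bytes)
#   print(prober.get_charset_name(), prober.get_confidence())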
|
from __future__ import absolute_import, division, print_function, unicode_literals
import aspen
from psycopg2 import IntegrityError
from gratipay.billing.payday import Payday
class PaydayRunner(object):
"""The Gratipay application can start a weekly payday process.
"""
def __init__(self, app):
self.app = app
def run_payday(self):
"""Run Gratipay's weekly payday.
If there is a Payday that hasn't finished yet, then the UNIQUE
constraint on ts_end will kick in and notify us of that. In that case
we load the existing Payday and work on it some more. We use the start
time of the current Payday to synchronize our work.
"""
self._start_payday().run()
def _start_payday(self):
try:
d = self.app.db.one("""
INSERT INTO paydays DEFAULT VALUES
RETURNING id, (ts_start AT TIME ZONE 'UTC') AS ts_start, stage
""", back_as=dict)
aspen.log("Starting a new payday.")
except IntegrityError: # Collision, we have a Payday already.
d = self.app.db.one("""
SELECT id, (ts_start AT TIME ZONE 'UTC') AS ts_start, stage
FROM paydays
WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz
""", back_as=dict)
aspen.log("Picking up with an existing payday.")
d['ts_start'] = d['ts_start'].replace(tzinfo=aspen.utils.utc)
aspen.log("Payday started at %s." % d['ts_start'])
payday = Payday(self)
payday.__dict__.update(d)
return payday
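# Usage sketch (hypothetical `app`: anything exposing a postgres.py-style `db`
# attribute, as the code above assumes):
#
#   runner = PaydayRunner(app)
#   runner.run_payday()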
|
from setuptools import setup
setup(name='kurdishmorph',
version='0.0.1',
description='Kurmanji Morphology Analyzer',
url='http://github.com/halilagin/kurjmorph',
author='Halil Agin',
author_email='halil.agin@gmail.com',
license='MIT',
      packages=['kurdish', 'kurdish.kurmanji', 'kurdish.kurmanji.verb', 'kurdish.kurmanji.verb.inp'],
zip_safe=False)
|
from django.conf.urls import url
from . import views
from .apps import NewBuildingsConfig
app_name = NewBuildingsConfig.name
urlpatterns = [
url(r'^$', views.ResidentalComplexList.as_view(),
name='residental-complex-list'),
# url(r'^(?P<pk>\d+)/$',
url(r'^(?P<slug>[-\w]+)/$',
views.ResidentalComplexDetail.as_view(),
name='residental-complex-detail',
),
url(r'^feeds/yandex-new-buildings$',
views.NewApartmentsFeed.as_view(),
name='new-aparments-feed',
),
url(r'^json/residental-complex',
views.ResidentalComplexAutocompleteView.as_view(),
name='rc-autocomplete'
),
]
|
import io
import builtins
from unittest.mock import patch
from unittest.mock import MagicMock
from mt_shared import mt_io
def test_read_file_non_user_expand():
file_name = '/path/2/file_name.txt'
mock_content = io.StringIO('some text')
mock = MagicMock(return_value=mock_content)
with patch('builtins.open', mock):
mt_io.read_file(file_name)
mock.assert_called_with(file_name, 'r')
@patch('mt_shared.mt_io.os.path.expanduser')
def test_read_file_with_user_expand(mock_expand):
file_name = '~/file_name.txt'
expanded_file_name = '/full/expanded/path/2/file_name.txt'
mock_content = io.StringIO('some text')
mock_content.read = MagicMock()
mock_expand.return_value = expanded_file_name
mock = MagicMock(return_value=mock_content)
with patch('builtins.open', mock):
mt_io.read_file(file_name)
mock.assert_called_with(expanded_file_name, 'r')
mock_content.read.assert_called_once()
@patch('mt_shared.mt_io.os')
def test_write_file_non_user_expand(mock_os):
data = 'data 2 write'
file_name = '/path/2/file_name.txt'
mock_stream = io.StringIO('some text')
mock_stream.write = MagicMock()
mock = MagicMock(return_value=mock_stream)
with patch('builtins.open', mock):
mt_io.write_file(file_name, data)
mock.assert_called_with(file_name, 'w')
mock_stream.write.assert_called_with(data)
@patch('mt_shared.mt_io.os')
def test_write_file_with_user_expand(mock_os):
file_name = '~/file_name.txt'
data = 'data to write to the file'
expanded_file_name = '/full/expanded/path/2/file_name.txt'
mock_content = io.StringIO('some text')
mock_os.path.expanduser.return_value = expanded_file_name
mock = MagicMock(return_value=mock_content)
with patch('builtins.open', mock):
mt_io.write_file(file_name, data)
mock.assert_called_with(expanded_file_name, 'w')
@patch('mt_shared.mt_io.os')
@patch('mt_shared.mt_io._get_file_name')
def test_write_file_writes_directory(mock_gfn, mock_os):
full_fn = '/Usr/whatever/file_name.txt'
mock_content = io.StringIO('some text')
mock_gfn.return_value = full_fn
mock = MagicMock(return_value=mock_content)
with patch('builtins.open', mock):
mt_io.write_file('~/file_name.txt', 'data')
dirname = mock_os.path.dirname.return_value
mock_os.path.dirname.assert_called_with(full_fn)
mock_os.makedirs.assert_called_with(dirname, exist_ok=True)
@patch('mt_shared.mt_io.os.path.isfile')
@patch('mt_shared.mt_io._get_file_name')
def test_exists(mock_gfn, mock_isfile):
file_name = 'full/path/to/file.txt'
is_file_result = 'YES IT IS DA FILE'
mock_isfile.return_value = is_file_result
result = mt_io.exists(file_name)
assert result == is_file_result
@patch('mt_shared.mt_io.os.path.isfile')
@patch('mt_shared.mt_io._get_file_name')
def test_exists_expands_path(mock_gfn, mock_isfile):
file_name = 'full/path/to/file.txt'
is_file_result = 'YES IT IS DA FILE'
mock_isfile.return_value = is_file_result
result = mt_io.exists(file_name)
mock_isfile.assert_called_with(mock_gfn.return_value)
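# For reference, a minimal mt_io implementation consistent with the behaviour
# these tests pin down (hypothetical sketch; the real module is
# mt_shared.mt_io):
#
#   import os
#
#   def _get_file_name(file_name):
#       return os.path.expanduser(file_name)
#
#   def read_file(file_name):
#       return open(_get_file_name(file_name), 'r').read()
#
#   def write_file(file_name, data):
#       full_fn = _get_file_name(file_name)
#       os.makedirs(os.path.dirname(full_fn), exist_ok=True)
#       open(full_fn, 'w').write(data)
#
#   def exists(file_name):
#       return os.path.isfile(_get_file_name(file_name))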
|
from unittest import TestCase, main
import numpy as np
import datetime
from bcp.ethoscan import (parse_ethoscan_line, parse_ethoscan_report,
align_ethoscan_data)
class TestEthoscan(TestCase):
'''Test Ethoscan parameters are correctly calculated.'''
def setUp(self):
        self.ethoscan_report_lines_1 = [
'EthoScan: Data for cage 1, from file 070815 Infection.exp.\r\n',
'Analyzed from 7/8/2015 10:25:47 to 7/16/2015 15:56:45\r\n',
'*****************************************************************************************************************\r\n',
'\r\n',
'Behavior codes (not all may be present in this recording):\r\n',
'EFODA,Interaction with food hopper A (significant uptake found)\r\n',
'TFODA,Interaction with food hopper A (no significant uptake)\r\n',
'DWATR,Interaction with water dispenser (significant uptake found)\r\n',
'TWATR,Interaction with water dispenser (no significant uptake)\r\n',
'WHEEL,Interaction with wheel (>= 1 revolution)\r\n',
'IHOME,Entered habitat (stable mass reading)\r\n',
'THOME,Interaction with habitat (no stable mass reading)\r\n',
'LLNGE,Long lounge (> 60 sec, no non-XY sensor interactions)\r\n',
'SLNGE,Short lounge (5 - 60 sec, no non-XY sensor interactions)\r\n',
'EFODB,Interaction with food hopper B (significant uptake found)\r\n',
'TFODB,Interaction with food hopper B (no significant uptake)\r\n',
'\r\n',
'Time Budget (total = 197.5 hours), Animal #1\r\n',
'(Time before first and after last behavior are not included in the total)\r\n',
'Behav,Minutes,Percent\r\n',
'efoda,096.4,00.81\r\n',
'tfoda,389.1,03.28\r\n',
'dwatr,013.5,00.11\r\n',
'twatr,008.2,00.07\r\n',
'wheel,4064.3,34.30\r\n',
'ihome,5764.5,48.64\r\n',
'thome,006.8,00.06\r\n',
'llnge,987.9,08.34\r\n',
'slnge,520.3,04.39\r\n',
'efodb,000.0,00.00\r\n',
'tfodb,000.0,00.00\r\n',
'\r\n',
'Non-wheel XY Ambulatory Budget (total = 511.7 meters), Animal #1\r\n',
'Behav,Meters,Percent\r\n',
'efoda,14.6,02.85\r\n',
'tfoda,45.4,08.88\r\n',
'dwatr,07.8,01.53\r\n',
'twatr,05.9,01.15\r\n',
'ihome,00.0,00.00\r\n',
'thome,00.1,00.02\r\n',
'llnge,194.1,37.94\r\n',
'slnge,243.7,47.63\r\n',
'efodb,00.0,00.00\r\n',
'tfodb,00.0,00.00\r\n',
'\r\n',
'With-wheel XY Ambulatory Budget (wheel = 92590.0 meters), Animal #1\r\n',
'(Wheel diameter of 11.43 cm assumed; scale if necessary)\r\n',
'Behav,Meters,Percent\r\n',
'efoda,14.6,00.02\r\n',
'tfoda,45.4,00.05\r\n',
'dwatr,07.8,00.01\r\n',
'twatr,05.9,00.01\r\n',
'wheel,92590.0,99.45\r\n',
'ihome,00.0,00.00\r\n',
'thome,00.1,00.00\r\n',
'llnge,194.1,00.21\r\n',
'slnge,243.7,00.26\r\n',
'efodb,00.0,00.00\r\n',
'tfodb,00.0,00.00\r\n',
'\r\n',
'Transition matrix, animal #1\r\n',
',efoda,tfoda,dwatr,twatr,wheel,ihome,thome,llnge,slnge,efodb,tfodb\r\n',
'efoda,00.00,00.00,00.00,00.65,14.19,00.00,00.00,18.71,66.45,00.00,00.00,sum:100%\r\n',
'tfoda,00.00,00.00,01.22,00.97,06.81,00.00,00.00,14.11,76.89,00.00,00.00,sum:100%\r\n',
'dwatr,00.00,06.25,00.00,00.00,00.00,00.00,00.00,15.63,78.13,00.00,00.00,sum:100%\r\n',
'twatr,06.06,03.03,00.00,00.00,03.03,03.03,00.00,15.15,69.70,00.00,00.00,sum:100%\r\n',
'wheel,02.06,03.04,00.00,00.00,00.00,00.00,00.00,09.62,85.28,00.00,00.00,sum:100%\r\n',
'ihome,00.00,00.00,00.00,00.00,00.00,00.00,00.00,09.05,90.95,00.00,00.00,sum:100%\r\n',
'thome,00.00,00.00,00.00,00.00,00.00,00.00,00.00,03.45,96.55,00.00,00.00,sum:100%\r\n',
'llnge,04.59,16.51,03.67,03.21,44.95,25.23,01.83,00.00,00.00,00.00,00.00,sum:100%\r\n',
'slnge,07.66,21.41,01.19,01.32,54.61,10.42,03.39,00.00,00.00,00.00,00.00,sum:100%\r\n',
'efodb,,,,,,,,,,,,[not found]\r\n',
'tfodb,,,,,,,,,,,,[not found]\r\n',
'The matrix is viewed left to right (not vertically). The first column shows the initial behaviors; the\r\n',
'other columns along a given row show the percent probability of the first subsequent behavior.\r\n',
'\r\n',
"Behavior list follows. 'Amount' is cm (SLNGE, LLNGE), revolutions (WHEEL), grams (EFODx, IHOME) or mL (DWATR)\r\n",
' Sample,Start_Date,Start_Time,End_Time,Durat_Sec,Activity,Amount,Rear%,X_cm,Y_cm,S_cm\r\n',
' 000001,7/8/2015\t10:25:48,14:43:41,15474,LLNGE,126,00.0,10.2,14.6,126\r\n',
' 015475,7/8/2015\t14:43:42,18:29:43,13562,IHOME,13.315,00.0,8.0,15.3,000\r\n',
' 029037,7/8/2015\t18:29:44,18:35:40,357,LLNGE,210,04.2,9.3,24.1,210\r\n',
' 029394,7/8/2015\t18:35:41,18:35:47,7,TWATR,0,85.7,5.8,27.5,004\r\n',
' 029401,7/8/2015\t18:35:48,18:42:13,386,LLNGE,125,08.0,8.5,24.0,125\r\n',
' 029787,7/8/2015\t18:42:14,18:42:16,3,DWATR,0.033,33.3,6.3,20.5,000\r\n',
' 029790,7/8/2015\t18:42:17,18:42:23,7,SLNGE,013,71.4,6.6,16.2,013\r\n',
' 029797,7/8/2015\t18:42:24,18:43:01,38,TFODA,0,68.4,10.0,18.7,000\r\n',
' 029835,7/8/2015\t18:43:02,18:43:06,5,SLNGE,007,80.0,8.8,17.4,007\r\n',
' 029840,7/8/2015\t18:43:07,18:44:25,79,IHOME,13.369,00.0,7.8,13.5,000\r\n',
' 029919,7/8/2015\t18:44:26,18:44:45,20,SLNGE,020,00.0,8.0,19.2,020\r\n',
' 029939,7/8/2015\t18:44:46,18:45:39,54,DWATR,0.024,48.1,5.9,25.7,037\r\n',
' 029993,7/8/2015\t18:45:40,18:45:50,11,SLNGE,019,81.8,7.6,28.3,019\r\n',
' 030004,7/8/2015\t18:45:51,18:45:51,1,TFODA,0,100.0,8.5,20.3,000\r\n',
' 030005,7/8/2015\t18:45:52,19:21:38,2147,LLNGE,250,01.4,12.1,19.6,250\r\n',
' 032152,7/8/2015\t19:21:39,19:21:52,14,WHEEL,007,00.0,9.0,9.0,000\r\n',
' 032166,7/8/2015\t19:21:53,19:22:02,10,SLNGE,000,00.0,9.0,9.0,000\r\n',
' 032176,7/8/2015\t19:22:03,19:22:04,2,TFODA,0,100.0,8.8,11.4,006\r\n',
' 032178,7/8/2015\t19:22:05,19:22:31,27,SLNGE,043,40.7,9.6,11.7,043\r\n',
' 032205,7/8/2015\t19:22:32,19:27:37,306,IHOME,13.368,00.0,8.3,14.0,000\r\n',
' 032511,7/8/2015\t19:27:38,19:28:23,46,SLNGE,061,26.1,8.4,16.8,061\r\n',
' 032557,7/8/2015\t19:28:24,19:28:26,3,TFODA,0,66.7,10.3,28.8,007\r\n']
def test_parse_ethoscan_line(self):
# input line
# ' 032557,7/8/2015\t19:28:24,19:28:26,3,TFODA,0,66.7,10.3,28.8,007\r\n'
exp = ['032557', 'TFODA', '3', '0', '66.7', '10.3', '28.8', '007']
obs = parse_ethoscan_line(self.ethoscan_report_lines_1[-1])
self.assertEqual(obs, exp)
def test_parse_ethoscan_report(self):
# Test without a start_time.
exp = np.array([
[1.00000000e+00, 7.00000000e+00, 1.54740000e+04, 1.26000000e+02, 0.00000000e+00, 1.02000000e+01, 1.46000000e+01, 1.26000000e+02],
[1.54750000e+04, 5.00000000e+00, 1.35620000e+04, 1.33150000e+01, 0.00000000e+00, 8.00000000e+00, 1.53000000e+01, 0.00000000e+00],
[2.90370000e+04, 7.00000000e+00, 3.57000000e+02, 2.10000000e+02, 4.20000000e+00, 9.30000000e+00, 2.41000000e+01, 2.10000000e+02],
[2.93940000e+04, 3.00000000e+00, 7.00000000e+00, 0.00000000e+00, 8.57000000e+01, 5.80000000e+00, 2.75000000e+01, 4.00000000e+00],
[2.94010000e+04, 7.00000000e+00, 3.86000000e+02, 1.25000000e+02, 8.00000000e+00, 8.50000000e+00, 2.40000000e+01, 1.25000000e+02],
[2.97870000e+04, 2.00000000e+00, 3.00000000e+00, 3.30000000e-02, 3.33000000e+01, 6.30000000e+00, 2.05000000e+01, 0.00000000e+00],
[2.97900000e+04, 8.00000000e+00, 7.00000000e+00, 1.30000000e+01, 7.14000000e+01, 6.60000000e+00, 1.62000000e+01, 1.30000000e+01],
[2.97970000e+04, 1.00000000e+00, 3.80000000e+01, 0.00000000e+00, 6.84000000e+01, 1.00000000e+01, 1.87000000e+01, 0.00000000e+00],
[2.98350000e+04, 8.00000000e+00, 5.00000000e+00, 7.00000000e+00, 8.00000000e+01, 8.80000000e+00, 1.74000000e+01, 7.00000000e+00],
[2.98400000e+04, 5.00000000e+00, 7.90000000e+01, 1.33690000e+01, 0.00000000e+00, 7.80000000e+00, 1.35000000e+01, 0.00000000e+00],
[2.99190000e+04, 8.00000000e+00, 2.00000000e+01, 2.00000000e+01, 0.00000000e+00, 8.00000000e+00, 1.92000000e+01, 2.00000000e+01],
[2.99390000e+04, 2.00000000e+00, 5.40000000e+01, 2.40000000e-02, 4.81000000e+01, 5.90000000e+00, 2.57000000e+01, 3.70000000e+01],
[2.99930000e+04, 8.00000000e+00, 1.10000000e+01, 1.90000000e+01, 8.18000000e+01, 7.60000000e+00, 2.83000000e+01, 1.90000000e+01],
[3.00040000e+04, 1.00000000e+00, 1.00000000e+00, 0.00000000e+00, 1.00000000e+02, 8.50000000e+00, 2.03000000e+01, 0.00000000e+00],
[3.00050000e+04, 7.00000000e+00, 2.14700000e+03, 2.50000000e+02, 1.40000000e+00, 1.21000000e+01, 1.96000000e+01, 2.50000000e+02],
[3.21520000e+04, 4.00000000e+00, 1.40000000e+01, 7.00000000e+00, 0.00000000e+00, 9.00000000e+00, 9.00000000e+00, 0.00000000e+00],
[3.21660000e+04, 8.00000000e+00, 1.00000000e+01, 0.00000000e+00, 0.00000000e+00, 9.00000000e+00, 9.00000000e+00, 0.00000000e+00],
[3.21760000e+04, 1.00000000e+00, 2.00000000e+00, 0.00000000e+00, 1.00000000e+02, 8.80000000e+00, 1.14000000e+01, 6.00000000e+00],
[3.21780000e+04, 8.00000000e+00, 2.70000000e+01, 4.30000000e+01, 4.07000000e+01, 9.60000000e+00, 1.17000000e+01, 4.30000000e+01],
[3.22050000e+04, 5.00000000e+00, 3.06000000e+02, 1.33680000e+01, 0.00000000e+00, 8.30000000e+00, 1.40000000e+01, 0.00000000e+00],
[3.25110000e+04, 8.00000000e+00, 4.60000000e+01, 6.10000000e+01, 2.61000000e+01, 8.40000000e+00, 1.68000000e+01, 6.10000000e+01],
[3.25570000e+04, 1.00000000e+00, 3.00000000e+00, 0.00000000e+00, 6.67000000e+01, 1.03000000e+01, 2.88000000e+01, 7.00000000e+00]])
obs = parse_ethoscan_report(self.ethoscan_report_lines_1)
np.testing.assert_array_equal(obs, exp)
# Test with a start time.
start_time = 3435.
obs = parse_ethoscan_report(self.ethoscan_report_lines_1, start_time)
exp[:, 0] += start_time
np.testing.assert_array_equal(obs, exp)
def test_align_ethoscan_data(self):
        # Simulate a situation where 1 day has elapsed between the beginning
        # of the experiment and the beginning of the Ethoscan. The Ethoscan
        # report will cover 1 h of activity.
exp_start = datetime.datetime(2015, 1, 1, 6, 0, 0)
eth_start = datetime.datetime(2015, 1, 2, 6, 0, 0)
# Assume that the experiment has been stopped for 1h to collect samples
# from the mice.
times = np.concatenate((np.arange(23 * 3600),
np.arange(3600) + 24 * 3600))
# Mock Ethoscan behavioral classification.
edata = np.array([[1, 3, 9, 0, 0, 15.25, 13.5, 0],
[10, 4, 3000, 1560, 0, 2.5, 2.5, 0],
[3010, 0, 570, .6, 0, 14.5, 14.5, 0],
[3580, 6, 19, 0, 13, 16.5, 7.5, 300]])
# The times post experiment start that our Ethoscan observations occur
# at are different than the actual indices in the times vector due to
# the experiment being paused. The actual index of these observations
# will be shifted back by 3600 (3600 observations in 1h).
times_exp = np.array([[86401, 86410],
[86410, 89410],
[89410, 89980],
[89980, 89999]])
exp = times_exp - 3600
# Generate observed data.
obs = np.empty((4,2))
for i, e in enumerate(edata):
obs[i] = align_ethoscan_data(exp_start, eth_start, e, times)
np.testing.assert_array_equal(obs, exp)
# Simulate a situation where 2 pauses have occurred in experimental
# recording (i.e. 2 days of sampling). Seconds between experiment start
# and experiment end: 487671.
exp_start = datetime.datetime(2015, 2, 28, 7, 42, 15)
exp_end = datetime.datetime(2015, 3, 5, 23, 10, 6)
# The Ethoscan will start with 97200 seconds to go.
eth_start = datetime.datetime(2015, 3, 4, 20, 10, 6)
        # Our mock times data has a loss of 1000 samples (1000 seconds where
        # the Promethion machine was off) at the first break, and 2000 samples at
# the second break.
times = np.concatenate((np.arange(79000),
80000 + np.arange(115425),
197425 + np.arange(290246)))
# Mock Ethoscan behavioral classifications.
edata = np.array([[1, 3, 9, 0, 0, 15.25, 13.5, 0],
[10, 4, 3000, 1560, 0, 2.5, 2.5, 0],
[3010, 0, 570, .6, 0, 14.5, 14.5, 0],
[3580, 6, 19, 0, 13, 16.5, 7.5, 300],
                          # NOTE: the original row here had only seven values,
                          # which would make the array ragged; a rear% of 0 is
                          # assumed to restore the eight-column layout.
                          [3599, 8, 93600, 0, 0, 15.25, 2.5, 100]])
times_exp = np.array([[390472, 390481],
[390481, 393481],
[393481, 394051],
[394051, 394070],
[394070, 487670]])
exp = times_exp - 3000
# Generate observed data.
obs = np.empty((5,2))
for i, e in enumerate(edata):
obs[i] = align_ethoscan_data(exp_start, eth_start, e, times)
np.testing.assert_array_equal(obs, exp)
if __name__ == '__main__':
main()
|
import os
MONGO_HOST = os.environ.get('MONGO_SERVICE_HOST')
MONGO_PORT = int(os.environ.get('MONGO_SERVICE_PORT'))
MONGO_DBNAME = 'reports'
XML = False
X_DOMAINS = "*"
X_HEADERS = "Content-Type, Accept, Authorization, X-Requested-With, " \
    "Access-Control-Request-Headers, Access-Control-Allow-Origin, " \
    "Access-Control-Allow-Credentials, X-HTTP-Method-Override, mozSystem, " \
    "Access-Control-Allow-Methods, If-Match"
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
PUBLIC_METHODS = ['GET', 'POST', 'PATCH', 'PUT', 'DELETE']
PUBLIC_ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'POST', 'DELETE']
reports = {
'schema': {
'build': {'type': 'string'},
'branch': {'type': 'string'},
'repo': {'type': 'string'},
'commit': {'type': 'string'},
'report': {'type': 'string'}
}
}
DOMAIN = {'reports': reports}
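# A minimal Eve entry point that picks these settings up (Eve reads
# settings.py from the application directory by default):
#
#   from eve import Eve
#   app = Eve()
#   app.run()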
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jormungandr.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0003_auto_20161014_0109'),
]
operations = [
migrations.AddField(
model_name='event',
name='subtitle',
field=models.CharField(default='', max_length=150),
preserve_default=False,
),
]
|
import sys
from cx_Freeze import setup, Executable
setup(
name = "Lockwatcher",
version = "0.1",
description = "Anti-tampering monitor",
    executables = [Executable("lockwatcher-gui.py", base="Win32GUI", icon='favicon.ico'),
                   Executable('serviceconfig.py', base='Win32Service', targetName='LockWatcherSvc.exe'),
                   Executable("locker.py", base="Win32GUI")],
data_files=[('', ['favicon.ico']),
('', ['btscanner.exe','chastrigger.exe','roomtrigger.exe','install-interception.exe']),
('', ['roomcam.png','chascam.png','camid.png']),
('', ['cygwin1.dll','interception.32.dll','interception.64.dll'])
],
options = {'build_exe': {'includes': ['devdetect']}},
)
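# Build the frozen executables with the standard cx_Freeze invocation:
#   python setup.py build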
|
"""Test the invalidateblock RPC."""
from test_framework.test_framework import TrollcoinTestFramework
from test_framework.util import *
class InvalidateTest(TrollcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir))
self.nodes.append(start_node(1, self.options.tmpdir))
self.nodes.append(start_node(2, self.options.tmpdir))
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
time.sleep(5)
assert_equal(self.nodes[2].getblockcount(), 3)
assert_equal(self.nodes[0].getblockcount(), 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
|
from .base import TestCase
from mongu import Client, Model, ModelAttributeError
from pymongo import MongoClient
class ClientTests(TestCase):
def test_default(self):
self.assertEqual(Client().client, MongoClient())
def test_uri(self):
self.assertEqual(
Client('mongodb://localhost:27017').client,
MongoClient('mongodb://localhost:27017'))
def test_warning(self):
self.assert_warn(SyntaxWarning, Client, 'mongodb://localhost:27017/database')
def test_no_database(self):
class BrokenModel(Model):
_collection_ = 'test'
self.assertRaises(ModelAttributeError, self.client.register_model, BrokenModel)
def test_no_collection(self):
class BrokenModel(Model):
_database_ = 'test'
self.assertRaises(ModelAttributeError, self.client.register_model, BrokenModel)
def test_no_registration(self):
class MyModel(Model):
_database_ = 'test'
_collection_ = 'test'
self.assertRaises(ModelAttributeError, getattr, MyModel, 'collection')
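# For contrast with the broken models above, a registrable model needs both
# attributes (names hypothetical):
#
#   class User(Model):
#       _database_ = 'test'
#       _collection_ = 'users'
#
#   client.register_model(User)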
|
from django.core.management import call_command
from django.test.testcases import TestCase
from 臺灣言語服務.models import 訓練過渡格式
from 匯入.management.commands.教典詞條 import Command
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
class 教典詞條試驗(TestCase):
@classmethod
def setUpClass(cls):
call_command('教典詞條')
return super().setUpClass()
def test句數正確(self):
self.assertGreater(訓練過渡格式.資料數量(), 25000)
def test切腔口又音(self):
self.assertEqual(
Command().tsheh_iuim('tsa̍p-jī-tsí-tn̂g/tsa̍p-lī-tsí-tn̂g'),
['tsa̍p-jī-tsí-tn̂g', 'tsa̍p-lī-tsí-tn̂g']
)
def test切tsē又音(self):
self.assertEqual(
Command().tsheh_iuim('ē-kì--tsit、ē-kì--lit、ē-kì--lih、ē-kì--eh'),
['ē-kì--tsit', 'ē-kì--lit', 'ē-kì--lih', 'ē-kì--eh']
)
def test主詞條(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('上青苔', 'tshiūnn-tshenn-thî').看分詞()
).exists()
)
def test第二優勢腔(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('上青苔', 'tshiūnn-tshinn-thî').看分詞()
).exists()
)
def test又音(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('上青苔', 'tshiūnn-tshenn-tî').看分詞()
).exists()
)
def test詞luī方言差(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('面巾', 'bīn-kirn').看分詞()
).exists()
)
def test詞luī方言差2ê做伙(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('病院', 'pǐnn-ǐnn').看分詞()
).exists()
)
def test詞luī方言差日語kâng長度莫(self):
self.assertFalse(
訓練過渡格式.objects.filter(
文本__contains='ガラ油'
).exists()
)
def test詞luī方言差2ê羅馬字(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('冰雹', 'ping-pha̍uh').看分詞()
).exists()
)
def test詞luī方言差一堆點(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('咪咪', 'bi̋-bi').看分詞()
).exists()
)
def test漢字方言差(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('予', 'hǒo').看分詞()
).exists()
)
def test漢字方言差_2ê分號隔開(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('反', 'puínn').看分詞()
).exists()
)
def test漢字方言差_提掉解說(self):
self.assertTrue(
訓練過渡格式.objects.filter(
文本=拆文分析器.建立句物件('濟', 'tserē').看分詞()
).exists()
)
|
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._resolve_private_link_service_id_operations import build_post_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResolvePrivateLinkServiceIdOperations:
"""ResolvePrivateLinkServiceIdOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def post(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.PrivateLinkResource",
**kwargs: Any
) -> "_models.PrivateLinkResource":
"""Gets the private link service ID for the specified managed cluster.
        Gets the private link service ID for the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters (name, groupId) supplied in order to resolve a private link
service ID.
:type parameters: ~azure.mgmt.containerservice.v2021_03_01.models.PrivateLinkResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_03_01.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PrivateLinkResource')
request = build_post_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.post.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
post.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId'} # type: ignore
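# Usage sketch (hypothetical credential and subscription values; the operation
# group is reached through the generated async ContainerServiceClient):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.containerservice.v2021_03_01.aio import ContainerServiceClient
#
#   async def resolve(group_name, cluster_name, parameters):
#       async with ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           return await client.resolve_private_link_service_id.post(
#               group_name, cluster_name, parameters)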
|
from urllib.parse import urlencode
from django.urls import reverse
from frontend.utils.bnf_hierarchy import simplify_bnf_codes
from .build_search_query import build_query_obj
from .build_rules import build_rules
from .models import VTM, VMP, VMPP, AMP, AMPP
NUM_RESULTS_PER_OBJ_TYPE = 10
def search(q, obj_types, include):
results = search_by_term(q, obj_types, include)
if not results:
results = search_by_snomed_code(q)
if not results:
results = search_by_gtin(q)
return results
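# Usage sketch (query string and options hypothetical):
#
#   results = search("paracetamol", obj_types=["vmp"], include=[])
#   for group in results:
#       print(group["cls"].obj_type, len(group["objs"]))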
def search_by_term(q, obj_types, include):
results = []
for cls in [VTM, VMP, VMPP, AMP, AMPP]:
if obj_types and cls.obj_type not in obj_types:
continue
qs = cls.objects
if "invalid" not in include:
qs = qs.valid()
if "unavailable" not in include:
qs = qs.available()
if "no_bnf_code" not in include:
qs = qs.with_bnf_code()
qs = qs.search(q)
objs = list(qs)
if objs:
results.append({"cls": cls, "objs": objs})
return results
def search_by_snomed_code(q):
try:
int(q)
except ValueError:
return []
for cls in [VTM, VMP, VMPP, AMP, AMPP]:
try:
obj = cls.objects.get(pk=q)
except cls.DoesNotExist:
continue
return [{"cls": cls, "objs": [obj]}]
return []
def search_by_gtin(q):
try:
int(q)
except ValueError:
return []
try:
obj = AMPP.objects.get(gtin__gtin=q)
except AMPP.DoesNotExist:
return []
return [{"cls": AMPP, "objs": [obj]}]
def advanced_search(cls, search, include):
"""Perform a search against all dm+d objects of a particular type.
Parameters:
cls: class of dm+d object to search
    search: a tree describing the search to be performed, submitted when the
        user performs the search (see TestAdvancedSearchHelpers for an example)
    include: a list of strings taken from: ["invalid", "unavailable", "no_bnf_code"]
Returns dict with the following keys:
objs: queryset of results
rules: structure used to populate a QueryBuilder instance (see
https://querybuilder.js.org/#method-setRules)
too_many_results: flag indicating whether more than 10,000 results were returned
"""
rules = build_rules(search)
query_obj = build_query_obj(cls, search)
qs = cls.objects
if "invalid" not in include:
qs = qs.valid()
if "unavailable" not in include:
qs = qs.available()
if "no_bnf_code" not in include:
qs = qs.with_bnf_code()
# 10,000 is an arbitrary cut off. We should do pagination properly.
objs = qs.filter(query_obj)[:10001]
if len(objs) == 10001:
too_many_results = True
objs = objs[:10000]
analyse_url = None
else:
too_many_results = False
analyse_url = _build_analyse_url(objs)
return {
"objs": objs,
"rules": rules,
"too_many_results": too_many_results,
"analyse_url": analyse_url,
}
def _build_analyse_url(objs):
bnf_codes = [obj.bnf_code for obj in objs if obj.bnf_code]
params = {
"numIds": ",".join(simplify_bnf_codes(bnf_codes)),
"denom": "total_list_size",
}
querystring = urlencode(params)
url = "{}#{}".format(reverse("analyse"), querystring)
if len(url) > 5000:
# Anything longer than 5000 characters takes too long to load. This
# matches the behaviour of import_measures.build_analyse_url().
return
return url
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('wishlist_app', '0017_auto_20151001_1318'),
]
operations = [
migrations.AlterField(
model_name='role',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 10, 1, 14, 28, 31, 58000)),
),
migrations.AlterField(
model_name='role',
name='modified_date',
field=models.DateTimeField(default=datetime.datetime(2015, 10, 1, 14, 28, 31, 58000)),
),
migrations.AlterUniqueTogether(
name='registryassignment',
unique_together=set([('group', 'wisher')]),
),
migrations.AlterUniqueTogether(
name='secretsantaassignment',
unique_together=set([('group', 'wisher', 'giver')]),
),
]
|
from __future__ import print_function
import time
from gbdxtools import Interface
start_time = time.time()
catalog_ids = ['1030010034AFCE00',
'103001003696B200',
'105041001126A900',
'1050410011360600',
'1050410011360700']
print('Order imagery from GBDX')
gbdx = Interface()
order_id = gbdx.ordering.order(catalog_ids)
while True:
states, locations = zip(*[(order['state'],order['location'])
for order in gbdx.ordering.status(order_id)])
if any(state != 'delivered' for state in states):
time.sleep(300)
else:
break
print('Elapsed time: {} min'.format(round((time.time() - start_time)/60)))
s3_location = 'kostas/yunnanearthquake2014'
print('Launch AOP workflows')
workflow_ids = [gbdx.workflow.launch_aop_to_s3(location,
s3_location,
enable_acomp='true',
enable_pansharpen='true') for location in locations]
pending_workflow_ids = list(workflow_ids)
while len(pending_workflow_ids) > 0:
    print('Pending workflows', pending_workflow_ids)
    # Iterate over a copy: removing items from the list being iterated over
    # would skip elements.
    for workflow_id in list(pending_workflow_ids):
        result = gbdx.workflow.status(workflow_id)
        if result['state'] == 'complete':
            pending_workflow_ids.remove(workflow_id)
    time.sleep(300)
print('Elapsed time: {} min'.format(round((time.time() - start_time)/60)))
gbdx.s3.download(s3_location)
|
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
import sys
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
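# Note: hc (hardware controller) and bh (BlockHead) are used throughout this
# script but are not defined or imported here; they are presumably injected
# into the script's globals by the hosting environment.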
def saveBlockConfig(path, config):
fs = FileStream(path, FileMode.Create)
s = XmlSerializer(BlockConfig)
s.Serialize(fs,config)
fs.Close()
def loadBlockConfig(path):
fs = FileStream(path, FileMode.Open)
s = XmlSerializer(BlockConfig)
bc = s.Deserialize(fs)
fs.Close()
return bc
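# Round-trip sketch (file names hypothetical):
#
#   settingsPath = Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\"
#   bc = loadBlockConfig(settingsPath + "default.xml")
#   bc.Settings["cluster"] = "testCluster"
#   saveBlockConfig(settingsPath + "test.xml", bc)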
def writeLatestBlockNotificationFile(cluster, blockIndex):
fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create)
sw = StreamWriter(fs)
sw.WriteLine(cluster + "\t" + str(blockIndex))
sw.Close()
fs.Close()
def checkYAGAndFix():
    interlockFailed = hc.YAGInterlockFailed
    if interlockFailed:
        bh.StopPattern()
        bh.StartPattern()
def printWaveformCode(bc, name):
print(name + ": " + str(bc.GetModulationByName(name).Waveform.Code) + " -- " + str(bc.GetModulationByName(name).Waveform.Inverted))
def prompt(text):
sys.stdout.write(text)
return sys.stdin.readline().strip()
def measureParametersAndMakeBC(cluster, eState, bState):
fileSystem = Environs.FileSystem
print("Measuring parameters ...")
bh.StopPattern()
hc.UpdateRFPowerMonitor()
hc.UpdateRFFrequencyMonitor()
bh.StartPattern()
hc.UpdateBCurrentMonitor()
hc.UpdateVMonitor()
hc.UpdateI2AOMFreqMonitor()
print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
print("Bias: " + str(hc.BiasCurrent))
print("B step: " + str(abs(hc.FlipStepCurrent)))
print("DB step: " + str(abs(hc.CalStepCurrent)))
# load a default BlockConfig and customise it appropriately
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
bc = loadBlockConfig(settingsPath + "default.xml")
bc.Settings["cluster"] = cluster
bc.Settings["eState"] = eState
bc.Settings["bState"] = bState
bc.Settings["rfState"] = False
bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
# these next 3, seemingly redundant, lines are to preserve backward compatibility
bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000
bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre
bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep
bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre
bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep
bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
bc.GetModulationByName("RF1F").PhysicalCentre = hc.RF1FrequencyCentre
bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep
bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre
bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep
bc.GetModulationByName("LF1").Centre = hc.FLPZTVoltage
bc.GetModulationByName("LF1").Step = hc.FLPZTStep
bc.GetModulationByName("LF1").PhysicalCentre = hc.I2LockAOMFrequencyCentre
bc.GetModulationByName("LF1").PhysicalStep = hc.I2LockAOMFrequencyStep
# generate the waveform codes
print("Generating waveform codes ...")
eWave = bc.GetModulationByName("E").Waveform
eWave.Name = "E"
lf1Wave = bc.GetModulationByName("LF1").Waveform
lf1Wave.Name = "LF1"
ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F") )
bc.GetModulationByName("B").Waveform = ws["B"]
bc.GetModulationByName("DB").Waveform = ws["DB"]
bc.GetModulationByName("PI").Waveform = ws["PI"]
bc.GetModulationByName("RF1A").Waveform = ws["RF1A"]
bc.GetModulationByName("RF2A").Waveform = ws["RF2A"]
bc.GetModulationByName("RF1F").Waveform = ws["RF1F"]
bc.GetModulationByName("RF2F").Waveform = ws["RF2F"]
# change the inversions of the static codes E and LF1
bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool()
bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool()
# print the waveform codes
printWaveformCode(bc, "E")
printWaveformCode(bc, "B")
printWaveformCode(bc, "DB")
printWaveformCode(bc, "PI")
printWaveformCode(bc, "RF1A")
printWaveformCode(bc, "RF2A")
printWaveformCode(bc, "RF1F")
printWaveformCode(bc, "RF2F")
printWaveformCode(bc, "LF1")
# store e-switch info in block config
print("Storing E switch parameters ...")
bc.Settings["eRampDownTime"] = hc.ERampDownTime
bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
bc.Settings["eBleedTime"] = hc.EBleedTime
bc.Settings["eSwitchTime"] = hc.ESwitchTime
bc.Settings["eRampUpTime"] = hc.ERampUpTime
bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
# this is for legacy analysis compatibility
bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
return bc
kSteppingBiasCurrentPerVolt = 1000.0
kBMaxChange = 0.05
kRFAVoltsPerCal = 3.2
kRFAMaxChange = 0.1
kRFFVoltsPerCal = 8
kRFFMaxChange = 0.1
def updateLocks(bState):
pmtChannelValues = bh.DBlock.ChannelValues[0]
    # note the trailing comma: ("SIG",) is a one-element tuple, not a string
sigIndex = pmtChannelValues.GetChannelIndex(("SIG",))
sigValue = pmtChannelValues.GetValue(sigIndex)
bIndex = pmtChannelValues.GetChannelIndex(("B",))
bValue = pmtChannelValues.GetValue(bIndex)
#bError = pmtChannelValues.GetError(bIndex)
dbIndex = pmtChannelValues.GetChannelIndex(("DB",))
dbValue = pmtChannelValues.GetValue(dbIndex)
#dbError = pmtChannelValues.GetError(dbIndex)
rf1aIndex = pmtChannelValues.GetChannelIndex(("RF1A",))
rf1aValue = pmtChannelValues.GetValue(rf1aIndex)
#rf1aError = pmtChannelValues.GetError(rf1aIndex)
rf2aIndex = pmtChannelValues.GetChannelIndex(("RF2A",))
rf2aValue = pmtChannelValues.GetValue(rf2aIndex)
#rf2aError = pmtChannelValues.GetError(rf2aIndex)
rf1fIndex = pmtChannelValues.GetChannelIndex(("RF1F",))
rf1fValue = pmtChannelValues.GetValue(rf1fIndex)
#rf1fError = pmtChannelValues.GetError(rf1fIndex)
rf2fIndex = pmtChannelValues.GetChannelIndex(("RF2F",))
rf2fValue = pmtChannelValues.GetValue(rf2fIndex)
#rf2fError = pmtChannelValues.GetError(rf2fIndex)
lf1Index = pmtChannelValues.GetChannelIndex(("LF1",))
lf1Value = pmtChannelValues.GetValue(lf1Index)
#lf1Error = pmtChannelValues.GetError(lf1Index)
lf1dbIndex = pmtChannelValues.GetChannelIndex(("LF1","DB"))
lf1dbValue = pmtChannelValues.GetValue(lf1dbIndex)
print "SIG: " + str(sigValue)
print "B: " + str(bValue) + " DB: " + str(dbValue)
print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
print "LF1: " + str(lf1Value) + " LF1.DB: " + str(lf1dbValue)
# B bias lock
# the sign of the feedback depends on the b-state
if bState:
feedbackSign = 1
else:
feedbackSign = -1
deltaBias = - (1.0/3.0) * feedbackSign * (hc.CalStepCurrent * (bValue / dbValue)) / kSteppingBiasCurrentPerVolt
deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, 0, 5)
hc.SetSteppingBBiasVoltage( newBiasVoltage )
# RFA locks
deltaRF1A = - (1.0/4.0) * (rf1aValue / dbValue) * kRFAVoltsPerCal
deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
hc.SetRF1AttCentre( newRF1A )
#
deltaRF2A = - (1.0/4.0) * (rf2aValue / dbValue) * kRFAVoltsPerCal
deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
hc.SetRF2AttCentre( newRF2A )
# RFF locks
deltaRF1F = - (1.0/5.0) * (rf1fValue / dbValue) * kRFFVoltsPerCal
deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 5 - hc.RF1FMStep)
hc.SetRF1FMCentre( newRF1F )
#
deltaRF2F = - (1.0/5.0) * (rf2fValue / dbValue) * kRFFVoltsPerCal
deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 5 - hc.RF2FMStep )
hc.SetRF2FMCentre( newRF2F )
# Laser frequency lock
deltaLF1 = 1.25 * (lf1Value / dbValue)
deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
print "Attempting to change LF1 by " + str(deltaLF1) + " V."
newLF1 = windowValue( hc.FLPZTVoltage - deltaLF1, 0, 5 )
hc.SetFLPZTVoltage( newLF1 )
def windowValue(value, minValue, maxValue):
    # clamp value into the closed interval [minValue, maxValue]
    if value < minValue:
        return minValue
    elif value > maxValue:
        return maxValue
    else:
        return value
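# A minimal sketch of the clamp above (values illustrative; kBMaxChange = 0.05):
#   windowValue(0.2, -kBMaxChange, kBMaxChange)    # -> 0.05 (clamped to the maximum)
#   windowValue(-0.01, -kBMaxChange, kBMaxChange)  # -> -0.01 (already inside the window)
# Each lock above computes a correction, clamps it with windowValue, and only then
# steps the hardware, so a glitch in the error signal cannot slew a setpoint.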
kTargetRotationPeriod = 15
kReZeroLeakageMonitorsPeriod = 10
r = Random()
def EDMGo():
# Setup
f = None
fileSystem = Environs.FileSystem
dataPath = fileSystem.GetDataDirectory(fileSystem.Paths["edmDataPath"])
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
print("Data directory is : " + dataPath)
print("")
suggestedClusterName = fileSystem.GenerateNextDataFileName()
sm.SelectProfile("Scan B")
# User inputs data
cluster = prompt("Cluster name [" + suggestedClusterName +"]: ")
if cluster == "":
cluster = suggestedClusterName
print("Using cluster " + suggestedClusterName)
eState = Boolean.Parse(prompt("E-state: "))
bState = Boolean.Parse(prompt("B-state: "))
# this is to make sure the B current monitor is in a sensible state
hc.UpdateBCurrentMonitor()
bc = measureParametersAndMakeBC(cluster, eState, bState)
    #WARNING scramble/block code added, doesn't really belong in summer 2008 DK
#randomise Ramsey phase
scramblerV = 0.724774 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# loop and take data
blockIndex = 0
maxBlockIndex = 10000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# save the block config and load into blockhead
print("Saving temp config.")
bc.Settings["clusterIndex"] = blockIndex
        tempConfigFile = '%(p)stemp%(c)s_%(i)s.xml' % {'p': settingsPath, 'c': cluster, 'i': blockIndex}
saveBlockConfig(tempConfigFile, bc)
System.Threading.Thread.Sleep(500)
print("Loading temp config.")
bh.LoadConfig(tempConfigFile)
# take the block and save it
print("Running ...")
bh.AcquireAndWait()
print("Done.")
blockPath = '%(p)s%(c)s_%(i)s.zip' % {'p': dataPath, 'c': cluster, 'i': blockIndex}
bh.SaveBlock(blockPath)
print("Saved block "+ str(blockIndex) + ".")
# give mma a chance to analyse the block
print("Notifying Mathematica and waiting ...")
writeLatestBlockNotificationFile(cluster, blockIndex)
System.Threading.Thread.Sleep(5000)
print("Done.")
# increment and loop
File.Delete(tempConfigFile)
checkYAGAndFix()
blockIndex = blockIndex + 1
updateLocks(bState)
        #WARNING scramble/block code added, doesn't really belong in summer 2008 DK
#randomise Ramsey phase
scramblerV = 0.724774 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
bc = measureParametersAndMakeBC(cluster, eState, bState)
# do things that need periodically doing
if ((blockIndex % kTargetRotationPeriod) == 0):
print("Rotating target.")
hc.StepTarget(5)
if ((blockIndex % kReZeroLeakageMonitorsPeriod) == 0):
print("Recalibrating leakage monitors.")
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(1000)
hc.EnableBleed( False )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableEField( True )
bh.StopPattern()
def run_script():
EDMGo()
|
people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The world is saved!")
if people < dogs:
print("The world is drooled on!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs.")
if people == dogs:
print("People are dogs.")
|
from .air import air
from .water import water
from .mercury import mercury
|
from unittest import TestCase
from day6 import Santa
class TestSanta(TestCase):
def test_coordinates_from_single_digit(self):
santa = Santa()
actual = santa._coordinates_from('turn on 0,0 through 0,0')
expected = [0, 0, 0, 0]
self.assertEqual(expected, actual)
def test_coordinates_from_multiple_digit(self):
santa = Santa()
actual = santa._coordinates_from('toggle 900,100 through 999,555')
expected = [900, 100, 999, 555]
self.assertEqual(expected, actual)
def test_send_instructions_on_one(self):
santa = Santa()
santa.send_instructions('turn on 0,0 through 0,0')
self.assertEqual(1, santa._grid.lights_on_count())
def test_send_instructions_on_all(self):
santa = Santa()
santa.send_instructions('turn on 0,0 through 999,999')
self.assertEqual(1000000, santa._grid.lights_on_count())
def test_send_instructions_toggle(self):
santa = Santa()
santa.send_instructions('toggle 0,0 through 999,0')
self.assertEqual(1000, santa._grid.lights_on_count())
santa.send_instructions('toggle 500,0 through 999,0')
self.assertEqual(500, santa._grid.lights_on_count())
def test_send_instructions_off(self):
santa = Santa()
santa.send_instructions('turn on 0,0 through 999,0')
self.assertEqual(1000, santa._grid.lights_on_count())
santa.send_instructions('turn off 0,0 through 499,0')
self.assertEqual(500, santa._grid.lights_on_count())
def test_send_instructions_multiple_lines(self):
santa = Santa()
santa.send_instructions('turn on 0,0 through 999,0\nturn off 0,0 through 499,0\ntoggle 999,999 through 999,999')
self.assertEqual(501, santa._grid.lights_on_count())
|
from __future__ import with_statement, absolute_import
import os
import sys
import re
import six
import pkg_resources
from six.moves.configparser import ConfigParser
from six.moves.urllib.parse import unquote
from .util import fix_call, lookup_object
__all__ = ['loadapp', 'loadserver', 'loadfilter', 'appconfig']
def import_string(s):
return pkg_resources.EntryPoint.parse("x=" + s).load(False)
def _aslist(obj):
"""
Turn object into a list; lists and tuples are left as-is, None
becomes [], and everything else turns into a one-element list.
"""
if obj is None:
return []
elif isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
def _flatten(lst):
"""
Flatten a nested list.
"""
if not isinstance(lst, (list, tuple)):
return [lst]
result = []
for item in lst:
result.extend(_flatten(item))
return result
class NicerConfigParser(ConfigParser):
def __init__(self, filename, *args, **kw):
ConfigParser.__init__(self, *args, **kw)
self.filename = filename
if hasattr(self, '_interpolation'):
self._interpolation = self.InterpolateWrapper(self._interpolation)
read_file = getattr(ConfigParser, 'read_file', ConfigParser.readfp)
def defaults(self):
"""Return the defaults, with their values interpolated (with the
defaults dict itself)
Mainly to support defaults using values such as %(here)s
"""
defaults = ConfigParser.defaults(self).copy()
for key, val in six.iteritems(defaults):
defaults[key] = self.get('DEFAULT', key) or val
return defaults
def _interpolate(self, section, option, rawval, vars):
# Python < 3.2
try:
return ConfigParser._interpolate(
self, section, option, rawval, vars)
except Exception:
e = sys.exc_info()[1]
args = list(e.args)
args[0] = 'Error in file %s: %s' % (self.filename, e)
e.args = tuple(args)
e.message = args[0]
raise
class InterpolateWrapper(object):
# Python >= 3.2
def __init__(self, original):
self._original = original
def __getattr__(self, name):
return getattr(self._original, name)
def before_get(self, parser, section, option, value, defaults):
try:
return self._original.before_get(parser, section, option,
value, defaults)
except Exception:
e = sys.exc_info()[1]
args = list(e.args)
args[0] = 'Error in file %s: %s' % (parser.filename, e)
e.args = tuple(args)
e.message = args[0]
raise
class _ObjectType(object):
name = None
egg_protocols = None
config_prefixes = None
def __init__(self):
# Normalize these variables:
self.egg_protocols = [_aslist(p) for p in _aslist(self.egg_protocols)]
self.config_prefixes = [_aslist(p) for p in _aslist(self.config_prefixes)]
def __repr__(self):
return '<%s protocols=%r prefixes=%r>' % (
self.name, self.egg_protocols, self.config_prefixes)
def invoke(self, context):
assert context.protocol in _flatten(self.egg_protocols)
return fix_call(context.object,
context.global_conf, **context.local_conf)
class _App(_ObjectType):
name = 'application'
egg_protocols = ['paste.app_factory', 'paste.composite_factory',
'paste.composit_factory']
config_prefixes = [['app', 'application'], ['composite', 'composit'],
'pipeline', 'filter-app']
def invoke(self, context):
local_conf = dict(context.local_conf)
local_conf.pop('_montague_use', None) # some things won't have it
if context.protocol in ('paste.composit_factory',
'paste.composite_factory'):
return fix_call(context.object,
context.loader, context.global_conf,
**local_conf)
elif context.protocol == 'paste.app_factory':
return fix_call(context.object, context.global_conf, **local_conf)
else:
assert 0, "Protocol %r unknown" % context.protocol
APP = _App()
class _Filter(_ObjectType):
name = 'filter'
egg_protocols = [['paste.filter_factory', 'paste.filter_app_factory']]
config_prefixes = ['filter']
def invoke(self, context):
local_conf = dict(context.local_conf)
local_conf.pop('_montague_use', None) # some things won't have it
if context.protocol == 'paste.filter_factory':
return fix_call(context.object,
context.global_conf, **local_conf)
elif context.protocol == 'paste.filter_app_factory':
def filter_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**local_conf)
return filter_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
FILTER = _Filter()
class _Server(_ObjectType):
name = 'server'
egg_protocols = [['paste.server_factory', 'paste.server_runner']]
config_prefixes = ['server']
def invoke(self, context):
local_conf = dict(context.local_conf)
local_conf.pop('_montague_use', None) # some things won't have it
if context.protocol == 'paste.server_factory':
return fix_call(context.object,
context.global_conf, local_conf)
elif context.protocol == 'paste.server_runner':
def server_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**local_conf)
return server_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
SERVER = _Server()
class _PipeLine(_ObjectType):
name = 'pipeline'
def invoke(self, context):
app = context.app_context.create()
filters = [c.create() for c in context.filter_contexts]
filters.reverse()
for filter in filters:
app = filter(app)
return app
PIPELINE = _PipeLine()
class _FilterApp(_ObjectType):
name = 'filter_app'
def invoke(self, context):
next_app = context.next_context.create()
filter = context.filter_context.create()
return filter(next_app)
FILTER_APP = _FilterApp()
class _FilterWith(_App):
name = 'filtered_with'
def invoke(self, context):
filter = context.filter_context.create()
filtered = context.next_context.create()
if context.next_context.object_type is APP:
return filter(filtered)
else:
# filtering a filter
def composed(app):
return filter(filtered(app))
return composed
FILTER_WITH = _FilterWith()
def loadapp(uri, name=None, **kw):
return loadobj(APP, uri, name=name, **kw)
def loadfilter(uri, name=None, **kw):
return loadobj(FILTER, uri, name=name, **kw)
def loadserver(uri, name=None, **kw):
return loadobj(SERVER, uri, name=name, **kw)
def appconfig(uri, name=None, relative_to=None, global_conf=None):
context = loadcontext(APP, uri, name=name,
relative_to=relative_to,
global_conf=global_conf)
return context.config()
_loaders = {}
def loadobj(object_type, uri, name=None, relative_to=None,
global_conf=None):
context = loadcontext(
object_type, uri, name=name, relative_to=relative_to,
global_conf=global_conf)
return context.create()
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if '#' in uri:
if name is None:
uri, name = uri.split('#', 1)
else:
# @@: Ignore fragment or error?
uri = uri.split('#', 1)[0]
if name is None:
name = 'main'
if ':' not in uri:
raise LookupError("URI has no scheme: %r" % uri)
scheme, path = uri.split(':', 1)
scheme = scheme.lower()
if scheme not in _loaders:
raise LookupError(
"URI scheme not known: %r (from %s)"
% (scheme, ', '.join(_loaders.keys())))
return _loaders[scheme](
object_type,
uri, path, name=name, relative_to=relative_to,
global_conf=global_conf)
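# Sketch of the registered URI schemes (paths and names illustrative):
#   loadapp('config:/etc/myapp/deploy.ini#main')  # INI file        -> ConfigLoader
#   loadapp('egg:MyProject#main')                 # egg entry point -> EggLoader
#   loadapp('call:mypackage.wsgi:make_app')       # plain callable  -> FuncLoader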
def _loadconfig(object_type, uri, path, name, relative_to,
global_conf):
isabs = os.path.isabs(path)
# De-Windowsify the paths:
path = path.replace('\\', '/')
if not isabs:
if not relative_to:
raise ValueError(
"Cannot resolve relative uri %r; no relative_to keyword "
"argument given" % uri)
relative_to = relative_to.replace('\\', '/')
if relative_to.endswith('/'):
path = relative_to + path
else:
path = relative_to + '/' + path
if path.startswith('///'):
path = path[2:]
path = unquote(path)
loader = ConfigLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
_loaders['config'] = _loadconfig
def _loadegg(object_type, uri, spec, name, relative_to,
global_conf):
loader = EggLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['egg'] = _loadegg
def _loadfunc(object_type, uri, spec, name, relative_to,
global_conf):
loader = FuncLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['call'] = _loadfunc
class _Loader(object):
def get_app(self, name=None, global_conf=None):
return self.app_context(
name=name, global_conf=global_conf).create()
def get_filter(self, name=None, global_conf=None):
return self.filter_context(
name=name, global_conf=global_conf).create()
def get_server(self, name=None, global_conf=None):
return self.server_context(
name=name, global_conf=global_conf).create()
def app_context(self, name=None, global_conf=None):
return self.get_context(
APP, name=name, global_conf=global_conf)
def filter_context(self, name=None, global_conf=None):
return self.get_context(
FILTER, name=name, global_conf=global_conf)
def server_context(self, name=None, global_conf=None):
return self.get_context(
SERVER, name=name, global_conf=global_conf)
_absolute_re = re.compile(r'^[a-zA-Z]+:')
def absolute_name(self, name):
"""
Returns true if the name includes a scheme
"""
if name is None:
return False
return self._absolute_re.search(name)
class ConfigLoader(_Loader):
def __init__(self, filename):
self.filename = filename = filename.strip()
defaults = {
'here': os.path.dirname(os.path.abspath(filename)),
'__file__': os.path.abspath(filename)
}
self.parser = NicerConfigParser(filename, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
with open(filename) as f:
self.parser.read_file(f)
def update_defaults(self, new_defaults, overwrite=True):
for key, value in six.iteritems(new_defaults):
if not overwrite and key in self.parser._defaults:
continue
self.parser._defaults[key] = value
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
relative_to=os.path.dirname(self.filename),
global_conf=global_conf)
section = self.find_config_section(
object_type, name=name)
if global_conf is None:
global_conf = {}
else:
global_conf = global_conf.copy()
defaults = self.parser.defaults()
global_conf.update(defaults)
local_conf = {}
global_additions = {}
get_from_globals = {}
for option in self.parser.options(section):
if option.startswith('set '):
name = option[4:].strip()
global_additions[name] = global_conf[name] = (
self.parser.get(section, option))
elif option.startswith('get '):
name = option[4:].strip()
get_from_globals[name] = self.parser.get(section, option)
else:
if option in defaults:
# @@: It's a global option (?), so skip it
continue
local_conf[option] = self.parser.get(section, option)
for local_var, glob_var in get_from_globals.items():
local_conf[local_var] = global_conf[glob_var]
if object_type in (APP, FILTER) and 'filter-with' in local_conf:
filter_with = local_conf.pop('filter-with')
else:
filter_with = None
if 'require' in local_conf:
for spec in local_conf['require'].split():
pkg_resources.require(spec)
del local_conf['require']
if section.startswith('filter-app:'):
context = self._filter_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif section.startswith('pipeline:'):
context = self._pipeline_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif 'use' in local_conf:
context = self._context_from_use(
object_type, local_conf, global_conf, global_additions,
section)
else:
context = self._context_from_explicit(
object_type, local_conf, global_conf, global_additions,
section)
if filter_with is not None:
filter_with_context = LoaderContext(
section=None,
obj=None,
object_type=FILTER_WITH,
protocol=None,
global_conf=global_conf, local_conf=local_conf,
loader=self)
filter_with_context.filter_context = self.filter_context(
name=filter_with, global_conf=global_conf)
filter_with_context.next_context = context
return filter_with_context
return context
def _context_from_use(self, object_type, local_conf, global_conf,
global_additions, section):
use = local_conf.pop('use')
context = self.get_context(
object_type, name=use, global_conf=global_conf)
context.global_conf.update(global_additions)
context.local_conf.update(local_conf)
if '_montague_use' not in context.local_conf:
context.local_conf['_montague_use'] = {'use': use}
if '__file__' in global_conf:
# use sections shouldn't overwrite the original __file__
context.global_conf['__file__'] = global_conf['__file__']
# @@: Should loader be overwritten?
context.loader = self
if context.protocol is None:
# Determine protocol from section type
section_protocol = section.split(':', 1)[0]
if section_protocol in ('application', 'app'):
context.protocol = 'paste.app_factory'
elif section_protocol in ('composit', 'composite'):
context.protocol = 'paste.composit_factory'
else:
# This will work with 'server' and 'filter', otherwise it
# could fail but there is an error message already for
# bad protocols
context.protocol = 'paste.%s_factory' % section_protocol
if context.section is None:
context.section = section
return context
def _context_from_explicit(self, object_type, local_conf, global_conf,
global_additions, section):
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
if protocol in local_conf:
possible.append((protocol, local_conf[protocol]))
break
if len(possible) > 1:
raise LookupError(
"Multiple protocols given in section %r: %s"
% (section, possible))
if not possible:
raise LookupError(
"No loader given in section %r" % section)
found_protocol, found_expr = possible[0]
del local_conf[found_protocol]
local_conf['_montague_use'] = {found_protocol: found_expr}
value = import_string(found_expr)
context = LoaderContext(
section, value, object_type, found_protocol,
global_conf, local_conf, self)
return context
def _filter_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'next' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'next' setting"
% (section, self.filename))
next_name = local_conf.pop('next')
context = LoaderContext(section, None, FILTER_APP, None, global_conf,
local_conf, self)
context.next_context = self.get_context(
APP, next_name, global_conf)
if 'use' in local_conf:
context.filter_context = self._context_from_use(
FILTER, local_conf, global_conf, global_additions,
section)
else:
context.filter_context = self._context_from_explicit(
FILTER, local_conf, global_conf, global_additions,
section)
return context
def _pipeline_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'pipeline' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'pipeline' setting"
% (section, self.filename))
pipeline = local_conf.pop('pipeline').split()
if local_conf:
raise LookupError(
"The [%s] pipeline section in %s has extra "
"(disallowed) settings: %s"
            % (section, self.filename, ', '.join(local_conf.keys())))
context = LoaderContext(section, None, PIPELINE, None, global_conf,
local_conf, self)
context.app_context = self.get_context(
APP, pipeline[-1], global_conf)
context.filter_contexts = [
self.get_context(FILTER, _name, global_conf)
for _name in pipeline[:-1]]
return context
def find_config_section(self, object_type, name=None):
"""
        Return the section name with the given name prefix (following the
        same pattern as ``protocol_desc`` in ``config``). It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored.
"""
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0]
def _find_sections(self, sections, name_prefix, name):
found = []
if name is None:
if name_prefix in sections:
found.append(name_prefix)
name = 'main'
for section in sections:
if section.startswith(name_prefix + ':'):
if section[len(name_prefix) + 1:].strip() == name:
found.append(section)
return found
class EggLoader(_Loader):
def __init__(self, spec):
self.spec = spec
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
global_conf=global_conf)
entry_point, protocol, ep_name = self.find_egg_entry_point(
object_type, name=name)
return LoaderContext(
None,
entry_point,
object_type,
protocol,
global_conf or {}, {},
self,
distribution=pkg_resources.get_distribution(self.spec),
entry_point_name=ep_name)
def find_egg_entry_point(self, object_type, name=None):
"""
        Return the (entry_point, protocol, name) triple for the given
        ``name``.
"""
if name is None:
name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
pkg_resources.require(self.spec)
entry = pkg_resources.get_entry_info(
self.spec,
protocol,
name)
if entry is not None:
possible.append((entry.load(), protocol, entry.name))
break
if not possible:
# Better exception
dist = pkg_resources.get_distribution(self.spec)
msg_tpl = ("Entry point %r not found in egg %r (dir: %s; protocols: %s; "
"entry_points: %s)")
msg = msg_tpl % (
name, self.spec,
dist.location,
', '.join(_flatten(object_type.egg_protocols)),
', '.join(_flatten([list(six.iterkeys(
pkg_resources.get_entry_info(self.spec, prot, name) or {}))
for prot in protocol_options] or '(no entry points)'))
)
raise LookupError(msg)
if len(possible) > 1:
raise LookupError(
"Ambiguous entry points for %r in egg %r (protocols: %s)"
% (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
class FuncLoader(_Loader):
""" Loader that supports specifying functions inside modules, without
using eggs at all. Configuration should be in the format:
use = call:my.module.path:function_name
Dot notation is supported in both the module and function name, e.g.:
use = call:my.module.path:object.method
"""
def __init__(self, spec):
self.spec = spec
if ':' not in spec:
raise LookupError("Configuration not in format module:function")
def get_context(self, object_type, name=None, global_conf=None):
obj = lookup_object(self.spec)
return LoaderContext(
None,
obj,
object_type,
None, # determine protocol from section type
global_conf or {},
{},
self,
)
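# Sketch of a config section resolved by FuncLoader (module path illustrative):
#   [app:main]
#   use = call:mypackage.wsgi:make_app
# loadapp('config:/path/to/deploy.ini') would then call
# make_app(global_conf, **local_conf) to build the application.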
class LoaderContext(object):
def __init__(self, section, obj, object_type, protocol,
global_conf, local_conf, loader,
distribution=None, entry_point_name=None):
self.section = section
self.object = obj
self.object_type = object_type
self.protocol = protocol
# assert protocol in _flatten(object_type.egg_protocols), (
# "Bad protocol %r; should be one of %s"
# % (protocol, ', '.join(map(repr, _flatten(object_type.egg_protocols)))))
self.global_conf = global_conf
self.local_conf = local_conf
self.loader = loader
self.distribution = distribution
self.entry_point_name = entry_point_name
def create(self):
return self.object_type.invoke(self)
def config(self):
conf = AttrDict(self.global_conf)
conf.update(self.local_conf)
conf.local_conf = self.local_conf
conf.global_conf = self.global_conf
conf.context = self
return conf
class AttrDict(dict):
"""
    A dictionary that also allows attribute assignment (used to attach
    local_conf, global_conf and the context to the returned config).
"""
pass
|
import re
import requests
import lxml.html
def main():
'''
main process
'''
session = requests.Session()
response = session.get('https://gihyo.jp/dp')
urls = scrape_list_page(response)
for url in urls:
response = session.get(url)
ebook = scrape_detail_page(response)
print(ebook)
break
def scrape_list_page(response):
'''
retrieve urls from web page
'''
root = lxml.html.fromstring(response.content)
root.make_links_absolute(response.url)
for a in root.cssselect('#listBook a[itemprop="url"]'):
url = a.get('href')
yield url
def scrape_detail_page(response):
"""
get detail page info as dict type
"""
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'title': root.cssselect('#bookTitle')[0].text_content(),
'price': root.cssselect('.buy')[0].text,
'content': [normalize_spaces(h3.text_content()) for h3 in root.cssselect('#content > h3')],
}
return ebook
def normalize_spaces(s):
'''
    collapse runs of whitespace into a single space and trim the ends
'''
return re.sub(r'\s+', ' ', s).strip()
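# Sketch of the normalisation above:
#   normalize_spaces(' Python \n\t Crawling ')  # -> 'Python Crawling'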
if __name__ == '__main__':
main()
|
import requests
__author__ = 'Rob Derksen <rob.derksen@hubsec.eu>'
__version__ = '0.1.1'
class PynoramioException(Exception):
""" PynoramioException: class used as a custom exception for Pynoramio related errors.
"""
pass
class Pynoramio:
def __init__(self):
        self.base_url = 'http://www.panoramio.com/map/get_panoramas.php?order=popularity'
def _request(self, lat_min, lon_min, lat_max, lon_max, start, end, picture_size=None, set_=None, map_filter=None):
"""
Internal method to send requests to the Panoramio data API.
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param start:
            Index of the first photo to retrieve, where 0 is the most popular picture
:type start: int
:param end:
            Index of the last photo to retrieve, counting from the most popular picture at 0
:type end: int
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
            This can be: public, full or a user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: JSON response of the request formatted as a dictionary.
"""
if not isinstance(lat_min, float):
raise PynoramioException(
'{0}._request requires the lat_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_min, float):
raise PynoramioException(
'{0}._request requires the lon_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lat_max, float):
raise PynoramioException(
'{0}._request requires the lat_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_max, float):
raise PynoramioException(
'{0}._request requires the lon_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(start, int):
raise PynoramioException(
'{0}._request requires the start parameter to be an int.'.format(self.__class__.__name__))
if not isinstance(end, int):
raise PynoramioException(
'{0}._request requires the end parameter to be an int.'.format(self.__class__.__name__))
url = self.base_url + '&minx={0}&miny={1}&maxx={2}&maxy={3}&from={4}&to={5}'.format(lon_min, lat_min,
lon_max, lat_max,
start, end)
if picture_size is not None and isinstance(picture_size, basestring) \
and picture_size in ['original', 'medium', 'small', 'thumbnail', 'square', 'mini_square']:
url += '&size={0}'.format(picture_size)
if set_ is not None and (isinstance(set_, basestring) and set_ in ['public', 'full']) \
or (isinstance(set_, int)):
url += '&set={0}'.format(set_)
else:
url += '&set=public'
if map_filter is not None and isinstance(map_filter, bool) and not map_filter:
url += '&map_filter=false'
r = requests.get(url)
try:
return r.json()
except ValueError:
# add your debugging lines here, for example, print(r.url)
raise PynoramioException(
'An invalid or malformed url was passed to {0}._request'.format(self.__class__.__name__))
def get_from_area(self, lat_min, lon_min, lat_max, lon_max, picture_size=None, set_=None, map_filter=None):
"""
Get all available photos for a specific bounding box
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
            This can be: public, full or a user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: Returns the full dataset of all available photos
"""
page_size = 100
page = 0
result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
total_photos = result['count']
if total_photos < page_size:
return result
page += 1
pages = (total_photos / page_size) + 1
while page < pages:
new_result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
result['photos'].extend(new_result['photos'])
page += 1
return result
def get_all_pictures_cursor(self, lat_min, lon_min, lat_max, lon_max, picture_size=None, set_=None,
map_filter=None):
"""
Generator to get all available photos for a given bounding box
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
            This can be: public, full or a user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: Yields individual dicts of photos
"""
page_size = 100
page = 0
result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
total_photos = result['count']
for photo in result['photos']:
yield photo
if total_photos < page_size:
            return  # end the generator; raising StopIteration here is an error under PEP 479
page += 1
pages = (total_photos / page_size) + 1
while page < pages:
result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
for photo in result['photos']:
yield photo
page += 1
__all__ = ['Pynoramio', 'PynoramioException']
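# Usage sketch (the Panoramio service has since been shut down; coordinates are
# illustrative):
#   client = Pynoramio()
#   area = client.get_from_area(52.0, 4.0, 52.5, 4.5, picture_size='medium')
#   for photo in client.get_all_pictures_cursor(52.0, 4.0, 52.5, 4.5):
#       print photo['photo_file_url']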
|
from django.db import models
import datetime
class ProductInfo(models.Model):
name = models.CharField(max_length=60, unique=True)
# Global Trade Item Number
gtin = models.CharField(max_length=60)
image_url = models.CharField(max_length=128)
# disk_size = models.PositiveIntegerField()
# Nutrient facts
# kcal
energy = models.FloatField()
# g
proteins = models.FloatField()
# g
carbohydrates = models.FloatField()
# g
fats = models.FloatField()
base_quantity = models.FloatField()
base_unit = models.CharField(max_length=6)
def __repr__(self):
return 'Product (#%s): <%s>' % (self.gtin, self.name)
def __str__(self):
return self.name
def get_fields(self):
result = {}
keys = [
'name', 'gtin', 'image_url', 'energy', 'proteins', 'carbohydrates',
'fats', 'base_unit', 'base_quantity',
]
for key in keys:
result[key] = getattr(self, key)
return result
class DiaryRecord(models.Model):
# Timestamp
consumed = models.DateTimeField(auto_now_add=True)
product = models.ForeignKey(ProductInfo, related_name='diary_entries')
@staticmethod
def get_daily_values(selected_date):
        """selected_date is a datetime/date object or a 'YYYYMMDD' string."""
if isinstance(selected_date, basestring):
fmt = '%Y%m%d'
selected_date = datetime.datetime.strptime(selected_date, fmt)
values = []
for record in DiaryRecord.objects.filter(
consumed__year=selected_date.year,
consumed__month=selected_date.month,
consumed__day=selected_date.day,
):
fields = record.product.get_fields()
fields['consumed'] = record.consumed.strftime(
'%Y-%m-%d %H:%M')
values.append(fields)
return values
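    # Sketch (the date string format follows the strptime pattern above):
    #   DiaryRecord.get_daily_values('20160101')
    # returns a list of product-fact dicts, each with a 'consumed' timestamp,
    # for everything recorded on that day.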
def __repr__(self):
return 'Record w/ <%s>' % (self.product.name)
def __str__(self):
        return 'Diary Record w/ <%s> on %s' % \
(self.product.name,
self.consumed.strftime('%Y-%m-%d %H:%M'))
|
import logging
logging.getLogger ( "scapy.runtime" ).setLevel ( logging.CRITICAL )
from scapy.all import *
load_contrib ( 'ppi_cace' )
import sys, os, time, signal, subprocess
import argparse
sys.path.insert ( 0, '../../lib/' )
from Queries import *
parser = argparse.ArgumentParser ()
parser.add_argument ( '-f', '--format', metavar='format', dest='format', action='store', help='Format JTR or Hashcat\n',required=True )
parser.add_argument ( '-s', '--ssid', metavar='SSID', dest='ssid', action='store', help=argparse.SUPPRESS, required=False )
parser.add_argument ( '-p', '--path', metavar='path', dest='path', action='store', help='path\n', required=False )
parser.add_argument ( '-w', '--workspace', metavar='database', dest='database', action='store', help='workspace name\n', required=True )
parser.add_argument ( '-i', '--inputfile', metavar='inputfile', dest='inputfile', action='store', help='input file path\n', required=False )
args = parser.parse_args ()
workspace = args.database
q = queries ()
ws = q.db_connect ( '../../' + workspace )
def test(pkts):
global outpath
if args.path:
outpath = args.path
else:
outpath = path
if args.ssid:
SSID_List = args.ssid
MAC_List = str ( q.show_MACs (SSID_List) )
MAC_List = MAC_List.split ( '\n' )
else:
sql = dp.read_sql ( 'select * from INSCOPE_SSIDS', ws )
if sql.empty:
print "No inscope SSIDSs found, please add a SSID before running this module again.\n"
return
else:
SSID_List = str ( q.show_inscope_ssids () )
SSID_List = SSID_List.split ( '\n' )
MAC_List = str ( q.show_inscope_MACs () )
MAC_List = MAC_List.split( '\n' )
for pkt in pkts:
if Dot11Beacon in pkt:
if str ( pkt[Dot11Elt:1].info ) == "":
SSID = "Hidden"
elif str ( pkt[Dot11Elt:1].info ).startswith ( "\000" ):
SSID = "Hidden"
else:
SSID = pkt[Dot11Elt:1].info
SSID = SSID.decode ( 'utf-8', 'ignore' )
if SSID in SSID_List:
wrpcap (outpath +'/filtered.pcap', pkt, append=True )
if pkt.haslayer ( EAPOL ):
EAPOLP = pkt[EAPOL]
if EAPOLP.type == 3:
if pkt.addr2 in MAC_List:
if str ( EAPOLP )[6:8].encode ( "hex" ) == "8a00":
wrpcap ( outpath + '/filtered.pcap', pkt, append=True )
ascii_ap_mac = pkt.addr2
ascii_client_mac = pkt.addr1
aNonce = str ( EAPOLP )[17:49].encode ( "hex" )
print "Frame 1"
print "AP MAC: " + ascii_ap_mac
print "Client MAC: " + ascii_client_mac
print "ANonce: " + aNonce
elif str ( EAPOLP )[6:8].encode ( "hex" ) == "0a00" and str ( EAPOLP )[99:123].encode ( "hex" ):
if pkt.addr2 in MAC_List:
wrpcap ( outpath + '/filtered.pcap', pkt, append=True )
ascii_ap_mac = pkt.addr2
ascii_client_mac = pkt.addr1
sNonce = str ( EAPOLP )[17:49].encode ( "hex" )
mic = str ( EAPOLP )[81:97].encode ( "hex" )
data = str ( EAPOLP )[99:123].encode ( "hex" )
print "Frame 2"
print "AP MAC: " + ascii_ap_mac
print "Client MAC: " + ascii_client_mac
print "SNonce: " + sNonce
print "MIC: " + mic
print "Data: " + data
else:
return
if args.inputfile == "None":
pullpath = args.inputfile
sniff(offline=fullpath, count=0, store=0, prn=test)
else:
path = workspace.split("/")
path = '/'.join(path[0:2])
path = "../../"+path
for file in os.listdir (path):
if file.endswith ( ".pcapdump" ):
fullpath = (os.path.join ( path, file ))
print fullpath
sniff ( offline=fullpath, count=0, store=0, prn=test )
if args.format == "JTR":
    subprocess.call ( 'aircrack-ng -J ' + outpath + '/test1 ' + outpath + '/filtered.pcap', shell=True )
subprocess.call ( 'hccap2john '+ outpath +'/test1.hccap > '+ outpath +'/hccap.john', shell=True )
print "john -wordlist=<path to wordlist> -format=wpapsk \"hccap.john\""
if args.format == "Hashcat":
    subprocess.call ( './cap2hccapx.bin ' + outpath + '/filtered.pcap ' + outpath + '/output.hccapx >/dev/null 2>&1', shell=True )
    print "oclHashcat64.exe -m 2500 -a3 output.hccapx ?d?d?d?d?d?d?d?d"
    print " or"
    print "oclHashcat64.exe -m 2500 -a0 <path to wordlist> output.hccapx"
if args.format == "both":
subprocess.call ( './cap2hccapx.bin '+ outpath +'/filtered.pcap '+ outpath +'/output.hccapx >/dev/null 2>&1', shell=True )
    subprocess.call ( 'aircrack-ng -J ' + outpath + '/test1 ' + outpath + '/filtered.pcap', shell=True )
subprocess.call ( 'hccap2john '+ outpath +'/test1.hccap > '+ outpath +'/hccap.john', shell=True )
print "john -wordlist=<path to wordlist> -format=wpapsk \"hccap.john\""
print "oclHashcat64.exe -m 2500 -a3 capture.hccapx ?d?d?d?d?d?d?d?d"
print " or"
print "oclHashcat64.exe -m 2500 -a0 <path to wordlist> capture.hccapx"
subprocess.call ( 'rm -rf '+ outpath +'/filtered.pcap', shell=True )
subprocess.call ( 'rm -rf '+ outpath +'/test1.hccap', shell=True )
|
class MysqlDumpCommandResult(object):
""" Represents a mysqldump command execution result """
status = -1
result_code = -1
has_error = False
error_cause = None
# MysqlDumpCommand object
command = None
    def __init__(self):
        self.status = -1
        self.result_code = -1
        self.has_error = False
|
import websocket
import json
import time
import threading
class Connection:
"""
A basic object that provides a simple interface of callbacks for sending and
receiving packets.
"""
def __init__(self, limit=0, site="euphoria.io"):
self.site = site
self.socket = None
self.room = ""
self.connected = False
self.idcounter = 0
self.limit = limit
self.last_packet = time.time()
#Different types of callbacks
self.type_callbacks = {}
self.id_callbacks = {}
#Thread stuff
self.lock = threading.RLock()
#Set self.connected when we receive a snapshot-event
def set_connected(packet):
self.connected = True
self.add_callback("snapshot-event", set_connected)
def add_callback(self, ptype, callback):
"""
add_callback(ptype, callback) -> None
Add a callback so that when a packet of type ptype arrives, it will be
        processed by the callback.
"""
if ptype not in self.type_callbacks:
self.type_callbacks[ptype] = []
self.type_callbacks[ptype].append(callback)
def connect(self, room):
"""
connect(room) -> Bool
Connect to the given room. Cannot send messages without first
connecting.
"""
self.room = room
self.connected = False
url = "wss://%s/room/%s/ws" % (self.site, self.room)
try:
self.socket = websocket.create_connection(url, enable_multithread=True, timeout=40)
except (websocket.WebSocketException, IOError):
self.socket = None
return False
return True
def refresh(self, room=None):
"""
refresh(room) -> Bool
Refresh the connection with a room.
"""
sock = self.socket
if room:
self.connect(room)
else:
self.connect(self.room)
if self.socket:
sock.close()
return True
else:
self.socket = sock
return False
def close(self):
"""
close() -> None
Close the connection to the room off nicely.
"""
with self.lock:
if self.socket is not None:
try:
self.socket.abort()
self.socket.close()
except OSError:
pass
self.socket = None
def send_json(self, data):
"""
send_json(data) -> Bool
Send json data into the stream. Returns false on message fail.
"""
now = time.time()
if self.limit != 0 and now - self.last_packet < self.limit:
time.sleep(self.limit - (now - self.last_packet))
try:
self.socket.send(json.dumps(data))
except (AttributeError, websocket.WebSocketException):
with self.lock:
self.socket = None
return False
self.last_packet = time.time()
return True
def receive_data(self):
"""
        receive_data() -> Bool
        Receive a packet and send it to handle_packet() for processing.
Returns false on message fail.
"""
if self.socket is None:
return False
try:
raw = self.socket.recv()
self.handle_packet(json.loads(raw))
except websocket.WebSocketException:
with self.lock:
self.socket = None
return True
def send_packet(self, ptype, data, callback=None):
"""
        send_packet(ptype, data, callback=None) -> None
Creates a packet of type ptype, and sends the data. Also creates a
callback entry for when a reply is received.
"""
#This is locked to prevent multiple threads from accessing the message
with self.lock:
pid = self.idcounter
self.idcounter += 1
packet = {"id": str(pid), "type": ptype, "data": data}
self.send_json(packet)
if callback is not None:
self.id_callbacks[str(pid)] = callback
def handle_packet(self, packet):
"""
handle_packet(packet) -> None
Process a packet and send it off to the appropriate callback.
"""
pid = packet.get("id")
ptype = packet.get("type")
if pid in self.id_callbacks:
callback = self.id_callbacks.pop(pid)
if callable(callback):
callback(packet)
if ptype in self.type_callbacks:
for i in self.type_callbacks[ptype]:
i(packet)
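# Usage sketch (assumes the euphoria.io service is reachable; room and nick are
# illustrative):
#   conn = Connection(limit=1)
#   if conn.connect("test"):
#       conn.send_packet("nick", {"name": "example-bot"})
#       while conn.receive_data():
#           pass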
|
from __future__ import absolute_import
import logging
from datetime import timedelta
from dateutil.parser import parse
import requests
from django.contrib.auth import get_user_model
from .models import Run, RunkeeperToken
log = logging.getLogger(__name__)
RUNKEEPER_BASE_URL = 'https://api.runkeeper.com'
def rk_items_to_runs(user, items):
already_registered_sids = user.runs.exclude(source_id='')\
.filter(source="runkeeper")\
.values_list('source_id', flat=True)
buff = []
for item in items:
if item['uri'] in already_registered_sids:
continue
if item['type'].lower() != 'running':
continue
start_date = parse(item['start_time'])
# if not item['utc_offset']:
# date = date.replace(tzinfo=timezone.UTC)
duration = timedelta(seconds=item['duration'])
buff.append(Run(runner=user,
distance=item['total_distance'] / 1000.0,
start_date=start_date,
end_date=start_date,
source="runkeeper",
source_id=item['uri'],
recorded_time=duration))
if len(buff) >= 1000:
Run.objects.bulk_create(buff)
buff = []
if buff:
Run.objects.bulk_create(buff)
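# Sketch of the expected item shape (fields follow Runkeeper's
# FitnessActivityFeed; values illustrative, user is an existing runner):
#   rk_items_to_runs(user, [{'uri': '/fitnessActivities/1', 'type': 'Running',
#                            'start_time': 'Sat, 1 Jan 2011 00:00:00',
#                            'duration': 1800, 'total_distance': 5000}])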
def create_runs_from_runkeeper(user_id=None):
user = get_user_model().objects.get(id=user_id)
token = RunkeeperToken.objects.get(runner=user).access_token
# TODO: create RunkeeperClient class or use some lib github
# FIXME: get all items from paginated lists!
headers = {
'Authorization': 'Bearer %s' % (token, ),
'Accept': '',
}
url = RUNKEEPER_BASE_URL + '/user'
headers['Accept'] = 'application/vnd.com.runkeeper.User+json'
resp = requests.get(url, headers=headers)
resp.raise_for_status()
user_data = resp.json()
url = RUNKEEPER_BASE_URL + user_data['fitness_activities']
headers['Accept'] = 'application/vnd.com.runkeeper.FitnessActivityFeed+json'
resp = requests.get(url, headers=headers)
resp.raise_for_status()
data = resp.json()
log.debug("Received %d items", len(data['items']))
rk_items_to_runs(user, data['items'])
def pull_all_users_runs_from_runkeeper():
usersids_with_tokens = RunkeeperToken.objects\
.values_list('runner_id', flat=True)
for user_id in usersids_with_tokens:
create_runs_from_runkeeper(user_id=user_id)
|
import requests
from bs4 import BeautifulSoup
import sys
import orgs
import parse
reload(sys)
sys.setdefaultencoding('utf8')
def determine_bounds():
ops = orgs.OPS()
try:
r = requests.get(ops.request_url)
except requests.exceptions.SSLError:
print "Invalid SSL cert:"
r = requests.get(ops.request_url, verify=False)
rtext = r.text
soup = BeautifulSoup(rtext, "lxml")
job_table = soup.find(ops.soup_find_list[0], ops.soup_find_list[1])
job_table = job_table.parent.table
nums = []
rows = job_table.find_all("a", target="_self")
for row in rows:
url_text = row["href"].encode('utf-8')
url_list = url_text.split("JobID=")
job_id = int(url_list[1])
nums.append(job_id)
mi = max(nums) - 100
ma = max(nums) + 100
return mi, ma
def crawl(mi, ma):
results = []
for i in range(mi, ma):
this_url = "https://www.gojobs.gov.on.ca/Preview.aspx?JobID=" + str(i)
try:
page = requests.get(this_url)
except requests.exceptions.SSLError:
print "Invalid SSL cert:"
page = requests.get(this_url, verify=False)
ptext = page.text
soup = BeautifulSoup(ptext, "lxml")
if is_posting(soup):
results.append(this_url)
print "found OPS OT: {}".format(this_url)
return results
def is_posting(soup):
pared_soup = soup.find("table", id="JobAdTable_1")
if not pared_soup:
return False
if pared_soup.find_next(text="Open Targeted "):
return True
return False
if __name__ == '__main__':
mi, ma = int(sys.argv[1]), int(sys.argv[2])
data = parse.build_ot_list(mi, ma)
parse.update_redis(data)
|
import cv2
class VideoCamera(object):
def __init__(self):
print("---VIDEOCAMERA INITIALIZED")
self.video = cv2.VideoCapture(0)
success = False
while not success:
success, image = self.video.read()
if success:
ret, jpeg = cv2.imencode('.jpg', image)
self.last_frame = jpeg.tobytes()
def __del__(self):
print("DELETING VIDEO")
self.video.release()
def get_frame(self, is_jpeg=True):
        # We are using Motion JPEG, but OpenCV defaults to capturing raw frames, so each frame must be JPEG-encoded for the video stream to display correctly.
success = False
while not success:
success, image = self.video.read()
if success:
image = cv2.flip(image, 1)
if is_jpeg:
ret, jpeg = cv2.imencode('.jpg', image)
jpeg = jpeg.tobytes()
self.last_frame = jpeg
return jpeg
else:
return image
def convert_to_jpeg(self, image):
ret, jpeg = cv2.imencode('.jpg', image)
jpeg = jpeg.tobytes()
self.last_frame = jpeg
return jpeg
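# Usage sketch (requires a camera at index 0):
#   camera = VideoCamera()
#   jpeg_bytes = camera.get_frame()               # JPEG bytes for an MJPEG stream
#   raw_frame = camera.get_frame(is_jpeg=False)   # unencoded OpenCV image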
|
import iz
import op
from iterator import Iterator
INFINITY = float('inf')
class Delta(object):
def __init__(self, ops=[]):
# Assume we are given a well formed ops
if isinstance(ops, Delta):
self.ops = ops.ops
elif iz.array(ops):
self.ops = ops
elif iz.dictionary(ops) and iz.array(ops['ops']):
self.ops = ops['ops']
else:
self.ops = []
unicode_ops = []
        for elem in self.ops:  # renamed from 'op' to avoid shadowing the imported op module
            if elem.get('insert') and iz.string(elem['insert']):
                elem['insert'] = unicode(elem['insert'])
            unicode_ops.append(elem)
self.ops = unicode_ops
def insert(self, text, attributes=None):
newOp = {}
if iz.string(text):
if not len(text):
return self
newOp['insert'] = unicode(text)
elif iz.number(text):
newOp['insert'] = text
if iz.dictionary(attributes) and len(attributes):
newOp['attributes'] = attributes
return self.push(newOp)
def delete(self, length):
if length <= 0:
return self
return self.push({'delete': length})
def retain(self, length, attributes=False):
if length <= 0:
return self
newOp = {'retain': length}
if iz.dictionary(attributes) and len(attributes):
newOp['attributes'] = attributes
return self.push(newOp)
def push(self, newOp):
index = len(self.ops)
lastOp = self.ops[index - 1] if index > 0 else None
newOp = op.attributes.clone(newOp)
if iz.dictionary(lastOp):
if iz.number(newOp.get('delete')) and iz.number(lastOp.get('delete')):
self.ops[index - 1] = {'delete': lastOp['delete'] + newOp['delete']}
return self
# Since it does not matter if we insert before or after deleting at the same index,
# always prefer to insert first
if iz.number(lastOp.get('delete')) and (iz.string(newOp.get('insert')) or iz.number(newOp.get('insert'))):
index -= 1
lastOp = self.ops[index - 1] if index > 0 else None
if not iz.dictionary(lastOp):
self.ops = [newOp] + self.ops
return self
if iz.equal(newOp.get('attributes'), lastOp.get('attributes')):
if iz.string(newOp.get('insert')) and iz.string(lastOp.get('insert')):
self.ops[index - 1] = {'insert': lastOp['insert'] + newOp['insert']}
if iz.dictionary(newOp.get('attributes')):
self.ops[index - 1]['attributes'] = newOp['attributes']
return self
elif iz.number(newOp.get('retain')) and iz.number(lastOp.get('retain')):
self.ops[index - 1] = {'retain': lastOp.get('retain') + newOp.get('retain')}
if iz.dictionary(newOp.get('attributes')):
self.ops[index - 1]['attributes'] = newOp.get('attributes')
return self
        self.ops = self.ops[0:index] + [newOp] + self.ops[index:]  # keep the tail; [index:-1] dropped the last op
return self
def length(self):
length = 0
for elem in self.ops:
length += op.length(elem)
return length
def chop(self):
        if self.ops:
            lastOp = self.ops[-1]
            if lastOp.get('retain') and not lastOp.get('attributes'):
                self.ops.pop()
        return self
def slice(self, start=0, end=INFINITY):
delta = Delta()
iterator = Iterator(self.ops)
index = 0
while index < end and iterator.hasNext():
nextOp = None
if index < start:
nextOp = iterator.next(start - index)
else:
nextOp = iterator.next(end - index)
delta.push(nextOp)
index += op.length(nextOp)
return delta
def compose(self, other):
other = Delta(other)
selfIter = Iterator(self.ops)
otherIter = Iterator(other.ops)
self.ops = []
while selfIter.hasNext() or otherIter.hasNext():
if otherIter.peekType() == 'insert':
self.push(otherIter.next())
elif selfIter.peekType() == 'delete':
self.push(selfIter.next())
else:
length = min(selfIter.peekLength(), otherIter.peekLength())
selfOp = selfIter.next(length)
otherOp = otherIter.next(length)
if iz.number(otherOp.get('retain')):
newOp = {}
if iz.number(selfOp.get('retain')):
newOp['retain'] = length
else:
newOp['insert'] = selfOp.get('insert')
# Preserve null when composing with a retain, otherwise remove it for inserts
attributes = op.attributes.compose(
selfOp.get('attributes'),
otherOp.get('attributes'),
iz.number(selfOp.get('retain'))
)
if attributes:
newOp['attributes'] = attributes
self.push(newOp)
# Other op should be delete, we could be an insert or retain
# Insert + delete cancels out
elif iz.number(otherOp.get('delete')) and iz.number(selfOp.get('retain')):
self.push(otherOp)
return self.chop()
def diff(self, other):
raise NotImplementedError()
def transform(self, other, priority=False):
raise NotImplementedError()
def transformPosition(self, index, priority):
raise NotImplementedError()
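# Usage sketch, assuming the iz/op helpers mirror their quill-delta counterparts:
#   a = Delta().insert('Hello')
#   b = Delta().retain(5).insert(' World')
#   a.compose(b).ops  # -> [{'insert': 'Hello World'}]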
|
import numpy as np
import pylab as pl
import glob
from scipy.optimize import curve_fit as cf
import time
def gauss(x,a,b,c):
return a*np.exp(-(x-b)**2 / (2*c**2))
def twoD_Gaussian((x, y), amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
return g.ravel()
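# The a, b, c coefficients above implement a 2D Gaussian whose principal axes are
# rotated by theta; a quick sanity check (illustrative values):
#   twoD_Gaussian((np.array([0.0]), np.array([0.0])), 1.0, 0, 0, 1.0, 1.0, 0.0, 0.0)
#   # -> array([ 1.])  (amplitude at the centre, zero offset)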
start1 = time.time()
fields = glob.glob('directory/to/place/cross/correlation/catalogues/*_crosscorrelation.txt')
fields.sort()
cor_all = []
for i in fields:
cor_all.append(np.loadtxt(i))
cor_all = np.array(cor_all)
end1 = time.time()
print "it took " + str(round(end1-start1)) + " seconds to import all data" + '\n'
dRA = []
dDEC = []
e_dRA = []
e_dDEC = []
peak_cc = []
arcsec_pixel = 3.0
for i in range(len(fields)):
print fields[i]
print int(np.max(cor_all[i]))
#append peak cross correlation to array
peak_cc.append(np.max(cor_all[i]))
#RA and DEC spaces
RA_linear = np.arange(-int(len(cor_all[i][0])/2.0), int(len(cor_all[i][0])/2.0)+1)
DEC_linear = np.arange(-int(len(cor_all[i][0])/2.0), int(len(cor_all[i][0])/2.0)+1)
#RA-DEC meshgrid
RA, DEC = np.meshgrid(RA_linear, DEC_linear)
################
#2D GAUSSIAN FIT
################
#limits for fitting (adjust "lower" as needed)
lower = -5
upper = -1*lower + 1
    peak_row = np.where(cor_all[i] == np.max(cor_all[i]))[0][0]
    peak_col = np.where(cor_all[i] == np.max(cor_all[i]))[1][0]
    RA_fitting, DEC_fitting = np.meshgrid(RA_linear[peak_col+lower:peak_col+upper], DEC_linear[peak_row+lower:peak_row+upper])
    #fitting (adjust the initial guess as needed)
    popt, pcov = cf(twoD_Gaussian, (RA_fitting, DEC_fitting),
                    cor_all[i][np.ix_(range(peak_row+lower, peak_row+upper), range(peak_col+lower, peak_col+upper))].reshape((upper-lower)**2),
                    p0=(np.max(cor_all[i]), 0, 0, 5, 5, 2, 100))
#fitted data
data_fitted = twoD_Gaussian((RA, DEC), *popt)
#append fitted positional offset to arrays
dRA.append(popt[1])
dDEC.append(popt[2])
e_dRA.append(np.sqrt(np.diag(pcov))[1])
e_dDEC.append(np.sqrt(np.diag(pcov))[2])
#print values of the variables of the 2D Gaussian in terms of arcseconds
#x offset, x offset uncertainty, y offset, y offset uncertainty
    print round(popt[1]*arcsec_pixel,3), round(np.sqrt(np.diag(pcov))[1]*arcsec_pixel,3), round(popt[2]*arcsec_pixel,3), round(np.sqrt(np.diag(pcov))[2]*arcsec_pixel,3), '\n'
##########
#HEAT MAP
#########
#figure
pl.figure()
pl.subplots_adjust(hspace=0, wspace=0)
pl.rc('font', size=13)
#heat map
pl.pcolormesh(RA, DEC, cor_all[i])
#colour bar
cb = pl.colorbar(cmap='jet')
cb.set_label('cross correlation')
pl.clim(vmin=np.min(cor_all[i]),vmax=np.max(cor_all[i]))
#vertical and horizontal lines going through (0,0)
pl.axhline(y = 0.5, ls = '--', c = 'k')
pl.axvline(x = 0.5, ls = '--', c = 'k')
#contours of 2D Gaussian fit
pl.contour(RA+0.5, DEC+0.5, data_fitted.reshape(len(RA), len(DEC)), [0.5*data_fitted.max(),0.75*data_fitted.max(),0.85*data_fitted.max(),0.95*data_fitted.max(),0.98*data_fitted.max(),0.99*data_fitted.max()], colors='w')
#peak of the 2D gaussian fit position
pl.scatter(popt[1]+0.5,popt[2]+0.5,s=20,facecolor='w',edgecolor='w',alpha=1)
#limits
pl.xlim(-10,10)
pl.ylim(-10,10)
#labels: units are in terms of pixels
pl.xlabel(r'RA (pixels)') #this is actually -RA (i.e. the RA is flipped)
pl.ylabel(r'DEC (pixels)')
#save figure
    pl.savefig('/directory/to/place/your/figures/'+fields[i].split('/')[-1][:-4]+'.png',bbox_inches='tight')
pl.show()
pl.close()
dRA = -arcsec_pixel*np.array(dRA)
dDEC = arcsec_pixel*np.array(dDEC)
e_dRA = arcsec_pixel*np.array(e_dRA)
e_dDEC = arcsec_pixel*np.array(e_dDEC)
offset = np.sqrt(dRA**2 + dDEC**2)
e_offset = 1.0/offset * np.sqrt(dRA**2 * e_dRA**2 + dDEC**2 * e_dDEC**2)
cat_offset = open('/directory/to/place/offset/catalogue/image_offsets.txt','w')
cat_offset.write('ID\tdRA\te_dRA\tdDEC\te_dDEC\toffset\te_offset\n--\t---\t-----\t----\t------\t------\t--------\n')
for i in range(len(offset)):
    cat_offset.write(fields[i].split('/')[-1][:-4] + '\t' + str(dRA[i]) + '\t'+ str(e_dRA[i]) + '\t' + str(dDEC[i]) + '\t'+ str(e_dDEC[i]) + '\t' + str(offset[i]) + '\t' + str(e_offset[i]) + '\n')
cat_offset.close()
bins = np.linspace(0,0.16,11)
fig = pl.figure()
ax = fig.add_subplot(1,1,1)
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.hist(peak_cc,bins=np.logspace(np.log10(min(peak_cc)), np.log10(max(peak_cc)),10))
ax.set_xscale('log')
pl.xlabel('Peak cross correlation')
pl.ylabel('Number')
pl.savefig('/directory/to/place/figures/peak_cc.pdf',bbox_inches='tight')
pl.show()
pl.close()
pl.figure()
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.semilogx(peak_cc, offset, 'bo')
pl.ylabel('Offset (arcsec)')
pl.xlabel('Peak cross correlation')
pl.savefig('/directory/to/place/figures/cc_offset.pdf',bbox_inches='tight')
pl.show()
pl.close()
pl.figure()
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.semilogx(peak_cc, e_offset/offset*100, 'bo')
pl.xlabel('Peak cross correlation')
pl.ylabel(r'Offset uncertainty (%)')
pl.savefig('/directory/to/place/figures/offset_uncertainty_percent_cc.pdf',bbox_inches='tight')
pl.show()
pl.close()
pl.figure()
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.plot(offset, e_offset, 'bo')
pl.xlabel('Offset (arcsec)')
pl.ylabel(r'Offset uncertainty (arcsec)')
pl.savefig('/directory/to/place/figures/offset_uncertainty_offset1.pdf',bbox_inches='tight')
pl.show()
pl.close()
pl.figure()
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.loglog(offset, e_offset/offset*100, 'bo')
pl.plot(np.linspace(1E-3,2E-1,100),3.25*np.linspace(1E-3,2E-1,100)**(-1),'k-',label=r'$\propto$ offset$^{-1}$') #adjust parameters as needed
pl.xlabel('Offset (arcseconds)')
pl.ylabel(r'Offset uncertainty (%)')
pl.xlim(1E-3,2E-1)
pl.legend(loc=1)
pl.savefig('/directory/to/place/figures/offset_uncertainty_offset2.pdf',bbox_inches='tight')
pl.show()
pl.close()
pl.figure()
pl.subplots_adjust(hspace=0,wspace=0)
pl.rc('font', size=13)
pl.hist(offset,bins=bins)
pl.xlabel('Offset (arcseconds)')
pl.ylabel('Number')
pl.xlim(0,0.16)
pl.savefig('/directory/to/place/figures/offset_distribution.pdf',bbox_inches='tight')
pl.show()
pl.close()
|
"""`cssmin` - A Python port of the YUI CSS compressor.
Home page: https://github.com/zacharyvoase/cssmin
License: BSD: https://github.com/zacharyvoase/cssmin/blob/master/LICENSE
Original author: Zachary Voase
Modified for inclusion into web2py by: Ross Peoples <ross.peoples@gmail.com>
"""
from StringIO import StringIO # The pure-Python StringIO supports unicode.
import re
__version__ = '0.1.4'
def remove_comments(css):
"""Remove all CSS comment blocks."""
iemac = False
preserve = False
comment_start = css.find("/*")
while comment_start >= 0:
# Preserve comments that look like `/*!...*/`.
        # Slicing is used to make sure we don't get an IndexError.
preserve = css[comment_start + 2:comment_start + 3] == "!"
comment_end = css.find("*/", comment_start + 2)
if comment_end < 0:
if not preserve:
css = css[:comment_start]
break
elif comment_end >= (comment_start + 2):
if css[comment_end - 1] == "\\":
# This is an IE Mac-specific comment; leave this one and the
# following one alone.
comment_start = comment_end + 2
iemac = True
elif iemac:
comment_start = comment_end + 2
iemac = False
elif not preserve:
css = css[:comment_start] + css[comment_end + 2:]
else:
comment_start = comment_end + 2
comment_start = css.find("/*", comment_start)
return css
def remove_unnecessary_whitespace(css):
"""Remove unnecessary whitespace characters."""
def pseudoclasscolon(css):
"""
Prevents 'p :link' from becoming 'p:link'.
Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
translated back again later.
"""
regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
match = regex.search(css)
while match:
css = ''.join([
css[:match.start()],
match.group().replace(":", "___PSEUDOCLASSCOLON___"),
css[match.end():]])
match = regex.search(css)
return css
css = pseudoclasscolon(css)
# Remove spaces from before things.
css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)
# If there is a `@charset`, then only allow one, and move to the beginning.
css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
# Put the space back in for a few cases, such as `@media screen` and
# `(-webkit-min-device-pixel-ratio:0)`.
css = re.sub(r"\band\(", "and (", css)
# Put the colons back.
css = css.replace('___PSEUDOCLASSCOLON___', ':')
# Remove spaces from after things.
css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)
return css
def remove_unnecessary_semicolons(css):
"""Remove unnecessary semicolons."""
return re.sub(r";+\}", "}", css)
def remove_empty_rules(css):
"""Remove empty rules."""
return re.sub(r"[^\}\{]+\{\}", "", css)
def normalize_rgb_colors_to_hex(css):
"""Convert `rgb(51,102,153)` to `#336699`."""
regex = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
match = regex.search(css)
while match:
colors = map(lambda s: s.strip(), match.group(1).split(","))
hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))
css = css.replace(match.group(), hexcolor)
match = regex.search(css)
return css
def condense_zero_units(css):
"""Replace `0(px, em, %, etc)` with `0`."""
return re.sub(r"([\s:])(0)(px|em|%|in|cm|mm|pc|pt|ex)", r"\1\2", css)
def condense_multidimensional_zeros(css):
"""Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`."""
css = css.replace(":0 0 0 0;", ":0;")
css = css.replace(":0 0 0;", ":0;")
css = css.replace(":0 0;", ":0;")
# Revert `background-position:0;` to the valid `background-position:0 0;`.
css = css.replace("background-position:0;", "background-position:0 0;")
return css
def condense_floating_points(css):
"""Replace `0.6` with `.6` where possible."""
return re.sub(r"(:|\s)0+\.(\d+)", r"\1.\2", css)
def condense_hex_colors(css):
"""Shorten colors from #AABBCC to #ABC where possible."""
regex = re.compile(r"([^\"'=\s])(\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])")
match = regex.search(css)
while match:
first = match.group(3) + match.group(5) + match.group(7)
second = match.group(4) + match.group(6) + match.group(8)
if first.lower() == second.lower():
css = css.replace(match.group(), match.group(1) + match.group(2) + '#' + first)
match = regex.search(css, match.end() - 3)
else:
match = regex.search(css, match.end())
return css
def condense_whitespace(css):
"""Condense multiple adjacent whitespace characters into one."""
return re.sub(r"\s+", " ", css)
def condense_semicolons(css):
"""Condense multiple adjacent semicolon characters into one."""
return re.sub(r";;+", ";", css)
def wrap_css_lines(css, line_length):
"""Wrap the lines of the given CSS to an approximate length."""
lines = []
line_start = 0
for i, char in enumerate(css):
# It's safe to break after `}` characters.
if char == '}' and (i - line_start >= line_length):
lines.append(css[line_start:i + 1])
line_start = i + 1
if line_start < len(css):
lines.append(css[line_start:])
return '\n'.join(lines)
def cssmin(css, wrap=None):
css = remove_comments(css)
css = condense_whitespace(css)
# A pseudo class for the Box Model Hack
# (see http://tantek.com/CSS/Examples/boxmodelhack.html)
css = css.replace('"\\"}\\""', "___PSEUDOCLASSBMH___")
css = remove_unnecessary_whitespace(css)
css = remove_unnecessary_semicolons(css)
css = condense_zero_units(css)
css = condense_multidimensional_zeros(css)
css = condense_floating_points(css)
css = normalize_rgb_colors_to_hex(css)
css = condense_hex_colors(css)
if wrap is not None:
css = wrap_css_lines(css, wrap)
css = css.replace("___PSEUDOCLASSBMH___", '"\\"}\\""')
css = condense_semicolons(css)
return css.strip()
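# Example: cssmin("p  {  color : #aabbcc ; }") -> "p{color:#abc}"
# (whitespace condensed, the trailing semicolon dropped, the hex color shortened).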
def main():
import optparse
import sys
p = optparse.OptionParser(
prog="cssmin", version=__version__,
usage="%prog [--wrap N]",
description="""Reads raw CSS from stdin, and writes compressed CSS to stdout.""")
p.add_option(
'-w', '--wrap', type='int', default=None, metavar='N',
help="Wrap output to approximately N chars per line.")
options, args = p.parse_args()
sys.stdout.write(cssmin(sys.stdin.read(), wrap=options.wrap))
if __name__ == '__main__':
main()
|
import os
from flask import Flask, request, redirect, url_for, send_file
from werkzeug.utils import secure_filename
import src
UPLOAD_FOLDER = 'uploads/'
ALLOWED_EXTENSIONS = set(['xlsx'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/')
def date_form():
    return '''
<!doctype html>
<head>
<style>
body {
text-align:center;
            margin-top: 50em;
font-family: "Helvetica", "Arial", sans-serif;
line-height: 1.5;
padding: 4em 1em;
}
</style>
</head>
<body>
<h1>Goodwill Schedule Manager</h1>
        <form action="" method="POST">
<p><input type=text name=date></p>
<p><input type=submit name=dateform value=Send></p>
</form>
</body>
</html>
'''
@app.route('/', methods=['POST'])
def date_form_post():
text = request.form['date']
return redirect(url_for('upload_file', date=text))
@app.route('/uploads/<date>', methods=['GET', 'POST'])
def upload_file(date):
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file', filename=filename, date=date))
return '''
<!doctype html>
<head>
<style>
body {
text-align:center;
            margin-top: 50em;
font-family: "Helvetica", "Arial", sans-serif;
line-height: 1.5;
padding: 4em 1em;
}
</style>
</head>
<body>
<title>Upload new File</title>
<h1> Goodwill Schedule Manager</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file id=files class=hidden name=file></p>
        <p><input type=submit value="Make a Schedule"></p>
</form>
    </body>
    </html>
    '''
@app.route('/uploads/<date>/<filename>')
def uploaded_file(filename, date):
date = date.replace('-', '/')
src.create_schedule(date)
os.remove(src.folder + '/stafflist.db')
return send_file(src.folder + '/updated_sched3.xlsx', as_attachment=True)
if __name__ == '__main__':
app.run(debug=True)
|
"""Run the app."""
from app import app
app.run(debug=True)
|
import csv
import sys
import os
# Usage: python rename.py <csv_file>
# Each CSV row is expected to hold a source path followed by a destination path.
readcsvf = sys.argv[1]
with open(readcsvf) as f:
    reader = csv.reader(f)
    for line in reader:
        source = line[0]
        dest = line[1]
        os.rename(source, dest)
|
import json
import signal
import sys
import logging
from dateutil import parser, tz
from datetime import datetime, timedelta
from collections import OrderedDict, defaultdict
log = logging.getLogger(__name__)
def read_http(res):
while True:
data = res.read(4*1024*1024)
if not data:
break
yield data
def utc_from_iso8601(
dt_str,
naive=False,
_parser=None,
):
if _parser is None:
_parser = parser
# dateutil.parser.parse returns today's date when fed the empty string
if dt_str == '':
raise ValueError('string cannot be empty')
dt = _parser.parse(dt_str)
if dt.tzinfo is None:
# We cannot convert to UTC without knowing a timezone
# A lack of timezone implies local time.
# http://en.wikipedia.org/wiki/ISO_8601#Time_zone_designators
raise ValueError(
            'string must contain timezone information'
)
if dt.tzinfo.utcoffset(dt) is not None:
dt = dt.astimezone(tz.tzutc())
if naive:
dt = dt.replace(tzinfo=None)
return dt
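# Example: a timestamp carrying a +02:00 offset is normalized to UTC:
#   utc_from_iso8601('2014-04-01T12:00:00+02:00', naive=True)
#   -> datetime.datetime(2014, 4, 1, 10, 0)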
def utc_to_local(dt, naive=False):
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) != timedelta(0):
raise ValueError('Datetime is not in UTC')
dt = dt.astimezone(tz.tzlocal())
if naive:
dt = dt.replace(tzinfo=None)
return dt
def read_json(res):
data = [datum for datum in read_http(res)]
data = ''.join(data)
data = json.loads(data)
return data
def signal_handler(func):
def handler(signum, frame):
msg = 'Unknown'
if signum == signal.SIGTERM:
msg = 'Terminated'
elif signum == signal.SIGCONT:
msg = 'Continued'
log.warn(
'{msg}'.format(
msg=msg,
)
)
sys.exit(1)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGCONT, handler)
return func
def takeslice(iterable, step=1):
last_i = 0
for i in xrange(0, len(iterable), step):
i += step
yield iterable[last_i:i]
last_i = i
def print_odict(src):
def iso_format(dt):
if isinstance(dt, datetime):
return dt.isoformat()
def _print_odict(odict):
gen = _print_odict_rec(odict)
if gen is not None:
print 'OrderedDict(['
for pair in gen:
print str(pair) + ','
print '])'
def _print_odict_rec(odict):
if isinstance(odict, dict):
for k,v in odict.iteritems():
if isinstance(v, dict):
print "('%s', OrderedDict([" % k
pair = _print_odict_rec(v)
for p in pair:
if p is not None:
print str(p) + ','
print ']),'
print '),'
else:
if isinstance(v, unicode):
v = str(v)
try:
datetime.strptime(
v,
'%Y-%m-%dT%H:%M:%S.%f',
)
except ValueError:
pass
yield str(k), v
else:
yield None
dumped = json.dumps(src, default=iso_format)
loaded = json.loads(dumped, object_hook=OrderedDict)
_print_odict(loaded)
class DefaultOrderedDict(defaultdict, OrderedDict):
def __init__(self, default_factory):
defaultdict.__init__(self, default_factory)
OrderedDict.__init__(self)
|
from importlib import import_module
import os
import logging
PLUGIN_NAMESPACE = 'apis'
log = logging.getLogger('dash')
_loaded = []
def load_plugins(names):
plugins = list_available_plugins()
for name in names:
if name not in plugins:
log.warn("Plugin '{0}' not found in list of available plugins".format(name))
else:
obj = _load_plugin(name)
_loaded.append(obj())
def _load_plugin(name):
modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name)
namespace = import_module(modname)
return getattr(namespace, name)
def reload_plugins(name=''):
    plugins = list_available_plugins()
    if name == '':
        # Reload everything currently loaded; plugin classes are named after
        # their modules, so the names can be recovered from the instances.
        names = [type(obj).__name__ for obj in _loaded]
    else:
        names = [name]
    for name in names:
        if name not in plugins:
            log.warn("Plugin '{0}' not found in list of available plugins".format(name))
        else:
            obj = _load_plugin(name)
            _loaded.append(obj())
            # TODO add in module info here
            log.info("Loaded '{0}'".format(name))
def unload_plugins():
pass
def list_available_plugins():
available_plugins = []
for root, dirs, files in os.walk(PLUGIN_NAMESPACE):
for f in files:
if f[-3:] == '.py' and f != '__init__.py':
available_plugins.append(f[:-3])
return available_plugins
def list_loaded_plugins():
return _loaded
|
"""OracleDB Mappings."""
from asciipic.db.oracle import manager
from asciipic.db.oracle import factory
ORACLE_DB = manager.OracleDBManager()
for table in factory.TableFactory.get_items():
ORACLE_DB.register(table)
|
def mosaic(img, pad=True):
"""
Create a 2-D mosaic of images from an n-D image. An attempt is made to
make the resulting 2-D image as square as possible.
Parameters
----------
    img : ndarray
        n-dimensional image to be tiled into a mosaic. All but the last two
        dims are lumped.
    pad : bool, optional
        If True (default), pad with blank tiles so any number of images fits
        a near-square grid; if False, choose grid dimensions that exactly
        factor the number of images.
Returns
-------
mosaic : 2-d image
Tiled mosaic of images.
"""
from numpy import ix_, array, zeros, arange
from math import sqrt, floor, ceil
if len(img.shape) <= 2: # already 2-D, so skip the rest
return img
img = array(img)
nr, nc = img.shape[-2:] # take off last two dimensions, rest are lumped.
    print('img.shape=', img.shape, end='')
img = img.reshape((-1, nr, nc))
print(' -> ', img.shape)
nz = img.shape[0]
if pad:
n = int(ceil(sqrt(nz))) # starting guess for tiling dimensions
M = zeros((nr*n, nc*n), dtype=img.dtype)
for j2 in range(n): # stick them together
for j1 in range(n): # there is probably a better way to do this
if j1+j2*n >= nz:
break
rows = nr*j2 + arange(nr)
cols = nc*j1 + arange(nc)
M[ix_(rows, cols)] = img[j1+j2*n, :, :]
return M
else:
n = int(floor(sqrt(nz))) # starting guess for tiling dimensions
# find largest integer less than or equal to sqrt that evenly divides the
# number of 2-d images in the n-d image.
m = [x for x in range(1, n+1) if nz % x == 0]
m = m[-1]
j = nz // m # figure out the most square dimensions
n2 = min(j, m)
n1 = max(j, m)
M = zeros((nr*n2, nc*n1), dtype=img.dtype)
for j2 in range(n2): # stick them together
for j1 in range(n1): # there is probably a better way to do this
rows = nr*j2 + arange(nr)
cols = nc*j1 + arange(nc)
M[ix_(rows, cols)] = img[j1+j2*n1, :, :]
return M
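# Shape example: a (6, 4, 4) stack tiles into a (12, 12) mosaic with pad=True
# (a 3x3 grid, three tiles left blank) and an (8, 12) mosaic with pad=False
# (an exact 2x3 grid).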
def main():
import scipy.misc
import matplotlib.pyplot as pl
import numpy as np
img = scipy.misc.face()
nz = 64
n = img.shape[0]
img = np.transpose(np.tile(img, [1, 1, nz]), (2,0,1))
print(f'img.shape={img.shape}')
pl.imshow(mosaic(img), cmap='gray')
pl.show()
if __name__ == '__main__':
main()
|
'''Trains a simple deep NN on the MNIST dataset using **Focal Loss**.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from losses import focal_loss
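# `losses` is a local module that is not shown here. For reference, a minimal
# sketch of a categorical focal loss (Lin et al., 2017) on the Keras backend
# might look like the helper below; this is an assumption about the imported
# implementation, not a copy of it.
import keras.backend as K
def _focal_loss_sketch(y_true, y_pred, gamma=2., eps=1e-7):
    # Clip to avoid log(0), then down-weight well-classified examples by the
    # modulating factor (1 - p)^gamma.
    y_pred = K.clip(y_pred, eps, 1. - eps)
    return -K.sum(y_true * K.pow(1. - y_pred, gamma) * K.log(y_pred), axis=-1)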
batch_size = 128
num_classes = 10
epochs = 20
gamma = 5.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss=lambda y, y_hat: focal_loss(y, y_hat, gamma),
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
import sys
import math
import kfasta
from array import array
try:
from PIL import Image
except ImportError:
print "You don't have PIL (the Python Imaging Library) installed."
print "Please check README.txt for instructions on how to install PIL."
sys.exit(-1)
class RollingHash:
def __init__(self, s):
self.HASH_BASE = 7
self.seqlen = len(s)
n = self.seqlen - 1
h = 0
for c in s:
            # ord returns the Unicode code point of the character. This is
            # not optimal in terms of space, but it is enough to distinguish
            # one combination of characters from another.
h += ord(c) * (self.HASH_BASE ** n)
n -= 1
self.curhash = h
# Returns the current hash value.
def current_hash(self):
return self.curhash
# Updates the hash by removing previtm and adding nextitm. Returns the updated
# hash value.
def slide(self, previtm, nextitm):
self.curhash = (self.curhash * self.HASH_BASE) + ord(nextitm)
self.curhash -= ord(previtm) * (self.HASH_BASE ** self.seqlen)
return self.curhash
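    # Example: sliding preserves the invariant that the running hash equals a
    # fresh hash of the new window:
    #   rh = RollingHash("ATC")
    #   rh.slide("A", "G") == RollingHash("TCG").current_hash()  # True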
class Array2D:
def __init__(self, typecode, w, h, defaultval):
self.arr = array(typecode, [defaultval]*(w*h))
self.w = w
self.h = h
def put(self, x, y, v):
assert x >= 0 and x < self.w
assert y >= 0 and y < self.h
self.arr[(y*self.w)+x] = v
def incr(self, x, y):
assert x >= 0 and x < self.w
assert y >= 0 and y < self.h
self.arr[(y*self.w)+x] += 1
def get(self, x, y):
assert x >= 0 and x < self.w
assert y >= 0 and y < self.h
return self.arr[(y*self.w)+x]
def max(self):
return max(self.arr)
def buildComparisonImage(filename, w, h, alen, blen, matches, remapfn=lambda x:math.sqrt(math.sqrt(x))):
arr = Array2D('L', w, h, 0L)
print "Sequence A length: " + str(alen)
print "Sequence B length: " + str(blen)
abinsize = int(math.ceil(alen / float(w)))
bbinsize = int(math.ceil(blen / float(h)))
assert abinsize > 0 and bbinsize > 0
print "Binning matches..."
for m in matches:
#print m, (abinsize, bbinsize), (m[0]//abinsize, m[1]//bbinsize), (w, h)
arr.incr(m[0] // abinsize, m[1] // bbinsize)
print "...done binning matches."
print "Normalizing and plotting results..."
maxval = float(arr.max())
img = Image.new('RGB', (w,h))
for y in range(0, h):
for x in range(0, w):
val = 255 - int(math.ceil(remapfn((arr.get(x,y) / maxval)) * 255.0))
img.putpixel((x,y), (val,val,val))
print "...done normalizing and plotting."
img.save(filename)
def compareSequences(getExactSubmatches, imgfile, imgsize, afile, bfile, k, m):
a = kfasta.FastaSequence(afile)
b = kfasta.FastaSequence(bfile)
matches = getExactSubmatches(a, b, k, m)
buildComparisonImage(imgfile, imgsize[0], imgsize[1],
kfasta.getSequenceLength(afile),
kfasta.getSequenceLength(bfile), matches)
|
'''
This code does the following:
1. Fixes the broken href tags for figures
'''
import os
from lxml import etree
path = '/home/heather/Desktop/books/physical-sciences-12/afrikaans/build/epubs/science12/OPS/xhtml/science12'
def fig_ref_fix(xml):
for a in xml.findall('.//a'): # find all the a tags
tempText = a.text
tempId = a.attrib['href']
tempTail = a.tail
try:
            if a.text[:3] != 'css' and a.text[:3] != 'htt': # skip the css and http links
a.clear() # clear the a tag
a.text = tempText
tempId = tempId.replace(':', '-')
a.set('href', tempId)
a.tail = tempTail
except TypeError:
continue
return xml
for file_name in os.listdir(path):
full_file_name = '{}/{}'.format(path, file_name)
# Skip directories
if os.path.isdir(full_file_name):
continue
xml = etree.HTML(open(full_file_name, 'r').read())
fileText = None
xml = fig_ref_fix(xml)
fileText = etree.tostring(xml, pretty_print=True)
# target_filename = '{}/heather.txt'.format(path)
    if fileText is not None:
with open(full_file_name, 'w') as file:
file.write(fileText)
|
from boto.s3.connection import S3Connection
# Supply your own AWS key pair here, or better, load it from the environment
# or a boto config file; never commit real credentials.
conn = S3Connection('<AWS_ACCESS_KEY_ID>', '<AWS_SECRET_ACCESS_KEY>')
bucket = conn.get_bucket('nimbus-results')
for key in bucket.list():
bucket.delete_key(key.name)
print "Bucket nimbus-results is now empty"
|
"""
Environment-variable inventory source.
If an environment variable is undefined, the corresponding inventory variable
is left undefined as well. Only group-level environment variables are
supported; host-level environment variables are not supported. Environment
variable names must be uppercase.
The environment variable "FOO" is assigned to the variable "foo".
```yaml
env_vars:
- foo
```
The environment variable "FOO" is assigned to the variable "bar".
```yaml
env_vars:
bar: foo
```
"""
import argparse
import json
import os
import sys
import yaml
def main():
parser = get_parser()
args = parser.parse_args()
if "ENV" not in os.environ:
sys.stdout.write("[ERROR] The environment variable 'ENV' is required.\n")
sys.exit(1)
env = os.environ["ENV"]
if args.list:
do_list(env)
if args.host:
do_host(env, args.host)
def do_host(env, hostname):
ret = {}
json.dump(ret, sys.stdout)
def do_list(env):
ret = {}
with open("inventories/{}.yml".format(env)) as r:
        groups = [("all", yaml.safe_load(r)["all"])]
while groups:
group_name, group = groups.pop()
node, children = parse_group(group)
ret[group_name] = node
for name, child in children.items():
groups.append((name, child))
json.dump(ret, sys.stdout)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
parser.add_argument("--host")
return parser
def parse_group(group):
env_vars = group.get("env_vars", {})
ev = {}
if env_vars is None:
pass
elif isinstance(env_vars, list):
for e in env_vars:
k = e.upper()
if k in os.environ:
ev[e] = os.environ[k]
elif isinstance(env_vars, dict):
for k,v in env_vars.items():
env_name = v.upper()
if env_name in os.environ:
ev[k] = os.environ[env_name]
children = group.get("children", {})
hostvars = group.get("hosts", {})
if hostvars is None:
hostvars = {}
    ret = {
        "hosts": list(hostvars.keys()) if isinstance(hostvars, dict) else hostvars,
        "vars": ev,
        "children": list(children.keys())
    }
return ret, children
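# Example invocation (the script filename here is illustrative): with
# inventories/dev.yml defining a group whose env_vars lists `foo`, and FOO
# exported in the shell,
#   ENV=dev FOO=bar python env_inventory.py --list
# emits JSON in which that group's "vars" contains {"foo": "bar"}.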
if __name__ == "__main__":
main()
|
def patch_range(line, strip = True):
"""This function returns a list of parts that include ranges (i.e. [2:5]) if
they were present in the original line."""
pieces = line.split(':')
parts = []
tmp = ''
    for part in pieces:
        if part.count('[') > part.count(']') and not tmp:
            # An unclosed '[' means the split on ':' cut a range like [2:5];
            # stash this piece and rejoin it with the next one.
            tmp = part
            continue
        if tmp:
            part = tmp + ':' + part
            tmp = ''
        if strip:
            part = part.strip()
        parts.append(part)
if right_column_check(line):
parts = [parts[0]]
return parts
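# Example: patch_range("x[0:2]: foo") -> ['x[0:2]', 'foo']; the range inside
# the brackets survives the split on ':'.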
def heading_check(lines, index):
"""This function determines if the line at the provided index is a heading."""
    if index > 0:
        pre_line = lines[index-1]
    else:
        pre_line = ''
    try:
        next_line = lines[index+1]
    except IndexError:
        next_line = ''
check = (not(pre_line) and not(next_line))
return check
def title_check(lines, index):
"""This functions checks to see if the index is pointing at the title line."""
    if index > 0:
        pre_line = lines[index-1]
    else:
        pre_line = ''
    try:
        next_line = lines[index+1]
    except IndexError:
        next_line = ''
check = (not(pre_line) and ("=======" in next_line))
return check
def right_column_check(line):
    """This function determines if the line is part of an explanation,
    i.e. it belongs in the right-hand column"""
#line = lines[index]
return (('\t' in line) or (' '*4 == line[0:4]))
def comment_check(lines, index, in_comment = False):
    """This function determines if the line is a comment rather than an example or a subject header"""
    if index > 0:
        pre_line = lines[index-1]
    else:
        pre_line = ''
    try:
        next_line = lines[index+1]
    except IndexError:
        next_line = ''
cur_line = lines[index]
#colon_not_in_line = ":" not in cur_line
line_before_multipart = len(patch_range(pre_line))>1
exists = lines[index] !=''
blank_line_before = not(pre_line)
parts = patch_range(cur_line)
cur_line_multipart = len(parts)>1
pre_parts = patch_range(pre_line)
pre_multi_part = len(pre_parts)
raw_parts = patch_range(cur_line, False)
    first_part_comment = raw_parts[0].strip()==raw_parts[0] #like "Remember: This is ..."
not_right_hand_column = not(right_column_check(cur_line))
next_not_right_hand_column = not(right_column_check(next_line))
check0 = in_comment
check1 = not(cur_line_multipart) and in_comment and len(parts[0])>20
check2 = not(cur_line_multipart) and blank_line_before
check3 = blank_line_before and cur_line_multipart and first_part_comment
check4 = not_right_hand_column
check5 = next_not_right_hand_column
check6 = "=====" not in lines[index]
line = lines[index]
return ((check0 or check1 or check2 or check3) and check4 and check5 and check6 and exists)
def multiline_start_check(lines, index, in_comments = True):
    """This function determines if the current line is actually the start of a multiple line example"""
    try:
        next_line = lines[index+1]
    except IndexError:
        next_line = ''
cur_line = lines[index]
if cur_line !="":
not_just_cmd = cur_line.strip()[-1]!=':'
else:
not_just_cmd = True
not_in_comments = not in_comments
not_empty = cur_line.strip() != ''
not_markdown = '=====' not in cur_line
parts = patch_range(cur_line)
one_part = len(parts) == 1
not_right_hand_column = not(right_column_check(cur_line))
next_right_hand = right_column_check(next_line)
check1 = one_part and not_right_hand_column #and not_just_cmd
check2 = not_empty and not_markdown and not_in_comments
check3 = next_right_hand #and not_just_cmd
return (check1 and check2) or check3
def check_for_range(lines, index):
"""Check for range text, i.e. [2:5], in the line """
parts = lines[index].split(':')
check = False
for part in parts:
check = check or ('[' in part and ']' not in part)
return check
|
""" Fixes wrong package names with pacman or yaourt.
For example the `llc` program is in package `llvm` so this:
yaourt -S llc
should be:
yaourt -S llvm
"""
from thefuck.utils import replace_command
from thefuck.specific.archlinux import get_pkgfile, archlinux_env
def match(command):
return (command.script_parts
and (command.script_parts[0] in ('pacman', 'yaourt')
or command.script_parts[0:2] == ['sudo', 'pacman'])
and 'error: target not found:' in command.output)
def get_new_command(command):
pgr = command.script_parts[-1]
return replace_command(command, pgr, get_pkgfile(pgr))
enabled_by_default, _ = archlinux_env()
|
from mfr.core import FileHandler, get_file_extension
from mfr_docx.render import render_html
__version__ = '0.1.0'
EXTENSIONS = [
'.docx',
]
class Handler(FileHandler):
# Renderers and exporters are callables
renderers = {
'html': render_html
}
def detect(self, fp):
return get_file_extension(fp.name) in EXTENSIONS
|
"""PyAudio example: Record a few seconds of audio and save to a WAVE file."""
import pyaudio
import wave
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
import os
backend = os.environ.get("CRYPTO_BACKEND", "cryptography")
if backend == "cryptodome":
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA1, SHA256
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Util import Padding
AES_BLOCK_BYTES = AES.block_size
def aes_cbc_pkcs7_encrypt(data, key, iv):
return AES.new(key, AES.MODE_CBC, iv).encrypt(Padding.pad(data, AES.block_size))
def aes_cbc_pkcs7_decrypt(data, key, iv):
return Padding.unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(data), AES.block_size)
def aes_cfb8_encrypt(data, key, iv):
return AES.new(key, AES.MODE_CFB, iv).encrypt(data)
def aes_cfb8_decrypt(data, key, iv):
return AES.new(key, AES.MODE_CFB, iv).decrypt(data)
def aes_cfb128_encrypt(data, key, iv):
return AES.new(key, AES.MODE_CFB, iv, segment_size=128).encrypt(data)
def aes_cfb128_decrypt(data, key, iv):
return AES.new(key, AES.MODE_CFB, iv, segment_size=128).decrypt(data)
def hmac_sha256(data, key):
return HMAC.new(key, data, SHA256).digest()
def pbkdf2_sha1(password, salt, iter, length):
return PBKDF2(password, salt, length, iter, hmac_hash_module=SHA1)
elif backend == "cryptography":
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, CFB, CFB8
from cryptography.hazmat.primitives.hashes import SHA1, SHA256
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.padding import PKCS7
AES_BLOCK_BYTES = AES.block_size // 8
def aes_cbc_pkcs7_encrypt(data, key, iv):
p = PKCS7(AES.block_size).padder()
data = p.update(data) + p.finalize()
c = Cipher(AES(key), CBC(iv)).encryptor()
return c.update(data) + c.finalize()
def aes_cbc_pkcs7_decrypt(data, key, iv):
c = Cipher(AES(key), CBC(iv)).decryptor()
data = c.update(data) + c.finalize()
p = PKCS7(AES.block_size).unpadder()
return p.update(data) + p.finalize()
def aes_cfb8_encrypt(data, key, iv):
c = Cipher(AES(key), CFB8(iv)).encryptor()
return c.update(data) + c.finalize()
def aes_cfb8_decrypt(data, key, iv):
c = Cipher(AES(key), CFB8(iv)).decryptor()
return c.update(data) + c.finalize()
def aes_cfb128_encrypt(data, key, iv):
c = Cipher(AES(key), CFB(iv)).encryptor()
return c.update(data) + c.finalize()
def aes_cfb128_decrypt(data, key, iv):
c = Cipher(AES(key), CFB(iv)).decryptor()
return c.update(data) + c.finalize()
def hmac_sha256(data, key):
h = HMAC(key, SHA256())
h.update(data)
return h.finalize()
def pbkdf2_sha1(password, salt, iter, length):
k = PBKDF2HMAC(SHA1(), length, salt, iter)
return k.derive(password)
else:
raise ValueError("unsupported backend %r" % backend)
def random_bytes(length):
return os.urandom(length)
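# A minimal round-trip self-test (not part of the original module); it should
# pass with either backend selected above.
if __name__ == "__main__":
    key = random_bytes(32)  # AES-256 key
    iv = random_bytes(AES_BLOCK_BYTES)
    ct = aes_cbc_pkcs7_encrypt(b"attack at dawn", key, iv)
    assert aes_cbc_pkcs7_decrypt(ct, key, iv) == b"attack at dawn"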
|
import os
from flask import Flask, make_response, request, session, render_template, send_from_directory, redirect
from werkzeug.utils import secure_filename
import json
TREE_BASE_FILENAME = 'flare.json'
DATA_DIR = 'data'
DATA_DIR_PATH = '../{}'.format(DATA_DIR)
TREE_FILENAME = '{}/{}'.format(DATA_DIR_PATH, TREE_BASE_FILENAME)
ALLOWED_EXTENSIONS = set(['json'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
app = Flask(__name__, static_url_path='')
app.config.update(
DEBUG=True,
UPLOAD_FOLDER=DATA_DIR_PATH,
)
app.secret_key = os.urandom(24)
@app.route('/tree', methods=['GET', 'POST'])
def tree():
if request.method == 'POST':
content = request.data
with open(TREE_FILENAME, 'w') as tree_file:
tree_file.write(content.decode("utf-8"))
return "saved."
else:
with open(TREE_FILENAME, 'r') as tree_file:
content = tree_file.read()
return content
@app.route('/tree/file', methods=['GET', 'POST'])
def download_tree_file():
if request.method == 'POST':
file = request.files['flare.json']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], TREE_BASE_FILENAME))
return redirect('/web/index.html')
else:
with open(TREE_FILENAME, 'r') as tree_file:
content = tree_file.read()
response = make_response(content)
response.headers["Content-Type"] = "application/json"
response.headers["Content-Disposition"] = "attachment; filename={}".format(TREE_BASE_FILENAME)
return response
@app.route('/web/<path:path>')
def send_web(path):
return send_from_directory('..', path)
@app.route('/')
def index():
return redirect('/web/index.html')
if __name__ == '__main__':
app.run(
host="0.0.0.0",
port=int("8001")
)
|
import base64
from requests import HTTPError
from blobstash.base.client import Client
from blobstash.base.error import BlobStashError
from blobstash.base.iterator import BasePaginationIterator
class KVStoreError(BlobStashError):
"""Base error for the kvstore module."""
class KeyNotFoundError(KVStoreError):
"""Error raised when a key is not found."""
class KeyValue:
def __init__(self, key, version, data=None, hash=None):
self.key = key
self.data = None
if data:
self.data = base64.b64decode(data)
self.hash = hash
self.version = version
def __str__(self):
        return "KeyValue(key={!r}, version={})".format(self.key, self.version)
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash("{}:{}".format(self.key, self.version))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
        return hash(self) == hash(other)
    def __ne__(self, other):
        return not self.__eq__(other)
class KeysIterator(BasePaginationIterator):
def __init__(self, client, **kwargs):
super().__init__(client=client, path="/api/kvstore/keys", **kwargs)
def parse_data(self, resp):
raw_keys = resp["data"]
keys = []
for data in raw_keys:
keys.append(KeyValue(**data))
return keys
class KeyVersionsIterator(BasePaginationIterator):
def __init__(self, client, key, **kwargs):
self.key = key
super().__init__(
client=client, path="/api/kvstore/key/" + self.key + "/_versions", **kwargs
)
def parse_data(self, resp):
raw_keys = resp["data"]
keys = []
for data in raw_keys:
keys.append(KeyValue(**data))
return keys
class KVStoreClient:
def __init__(self, base_url=None, api_key=None, client=None):
if client:
self._client = client
return
self._client = Client(base_url=base_url, api_key=api_key)
def put(self, key, data, ref="", version=-1):
# XXX(tsileo): check with `ref` and `data` as None
return KeyValue(
**self._client.request(
"POST",
"/api/kvstore/key/" + key,
data=dict(data=data, ref=ref, version=version),
)
)
def get(self, key, version=None):
try:
return KeyValue(**self._client.request("GET", "/api/kvstore/key/" + key))
except HTTPError as error:
if error.response.status_code == 404:
raise KeyNotFoundError
raise
def get_versions(self, key, cursor=None, limit=None):
if isinstance(key, KeyValue):
key = key.key
try:
return KeyVersionsIterator(self._client, key, cursor=cursor, limit=limit)
except HTTPError as error:
if error.response.status_code == 404:
raise KeyNotFoundError
raise
def iter(self, cursor=None, limit=None, **kwargs):
return KeysIterator(self._client, cursor=cursor, limit=limit, **kwargs)
def __iter__(self):
return KeysIterator(self._client)
|
import requests
from matroid import error
from matroid.src.helpers import api_call
@api_call(error.InvalidQueryError)
def create_video_summary(self, url=None, videoId=None, file=None):
    """Create a video summary from the provided url or file"""
(endpoint, method) = self.endpoints['create_video_summary']
if not file and not url:
raise error.InvalidQueryError(
message='Missing required parameter: file or url')
if url and file:
raise error.InvalidQueryError(
message='You may only specify a file or a URL, not both')
try:
file_to_upload = None
headers = {'Authorization': self.token.authorization_header()}
data = {}
if file:
file_to_upload = self.filereader.get_file(file)
files = {'file': file_to_upload}
return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})
else:
data['url'] = url
if videoId:
data['videoId'] = videoId
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
finally:
if file_to_upload:
file_to_upload.close()
@api_call(error.InvalidQueryError)
def get_video_summary(self, summaryId):
"""Fetch a video summary"""
(endpoint, method) = self.endpoints['get_video_summary']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
@api_call(error.InvalidQueryError)
def get_video_summary_tracks(self, summaryId):
"""Fetch a video summary track CSV"""
(endpoint, method) = self.endpoints['get_video_summary_tracks']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
@api_call(error.InvalidQueryError)
def get_video_summary_file(self, summaryId):
"""Fetch a video summary video file"""
(endpoint, method) = self.endpoints['get_video_summary_file']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
@api_call(error.InvalidQueryError)
def delete_video_summary(self, summaryId):
"""Delete a video summary"""
(endpoint, method) = self.endpoints['delete_video_summary']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
@api_call(error.InvalidQueryError)
def get_stream_summaries(self, streamId):
"""Fetch all video summaries for a stream"""
(endpoint, method) = self.endpoints['get_stream_summaries']
endpoint = endpoint.replace(':streamId', streamId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
@api_call(error.InvalidQueryError)
def create_stream_summary(self, streamId, startTime, endTime):
"""Create a video summary for a stream"""
(endpoint, method) = self.endpoints['create_stream_summary']
endpoint = endpoint.replace(':streamId', streamId)
try:
headers = {'Authorization': self.token.authorization_header()}
data = {
'startTime': startTime,
'endTime': endTime
}
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except Exception as e:
raise error.APIConnectionError(message=e)
|
import socket
import logging
import threading
import time
class SyslogServer(threading.Thread):
    """A minimal UDP syslog server that appends received messages to a log file."""
def __init__(self):
threading.Thread.__init__(self,name="Syslog")
self.daemon = True
self.logfile = "syslog.txt"
self.server = None
self.ip = "0.0.0.0"
self.port = 514
def run(self):
        if self.server is None:
            self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        print "Binding server to %s:%s" % (self.ip, self.port)
        self.server.bind((self.ip, self.port))
        logging.debug("socket bound to %s:%s" % (self.ip, self.port))
try:
f = open(self.logfile,"a")
while True:
try:
data, addr = self.server.recvfrom(1024)
f.write(data)
f.flush()
#print data
except Exception, e:
logging.error("Error while receiving message: %s" % e)
f.close()
raise e
finally:
if not f.closed:
f.close()
if __name__ == '__main__':
logging.basicConfig(filename="debug.log",level=logging.DEBUG)
print "Starting server..."
c = SyslogServer()
c.start()
while True:
time.sleep(1)
|
"""
Services templatetags docstring
"""
|
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
# This is required for Python versions < 3.8
import importlib_metadata
try:
__version__ = importlib_metadata.version('django-cacheback')
except Exception:
__version__ = 'HEAD'
default_app_config = 'cacheback.apps.CachebackConfig'
|
from directory_massager import cloakroom_file, get_top_level_agencies, cabinet_level_ids
import json
class Network:
# Base class for Cloakroom Networks
def __init__(self, name, numeric_id):
self.name = name
self.numeric_id = numeric_id
self.domains = []
self.locations = []
def add_domain(self, domain):
self.domains.append(domain)
def add_location(self, geocode):
self.locations.append(geocode)
def get_full_domain_from_email(email):
    try:
        return email.split('@')[1]
    except IndexError as e:
        print(e)
        print("Bad email: %s" % email)
        return None
def get_all_domains_from_email(email):
try:
user_domain = email.split('@')[1]
separator = "."
domain_slices = user_domain.split(separator)
tld = domain_slices[-1]
del domain_slices[-1]
domains = []
for element in reversed(domain_slices):
tld = element + "." + tld
domains.append(tld)
return domains
except IndexError as e:
print(e)
print("Bad email: %s" % email)
return None
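# Example: get_all_domains_from_email("user@a.b.gov") -> ["b.gov", "a.b.gov"]
# (every suffix of the domain, shortest first).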
def get_network_id_prefix():
numerals = [ord(char) - 96 for char in "usfederalagency".lower()]
prefix = ""
for numeral in numerals:
prefix += str(numeral)
return prefix
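# The prefix spells out "usfederalagency" with a = 1 ... z = 26, so it is
# always "211965451811217514325".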
def simulate_network_creation():
with open(cloakroom_file) as json_data:
directory = json.load(json_data)
saved_networks = []
for agency in directory:
network_id = get_network_id_prefix() + str(agency["Agency ID"])
email = agency["Email"]
geocode = agency["geocode"]
new_network = Network(name=agency["Name"].lower(), numeric_id=network_id)
valid = False
if geocode is not None:
new_network.add_location(geocode)
valid = True
if email == "":
email = None
if email is not None:
if agency["Agency ID"] in cabinet_level_ids:
domains = get_all_domains_from_email(email)
if domains is None:
print("Agency with bad email: %s" % new_network.name)
else:
                    new_network.domains = domains
valid = True
else:
domain = get_full_domain_from_email(email)
if domain is None:
print("Agency with bad email: %s" % new_network.name)
else:
new_network.add_domain(domain)
valid = True
if valid:
saved_networks.append(new_network)
return saved_networks
|
class QuickSort():
def __init__(self):
self.list = [5,1,4,2,3]
    def partition(self, lo, hi):
        """Lomuto partition of self.list[lo:hi] around the last element,
        then recursively sort the two sides of the pivot."""
        if hi - lo < 2:
            return
        pivot = self.list[hi - 1]
        L = lo
        for U in range(lo, hi - 1):
            if self.list[U] < pivot:
                print self.list, 'swap!'
                self.list[L], self.list[U] = self.list[U], self.list[L]
                print self.list
                L = L + 1
        # Move the pivot into its final position.
        self.list[L], self.list[hi - 1] = self.list[hi - 1], self.list[L]
        self.partition(lo, L)
        self.partition(L + 1, hi)
    def quickSort(self):
        self.partition(0, len(self.list))
quickSort = QuickSort()
quickSort.quickSort()
print quickSort.list
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Notification.message'
db.delete_column(u'riskgame_notification', 'message')
def backwards(self, orm):
# Adding field 'Notification.message'
db.add_column(u'riskgame_notification', 'message',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
models = {
u'riskgame.emailuser': {
'Meta': {'object_name': 'EmailUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'riskgame.episode': {
'Meta': {'object_name': 'Episode'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'first_day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['riskgame.EpisodeDay']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'riskgame.episodeday': {
'Meta': {'object_name': 'EpisodeDay'},
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'episode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Episode']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'next': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.EpisodeDay']", 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'riskgame.game': {
'Meta': {'object_name': 'Game'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {})
},
u'riskgame.notification': {
'Meta': {'object_name': 'Notification'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Player']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Team']"})
},
u'riskgame.player': {
'Meta': {'object_name': 'Player'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'emails_unsubscribe_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'onelinebio': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
'receive_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['riskgame.EmailUser']", 'unique': 'True'})
},
u'riskgame.team': {
'Meta': {'object_name': 'Team'},
'action_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'active_events': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'goal_zero_markers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leader': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ledteam'", 'null': 'True', 'to': u"orm['riskgame.Player']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'players': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['riskgame.Player']", 'through': u"orm['riskgame.TeamPlayer']", 'symmetrical': 'False'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'riskgame.teamjoinrequest': {
'Meta': {'object_name': 'TeamJoinRequest'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Player']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Team']"})
},
u'riskgame.teamplayer': {
'Meta': {'object_name': 'TeamPlayer'},
'active_events': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'episode_events': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'gather_markers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gather_pile': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Player']"}),
'prevent_markers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'risk_pile': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'office'", 'max_length': '255'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['riskgame.Team']"})
},
u'riskgame.validemaildomain': {
'Meta': {'object_name': 'ValidEmailDomain'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['riskgame']
|
from pkg_resources import resource_filename
from pyramid.events import (
BeforeRender,
subscriber,
)
from pyramid.httpexceptions import (
HTTPMovedPermanently,
HTTPPreconditionFailed,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
)
from pyramid.security import forget
from pyramid.settings import asbool
from pyramid.threadlocal import (
manager,
)
from pyramid.traversal import (
split_path_info,
_join_path_tuple,
)
from pyramid.tweens import EXCVIEW
from snovault.validation import CSRFTokenError
from subprocess_middleware.tween import SubprocessTween
from urllib.parse import parse_qs
import logging
import os
import psutil
import time
log = logging.getLogger(__name__)
def includeme(config):
config.add_tween(
'.renderers.fix_request_method_tween_factory',
under='snovault.stats.stats_tween_factory')
config.add_tween(
'.renderers.normalize_cookie_tween_factory',
under='.renderers.fix_request_method_tween_factory')
renderer_tween = (
'.renderers.debug_page_or_json'
if config.registry.settings['pyramid.reload_templates']
else '.renderers.page_or_json'
)
config.add_tween(
renderer_tween,
under='.renderers.normalize_cookie_tween_factory')
config.add_tween(
'.renderers.set_x_request_url_tween_factory',
under=renderer_tween,
)
config.add_tween('.renderers.security_tween_factory', under=EXCVIEW)
config.scan(__name__)
def fix_request_method_tween_factory(handler, registry):
""" Fix Request method changed by mod_wsgi.
See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
Apache config:
SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
"""
def fix_request_method_tween(request):
environ = request.environ
if 'X_REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
return handler(request)
return fix_request_method_tween
def security_tween_factory(handler, registry):
def security_tween(request):
login = None
expected_user = request.headers.get('X-If-Match-User')
if expected_user is not None:
login = request.authenticated_userid
if login != 'mailto.' + expected_user:
detail = 'X-If-Match-User does not match'
raise HTTPPreconditionFailed(detail)
# wget may only send credentials following a challenge response.
auth_challenge = asbool(request.headers.get('X-Auth-Challenge', False))
if auth_challenge or request.authorization is not None:
login = request.authenticated_userid
if login is None:
raise HTTPUnauthorized(headerlist=forget(request))
if request.method in ('GET', 'HEAD'):
return handler(request)
if request.content_type != 'application/json':
detail = "%s is not 'application/json'" % request.content_type
raise HTTPUnsupportedMediaType(detail)
token = request.headers.get('X-CSRF-Token')
if token is not None:
# Avoid dirtying the session and adding a Set-Cookie header
            # XXX Reconsider whether bypassing the session machinery here is
            # sound, and whether the token should have a timeout.
if token == dict.get(request.session, '_csrft_', None):
return handler(request)
raise CSRFTokenError('Incorrect CSRF token')
if login is None:
login = request.authenticated_userid
if login is not None:
namespace, userid = login.split('.', 1)
if namespace not in ('mailto', 'auth0'):
return handler(request)
raise CSRFTokenError('Missing CSRF token')
return security_tween
def normalize_cookie_tween_factory(handler, registry):
from webob.cookies import Cookie
ignore = {
'/favicon.ico',
}
def normalize_cookie_tween(request):
if request.path in ignore or request.path.startswith('/static/'):
return handler(request)
session = request.session
if session or session._cookie_name not in request.cookies:
return handler(request)
response = handler(request)
existing = response.headers.getall('Set-Cookie')
if existing:
cookies = Cookie()
for header in existing:
cookies.load(header)
if session._cookie_name in cookies:
return response
response.delete_cookie(
session._cookie_name,
path=session._cookie_path,
domain=session._cookie_domain,
)
return response
return normalize_cookie_tween
def set_x_request_url_tween_factory(handler, registry):
def set_x_request_url_tween(request):
response = handler(request)
response.headers['X-Request-URL'] = request.url
return response
return set_x_request_url_tween
@subscriber(BeforeRender)
def canonical_redirect(event):
request = event['request']
# Ignore subrequests
if len(manager.stack) > 1:
return
if request.method not in ('GET', 'HEAD'):
return
if request.response.status_int != 200:
return
if not request.environ.get('encoded.canonical_redirect', True):
return
if request.path_info == '/':
return
if not isinstance(event.rendering_val, dict):
return
canonical = event.rendering_val.get('@id', None)
if canonical is None:
return
canonical_path, _, canonical_qs = canonical.partition('?')
request_path = _join_path_tuple(('',) + split_path_info(request.path_info))
if (request_path == canonical_path.rstrip('/') and
request.path_info.endswith('/') == canonical_path.endswith('/') and
(canonical_qs in ('', request.query_string))):
return
if '/@@' in request.path_info:
return
if (parse_qs(canonical_qs) == parse_qs(request.query_string) and
'/suggest' in request_path):
return
qs = canonical_qs or request.query_string
location = canonical_path + ('?' if qs else '') + qs
raise HTTPMovedPermanently(location=location)
def should_transform(request, response):
if request.method not in ('GET', 'HEAD'):
return False
if response.content_type != 'application/json':
return False
format = request.params.get('format')
if format is None:
original_vary = response.vary or ()
response.vary = original_vary + ('Accept', 'Authorization')
if request.authorization is not None:
format = 'json'
else:
acceptable = request.accept.acceptable_offers(
[
'text/html',
'application/ld+json',
'application/json',
])
mime_type, q_value = acceptable[0] if acceptable else ('text/html', 0)
format = mime_type.split('/', 1)[1]
if format == 'ld+json':
format = 'json'
else:
format = format.lower()
if format not in ('html', 'json'):
format = 'html'
if format == 'json':
return False
request._transform_start = time.time()
return True
def after_transform(request, response):
end = time.time()
duration = int((end - request._transform_start) * 1e6)
stats = request._stats
stats['render_count'] = stats.get('render_count', 0) + 1
stats['render_time'] = stats.get('render_time', 0) + duration
request._stats_html_attribute = True
rss_limit = 256 * (1024 ** 2)  # 256 MB in bytes
def reload_process(process):
return psutil.Process(process.pid).memory_info().rss > rss_limit
node_env = os.environ.copy()
node_env['NODE_PATH'] = ''
page_or_json = SubprocessTween(
should_transform=should_transform,
after_transform=after_transform,
reload_process=reload_process,
args=['node', resource_filename(__name__, 'static/build-server/renderer.js')],
env=node_env,
)
debug_page_or_json = SubprocessTween(
should_transform=should_transform,
after_transform=after_transform,
reload_process=reload_process,
args=['node', resource_filename(__name__, 'static/server.js')],
env=node_env,
)
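# A minimal sketch of the tween pattern used throughout this module (the
# factory below is illustrative only, not part of the codebase): a tween
# factory receives (handler, registry) and returns a callable that wraps
# request handling.
#
#   def example_timing_tween_factory(handler, registry):
#       def example_timing_tween(request):
#           start = time.time()
#           response = handler(request)
#           response.headers['X-Elapsed-Seconds'] = str(time.time() - start)
#           return response
#       return example_timing_tween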
|
import json
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from werkzeug.urls import url_join
from indico.core.config import config
from indico.modules.cephalopod import cephalopod_settings, logger
from indico.modules.core.settings import core_settings
HEADERS = {'Content-Type': 'application/json'}
TIMEOUT = 10
def _get_url():
return url_join(config.COMMUNITY_HUB_URL, 'api/instance/')
def register_instance(contact, email):
payload = {'url': config.BASE_URL,
'contact': contact,
'email': email,
'organization': core_settings.get('site_organization')}
response = requests.post(_get_url(), data=json.dumps(payload), headers=HEADERS, timeout=TIMEOUT,
verify=(not config.DEBUG))
try:
response.raise_for_status()
except HTTPError as err:
        logger.error('failed to register the server with the community hub, got: %s', err)
cephalopod_settings.set('joined', False)
raise
except Timeout:
logger.error('failed to register: timeout while contacting the community hub')
cephalopod_settings.set('joined', False)
raise
except RequestException as err:
        logger.error('unexpected exception while registering the server with the community hub: %s', err)
raise
json_response = response.json()
if 'uuid' not in json_response:
logger.error('invalid json reply from the community hub: uuid missing')
cephalopod_settings.set('joined', False)
raise ValueError('invalid json reply from the community hub: uuid missing')
cephalopod_settings.set_multi({
'joined': True,
'uuid': json_response['uuid'],
'contact_name': payload['contact'],
'contact_email': payload['email']
})
logger.info('successfully registered the server to the community hub')
def unregister_instance():
payload = {'enabled': False}
url = url_join(_get_url(), cephalopod_settings.get('uuid'))
response = requests.patch(url, data=json.dumps(payload), headers=HEADERS, timeout=TIMEOUT,
verify=(not config.DEBUG))
try:
response.raise_for_status()
except HTTPError as err:
if err.response.status_code != 404:
            logger.error('failed to unregister the server from the community hub, got: %s', err)
raise
except Timeout:
logger.error('failed to unregister: timeout while contacting the community hub')
raise
except RequestException as err:
        logger.error('unexpected exception while unregistering the server with the community hub: %s', err)
raise
cephalopod_settings.set('joined', False)
logger.info('successfully unregistered the server from the community hub')
def sync_instance(contact, email):
contact = contact or cephalopod_settings.get('contact_name')
email = email or cephalopod_settings.get('contact_email')
# registration needed if the instance does not have a uuid
if not cephalopod_settings.get('uuid'):
logger.warning('unable to synchronize: missing uuid, registering the server instead')
register_instance(contact, email)
return
payload = {'enabled': True,
'url': config.BASE_URL,
'contact': contact,
'email': email,
'organization': core_settings.get('site_organization')}
url = url_join(_get_url(), cephalopod_settings.get('uuid'))
response = requests.patch(url, data=json.dumps(payload), headers=HEADERS, timeout=TIMEOUT,
verify=(not config.DEBUG))
try:
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 404:
logger.warning('unable to synchronize: the server was not registered, registering the server now')
register_instance(contact, email)
else:
logger.error('failed to synchronize the server with the community hub, got: %s', err)
raise
except Timeout:
logger.error('failed to synchronize: timeout while contacting the community hub')
raise
except RequestException as err:
        logger.error('unexpected exception while synchronizing the server with the community hub: %s', err)
raise
else:
cephalopod_settings.set_multi({
'joined': True,
'contact_name': payload['contact'],
'contact_email': payload['email']})
logger.info('successfully synchronized the server with the community hub')
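# Hedged usage sketch (these functions are normally called from Indico's
# cephalopod settings UI and periodic tasks; the values are placeholders):
#
#   register_instance('Jane Admin', 'admin@example.com')
#   sync_instance(None, None)   # falls back to the stored contact/email
#   unregister_instance()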
|
from __future__ import absolute_import
import os.path
import sys
try:
import ansible_mitogen.connection
except ImportError:
base_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
del base_dir
import ansible_mitogen.connection
class Connection(ansible_mitogen.connection.Connection):
transport = 'mitogen_su'
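# Hedged note: with Mitogen's Ansible strategy plugin enabled, this connection
# type is typically selected per host or play, e.g. via the inventory variable
# (illustrative):
#
#   ansible_connection=mitogen_su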
|
import sys
import unittest
sys.path.append('./code')
from transforms import Transform, RadialGaussianization
from models import GSM
from numpy import all, sqrt, sum, square
Transform.VERBOSITY = 0
class Tests(unittest.TestCase):
def test_inverse(self):
"""
        Make sure inverse radial Gaussianization undoes radial Gaussianization.
"""
gsm = GSM(3, 10)
gsm.initialize('cauchy')
# generate test data
samples = gsm.sample(100)
rg = RadialGaussianization(gsm)
# reconstructed samples
samples_ = rg.inverse(rg(samples))
# distance between norm and reconstructed norm
dist = abs(sqrt(sum(square(samples_))) - sqrt(sum(square(samples))))
self.assertTrue(all(dist < 1E-6))
###
# test one-dimensional GSM
gsm = GSM(1, 7)
gsm.initialize('cauchy')
# generate test data
samples = gsm.sample(100)
rg = RadialGaussianization(gsm)
# reconstructed samples
samples_rg = rg.inverse(rg(samples))
# distance between norm and reconstructed norm
dist = abs(sqrt(sum(square(samples_rg))) - sqrt(sum(square(samples))))
self.assertTrue(all(dist < 1E-6))
def test_logjacobian(self):
"""
Test log-Jacobian.
"""
gsm = GSM(3, 10)
gsm.initialize('cauchy')
# standard normal distribution
gauss = GSM(3, 1)
gauss.scales[0] = 1.
# generate test data
samples = gsm.sample(100)
rg = RadialGaussianization(gsm)
# after Gaussianization, samples should be Gaussian distributed
loglik_gsm = gsm.loglikelihood(samples)
loglik_gauss = gauss.loglikelihood(rg(samples)) + rg.logjacobian(samples)
dist = abs(loglik_gsm - loglik_gauss)
self.assertTrue(all(dist < 1E-6))
###
# test one-dimensional Gaussian
gsm = GSM(1, 10)
gsm.initialize('cauchy')
# standard normal distribution
gauss = GSM(1, 1)
gauss.scales[0] = 1.
# generate test data
samples = gsm.sample(100)
rg = RadialGaussianization(gsm)
# after Gaussianization, samples should be Gaussian distributed
loglik_gsm = gsm.loglikelihood(samples)
loglik_gauss = gauss.loglikelihood(rg(samples)) + rg.logjacobian(samples)
dist = abs(loglik_gsm - loglik_gauss)
self.assertTrue(all(dist < 1E-6))
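# The identity exercised above is the change-of-variables formula for
# densities: log p_gsm(x) = log p_gauss(g(x)) + log|det Dg(x)|, where g is the
# radial Gaussianization transform.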
if __name__ == '__main__':
unittest.main()
|
from library.stigma.application import Button
class HomeNewgame(Button):
def __init__(self):
super(HomeNewgame, self).__init__()
self.text = 'New game'
self.params = None
|
from app import manager
if __name__ == "__main__":
manager.run()
|
"""
Unit tests for the Pythia PRF service implementation.
"""
from django.test import SimpleTestCase
import json
from settings import dp
from pyrelic import vpop, vprf, bls
from pyrelic.pbc import G1Element, G2Element
from crypto import *
w = "abcdefg0987654321"
t = "123456789poiuytrewq"
pw = "super secret pw"
class VpopEvalTest(SimpleTestCase):
"""
Tests the eval API.
"""
urlTemplate = "/pythia/eval?w={}&t={}&x={}"
def checkErrorResponse(self, response):
"""
Checks that a response with an expected error, gives HTTP 200,
valid JSON with errorCode and errorMessage fields.
"""
self.assertEqual(response.status_code, 200)
r = json.loads(response.content)
self.assertTrue("errorCode" in r and "errorMessage" in r )
def check(self, response):
"""
Basic validation that a response contains the expected fields.
"""
self.assertEqual(response.status_code, 200)
r = json.loads(response.content)
self.assertTrue("y" in r)
return r
def testNoParams(self):
"""
        Ensure eval responds with an HTTP 200 and valid JSON response.
"""
response = self.client.get('/pythia/eval')
self.checkErrorResponse(response)
def testWrongParams(self):
"""
Test a variety of bad params for the eval function.
"""
response = self.client.get('/pythia/eval?w')
self.checkErrorResponse(response)
response = self.client.get('/pythia/eval?w=2341234')
self.checkErrorResponse(response)
response = self.client.get('/pythia/eval?w=10987314&t=lkjasdf')
self.checkErrorResponse(response)
response = self.client.get('/pythia/eval?w=10987314&x=adsfasdf')
self.checkErrorResponse(response)
response = self.client.get('/pythia/eval?t=10987314&x=adsfasdf')
self.checkErrorResponse(response)
def testEvalSimple(self):
"""
Simple test of the eval function
"""
r,x = vpop.blind(pw)
x = vpop.wrap(x)
response = self.client.get(VpopEvalTest.urlTemplate.format(w,t,x))
d = self.check(response)
y = vpop.unwrapGt(str(d["y"]))
z = vpop.deblind(r,y)
def testEvalStable(self):
"""
        Runs eval a number of times and verifies that intermediate results
        differ (because blinding is randomized) while the final result is
        always the same.
"""
y1,z1 = self.runClientEval(w, t, pw)
y2,z2 = self.runClientEval(w, t, pw)
# Verify that the intermediate results differ
self.assertNotEqual(y1,y2)
        # But the final results are the same
        self.assertEqual(z1, z2)
def runClientEval(self,w,t,m):
"""
Runs the client-side eval() and returns the resulting y and z values.
"""
# Prepare the message
r,x = vpop.blind(m)
x = vpop.wrap(x)
# Submit the request and do a quick-check on the response
response = self.client.get(VpopEvalTest.urlTemplate.format(w,t,x))
d = self.check(response)
# Compute the final result
y = vpop.unwrapGt(str(d["y"]))
z = vpop.deblind(r,y)
return y,z
    def testProofOmitted(self):
"""
Requests eval with proof omitted.
"""
# Request eval with no proof
_, url = self.getUrl()
response = self.client.get(url + "&skipproof=true")
# Check the response and ensure there is no proof included.
r = json.loads(response.content)
self.assertTrue( "p" not in r and "c" not in r and "u" not in r)
def parseResponse(self, response):
"""
Verifies the response code is HTTP 200 and parses the JSON response.
@returns a dictionary of the response contents.
"""
self.assertEqual(response.status_code, 200)
return json.loads(response.content)
def testProofRandomized(self):
"""
Checks to ensure that the same request gets a different proof each time
"""
# Make two requests with the same URL.
_, url = self.getUrl()
r1 = self.parseResponse(self.client.get(url))
r2 = self.parseResponse(self.client.get(url))
        # Test that the pubkey p is the same, but c and u are different.
        self.assertEqual(r1["p"], r2["p"])
        self.assertNotEqual(r1["c"], r2["c"])
        self.assertNotEqual(r1["u"], r2["u"])
def testProof(self):
"""
Ensures the proof is valid.
"""
# Make an eval request
        r, x = vpop.blind(pw)
        xWrap = vpop.wrap(x)
        url = VpopEvalTest.urlTemplate.format(w, t, xWrap)
        # Parse into `resp` so the blinding factor r is not shadowed.
        resp = self.parseResponse(self.client.get(url))
        # Deserialize the items needed to verify the proof.
        y = vpop.unwrapY(resp["y"])
        pi = (vpop.unwrapP(resp["p"]), vpop.unwrapC(resp["c"]), vpop.unwrapU(resp["u"]))
# Test the proof
self.assertTrue( vpop.verify(x, t, y, pi) )
def getUrl(self):
"""
Helper function: gets a URL using the standard VpopEvalTest w,t,pw
parameters with a freshly blinded value x.
@returns r, url where r is the value required for deblinding.
"""
r,x = vpop.blind(pw)
x = vpop.wrap(x)
return r, VpopEvalTest.urlTemplate.format(w,t,x)
class UnbEvalTest(SimpleTestCase):
"""
Tests the eval API for unblinded PRF implementation.
"""
def setUp(self):
self.urlTemplate = "/pythia/eval-unb?w={}&t={}&x={}"
# Dummy values that can be used for testing
self.w = "abcdefg0987654321"
self.t = "123456789poiuytrewq"
self.pw = "super secret pw"
self.salt = secureRandom()
self.x = sha(self.salt, self.pw)
self.standardUrl = self.urlTemplate.format(self.w, self.t, self.x)
def parse(self, response):
"""
Verify that the response is HTTP status 200 and parse the response
as JSON.
"""
self.assertEqual(response.status_code, 200)
d = json.loads(response.content)
# Convert unicode keys into strings.
        return {str(k): v for k, v in d.iteritems()}
def testEvalSimple(self):
"""
Simple test of the eval function
"""
# Query the eval function
response = self.client.get(self.standardUrl)
d = self.parse(response)
# Verify that we got a response of the correct type
self.assertTrue( "y" in d )
y = vprf.unwrapY(d["y"])
self.assertTrue(isinstance(y, G1Element))
def testEvalStable(self):
"""
        Runs eval a number of times and verifies that the result and public
        key are stable, while the proof components differ (because the proof
        is randomized).
"""
y1, p1, c1, u1 = self.runClientEval()
y2, p2, c2, u2 = self.runClientEval()
# Verify that result and pubkey are the same
self.assertEqual(y1, y2)
self.assertEqual(p1, p2)
# But the proof is randomized
self.assertNotEqual(c1, c2)
self.assertNotEqual(u1, u2)
def runClientEval(self):
"""
Runs the client-side eval() and returns the resulting values
after they've been unwrapped.
"""
# Query the eval function
response = self.client.get(self.standardUrl)
d = self.parse(response)
# Unwrap the results.
return (vprf.unwrapY(d["y"]), vprf.unwrapP(d["p"]),
vprf.unwrapC(d["c"]), vprf.unwrapU(d["u"]) )
    def testProofOmitted(self):
"""
Requests eval with proof omitted.
"""
# Request eval with no proof
response = self.client.get(self.standardUrl + "&skipproof=true")
# Check the response and ensure there is no proof included.
r = json.loads(response.content)
self.assertTrue( "p" not in r and "c" not in r and "u" not in r)
def parseResponse(self, response):
"""
Verifies the response code is HTTP 200 and parses the JSON response.
@returns a dictionary of the response contents.
"""
self.assertEqual(response.status_code, 200)
return json.loads(response.content)
def testProof(self):
"""
Ensures the proof is valid.
"""
# Query eval function
y,p,c,u = self.runClientEval()
# Test the proof
self.assertTrue( vprf.verify(self.x, self.t, y, (p,c,u),
errorOnFail=False) )
class BlsEvalTest(SimpleTestCase):
"""
Tests the eval API for BLS PRF implementation.
"""
def setUp(self):
self.urlTemplate = "/pythia/eval-bls?w={}&t={}&x={}"
# Dummy values that can be used for testing
self.w = "abcdefg0987654321"
self.t = "123456789poiuytrewq"
self.pw = "super secret pw"
self.salt = secureRandom()
self.x = sha(self.salt, self.pw)
self.standardUrl = self.urlTemplate.format(self.w, self.t, self.x)
def parse(self, response):
"""
Verify that the response is HTTP status 200 and parse the response
as JSON.
"""
self.assertEqual(response.status_code, 200)
d = json.loads(response.content)
# Convert unicode keys into strings.
        return {str(k): v for k, v in d.iteritems()}
def testEvalSimple(self):
"""
Simple test of the eval function
"""
# Query the eval function
response = self.client.get(self.standardUrl)
d = self.parse(response)
# Verify that we got a response of the correct type
self.assertTrue( "y" in d )
y = bls.unwrapY(d["y"])
        p = bls.unwrapP(d["p"])
self.assertTrue(isinstance(y, G1Element))
self.assertTrue(isinstance(p, G2Element))
def testEvalStable(self):
"""
        Runs eval a number of times and verifies that the result and public
        key are always the same (the BLS evaluation is deterministic).
"""
y1, p1 = self.runClientEval()
y2, p2 = self.runClientEval()
# Verify that result and pubkey are the same
self.assertEqual(y1, y2)
self.assertEqual(p1, p2)
def runClientEval(self):
"""
Runs the client-side eval() and returns the resulting values
after they've been unwrapped.
"""
# Query the eval function
response = self.client.get(self.standardUrl)
d = self.parse(response)
# Unwrap the results.
return bls.unwrapY(d["y"]), bls.unwrapP(d["p"])
    def testProofOmitted(self):
"""
Requests eval with proof omitted.
"""
# Request eval with no proof
response = self.client.get(self.standardUrl + "&skipproof=true")
# Check the response and ensure there is no proof included.
r = json.loads(response.content)
self.assertTrue( "p" not in r and "c" not in r and "u" not in r)
def parseResponse(self, response):
"""
Verifies the response code is HTTP 200 and parses the JSON response.
@returns a dictionary of the response contents.
"""
self.assertEqual(response.status_code, 200)
return json.loads(response.content)
def testProof(self):
"""
Ensures the proof is valid.
"""
# Query eval function
y,p = self.runClientEval()
# Test the proof
self.assertTrue( bls.verify(self.x, self.t, y, (p,None,None),
errorOnFail=False) )
class VpopAdvTest(SimpleTestCase):
"""
Tests updates and deletions for the vpop PRF.
"""
urlEval = "/pythia/eval?w={}&t={}&x={}"
urlUpdate = "/pythia/updateToken?w={}&wPrime={}"
urlDelete = "/pythia/delete?w={}"
def check(self, response, key=None):
"""
Verifies that the response is HTTP 200 and optionally extracts a single
key from the response dictionary.
"""
        # Verify the status code and parse the response as JSON
self.assertEqual(response.status_code, 200)
respDict = json.loads(response.content)
# Check for and extract a key if requested
if key:
self.assertTrue(key in respDict)
return respDict[key]
def eval(self,w,t,pw):
"""
Runs an eval and returns the result.
"""
# Blind and serialize the pw
r,x = vpop.blind(pw)
x = vpop.wrap(x)
# Call the URL and verify the response
response = self.client.get(VpopAdvTest.urlEval.format(w,t,x))
ySerial = str(self.check(response, "y"))
# Deserialize and de-blind the result.
y = vpop.unwrapY(ySerial)
return vpop.deblind(r,y)
def testUpdate(self):
"""
Tests client-side key rotation.
"""
# Encrypt a pw under the default w.
z = self.eval(w,t,pw)
# Create new w' and get an update token
wPrime = "Not your Daddy's identifier"
response = self.client.get(VpopAdvTest.urlUpdate.format(w,wPrime))
deltaSerial = self.check(response, "delta")
delta = vpop.unwrapDelta(deltaSerial)
# Update z
zPrime1 = vpop.update(z, delta)
# Re-run eval using w' and verify the result.
zPrime2 = self.eval(wPrime,t,pw)
self.assertEqual(zPrime1, zPrime2)
def testDeleteSimple(self):
"""
        Tests key deletion by simply verifying that the call succeeds and
        returns a positive status code and message.
"""
response = self.client.get(VpopAdvTest.urlDelete.format(w))
status = self.check(response, "status")
self.assertEqual(str(status), "OK")
def testDelete(self):
"""
Tests key deletion by checking that eval results change after a
deletion.
"""
# Encrypt a pw under the default w.
z1 = self.eval(w,t,pw)
# Request key deletion
response = self.client.get(VpopAdvTest.urlDelete.format(w))
self.check(response, "status")
# Try again and test
z2 = self.eval(w,t,pw)
self.assertNotEqual(z1,z2)
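# Hedged sketch of the client-side vPOP flow these tests exercise, using only
# the pyrelic calls already imported above:
#
#   r, x = vpop.blind(pw)                  # blind the password; keep r secret
#   xWrap = vpop.wrap(x)                   # serialize for the query string
#   # ...GET /pythia/eval?w=...&t=...&x=xWrap...
#   y = vpop.unwrapGt(str(response["y"]))  # deserialize the server result
#   z = vpop.deblind(r, y)                 # unblind; z is stable across runs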
|
import tornado.escape
import tornado.web
import tornado.websocket
import motor
import logging
import ast
import bcrypt
from interface import *
from choices import *
from decorators import *
__all__ = [
"LoginHandler", "LogoutHandler",
"DeviceListHandler", "DeviceCreateHandler", "DeviceUpdateHandler",
"UserListHandler", "UserCreateHandler", "UserUpdateHandler",
"DeviceSocketHandler", "TrebolSocketHandler",
]
class BaseHandler(tornado.web.RequestHandler):
def set_current_user(self, user):
if user:
self.set_secure_cookie("user", tornado.escape.json_encode(user))
else:
self.clear_cookie("user")
def get_current_user(self):
user_json = self.get_secure_cookie("user")
if user_json:
return tornado.escape.json_decode(user_json)
else:
return None
def set_message(self, kind, text):
msg = {"type": kind, "text": text}
self.set_secure_cookie("message", str(msg))
def get_message(self):
message = self.get_secure_cookie("message")
self.clear_cookie("message")
if message:
message = ast.literal_eval(message)
return message
class LoginHandler(BaseHandler):
def get(self):
self.render("login.html", message=self.get_message())
@tornado.gen.coroutine
def post(self):
users = self.settings["db"].users
email = self.get_argument("email", "")
password = self.get_argument("password", "").encode()
user = yield users.find_one({"email": email})
if user and user['hash'] and \
bcrypt.hashpw(password, user["hash"].encode()) == user["hash"]:
user.pop("hash")
self.set_current_user(user)
self.redirect(self.get_argument("next", u"/device/list/"))
else:
self.set_message("danger", "Authorization Failure.")
self.redirect(self.settings["login_url"])
class LogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", "/"))
class DeviceListHandler(BaseHandler):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
db = self.settings["db"]
cursor = db.devices.find().sort('name', 1)
count = yield cursor.count()
devices = yield cursor.to_list(count)
self.render(
"device_list.html",
username=self.get_current_user(),
devices=devices,
message=self.get_message()
)
class DeviceCreateHandler(BaseHandler):
@tornado.web.authenticated
@is_admin
def get(self):
self.render("device_create.html", message=self.get_message())
@tornado.gen.coroutine
def post(self):
db = self.settings["db"]
name = self.get_argument("name", "")
key = self.get_argument("key", "")
device = yield db.devices.find_one({"name": name})
if device is not None:
kind = "danger"
text = "Device already exists."
elif name and key:
yield db.devices.insert({
"name": name,
"key": key,
"address": None,
})
kind = "success"
text = "Device {} added succesfully".format(name)
else:
kind = "danger"
text = "Please enter a valid device."
self.set_message(kind, text)
self.redirect('/device/create/')
class DeviceUpdateHandler(BaseHandler):
@tornado.web.authenticated
@is_admin
@tornado.gen.coroutine
def get(self, slug):
db = self.settings["db"]
name = slug.rstrip("/")
device = yield db.devices.find_one({"name": name})
if device is None:
raise tornado.web.HTTPError(404)
        message = self.get_message()
        self.render(
            "device_update.html", device=device, message=message)
@tornado.gen.coroutine
def post(self, slug):
db = self.settings["db"]
name = slug.rstrip("/")
device = yield db.devices.find_one({"name": name})
action = self.get_argument("action", "")
if action not in ("update", "delete") or device is None:
raise tornado.web.HTTPError(404)
if action == "delete":
response = yield db.devices.remove({"name": name})
if response["err"] is None:
kind = "success"
text = "Device {} deleted.".format(name)
else:
kind = "danger"
text = "Device {} could not be deleted: {}".format(
name, response["err"])
redirect = "/device/list/"
elif action == "update":
new_name = self.get_argument("name", None)
new_key = self.get_argument("key", None)
if new_name is None:
kind = "danger"
text = "Please enter a device name."
redirect = "/device/{}/update/".format(name)
elif new_key is None:
kind = "danger"
text = "Please enter a key for device."
redirect = "/device/{}/update/".format(name)
else:
update = {"name": new_name, "key": new_key}
response = yield db.devices.update(
{"name": name}, {"$set": update})
if response["err"] is None:
kind = "success"
text = "Device updated succesfully."
else:
kind = "alert"
text = "Device update failure: {}".format(response["err"])
redirect = "/device/{}/update/".format(name)
self.set_message(kind, text)
self.redirect(redirect)
class UserListHandler(BaseHandler):
@tornado.web.authenticated
@is_admin
@tornado.gen.coroutine
def get(self):
cursor = self.settings["db"].users.find().sort('_id', 1)
count = yield cursor.count()
users = yield cursor.to_list(count)
self.render("user_list.html", users=users, message=self.get_message())
class UserCreateHandler(BaseHandler):
@tornado.web.authenticated
@is_admin
def get(self):
self.render(
"user_create.html", message=self.get_message(), groups=USER_GROUPS)
@tornado.gen.coroutine
def post(self):
db = self.settings["db"]
email = self.get_argument("email", "")
password = self.get_argument("password", "")
group = self.get_argument("group", "")
user = yield db.users.find_one({"email": email})
if user is not None:
kind = "danger"
text = "User already exists."
elif email and password and group in USER_GROUPS:
yield create_new_user(db, email, password, group)
kind = "success"
text = "User {} added succesfully.".format(email)
else:
kind = "danger"
text = "Please enter a valid user."
self.set_message(kind, text)
self.redirect("/user/create/")
class UserUpdateHandler(BaseHandler):
@tornado.web.authenticated
@is_admin
@tornado.gen.coroutine
def get(self, uid):
user = yield self.settings["db"].users.find_one({"_id": int(uid)})
if user is None:
raise tornado.web.HTTPError(404)
self.render("user_update.html", user=user, message=self.get_message(),
groups=USER_GROUPS)
@tornado.gen.coroutine
def post(self, uid):
db = self.settings["db"]
uid = int(uid)
user = yield db.users.find_one({"_id": uid})
action = self.get_argument("action", "")
if action not in ("update", "delete") or user is None:
raise tornado.web.HTTPError(404)
if action == "delete":
response = yield db.users.remove({"_id": uid})
if response["err"] is None:
kind = "success"
text = "User {} deleted.".format(user["email"])
else:
kind = "danger"
text = "User {} could not be deleted: {}".format(
user["email"], response["err"])
redirect = "/device/list/"
elif action == "update":
new_email = self.get_argument("email", None)
new_group = self.get_argument("group", None)
new_pass = self.get_argument("password", None)
if new_email == "" or new_group not in USER_GROUPS:
kind = "danger"
text = "Please enter a valid user."
redirect = "/user/{}/update/".format(name)
else:
update = {"email": new_email, "group": new_group}
if new_pass != "":
update.update({"hash": bcrypt.hashpw(
new_pass.encode(), bcrypt.gensalt(8))})
response = yield db.users.update(
{"_id": uid}, {"$set": update})
redirect = "/user/{}/update".format(str(uid))
if response["err"] is None:
kind = "success"
text = "User updated successfully."
if user["email"] == self.get_current_user()["email"]:
                        if "hash" in update:
                            update.pop("hash")
self.set_current_user(update)
else:
kind = "danger"
text = "User update failure: {}".format(response["err"])
self.set_message(kind, text)
self.redirect(redirect)
class DeviceSocketHandler(tornado.websocket.WebSocketHandler):
devices = set()
@tornado.gen.coroutine
    def open(self):
        # Reject the connection early and stop processing when required
        # headers are missing; finish() is not valid on a websocket handler.
        if "X-Device-Name" not in self.request.headers:
            self.write_message("missing-device-name")
            self.close()
            return
        if "X-Device-Key" not in self.request.headers:
            self.write_message("missing-device-key")
            self.close()
            return
        if "X-Real-Ip" in self.request.headers:
            self.request.remote_ip = self.request.headers.get("X-Real-Ip")
        name = self.request.headers["X-Device-Name"]
        key = self.request.headers["X-Device-Key"]
        device = yield self.settings["db"].devices.find_and_modify(
            {"name": name, "key": key},
            {"$set": {"address": self.request.remote_ip}},
            new=True)
        if device is None:
            self.write_message("device-does-not-exist")
            self.close()
            return
        DeviceSocketHandler.devices.add(self)
logging.info("A device connected.")
TrebolSocketHandler.device_action(self, "connect")
def on_close(self):
DeviceSocketHandler.devices.remove(self)
logging.info("A device disconnected.")
TrebolSocketHandler.device_action(self, "disconnect")
class TrebolSocketHandler(tornado.websocket.WebSocketHandler):
clients = set()
def open(self):
TrebolSocketHandler.clients.add(self)
logging.info("A Trebol client connected.")
for device in DeviceSocketHandler.devices:
TrebolSocketHandler.device_action(device, "connect")
@classmethod
def device_action(cls, device, action):
msg = {
"action": action,
"device": device.request.headers["X-Device-Name"],
"address": device.request.remote_ip,
}
for client in cls.clients:
client.write_message(msg)
def on_close(self):
TrebolSocketHandler.clients.remove(self)
logging.info("A Trebol client disconnected.")
|
"""
A utility module to simplify tweepy usage from within the flask app
"""
import tweepy
from tweepy import Cursor
import app_settings as cfg
def sort_statuses(statuses):
    # Sort by retweet count, most-retweeted first.
    return sorted(statuses, key=lambda status: status.retweet_count, reverse=True)
def get_twitter_auth():
auth = tweepy.OAuthHandler(cfg.TW_API, cfg.TW_SECRET)
return auth
def get_auth_url():
auth = get_twitter_auth()
auth_url = auth.get_authorization_url()
return [auth_url, auth.request_token.key, auth.request_token.secret]
def get_access_token(token, secret, vfr):
auth = get_twitter_auth()
auth.set_request_token(token, secret)
try:
auth.get_access_token(vfr)
auth.get_username()
return [auth.access_token.key, auth.access_token.secret, auth.username]
    except tweepy.TweepError:
        # Authentication failed; signal the caller with an empty list.
        return []
def get_access_auth(key, secret):
auth = get_twitter_auth()
auth.set_access_token(key, secret)
return auth
def get_api(key, secret):
auth = get_access_auth(key, secret)
return tweepy.API(auth)
def get_statuses(key, secret, page=1):
api = get_api(key, secret)
try:
statuses = api.home_timeline(page=page, count=100)
return sort_statuses(statuses)
except tweepy.TweepError as e:
return e.message
# try:
# statuses = []
# for status in Cursor(api.home_timeline).items(200):
# statuses.append(status)
# return sort_statuses(statuses)
# except tweepy.TweepError as e:
# return e.message
def get_me(key, secret):
api = get_api(key, secret)
try:
user = api.me()
print(user)
return user
except tweepy.TweepError as e:
return e.message
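# Hedged usage sketch of the OAuth dance above (the verifier `vfr` comes back
# from Twitter after the user approves the app; values are placeholders):
#
#   auth_url, req_key, req_secret = get_auth_url()
#   # ...redirect the user to auth_url and collect the verifier vfr...
#   access = get_access_token(req_key, req_secret, vfr)
#   if access:
#       key, secret, username = access
#       statuses = get_statuses(key, secret)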
|
import unittest
import sys
import os
from report.eval_summary import *
import build_id
from testsupport import checkin
def get_sample_eval_summary():
return EvalSummary(
'sadm.trunk.136.20',
'OFFICIAL',
'zufa',
EvalPhase.TEST,
None,
1314492700.49,
[11.08, 0.44, 9.18],
'linux_x86-64',
'Linux',
'64',
'2.6.35.13-92.fc14'
)
@checkin
class EvalSummaryTest(unittest.TestCase):
def test_ctor(self):
es = get_sample_eval_summary()
self.assertEqual('sadm.trunk.136.20', es.build_id)
self.assertEqual('OFFICIAL', es.style)
self.assertEqual('zufa', es.host)
self.assertEqual(EvalPhase.TEST, es.final_phase)
self.assertEqual(None, es.failure_reason)
def test_elapsed(self):
es = get_sample_eval_summary()
self.assertAlmostEqual(20.7, es.get_elapsed_seconds())
self.assertAlmostEqual(11.08, es.get_elapsed_seconds(EvalPhase.UPDATE))
self.assertAlmostEqual(0.44, es.get_elapsed_seconds(EvalPhase.BUILD))
self.assertAlmostEqual(9.18, es.get_elapsed_seconds(EvalPhase.TEST))
def test_start_time(self):
es = get_sample_eval_summary()
self.assertAlmostEqual(1314492700.49, es.get_start_time())
def test_end_time(self):
es = get_sample_eval_summary()
self.assertAlmostEqual(1314492721.19, es.get_end_time())
def test_start_and_end_equal_elapsed(self):
es = get_sample_eval_summary()
self.assertAlmostEqual(es.get_elapsed_seconds(), es.get_end_time() - es.get_start_time())
def test_reported_result(self):
es = get_sample_eval_summary()
self.assertEqual(EvalResult.OK, es.get_reported_result())
    def test_reported_result_failed(self):
es = get_sample_eval_summary()
es.failure_reason = '5 tests failed'
self.assertEqual(EvalResult.FAILED, es.get_reported_result())
def test_imputed_result_official(self):
es = get_sample_eval_summary()
es.failure_reason = '5 tests failed'
self.assertEqual(EvalResult.FAILED, es.get_imputed_result())
def test_imputed_result_continuous(self):
es = get_sample_eval_summary()
es.style = 'CONTINUOUS'
es.failure_reason = '5 tests failed'
self.assertEqual(EvalResult.FAILED, es.get_imputed_result())
def test_imputed_result_experimental(self):
es = get_sample_eval_summary()
es.style = 'EXPERIMENTAL'
es.failure_reason = '5 tests failed'
self.assertEqual(EvalResult.PROBLEMATIC, es.get_imputed_result())
def test_str(self):
es = get_sample_eval_summary()
ld = dateutils.format_standard_date_with_tz_offset(dateutils.parse_standard_date_with_tz_offset('2011-08-27 18:51:40.490000-0600'))
self.assertEqual('sadm.trunk.136.20,OFFICIAL,zufa,TEST,,' + ld + ',11.08 0.44 9.18,linux_x86-64,Linux,64,2.6.35.13-92.fc14', str(es))
def test_parse_eval_summary_line(self):
es = parse_eval_summary_line('sadm.trunk.136.20,OFFICIAL,zufa,TEST,,2009-05-27 18:29:06-0600,11.08 0.44 9.18,linux_x86-64,Linux,64,2.6.35.13-92.fc14')
self.assertTrue(isinstance(es.build_id, build_id.BuildID))
def test_enum_to_str(self):
self.assertEqual('FAILED', enum_to_str(EvalResult, EvalResult.FAILED))
def test_str_to_enum(self):
self.assertEqual(EvalResult.FAILED, str_to_enum(EvalResult, "FAILED"))
if __name__ == '__main__':
unittest.main()
|
try:
from . import generic as g
except BaseException:
import generic as g
class VHACDTest(g.unittest.TestCase):
def test_vhacd(self):
# exit if no VHACD
if not g.trimesh.interfaces.vhacd.exists and not g.all_dep:
g.log.warning(
'not testing convex decomposition (no vhacd)!')
return
g.log.info('testing convex decomposition using vhacd')
# get a bunny
mesh = g.get_mesh('bunny.ply')
# run a convex decomposition using vhacd
decomposed = mesh.convex_decomposition(
maxhulls=10, debug=True)
if len(decomposed) != 10:
# it should return the correct number of meshes
raise ValueError('{} != 10'.format(len(decomposed)))
# make sure everything is convex
# also this will fail if the type is returned incorrectly
assert all(i.is_convex for i in decomposed)
# make sure every result is actually a volume
# ie watertight, consistent winding, positive nonzero volume
assert all(i.is_volume for i in decomposed)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
from .robot_model import RobotModel, create_robot
from .manipulators import *
|
import yaml
import csv
import time
import setup
textfile_path, deffile_path = setup.get_arguments()
from functions import generate_tables
analyzing = "\nAnalyzing '{textfile}' using definitions from '{deffile}'".format(
textfile=textfile_path,
deffile=deffile_path
)
print analyzing
with open(textfile_path, "r") as textfile:
text_lines_to_analyze=textfile.readlines()
with open(deffile_path, 'r') as stream:
    definitions_file = yaml.safe_load(stream)
normal_power_diff_table, weighted_power_diff_table, normal_edge_list_matrix, weighted_edge_list_matrix, binary_power_diff_table, connection_table, attribute_table = generate_tables(definitions_file, text_lines_to_analyze)
output_time = str(time.time()).split('.')[0]
tables_to_save = [
    ('Normal Power Differential Table', 'normal_power_diff_table', normal_power_diff_table),
    ('Weighted Power Differential Table', 'weighted_power_diff_table', weighted_power_diff_table),
    ('Normal Edge List Matrix', 'normal_edge_list_matrix', normal_edge_list_matrix),
    ('Weighted Edge List Matrix', 'weighted_edge_list_matrix', weighted_edge_list_matrix),
    ('Binary Power Differential Table', 'binary_power_diff_table', binary_power_diff_table),
    ('Connection Table', 'connection_table', connection_table),
    ('Attribute Table', 'attribute_table', attribute_table),
]
for label, filename, table in tables_to_save:
    outputfile_path = output_time + '_' + filename + '.csv'
    print('\nSaving ' + label + ' output to file: ' + outputfile_path + ' ... '),
    with open(outputfile_path, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(table)
    print('DONE!')
print '\nFile Analysis Complete!\n'
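# Hedged invocation sketch: setup.get_arguments() presumably reads the text
# and definitions paths from the command line, so a run might look like this
# (the script name and exact flags depend on that module):
#
#   python analyze.py corpus.txt definitions.yaml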
|
import json
import datetime
import os
def dir_size(path):
total_size = os.path.getsize(path)
for item in os.listdir(path):
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
total_size += os.path.getsize(itempath)
elif os.path.isdir(itempath):
total_size += dir_size(itempath)
return total_size
def folder_size(app, db_path):
root_size = os.path.getsize(db_path)
data_size = dir_size(app['DB_DATA_PATH'])
object_db_size = dir_size(app['DB_OBJECT_PATH'])
cumulative = root_size + data_size + object_db_size
return cumulative, object_db_size, data_size
def stats_written_today(path, today):
data = load_data(path)
if not len(data['item-bytes-overtime']) > 0:
return False, data
last_written_date = data['item-bytes-overtime'][-1][0]
if last_written_date == today:
return True, data
return False, data
def load_data(path):
with open(path) as data_file:
return json.load(data_file)
def update_global_db_stats(app):
stat_path = app['DB_STATISTICS_FILEPATH']
today = datetime.datetime.now().strftime('%Y-%m-%d')
uptodate, data = stats_written_today(stat_path, today)
if uptodate:
return
db_path = app['DB_ROOT_PATH']
cumulative, object_db_size, data_db_size = folder_size(app, db_path)
# XXX: this assumes that the DB can grow *only*
if len(data['item-bytes-overtime']) > 0 and \
cumulative <= data['item-bytes-overtime'][-1][1] + 1000:
        # The size does not differ greatly (1K) from the last one.
        # We do *not* write tiny changes here to keep the bookkeeping
        # information smaller. Sammler writes are usually from new
        # test results, but no new data objects.
return
data['item-bytes-overtime'].append([today, cumulative, data_db_size, object_db_size])
d_jsonfied = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
with open(stat_path, "w+") as f:
f.write(d_jsonfied)
def get(app):
path = app['DB_STATISTICS_FILEPATH']
with open(path) as data_file:
data = json.load(data_file)
if not len(data['item-bytes-overtime']) > 0:
        # If no data was ever written, fake a single zero entry for today,
        # matching the [date, cumulative, data, object] schema used above.
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        data['item-bytes-overtime'].append([today, 0, 0, 0])
return data
def data_write(path, data):
d = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
with open(path, "w+") as f:
f.write(d)
def update_mimetype_data_store(app, mimetype, size_raw, size_compressed, compressed):
    stat_path = app['DB_STATISTICS_FILEPATH']
    data = load_data(stat_path)
    ptr = data['data-compression']
    if mimetype not in ptr:
        ptr[mimetype] = list()
    # Record one entry per stored object, whether or not the mimetype is new.
    entry = dict()
    entry['compressed'] = compressed
    entry['size-raw'] = size_raw
    entry['size-compressed'] = size_compressed
    ptr[mimetype].append(entry)
    data_write(stat_path, data)
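# Hedged usage sketch (the app dict keys mirror those referenced above; the
# paths are illustrative):
#
#   app = {'DB_ROOT_PATH': '/var/lib/sammler',
#          'DB_DATA_PATH': '/var/lib/sammler/data',
#          'DB_OBJECT_PATH': '/var/lib/sammler/objects',
#          'DB_STATISTICS_FILEPATH': '/var/lib/sammler/statistics.json'}
#   update_global_db_stats(app)
#   print(get(app)['item-bytes-overtime'][-1])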
|
from machine import SPI, Pin
import upcd8544
# Use the imported SPI/Pin classes directly (the original referenced the
# machine module without importing it, and rebound the SPI class).
spi = SPI(0)  # GP14 (CLK) + GP16 (MOSI->DIN), User-LED jumper removed!
RST = Pin('GP24')
CE = Pin('GP12')
DC = Pin('GP22')
LIGHT = Pin('GP23')
lcd = upcd8544.PCD8544(spi, RST, CE, DC, LIGHT)
lcd.data([0xff])
lcd.data([0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa])
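# On the PCD8544, each data byte drives a vertical column of 8 pixels, so the
# first write above paints one solid column and the second write a small
# alternating (checkerboard-style) pattern.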
|
from utils.logger import Logger
import os, unittest
def foo():
logger = Logger().getLogger(__name__)
print 'Hello foo()'
logger.info('Hi, foo')
class TestLogger(unittest.TestCase):
def setUp(self):
self.logger = Logger().getLogger(__name__)
def test_log(self):
self.logger.debug("debug")
self.logger.info("info")
self.logger.warn("warn")
self.logger.error("error")
self.logger.critical("critical")
def test():
foo()
unittest.main()
if __name__ == '__main__': test()
|