text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import partial
import fast_ext
from fast_ext import double_integral, single_integral, aligned_single_integral
# Let's make a whole bunch of the c++ classes picklable!
# They all have constant internal state after initialization, so
# it is simple to pickle them -- just store the initialization arguments!
class Pickleable:
    """Mixin that makes a wrapped extension class picklable.

    Relies on ``self.args`` (the constructor arguments recorded by the
    generated ``__init__``); pickle re-runs the constructor with those
    arguments on unpickling.
    """

    def __getinitargs__(self):
        # Hand pickle the exact arguments used at construction time.
        return self.args
# We use some metaclassing to do this Pickleable-izing.
# Read here if unfamiliar:
# http://www.jeffknupp.com/blog/2013/12/28/improve-your-python-metaclasses-and-dynamic-classes-with-type/
# credit for the basic design goes here: http://stackoverflow.com/questions/9310053/how-to-make-my-swig-extension-module-work-with-pickle
# a reference on boost python pickling: http://www.boost.org/doc/libs/1_35_0/libs/python/doc/v2/pickle.html
def create_init(super_type):
    """Build an ``__init__`` that records its arguments before delegating.

    The returned function stores the positional arguments on ``self.args``
    (consumed later by ``Pickleable.__getinitargs__``) and then forwards
    them to ``super_type.__init__``.
    """
    def _recording_init(self, *ctor_args):
        self.args = ctor_args
        super_type.__init__(self, *ctor_args)
    return _recording_init
def create_pickleable_class(base_class):
    """Derive a picklable subclass of *base_class*.

    The new type keeps the original class name, mixes in ``Pickleable``,
    and swaps in an ``__init__`` that records its constructor arguments.
    """
    pickled_type = type(base_class.__name__, (base_class, Pickleable), {})
    pickled_type.__init__ = create_init(base_class)
    return pickled_type
# Names of the C++ extension classes exported by fast_ext that should be
# wrapped with pickling support.
ext_classes = ['PolyBasis', 'GradientBasis',
               'ConstantBasis', 'ZeroBasis', 'MappingEval', 'KernelData',
               'MassMatrixKernel', 'Kernel', 'DisplacementKernel',
               'TractionKernel', 'AdjointTractionKernel',
               'HypersingularKernel', 'RegularizedHypersingularKernel',
               'SemiRegularizedHypersingularKernel', 'QuadratureInfo',
               'CoeffBasis', 'InteriorPoint', 'AlignedInteriorPoint']
# Loop over all the classes and make them pickleable.
# Each wrapped class replaces the raw extension class in this module's
# namespace, so importers transparently get the picklable version.
for cls in ext_classes:
    globals()[cls] = create_pickleable_class(fast_ext.__dict__[cls])
| {
"repo_name": "tbenthompson/codim1",
"path": "codim1/fast_lib.py",
"copies": "1",
"size": "1782",
"license": "mit",
"hash": -7902335764775118000,
"line_mean": 47.1621621622,
"line_max": 137,
"alpha_frac": 0.7070707071,
"autogenerated": false,
"ratio": 3.592741935483871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47998126425838705,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import functools
import time
import weakref
import asyncio
from copy import deepcopy
from numpy.lib.arraysetops import isin
# Throttling intervals (seconds) for graph and status observation.
OBSERVE_GRAPH_DELAY = 0.23  # 23 is not a multiple of 50
OBSERVE_STATUS_DELAY = 0.5
# Minimum gap between pushes of the aggregated status dict into ctx2.
OBSERVE_STATUS_DELAY2 = 0.2
# All live StatusObserver instances, one per (ctx, ctx2) pair.
status_observers = []
class StatusObserver:
    """Collects status/exception updates from a monitored context (ctx)
    and pushes them into the ``status_`` cell of the status context (ctx2).

    Both contexts are held via weak references so this observer never
    keeps them alive. Pushes are rate-limited to one per
    OBSERVE_STATUS_DELAY2 seconds; a background task flushes any pending
    ("dirty") state that the rate limit suppressed.
    """

    def __init__(self, ctx, ctx2):
        self.ctx = weakref.ref(ctx)
        self.ctx2 = weakref.ref(ctx2)
        # Flat mapping of dotted-path string -> last observed status value.
        self.status = {}
        self._dirty = True
        # path tuple -> observer handle returned by ctx.observe().
        self.observers = {}
        self.last_time = None
        # Periodic flusher task; runs for the lifetime of the event loop.
        self.runner = asyncio.ensure_future(self._run())

    def _callback(self, path, status):
        # Record one update under a dotted-path key; None removes the entry.
        path2 = ".".join(path)
        if status is None:
            self.status.pop(path2, None)
        else:
            self.status[path2] = status
        self._dirty = True
        self._update()

    def _update(self):
        # Push the accumulated status dict into ctx2.status_, but only when
        # something changed and the rate limit has elapsed.
        if not self._dirty:
            return
        t = time.time()
        if self.last_time is None or t - self.last_time > OBSERVE_STATUS_DELAY2:
            ctx, ctx2 = self.ctx(), self.ctx2()
            if ctx is None or ctx2 is None:
                return
            # Skip while either context is untranslated or being torn down.
            if ctx._gen_context is None or ctx2._gen_context is None:
                return
            if ctx._gen_context._destroyed or ctx2._gen_context._destroyed:
                return
            try:
                c = ctx2.status_
                if not isinstance(c, Cell):
                    return
                c.set(self.status)
            except Exception:
                # Best effort: a failed push stays dirty and is retried on
                # the next flush.
                pass
            self.last_time = t
            self._dirty = False

    async def _run(self):
        # Background loop flushing updates the rate limiter deferred.
        while 1:
            await asyncio.sleep(OBSERVE_STATUS_DELAY2)
            self._update()

    def observe(self, path):
        # Start observing *path* (a tuple) in the monitored context.
        ctx, ctx2 = self.ctx(), self.ctx2()
        if ctx is None or ctx2 is None:
            return
        callback = functools.partial(self._callback, path)
        # Clear any stale entry for this path before (re-)subscribing.
        callback(None)
        observer = ctx.observe(path, callback, OBSERVE_STATUS_DELAY, observe_none=True)
        # Replace a pre-existing observer for the same path, if any.
        self.destroy(path)
        self.observers[path] = observer

    def destroy(self, path):
        # Stop observing *path* and clear its entry from the status dict.
        observer = self.observers.pop(path, None)
        if observer is None:
            return
        callback = observer.callback
        observer.destroy()
        # Signal removal so the aggregated status no longer lists this path.
        callback(None)
def observe_graph(ctx, ctx2, graph):
    """Mirror *graph* (the monitored context's graph) into ctx2 and
    (re)wire per-node status observation.

    Invoked via ``ctx.observe(("get_graph",), ...)`` whenever the graph
    of the monitored context changes.
    """
    # Prefer the runtime graph cell when the status context defines one;
    # otherwise fall back to the plain "graph" cell.
    try:
        graph_rt = ctx2.graph_rt
    except AttributeError:
        graph_rt = None
    if isinstance(graph_rt, Cell):
        graph_rt.set(deepcopy(graph))
    else:
        try:
            graph_cell = ctx2.graph
        except AttributeError:
            graph_cell = None
        if isinstance(graph_cell, Cell):
            graph_cell.set(deepcopy(graph))
    # Reuse the StatusObserver for this (ctx, ctx2) pair, creating it on
    # first use.
    for status_observer in status_observers:
        if status_observer.ctx() is ctx and status_observer.ctx2() is ctx2:
            break
    else:
        status_observer = StatusObserver(ctx, ctx2)
        status_observers.append(status_observer)
    # Anything left in this set after the walk below has disappeared from
    # the graph and gets its observer destroyed.
    paths_to_delete = set(status_observer.observers.keys())
    for node in graph["nodes"]:
        path = tuple(node["path"])
        if node["type"] == "cell":
            paths = [path]
        elif node["type"] == "transformer":
            # Watch both the transformer itself and its input pin.
            paths = [
                path,
                path + (node["INPUT"],),
            ]
        else:  # TODO: macro
            continue
        for path in paths:
            for attr in ("status", "exception"):
                subpath = path + (attr,)
                if subpath in status_observer.observers:
                    paths_to_delete.discard(subpath)
                # Always (re-)observe so callbacks track the current graph.
                status_observer.observe(subpath)
    for dpath in paths_to_delete:
        status_observer.destroy(dpath)
    #print("DONE")
def bind_status_graph(ctx, status_graph, *, zips=None, mounts=False, shares=True):
    """Creates context that will monitor the status of ctx

    The context is loaded from status_graph, which must be a graph in JSON format.
    It uses the same manager as ctx.
    The status graph's underlying buffers must be available already
    (from add_zip or via Seamless database)
    The status graph must have a cell called "graph",
    and normally, also a cell shared as "index.html"
    The status graph will receive the share namespace "status"
    mounts and shares have the same meaning as in from_graph
    Additional zips can be provided.
    They will be passed to ctx.add_zip before the graph is loaded
    """
    # FIX: docstring previously opened with four quotes (""""Creates...),
    # leaving a stray '"' as the first character of the doc text.
    # Imported here, not at module level, to avoid a circular import.
    from seamless.highlevel import Context
    ctx2 = Context()
    if zips is not None:
        for zipf in zips:
            ctx2.add_zip(zipf)
    ctx2.share_namespace = "status"
    ctx2.set_graph(
        status_graph,
        mounts=mounts,
        shares=shares
    )
    assert "graph" in ctx2.get_children()
    observe_graph_bound = partial(
        observe_graph, ctx, ctx2
    )
    ctx2.translate()
    # Runtime graph updates drive both the mirrored graph and the status
    # observers (see observe_graph).
    params = {"runtime": True}
    ctx.observe(("get_graph",), observe_graph_bound, OBSERVE_GRAPH_DELAY, params=params)

    def observe2(graph):
        # Mirror the static graph only when a runtime graph cell exists,
        # so the two graph cells stay consistent.
        try:
            graph_rt = ctx2.graph_rt
        except AttributeError:
            graph_rt = None
        if not isinstance(graph_rt, Cell):
            return
        ctx2.graph.set(deepcopy(graph))

    ctx.observe(("get_graph",), observe2, OBSERVE_GRAPH_DELAY)
    return ctx2
async def bind_status_graph_async(ctx, status_graph, *, zips=None, mounts=False, shares=True):
    """Creates context that will monitor the status of ctx

    The context is loaded from status_graph, which must be a graph in JSON format.
    It uses the same manager as ctx.
    The status graph's underlying buffers must be available already
    (from add_zip or via database)
    The status graph must have a cell called "graph",
    and normally, also a cell shared as "index.html"
    The status graph will receive the share namespace "status"
    mounts and shares have the same meaning as in from_graph
    Additional zips can be provided.
    They will be passed to ctx.add_zip before the graph is loaded
    """
    # FIX: docstring previously opened with four quotes (""""Creates...),
    # leaving a stray '"' as the first character of the doc text.
    from seamless.highlevel import Context
    ctx2 = Context()
    if zips is not None:
        for zipf in zips:
            ctx2.add_zip(zipf)
    ctx2.share_namespace = "status"
    ctx2.set_graph(
        status_graph,
        mounts=mounts,
        shares=shares
    )
    assert "graph" in ctx2.get_children()
    observe_graph_bound = partial(
        observe_graph, ctx, ctx2,
    )
    # Async variant: await the translation instead of blocking translate().
    await ctx2.translation()
    params = {"runtime": True}
    ctx.observe(("get_graph",), observe_graph_bound, OBSERVE_GRAPH_DELAY, params=params)

    def observe2(graph):
        # NOTE(review): unlike bind_status_graph, this variant mirrors the
        # graph unconditionally (no graph_rt isinstance check) — confirm
        # whether the asymmetry is intentional.
        ctx2.graph.set(deepcopy(graph))

    ctx.observe(("get_graph",), observe2, OBSERVE_GRAPH_DELAY)
    return ctx2
from seamless.highlevel import Cell | {
"repo_name": "sjdv1982/seamless",
"path": "seamless/metalevel/bind_status_graph.py",
"copies": "1",
"size": "6632",
"license": "mit",
"hash": 1206266007567772700,
"line_mean": 30.4360189573,
"line_max": 94,
"alpha_frac": 0.6102231604,
"autogenerated": false,
"ratio": 3.8335260115606937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943749171960694,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import gc
from operator import add
import weakref
import sys
import pytest
from distributed.protocol import deserialize, serialize
from distributed.protocol.pickle import HIGHEST_PROTOCOL, dumps, loads
try:
    # Pickle protocol 5 out-of-band buffers (Python 3.8+); the tests below
    # fall back to in-band behavior when unavailable.
    from pickle import PickleBuffer
except ImportError:
    pass
def test_pickle_data():
    """Simple builtins round-trip through dumps/loads and the "pickle"
    serializer unchanged."""
    samples = [1, b"123", "123", [123], {}, set()]
    for sample in samples:
        assert loads(dumps(sample)) == sample
        header_frames = serialize(sample, serializers=("pickle",))
        assert deserialize(*header_frames) == sample
def test_pickle_out_of_band():
    """Out-of-band (protocol 5) buffers survive dumps/loads and the
    "pickle" serializer, with a tobytes() fallback pre-protocol-5."""
    class MemoryviewHolder:
        # Wraps a memoryview and opts into out-of-band pickling whenever
        # the protocol supports PickleBuffer.
        def __init__(self, mv):
            self.mv = memoryview(mv)

        def __reduce_ex__(self, protocol):
            if protocol >= 5:
                return MemoryviewHolder, (PickleBuffer(self.mv),)
            else:
                return MemoryviewHolder, (self.mv.tobytes(),)

    mv = memoryview(b"123")
    mvh = MemoryviewHolder(mv)

    if HIGHEST_PROTOCOL >= 5:
        l = []
        d = dumps(mvh, buffers=None, buffer_callback=l.append) if False else dumps(mvh, buffer_callback=l.append)
        mvh2 = loads(d, buffers=l)
        # Exactly one buffer should have been handed to the callback.
        assert len(l) == 1
        assert isinstance(l[0], PickleBuffer)
        assert memoryview(l[0]) == mv
    else:
        mvh2 = loads(dumps(mvh))

    assert isinstance(mvh2, MemoryviewHolder)
    assert isinstance(mvh2.mv, memoryview)
    assert mvh2.mv == mv

    # Same round-trip through the distributed serializer machinery.
    h, f = serialize(mvh, serializers=("pickle",))
    mvh3 = deserialize(h, f)
    assert isinstance(mvh3, MemoryviewHolder)
    assert isinstance(mvh3.mv, memoryview)
    assert mvh3.mv == mv

    if HIGHEST_PROTOCOL >= 5:
        # Frames: the pickled payload plus the out-of-band buffer.
        assert len(f) == 2
        assert isinstance(f[0], bytes)
        assert isinstance(f[1], memoryview)
        assert f[1] == mv
    else:
        assert len(f) == 1
        assert isinstance(f[0], bytes)
def test_pickle_numpy():
    """NumPy arrays round-trip; under protocol 5 the array's data buffer
    is handed to the buffer callback instead of being embedded."""
    np = pytest.importorskip("numpy")
    x = np.ones(5)
    assert (loads(dumps(x)) == x).all()
    assert (deserialize(*serialize(x, serializers=("pickle",))) == x).all()

    x = np.ones(5000)
    assert (loads(dumps(x)) == x).all()
    assert (deserialize(*serialize(x, serializers=("pickle",))) == x).all()

    if HIGHEST_PROTOCOL >= 5:
        x = np.ones(5000)
        l = []
        d = dumps(x, buffer_callback=l.append)
        # One out-of-band buffer, backed by the same memory as the array.
        assert len(l) == 1
        assert isinstance(l[0], PickleBuffer)
        assert memoryview(l[0]) == memoryview(x)
        assert (loads(d, buffers=l) == x).all()

        h, f = serialize(x, serializers=("pickle",))
        assert len(f) == 2
        assert isinstance(f[0], bytes)
        assert isinstance(f[1], memoryview)
        assert (deserialize(h, f) == x).all()
@pytest.mark.xfail(
    sys.version_info[:2] == (3, 8),
    reason="Sporadic failure on Python 3.8",
    strict=False,
)
def test_pickle_functions():
    """Pickled closures/lambdas/partials behave like the originals, and
    neither originals nor copies are kept alive after deletion."""
    def make_closure():
        value = 1

        def f(x):  # closure
            return x + value

        return f

    def funcs():
        yield make_closure()
        yield (lambda x: x + 1)
        yield partial(add, 1)

    for func in funcs():
        wr = weakref.ref(func)
        func2 = loads(dumps(func))
        wr2 = weakref.ref(func2)
        assert func2(1) == func(1)

        func3 = deserialize(*serialize(func, serializers=("pickle",)))
        wr3 = weakref.ref(func3)
        assert func3(1) == func(1)

        # Dropping all strong references must free every copy.
        del func, func2, func3
        gc.collect()
        assert wr() is None
        assert wr2() is None
        assert wr3() is None
| {
"repo_name": "blaze/distributed",
"path": "distributed/protocol/tests/test_pickle.py",
"copies": "1",
"size": "3414",
"license": "bsd-3-clause",
"hash": 387636780172350100,
"line_mean": 24.8636363636,
"line_max": 75,
"alpha_frac": 0.5779144698,
"autogenerated": false,
"ratio": 3.6012658227848102,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.467918029258481,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import gc
import time
import sys
from py4j.java_gateway import JavaGateway, CallbackServerParameters
# Fixed small count for the huge-parameter benchmark (each call sends a
# 10 MB string; see longParamCall in main()).
ITERATIONS_FOR_LENGTHY_METHOD = 3
class ComparablePython(object):
    """Python-side java.lang.Comparable implementation, used to exercise
    Java -> Python callbacks (e.g. Collections.sort on a list of these)."""

    def __init__(self, value):
        self.value = value

    def compareTo(self, obj):
        if obj is None:
            # Hack to return the value of this object.
            return self.value
        # Fetch the other object's value via the same None-hack, then
        # compare by difference as Comparable requires.
        value = obj.compareTo(None)
        return self.value - value

    class Java:
        # py4j marker: the Java interfaces this Python object implements.
        implements = ["java.lang.Comparable"]
def callStaticMethodNoParam(iterations, staticMethod):
    """Invoke *staticMethod* with no arguments *iterations* times.

    Returns the final call's result so Python cannot discard the
    output value.
    """
    count, result = 0, 0
    while count < iterations:
        result = staticMethod()
        count += 1
    return result
def callInstanceMethodWithShortParam(iterations, instanceMethod):
    """Call *instanceMethod* repeatedly, alternating a short string
    argument with an int argument, *iterations* times each."""
    shortParam = "Super Long Param"
    done = 0
    while done < iterations:
        instanceMethod(shortParam)
        instanceMethod(1)
        done += 1
def callFunc(iterations, func):
    """Call *func* with no arguments *iterations* times and return the
    last result (None when *iterations* is zero)."""
    result = None
    done = 0
    while done < iterations:
        result = func()
        done += 1
    return result
def benchmark(name, func):
    """Time a single invocation of *func*, print "<seconds> - <name>",
    then collect garbage so leftovers do not skew the next benchmark."""
    started = time.time()
    func()
    elapsed = time.time() - started
    print("{0} - {1}".format(elapsed, name))
    gc.collect()
def main(iterations):
    """Run the py4j benchmark suite against a running ExampleApplication.

    *iterations* drives the cheap benchmarks; the collection/callback
    benchmarks run at roughly one tenth of that, and the huge-parameter
    benchmark at a small fixed count.
    """
    # FIX: use floor division so small_iterations stays an int under
    # Python 3 ("/" would produce a float loop bound and slightly change
    # the iteration count vs. the original Python 2 integer division).
    small_iterations = iterations // 10 if iterations > 10 else iterations
    gateway = JavaGateway(
        callback_server_parameters=CallbackServerParameters())
    currentTimeMillis = gateway.jvm.java.lang.System.currentTimeMillis
    sb = gateway.jvm.java.lang.StringBuilder()
    append = sb.append
    sb2 = gateway.jvm.java.lang.StringBuilder()

    def reflection():
        # Resolve the Java method attribute on every call.
        sb2.append(2)
        sb2.append("hello")

    def constructorAndMemoryManagement():
        # Creates (and abandons) a fresh Java object each call.
        sb = gateway.jvm.java.lang.StringBuilder("Hello World")
        sb.append("testing")

    def javaCollection():
        # Round-trips several value types through a Java ArrayList.
        al = gateway.jvm.java.util.ArrayList()
        al.append("test")
        al.append(1)
        al.append(True)
        len(al)
        result = []
        for elem in al:
            result.append(elem)
        return result

    def callBack():
        # Java sorts Python Comparable objects -> Java->Python callbacks.
        al = gateway.jvm.java.util.ArrayList()
        cp10 = ComparablePython(10)
        cp1 = ComparablePython(1)
        cp5 = ComparablePython(5)
        cp7 = ComparablePython(7)
        al.append(cp10)
        al.append(cp1)
        al.append(cp5)
        al.append(cp7)
        gateway.jvm.java.util.Collections.sort(al)

    def longParamCall():
        # 10 MB string parameter stresses the parameter-transfer path.
        longParam = "s" * 1024 * 1024 * 10
        sb = gateway.jvm.java.lang.StringBuilder()
        sb.append(longParam)
        sb.toString()

    benchmark(
        "callStaticMethodNoParam",
        partial(callStaticMethodNoParam, iterations, currentTimeMillis))
    benchmark(
        "callInstanceMethodWithShortParam",
        partial(callInstanceMethodWithShortParam, iterations, append))
    benchmark(
        "callWithReflection",
        partial(callFunc, iterations, reflection))
    benchmark(
        "constructorAndMemoryManagement",
        partial(callFunc, iterations, constructorAndMemoryManagement))
    benchmark(
        "longParamAndMemoryManagement",
        partial(callFunc, ITERATIONS_FOR_LENGTHY_METHOD, longParamCall))
    benchmark(
        "javaCollection",
        partial(callFunc, small_iterations, javaCollection))
    benchmark(
        "callBack",
        partial(callFunc, small_iterations, callBack))
    gateway.shutdown()
if __name__ == "__main__":
    # 1. Run py4j-java, e.g.,
    # cd py4j-java; ./gradlew testsJar;
    # java -Xmx4096m -cp build/libs/py4j-tests-0.10.0.jar \
    # py4j.example.ExampleApplication
    # 2. Run python program:
    # cd py4j-python; export PYTHONPATH=src
    # python3 src/py4j/tests/benchmark1.py
    # Default iteration count; override with a single CLI argument.
    iterations = 100000
    if len(sys.argv) > 1:
        iterations = int(sys.argv[1])
    main(iterations)
| {
"repo_name": "nadav-har-tzvi/amaterasu",
"path": "executor/src/test/resources/py4j/tests/benchmark1.py",
"copies": "4",
"size": "3946",
"license": "apache-2.0",
"hash": 5024822875718897000,
"line_mean": 26.0273972603,
"line_max": 79,
"alpha_frac": 0.6315255955,
"autogenerated": false,
"ratio": 3.880039331366765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 146
} |
from functools import partial
import gevent
from gevent import monkey
from gevent.pool import Pool
monkey.patch_all(thread=False, select=False)
class AsyncInflux(object):
    """One deferred InfluxDB operation ("query" or "write").

    The operation runs in :meth:`send`; its outcome is stored on
    ``self.response`` and any error on ``self.exception`` instead of
    being raised, so results can be inspected after a gevent joinall.
    """

    def __init__(self, method, infdb, **kwargs):
        self.infdb = infdb
        self.method = method
        self.kwargs = kwargs
        self.response = None
        self.exception = None

    def send(self, **kwargs):
        # FIX: was `*kwargs`, which collected positional args as a tuple
        # and made per-send keyword overrides impossible; `**kwargs` now
        # merges call-time keywords over the stored constructor kwargs.
        merged_kwargs = {}
        merged_kwargs.update(self.kwargs)
        merged_kwargs.update(kwargs)
        if self.method == "query":
            series = merged_kwargs.get("series")
            querystring = merged_kwargs.get("query")
            try:
                if not series:
                    self.response = self.infdb.query(querystring)
                else:
                    self.response = safe_influx_query(self.infdb, querystring, series)
            except Exception as e:
                # Deliberate best-effort: errors are captured, not raised.
                self.exception = e
        elif self.method == "write":
            points = merged_kwargs.get("points")
            try:
                self.infdb.write_points(points)
                self.response = True
            except Exception as e:
                self.exception = e
        return self
def send(r, pool=None):
    """Spawn ``r.send`` as a greenlet — on *pool* when one is given,
    otherwise directly via gevent."""
    spawner = pool.spawn if pool is not None else gevent.spawn
    return spawner(r.send)
# Convenience constructors: write_point(infdb, points=...) and
# query(infdb, query=..., series=...).
write_point = partial(AsyncInflux, 'write')
query = partial(AsyncInflux, 'query')
def map_influx_results(requests, size=None, exception_handler=None):
    """Execute *requests* (AsyncInflux objects) concurrently and wait for
    all of them to finish.

    If *size* is given, concurrency is capped with a gevent Pool.
    FIX: now returns the request objects so callers can inspect their
    ``.response`` / ``.exception`` — previously nothing was returned,
    making the results unreachable despite the function's name.

    NOTE(review): *exception_handler* is accepted but never used —
    confirm whether per-request exceptions should be routed through it.
    """
    requests = list(requests)
    pool = Pool(size) if size else None
    jobs = [send(r, pool) for r in requests]
    gevent.joinall(jobs)
    return requests
def check_series_exists(infdb, series):
    """Return True when *series* is listed by the InfluxDB instance."""
    probe = "list series /^%s$/" % series
    matches = infdb.query(probe)[0]['points']
    return len(matches) > 0
def safe_influx_query(infdb, select_string, series):
    """Run *select_string* only when *series* exists; otherwise return an
    empty result structure instead of querying."""
    if not check_series_exists(infdb, series):
        return [{"columns": [], "points": []}]
    return infdb.query(select_string)
| {
"repo_name": "Ombitron/async-influx",
"path": "ginflux.py",
"copies": "1",
"size": "2055",
"license": "bsd-3-clause",
"hash": 4260940107667650000,
"line_mean": 25.6883116883,
"line_max": 86,
"alpha_frac": 0.597080292,
"autogenerated": false,
"ratio": 3.8994307400379506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9995764651981226,
"avg_score": 0.00014927601134497685,
"num_lines": 77
} |
from functools import partial
import glob
import json
import logging
import os
import hashlib
import mimetypes
import pprint
import requests
from daf_fruit_dist.checksums import Checksums
from daf_fruit_dist.file_management import get_file_digests
# HTTP header names used when talking to Artifactory.
_HEADER_USER_AGENT = 'User-Agent'
_HEADER_MD5_CHECKSUM = 'X-Checksum-Md5'
_HEADER_SHA1_CHECKSUM = 'X-Checksum-Sha1'
_HEADER_CONTENT_TYPE = 'Content-Type'
_HEADER_CONTENT_ENCODING = 'Content-Encoding'
# Artifactory-specific media types for the REST payloads below.
_CONTENT_TYPE_PROMOTION_REQUEST = (
    'application/vnd.org.jfrog.artifactory.build.PromotionRequest+json')
_CONTENT_TYPE_PUBLISH_BUILD_INFO = (
    'application/vnd.org.jfrog.artifactory+json')
def deploy_file(
        repo_base_url,
        repo_push_id,
        path,
        filename,
        attributes=None,
        username=None,
        password=None,
        verify_cert=True):
    """
    Deploy the file to the /path/ directory at the given URL. A
    dictionary (or pre-formatted string) of attributes may also be
    supplied.
    """
    def store_hashes_in_headers(headers):
        # Checksum headers accompany the upload (single pass computes both).
        md5, sha1 = get_file_digests(
            filename,
            digests=(hashlib.md5(), hashlib.sha1()))
        headers[_HEADER_MD5_CHECKSUM] = md5.hexdigest()
        headers[_HEADER_SHA1_CHECKSUM] = sha1.hexdigest()

    def store_mimetypes_in_headers(headers):
        # Best-effort content type/encoding guessed from the file name.
        content_type, content_enc = mimetypes.guess_type(filename)
        if content_type:
            headers[_HEADER_CONTENT_TYPE] = content_type
        if content_enc:
            headers[_HEADER_CONTENT_ENCODING] = content_enc

    def generate_uri():
        # <base>/<repo>/<path>/<basename>[;k1=v1;k2=v2...]
        basename = os.path.basename(filename)
        norm_path = _normalize_path(path)
        uri = '{url}/{repo_push_id}/{path}/{basename}'.format(
            url=repo_base_url,
            repo_push_id=repo_push_id,
            path=norm_path,
            basename=basename)
        if attributes:
            # NOTE: iteritems/basestring below mean this module targets
            # Python 2.
            if isinstance(attributes, dict):
                uri += ';' + ';'.join(
                    '{}={}'.format(k, v) for k, v in attributes.iteritems())
            elif isinstance(attributes, basestring):
                uri += ';' + attributes
            else:
                raise TypeError(
                    '"attributes" must be either a dictionary or a pre-'
                    'formatted string of "key1=value1;key2=value2" pairs')
        return uri

    def upload_file(deploy_uri):
        logging.info('Deploying: ' + deploy_uri)
        auth = (username, password) if (username or password) else None
        # Upload the file contents as the request body.
        with open(filename, 'rb') as f:
            response = requests.put(
                deploy_uri,
                data=f,
                auth=auth,
                headers=headers,
                verify=verify_cert)
            _log_response(response)

    # Shared by the closures above.
    headers = _make_headers()
    store_hashes_in_headers(headers)
    store_mimetypes_in_headers(headers)
    upload_file(generate_uri())
def deploy_globbed_files(
        repo_base_url,
        repo_push_id,
        path,
        glob_patterns,
        attributes=None,
        username=None,
        password=None,
        verify_cert=True):
    """
    Like deploy_file, except this function takes a list of globbing
    patterns. All files (NOT directories) matched by these patterns are
    deployed to the server.
    """
    logging.debug("Entering deploy_globbed_files with:")
    logging.debug("   repo_base_url: {}".format(repo_base_url))
    logging.debug("   repo_push_id: {}".format(repo_push_id))
    logging.debug("   path: {}".format(path))
    logging.debug("   glob_patterns: {}".format(glob_patterns))
    # deploy_file with every field bound except the filename.
    deploy = partial(
        deploy_file,
        repo_base_url=repo_base_url,
        repo_push_id=repo_push_id,
        path=path,
        attributes=attributes,
        username=username,
        password=password,
        verify_cert=verify_cert)
    # A set, not a list: files matched by several patterns upload once.
    filenames = set()
    for pattern in glob_patterns:
        filenames.update(
            match for match in glob.glob(pattern) if os.path.isfile(match))
    logging.debug("Found filenames: {}".format(", ".join(filenames)))
    for name in filenames:
        deploy(filename=name)
    return filenames
def build_promote(
        username,
        password,
        repo_base_url,
        build_name,
        build_number,
        promotion_request,
        verify_cert=True):
    """POST a build-promotion request to Artifactory and return the JSON
    body of the response. Raises on an HTTP error status."""
    uri = '{url}/api/build/promote/{build_name}/{build_number}'.format(
        url=repo_base_url,
        build_name=build_name,
        build_number=build_number)
    # Stable key order keeps the wire payload deterministic.
    payload = json.dumps(promotion_request.as_json_data, sort_keys=True)
    headers = _make_headers()
    headers[_HEADER_CONTENT_TYPE] = _CONTENT_TYPE_PROMOTION_REQUEST
    response = requests.post(
        uri,
        data=payload,
        headers=headers,
        auth=_make_auth(username, password),
        verify=verify_cert)
    _log_response(response)
    response.raise_for_status()
    return response.json()
def publish_build_info(
        username,
        password,
        repo_base_url,
        build_info,
        verify_cert=True):
    """PUT build-info JSON to Artifactory's /api/build endpoint.
    Raises on an HTTP error status."""
    payload = json.dumps(build_info.as_json_data, sort_keys=True)
    uri = '{url}/api/build'.format(url=repo_base_url)
    headers = _make_headers()
    headers[_HEADER_CONTENT_TYPE] = _CONTENT_TYPE_PUBLISH_BUILD_INFO
    response = requests.put(
        uri,
        data=payload,
        headers=headers,
        auth=_make_auth(username, password),
        verify=verify_cert)
    _log_response(response=response)
    response.raise_for_status()
def determine_checksums(
        username,
        password,
        repo_base_url,
        repo_pull_id,
        file_path,
        verify_cert=True):
    """Fetch the md5/sha1 checksums Artifactory stores for *file_path*.

    Raises RuntimeError when the artifact exists but reports no
    checksums, and propagates HTTP errors from the storage-info request.
    """
    uri = '{url}/api/storage/{repo_pull_id}/{file_path}'.format(
        url=repo_base_url,
        repo_pull_id=repo_pull_id,
        file_path=file_path)
    response = requests.get(
        uri,
        headers=_make_headers(),
        auth=_make_auth(username, password),
        verify=verify_cert)
    response.raise_for_status()
    body = response.json()
    if 'checksums' not in body:
        raise RuntimeError(
            "Artifact found in Artifactory but no checksums were available.")
    checksum_data = body['checksums']
    return Checksums(
        sha1=checksum_data.get('sha1', None),
        md5=checksum_data.get('md5', None))
def _normalize_path(path):
return path.strip('/')
def _make_auth(username=None, password=None):
return (username, password) if (username or password) else None
def _make_headers():
    """Base headers sent with every Artifactory request."""
    return {_HEADER_USER_AGENT: 'FruitDist/1.0'}
def _log_response(response):
    """Debug-log a requests response: headers, then the JSON body when it
    parses, otherwise the raw text (if any)."""
    _log_data_structure('response_headers', response.headers)
    try:
        _log_data_structure('response_json', response.json())
    # NOTE: StandardError exists only on Python 2; this module also uses
    # iteritems/basestring elsewhere, so it targets Python 2.
    except StandardError:
        response_text = getattr(response, 'text', None)
        if response_text:
            logging.debug('response_text: {}'.format(response_text))
def _log_data_structure(title, data_structure):
    """Debug-log *data_structure* pretty-printed and indented under a
    *title* line."""
    pretty = pprint.pformat(data_structure)
    shifted_body = '\n '.join(pretty.splitlines())
    logging.debug('{}:\n {}'.format(title, shifted_body))
| {
"repo_name": "teamfruit/defend_against_fruit",
"path": "defend_against_fruit/daf_fruit_dist/daf_fruit_dist/artifactory/artifactory_rest.py",
"copies": "1",
"size": "7632",
"license": "apache-2.0",
"hash": 3571977514060902400,
"line_mean": 26.5523465704,
"line_max": 77,
"alpha_frac": 0.6126834382,
"autogenerated": false,
"ratio": 3.722926829268293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9835610267468293,
"avg_score": 0,
"num_lines": 277
} |
from functools import partial
import gym
from gym.spaces import Box, Dict, Discrete
import numpy as np
import unittest
import ray
from ray.rllib.models import ActionDistribution, ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.preprocessors import NoPreprocessor, Preprocessor
from ray.rllib.models.tf.tf_action_dist import MultiActionDistribution, \
TFActionDistribution
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import framework_iterator
# Framework handles via RLlib's optional-import helpers — presumably
# None-ish stubs when the framework is not installed (TODO confirm).
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class CustomPreprocessor(Preprocessor):
    # Minimal preprocessor stub: reported observation shape is always [1].
    def _init_shape(self, obs_space, options):
        return [1]
class CustomPreprocessor2(Preprocessor):
    # Second, distinct stub so registration tests can tell the two apart.
    def _init_shape(self, obs_space, options):
        return [1]
class CustomModel(TFModelV2):
    # Stub model: constant 5-wide zero output, no recurrent state.
    def _build_layers(self, *args):
        return tf.constant([[0] * 5]), None
class CustomActionDistribution(TFActionDistribution):
    """Test distribution whose output shape can be overridden via
    model_config["custom_model_config"]["output_dim"]."""

    def __init__(self, inputs, model):
        # Store our output shape.
        custom_model_config = model.model_config["custom_model_config"]
        if "output_dim" in custom_model_config:
            # Keep the batch dimension, replace the rest with output_dim.
            self.output_shape = tf.concat(
                [tf.shape(inputs)[:1], custom_model_config["output_dim"]],
                axis=0)
        else:
            self.output_shape = tf.shape(inputs)
        super().__init__(inputs, model)

    @staticmethod
    def required_model_output_shape(action_space, model_config=None):
        # FIX: `or {}` already guarantees a dict, so the original redundant
        # `is not None` check is dropped.
        custom_model_config = model_config["custom_model_config"] or {}
        if custom_model_config.get("output_dim"):
            return custom_model_config.get("output_dim")
        # Default: the raw action-space shape.
        return action_space.shape

    @override(TFActionDistribution)
    def _build_sample_op(self):
        # Samples are uniform noise of the configured shape.
        return tf.random.uniform(self.output_shape)

    @override(ActionDistribution)
    def logp(self, x):
        # Constant zero log-prob, shaped like the output.
        return tf.zeros(self.output_shape)
class CustomMultiActionDistribution(MultiActionDistribution):
    # Entropy is deliberately unimplemented so tests can assert the
    # NotImplementedError surfaces through the catalog-built class.
    @override(MultiActionDistribution)
    def entropy(self):
        raise NotImplementedError
class TestModelCatalog(unittest.TestCase):
    """Tests for ModelCatalog registration/lookup of preprocessors,
    models and action distributions."""

    def tearDown(self):
        # Each test calls ray.init(); always tear it down again.
        ray.shutdown()

    def test_custom_preprocessor(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)
        ModelCatalog.register_custom_preprocessor("foo", CustomPreprocessor)
        ModelCatalog.register_custom_preprocessor("bar", CustomPreprocessor2)
        env = gym.make("CartPole-v0")
        # Registered names resolve to the registered classes...
        p1 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "foo"})
        self.assertEqual(str(type(p1)), str(CustomPreprocessor))
        p2 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "bar"})
        self.assertEqual(str(type(p2)), str(CustomPreprocessor2))
        # ...and the default for this env is no preprocessing at all.
        p3 = ModelCatalog.get_preprocessor(env)
        self.assertEqual(type(p3), NoPreprocessor)

    def test_default_models(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)
        for fw in framework_iterator(frameworks=("jax", "tf", "tf2", "torch")):
            # 1D observation -> fully connected net by default.
            obs_space = Box(0, 1, shape=(3, ), dtype=np.float32)
            p1 = ModelCatalog.get_model_v2(
                obs_space=obs_space,
                action_space=Discrete(5),
                num_outputs=5,
                model_config={},
                framework=fw,
            )
            self.assertTrue("FullyConnectedNetwork" in type(p1).__name__)
            # Do a test forward pass.
            obs = np.array([obs_space.sample()])
            if fw == "torch":
                obs = torch.from_numpy(obs)
            out, state_outs = p1({"obs": obs})
            self.assertTrue(out.shape == (1, 5))
            self.assertTrue(state_outs == [])

            # No Conv2Ds for JAX yet.
            if fw != "jax":
                # Image-shaped observation -> vision net by default.
                p2 = ModelCatalog.get_model_v2(
                    obs_space=Box(0, 1, shape=(84, 84, 3), dtype=np.float32),
                    action_space=Discrete(5),
                    num_outputs=5,
                    model_config={},
                    framework=fw,
                )
                self.assertTrue("VisionNetwork" in type(p2).__name__)

    def test_custom_model(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)
        ModelCatalog.register_custom_model("foo", CustomModel)
        p1 = ModelCatalog.get_model_v2(
            obs_space=Box(0, 1, shape=(3, ), dtype=np.float32),
            action_space=Discrete(5),
            num_outputs=5,
            model_config={"custom_model": "foo"})
        self.assertEqual(str(type(p1)), str(CustomModel))

    def test_custom_action_distribution(self):
        # Bare stand-in carrying only the model_config attribute.
        class Model():
            pass

        ray.init(
            object_store_memory=1000 * 1024 * 1024,
            ignore_reinit_error=True)  # otherwise fails sometimes locally
        # registration
        ModelCatalog.register_custom_action_dist("test",
                                                 CustomActionDistribution)
        action_space = Box(0, 1, shape=(5, 3), dtype=np.float32)
        # test retrieving it
        model_config = MODEL_DEFAULTS.copy()
        model_config["custom_action_dist"] = "test"
        dist_cls, param_shape = ModelCatalog.get_action_dist(
            action_space, model_config)
        self.assertEqual(str(dist_cls), str(CustomActionDistribution))
        self.assertEqual(param_shape, action_space.shape)

        # test the class works as a distribution
        dist_input = tf1.placeholder(tf.float32, (None, ) + param_shape)
        model = Model()
        model.model_config = model_config
        dist = dist_cls(dist_input, model=model)
        self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
        self.assertIsInstance(dist.sample(), tf.Tensor)
        with self.assertRaises(NotImplementedError):
            dist.entropy()

        # test passing the options to it
        model_config["custom_model_config"].update({"output_dim": (3, )})
        dist_cls, param_shape = ModelCatalog.get_action_dist(
            action_space, model_config)
        self.assertEqual(param_shape, (3, ))
        dist_input = tf1.placeholder(tf.float32, (None, ) + param_shape)
        model.model_config = model_config
        dist = dist_cls(dist_input, model=model)
        self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
        self.assertIsInstance(dist.sample(), tf.Tensor)
        with self.assertRaises(NotImplementedError):
            dist.entropy()

    def test_custom_multi_action_distribution(self):
        class Model():
            pass

        ray.init(
            object_store_memory=1000 * 1024 * 1024,
            ignore_reinit_error=True)  # otherwise fails sometimes locally
        # registration
        ModelCatalog.register_custom_action_dist(
            "test", CustomMultiActionDistribution)
        s1 = Discrete(5)
        s2 = Box(0, 1, shape=(3, ), dtype=np.float32)
        spaces = dict(action_1=s1, action_2=s2)
        action_space = Dict(spaces)
        # test retrieving it
        model_config = MODEL_DEFAULTS.copy()
        model_config["custom_action_dist"] = "test"
        dist_cls, param_shape = ModelCatalog.get_action_dist(
            action_space, model_config)
        self.assertIsInstance(dist_cls, partial)
        # Expected flat size: s1.n plus 2 parameters per Box dimension.
        self.assertEqual(param_shape, s1.n + 2 * s2.shape[0])

        # test the class works as a distribution
        dist_input = tf1.placeholder(tf.float32, (None, param_shape))
        model = Model()
        model.model_config = model_config
        dist = dist_cls(dist_input, model=model)
        self.assertIsInstance(dist.sample(), dict)
        self.assertIn("action_1", dist.sample())
        self.assertIn("action_2", dist.sample())
        self.assertEqual(dist.sample()["action_1"].dtype, tf.int64)
        self.assertEqual(dist.sample()["action_2"].shape[1:], s2.shape)
        with self.assertRaises(NotImplementedError):
            dist.entropy()
if __name__ == "__main__":
    # Allow running this test module directly through pytest.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_name": "ray-project/ray",
"path": "rllib/tests/test_catalog.py",
"copies": "1",
"size": "8187",
"license": "apache-2.0",
"hash": 2653137008839018500,
"line_mean": 37.6179245283,
"line_max": 79,
"alpha_frac": 0.616098693,
"autogenerated": false,
"ratio": 3.8545197740112993,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4970618467011299,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import gym
import logging
import numpy as np
import tree
from typing import List, Optional, Type, Union
from ray.tune.registry import RLLIB_MODEL, RLLIB_PREPROCESSOR, \
RLLIB_ACTION_DIST, _global_registry
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.preprocessors import get_preprocessor, Preprocessor
from ray.rllib.models.tf.recurrent_net import LSTMWrapper
from ray.rllib.models.tf.tf_action_dist import Categorical, \
Deterministic, DiagGaussian, Dirichlet, \
MultiActionDistribution, MultiCategorical
from ray.rllib.models.torch.torch_action_dist import TorchCategorical, \
TorchDeterministic, TorchDiagGaussian, \
TorchMultiActionDistribution, TorchMultiCategorical
from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.spaces.space_utils import flatten_space
from ray.rllib.utils.typing import ModelConfigDict, TensorType
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
MODEL_DEFAULTS: ModelConfigDict = {
    # === Built-in options ===
    # Number of hidden layers for fully connected net
    "fcnet_hiddens": [256, 256],
    # Nonlinearity for fully connected net (tanh, relu)
    "fcnet_activation": "tanh",
    # Filter config. List of [out_channels, kernel, stride] for each filter
    "conv_filters": None,
    # Nonlinearity for built-in convnet
    "conv_activation": "relu",
    # For DiagGaussian action distributions, make the second half of the model
    # outputs floating bias variables instead of state-dependent. This only
    # has an effect if using the default fully connected net.
    "free_log_std": False,
    # Whether to skip the final linear layer used to resize the hidden layer
    # outputs to size `num_outputs`. If True, then the last hidden layer
    # should already match num_outputs.
    "no_final_linear": False,
    # Whether layers should be shared for the value function.
    "vf_share_layers": True,

    # == LSTM ==
    # Whether to wrap the model with an LSTM.
    "use_lstm": False,
    # Max seq len for training the LSTM, defaults to 20.
    "max_seq_len": 20,
    # Size of the LSTM cell.
    "lstm_cell_size": 256,
    # Whether to feed a_{t-1} to LSTM (one-hot encoded if discrete).
    "lstm_use_prev_action": False,
    # Whether to feed r_{t-1} to LSTM.
    "lstm_use_prev_reward": False,
    # Experimental (only works with `_use_trajectory_view_api`=True):
    # Whether the LSTM is time-major (TxBx..) or batch-major (BxTx..).
    "_time_major": False,

    # == Atari ==
    # Whether to enable framestack for Atari envs
    "framestack": True,
    # Final resized frame dimension
    "dim": 84,
    # (deprecated) Converts ATARI frame to 1 Channel Grayscale image
    "grayscale": False,
    # (deprecated) Changes frame to range from [-1, 1] if true
    "zero_mean": True,

    # === Options for custom models ===
    # Name of a custom model to use
    "custom_model": None,
    # Extra options to pass to the custom classes. These will be available to
    # the Model's constructor in the model_config field. Also, they will be
    # attempted to be passed as **kwargs to ModelV2 models. For an example,
    # see rllib/models/[tf|torch]/attention_net.py.
    "custom_model_config": {},
    # Name of a custom action distribution to use.
    "custom_action_dist": None,
    # Custom preprocessors are deprecated. Please use a wrapper class around
    # your environment instead to preprocess observations.
    "custom_preprocessor": None,

    # Deprecated keys:
    # Use `lstm_use_prev_action` or `lstm_use_prev_reward` instead.
    "lstm_use_prev_action_reward": DEPRECATED_VALUE,
}
# __sphinx_doc_end__
# yapf: enable
@PublicAPI
class ModelCatalog:
"""Registry of models, preprocessors, and action distributions for envs.
Examples:
>>> prep = ModelCatalog.get_preprocessor(env)
>>> observation = prep.transform(raw_observation)
>>> dist_class, dist_dim = ModelCatalog.get_action_dist(
... env.action_space, {})
>>> model = ModelCatalog.get_model_v2(
... obs_space, action_space, num_outputs, options)
>>> dist = dist_class(model.outputs, model)
>>> action = dist.sample()
"""
    @staticmethod
    @DeveloperAPI
    def get_action_dist(
            action_space: gym.Space,
            config: ModelConfigDict,
            dist_type: Optional[Union[str, Type[ActionDistribution]]] = None,
            framework: str = "tf",
            **kwargs) -> (type, int):
        """Returns a distribution class and size for the given action space.

        Args:
            action_space (Space): Action space of the target gym env.
            config (Optional[dict]): Optional model config.
            dist_type (Optional[Union[str, Type[ActionDistribution]]]):
                Identifier of the action distribution (str) interpreted as a
                hint or the actual ActionDistribution class to use.
            framework (str): One of "tf", "tfe", or "torch".
            kwargs (dict): Optional kwargs to pass on to the Distribution's
                constructor. NOTE(review): currently accepted but never
                forwarded anywhere in this method -- confirm intended.

        Returns:
            Tuple:
                - dist_class (ActionDistribution): Python class of the
                    distribution.
                - dist_dim (int): The size of the input vector to the
                    distribution.
        """
        dist_cls = None
        # Fall back to the global defaults when no config given.
        config = config or MODEL_DEFAULTS
        # Custom distribution given.
        if config.get("custom_action_dist"):
            action_dist_name = config["custom_action_dist"]
            logger.debug(
                "Using custom action distribution {}".format(action_dist_name))
            dist_cls = _global_registry.get(RLLIB_ACTION_DIST,
                                            action_dist_name)
            # NOTE(review): an empty dict (not `config`) is passed on here, so
            # a custom multi-action dist's children are resolved with default
            # settings -- confirm this is intentional.
            dist_cls = ModelCatalog._get_multi_action_distribution(
                dist_cls, action_space, {}, framework)

        # Dist_type is given directly as a class.
        elif type(dist_type) is type and \
                issubclass(dist_type, ActionDistribution) and \
                dist_type not in (
                MultiActionDistribution, TorchMultiActionDistribution):
            dist_cls = dist_type
        # Box space -> DiagGaussian OR Deterministic.
        elif isinstance(action_space, gym.spaces.Box):
            # Only 1D (vector) Box spaces are supported here.
            if len(action_space.shape) > 1:
                raise UnsupportedSpaceException(
                    "Action space has multiple dimensions "
                    "{}. ".format(action_space.shape) +
                    "Consider reshaping this into a single dimension, "
                    "using a custom action distribution, "
                    "using a Tuple action space, or the multi-agent API.")
            # TODO(sven): Check for bounds and return SquashedNormal, etc..
            if dist_type is None:
                dist_cls = TorchDiagGaussian if framework == "torch" \
                    else DiagGaussian
            elif dist_type == "deterministic":
                dist_cls = TorchDeterministic if framework == "torch" \
                    else Deterministic
        # Discrete Space -> Categorical.
        elif isinstance(action_space, gym.spaces.Discrete):
            dist_cls = (TorchCategorical
                        if framework == "torch" else Categorical)
        # Tuple/Dict Spaces -> MultiAction.
        elif dist_type in (MultiActionDistribution,
                           TorchMultiActionDistribution) or \
                isinstance(action_space, (gym.spaces.Tuple, gym.spaces.Dict)):
            # Early return: the helper already yields the (partial, size) pair.
            return ModelCatalog._get_multi_action_distribution(
                (MultiActionDistribution
                 if framework == "tf" else TorchMultiActionDistribution),
                action_space, config, framework)
        # Simplex -> Dirichlet.
        elif isinstance(action_space, Simplex):
            if framework == "torch":
                # TODO(sven): implement
                raise NotImplementedError(
                    "Simplex action spaces not supported for torch.")
            dist_cls = Dirichlet
        # MultiDiscrete -> MultiCategorical.
        elif isinstance(action_space, gym.spaces.MultiDiscrete):
            dist_cls = TorchMultiCategorical if framework == "torch" else \
                MultiCategorical
            # Pre-bind the per-category sizes; total input size is their sum.
            return partial(dist_cls, input_lens=action_space.nvec), \
                int(sum(action_space.nvec))
        # Unknown type -> Error.
        else:
            raise NotImplementedError("Unsupported args: {} {}".format(
                action_space, dist_type))

        # Ask the chosen distribution class how many model outputs it needs.
        return dist_cls, dist_cls.required_model_output_shape(
            action_space, config)
@staticmethod
@DeveloperAPI
def get_action_shape(action_space: gym.Space) -> (np.dtype, List[int]):
"""Returns action tensor dtype and shape for the action space.
Args:
action_space (Space): Action space of the target gym env.
Returns:
(dtype, shape): Dtype and shape of the actions tensor.
"""
if isinstance(action_space, gym.spaces.Discrete):
return (action_space.dtype, (None, ))
elif isinstance(action_space, (gym.spaces.Box, Simplex)):
return (tf.float32, (None, ) + action_space.shape)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
return (tf.as_dtype(action_space.dtype),
(None, ) + action_space.shape)
elif isinstance(action_space, (gym.spaces.Tuple, gym.spaces.Dict)):
flat_action_space = flatten_space(action_space)
size = 0
all_discrete = True
for i in range(len(flat_action_space)):
if isinstance(flat_action_space[i], gym.spaces.Discrete):
size += 1
else:
all_discrete = False
size += np.product(flat_action_space[i].shape)
size = int(size)
return (tf.int64 if all_discrete else tf.float32, (None, size))
else:
raise NotImplementedError(
"Action space {} not supported".format(action_space))
@staticmethod
@DeveloperAPI
def get_action_placeholder(action_space: gym.Space,
name: str = "action") -> TensorType:
"""Returns an action placeholder consistent with the action space
Args:
action_space (Space): Action space of the target gym env.
name (str): An optional string to name the placeholder by.
Default: "action".
Returns:
action_placeholder (Tensor): A placeholder for the actions
"""
dtype, shape = ModelCatalog.get_action_shape(action_space)
return tf1.placeholder(dtype, shape=shape, name=name)
    @staticmethod
    @DeveloperAPI
    def get_model_v2(obs_space: gym.Space,
                     action_space: gym.Space,
                     num_outputs: int,
                     model_config: ModelConfigDict,
                     framework: str = "tf",
                     name: str = "default_model",
                     model_interface: type = None,
                     default_model: type = None,
                     **model_kwargs) -> ModelV2:
        """Returns a suitable model compatible with given spaces and output.

        Args:
            obs_space (Space): Observation space of the target gym env. This
                may have an `original_space` attribute that specifies how to
                unflatten the tensor into a ragged tensor.
            action_space (Space): Action space of the target gym env.
            num_outputs (int): The size of the output vector of the model.
            model_config (ModelConfigDict): The model config (see
                MODEL_DEFAULTS) controlling custom-model lookup, LSTM
                wrapping, etc.
            framework (str): One of "tf", "tfe", or "torch".
            name (str): Name (scope) for the model.
            model_interface (cls): Interface required for the model
            default_model (cls): Override the default class for the model. This
                only has an effect when not using a custom model
            model_kwargs (dict): args to pass to the ModelV2 constructor

        Returns:
            model (ModelV2): Model to use for the policy.
        """
        # --- Custom model path ---------------------------------------------
        if model_config.get("custom_model"):
            # Allow model kwargs to be overridden / augmented by
            # custom_model_config.
            customized_model_kwargs = dict(
                model_kwargs, **model_config.get("custom_model_config", {}))

            # `custom_model` may be the class itself or a registry key.
            if isinstance(model_config["custom_model"], type):
                model_cls = model_config["custom_model"]
            else:
                model_cls = _global_registry.get(RLLIB_MODEL,
                                                 model_config["custom_model"])

            if not issubclass(model_cls, ModelV2):
                raise ValueError(
                    "`model_cls` must be a ModelV2 sub-class, but is"
                    " {}!".format(model_cls))

            logger.info("Wrapping {} as {}".format(model_cls, model_interface))
            model_cls = ModelCatalog._wrap_if_needed(model_cls,
                                                     model_interface)

            if framework in ["tf2", "tf", "tfe"]:
                # Track and warn if vars were created but not registered.
                created = set()

                def track_var_creation(next_creator, **kw):
                    v = next_creator(**kw)
                    created.add(v)
                    return v

                with tf.variable_creator_scope(track_var_creation):
                    # Try calling with kwargs first (custom ModelV2 should
                    # accept these as kwargs, not get them from
                    # config["custom_model_config"] anymore).
                    try:
                        instance = model_cls(obs_space, action_space,
                                             num_outputs, model_config, name,
                                             **customized_model_kwargs)
                    except TypeError as e:
                        # Keyword error: Try old way w/o kwargs.
                        if "__init__() got an unexpected " in e.args[0]:
                            instance = model_cls(obs_space, action_space,
                                                 num_outputs, model_config,
                                                 name, **model_kwargs)
                            logger.warning(
                                "Custom ModelV2 should accept all custom "
                                "options as **kwargs, instead of expecting"
                                " them in config['custom_model_config']!")
                        # Other error -> re-raise.
                        else:
                            raise e

                # Any variable created during construction but not returned
                # by `instance.variables()` is considered unregistered.
                registered = set(instance.variables())
                not_registered = set()
                for var in created:
                    if var not in registered:
                        not_registered.add(var)
                if not_registered:
                    raise ValueError(
                        "It looks like variables {} were created as part "
                        "of {} but does not appear in model.variables() "
                        "({}). Did you forget to call "
                        "model.register_variables() on the variables in "
                        "question?".format(not_registered, instance,
                                           registered))
            else:
                # PyTorch automatically tracks nn.Modules inside the parent
                # nn.Module's constructor.
                # Try calling with kwargs first (custom ModelV2 should
                # accept these as kwargs, not get them from
                # config["custom_model_config"] anymore).
                try:
                    instance = model_cls(obs_space, action_space, num_outputs,
                                         model_config, name,
                                         **customized_model_kwargs)
                except TypeError as e:
                    # Keyword error: Try old way w/o kwargs.
                    if "__init__() got an unexpected " in e.args[0]:
                        instance = model_cls(obs_space, action_space,
                                             num_outputs, model_config, name,
                                             **model_kwargs)
                        logger.warning(
                            "Custom ModelV2 should accept all custom "
                            "options as **kwargs, instead of expecting"
                            " them in config['custom_model_config']!")
                    # Other error -> re-raise.
                    else:
                        raise e
            return instance

        # --- Default model path --------------------------------------------
        if framework in ["tf", "tfe", "tf2"]:
            v2_class = None
            # Try to get a default v2 model.
            if not model_config.get("custom_model"):
                v2_class = default_model or ModelCatalog._get_v2_model_class(
                    obs_space, model_config, framework=framework)

            if not v2_class:
                raise ValueError("ModelV2 class could not be determined!")

            if model_config.get("use_lstm"):
                # Wrap the base class in an LSTM; keep the original forward()
                # reachable for the wrapper via `_wrapped_forward`.
                wrapped_cls = v2_class
                forward = wrapped_cls.forward
                v2_class = ModelCatalog._wrap_if_needed(
                    wrapped_cls, LSTMWrapper)
                v2_class._wrapped_forward = forward

            # Wrap in the requested interface.
            wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
            return wrapper(obs_space, action_space, num_outputs, model_config,
                           name, **model_kwargs)
        elif framework == "torch":
            v2_class = \
                default_model or ModelCatalog._get_v2_model_class(
                    obs_space, model_config, framework=framework)
            if model_config.get("use_lstm"):
                from ray.rllib.models.torch.recurrent_net import LSTMWrapper \
                    as TorchLSTMWrapper
                wrapped_cls = v2_class
                forward = wrapped_cls.forward
                v2_class = ModelCatalog._wrap_if_needed(
                    wrapped_cls, TorchLSTMWrapper)
                v2_class._wrapped_forward = forward
            # Wrap in the requested interface.
            wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
            return wrapper(obs_space, action_space, num_outputs, model_config,
                           name, **model_kwargs)
        else:
            raise NotImplementedError(
                "`framework` must be 'tf2|tf|tfe|torch', but is "
                "{}!".format(framework))
@staticmethod
@DeveloperAPI
def get_preprocessor(env: gym.Env,
options: Optional[dict] = None) -> Preprocessor:
"""Returns a suitable preprocessor for the given env.
This is a wrapper for get_preprocessor_for_space().
"""
return ModelCatalog.get_preprocessor_for_space(env.observation_space,
options)
@staticmethod
@DeveloperAPI
def get_preprocessor_for_space(observation_space: gym.Space,
options: dict = None) -> Preprocessor:
"""Returns a suitable preprocessor for the given observation space.
Args:
observation_space (Space): The input observation space.
options (dict): Options to pass to the preprocessor.
Returns:
preprocessor (Preprocessor): Preprocessor for the observations.
"""
options = options or MODEL_DEFAULTS
for k in options.keys():
if k not in MODEL_DEFAULTS:
raise Exception("Unknown config key `{}`, all keys: {}".format(
k, list(MODEL_DEFAULTS)))
if options.get("custom_preprocessor"):
preprocessor = options["custom_preprocessor"]
logger.info("Using custom preprocessor {}".format(preprocessor))
logger.warning(
"DeprecationWarning: Custom preprocessors are deprecated, "
"since they sometimes conflict with the built-in "
"preprocessors for handling complex observation spaces. "
"Please use wrapper classes around your environment "
"instead of preprocessors.")
prep = _global_registry.get(RLLIB_PREPROCESSOR, preprocessor)(
observation_space, options)
else:
cls = get_preprocessor(observation_space)
prep = cls(observation_space, options)
logger.debug("Created preprocessor {}: {} -> {}".format(
prep, observation_space, prep.shape))
return prep
    @staticmethod
    @PublicAPI
    def register_custom_preprocessor(preprocessor_name: str,
                                     preprocessor_class: type) -> None:
        """Register a custom preprocessor class by name.

        The preprocessor can be later used by specifying
        {"custom_preprocessor": preprocessor_name} in the model config.

        Args:
            preprocessor_name (str): Name to register the preprocessor under.
            preprocessor_class (type): Python class of the preprocessor.
        """
        _global_registry.register(RLLIB_PREPROCESSOR, preprocessor_name,
                                  preprocessor_class)
    @staticmethod
    @PublicAPI
    def register_custom_model(model_name: str, model_class: type) -> None:
        """Register a custom model class by name.

        The model can be later used by specifying {"custom_model": model_name}
        in the model config.

        Args:
            model_name (str): Name to register the model under.
            model_class (type): Python class of the model.
        """
        _global_registry.register(RLLIB_MODEL, model_name, model_class)
    @staticmethod
    @PublicAPI
    def register_custom_action_dist(action_dist_name: str,
                                    action_dist_class: type) -> None:
        """Register a custom action distribution class by name.

        The distribution can be later used by specifying
        {"custom_action_dist": action_dist_name} in the model config.

        Args:
            action_dist_name (str): Name to register the action distribution
                under.
            action_dist_class (type): Python class of the action distribution.
        """
        _global_registry.register(RLLIB_ACTION_DIST, action_dist_name,
                                  action_dist_class)
@staticmethod
def _wrap_if_needed(model_cls: type, model_interface: type) -> type:
assert issubclass(model_cls, ModelV2), model_cls
if not model_interface or issubclass(model_cls, model_interface):
return model_cls
class wrapper(model_interface, model_cls):
pass
name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__)
wrapper.__name__ = name
wrapper.__qualname__ = name
return wrapper
@staticmethod
def _get_v2_model_class(input_space: gym.Space,
model_config: ModelConfigDict,
framework: str = "tf") -> ModelV2:
if framework == "torch":
from ray.rllib.models.torch.fcnet import (FullyConnectedNetwork as
FCNet)
from ray.rllib.models.torch.visionnet import (VisionNetwork as
VisionNet)
else:
from ray.rllib.models.tf.fcnet import \
FullyConnectedNetwork as FCNet
from ray.rllib.models.tf.visionnet import \
VisionNetwork as VisionNet
# Discrete/1D obs-spaces.
if isinstance(input_space, gym.spaces.Discrete) or \
len(input_space.shape) <= 2:
return FCNet
# Default Conv2D net.
else:
return VisionNet
    @staticmethod
    def _get_multi_action_distribution(dist_class, action_space, config,
                                       framework):
        """Pre-binds child distributions/sizes onto a multi-action dist class.

        Returns either the unchanged `dist_class`, or -- for multi-action
        distributions -- a (functools.partial, total_input_size) tuple.
        """
        # In case the custom distribution is a child of MultiActionDistr.
        # If users want to completely ignore the suggested child
        # distributions, they should simply do so in their custom class'
        # constructor.
        if issubclass(dist_class,
                      (MultiActionDistribution, TorchMultiActionDistribution)):
            # One child distribution (and its input size) per flattened
            # sub-space of the possibly nested action space.
            flat_action_space = flatten_space(action_space)
            child_dists_and_in_lens = tree.map_structure(
                lambda s: ModelCatalog.get_action_dist(
                    s, config, framework=framework), flat_action_space)
            child_dists = [e[0] for e in child_dists_and_in_lens]
            input_lens = [int(e[1]) for e in child_dists_and_in_lens]
            # Total model-output size is the sum of all children's sizes.
            return partial(
                dist_class,
                action_space=action_space,
                child_distributions=child_dists,
                input_lens=input_lens), int(sum(input_lens))
        # Not a multi-action distribution: hand back unchanged.
        return dist_class
| {
"repo_name": "richardliaw/ray",
"path": "rllib/models/catalog.py",
"copies": "1",
"size": "25640",
"license": "apache-2.0",
"hash": -1916023029952308700,
"line_mean": 42.904109589,
"line_max": 79,
"alpha_frac": 0.5667316693,
"autogenerated": false,
"ratio": 4.58840372226199,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655135391561991,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import hashlib
import struct
import spindrift.mysql._compat as compat
def byte2int(b):
    """Return *b* as an int; accepts either an int or a single-byte string."""
    if isinstance(b, int):
        return b
    # A 1-byte string: decode it as an unsigned byte.
    (value,) = struct.unpack("!B", b)
    return value
def int2byte(i):
    """Pack the integer *i* (0-255) into a single byte string."""
    packed = struct.pack("!B", i)
    return packed
def join_bytes(bs):
    """Concatenate a sequence of byte strings into one byte string.

    Bug fix: the previous implementation returned the *text* string '' for
    an empty input (mixing str and bytes) and concatenated quadratically;
    b''.join is linear and always returns bytes.
    """
    return b"".join(bs)
def pack_int24(n):
    """Encode *n* as a 3-byte little-endian integer (MySQL packet header)."""
    # Pack as 4-byte LE, then drop the most-significant (last) byte.
    four_bytes = struct.pack('<I', n)
    return four_bytes[:3]
def scramble(password, message):
    """mysql_native_password scramble:
    SHA1(password) XOR SHA1(nonce[:20] + SHA1(SHA1(password))).
    Returns b'' for an empty password.
    """
    SCRAMBLE_LENGTH = 20
    if not password:
        return b''
    sha_new = partial(hashlib.new, 'sha1')
    stage1 = sha_new(password).digest()
    stage2 = sha_new(stage1).digest()
    # Hash the server nonce together with the double-hashed password.
    combined = sha_new(message[:SCRAMBLE_LENGTH] + stage2).digest()
    return _crypt(combined, stage1)
def _crypt(message1, message2):
length = len(message1)
result = b''
for i in compat.range_type(length):
x = (struct.unpack('B', message1[i:i+1])[0] ^
struct.unpack('B', message2[i:i+1])[0])
result += struct.pack('B', x)
return result
def lenenc_int(i):
    """Encode *i* as a MySQL length-encoded integer (1, 3, 4 or 9 bytes)."""
    if i < 0:
        raise ValueError("Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i)
    if i < 0xfb:
        # Values below 0xfb fit in a single byte (helper inlined).
        return struct.pack('!B', i)
    if i < (1 << 16):
        return b'\xfc' + struct.pack('<H', i)
    if i < (1 << 24):
        return b'\xfd' + struct.pack('<I', i)[:3]
    if i < (1 << 64):
        return b'\xfe' + struct.pack('<Q', i)
    raise ValueError("Encoding %x is larger than %x - no representation in LengthEncodedInteger" % (i, (1 << 64)))
| {
"repo_name": "robertchase/spindrift",
"path": "spindrift/mysql/util.py",
"copies": "1",
"size": "1653",
"license": "mit",
"hash": -3053013468097634000,
"line_mean": 22.6142857143,
"line_max": 118,
"alpha_frac": 0.5638233515,
"autogenerated": false,
"ratio": 3.1972920696324953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9259821699760451,
"avg_score": 0.00025874427440879266,
"num_lines": 70
} |
from functools import partial
import hashlib
class CaseInsensitiveDict(dict):
    """A dict whose string keys are matched case-insensitively.

    Keys are normalized to lower case on every write, so lookups, deletes
    and membership tests ignore case.

    Fixes over the previous version: `get`, `setdefault` and `update` are
    now overridden too (the plain dict versions bypass `__setitem__` /
    `__getitem__` and therefore ignored the case-folding), and `pop` now
    supports dict.pop's optional default argument.
    """

    def __init__(self, obj=None, **kwargs):
        super(CaseInsensitiveDict, self).__init__()
        if obj is not None:
            # Accept either a mapping or an iterable of (key, value) pairs.
            items = obj.items() if isinstance(obj, dict) else obj
            for k, v in items:
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v

    def __setitem__(self, key, value):
        super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super(CaseInsensitiveDict, self).__getitem__(key.lower())

    def __delitem__(self, key):
        return super(CaseInsensitiveDict, self).__delitem__(key.lower())

    def __contains__(self, key):
        return super(CaseInsensitiveDict, self).__contains__(key.lower())

    def get(self, key, default=None):
        return super(CaseInsensitiveDict, self).get(key.lower(), default)

    def setdefault(self, key, default=None):
        return super(CaseInsensitiveDict, self).setdefault(
            key.lower(), default)

    def update(self, other=None, **kwargs):
        # Route every incoming pair through __setitem__ so keys get
        # lower-cased (dict.update would bypass it).
        if other is not None:
            items = other.items() if isinstance(other, dict) else other
            for k, v in items:
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v

    def pop(self, key, *args):
        # *args carries the optional default, matching dict.pop's contract.
        return super(CaseInsensitiveDict, self).pop(key.lower(), *args)
class memoize(object):
    """
    Cache a method's return values on the instance it is bound to.

    The cache lives in a per-instance dict keyed by (function, positional
    args, keyword items), so distinct instances never share results.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: hand back the raw function.
        if obj is None:
            return self.func
        # Bound access: pre-bind the instance as the first call argument.
        return partial(self, obj)

    def __call__(self, *args, **kw):
        instance = args[0]
        # Lazily create the per-instance cache dict on first use.
        try:
            cache = instance.__cache
        except AttributeError:
            cache = instance.__cache = {}
        key = (self.func, args[1:], frozenset(kw.items()))
        if key not in cache:
            cache[key] = self.func(*args, **kw)
        return cache[key]
def checksum(s):
    """
    Calculate the checksum of a byte string.

    Should eventually support files too.
    MD5 is used deliberately, to match what S3 uses for ETags.
    """
    digest = hashlib.md5(s)
    return digest.hexdigest()
def get_or_prompt(config, key, prompt_fn, *args, **kwargs):
    """
    Fetch `key` from `config`, prompting for (and caching) it when absent.

    :param config: The configuration object to get the value from
    :param key: The configuration key to retrieve
    :type key: str
    :param prompt_fn: The prompt function used when the key is unset
    :param args: Extra arguments for the prompt function
    :param kwargs: Extra keyword arguments for the prompt function
    """
    existing = config.get(key)
    if existing is not None:
        return existing
    # Not configured yet: ask the user and persist the answer.
    value = prompt_fn(*args, **kwargs)
    config.set(key, value)
    return value
return value | {
"repo_name": "fjxhkj/Cactus",
"path": "cactus/utils/helpers.py",
"copies": "9",
"size": "2312",
"license": "bsd-3-clause",
"hash": -1826707449982062800,
"line_mean": 26.8674698795,
"line_max": 73,
"alpha_frac": 0.5709342561,
"autogenerated": false,
"ratio": 4.099290780141844,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9170225036241844,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import html
import json
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.encoding import force_str
from django.utils.encoding import smart_str
from django.utils.formats import number_format
from django.utils.functional import Promise
from django.utils.html import strip_tags
from django.utils.translation import gettext_lazy as _
def field_as_string(obj, field, ascii=False):
    """Render ``obj.<field>`` as plain text (e.g. for CSV export).

    Prefers an ``<field>_csv_display`` attribute, then ``<field>_display``,
    then the raw attribute value.
    """
    value = getattr(obj, field + '_csv_display', None)
    if value is None:
        value = getattr(obj, field + '_display', None)
        if value is None:
            value = getattr(obj, field)
    # NOTE: bool must be tested before the int check below, since bool is a
    # subclass of int; booleans become localized yes/no strings.
    if isinstance(value, bool):
        value = (_('no'), _('yes'))[value]
    # Localize numbers (decimal separator, thousands grouping).
    if isinstance(value, float) or isinstance(value, int):
        value = number_format(value)
    # Collapse lists / querysets into a comma-separated string.
    if isinstance(value, list) or isinstance(value, QuerySet):
        value = ", ".join([str(val) for val in value])
    return smart_plain_text(value, ascii)
def plain_text(html_content):
    """Strip HTML tags from `html_content`, then decode HTML entities."""
    without_tags = strip_tags(html_content)
    return html.unescape(without_tags)
def smart_plain_text(s, ascii=False):
    """Best-effort plain-text rendering of `s`.

    Returns '' for None; otherwise stringifies, strips HTML and decodes
    entities. With ascii=True the result is passed through smart_str.
    Falls back to smart_str(s) if decoding blows up.
    """
    if s is None:
        return ''
    try:
        # Converts to unicode, removes HTML tags, converts HTML entities.
        cleaned = plain_text(str(s))
        return smart_str(cleaned) if ascii else cleaned
    except UnicodeDecodeError:
        return smart_str(s)
class DjangoJSONEncoder(DjangoJSONEncoder):
    """
    Taken (slightly modified) from:
    http://stackoverflow.com/questions/2249792/json-serializing-django-models-with-simplejson
    """
    # NOTE: this class deliberately shadows the imported
    # django.core.serializers.json.DjangoJSONEncoder it inherits from; the
    # base name is resolved before the new class object is bound.

    def default(self, obj):
        # https://docs.djangoproject.com/en/dev/topics/serialization/#id2
        if isinstance(obj, Promise):
            # Lazy translation strings: force to a real str.
            return force_str(obj)
        if isinstance(obj, QuerySet):
            # `default` must return a python serializable
            # structure, the easiest way is to load the JSON
            # string produced by `serialize` and return it
            return json.loads(serialize('json', obj))
        # NOTE(review): this override never delegates to the base class's
        # default(), so its datetime/Decimal handling is bypassed and such
        # values fall through to force_str -- confirm this is intended.
        return force_str(obj)
# Partial function pre-binding the encoder: callers can use
# json_django_dumps(my_dict) instead of
# json.dumps(my_dict, cls=DjangoJSONEncoder).
json_django_dumps = partial(json.dumps, cls=DjangoJSONEncoder)
| {
"repo_name": "GeotrekCE/Geotrek-admin",
"path": "mapentity/serializers/helpers.py",
"copies": "2",
"size": "2311",
"license": "bsd-2-clause",
"hash": 501738902651263400,
"line_mean": 32.9852941176,
"line_max": 93,
"alpha_frac": 0.6724361748,
"autogenerated": false,
"ratio": 4.012152777777778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5684588952577777,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import importlib
import inspect
import pkgutil
def get_all_corpora():
    """Returns all corpus classes defined in the corpus package."""
    # Imported locally -- presumably to avoid a circular import with the
    # corpus modules, which import from this package; confirm before moving.
    from emLam.corpus.corpus_base import Corpus
    return get_all_classes(Corpus)
def get_all_preprocessors():
    """Returns all preprocessor classes defined in the corpus package."""
    # Local import mirrors get_all_corpora() -- presumably avoids a circular
    # import; confirm before moving to module level.
    from emLam.corpus.preprocessor_base import Preprocessor
    return get_all_classes(Preprocessor)
def get_all_classes(ancestor):
    """
    Returns all classes of a specific type defined in the corpus package. In
    order to support configuration that follows the class hierarchy, the data
    is returned as {component_name: (component_class, [path_to_ancestor])}.
    """
    def is_mod_class(mod, cls):
        # True only for classes *defined in* mod, not merely imported there.
        return inspect.isclass(cls) and inspect.getmodule(cls) == mod

    curr_module = importlib.import_module(__name__)
    classes = {}
    # Walk every module directly inside this package.
    for _, module_name, _ in pkgutil.iter_modules(curr_module.__path__):
        module = importlib.import_module(__name__ + '.' + module_name)
        for _, cls in inspect.getmembers(module, partial(is_mod_class, module)):
            # Only take 'named' classes, i.e. leaves in the class tree
            if issubclass(cls, ancestor) and cls.name:
                # Slice the MRO from cls up to and including `ancestor`,
                # then record the root-first list of names (a class's `name`
                # attribute, falling back to its Python class name).
                path = inspect.getmro(cls)
                path = path[:path.index(ancestor) + 1]
                path = [c.name or c.__name__ for c in path][::-1]
                classes[cls.name] = cls, path
    return classes
| {
"repo_name": "DavidNemeskey/emLam",
"path": "emLam/corpus/__init__.py",
"copies": "2",
"size": "1489",
"license": "mit",
"hash": -4270226376510091000,
"line_mean": 37.1794871795,
"line_max": 80,
"alpha_frac": 0.6581598388,
"autogenerated": false,
"ratio": 4.0572207084468666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031655587211142766,
"num_lines": 39
} |
from functools import partial
import inspect
from multiprocessing import Process, Event, Pipe
from collections import deque
import asyncio
from threading import Thread
class Worker(Process):
    """
    Starts a new process and inits an instance of proxy_type.
    Calls methods and returns results communicated over the message_pipe.

    Protocol: after construction, True (or the constructor's exception) is
    sent once; afterwards each received (func_name, args, kwargs) triple is
    answered with its result or the raised exception.
    """

    def __init__(self, message_pipe, close_event, is_closed_event, proxy_type, proxy_args, proxy_kwargs):
        super().__init__()
        self.message_pipe = message_pipe
        self.close_event = close_event
        self.is_closed_event = is_closed_event
        self.proxy_type = proxy_type
        self.proxy_args = proxy_args
        self.proxy_kwargs = proxy_kwargs

    def run(self):
        try:
            proxied_obj = self.proxy_type(*self.proxy_args, **self.proxy_kwargs)
            self.message_pipe.send(True)
        except Exception as e:
            self.message_pipe.send(e)
            # Bug fix: previously execution fell through into the message
            # loop with `proxied_obj` unbound, so the first request crashed
            # the worker with a NameError instead of shutting down cleanly.
            return self.close()
        while not self.close_event.is_set():
            # Poll with a timeout so close_event is re-checked while idle.
            if not self.message_pipe.poll(timeout=1):
                continue
            # Need to check if close_event is set to prevent broken message pipe errors
            if self.close_event.is_set():
                return self.close()
            func_name, args, kwargs = self.message_pipe.recv()
            try:
                if func_name == '__getattr__':
                    result = getattr(proxied_obj, args[0])
                elif func_name == '__setattr__':
                    result = setattr(proxied_obj, args[0], args[1])
                else:
                    result = getattr(proxied_obj, func_name)(*args, **kwargs)
                self.message_pipe.send(result)
            except Exception as e:
                # Ship the exception back so the caller's future can carry it.
                self.message_pipe.send(e)
        return self.close()

    def close(self):
        # Signal observers that this worker has fully stopped.
        self.is_closed_event.set()
class ManagerThread(Thread):
    """
    Polls the message_pipe waiting for messages from the worker and sets the result of futures when received.

    Relies on results arriving in the same order the requests were queued:
    each incoming message resolves the oldest pending future.
    """

    def __init__(self, message_pipe, future_deque, close_event, event_loop):
        super().__init__()
        self.message_pipe = message_pipe  # Connection shared with the Worker.
        self.future_deque = future_deque  # FIFO of futures awaiting results.
        self.close_event = close_event  # Signals this thread to stop polling.
        self.event_loop = event_loop  # Loop on which futures get resolved.

    def run(self):
        while not self.close_event.is_set():
            # Poll with a timeout so close_event is re-checked at least once
            # per second even when the worker is idle.
            if not self.message_pipe.poll(timeout=1):
                continue
            result = self.message_pipe.recv()
            future = self.future_deque.popleft()
            if isinstance(result, Exception):
                # Worker signalled failure -> surface it on the future.
                # Futures are not thread-safe, so when the loop is running we
                # must hop onto the loop's thread via call_soon_threadsafe.
                if self.event_loop.is_running():
                    self.event_loop.call_soon_threadsafe(future.set_exception, result)
                else:
                    future.set_exception(result)
            else:
                if self.event_loop.is_running():
                    self.event_loop.call_soon_threadsafe(future.set_result, result)
                else:
                    future.set_result(result)
class Manager:
    """
    Creates a Worker and sends tasks to it. Returns and sets futures for the task results.
    When called in the asyncio event_loop, polls Worker results with ManagerThread.
    """

    def __init__(self, proxy_type, args, kwargs, instance_future, event_loop=None):
        self.message_pipe, worker_pipe = Pipe()
        self.future_deque = deque()
        # The first message from the worker (construction success/failure)
        # resolves `instance_future`, hence it is queued up-front.
        self.future_deque.append(instance_future)
        self.close_event = Event()
        self.worker_closed_event = Event()
        self.event_loop = event_loop if event_loop is not None else asyncio.get_event_loop()
        self.thread = ManagerThread(self.message_pipe, self.future_deque, self.close_event, self.event_loop)
        self.worker = Worker(worker_pipe, self.close_event, self.worker_closed_event, proxy_type, args, kwargs)
        self.worker.start()
        self.thread.start()

    def run_async(self, name, *args, **kwargs):
        """Queue a (method-name, args, kwargs) request; returns its future."""
        future = ProcessFuture()
        try:
            self.message_pipe.send([name, args, kwargs])
            # Only enqueue after a successful send so results stay aligned
            # with their futures.
            self.future_deque.append(future)
        except Exception as e:
            future.set_exception(e)
        return future

    def close(self, wait=False):
        # Signals both the worker process and the manager thread to stop.
        self.close_event.set()
        if wait:
            self.worker_closed_event.wait()

    def __del__(self):
        self.close()

    @property
    def is_closing(self):
        # True once shutdown has been requested.
        return self.close_event.is_set()

    @property
    def is_closed(self):
        # True once the worker process has fully stopped.
        return self.worker_closed_event.is_set()
class ProcessFuture(asyncio.Future):
    """
    An asyncio.Future whose result is computed in another process.
    Cancellation is unsupported.
    """

    def cancel(self):
        # The remote computation cannot be interrupted, so refuse rather
        # than pretend to cancel.
        raise RuntimeError("ProcessFuture instances cannot be cancelled.")
class ProcessInterface(ProcessFuture):
    """
    Interface to proxy_type(*args, **kwargs) running in another process.

    Method calls, attribute reads and attribute writes are all forwarded to
    the worker process and each returns a ProcessFuture. The interface is
    itself a future: it resolves (to itself) once the remote instance has
    been constructed.
    """
    # Attribute names handled locally (incl. Future internals); everything
    # else is intercepted and forwarded to the worker process.
    _fields = {'proxy_type', 'method_names', '_manager', '_loop', '_callbacks', '_result', '_state', '_log_traceback',
               '_exception'}

    def __init__(self, proxy_type, *args, event_loop=None, **kwargs):
        super().__init__()
        self.method_names = set(self.iter_method_names(proxy_type))
        self.proxy_type = proxy_type
        # Passing `self` as instance_future: this interface resolves once
        # the remote construction succeeds (see set_result below).
        self._manager = Manager(proxy_type, args, kwargs, self, event_loop=event_loop)

    @classmethod
    def iter_method_names(cls, proxy_type):
        """Yield the names of all functions/methods defined on proxy_type."""
        func_or_method = lambda x: inspect.isfunction(x) or inspect.ismethod(x)
        for name, member in inspect.getmembers(proxy_type, predicate=func_or_method):
            yield name

    def __getattr__(self, name):
        # Known methods become async callables; other attribute reads are
        # forwarded as remote __getattr__ requests.
        if name in self.method_names:
            return partial(self._manager.run_async, name)
        return self._manager.run_async('__getattr__', name)

    def __setattr__(self, name, value):
        # Local bookkeeping fields are set normally; anything else becomes
        # a remote __setattr__ request (returning a future).
        if name in self._fields:
            super().__setattr__(name, value)
            return
        return self._manager.run_async('__setattr__', name, value)

    def set_result(self, result):
        # Resolve to *self* (ignoring the worker's payload) so awaiting the
        # interface yields the usable proxy object.
        super().set_result(self)

    def close(self, wait=False):
        return self._manager.close(wait)

    def __repr__(self):
        return "{}({})<{}>".format(self.__class__.__name__, repr(self.proxy_type), self._state)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    @property
    def is_closing(self):
        return self._manager.is_closing

    @property
    def is_closed(self):
        return self._manager.is_closed
class ProcessMixin(object):
    """
    Mixin that provides a spawn classmethod.
    Spawned objects run in another process, and spawn returns an interface to that object.
    """
    @classmethod
    def spawn(cls, *args, event_loop=None, **kwargs):
        # Returns a ProcessInterface future resolving once the remote
        # instance has been constructed in the worker process.
        # NOTE(review): the trailing "| {" below is dataset-join residue,
        # not valid Python -- preserved byte-for-byte from the original.
        return ProcessInterface(cls, *args, event_loop=event_loop, **kwargs) | {
"repo_name": "dustyrockpyle/mpworker",
"path": "mpworker/__init__.py",
"copies": "1",
"size": "6859",
"license": "mit",
"hash": 948675471580182100,
"line_mean": 34.1794871795,
"line_max": 118,
"alpha_frac": 0.6028575594,
"autogenerated": false,
"ratio": 4.046607669616519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149465229016519,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import inspect
import logging
import sys
import re
from django.utils import six
from silk.profiling.profiler import silk_profile
Logger = logging.getLogger('silk.profiling.dynamic')
def _get_module(module_name):
"""
Given a module name in form 'path.to.module' return module object for 'module'.
"""
if '.' in module_name:
splt = module_name.split('.')
imp = '.'.join(splt[:-1])
frm = splt[-1]
module = __import__(imp, globals(), locals(), [frm], 0)
module = getattr(module, frm)
else:
module = __import__(module_name, globals(), locals(), [], 0)
return module
def _get_func(module, func_name):
"""
Given a module and a function name, return the function.
func_name can be of the forms:
- 'foo': return a function
- 'Class.foo': return a method
"""
cls_name = None
cls = None
if '.' in func_name:
cls_name, func_name = func_name.split('.')
if cls_name:
cls = getattr(module, cls_name)
func = getattr(cls, func_name)
else:
func = getattr(module, func_name)
return cls, func
def profile_function_or_method(module, func, name=None):
    """
    Programmatically wrap a function with the silk_profile decorator.
    @param module: module object or module name in form 'path.to.module'
    @param func: function object or function name in form 'foo' or 'Class.method'
    @param name: optional profiler name
    """
    if isinstance(module, six.string_types) or isinstance(module, six.text_type):
        module = _get_module(module)
    func_name = func
    cls, target = _get_func(module, func_name)
    wrapped_target = silk_profile(name, _dynamic=True)(target)
    # Methods are re-bound on the class under the bare method name;
    # plain functions are re-bound on the module under the full name.
    if cls:
        setattr(cls, func_name.split('.')[-1], wrapped_target)
    else:
        setattr(module, func_name, wrapped_target)
def _get_parent_module(module):
parent = sys.modules
splt = module.__name__.split('.')
if len(splt) > 1:
for module_name in splt[:-1]:
try:
parent = getattr(parent, module_name)
except AttributeError:
parent = parent[module_name]
return parent
def _get_context_manager_source(end_line, file_path, name, start_line):
inject_code = "with silk_profile('%s', _dynamic=True):\n" % name
code = 'from silk.profiling.profiler import silk_profile\n'
with open(file_path, 'r') as f:
ws = ''
for i, line in enumerate(f):
if i == start_line:
# Use the same amount of whitespace as the line currently occupying
x = re.search(r"^(\s+).*$", line)
try:
ws = x.groups()[0]
except IndexError:
ws = ''
code += ws + inject_code
code += ws + ' ' + line
elif start_line < i <= end_line:
code += ws + ' ' + line
else:
code += line
return code
def _get_ws(txt):
"""
Return whitespace at the beginning of a string
"""
m = re.search(r"^(\s+).*$", txt)
try:
fws = m.groups()[0]
except AttributeError:
fws = ''
return fws
def _get_source_lines(func):
source = inspect.getsourcelines(func)[0]
fws = _get_ws(source[0])
for i in range(0, len(source)):
source[i] = source[i].replace(fws, '', 1)
return source
def _new_func_from_source(source, func):
    """
    Create new function defined in source but maintain context from func
    @param source: Python source code (list of lines) containing a def statement
    @param func: The function whose global + local context we will use
    @return: the freshly compiled function object named like ``func``
    """
    src_str = ''.join(source)
    # frames[0] is this frame, frames[1] the direct caller, frames[2] two
    # levels up.  NOTE(review): this hard-codes the call depth -- confirm it
    # still matches if the call chain around this helper changes.
    frames = inspect.getouterframes(inspect.currentframe())
    calling_frame = frames[2][0]
    context = {}
    # My initial instict was: exec src_str in func.func_globals.items(), calling_frame.f_locals
    # however this seems to break the function closure so caveat here is that we create a new
    # function with the locals merged into the globals.
    #
    # Possible consequences I can think of:
    # - If a global exists that already has the same name as the local, it will be overwritten in
    #   in the context of this function. This shouldnt matter though as the global should have already
    #   been hidden by the new name?
    #
    # This functionality should be considered experimental as no idea what other consequences there
    # could be.
    #
    # relevant: http://stackoverflow.com/questions/2749655/why-are-closures-broken-within-exec
    globals = six.get_function_globals(func)
    locals = calling_frame.f_locals
    combined = globals.copy()
    combined.update(locals)
    Logger.debug('New src_str:\n %s' % src_str)
    # Execute the def statement; the new function lands in ``context``.
    six.exec_(src_str, combined, context)
    return context[func.__name__]
def _inject_context_manager_func(func, start_line, end_line, name):
    """
    injects a context manager into the given function
    e.g given:
        x = 5
        def foo():
            print x
            print '1'
            print '2'
            print '3'
        inject_context_manager_func(foo, 0, 2, 'cm')
    foo will now have the definition:
        def foo():
            with silk_profile('cm'):
                print x
                print '1'
            print '2'
            print '3'
    closures, globals & locals are honoured
    @param func: object of type<function> or type<instancemethod>
    @param start_line: line at which to inject 'with' statement. line num. is relative to the func, not the module.
    @param end_line: line at which to exit the context
    @param name: name of the profiler
    """
    source = _get_source_lines(func)
    # Shift past the ``def`` line: caller-supplied line numbers are
    # relative to the function body (see docstring).
    start_line += 1
    end_line += 1
    ws = _get_ws(source[start_line])
    # Re-indent the wrapped lines one level so they sit inside the ``with``.
    for i in range(start_line, end_line):
        try:
            source[i] = '    ' + source[i]
        except IndexError:
            raise IndexError('Function %s does not have line %d' % (func.__name__, i))
    # Insert the import and the ``with`` statement at the original
    # indentation; the re-indented lines now follow them.
    source.insert(start_line, ws + "from silk.profiling.profiler import silk_profile\n")
    source.insert(start_line + 1, ws + "with silk_profile('%s', _dynamic=True):\n" % name)
    return _new_func_from_source(source, func)
def is_str_typ(o):
    """True when *o* is any six string type (bytes/str/unicode)."""
    if isinstance(o, six.text_type):
        return True
    return any(isinstance(o, typ) for typ in six.string_types)
def inject_context_manager_func(module, func, start_line, end_line, name):
    """Replace module[.Class].func with a copy wrapped in a silk_profile block."""
    if is_str_typ(module):
        module = _get_module(module)
    if is_str_typ(func):
        func_name = func
        cls, func = _get_func(module, func_name)
    else:
        cls = None
        func_name = func.__name__
    replacement = _inject_context_manager_func(func, start_line, end_line, name)
    # NOTE(review): when func is given as 'Class.foo', the attribute is set
    # under the dotted name -- mirrors the original behaviour; confirm intended.
    target = cls if cls else module
    setattr(target, func_name, replacement)
| {
"repo_name": "crunchr/silk",
"path": "silk/profiling/dynamic.py",
"copies": "3",
"size": "6905",
"license": "mit",
"hash": 1596864540213770000,
"line_mean": 30.5296803653,
"line_max": 115,
"alpha_frac": 0.5989862419,
"autogenerated": false,
"ratio": 3.70042872454448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007488148180389279,
"num_lines": 219
} |
from functools import partial
import inspect
import logging
import sys
import re
from silk.profiling.profiler import silk_profile
Logger = logging.getLogger('silk.profiling.dynamic')
def _get_module(module_name):
"""
Given a module name in form 'path.to.module' return module object for 'module'.
"""
if '.' in module_name:
splt = module_name.split('.')
imp = '.'.join(splt[:-1])
frm = splt[-1]
module = __import__(imp, globals(), locals(), [frm], 0)
module = getattr(module, frm)
else:
module = __import__(module_name, globals(), locals(), [], 0)
return module
def _get_func(module, func_name):
"""
Given a module and a function name, return the function.
func_name can be of the forms:
- 'foo': return a function
- 'Class.foo': return a method
"""
cls_name = None
cls = None
if '.' in func_name:
cls_name, func_name = func_name.split('.')
if cls_name:
cls = getattr(module, cls_name)
func = getattr(cls, func_name)
else:
func = getattr(module, func_name)
return cls, func
def profile_function_or_method(module, func, name=None):
"""
Programmatically apply a decorator to a function in a given module [+ class]
@param module: module object or module name in form 'path.to.module'
@param func: function object or function name in form 'foo' or 'Class.method'
"""
if isinstance(module, str):
module = _get_module(module)
decorator = silk_profile(name, _dynamic=True)
func_name = func
cls, func = _get_func(module, func_name)
wrapped_target = decorator(func)
if cls:
setattr(cls, func_name.split('.')[-1], wrapped_target)
else:
setattr(module, func_name, wrapped_target)
def _get_parent_module(module):
parent = sys.modules
splt = module.__name__.split('.')
if len(splt) > 1:
for module_name in splt[:-1]:
try:
parent = getattr(parent, module_name)
except AttributeError:
parent = parent[module_name]
return parent
def _get_context_manager_source(end_line, file_path, name, start_line):
inject_code = "with silk_profile('%s', _dynamic=True):\n" % name
code = 'from silk.profiling.profiler import silk_profile\n'
with open(file_path, 'r') as f:
ws = ''
for i, line in enumerate(f):
if i == start_line:
# Use the same amount of whitespace as the line currently occupying
x = re.search(r"^(\s+).*$", line)
try:
ws = x.groups()[0]
except IndexError:
ws = ''
code += ws + inject_code
code += ws + ' ' + line
elif start_line < i <= end_line:
code += ws + ' ' + line
else:
code += line
return code
def _get_ws(txt):
"""
Return whitespace at the beginning of a string
"""
m = re.search(r"^(\s+).*$", txt)
try:
fws = m.groups()[0]
except AttributeError:
fws = ''
return fws
def _get_source_lines(func):
source = inspect.getsourcelines(func)[0]
fws = _get_ws(source[0])
for i in range(0, len(source)):
source[i] = source[i].replace(fws, '', 1)
return source
def _new_func_from_source(source, func):
"""
Create new function defined in source but maintain context from func
@param func: The function whose global + local context we will use
@param source: Python source code containing def statement
"""
src_str = ''.join(source)
frames = inspect.getouterframes(inspect.currentframe())
calling_frame = frames[2][0]
context = {}
# My initial instict was: exec src_str in func.func_globals.items(), calling_frame.f_locals
# however this seems to break the function closure so caveat here is that we create a new
# function with the locals merged into the globals.
#
# Possible consequences I can think of:
# - If a global exists that already has the same name as the local, it will be overwritten in
# in the context of this function. This shouldnt matter though as the global should have already
# been hidden by the new name?
#
# This functionality should be considered experimental as no idea what other consequences there
# could be.
#
# relevant: http://stackoverflow.com/questions/2749655/why-are-closures-broken-within-exec
globals = func.__globals__
locals = calling_frame.f_locals
combined = globals.copy()
combined.update(locals)
Logger.debug('New src_str:\n %s' % src_str)
exec(src_str, combined, context)
return context[func.__name__]
def _inject_context_manager_func(func, start_line, end_line, name):
"""
injects a context manager into the given function
e.g given:
x = 5
def foo():
print x
print '1'
print '2'
print '3'
inject_context_manager_func(foo, 0, 2, 'cm')
foo will now have the definition:
def foo():
with silk_profile('cm'):
print x
print '1'
print '2'
print '3'
closures, globals & locals are honoured
@param func: object of type<function> or type<instancemethod>
@param start_line: line at which to inject 'with' statement. line num. is relative to the func, not the module.
@param end_line: line at which to exit the context
@param name: name of the profiler
"""
source = _get_source_lines(func)
start_line += 1
end_line += 1
ws = _get_ws(source[start_line])
for i in range(start_line, end_line):
try:
source[i] = ' ' + source[i]
except IndexError:
raise IndexError('Function %s does not have line %d' % (func.__name__, i))
source.insert(start_line, ws + "from silk.profiling.profiler import silk_profile\n")
source.insert(start_line + 1, ws + "with silk_profile('%s', _dynamic=True):\n" % name)
return _new_func_from_source(source, func)
def is_str_typ(o):
    """Return True when *o* is a str instance (including subclasses)."""
    return issubclass(type(o), str)
def inject_context_manager_func(module, func, start_line, end_line, name):
if is_str_typ(module):
module = _get_module(module)
cls = None
if is_str_typ(func):
func_name = func
cls, func = _get_func(module, func_name)
else:
func_name = func.__name__
new_func = _inject_context_manager_func(func, start_line, end_line, name)
if cls:
setattr(cls, func_name, new_func)
else:
setattr(module, func_name, new_func)
| {
"repo_name": "jazzband/silk",
"path": "silk/profiling/dynamic.py",
"copies": "1",
"size": "6732",
"license": "mit",
"hash": -3945707038645118000,
"line_mean": 30.1666666667,
"line_max": 115,
"alpha_frac": 0.5952168746,
"autogenerated": false,
"ratio": 3.7152317880794703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48104486626794707,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import inspect
import logging
import sys
import re
from silk.utils import six
from silk.profiling.profiler import silk_profile
Logger = logging.getLogger('silk')
def _get_module(module_name):
"""
Given a module name in form 'path.to.module' return module object for 'module'.
"""
if '.' in module_name:
splt = module_name.split('.')
imp = '.'.join(splt[:-1])
frm = splt[-1]
module = __import__(imp, globals(), locals(), [frm], 0)
module = getattr(module, frm)
else:
module = __import__(module_name, globals(), locals(), [], 0)
return module
def _get_func(module, func_name):
"""
Given a module and a function name, return the function.
func_name can be of the forms:
- 'foo': return a function
- 'Class.foo': return a method
"""
cls_name = None
cls = None
if '.' in func_name:
cls_name, func_name = func_name.split('.')
if cls_name:
cls = getattr(module, cls_name)
func = getattr(cls, func_name)
else:
func = getattr(module, func_name)
return cls, func
def profile_function_or_method(module, func, name=None):
    """
    Programmatically apply a decorator to a function in a given module [+ class]
    @param module: module object or module name in form 'path.to.module'
    @param func: function object or function name in form 'foo' or 'Class.method'
    @param name: optional profiler name
    """
    # BUG FIX: ``type(module) is str or type(module) is unicode`` raises
    # NameError on Python 3, where ``unicode`` does not exist.  Use the six
    # compatibility types (six is already imported in this module) and
    # isinstance() instead of exact type comparison so str subclasses work.
    if isinstance(module, six.string_types) or isinstance(module, six.text_type):
        module = _get_module(module)
    decorator = silk_profile(name, _dynamic=True)
    func_name = func
    cls, func = _get_func(module, func_name)
    wrapped_target = decorator(func)
    # Methods are re-bound on the class under the bare method name; plain
    # functions are re-bound on the module under the full name.
    if cls:
        setattr(cls, func_name.split('.')[-1], wrapped_target)
    else:
        setattr(module, func_name, wrapped_target)
def _get_parent_module(module):
parent = sys.modules
splt = module.__name__.split('.')
if len(splt) > 1:
for module_name in splt[:-1]:
try:
parent = getattr(parent, module_name)
except AttributeError:
parent = parent[module_name]
return parent
def _get_context_manager_source(end_line, file_path, name, start_line):
inject_code = "with silk_profile('%s', _dynamic=True):\n" % name
code = 'from silk.profiling.profiler import silk_profile\n'
with open(file_path, 'r') as f:
ws = ''
for i, line in enumerate(f):
if i == start_line:
# Use the same amount of whitespace as the line currently occupying
x = re.search(r"^(\s+).*$", line)
try:
ws = x.groups()[0]
except IndexError:
ws = ''
code += ws + inject_code
code += ws + ' ' + line
elif start_line < i <= end_line:
code += ws + ' ' + line
else:
code += line
return code
def _get_ws(txt):
"""
Return whitespace at the beginning of a string
"""
m = re.search(r"^(\s+).*$", txt)
try:
fws = m.groups()[0]
except AttributeError:
fws = ''
return fws
def _get_source_lines(func):
source = inspect.getsourcelines(func)[0]
fws = _get_ws(source[0])
for i in range(0, len(source)):
source[i] = source[i].replace(fws, '', 1)
return source
def _new_func_from_source(source, func):
"""
Create new function defined in source but maintain context from func
@param func: The function whose global + local context we will use
@param source: Python source code containing def statement
"""
src_str = ''.join(source)
frames = inspect.getouterframes(inspect.currentframe())
calling_frame = frames[2][0]
context = {}
# My initial instict was: exec src_str in func.func_globals.items(), calling_frame.f_locals
# however this seems to break the function closure so caveat here is that we create a new
# function with the locals merged into the globals.
#
# Possible consequences I can think of:
# - If a global exists that already has the same name as the local, it will be overwritten in
# in the context of this function. This shouldnt matter though as the global should have already
# been hidden by the new name?
#
# This functionality should be considered experimental as no idea what other consequences there
# could be.
#
# relevant: http://stackoverflow.com/questions/2749655/why-are-closures-broken-within-exec
globals = six.get_function_globals(func)
locals = calling_frame.f_locals
combined = globals.copy()
combined.update(locals)
Logger.debug('New src_str:\n %s' % src_str)
six.exec_(src_str, combined, context)
new_func = context[func.__name__]
return new_func
def _inject_context_manager_func(func, start_line, end_line, name):
"""
injects a context manager into the given function
e.g given:
x = 5
def foo():
print x
print '1'
print '2'
print '3'
inject_context_manager_func(foo, 0, 2, 'cm')
foo will now have the definition:
def foo():
with silk_profile('cm'):
print x
print '1'
print '2'
print '3'
closures, globals & locals are honoured
@param func: object of type<function> or type<instancemethod>
@param start_line: line at which to inject 'with' statement. line num. is relative to the func, not the module.
@param end_line: line at which to exit the context
@param name: name of the profiler
"""
source = _get_source_lines(func)
start_line += 1
end_line += 1
ws = _get_ws(source[start_line])
for i in range(start_line, end_line):
try:
source[i] = ' ' + source[i]
except IndexError:
raise IndexError('Function %s does not have line %d' % (func.__name__, i))
source.insert(start_line, ws + "from silk.profiling.profiler import silk_profile\n")
source.insert(start_line + 1, ws + "with silk_profile('%s', _dynamic=True):\n" % name)
return _new_func_from_source(source, func)
def is_str_typ(o):
return any(map(partial(isinstance, o), six.string_types)) \
or isinstance(o, six.text_type)
def inject_context_manager_func(module, func, start_line, end_line, name):
if is_str_typ(module):
module = _get_module(module)
cls = None
if is_str_typ(func):
func_name = func
cls, func = _get_func(module, func_name)
else:
func_name = func.__name__
new_func = _inject_context_manager_func(func, start_line, end_line, name)
if cls:
setattr(cls, func_name, new_func)
else:
setattr(module, func_name, new_func)
| {
"repo_name": "Alkalit/silk",
"path": "silk/profiling/dynamic.py",
"copies": "1",
"size": "6882",
"license": "mit",
"hash": -5570775661581381000,
"line_mean": 30.2818181818,
"line_max": 115,
"alpha_frac": 0.5970648067,
"autogenerated": false,
"ratio": 3.6940418679549114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9787656780945837,
"avg_score": 0.0006899787418150259,
"num_lines": 220
} |
from functools import partial
import inspect
import logging
import sys
import re
import six
from silk.profiling.profiler import silk_profile
Logger = logging.getLogger('silk')
def _get_module(module_name):
"""
Given a module name in form 'path.to.module' return module object for 'module'.
"""
if '.' in module_name:
splt = module_name.split('.')
imp = '.'.join(splt[:-1])
frm = splt[-1]
module = __import__(imp, globals(), locals(), [frm], 0)
module = getattr(module, frm)
else:
module = __import__(module_name, globals(), locals(), [], 0)
return module
def _get_func(module, func_name):
"""
Given a module and a function name, return the function.
func_name can be of the forms:
- 'foo': return a function
- 'Class.foo': return a method
"""
cls_name = None
cls = None
if '.' in func_name:
cls_name, func_name = func_name.split('.')
if cls_name:
cls = getattr(module, cls_name)
func = getattr(cls, func_name)
else:
func = getattr(module, func_name)
return cls, func
def profile_function_or_method(module, func, name=None):
    """
    Programmatically apply a decorator to a function in a given module [+ class]
    @param module: module object or module name in form 'path.to.module'
    @param func: function object or function name in form 'foo' or 'Class.method'
    @param name: optional profiler name
    """
    # BUG FIX: ``type(module) is str or type(module) is unicode`` raises
    # NameError on Python 3 (no ``unicode`` builtin).  Use the six
    # compatibility types (imported at the top of this module) and
    # isinstance() instead of exact type comparison so str subclasses work.
    if isinstance(module, six.string_types) or isinstance(module, six.text_type):
        module = _get_module(module)
    decorator = silk_profile(name, _dynamic=True)
    func_name = func
    cls, func = _get_func(module, func_name)
    wrapped_target = decorator(func)
    # Methods are re-bound on the class under the bare method name; plain
    # functions are re-bound on the module under the full name.
    if cls:
        setattr(cls, func_name.split('.')[-1], wrapped_target)
    else:
        setattr(module, func_name, wrapped_target)
def _get_parent_module(module):
parent = sys.modules
splt = module.__name__.split('.')
if len(splt) > 1:
for module_name in splt[:-1]:
try:
parent = getattr(parent, module_name)
except AttributeError:
parent = parent[module_name]
return parent
def _get_context_manager_source(end_line, file_path, name, start_line):
inject_code = "with silk_profile('%s', _dynamic=True):\n" % name
code = 'from silk.profiling.profiler import silk_profile\n'
with open(file_path, 'r') as f:
ws = ''
for i, line in enumerate(f):
if i == start_line:
# Use the same amount of whitespace as the line currently occupying
x = re.search(r"^(\s+).*$", line)
try:
ws = x.groups()[0]
except IndexError:
ws = ''
code += ws + inject_code
code += ws + ' ' + line
elif start_line < i <= end_line:
code += ws + ' ' + line
else:
code += line
return code
def _get_ws(txt):
"""
Return whitespace at the beginning of a string
"""
m = re.search(r"^(\s+).*$", txt)
try:
fws = m.groups()[0]
except AttributeError:
fws = ''
return fws
def _get_source_lines(func):
source = inspect.getsourcelines(func)[0]
fws = _get_ws(source[0])
for i in range(0, len(source)):
source[i] = source[i].replace(fws, '', 1)
return source
def _new_func_from_source(source, func):
"""
Create new function defined in source but maintain context from func
@param func: The function whose global + local context we will use
@param source: Python source code containing def statement
"""
src_str = ''.join(source)
frames = inspect.getouterframes(inspect.currentframe())
calling_frame = frames[2][0]
context = {}
# My initial instict was: exec src_str in func.func_globals.items(), calling_frame.f_locals
# however this seems to break the function closure so caveat here is that we create a new
# function with the locals merged into the globals.
#
# Possible consequences I can think of:
# - If a global exists that already has the same name as the local, it will be overwritten in
# in the context of this function. This shouldnt matter though as the global should have already
# been hidden by the new name?
#
# This functionality should be considered experimental as no idea what other consequences there
# could be.
#
# relevant: http://stackoverflow.com/questions/2749655/why-are-closures-broken-within-exec
globals = six.get_function_globals(func)
locals = calling_frame.f_locals
combined = globals.copy()
combined.update(locals)
Logger.debug('New src_str:\n %s' % src_str)
six.exec_(src_str, combined, context)
new_func = context[func.__name__]
return new_func
def _inject_context_manager_func(func, start_line, end_line, name):
"""
injects a context manager into the given function
e.g given:
x = 5
def foo():
print x
print '1'
print '2'
print '3'
inject_context_manager_func(foo, 0, 2, 'cm')
foo will now have the definition:
def foo():
with silk_profile('cm'):
print x
print '1'
print '2'
print '3'
closures, globals & locals are honoured
@param func: object of type<function> or type<instancemethod>
@param start_line: line at which to inject 'with' statement. line num. is relative to the func, not the module.
@param end_line: line at which to exit the context
@param name: name of the profiler
"""
source = _get_source_lines(func)
start_line += 1
end_line += 1
ws = _get_ws(source[start_line])
for i in range(start_line, end_line):
try:
source[i] = ' ' + source[i]
except IndexError:
raise IndexError('Function %s does not have line %d' % (func.__name__, i))
source.insert(start_line, ws + "from silk.profiling.profiler import silk_profile\n")
source.insert(start_line + 1, ws + "with silk_profile('%s', _dynamic=True):\n" % name)
return _new_func_from_source(source, func)
def is_str_typ(o):
return any(map(partial(isinstance, o), six.string_types)) \
or isinstance(o, six.text_type)
def inject_context_manager_func(module, func, start_line, end_line, name):
if is_str_typ(module):
module = _get_module(module)
cls = None
if is_str_typ(func):
func_name = func
cls, func = _get_func(module, func_name)
else:
func_name = func.__name__
new_func = _inject_context_manager_func(func, start_line, end_line, name)
if cls:
setattr(cls, func_name, new_func)
else:
setattr(module, func_name, new_func)
| {
"repo_name": "rosscdh/silk",
"path": "django_silky/silk/profiling/dynamic.py",
"copies": "8",
"size": "6869",
"license": "mit",
"hash": 8137319883238759000,
"line_mean": 29.802690583,
"line_max": 115,
"alpha_frac": 0.5963022274,
"autogenerated": false,
"ratio": 3.6969860064585576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8293288233858557,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import inspect
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_sequence_equal,
assert_set_equal,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.utils.functional import dzip_exact
from zipline.utils.math_utils import tolerant_equals
def keywords(func):
    """Get the argument names of a function
    >>> def f(x, y=2):
    ...     pass
    >>> keywords(f)
    ['x', 'y']

    Notes
    -----
    Taken from odo.utils
    """
    if isinstance(func, type):
        # For classes, inspect the constructor (includes 'self').
        return keywords(func.__init__)
    # BUG FIX: inspect.getargspec was removed in Python 3.11.  Prefer
    # getfullargspec (same ``args`` list) and fall back to getargspec on
    # Python 2, which this module still supports via six.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    return getspec(func).args
def filter_kwargs(f, kwargs):
    """Return the subset of *kwargs* whose names are valid parameters of *f*.

    Examples
    --------
    >>> def f(a, b=1, c=2):
    ...     return a + b + c
    ...
    >>> raw_kwargs = dict(a=1, b=3, d=4)
    >>> f(**raw_kwargs)
    Traceback (most recent call last):
        ...
    TypeError: f() got an unexpected keyword argument 'd'
    >>> kwargs = filter_kwargs(f, raw_kwargs)
    >>> f(**kwargs)
    6

    Notes
    -----
    Taken from odo.utils
    """
    accepted = keywords(f)
    return {key: value for key, value in kwargs.items() if key in accepted}
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
    """Assert that two objects are equal using the ``==`` operator.

    This is the default case of a multiple-dispatch family; the
    ``(type, type)`` registrations below override it for specific pairs.

    Parameters
    ----------
    result : object
        The result that came from the function under test.
    expected : object
        The expected result.
    path : tuple of str, optional
        Accessor path segments used to locate nested failures in messages.
    msg : str, optional
        Extra context prepended to the failure message.

    Raises
    ------
    AssertionError
        Raised when ``result`` is not equal to ``expected``.
    """
    assert result == expected, '%s%s != %s\n%s' % (
        _fmt_msg(msg),
        result,
        expected,
        _fmt_path(path),
    )
@assert_equal.register(float, float)
def assert_float_equal(result,
                       expected,
                       path=(),
                       msg='',
                       float_rtol=10e-7,
                       float_atol=10e-7,
                       float_equal_nan=True,
                       **kwargs):
    """Assert two floats are equal within relative/absolute tolerance.

    By default NaN compares equal to NaN (``float_equal_nan=True``).
    Comparison is delegated to the project's ``tolerant_equals`` helper.
    """
    assert tolerant_equals(
        result,
        expected,
        rtol=float_rtol,
        atol=float_atol,
        equal_nan=float_equal_nan,
    ), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
        _fmt_msg(msg),
        result,
        expected,
        float_rtol,
        float_atol,
        (' (with nan != nan)' if not float_equal_nan else ''),
        _fmt_path(path),
    )
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two dicts have identical key sets and recursively equal values."""
    if path is None:
        path = ()
    result_keys = viewkeys(result)
    expected_keys = viewkeys(expected)
    if result_keys != expected_keys:
        if result_keys > expected_keys:
            # result has a strict superset of the expected keys
            diff = result_keys - expected_keys
            msg = 'extra %s in result: %r' % (_s('key', diff), diff)
        elif result_keys < expected_keys:
            # result has a strict subset of the expected keys
            diff = expected_keys - result_keys
            msg = 'result is missing %s: %r' % (_s('key', diff), diff)
        else:
            # Key sets overlap but neither contains the other:
            # report the extras on each side separately.
            sym = result_keys ^ expected_keys
            in_result = sym - expected_keys
            in_expected = sym - result_keys
            msg = '%s only in result: %s\n%s only in expected: %s' % (
                _s('key', in_result),
                in_result,
                _s('key', in_expected),
                in_expected,
            )
        raise AssertionError(
            '%sdict keys do not match\n%s' % (
                _fmt_msg(msg),
                _fmt_path(path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),)),
            ),
        )
    # Keys match: compare the values pairwise, collecting every failure so
    # the caller sees all mismatches at once rather than just the first.
    failures = []
    for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
        try:
            assert_equal(
                resultv,
                expectedv,
                path=path + ('[%r]' % k,),
                msg=msg,
                **kwargs
            )
        except AssertionError as e:
            failures.append(str(e))
    if failures:
        raise AssertionError('\n'.join(failures))
@assert_equal.register(list, list)  # noqa
def assert_list_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two lists have equal length and recursively equal elements."""
    result_len = len(result)
    expected_len = len(expected)
    assert result_len == expected_len, (
        '%slist lengths do not match: %d != %d\n%s' % (
            _fmt_msg(msg),
            result_len,
            expected_len,
            _fmt_path(path),
        )
    )
    # Lengths match: compare elementwise, recording the index in the path.
    for n, (resultv, expectedv) in enumerate(zip(result, expected)):
        assert_equal(
            resultv,
            expectedv,
            path=path + ('[%d]' % n,),
            msg=msg,
            **kwargs
        )
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
                       expected,
                       path=(),
                       msg='',
                       array_verbose=True,
                       array_decimal=None,
                       **kwargs):
    """Assert two ndarrays are equal, exactly or to ``array_decimal`` places."""
    # Exact comparison by default; almost-equal when a decimal is supplied.
    f = (
        np.testing.assert_array_equal
        if array_decimal is None else
        partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
    )
    try:
        f(
            result,
            expected,
            verbose=array_verbose,
            err_msg=msg,
        )
    except AssertionError as e:
        # Re-raise with the accessor path appended for context.
        raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(pd.DataFrame, pd.DataFrame)
def assert_dataframe_equal(result, expected, path=(), msg='', **kwargs):
    """Assert two DataFrames are equal via pandas' ``assert_frame_equal``."""
    try:
        # Forward only the kwargs that assert_frame_equal actually accepts.
        assert_frame_equal(
            result,
            expected,
            **filter_kwargs(assert_frame_equal, kwargs)
        )
    except AssertionError as e:
        # Re-raise with context message and accessor path prepended/appended.
        raise AssertionError(
            _fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
        )
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
    """Assert two Adjustment objects match attribute-by-attribute."""
    for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
        assert_equal(
            getattr(result, attr),
            getattr(expected, attr),
            path=path + ('.' + attr,),
            **kwargs
        )
try:
# pull the dshape cases in
from datashape.util.testing import assert_dshape_equal
except ImportError:
pass
else:
assert_equal.funcs.update(
dissoc(assert_dshape_equal.funcs, (object, object)),
)
| {
"repo_name": "umuzungu/zipline",
"path": "zipline/testing/predicates.py",
"copies": "1",
"size": "8350",
"license": "apache-2.0",
"hash": 479777998003796700,
"line_mean": 24.9316770186,
"line_max": 79,
"alpha_frac": 0.5405988024,
"autogenerated": false,
"ratio": 3.890959925442684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4931558727842684,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import inspect
from typing import (
Any,
Callable,
Iterable,
List,
Optional,
overload,
Union,
Tuple,
Type,
TypeVar,
)
T = TypeVar("T")

# Values a __rich_repr__ generator may yield: a bare value, a (value,)
# tuple, a (name, value) pair, or a (name, value, default) triple.
RichReprResult = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]


class ReprError(Exception):
    """An error occurred when attempting to build a repr."""


@overload
def auto(cls: Optional[T]) -> T:
    ...


@overload
def auto(*, angular: bool = False) -> Callable[[T], T]:
    ...


def auto(
    cls: Optional[T] = None, *, angular: Optional[bool] = None
) -> Union[T, Callable[[T], T]]:
    """Class decorator to create __repr__ from __rich_repr__."""

    def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:

        def auto_repr(self: Type[T]) -> str:
            """Create repr string from __rich_repr__."""
            parts: List[str] = []
            use_angular = getattr(self.__rich_repr__, "angular", False)  # type: ignore
            for arg in self.__rich_repr__():  # type: ignore
                if not isinstance(arg, tuple):
                    parts.append(repr(arg))
                    continue
                if len(arg) == 1:
                    parts.append(repr(arg[0]))
                    continue
                key, value, *default = arg
                if key is None:
                    parts.append(repr(value))
                elif default and default[0] == value:
                    # Value equals its declared default: omit it.
                    continue
                else:
                    parts.append(f"{key}={value!r}")
            class_name = self.__class__.__name__
            if use_angular:
                return f"<{class_name} {' '.join(parts)}>"
            return f"{class_name}({', '.join(parts)})"

        def auto_rich_repr(self: Type[T]) -> RichReprResult:
            """Auto generate __rich_repr__ from signature of __init__."""
            try:
                init_signature = inspect.signature(self.__init__)  # type: ignore
                for param_name, param in init_signature.parameters.items():
                    if param.kind == param.POSITIONAL_ONLY:
                        yield getattr(self, param_name)
                    elif param.kind in (
                        param.POSITIONAL_OR_KEYWORD,
                        param.KEYWORD_ONLY,
                    ):
                        if param.default == param.empty:
                            yield getattr(self, param_name)
                        else:
                            yield param_name, getattr(self, param_name), param.default
            except Exception as error:
                raise ReprError(
                    f"Failed to auto generate __rich_repr__; {error}"
                ) from None

        if not hasattr(cls, "__rich_repr__"):
            auto_rich_repr.__doc__ = "Build a rich repr"
            cls.__rich_repr__ = auto_rich_repr  # type: ignore
        auto_repr.__doc__ = "Return repr(self)"
        cls.__repr__ = auto_repr  # type: ignore
        if angular is not None:
            cls.__rich_repr__.angular = angular  # type: ignore
        return cls

    if cls is None:
        # Used as @auto(angular=...): return the actual decorator.
        return partial(do_replace, angular=angular)  # type: ignore
    return do_replace(cls, angular=angular)  # type: ignore
@overload
def rich_repr(cls: Optional[T]) -> T:
    ...


@overload
def rich_repr(*, angular: bool = False) -> Callable[[T], T]:
    ...


def rich_repr(
    cls: Optional[T] = None, *, angular: bool = False
) -> Union[T, Callable[[T], T]]:
    """Alias of :func:`auto`: build ``__repr__`` from ``__rich_repr__``."""
    return auto(angular=angular) if cls is None else auto(cls)
if __name__ == "__main__":
    # Demo: a class with a hand-written __rich_repr__.
    @auto
    class Foo:
        def __rich_repr__(self) -> RichReprResult:
            yield "foo"
            yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
            yield "buy", "hand sanitizer"
    foo = Foo()
    from rich.console import Console
    console = Console()
    # Parenthesized form: Foo('foo', bar=..., buy=...).
    console.rule("Standard repr")
    console.print(foo)
    console.print(foo, width=60)
    console.print(foo, width=30)
    # Angular form: <Foo 'foo' bar=... buy=...>.
    console.rule("Angular repr")
    Foo.__rich_repr__.angular = True # type: ignore
    console.print(foo)
    console.print(foo, width=60)
    console.print(foo, width=30)
| {
"repo_name": "willmcgugan/rich",
"path": "rich/repr.py",
"copies": "1",
"size": "4297",
"license": "mit",
"hash": -3898887246026729500,
"line_mean": 27.6466666667,
"line_max": 88,
"alpha_frac": 0.5012799628,
"autogenerated": false,
"ratio": 4.038533834586466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031636988436678477,
"num_lines": 150
} |
from functools import partial
import io
import os
from tempfile import NamedTemporaryFile
import unittest
import six
from unittest import mock
from cloak.serverapi.cli.main import main, get_config
from cloak.serverapi.tests.mock import MockSession
class TestCase(unittest.TestCase):
    """Base test case isolating stdio, the API session, and the config file."""

    # Default target id handed to the mocked API session.
    def_target_id = 'tgt_z24y7miezisykwi6'

    def setUp(self):
        super().setUp()
        self._capture_stdio()
        self._mock_api_session()
        self._isolate_config_file()

    def _capture_stdio(self):
        # Capture stdio so tests can inspect CLI output.
        self.stdout = io.StringIO()
        self.stderr = io.StringIO()

    def _mock_api_session(self):
        # Tests can check on our current state here.
        self.session = MockSession(def_target_id=self.def_target_id)
        # Intercept HTTP calls to the API.
        patcher = mock.patch('cloak.serverapi.utils.http.session', self.session)
        patcher.start()
        self.addCleanup(patcher.stop)

    def _isolate_config_file(self):
        # Make a fresh config file for each test.
        config_file = NamedTemporaryFile()
        os.environ['CLOAK_CONFIG'] = config_file.name
        self.addCleanup(partial(self._cleanup_tempfile, config_file))

    def _cleanup_tempfile(self, config_file):
        config_file.close()
        del os.environ['CLOAK_CONFIG']

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    def main(self, argv):
        """Run the CLI entry point with the captured stdout/stderr."""
        return main(argv, self.stdout, self.stderr)

    def get_config(self):
        return get_config()
| {
"repo_name": "encryptme/private-end-points",
"path": "cloak/serverapi/tests/base.py",
"copies": "1",
"size": "1331",
"license": "mit",
"hash": -4686587875226498000,
"line_mean": 26.1632653061,
"line_max": 80,
"alpha_frac": 0.664162284,
"autogenerated": false,
"ratio": 3.869186046511628,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00025195263290501383,
"num_lines": 49
} |
from functools import partial
import itertools, datetime
from bandicoot_dev.helper.tools import mean, std, SummaryStats, advanced_wrap, AutoVivification, flatarr
# Map a ``groupby`` keyword to a function turning a datetime into its
# grouping key (``None`` puts every record into a single group).
DATE_GROUPERS = {
    None: lambda _: None,
    "day": lambda d: d.isocalendar(),
    "week": lambda d: d.isocalendar()[0:2],
    "month": lambda d: (d.year, d.month),
    "year": lambda d: d.year
}
def _groupby_groups(min_, max_, _fun):
day_each_week = range(0, (max_ - min_).days, 7)
groups = set([_fun(min_+datetime.timedelta(days=d)) for d in day_each_week])
if list(groups)[0] == None:
return [None]
if len(list(groups)[0]) == 1:
return sorted(groups)
if len(list(groups)[0]) == 2:
return sorted(groups, key=lambda x: (x[0], x[1]))
def _group_date(user, records, _fun):
    """For each grouping key in the user's observation period, yield the
    records whose datetime maps to that key."""
    start = user.start_time['any']
    end = user.end_time['any']
    for group_key in _groupby_groups(start, end, _fun):
        yield filter(lambda r: _fun(r.datetime) == group_key, records)
def group_records(user, interaction_types=None, groupby='week', part_of_week='allweek', part_of_day='allday'):
    """
    Group records by year and week number. This function is used by the
    ``@grouping`` decorator.

    Parameters
    ----------
    records : iterator
        An iterator over records
    groupby : Default is 'week':
        * 'week': group all records by year and week;
        * None: records are not grouped. This is useful if you don't want to
          divide records in chunks.
        * "day", "month", and "year" also accepted.
    part_of_week : {'allweek', 'weekday', 'weekend'}, default 'allweek'
        * 'weekend': keep only the weekend records
        * 'weekday': keep only the weekdays records
        * 'allweek': use all the records
    part_of_day : {'allday', 'day', 'night'}, default 'allday'
        * 'day': keep only the records during the day
        * 'night': keep only the records during the night
        * 'allday': use all the records
    interaction : object
        The interaction to filter records:
        * "callandtext", for only callandtext;
        * a string, to filter for one type;
        * None, to use all records.
    """
    def get_records(interaction_type):
        # Map an interaction name to the user's corresponding record list.
        if interaction_type == "call":
            return user.call_records
        if interaction_type == "text":
            return user.text_records
        if interaction_type == "physical":
            return user.physical_records
        if interaction_type == "screen":
            return user.screen_records
        if interaction_type == "stop":
            return user.stop_records

    records = sorted(
        flatarr([get_records(i) for i in flatarr(interaction_types)]),
        key=lambda r: r.datetime
    )

    # Use list comprehensions (not lazy ``filter`` objects): the result is
    # re-iterated once per group by ``_group_date``, and a Python 3 filter
    # iterator would be exhausted after the first group.
    if part_of_week == 'weekday':
        records = [r for r in records if r.datetime.isoweekday() not in user.weekend]
    elif part_of_week == 'weekend':
        records = [r for r in records if r.datetime.isoweekday() in user.weekend]
    elif part_of_week != 'allweek':
        # Equality (not ``is not``) comparison: identity tests on string
        # literals rely on interning and are not guaranteed to work.
        raise KeyError("{} is not a valid value for part_of_week. it should be 'weekday', 'weekend' or 'allweek'.".format(part_of_week))

    # Night wraps midnight when night_start > night_end.
    if user.night_start < user.night_end:
        night_filter = lambda r: user.night_end > r.datetime.time() > user.night_start
    else:
        night_filter = lambda r: not(user.night_end < r.datetime.time() < user.night_start)

    if part_of_day == 'day':
        records = [r for r in records if not night_filter(r)]
    elif part_of_day == 'night':
        records = [r for r in records if night_filter(r)]
    elif part_of_day != 'allday':
        raise KeyError("{} is not a valid value for part_of_day. It should be 'day', 'night' or 'allday'.".format(part_of_day))

    return _group_date(user, records, DATE_GROUPERS[groupby])
def statistics(data, summary='default', datatype=None):
    """
    Return statistics (mean, standard error, standard error and median, min and max) on data metrics.

    Examples
    --------
    Given a list of integers or floating point numbers,
    ``statistics`` computes the mean and standard error of the mean, and the min and max.

    >>> statistics([0, 1, 2, 3])
    {'mean': 1.5, 'std': 1.2910, 'min': 0, 'max': 3}

    Given a list of ``SummaryStats`` tuples, the function will
    returns the mean, standard error of the mean, min and max for each attribute
    of the tuples.
    """
    def _default_stats(agg):
        # Mean/std of a list, ignoring None entries.
        if agg is None or len(agg) == 0:
            return {'mean': None, 'std': None}
        # Materialize as a list: a lazy ``filter`` object would be
        # exhausted by ``mean`` before ``std`` could consume it.
        agg = [x for x in agg if x is not None]
        return {'mean': mean(agg), 'std': std(agg)}

    def _stats_dict(v):
        # Apply _default_stats to each named attribute across ``data``.
        return {key: _default_stats([getattr(s, key, None) for s in data]) for key in v}

    summary_keys = {
        'default': ['mean', 'std'],
        'extended': ['mean', 'std', 'median', 'skewness', 'kurtosis', 'min', 'max']
    }

    # Infer the data type of 'data'
    if datatype is None:
        if isinstance(data, (type(None), int, float)):
            datatype = 'scalar'
        elif isinstance(data, SummaryStats):
            datatype = 'summarystats'
        elif hasattr(data, "__len__"):  # list or numpy array
            # List comprehension (not filter): ``len`` and indexing below
            # require a sequence under Python 3.
            data = [x for x in data if x is not None]
            if len(data) == 0 or isinstance(data[0], (int, float)):
                datatype = 'distribution_scalar'
            elif isinstance(data[0], SummaryStats):
                datatype = 'distribution_summarystats'
            else:
                raise TypeError("{} is not a valid input. It should be a number, a SummaryStats object, or None".format(data[0]))
        else:
            raise TypeError("{} is not a valid input. It should be a number, a SummaryStats object, or a list".format(data))

    if datatype == 'scalar':
        return data

    if datatype == 'summarystats':
        if summary is None:
            return [item.distribution if item is not None else [] for item in data]
        elif summary in ['default', 'extended']:
            return {key: getattr(data, key, None) for key in summary_keys[summary]}
        elif summary == "special":
            return [{'mean': item.mean, 'std': item.std} if item is not None else
                    {'mean': None, 'std': None} for item in data]
        else:
            raise ValueError("{} is not a valid summary type".format(summary))

    if datatype == 'distribution_scalar':
        # Equality (not ``is``) comparison for string literals.
        if summary == 'default':
            return _default_stats(data)
        elif summary is None:
            return data
        else:
            raise ValueError("{} is not a valid summary type".format(summary))

    if datatype == 'distribution_summarystats':
        if summary is None:
            return [item.distribution for item in data]
        elif summary in ['extended', 'default']:
            return _stats_dict(summary_keys[summary])
        elif summary == "special":
            return _stats_dict(summary_keys['default'])
        else:
            raise ValueError("{} is not a valid summary type".format(summary))
    else:
        raise ValueError("{} is not a valid data type.".format(datatype))
def grouping(f=None, user_kwd=False, interaction=['call', 'text'], summary='default'):
    """
    ``grouping`` is a decorator for indicator functions, used to simplify the source code.
    Parameters
    ----------
    f : function
        The function to decorate
    user_kwd : boolean
        If user_kwd is True, the user object will be passed to the decorated function
    interaction : 'call', 'text', 'location', or a list
        By default, all indicators use only 'call' and 'text' records, but the
        interaction keywords filters the records passed to the function.
    summary: 'default', 'extended', None
        An indicator returns data statistics, ether *mean* and *std* by
        default, more with 'extended', or the inner distribution with None.
        See :meth:`~bandicoot.helper.group.statistics` for more details.
    See :ref:`new-indicator-label` to learn how to write an indicator with this decorator.
    """
    # Called with keyword arguments only (``@grouping(...)``): return a
    # decorator that re-enters grouping() with the function to wrap.
    if f is None:
        return partial(grouping, user_kwd=user_kwd, interaction=interaction, summary=summary)
    def wrapper(user, groupby='week', interaction=interaction, summary=summary, split_week=False, split_day=False, datatype=None, **kwargs):
        if interaction is None:
            interaction = ['call', 'text']
        if type(interaction) is str:
            # A string like "callandtext" becomes one combined group
            # [['call', 'text']] whose records are merged together.
            interaction = [interaction.split("and")]
        part_of_day = ['allday']
        if split_day:
            part_of_day += ['day', 'night']
        part_of_week = ['allweek']
        if split_week:
            part_of_week += ['weekday', 'weekend']
        for i in flatarr(interaction):
            if i not in ['call', 'text', 'physical', 'screen', 'stop']:
                raise ValueError("%s is not a valid interaction value. Only 'call', \
                'text', 'physical', 'screen', 'stop' are accepted." % i)
        def map_filters(interaction, part_of_week, part_of_day):
            """
            Call the wrapped function for every combinations of interaction,
            part_of_week, and part_of_day.
            """
            for i in interaction:
                # Skip interaction types the user has no data for.
                if sum((user.supported_types[t] for t in flatarr(i))) != len(flatarr(i)):
                    continue
                for filter_week in part_of_week:
                    for filter_day in part_of_day:
                        if user_kwd is True:
                            result = [f(g, user, **kwargs) if len(g) != 0 else None
                                      for g in group_records(user, i, groupby, filter_week, filter_day)]
                        else:
                            result = [f(g, **kwargs) if len(g) != 0 else None
                                      for g in group_records(user, i, groupby, filter_week, filter_day)]
                        i_label = '+'.join(i) if type(i) is list else i
                        yield filter_week, filter_day, i_label, result
        returned = AutoVivification()  # nested dict structure
        for (f_w, f_d, i_label, m) in map_filters(interaction, part_of_week, part_of_day):
            if groupby is None:
                # Ungrouped: collapse the single chunk (or None if empty).
                m = m[0] if len(m) != 0 else None
            else:
                if len(m) == 0:
                    continue
            returned[f_w][f_d][i_label] = statistics(m, summary=summary, datatype=datatype)
        return returned
    return advanced_wrap(f, wrapper)
def _binning(records):
"""
Bin records by chunks of 30 minutes, returning the most prevalent position.
"""
records = list(records)
from collections import Counter
def get_key(d):
from datetime import datetime, timedelta
k = d + timedelta(minutes=-(d.minute % 30))
return datetime(k.year, k.month, k.day, k.hour, k.minute, 0)
chunks = itertools.groupby(records, key=lambda r: get_key(r.datetime))
for _, items in chunks:
positions = [i.position for i in items]
yield Counter(positions).most_common(1)[0][0]
def spatial_grouping(f=None, user_kwd=False, summary='default', use_records=False):
    """Decorator for spatial indicators.

    Works like ``grouping`` but without interaction filtering: records are
    grouped over time and, unless ``use_records`` is True, binned into
    30-minute chunks (see ``_binning``) before being handed to ``f``.
    """
    if f is None:
        # Called with keyword arguments only: return the actual decorator.
        return partial(spatial_grouping, user_kwd=user_kwd, summary=summary,
                       use_records=use_records)
    if use_records is True:
        map_records = lambda x: x
    else:
        map_records = _binning
    def wrapper(user, groupby='week', summary=summary, split_week=False, split_day=False, datatype=None, **kwargs):
        part_of_day = ['allday']
        if split_day:
            part_of_day += ['day', 'night']
        part_of_week = ['allweek']
        if split_week:
            part_of_week += ['weekday', 'weekend']
        def map_filters_spatial(part_of_week, part_of_day):
            """
            Call the wrapped function for every combination of
            part_of_week and part_of_day.
            """
            for filter_week in part_of_week:
                for filter_day in part_of_day:
                    # Bug fix: the user_kwd branch previously iterated over
                    # the argument tuple ``(user, None, groupby, ...)``
                    # itself instead of calling ``group_records`` with
                    # those arguments.
                    groups = group_records(user, None, groupby, filter_week, filter_day)
                    if user_kwd is True:
                        result = [f(map_records(g), user, **kwargs) for g in groups]
                    else:
                        result = [f(map_records(g), **kwargs) for g in groups]
                    yield filter_week, filter_day, result
        returned = AutoVivification()  # nested dict structure
        for (f_w, f_d, m) in map_filters_spatial(part_of_week, part_of_day):
            if groupby is None:
                # Ungrouped: collapse the single chunk (or None if empty).
                m = m[0] if len(m) != 0 else None
            returned[f_w][f_d] = statistics(m, summary=summary, datatype=datatype)
        return returned
    return advanced_wrap(f, wrapper)
| {
"repo_name": "ulfaslak/bandicoot",
"path": "helper/group.py",
"copies": "1",
"size": "13108",
"license": "mit",
"hash": 4263673354872722400,
"line_mean": 39.5820433437,
"line_max": 140,
"alpha_frac": 0.5772047605,
"autogenerated": false,
"ratio": 3.9541478129713425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031352573471343,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
from bandicoot.helper.tools import mean, std, SummaryStats, advanced_wrap, AutoVivification
# Map a ``groupby`` keyword to a function turning a datetime into its
# grouping key (``None`` puts every record into a single group).
DATE_GROUPERS = {
    None: lambda _: None,
    "day": lambda d: d.isocalendar(),
    "week": lambda d: d.isocalendar()[0:2],
    "month": lambda d: (d.year, d.month),
    "year": lambda d: d.year
}
def _group_date(records, _fun):
for _, chunk in itertools.groupby(records, key=lambda r: _fun(r.datetime)):
yield chunk
def group_records(user, interaction=None, groupby='week', part_of_week='allweek', part_of_day='allday'):
    """
    Group records by year and week number. This function is used by the
    ``@grouping`` decorator.

    Parameters
    ----------
    records : iterator
        An iterator over records
    groupby : Default is 'week':
        * 'week': group all records by year and week;
        * None: records are not grouped. This is useful if you don't want to
          divide records in chunks.
        * "day", "month", and "year" also accepted.
    part_of_week : {'allweek', 'weekday', 'weekend'}, default 'allweek'
        * 'weekend': keep only the weekend records
        * 'weekday': keep only the weekdays records
        * 'allweek': use all the records
    part_of_day : {'allday', 'day', 'night'}, default 'allday'
        * 'day': keep only the records during the day
        * 'night': keep only the records during the night
        * 'allday': use all the records
    interaction : object
        The interaction to filter records:
        * "callandtext", for only callandtext;
        * a string, to filter for one type;
        * None, to use all records.
    """
    records = user.records
    # Restrict to the requested interaction type(s).
    if interaction == 'callandtext':
        records = filter(lambda r: r.interaction in ['call', 'text'], records)
    elif interaction is not None:
        records = filter(lambda r: r.interaction == interaction, records)
    if part_of_week == 'weekday':
        records = filter(lambda r: r.datetime.isoweekday() not in user.weekend, records)
    elif part_of_week == 'weekend':
        records = filter(lambda r: r.datetime.isoweekday() in user.weekend, records)
    elif part_of_week != 'allweek':
        # Equality (not ``is not``) comparison: identity tests on string
        # literals rely on interning and are not guaranteed to work.
        raise KeyError("{} is not a valid value for part_of_week. it should be 'weekday', 'weekend' or 'allweek'.".format(part_of_week))
    # Night wraps midnight when night_start > night_end.
    if user.night_start < user.night_end:
        night_filter = lambda r: user.night_end > r.datetime.time() > user.night_start
    else:
        night_filter = lambda r: not(user.night_end < r.datetime.time() < user.night_start)
    if part_of_day == 'day':
        records = filter(lambda r: not(night_filter(r)), records)
    elif part_of_day == 'night':
        records = filter(night_filter, records)
    elif part_of_day != 'allday':
        raise KeyError("{} is not a valid value for part_of_day. It should be 'day', 'night' or 'allday'.".format(part_of_day))
    return _group_date(records, DATE_GROUPERS[groupby])
def statistics(data, summary='default', datatype=None):
    """
    Return statistics (mean, standard error, standard error and median, min and max) on data metrics.

    Examples
    --------
    Given a list of integers or floating point numbers,
    ``statistics`` computes the mean and standard error of the mean, and the min and max.

    >>> statistics([0, 1, 2, 3])
    {'mean': 1.5, 'std': 1.2910, 'min': 0, 'max': 3}

    Given a list of ``SummaryStats`` tuples, the function will
    returns the mean, standard error of the mean, min and max for each attribute
    of the tuples.
    """
    def _default_stats(agg):
        # Mean/std of a list, ignoring None entries.
        if agg is None or len(agg) == 0:
            return {'mean': None, 'std': None}
        # Materialize as a list: a lazy ``filter`` object would be
        # exhausted by ``mean`` before ``std`` could consume it.
        agg = [x for x in agg if x is not None]
        return {'mean': mean(agg), 'std': std(agg)}

    def _stats_dict(v):
        # Apply _default_stats to each named attribute across ``data``.
        return {key: _default_stats([getattr(s, key, None) for s in data]) for key in v}

    summary_keys = {
        'default': ['mean', 'std'],
        'extended': ['mean', 'std', 'median', 'skewness', 'kurtosis', 'min', 'max']
    }

    # Infer the data type of 'data'
    if datatype is None:
        if isinstance(data, (type(None), int, float)):
            datatype = 'scalar'
        elif isinstance(data, SummaryStats):
            datatype = 'summarystats'
        elif hasattr(data, "__len__"):  # list or numpy array
            # List comprehension (not filter): ``len`` and indexing below
            # require a sequence under Python 3.
            data = [x for x in data if x is not None]
            if len(data) == 0 or isinstance(data[0], (int, float)):
                datatype = 'distribution_scalar'
            elif isinstance(data[0], SummaryStats):
                datatype = 'distribution_summarystats'
            else:
                raise TypeError("{} is not a valid input. It should be a number, a SummaryStats object, or None".format(data[0]))
        else:
            raise TypeError("{} is not a valid input. It should be a number, a SummaryStats object, or a list".format(data))

    if datatype == 'scalar':
        return data

    if datatype == 'summarystats':
        if summary is None:
            return data.distribution
        elif summary in ['default', 'extended']:
            return {key: getattr(data, key, None) for key in summary_keys[summary]}
        else:
            raise ValueError("{} is not a valid summary type".format(summary))

    if datatype == 'distribution_scalar':
        # Equality (not ``is``) comparison for string literals.
        if summary == 'default':
            return _default_stats(data)
        elif summary is None:
            return data
        else:
            raise ValueError("{} is not a valid summary type".format(summary))

    if datatype == 'distribution_summarystats':
        if summary is None:
            return [item.distribution for item in data]
        elif summary in ['extended', 'default']:
            return _stats_dict(summary_keys[summary])
        else:
            raise ValueError("{} is not a valid summary type".format(summary))
    else:
        raise ValueError("{} is not a valid data type.".format(datatype))
def grouping(f=None, user_kwd=False, interaction=['call', 'text'], summary='default'):
    """
    ``grouping`` is a decorator for indicator functions, used to simplify the source code.
    Parameters
    ----------
    f : function
        The function to decorate
    user_kwd : boolean
        If user_kwd is True, the user object will be passed to the decorated function
    interaction : 'call', 'text', 'location', or a list
        By default, all indicators use only 'call' and 'text' records, but the
        interaction keywords filters the records passed to the function.
    summary: 'default', 'extended', None
        An indicator returns data statistics, ether *mean* and *std* by
        default, more with 'extended', or the inner distribution with None.
        See :meth:`~bandicoot.helper.group.statistics` for more details.
    See :ref:`new-indicator-label` to learn how to write an indicator with this decorator.
    """
    # Called with keyword arguments only (``@grouping(...)``): return a
    # decorator that re-enters grouping() with the function to wrap.
    if f is None:
        return partial(grouping, user_kwd=user_kwd, interaction=interaction, summary=summary)
    def wrapper(user, groupby='week', interaction=interaction, summary=summary, split_week=False, split_day=False, datatype=None, **kwargs):
        if interaction is None:
            interaction = ['call', 'text']
        elif isinstance(interaction, str):
            interaction = [interaction]
        else:
            interaction = interaction[:]  # copy the list for more safety
        part_of_day = ['allday']
        if split_day:
            part_of_day += ['day', 'night']
        part_of_week = ['allweek']
        if split_week:
            part_of_week += ['weekday', 'weekend']
        for i in interaction:
            if i not in ['callandtext', 'call', 'text', 'location']:
                raise ValueError("%s is not a valid interaction value. Only 'call', "
                                 "'text', and 'location' are accepted." % i)
        def map_filters(interaction, part_of_week, part_of_day):
            """
            Call the wrapped function for every combinations of interaction,
            part_of_week, and part_of_day.
            """
            for i in interaction:
                for filter_week in part_of_week:
                    for filter_day in part_of_day:
                        if user_kwd is True:
                            result = [f(g, user, **kwargs) for g in group_records(user, i, groupby, filter_week, filter_day)]
                        else:
                            result = [f(g, **kwargs) for g in group_records(user, i, groupby, filter_week, filter_day)]
                        yield filter_week, filter_day, i, result
        returned = AutoVivification()  # nested dict structure
        for (f_w, f_d, i, m) in map_filters(interaction, part_of_week, part_of_day):
            if groupby is None:
                # Ungrouped: collapse the single chunk (or None if empty).
                m = m[0] if len(m) != 0 else None
            returned[f_w][f_d][i] = statistics(m, summary=summary, datatype=datatype)
        return returned
    return advanced_wrap(f, wrapper)
def _binning(records):
"""
Bin records by chunks of 30 minutes, returning the most prevalent position.
"""
records = list(records)
from collections import Counter
def get_key(d):
from datetime import datetime, timedelta
k = d + timedelta(minutes=-(d.minute % 30))
return datetime(k.year, k.month, k.day, k.hour, k.minute, 0)
chunks = itertools.groupby(records, key=lambda r: get_key(r.datetime))
for _, items in chunks:
positions = [i.position for i in items]
yield Counter(positions).most_common(1)[0][0]
def spatial_grouping(f=None, user_kwd=False, summary='default', use_records=False):
    """Decorator for spatial indicators: like ``grouping`` but without
    interaction filtering; unless ``use_records`` is True, records are
    binned into 30-minute chunks (see ``_binning``) before calling ``f``.
    """
    if f is None:
        return partial(spatial_grouping, user_kwd=user_kwd, summary=summary,
                       use_records=use_records)
    if use_records is True:
        map_records = lambda x: x
    else:
        map_records = _binning
    def wrapper(user, groupby='week', summary=summary, split_week=False, split_day=False, datatype=None, **kwargs):
        part_of_day = ['allday']
        if split_day:
            part_of_day += ['day', 'night']
        part_of_week = ['allweek']
        if split_week:
            part_of_week += ['weekday', 'weekend']
        def map_filters_spatial(part_of_week, part_of_day):
            """
            Call the wrapped function for every combinations of interaction,
            part_of_week, and part_of_day.
            """
            for filter_week in part_of_week:
                for filter_day in part_of_day:
                    if user_kwd is True:
                        result = [f(map_records(g), user, **kwargs) for g in group_records(user, None, groupby, filter_week, filter_day)]
                    else:
                        result = [f(map_records(g), **kwargs) for g in group_records(user, None, groupby, filter_week, filter_day)]
                    yield filter_week, filter_day, result
        returned = AutoVivification()  # nested dict structure
        for (f_w, f_d, m) in map_filters_spatial(part_of_week, part_of_day):
            if groupby is None:
                # Ungrouped: collapse the single chunk (or None if empty).
                m = m[0] if len(m) != 0 else None
            returned[f_w][f_d] = statistics(m, summary=summary, datatype=datatype)
        return returned
    return advanced_wrap(f, wrapper)
| {
"repo_name": "econandrew/bandicoot",
"path": "bandicoot/helper/group.py",
"copies": "1",
"size": "11439",
"license": "mit",
"hash": -8694669272772959000,
"line_mean": 38.0409556314,
"line_max": 140,
"alpha_frac": 0.5949820789,
"autogenerated": false,
"ratio": 3.9636174636174637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058599542517463,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import os
from _pydev_bundle._pydev_imports_tipper import TYPE_IMPORT, TYPE_CLASS, TYPE_FUNCTION, TYPE_ATTR, \
TYPE_BUILTIN, TYPE_PARAM
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle._debug_adapter import pydevd_schema
from _pydevd_bundle._debug_adapter.pydevd_schema import ModuleEvent, ModuleEventBody, Module, \
OutputEventBody, OutputEvent, ContinuedEventBody
from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_CREATE, CMD_RETURN, CMD_MODULE_EVENT, \
CMD_WRITE_TO_CONSOLE, CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, \
CMD_STEP_RETURN, CMD_STEP_CAUGHT_EXCEPTION, CMD_ADD_EXCEPTION_BREAK, CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, \
CMD_THREAD_RESUME_SINGLE_NOTIFICATION, CMD_THREAD_KILL, CMD_STOP_ON_START, CMD_INPUT_REQUESTED, \
CMD_EXIT, CMD_STEP_INTO_COROUTINE, CMD_STEP_RETURN_MY_CODE, CMD_SMART_STEP_INTO
from _pydevd_bundle.pydevd_constants import get_thread_id, dict_values, ForkSafeLock
from _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_utils import get_non_pydevd_threads
import pydevd_file_utils
from _pydevd_bundle.pydevd_comm import build_exception_info_response
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle import pydevd_frame_utils, pydevd_constants, pydevd_utils
import linecache
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
# Py2/Py3 compatibility: StringIO moved into the ``io`` module in Python 3.
try:
    from StringIO import StringIO
except ImportError:
    # Catch only ImportError: a bare ``except:`` would also swallow
    # KeyboardInterrupt/SystemExit during import.
    from io import StringIO
class ModulesManager(object):
    """Tracks modules seen by the debugger and assigns them stable ids."""

    def __init__(self):
        self._lock = ForkSafeLock()
        # filename (utf-8) -> module info dict (Module.to_dict()).
        self._modules = {}
        # Monotonically increasing module-id generator.
        self._next_id = partial(next, itertools.count(0))

    def track_module(self, filename_in_utf8, module_name, frame):
        '''
        :return list(NetCommand):
            Returns a list with the module events to be sent.
        '''
        if filename_in_utf8 in self._modules:
            return []

        module_events = []
        with self._lock:
            # Must check again after getting the lock.
            if filename_in_utf8 in self._modules:
                # Bug fix: this previously returned None, which broke
                # callers doing ``events.extend(track_module(...))``.
                return []

            try:
                version = str(frame.f_globals.get('__version__', ''))
            except:
                version = '<unknown>'

            try:
                package_name = str(frame.f_globals.get('__package__', ''))
            except:
                package_name = '<unknown>'

            module_id = self._next_id()

            module = Module(module_id, module_name, filename_in_utf8)
            if version:
                module.version = version

            if package_name:
                # Note: package doesn't appear in the docs but seems to be expected?
                module.kwargs['package'] = package_name

            module_event = ModuleEvent(ModuleEventBody('new', module))
            module_events.append(NetCommand(CMD_MODULE_EVENT, 0, module_event, is_json=True))

            self._modules[filename_in_utf8] = module.to_dict()
        return module_events

    def get_modules_info(self):
        '''
        :return list(Module)
        '''
        with self._lock:
            return dict_values(self._modules)
class NetCommandFactoryJson(NetCommandFactory):
'''
Factory for commands which will provide messages as json (they should be
similar to the debug adapter where possible, although some differences
are currently Ok).
Note that it currently overrides the xml version so that messages
can be done one at a time (any message not overridden will currently
use the xml version) -- after having all messages handled, it should
no longer use NetCommandFactory as the base class.
'''
    def __init__(self):
        # Initialize the XML factory base, plus the module tracker used to
        # emit DAP 'module' events for newly-seen files.
        NetCommandFactory.__init__(self)
        self.modules_manager = ModulesManager()
    @overrides(NetCommandFactory.make_version_message)
    def make_version_message(self, seq):
        # The DAP has no version handshake message; reply with a no-op.
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_protocol_set_message)
    def make_protocol_set_message(self, seq):
        # Protocol selection is implicit for json clients; reply with a no-op.
        return NULL_NET_COMMAND  # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_thread_created_message)
    def make_thread_created_message(self, thread):
        # Builds a DAP 'thread' event with reason 'started'.
        # Note: the thread id for the debug adapter must be an int
        # (make the actual id from get_thread_id respect that later on).
        msg = pydevd_schema.ThreadEvent(
            pydevd_schema.ThreadEventBody('started', get_thread_id(thread)),
        )
        return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)
    @overrides(NetCommandFactory.make_thread_killed_message)
    def make_thread_killed_message(self, tid):
        # Builds a DAP 'thread' event with reason 'exited' for thread ``tid``.
        msg = pydevd_schema.ThreadEvent(
            pydevd_schema.ThreadEventBody('exited', tid),
        )
        return NetCommand(CMD_THREAD_KILL, 0, msg, is_json=True)
@overrides(NetCommandFactory.make_list_threads_message)
def make_list_threads_message(self, py_db, seq):
threads = []
for thread in get_non_pydevd_threads():
if is_thread_alive(thread):
thread_id = get_thread_id(thread)
# Notify that it's created (no-op if we already notified before).
py_db.notify_thread_created(thread_id, thread)
thread_schema = pydevd_schema.Thread(id=thread_id, name=thread.getName())
threads.append(thread_schema.to_dict())
body = pydevd_schema.ThreadsResponseBody(threads)
response = pydevd_schema.ThreadsResponse(
request_seq=seq, success=True, command='threads', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_get_completions_message)
def make_get_completions_message(self, seq, completions, qualifier, start):
COMPLETION_TYPE_LOOK_UP = {
TYPE_IMPORT: pydevd_schema.CompletionItemType.MODULE,
TYPE_CLASS: pydevd_schema.CompletionItemType.CLASS,
TYPE_FUNCTION: pydevd_schema.CompletionItemType.FUNCTION,
TYPE_ATTR: pydevd_schema.CompletionItemType.FIELD,
TYPE_BUILTIN: pydevd_schema.CompletionItemType.KEYWORD,
TYPE_PARAM: pydevd_schema.CompletionItemType.VARIABLE,
}
qualifier = qualifier.lower()
qualifier_len = len(qualifier)
targets = []
for completion in completions:
label = completion[0]
if label.lower().startswith(qualifier):
completion = pydevd_schema.CompletionItem(
label=label, type=COMPLETION_TYPE_LOOK_UP[completion[3]], start=start, length=qualifier_len)
targets.append(completion.to_dict())
body = pydevd_schema.CompletionsResponseBody(targets)
response = pydevd_schema.CompletionsResponse(
request_seq=seq, success=True, command='completions', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _format_frame_name(self, fmt, initial_name, module_name, line, path):
if fmt is None:
return initial_name
frame_name = initial_name
if fmt.get('module', False):
if module_name:
if initial_name == '<module>':
frame_name = module_name
else:
frame_name = '%s.%s' % (module_name, initial_name)
else:
basename = os.path.basename(path)
basename = basename[0:-3] if basename.lower().endswith('.py') else basename
if initial_name == '<module>':
frame_name = '%s in %s' % (initial_name, basename)
else:
frame_name = '%s.%s' % (basename, initial_name)
if fmt.get('line', False):
frame_name = '%s : %d' % (frame_name, line)
return frame_name
    @overrides(NetCommandFactory.make_get_thread_stack_message)
    def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):
        """Build a DAP 'stackTrace' response for the given thread.

        Uses the frames list stored by the suspended-frames manager when the
        thread is suspended; otherwise (unless ``must_be_suspended``) builds
        one from ``topmost_frame``.  Honors ``fmt`` (see _format_frame_name),
        the configured file filters, and the ``start_frame``/``levels``
        paging parameters.  Returns None when ``must_be_suspended`` is set
        and no suspended stack is available.
        """
        frames = []
        module_events = []
        try:
            # : :type suspended_frames_manager: SuspendedFramesManager
            suspended_frames_manager = py_db.suspended_frames_manager
            frames_list = suspended_frames_manager.get_frames_list(thread_id)
            if frames_list is None:
                # Could not find stack of suspended frame...
                if must_be_suspended:
                    return None
                else:
                    frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)
            for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno, applied_mapping, show_as_current_frame in self._iter_visible_frames_info(
                    py_db, frames_list
                ):
                try:
                    module_name = str(frame.f_globals.get('__name__', ''))
                except:
                    module_name = '<unknown>'
                # Module events are queued and only sent after the loop below.
                module_events.extend(self.modules_manager.track_module(filename_in_utf8, module_name, frame))
                presentation_hint = None
                if not getattr(frame, 'IS_PLUGIN_FRAME', False):  # Never filter out plugin frames!
                    if py_db.is_files_filter_enabled and py_db.apply_files_filter(frame, original_filename, False):
                        continue
                    if not py_db.in_project_scope(frame):
                        # Shown, but de-emphasized by the client.
                        presentation_hint = 'subtle'
                formatted_name = self._format_frame_name(fmt, method_name, module_name, lineno, filename_in_utf8)
                if show_as_current_frame:
                    formatted_name += ' (Current frame)'
                source_reference = pydevd_file_utils.get_client_filename_source_reference(filename_in_utf8)
                if not source_reference and not applied_mapping and not os.path.exists(original_filename):
                    if getattr(frame.f_code, 'co_lnotab', None):
                        # Create a source-reference to be used where we provide the source by decompiling the code.
                        # Note: When the time comes to retrieve the source reference in this case, we'll
                        # check the linecache first (see: get_decompiled_source_from_frame_id).
                        source_reference = pydevd_file_utils.create_source_reference_for_frame_id(frame_id, original_filename)
                    else:
                        # Check if someone added a source reference to the linecache (Python attrs does this).
                        if linecache.getline(original_filename, 1):
                            source_reference = pydevd_file_utils.create_source_reference_for_linecache(
                                original_filename)
                frames.append(pydevd_schema.StackFrame(
                    frame_id, formatted_name, lineno, column=1, source={
                        'path': filename_in_utf8,
                        'sourceReference': source_reference,
                    },
                    presentationHint=presentation_hint).to_dict())
        finally:
            # Drop the frame reference so we don't keep the stack alive.
            topmost_frame = None
        for module_event in module_events:
            py_db.writer.add_command(module_event)
        total_frames = len(frames)
        stack_frames = frames
        if bool(levels):
            # Page the result: levels == 0 means 'all frames'.
            start = start_frame
            end = min(start + levels, total_frames)
            stack_frames = frames[start:end]
        response = pydevd_schema.StackTraceResponse(
            request_seq=seq,
            success=True,
            command='stackTrace',
            body=pydevd_schema.StackTraceResponseBody(stackFrames=stack_frames, totalFrames=total_frames))
        return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_warning_message)
def make_warning_message(self, msg):
category = 'console'
body = OutputEventBody(msg, category)
event = OutputEvent(body)
return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)
@overrides(NetCommandFactory.make_io_message)
def make_io_message(self, msg, ctx):
category = 'stdout' if int(ctx) == 1 else 'stderr'
body = OutputEventBody(msg, category)
event = OutputEvent(body)
return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)
_STEP_REASONS = set([
CMD_STEP_INTO,
CMD_STEP_INTO_MY_CODE,
CMD_STEP_OVER,
CMD_STEP_OVER_MY_CODE,
CMD_STEP_RETURN,
CMD_STEP_RETURN_MY_CODE,
CMD_STEP_INTO_MY_CODE,
CMD_STOP_ON_START,
CMD_STEP_INTO_COROUTINE,
CMD_SMART_STEP_INTO,
])
_EXCEPTION_REASONS = set([
CMD_STEP_CAUGHT_EXCEPTION,
CMD_ADD_EXCEPTION_BREAK,
])
@overrides(NetCommandFactory.make_thread_suspend_single_notification)
def make_thread_suspend_single_notification(self, py_db, thread_id, stop_reason):
exc_desc = None
exc_name = None
thread = pydevd_find_thread_by_id(thread_id)
info = set_additional_thread_info(thread)
if stop_reason in self._STEP_REASONS:
if info.pydev_original_step_cmd == CMD_STOP_ON_START:
# Just to make sure that's not set as the original reason anymore.
info.pydev_original_step_cmd = -1
stop_reason = 'entry'
else:
stop_reason = 'step'
elif stop_reason in self._EXCEPTION_REASONS:
stop_reason = 'exception'
elif stop_reason == CMD_SET_BREAK:
stop_reason = 'breakpoint'
elif stop_reason == CMD_SET_NEXT_STATEMENT:
stop_reason = 'goto'
else:
stop_reason = 'pause'
if stop_reason == 'exception':
exception_info_response = build_exception_info_response(
py_db, thread_id, -1, set_additional_thread_info, self._iter_visible_frames_info, max_frames=-1)
exception_info_response
exc_name = exception_info_response.body.exceptionId
exc_desc = exception_info_response.body.description
body = pydevd_schema.StoppedEventBody(
reason=stop_reason,
description=exc_desc,
threadId=thread_id,
text=exc_name,
allThreadsStopped=True,
preserveFocusHint=stop_reason not in ['step', 'exception', 'breakpoint', 'entry', 'goto'],
)
event = pydevd_schema.StoppedEvent(body)
return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, event, is_json=True)
    @overrides(NetCommandFactory.make_thread_resume_single_notification)
    def make_thread_resume_single_notification(self, thread_id):
        """Build the DAP 'continued' event for a single-thread resume."""
        body = ContinuedEventBody(threadId=thread_id, allThreadsContinued=True)
        event = pydevd_schema.ContinuedEvent(body)
        return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, event, is_json=True)
    @overrides(NetCommandFactory.make_set_next_stmnt_status_message)
    def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
        """Answer a DAP 'goto' request; on failure the exception text becomes the message."""
        response = pydevd_schema.GotoResponse(
            request_seq=int(seq),
            success=is_success,
            command='goto',
            body={},
            message=(None if is_success else exception_msg))
        return NetCommand(CMD_RETURN, 0, response, is_json=True)
    # The factory methods below exist only for the line-based protocol; in
    # the json (DAP) protocol they have no counterpart, so each simply
    # returns NULL_NET_COMMAND.
    @overrides(NetCommandFactory.make_send_curr_exception_trace_message)
    def make_send_curr_exception_trace_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_send_curr_exception_trace_proceeded_message)
    def make_send_curr_exception_trace_proceeded_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_send_breakpoint_exception_message)
    def make_send_breakpoint_exception_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_process_created_message)
    def make_process_created_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_thread_suspend_message)
    def make_thread_suspend_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_thread_run_message)
    def make_thread_run_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_reloaded_code_message)
    def make_reloaded_code_message(self, *args, **kwargs):
        return NULL_NET_COMMAND # Not a part of the debug adapter protocol
    @overrides(NetCommandFactory.make_input_requested_message)
    def make_input_requested_message(self, started):
        """Notify the client that the debuggee is waiting for input.

        Note: ``started`` is unused in the json protocol; the parameter is
        kept for interface compatibility with the base factory.
        """
        event = pydevd_schema.PydevdInputRequestedEvent(body={})
        return NetCommand(CMD_INPUT_REQUESTED, 0, event, is_json=True)
    @overrides(NetCommandFactory.make_skipped_step_in_because_of_filters)
    def make_skipped_step_in_because_of_filters(self, py_db, frame):
        """Warn the user that a step-in skipped a frame because of file filters."""
        msg = 'Frame skipped from debugging during step-in.'
        if py_db.get_use_libraries_filter():
            msg += ('\nNote: may have been skipped because of "justMyCode" option (default == true). '
                    'Try setting \"justMyCode\": false in the debug configuration (e.g., launch.json).\n')
        return self.make_warning_message(msg)
    @overrides(NetCommandFactory.make_evaluation_timeout_msg)
    def make_evaluation_timeout_msg(self, py_db, expression, curr_thread):
        """Warn the user that evaluating ``expression`` exceeded the configured timeout.

        Optionally appends a dump of the non-pydevd threads when
        PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT is enabled.
        """
        msg = '''Evaluating: %s did not finish after %.2f seconds.
This may mean a number of things:
- This evaluation is really slow and this is expected.
    In this case it's possible to silence this error by raising the timeout, setting the
    PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.
- The evaluation may need other threads running while it's running:
    In this case, it's possible to set the PYDEVD_UNBLOCK_THREADS_TIMEOUT
    environment variable so that if after a given timeout an evaluation doesn't finish,
    other threads are unblocked or you can manually resume all threads.
    Alternatively, it's also possible to skip breaking on a particular thread by setting a
    `pydev_do_not_trace = True` attribute in the related threading.Thread instance
    (if some thread should always be running and no breakpoints are expected to be hit in it).
- The evaluation is deadlocked:
    In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT
    environment variable to true so that a thread dump is shown along with this message and
    optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger
    tries to interrupt the evaluation (if possible) when this happens.
''' % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)
        if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:
            stream = StringIO()
            pydevd_utils.dump_threads(stream, show_pydevd_threads=False)
            msg += '\n\n%s\n' % stream.getvalue()
        return self.make_warning_message(msg)
@overrides(NetCommandFactory.make_exit_command)
def make_exit_command(self, py_db):
event = pydevd_schema.TerminatedEvent(pydevd_schema.TerminatedEventBody())
return NetCommand(CMD_EXIT, 0, event, is_json=True)
| {
"repo_name": "fabioz/PyDev.Debugger",
"path": "_pydevd_bundle/pydevd_net_command_factory_json.py",
"copies": "2",
"size": "20002",
"license": "epl-1.0",
"hash": 9076747113526776000,
"line_mean": 44.0495495495,
"line_max": 164,
"alpha_frac": 0.6423857614,
"autogenerated": false,
"ratio": 3.9436119873817033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018793891722985812,
"num_lines": 444
} |
from functools import partial
import itertools
import os.path
from sys import float_info
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import animation
from edge import Edge
from point import AbstractPoint
#import random
#print plt.cm._cmapnames; exit()
class ScreenPresentingDirectorMixin(object):
    """Director mixin that runs the simulation as a live matplotlib animation."""
    def direct(self):
        self._prepare_world()
        redraw = RedrawHints()
        stop_condition = StopCondition()
        my_animate = lambda frame_number: self.animate(frame_number, stop_condition, redraw)
        anim = animation.FuncAnimation(self.fig, my_animate, frames=1500, init_func=self.init, interval=20, blit=False)
        plt.show()
        anim # documentation and examples say that this has to be like that. Suspect some garbage collector issues.
class FileSavingDirectorMixin(object):
    """Director mixin that advances the simulation and saves one PNG per frame."""
    def __init__(self, simulation, artifact_directory=None, *args, **kwargs):
        # Directory the per-frame images are written to (see _save_to_file).
        self.artifact_directory = artifact_directory
        super(FileSavingDirectorMixin, self).__init__(simulation, artifact_directory, *args, **kwargs)
    def direct(self):
        self._prepare_world()
        redraw = RedrawHints()
        stop_condition = StopCondition()
        # Frame numbers grow without bound; the loop ends once the world
        # reports that the simulation is resolved.
        for frame_number in itertools.count():
            self.animate(frame_number, stop_condition, redraw)
            self._save_to_file(frame_number)
            if self.simulation.reality.world.is_resolved():
                break
class AbstractEdgePainterMixin(object):
    """Draws graph edges colored by a per-edge scalar (see _get_edge_colorval)."""
    EDGE_CMAP = None  # subclasses must provide a matplotlib colormap
    def _paint_edges(self, edge_objects):
        vmin = 0
        edge_tuples = [(edge.a_end.point, edge.b_end.point) for edge in edge_objects]
        edges_colorlevel = [self._get_edge_colorval(edge) for edge in edge_objects]
        # float_info.min keeps max() from raising when edge_objects is empty
        # and guarantees vmax is positive.
        vmax = max(edges_colorlevel + [float_info.min])
        nx.draw_networkx_edges(self.g, edgelist=edge_tuples, pos=self.all_points, edge_color=edges_colorlevel, width=4, edge_cmap=self.EDGE_CMAP, edge_vmin=vmin, edge_vmax=vmax)
class PheromoneEdgePainterMixin(AbstractEdgePainterMixin):
    """Colors edges by their current pheromone level."""
    EDGE_CMAP = plt.cm.winter_r
    @classmethod
    def _get_edge_colorval(cls, edge):
        return edge.pheromone_level()
class CostEdgePainterMixin(AbstractEdgePainterMixin):
    """Colors edges by their traversal cost."""
    EDGE_CMAP = plt.cm.jet
    @classmethod
    def _get_edge_colorval(cls, edge):
        return edge.cost
class AbstractVisualizer(object):
    """Base class for the ant-colony visualizers.

    Owns the matplotlib figure and a networkx graph mirroring the simulated
    world.  Concrete visualizers combine this with a director mixin (how
    frames are produced) and an edge-painter mixin (how edges are colored).
    """
    NODE_SIZE = 100 # default was 600
    def __init__(self, simulation, *args, **kwargs):
        self.simulation = simulation
        self.fig = None  # created lazily in _prepare_world
    def direct(self):
        raise NotImplementedError('You should use one of the director mixins')
    def render_reality(self, reality, filename=None):
        return self.render_world(reality.world, filename)
    def _prepare_world(self):
        """(Re)build the graph and draw all nodes; edges are painted separately."""
        world = self.simulation.reality.world
        self.g = nx.Graph()
        g = self.g
        g.add_nodes_from(world.points)
        edgelist = [
            (edge.a_end.point, edge.b_end.point, min(edge.pheromone_level(), 1))
            for edge in world.edges
        ]
        g.add_weighted_edges_from(edgelist)
        if self.fig is not None:
            self.fig.clear()
        else:
            self.fig = plt.figure(figsize=(24, 13.5), dpi=100) # figsize is in inches. x, y.
        plt.autoscale(enable=True, axis='both', tight=True)
        # Ordinary points white, food points green, anthills blue.
        normal_points = {point: point.coordinates for point in world.points if not point.is_anthill() and not point.is_foodpoint()}
        nx.draw_networkx_nodes(g, pos=normal_points, nodelist=normal_points.keys(), node_color='w', node_size=self.NODE_SIZE)
        food_points = {point: point.coordinates for point in world.get_food_points()}
        nx.draw_networkx_nodes(g, pos=food_points, nodelist=food_points.keys(), node_color='g', node_size=self.NODE_SIZE, label='food')
        anthills = {point: point.coordinates for point in world.get_anthills()}
        nx.draw_networkx_nodes(g, pos=anthills, nodelist=anthills.keys(), node_color='b', node_size=self.NODE_SIZE, label='anthill')
        self.all_points = {point: point.coordinates for point in world.points}
    def render_world(self, world, filename_part=None):
        """Draw the whole world once; save to file if filename_part is given, else show."""
        self._prepare_world()
        self._paint_edges(world.edges)
        #plt.sci(nodes)
        plt.colorbar()
        #,width=2,edge_cmap=plt.cm.Jet,with_labels=True
        #nx.draw(g)
        if filename_part:
            self._save_to_file(filename_part)
        else:
            plt.show()
    def _save_to_file(self, filename_part):
        plt.savefig(os.path.join(self.artifact_directory, "%s.png" % (filename_part,)), bbox_inches='tight')
    def init(self):
        # FuncAnimation init_func: nothing extra to draw up-front.
        return []
    def pre_animate(self):
        # Hook for subclasses; invoked before each frame is painted.
        pass
    def animate(self, frame_number, stop_condition, redraw_hints):
        """Advance the simulation by one step and repaint what changed."""
        #if stop_condition():
        #    print 'simulation should end!'
        #    return []
        changed_entities, end, edges_to_mark = self.simulation.advance()
        g = self.g
        all_points = self.all_points
        if end:
            stop_condition.stop()
            print 'simulation should end!'
            return []
        self.pre_animate()
        changed_edge_objects = self.get_changed_edge_objects(changed_entities, redraw_hints)
        changed_point_objects = [point for point in changed_entities if isinstance(point, AbstractPoint)]
        changed_points = changed_point_objects
        # Repaint the previous frame's highlights back to white, then
        # highlight this frame's changed points in red.
        nx.draw_networkx_nodes(g, pos=all_points, nodelist=redraw_hints.points, node_color='w', node_size=self.NODE_SIZE)
        nx.draw_networkx_nodes(g, pos=all_points, nodelist=changed_points, node_color='r', node_size=self.NODE_SIZE)
        redraw_hints.points = changed_points
        self._paint_edges(changed_edge_objects)
        self.process_visited_edges(redraw_hints, edges_to_mark)
        #plt.colorbar()
        return []
    def process_visited_edges(self, redraw_hints, edges_to_mark):
        redraw_hints.edges = set(edges_to_mark)
class RedrawHints(object):
    """Record of the points/edges highlighted last frame, to be repainted."""
    def __init__(self):
        self.points, self.edges = set(), set()
class StopCondition(object):
    """Callable flag: returns False until stop() has been called, then True."""
    def __init__(self):
        self.keep_going = 1
    def stop(self):
        self.keep_going = 0
    def __call__(self, *args):
        return not self.keep_going > 0
class ResettingVisualizer(AbstractVisualizer):
    """Visualizer that clears and redraws the whole figure on every frame."""
    def pre_animate(self):
        self.fig.clear()
        self._prepare_world()
    def get_changed_edge_objects(self, changed_entities, redraw_hints):
        # Everything is redrawn, so every edge counts as changed.
        return self.simulation.reality.world.edges
class StateVisualizer(AbstractVisualizer):
    """Visualizer that repaints only edges that both changed and were marked for redraw."""
    def get_changed_edge_objects(self, changed_entities, redraw_hints):
        return list(
            set(
                [edge for edge in changed_entities if isinstance(edge, Edge)]
            ) & redraw_hints.edges
        )
class FileDrawingVisualizer(PheromoneEdgePainterMixin, FileSavingDirectorMixin, ResettingVisualizer):
    """Saves a pheromone-colored frame to disk for every simulation step."""
    pass
#class AnimatingVisualizer(StateVisualizer, ScreenPresentingDirectorMixin): # this makes sense when We render each tick, but performance improvement is questionable
class AnimatingVisualizer(PheromoneEdgePainterMixin, ScreenPresentingDirectorMixin, ResettingVisualizer):
    """Shows the pheromone-colored simulation live on screen."""
    pass
class RouteDrawingVisualizer(PheromoneEdgePainterMixin, ResettingVisualizer):
    """Additionally draws the edges visited during the current tick in red."""
    def process_visited_edges(self, redraw_hints, edges_to_mark, *args, **kwargs):
        super(RouteDrawingVisualizer, self).process_visited_edges(redraw_hints, edges_to_mark, *args, **kwargs)
        marked_edges = [(edge.a_end.point, edge.b_end.point) for edge in edges_to_mark]
        nx.draw_networkx_edges(self.g, edgelist=marked_edges, pos=self.all_points, edge_color='r', width=1)
class ScreenRouteDrawingVisualizer(ScreenPresentingDirectorMixin, RouteDrawingVisualizer):
    """Route-drawing variant presented on screen."""
    pass
class FileRouteDrawingVisualizer(FileSavingDirectorMixin, RouteDrawingVisualizer):
    """Route-drawing variant saved to image files."""
    pass
class FileCostDrawingVisualizer(CostEdgePainterMixin, FileSavingDirectorMixin, ResettingVisualizer):
    """Saves frames colored by edge traversal cost."""
    pass
def _drawing_helper(drawer_class, prefix, simulation, artifact_directory, reality, force_name=False):
    # Renders one snapshot named '<prefix>_<tick-or-forced-name>.png'.
    drawer_class(simulation, artifact_directory).render_reality(reality, '%s_%s' % (prefix, force_name or simulation.ticks,))
# The partials fix the drawer class and the filename prefix; callers supply
# (simulation, artifact_directory, reality[, force_name]).
draw_link_costs = partial(_drawing_helper, FileCostDrawingVisualizer, 'link_costs')
draw_pheromone_levels = partial(_drawing_helper, FileDrawingVisualizer, 'pheromone_levels')
if __name__=='__main__':
    # Ad-hoc smoke test: draw a small weighted 5-cycle and show it.
    edgelist = [
        (0, 1, 1),
        (1, 2, 2),
        (2, 3, 3),
        (3, 4, 4),
        (4, 0, 5),
    ]
    g = nx.Graph()
    g.add_weighted_edges_from(edgelist)
    nx.draw(g)
    plt.show()
| {
"repo_name": "ppolewicz/ant-colony",
"path": "antcolony/vizualizer.py",
"copies": "1",
"size": "8550",
"license": "bsd-3-clause",
"hash": -1964502984151239400,
"line_mean": 36.6651982379,
"line_max": 177,
"alpha_frac": 0.6692397661,
"autogenerated": false,
"ratio": 3.555093555093555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47243333211935545,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import posixpath
import threading
try:
from twitter.common import log
except ImportError:
import logging as log
from twitter.common.concurrent import Future
from .group_base import (
Capture,
GroupBase,
GroupInterface,
Membership,
set_different)
from kazoo.client import KazooClient
from kazoo.protocol.states import (
EventType,
KazooState,
KeeperState)
import kazoo.security as ksec
import kazoo.exceptions as ke
# TODO(wickman) Put this in twitter.common somewhere?
def partition(items, predicate=bool):
  """Split *items* into a ``(falses, trues)`` pair according to *predicate*.

  Single pass: *predicate* is evaluated exactly once per item and the
  relative order of items is preserved within both result lists.
  """
  falses, trues = [], []
  for item in items:
    (trues if predicate(item) else falses).append(item)
  return falses, trues
class KazooGroup(GroupBase, GroupInterface):
  """
    An implementation of GroupInterface against Kazoo.
  """
  # Exceptions that indicate a (possibly transient) loss of connectivity;
  # operations catching these re-schedule themselves via _once.
  DISCONNECT_EXCEPTIONS = (ke.ConnectionLoss, ke.OperationTimeoutError, ke.SessionExpiredError)
  @classmethod
  def translate_acl(cls, acl):
    """Convert a dict-like ACL with 'perms'/'scheme'/'id' keys to a kazoo ACL."""
    if not isinstance(acl, dict) or any(key not in acl for key in ('perms', 'scheme', 'id')):
      raise TypeError('Expected acl to be Acl-like, got %s' % type(acl))
    return ksec.ACL(acl['perms'], ksec.Id(acl['scheme'], acl['id']))
@classmethod
def translate_acl_list(cls, acls):
if acls is None:
return acls
try:
acls = list(acls)
except (ValueError, TypeError):
raise TypeError('ACLs should be a list, got %s' % type(acls))
if all(isinstance(acl, ksec.ACL) for acl in acls):
return acls
else:
return [cls.translate_acl(acl) for acl in acls]
  def __init__(self, zk, path, acl=None):
    """Create a group rooted at ``path`` on an existing KazooClient.

    :param zk: a KazooClient instance (required).
    :param path: znode path under which members are registered.
    :param acl: optional list of ACLs (dicts or kazoo ACL objects).
    """
    if not isinstance(zk, KazooClient):
      raise TypeError('KazooGroup must be initialized with a KazooClient')
    self._zk = zk
    self.__state = zk.state
    self.__listener_queue = []  # pending (KazooState, callback) pairs (see _once)
    self.__queue_lock = threading.Lock()
    self._zk.add_listener(self.__state_listener)
    self._path = '/' + '/'.join(filter(None, path.split('/'))) # normalize path
    self._members = {}  # Membership -> Future of that member's blob
    self._member_lock = threading.Lock()
    self._acl = self.translate_acl_list(acl)
  def __state_listener(self, state):
    """Process appropriate callbacks on any kazoo state transition."""
    with self.__queue_lock:
      self.__state = state
      # Split the queue: callbacks waiting for this state fire now,
      # everything else stays queued.
      self.__listener_queue, triggered = partition(self.__listener_queue,
          lambda element: element[0] == state)
    # Invoke outside the lock so a callback may re-register via _once.
    for _, callback in triggered:
      callback()
  def _once(self, keeper_state, callback):
    """Ensure a callback is called once we reach the given state: either
    immediately, if currently in that state, or on the next transition to
    that state."""
    invoke = False
    with self.__queue_lock:
      if self.__state != keeper_state:
        self.__listener_queue.append((keeper_state, callback))
      else:
        invoke = True
    # Invoke outside the lock in case the callback re-enters _once.
    if invoke:
      callback()
def __on_connected(self, callback):
return self.__on_state(callback, KazooState.CONNECTED)
def __on_expired(self, callback):
return self.__on_state(callback, KazooState.LOST)
  def info(self, member, callback=None):
    """Fetch the blob stored for ``member``, via callback or synchronously.

    Results are cached in self._members (Membership -> Future); only one
    zookeeper fetch is dispatched per member regardless of how many
    callers are waiting.  Membership.error() is delivered if the znode
    does not exist or an unexpected kazoo error occurs.
    """
    if member == Membership.error():
      raise self.InvalidMemberError('Cannot get info on error member!')
    capture = Capture(callback)
    def do_info():
      # NOTE: 'path' is bound later in this function body (just before the
      # first do_info() call); the closure picks it up at call time.
      self._zk.get_async(path).rawlink(info_completion)
    with self._member_lock:
      member_future = self._members.setdefault(member, Future())
      member_future.add_done_callback(lambda future: capture.set(future.result()))
    dispatch = False
    with self._member_lock:
      if not member_future.done() and not member_future.running():
        try:
          # Only the caller that wins this race performs the actual fetch.
          dispatch = member_future.set_running_or_notify_cancel()
        except:
          pass
    def info_completion(result):
      try:
        content, stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        # Retry once the session is re-established.
        self._once(KazooState.CONNECTED, do_info)
        return
      except ke.NoNodeException:
        future = self._members.pop(member, Future())
        future.set_result(Membership.error())
        return
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in info: (%s)%s' % (type(e), e))
        future = self._members.pop(member, Future())
        future.set_result(Membership.error())
        return
      self._members[member].set_result(content)
    if dispatch:
      path = posixpath.join(self._path, self.id_to_znode(member.id))
      do_info()
    return capture()
  def join(self, blob, callback=None, expire_callback=None):
    """Join the group by creating an ephemeral sequence znode holding ``blob``.

    ``callback`` (or the synchronous return) receives the resulting
    Membership -- Membership.error() on unexpected kazoo failures.
    ``expire_callback`` fires when the membership znode disappears or the
    session is lost.
    """
    membership_capture = Capture(callback)
    expiry_capture = Capture(expire_callback)
    def do_join():
      self._zk.create_async(
        path=posixpath.join(self._path, self.MEMBER_PREFIX),
        value=blob,
        acl=self._acl,
        sequence=True,
        ephemeral=True,
        makepath=True
      ).rawlink(acreate_completion)
    def do_exists(path):
      self._zk.exists_async(path, watch=exists_watch).rawlink(partial(exists_completion, path))
    def exists_watch(event):
      # Ephemeral node deleted => membership expired.
      if event.type == EventType.DELETED:
        expiry_capture.set()
    def expire_notifier():
      # Session loss also expires the ephemeral membership znode.
      self._once(KazooState.LOST, expiry_capture.set)
    def exists_completion(path, result):
      try:
        if result.get() is None:
          expiry_capture.set()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, partial(do_exists, path))
    def acreate_completion(result):
      try:
        path = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        # Retry the whole join after reconnecting.
        self._once(KazooState.CONNECTED, do_join)
        return
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in join: (%s)%s' % (type(e), e))
        membership = Membership.error()
      else:
        created_id = self.znode_to_id(path)
        membership = Membership(created_id)
        with self._member_lock:
          result_future = self._members.get(membership, Future())
          result_future.set_result(blob)
          self._members[membership] = result_future
        if expire_callback:
          self._once(KazooState.CONNECTED, expire_notifier)
          do_exists(path)
      membership_capture.set(membership)
    do_join()
    return membership_capture()
  def cancel(self, member, callback=None):
    """Delete ``member``'s znode; True/False success goes to ``callback``."""
    capture = Capture(callback)
    def do_cancel():
      self._zk.delete_async(posixpath.join(self._path, self.id_to_znode(member.id))).rawlink(
          adelete_completion)
    def adelete_completion(result):
      try:
        success = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_cancel)
        return
      except ke.NoNodeError:
        # Already gone: treat as a successful cancellation.
        success = True
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in cancel: (%s)%s' % (type(e), e))
        success = False
      # NOTE(review): other methods key self._members by the Membership
      # object, but this pops by member.id -- looks inconsistent; confirm
      # whether the cached entry is ever actually evicted here.
      future = self._members.pop(member.id, Future())
      future.set_result(Membership.error())
      capture.set(success)
    do_cancel()
    return capture()
  def monitor(self, membership=frozenset(), callback=None):
    """Watch the group until its member set differs from ``membership``.

    The callback (or synchronous return) receives the new set of
    Membership objects; a set containing Membership.error() is delivered
    on an unexpected zookeeper error.
    """
    capture = Capture(callback)
    def wait_exists():
      # The group path does not exist yet: wait for it to be created.
      self._zk.exists_async(self._path, exists_watch).rawlink(exists_completion)
    def exists_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.type == EventType.CREATED:
        do_monitor()
      elif event.type == EventType.DELETED:
        wait_exists()
    def exists_completion(result):
      try:
        stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, wait_exists)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected exists_completion result: (%s)%s' % (type(e), e))
        return
      if stat:
        do_monitor()
    def do_monitor():
      self._zk.get_children_async(self._path, get_watch).rawlink(get_completion)
    def get_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.state != KeeperState.CONNECTED:
        return
      if event.type == EventType.DELETED:
        wait_exists()
        return
      if event.type != EventType.CHILD:
        return
      if set_different(capture, membership, self._members):
        # Membership changed: capture was satisfied, stop watching.
        return
      do_monitor()
    def get_completion(result):
      try:
        children = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_monitor)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected get_completion result: (%s)%s' % (type(e), e))
        capture.set(set([Membership.error()]))
        return
      self._update_children(children)
      set_different(capture, membership, self._members)
    do_monitor()
    return capture()
  def list(self):
    """Synchronously list current members, retrying across disconnects."""
    wait_event = threading.Event()
    while True:
      wait_event.clear()
      try:
        try:
          return sorted(Membership(self.znode_to_id(znode))
                        for znode in self._zk.get_children(self._path)
                        if self.znode_owned(znode))
        except ke.NoNodeException:
          # Group path does not exist yet => no members.
          return []
      except self.DISCONNECT_EXCEPTIONS:
        # Block until the session reconnects, then retry.
        self._once(KazooState.CONNECTED, wait_event.set)
        wait_event.wait()
class ActiveKazooGroup(KazooGroup):
  """KazooGroup variant that maintains a standing children watch on the group."""
  def __init__(self, *args, **kwargs):
    super(ActiveKazooGroup, self).__init__(*args, **kwargs)
    self._monitor_queue = []  # pending (membership, Capture) pairs
    self._monitor_members()   # kick off the perpetual watch loop
  def monitor(self, membership=frozenset(), callback=None):
    """Queue a monitor request; satisfied immediately if membership already differs."""
    capture = Capture(callback)
    if not set_different(capture, membership, self._members):
      self._monitor_queue.append((membership, capture))
    return capture()
  def _monitor_members(self):
    """Maintain a perpetual children watch on the group path.

    On every membership change this prefetches the blob of new members
    and satisfies any queued monitor() requests whose membership set
    differs from the current one.
    """
    def wait_exists():
      # Group path missing: wait for it to be created.
      self._zk.exists_async(self._path, exists_watch).rawlink(exists_completion)
    def exists_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.type == EventType.CREATED:
        do_monitor()
      elif event.type == EventType.DELETED:
        wait_exists()
    def exists_completion(result):
      try:
        stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, wait_exists)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected exists_completion result: (%s)%s' % (type(e), e))
        return
      if stat:
        do_monitor()
    def do_monitor():
      self._zk.get_children_async(self._path, get_watch).rawlink(get_completion)
    def get_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.state != KeeperState.CONNECTED:
        return
      if event.type == EventType.DELETED:
        wait_exists()
        return
      do_monitor()
    def get_completion(result):
      try:
        children = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_monitor)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected get_completion result: (%s)%s' % (type(e), e))
        return
      children = [child for child in children if self.znode_owned(child)]
      _, new = self._update_children(children)
      for child in new:
        # Prefetch the member blob; the result lands in self._members.
        def devnull(*args, **kw): pass
        self.info(child, callback=devnull)
      monitor_queue = self._monitor_queue[:]
      self._monitor_queue = []
      members = set(Membership(self.znode_to_id(child)) for child in children)
      for membership, capture in monitor_queue:
        if set(membership) != members:
          capture.set(members)
        else:
          # No change from this watcher's point of view: keep waiting.
          self._monitor_queue.append((membership, capture))
    do_monitor()
| {
"repo_name": "jsirois/commons",
"path": "src/python/twitter/common/zookeeper/group/kazoo_group.py",
"copies": "14",
"size": "12041",
"license": "apache-2.0",
"hash": 2334406609272874000,
"line_mean": 29.2537688442,
"line_max": 95,
"alpha_frac": 0.6309276638,
"autogenerated": false,
"ratio": 3.814063984795692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import sublime
from ...common import util
from ..git_command import GitCommand
class PanelActionMixin(object):
    """
    Use this mixin to initially display a quick panel, select from pre-defined
    actions and execute the matching instance method.

    The `default_actions` are copied into self.actions and should be a list of
    list/tuple items of at least length of 2, e.g:

        default_actions = [
            ['some_method', 'Run some method'],
            ['other_method', 'Run some other method'],
            ['some_method', 'Run method with arg1 and arg2', ('arg1', 'arg2')],
            ['some_method', 'Run method with kwargs1: foo', (), {'kwarg1': 'foo'}],
        ]

    Will result in the following method calls accordingly:

        self.some_method()
        self.other_method()
        self.other_method('arg1', 'arg2')
        self.other_method(kwarg1='foo')
    """
    selected_index = 0  # Every instance gets it's own `selected_index`
    default_actions = None  # must be set by inheriting class
    async_action = False  # if True, executes action with set_timeout_async
    def run(self, *args, **kwargs):
        # Entry point: refresh the action list and open the quick panel.
        self.update_actions()
        self.show_panel(pre_selected_index=kwargs.get('pre_selected_index', None))
    def update_actions(self):
        self.actions = self.default_actions[:]  # copy default actions
    def show_panel(self, actions=None, pre_selected_index=None):
        window = self.window if hasattr(self, 'window') else self.view.window()
        if pre_selected_index:
            # Skip the panel entirely and behave as if that row was chosen.
            self.on_action_selection(pre_selected_index)
            return
        window.show_quick_panel(
            [a[1] for a in actions or self.actions],
            self.on_action_selection,
            flags=sublime.MONOSPACE_FONT,
            selected_index=self.selected_index,
        )
    def on_action_selection(self, index):
        if index == -1:
            # Panel was cancelled.
            return
        self.selected_index = index  # set last selected choice as default
        selected_action = self.actions[index]
        func = self.get_callable(selected_action)
        args, kwargs = self.get_arguments(selected_action)
        if self.async_action:
            sublime.set_timeout_async(lambda: func(*args, **kwargs))
        else:
            func(*args, **kwargs)
    def get_callable(self, selected_action):
        # The first element of an action entry names the instance method to run.
        return getattr(self, selected_action[0])
    def get_arguments(self, selected_action):
        # Optional 3rd element is positional args; optional 4th is kwargs.
        if len(selected_action) == 3:
            return selected_action[2], {}
        elif len(selected_action) == 4:
            return selected_action[2:4]
        return (), {}
class PanelCommandMixin(PanelActionMixin):
    """
    Same basic functionality as PanelActionMixin, except that it executes a given
    sublime command rather than a given instance method. For example:

        default_actions = [
            ['foo', 'Run FooCommand'],
            ['bar', 'Run BarCommand with arg1 and arg2', ('arg1', 'arg2')],
            ['bar', 'Run BarCommand with kwargs1: foo', ({'kwarg1': 'foo'}, )],
            ['bar', 'Run BarCommand with kwargs1: foo', (), {'kwarg1': 'foo'}],
        ]

    Will result in the following commands accordingly:

        self.window.run_command("foo")
        self.window.run_command("bar", 'arg1', 'arg2')
        self.window.run_command("bar", {'kwarg1': 'foo'})
        self.window.run_command("bar", kwarg1='foo')
    """
    def get_callable(self, selected_action):
        # Prefer the window's run_command, then the view's, falling back
        # to the application-level sublime.run_command.
        if hasattr(self, 'window'):
            return self.window.run_command
        elif hasattr(self, 'view'):
            return self.view.run_command
        else:
            return sublime.run_command
    def get_arguments(self, selected_action):
        """Prepares `run_command` arguments:
          - (required) Command name is 1st argument
          - (optional) args is 2nd (and next) arguments
          - (optional) kwargs are simply keyword arguments
        """
        args, kwargs = super().get_arguments(selected_action)
        return ((selected_action[0], ) + args), kwargs
def show_remote_panel(
        on_done, show_option_all=False, selected_remote=None, allow_direct=False,
        show_url=False):
    """
    Open a quick panel listing remotes; the choice is reported via
    ``on_done(remote)`` and cancellation passes ``None``.

    on_done: a callable
    show_option_all: when True an "All remotes" entry is offered; selecting
        it reports ``True`` to ``on_done``.
    """
    panel = RemotePanel(
        on_done, show_option_all, selected_remote, allow_direct, show_url)
    panel.show()
    return panel
class RemotePanel(GitCommand):
    """Quick panel for picking one of the repository's remotes."""

    def __init__(
            self, on_done, show_option_all=False, selected_remote=None,
            allow_direct=False, show_url=False):
        self.window = sublime.active_window()
        self.on_done = on_done
        self.selected_remote = selected_remote
        self.show_option_all = show_option_all
        self.allow_direct = allow_direct
        self.show_url = show_url

    def show(self):
        """Collect remotes and display the panel (or short-circuit)."""
        _remotes = self.get_remotes()
        self.remotes = list(_remotes.keys())
        if not self.remotes:
            self.window.show_quick_panel(["There are no remotes available."], None)
            return
        # With a single remote, callers may opt to skip the panel entirely.
        if self.allow_direct and len(self.remotes) == 1:
            self.on_remote_selection(0)
            return
        if self.show_option_all and len(self.remotes) > 1:
            self.remotes.insert(0, "All remotes.")
        # We don't use the GitCommand.last_remote_used property because we don't want default values
        last_remote_used = self._last_remotes_used.get(self.repo_path)
        if last_remote_used in self.remotes:
            pre_selected_index = self.remotes.index(last_remote_used)
        else:
            pre_selected_index = 0
        self.window.show_quick_panel(
            [[remote, _remotes[remote]] for remote in self.remotes] if self.show_url else self.remotes,
            self.on_remote_selection,
            flags=sublime.MONOSPACE_FONT,
            selected_index=pre_selected_index
        )

    def on_remote_selection(self, index):
        """Panel callback: map the selected index back to a remote name."""
        if index == -1:
            self.on_done(None)  # cancelled
        elif self.show_option_all and len(self.remotes) > 1 and index == 0:
            # "All remotes." entry is reported as True.
            self.last_remote_used = None
            self.on_done(True)
        else:
            self.remote = self.remotes[index]
            self.last_remote_used = self.remote
            self.on_done(self.remote)
def show_branch_panel(
        on_done,
        local_branches_only=False,
        remote_branches_only=False,
        ignore_current_branch=False,
        ask_remote_first=False,
        local_branch=None,
        selected_branch=None):
    """
    Open a quick panel listing branches; the choice is reported via
    ``on_done(branch)`` and cancellation passes ``None``.

    ask_remote_first: ask for a remote before showing branches; when False
        the options are presented in the form ``remote/branch``.
    selected_branch: with ``ask_remote_first``, pre-select
        ``{remote}/{selected_branch}``.

    NOTE(review): ``local_branch`` is accepted but never forwarded to
    BranchPanel — looks vestigial; confirm before removing.
    """
    panel = BranchPanel(
        on_done,
        local_branches_only,
        remote_branches_only,
        ignore_current_branch,
        ask_remote_first,
        selected_branch)
    panel.show()
    return panel
class BranchPanel(GitCommand):
    """Quick panel for picking a branch, optionally after choosing a remote."""

    def __init__(
            self, on_done, local_branches_only=False, remote_branches_only=False,
            ignore_current_branch=False, ask_remote_first=False, selected_branch=None):
        self.window = sublime.active_window()
        self.on_done = on_done
        self.local_branches_only = local_branches_only
        # Asking for a remote first implies that only remote branches make sense.
        self.remote_branches_only = True if ask_remote_first else remote_branches_only
        self.ignore_current_branch = ignore_current_branch
        self.ask_remote_first = ask_remote_first
        self.selected_branch = selected_branch

    def show(self):
        """Entry point: either chain through the remote panel or go direct."""
        if self.ask_remote_first:
            show_remote_panel(
                lambda remote: sublime.set_timeout_async(
                    lambda: self.on_remote_selection(remote), 100))
        else:
            self.select_branch(remote=None)

    def on_remote_selection(self, remote):
        """Remote-panel callback; a falsy remote means the user cancelled."""
        if not remote:
            return
        self.select_branch(remote)

    def select_branch(self, remote=None):
        """Build the branch list (filtered by options/remote) and show it."""
        if self.local_branches_only:
            self.all_branches = [b.name_with_remote for b in self.get_branches() if not b.remote]
        elif self.remote_branches_only:
            self.all_branches = [b.name_with_remote for b in self.get_branches() if b.remote]
        else:
            self.all_branches = [b.name_with_remote for b in self.get_branches()]
        if self.ignore_current_branch:
            current_branch = self.get_current_branch_name()
            self.all_branches = [b for b in self.all_branches if b != current_branch]
        elif self.selected_branch is None:
            # Default the pre-selection to the current branch.
            self.selected_branch = self.get_current_branch_name()
        if remote:
            self.all_branches = [b for b in self.all_branches if b.startswith(remote + "/")]
        if not self.all_branches:
            self.window.show_quick_panel(["There are no branches available."], None)
            return
        if self.selected_branch:
            selected_index = self.get_pre_selected_branch_index(self.selected_branch, remote)
        else:
            selected_index = 0
        self.window.show_quick_panel(
            self.all_branches,
            self.on_branch_selection,
            flags=sublime.MONOSPACE_FONT,
            selected_index=selected_index
        )

    def get_pre_selected_branch_index(self, selected_branch, remote):
        """Find the panel index for the branch to pre-select (0 if absent)."""
        if remote:
            # Try the remote-qualified name first, then the bare name.
            branch_candidates = ["{}/{}".format(remote, selected_branch), selected_branch]
        else:
            branch_candidates = [selected_branch]
        for candidate in branch_candidates:
            try:
                return self.all_branches.index(candidate)
            except ValueError:
                pass
        else:
            return 0

    def on_branch_selection(self, index):
        """Panel callback: report the chosen branch (None on cancel)."""
        if index == -1:
            self.branch = None
        else:
            self.branch = self.all_branches[index]
        self.on_done(self.branch)
def show_paginated_panel(items, on_done, **kwargs):
    """
    Convenience wrapper around PaginatedPanel: forwards only the options the
    panel understands, then displays it.
    """
    recognized = ('flags', 'selected_index', 'on_highlight', 'limit', 'format_item',
                  'next_page_message', 'empty_page_message', 'last_page_empty_message',
                  'status_message')
    panel_kwargs = {name: kwargs[name] for name in recognized if name in kwargs}
    panel = PaginatedPanel(items, on_done, **panel_kwargs)
    panel.show()
    return panel
class PaginatedPanel:
    """
    Display items in quick panel with pagination, and execute on_done
    when item is selected.

    items: can be either a list or a generator.
    on_done: a callback will take one argument
    limit: the number of items per page
    selected_index: an integer or a callable returning boolean.
        If callable, takes either an integer or an entry.
    on_highlight: a callable, takes either an integer or an entry.
    format_item: a function to format each item
    next_page_message: a message of next page, default is ">>> NEXT PAGE >>>"
    empty_page_message: a message to show when the first page is empty.
    last_page_empty_message: a message to show when the last page is empty. It is
        less confusing to inform user than to show nothing.
    status_message: a message to display at statusbar while loading the entries.

    If the elements are tuples of the form `(value1, value2)`,
    `value1` would be displayed via quick panel and `value2` will be passed to
    `on_done`, `selected_index` and `on_highlight`.

    Furthermore, if the quick panel is cancelled, `None` will be passed to `on_done`.
    """
    # Class-level defaults; any of these can be overridden via kwargs.
    flags = sublime.MONOSPACE_FONT | sublime.KEEP_OPEN_ON_FOCUS_LOST
    next_page_message = ">>> NEXT PAGE >>>"
    empty_page_message = None
    last_page_empty_message = ">>> LAST PAGE >>>"
    status_message = None
    limit = 6000
    selected_index = None
    on_highlight = None

    def __init__(self, items, on_done, **kwargs):
        self._is_empty = True
        self._is_done = False
        self._empty_message_shown = False
        self.skip = 0  # number of items consumed by previous pages
        self.item_generator = (item for item in items)
        self.on_done = on_done
        for option in kwargs:
            setattr(self, option, kwargs[option])

    def load_next_batch(self):
        """Pull up to ``limit`` items from the generator into the page lists."""
        self.display_list = []
        self.ret_list = []
        for item in itertools.islice(self.item_generator, self.limit):
            self.extract_item(item)
        if self.ret_list and len(self.ret_list) != len(self.display_list):
            raise Exception("the lengths of display_list and ret_list are different.")

    def extract_item(self, item):
        """Append the formatted item; a 2-tuple splits into display/return values."""
        item = self.format_item(item)
        if type(item) is tuple and len(item) == 2:
            self.display_list.append(item[0])
            self.ret_list.append(item[1])
        else:
            self.display_list.append(item)

    def format_item(self, item):
        """Hook for subclasses; identity by default."""
        return item

    def show(self):
        """Load one page of items and display the quick panel."""
        if self.status_message:
            sublime.active_window().status_message(self.status_message)
        try:
            self.load_next_batch()
        finally:
            if self.status_message:
                sublime.active_window().status_message("")

        if len(self.display_list) == self.limit:
            # A full page: offer a "next page" entry at the end.
            self.display_list.append(self.next_page_message)
            self._is_empty = False
        elif len(self.display_list) == 0:
            if self._is_empty:
                # first page but empty
                if self.empty_page_message:
                    self.display_list.append(self.empty_page_message)
            else:
                # last page but empty
                if self.last_page_empty_message:
                    self.display_list.append(self.last_page_empty_message)
            self._is_done = True
            self._empty_message_shown = True
        else:
            self._is_empty = False
            self._is_done = True

        kwargs = {}
        if self.flags:
            kwargs["flags"] = self.flags
        selected_index = self.get_selected_index()
        if selected_index:
            kwargs["selected_index"] = selected_index
        if self.on_highlight:
            kwargs["on_highlight"] = self._on_highlight

        if self.display_list:
            sublime.active_window().show_quick_panel(
                self.display_list,
                self._on_selection,
                **kwargs
            )

    def get_selected_index(self):
        """Resolve the pre-selected index for the current page, if any."""
        if callable(self.selected_index):
            for idx, entry in enumerate(self.ret_list):
                if self.selected_index(entry):
                    return idx
        elif self.selected_index and self.skip <= self.selected_index < self.skip + self.limit:
            # Translate the global index into a page-local one.
            return self.selected_index - self.skip

    def _on_highlight(self, index):
        """Forward highlight events, skipping placeholder entries."""
        if self._empty_message_shown:
            return
        if index == self.limit or index == -1:
            # The "next page" entry or no highlight at all.
            return
        elif self.ret_list:
            self.on_highlight(self.ret_list[index])
        else:
            self.on_highlight(self.skip + index)

    def _on_selection(self, index):
        """Handle selection: advance a page, cancel, or report the choice."""
        if self._empty_message_shown:
            return
        if index == self.limit:
            # "Next page" entry chosen: load the next batch asynchronously.
            self.skip = self.skip + self.limit
            sublime.set_timeout_async(self.show, 10)
        elif self.ret_list:
            if index == -1:
                self.on_selection(None)
            else:
                self.on_selection(self.ret_list[index])
        else:
            if index == -1:
                self.on_selection(-1)
            else:
                self.on_selection(self.skip + index)

    def on_selection(self, value):
        """Record the final value and invoke the caller's callback."""
        self.value = value
        self.on_done(value)

    def is_empty(self):
        # True when the very first page yielded no items.
        return self._is_empty

    def is_done(self):
        # True once the generator has been exhausted.
        return self._is_done
def show_log_panel(entries, on_done, **kwargs):
    """
    Display log entries in a paginated quick panel; ``on_done(commit)`` runs
    with the selected commit. ``entries`` may be a list or a generator of
    LogEntry.
    """
    panel_kwargs = {name: kwargs[name]
                    for name in ('selected_index', 'limit', 'on_highlight')
                    if name in kwargs}
    panel = LogPanel(entries, on_done, **panel_kwargs)
    panel.show()
    return panel
def short_ref(ref):
    """Condense a comma-separated git ref decoration into compact tokens.

    ``HEAD -> x`` becomes ``x*``, ``tag: t`` becomes ``t``, and a remote ref
    whose branch name also appears locally is dropped. Each surviving name
    is wrapped in pipes and the tokens are space-joined.
    """
    if not ref:
        return ''

    def simplify(name):
        if name.startswith('HEAD -> '):
            return name[8:] + '*'
        if name.startswith('tag: '):
            return name[5:]
        return name

    def keep(names, name):
        # Keep local refs and remote refs that diverge from any local name.
        parts = name.split('/', 1)
        if len(parts) < 2:
            return True
        return parts[1] not in names

    names = [simplify(r) for r in ref.split(', ')]
    return ' '.join('|{}|'.format(n) for n in names if keep(names, n))
filter_ = partial(filter, None)
class LogPanel(PaginatedPanel):
    """Paginated panel over commit log entries; reports the long hash."""

    def format_item(self, entry):
        # Two-row display entry: summary line, then author and fuzzy date;
        # the commit's long hash is the value handed back on selection.
        return (
            [
                " ".join(filter_((entry.short_hash, short_ref(entry.ref), entry.summary))),
                ", ".join(filter_((entry.author, util.dates.fuzzy(entry.datetime)))),
            ],
            entry.long_hash
        )

    @property
    def next_page_message(self):
        return [">>> NEXT {} COMMITS >>>".format(self.limit),
                "Skip this set of commits and choose from the next-oldest batch."]

    def on_selection(self, commit):
        self.commit = commit
        # Defer the callback so it runs off the UI thread.
        sublime.set_timeout_async(lambda: self.on_selection_async(commit), 10)

    def on_selection_async(self, commit):
        self.on_done(commit)
def show_stash_panel(on_done, **kwargs):
    """
    Display stash entries in a paginated quick panel; ``on_done(stash)`` is
    invoked with the id of the selected stash.
    """
    panel = StashPanel(on_done, **kwargs)
    panel.show()
    return panel
class StashPanel(PaginatedPanel, GitCommand):
    """Paginated quick panel over the repository's stash entries."""

    empty_page_message = "There are no stashes available."

    def __init__(self, on_done, **kwargs):
        self.window = sublime.active_window()
        super().__init__(self.get_stashes(), on_done, **kwargs)

    def format_item(self, entry):
        # Display "id description"; the bare id is handed back on selection.
        return (entry.id + " " + entry.description, entry.id)
| {
"repo_name": "divmain/GitSavvy",
"path": "core/ui_mixins/quick_panel.py",
"copies": "1",
"size": "18947",
"license": "mit",
"hash": 7949672426559235000,
"line_mean": 31.8370883882,
"line_max": 103,
"alpha_frac": 0.5927059693,
"autogenerated": false,
"ratio": 3.90659793814433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.499930390744433,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import urlparse
import json
import collections
from flask import current_app
from flask.ext.wtf import Form
from wtforms import Form as InsecureForm
from wtforms import (TextField, DateField, DecimalField, IntegerField,
SelectField, SelectMultipleField, FieldList, FormField)
from wtforms.widgets import (TextInput, ListWidget, html_params, HTMLString,
CheckboxInput, Select, TextArea)
from wtforms.validators import (DataRequired, Optional, URL, Email, Length,
NumberRange, ValidationError, StopValidation)
from flask.ext.babel import lazy_gettext as _, gettext
from babel.support import LazyProxy
from ispformat.validator import validate_geojson
from .constants import STEPS
from .models import ISP
from .utils import check_geojson_spatialite, filesize_fmt
class InputListWidget(ListWidget):
    """ListWidget variant rendering each subfield inside an <li> element."""

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        items = ['<li>%s</li>' % (subfield()) for subfield in field]
        opening = '<%s %s>' % (self.html_tag, html_params(**kwargs))
        closing = '</%s>' % self.html_tag
        return HTMLString(opening + ''.join(items) + closing)
class MultiCheckboxField(SelectMultipleField):
    """
    A multiple-select, except displays a list of checkboxes.

    Iterating the field will produce subfields, allowing custom rendering of
    the enclosed checkbox fields.
    """
    widget = ListWidget(prefix_label=False)
    option_widget = CheckboxInput()
class MyFormField(FormField):
    """FormField exposing its nested field errors as one flat list."""

    @property
    def flattened_errors(self):
        # self.errors maps subfield name -> list of messages; flatten them.
        return [message
                for messages in self.errors.values()
                for message in messages]
class GeoJSONField(TextField):
    """Text field holding a GeoJSON document, parsed and validated on input."""

    widget = TextArea()

    def process_formdata(self, valuelist):
        """Parse the submitted text into an order-preserving JSON object."""
        if valuelist and valuelist[0]:
            # Reject oversized payloads before attempting to parse them.
            max_size = current_app.config['ISP_FORM_GEOJSON_MAX_SIZE']
            if len(valuelist[0]) > max_size:
                raise ValueError(_(u'JSON value too big, must be less than %(max_size)s',
                                   max_size=filesize_fmt(max_size)))
            try:
                # OrderedDict keeps the member order of the submitted document.
                self.data = json.loads(valuelist[0], object_pairs_hook=collections.OrderedDict)
            except Exception as e:
                raise ValueError(_(u'Not a valid JSON value'))
        elif valuelist and valuelist[0].strip() == '':
            self.data = None  # if an empty string was passed, set data as None

    def _value(self):
        """Render raw input when present, else re-serialize the parsed data."""
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            return json.dumps(self.data)
        else:
            return ''

    def pre_validate(self, form):
        """Validate against the ispformat schema, then spatialite decoding."""
        if self.data is not None:
            if not validate_geojson(self.data):
                raise StopValidation(_(u'Invalid GeoJSON, please check it'))
            if not check_geojson_spatialite(self.data):
                current_app.logger.error('Spatialite could not decode the following GeoJSON: %s', self.data)
                raise StopValidation(_(u'Unable to store GeoJSON in database'))
class Unique(object):
    """ validator that checks field uniqueness """

    # NOTE(review): ``allow_edit`` is accepted but never stored or read —
    # looks vestigial; confirm before removing.
    def __init__(self, model, field, message=None, allow_edit=False):
        self.model = model
        self.field = field
        if not message:
            message = _(u'this element already exists')
        self.message = message

    def __call__(self, form, field):
        # An unchanged (non-default) value is allowed so that editing a
        # record does not trip the uniqueness check against itself.
        default = field.default() if callable(field.default) else field.default
        if field.object_data != default and field.object_data == field.data:
            return
        check = self.model.query.filter(self.field == field.data).first()
        if check:
            raise ValidationError(self.message)
# (value, label) pairs for the access technologies a covered area may use.
TECHNOLOGIES_CHOICES = (
    ('ftth', _('FTTH')),
    ('dsl', _('DSL')),
    ('wifi', _('Wi-Fi')),
)
class CoveredArea(InsecureForm):
    """Sub-form describing one covered area: name, technologies, GeoJSON."""

    name = TextField(_(u'name'), widget=partial(TextInput(), class_='input-medium', placeholder=_(u'Area')))
    technologies = SelectMultipleField(_(u'technologies'), choices=TECHNOLOGIES_CHOICES,
                                       widget=partial(Select(True), **{'class': 'selectpicker', 'data-title': _(u'Technologies deployed')}))
    area = GeoJSONField(_('area'), widget=partial(TextArea(), class_='geoinput'))

    def validate(self, *args, **kwargs):
        """Require name and technologies to be given together (or neither)."""
        r = super(CoveredArea, self).validate(*args, **kwargs)
        if bool(self.name.data) != bool(self.technologies.data):
            self._fields['name'].errors += [_(u'You must fill both fields')]
            r = False
        return r
class OtherWebsites(InsecureForm):
    """Sub-form for one additional hosted website: display name plus URL."""
    name = TextField(_(u'name'), widget=partial(TextInput(), class_='input-small', placeholder=_(u'Name')))
    url = TextField(_(u'url'), widget=partial(TextInput(), class_='input-medium', placeholder=_(u'URL')),
                    validators=[Optional(), URL(require_tld=True)])
STEP_CHOICES = [(k, LazyProxy(lambda k, s: u'%u - %s' % (k, s), k, STEPS[k], enable_cache=False)) for k in STEPS]
class ProjectForm(Form):
    """Main registration/edit form for an ISP project entry."""

    name = TextField(_(u'full name'), description=[_(u'E.g. French Data Network')],
                     validators=[DataRequired(), Length(min=2), Unique(ISP, ISP.name)])
    shortname = TextField(_(u'short name'), description=[_(u'E.g. FDN')],
                          validators=[Optional(), Length(min=2, max=15), Unique(ISP, ISP.shortname)])
    description = TextField(_(u'description'), description=[None, _(u'Short text describing the project')])
    logo_url = TextField(_(u'logo url'), validators=[Optional(), URL(require_tld=True)])
    website = TextField(_(u'website'), validators=[Optional(), URL(require_tld=True)])
    other_websites = FieldList(MyFormField(OtherWebsites, widget=partial(InputListWidget(), class_='formfield')),
                               min_entries=1, widget=InputListWidget(),
                               description=[None, _(u'Additional websites that you host (e.g. wiki, etherpad...)')])
    contact_email = TextField(_(u'contact email'), validators=[Optional(), Email()],
                              description=[None, _(u'General contact email address')])
    main_ml = TextField(_(u'main mailing list'), validators=[Optional(), Email()],
                        description=[None, _(u'Address of your main mailing list')])
    creation_date = DateField(_(u'creation date'), validators=[Optional()], widget=partial(TextInput(), placeholder=_(u'YYYY-mm-dd')),
                              description=[None, _(u'Date at which the legal structure for your project was created')])
    chatrooms = FieldList(TextField(_(u'chatrooms')), min_entries=1, widget=InputListWidget(),
                          description=[None, _(u'In URI form, e.g. <code>irc://irc.isp.net/#isp</code> or ' +
                                               '<code>xmpp:isp@chat.isp.net?join</code>')])
    covered_areas = FieldList(MyFormField(CoveredArea, _('Covered Areas'), widget=partial(InputListWidget(), class_='formfield')),
                              min_entries=1, widget=InputListWidget(),
                              description=[None, _(u'Descriptive name of the covered areas and technologies deployed')])
    latitude = DecimalField(_(u'latitude'), validators=[Optional(), NumberRange(min=-90, max=90)],
                            description=[None, _(u'Coordinates of your registered office or usual meeting location. '
                                                 '<strong>Required in order to appear on the map.</strong>')])
    longitude = DecimalField(_(u'longitude'), validators=[Optional(), NumberRange(min=-180, max=180)])
    step = SelectField(_(u'progress step'), choices=STEP_CHOICES, coerce=int)
    member_count = IntegerField(_(u'members'), validators=[Optional(), NumberRange(min=0)],
                                description=[None, _('Number of members')])
    subscriber_count = IntegerField(_(u'subscribers'), validators=[Optional(), NumberRange(min=0)],
                                    description=[None, _('Number of subscribers to an internet access')])
    tech_email = TextField(_('Email'), validators=[Email(), DataRequired()], description=[None,
                           _('Technical contact, in case of problems with your submission')])

    def validate(self, *args, **kwargs):
        """Require latitude and longitude to be given together (or neither)."""
        r = super(ProjectForm, self).validate(*args, **kwargs)
        if (self.latitude.data is None) != (self.longitude.data is None):
            self._fields['longitude'].errors += [_(u'You must fill both fields')]
            r = False
        return r

    def validate_covered_areas(self, field):
        """At least one named area; cap the combined GeoJSON payload size."""
        if len(filter(lambda e: e['name'], field.data)) == 0:
            # not printed, whatever..
            raise ValidationError(_(u'You must specify at least one area'))

        geojson_size = sum([len(ca.area.raw_data[0]) for ca in self.covered_areas if ca.area.raw_data])
        max_size = current_app.config['ISP_FORM_GEOJSON_MAX_SIZE_TOTAL']
        if geojson_size > max_size:
            # TODO: XXX This is not printed !
            raise ValidationError(gettext(u'The size of all GeoJSON data combined must not exceed %(max_size)s',
                                          max_size=filesize_fmt(max_size)))

    def to_json(self, json=None):
        """Serialize the form into the ispformat JSON dict (updating ``json``)."""
        if json is None:
            json = {}
        json['name'] = self.name.data

        # Only write a key when it already exists or the value is truthy.
        def optstr(k, v):
            if k in json or v:
                json[k] = v

        def optlist(k, v):
            if k in json or len(v):
                json[k] = v

        def transform_covered_areas(cas):
            # Drop unnamed areas and strip null 'area' members.
            for ca in cas:
                if not ca['name']:
                    continue
                if 'area' in ca and ca['area'] is None:
                    del ca['area']
                yield ca

        optstr('shortname', self.shortname.data)
        optstr('description', self.description.data)
        optstr('logoURL', self.logo_url.data)
        optstr('website', self.website.data)
        optstr('otherWebsites', dict(((w['name'], w['url']) for w in self.other_websites.data if w['name'])))
        optstr('email', self.contact_email.data)
        optstr('mainMailingList', self.main_ml.data)
        optstr('creationDate', self.creation_date.data.isoformat() if self.creation_date.data else None)
        optstr('progressStatus', self.step.data)
        optstr('memberCount', self.member_count.data)
        optstr('subscriberCount', self.subscriber_count.data)
        optlist('chatrooms', filter(bool, self.chatrooms.data))  # remove empty strings
        optstr('coordinates', {'latitude': self.latitude.data, 'longitude': self.longitude.data}
               if self.latitude.data else {})
        optlist('coveredAreas', list(transform_covered_areas(self.covered_areas.data)))
        return json

    @classmethod
    def edit_json(cls, isp):
        """Build a form pre-populated from an existing ISP's JSON document."""
        json = isp.json
        # Throwaway attribute bag used as the form's ``obj`` source.
        obj = type('abject', (object,), {})()

        def set_attr(attr, itemk=None, d=json):
            if itemk is None:
                itemk = attr
            if itemk in d:
                setattr(obj, attr, d[itemk])
        set_attr('name')
        set_attr('shortname')
        set_attr('description')
        set_attr('logo_url', 'logoURL')
        set_attr('website')
        set_attr('contact_email', 'email')
        set_attr('main_ml', 'mainMailingList')
        set_attr('creation_date', 'creationDate')
        if hasattr(obj, 'creation_date'):
            obj.creation_date = ISP.str2date(obj.creation_date)
        set_attr('step', 'progressStatus')
        set_attr('member_count', 'memberCount')
        set_attr('subscriber_count', 'subscriberCount')
        set_attr('chatrooms', 'chatrooms')
        if 'coordinates' in json:
            set_attr('latitude', d=json['coordinates'])
            set_attr('longitude', d=json['coordinates'])
        if 'otherWebsites' in json:
            setattr(obj, 'other_websites', [{'name': n, 'url': w} for n, w in json['otherWebsites'].iteritems()])
        set_attr('covered_areas', 'coveredAreas')
        obj.tech_email = isp.tech_email
        return cls(obj=obj)
class URLField(TextField):
    """Text field whose parsed data is a urlparse SplitResult."""

    def _value(self):
        """Render raw string data, empty for None, else recompose the URL."""
        if isinstance(self.data, basestring):
            return self.data
        elif self.data is None:
            return ''
        else:
            return urlparse.urlunsplit(self.data)

    def process_formdata(self, valuelist):
        """Parse the submitted string into a SplitResult.

        Raises ValidationError (and resets data) when parsing fails.
        """
        if valuelist:
            try:
                self.data = urlparse.urlsplit(valuelist[0])
            except ValueError:
                # Narrowed from a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; urlsplit signals malformed
                # input with ValueError.
                self.data = None
                raise ValidationError(_(u'Invalid URL'))
def is_url_unique(url):
    """
    Return False when an ISP whose json_url shares this URL's base (in
    either http or https form) is already in the database.
    """
    parts = urlparse.urlsplit(url) if isinstance(url, basestring) else url
    components = list(parts)
    components[2] = ''  # drop the path component
    candidate = urlparse.urlunsplit(components)
    components[0] = 'http' if components[0] == 'https' else 'https'
    alternate = urlparse.urlunsplit(components)
    existing = ISP.query.filter(ISP.json_url.startswith(candidate) |
                                ISP.json_url.startswith(alternate)).count()
    return existing == 0
class ProjectJSONForm(Form):
    """Form registering a project by the base URL of its hosted isp.json."""

    json_url = URLField(_(u'base url'), description=[_(u'E.g. https://isp.com/'),
                        _(u'A ressource implementing our JSON-Schema specification ' +
                          'must exist at path /isp.json')])
    tech_email = TextField(_(u'Email'), validators=[Email()], description=[None,
                           _(u'Technical contact, in case of problems')])

    def validate_json_url(self, field):
        """Require an absolute HTTP(S) URL; enforce uniqueness on creation."""
        if not field.data.netloc:
            raise ValidationError(_(u'Invalid URL'))

        if field.data.scheme not in ('http', 'https'):
            raise ValidationError(_(u'Invalid URL (must be HTTP(S))'))

        # Only check uniqueness for new records, not when editing.
        if not field.object_data and not is_url_unique(field.data):
            raise ValidationError(_(u'This URL is already in our database'))
class RequestEditToken(Form):
    """Form asking for an edit token to be mailed to the technical contact."""
    tech_email = TextField(_(u'Tech Email'), validators=[Email()], description=[None,
                           _(u'The Technical contact you provided while registering')])
| {
"repo_name": "Psycojoker/ffdn-db",
"path": "ffdnispdb/forms.py",
"copies": "1",
"size": "14004",
"license": "bsd-3-clause",
"hash": 1072523343347227400,
"line_mean": 44.0289389068,
"line_max": 140,
"alpha_frac": 0.6007569266,
"autogenerated": false,
"ratio": 4.038062283737024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5138819210337023,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import itertools
import numpy as np
import pandas._libs.algos as _algos
import pandas._libs.reshape as _reshape
from pandas._libs.sparse import IntIndex
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int, is_bool_dtype, is_extension_array_dtype,
is_integer_dtype, is_list_like, is_object_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
from pandas.core.internals.arrays import extract_array
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index, decons_obs_group_ids, get_compressed_ids,
get_group_index)
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
if values.ndim == 1:
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
constructor = DataFrame
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level
in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError('Unstacked DataFrame is too big, '
'causing int32 overflow')
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1:] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator)
for lab in self.value_columns.codes]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
    def get_new_index(self):
        # Row index of the unstacked result: the remaining index levels,
        # compressed down to the unique observed combinations.
        result_codes = [lab.take(self.compressor)
                        for lab in self.sorted_labels[:-1]]
        # construct the new index
        if len(self.new_index_levels) == 1:
            lev, lab = self.new_index_levels[0], result_codes[0]
            if (lab == -1).any():
                # -1 codes mark rows with no observed label; surface them
                # via an appended NA entry.
                lev = lev.insert(len(lev), lev._na_value)
            return lev.take(lab)
        return MultiIndex(levels=self.new_index_levels, codes=result_codes,
                          names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs, fill_value=None):
    # Unstack several index levels at once: collapse the selected levels
    # into a single synthetic '__placeholder__' level, unstack that, then
    # rebuild the true column MultiIndex from the recorded codes.
    if len(clocs) == 0:
        return data
    # NOTE: This doesn't deal with hierarchical columns yet
    index = data.index
    clocs = [index._get_level_number(i) for i in clocs]
    rlocs = [i for i in range(index.nlevels) if i not in clocs]
    # 'c*' = levels going to columns, 'r*' = levels staying in the rows.
    clevels = [index.levels[i] for i in clocs]
    ccodes = [index.codes[i] for i in clocs]
    cnames = [index.names[i] for i in clocs]
    rlevels = [index.levels[i] for i in rlocs]
    rcodes = [index.codes[i] for i in rlocs]
    rnames = [index.names[i] for i in rlocs]
    shape = [len(x) for x in clevels]
    # One flat group id per row for the combination of selected levels.
    group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
    comp_ids, obs_ids = compress_group_index(group_index, sort=False)
    recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes,
                                        xnull=False)
    if rlocs == []:
        # Everything is in clocs, so the dummy df has a regular index
        dummy_index = Index(obs_ids, name='__placeholder__')
    else:
        dummy_index = MultiIndex(levels=rlevels + [obs_ids],
                                 codes=rcodes + [comp_ids],
                                 names=rnames + ['__placeholder__'],
                                 verify_integrity=False)
    if isinstance(data, Series):
        dummy = data.copy()
        dummy.index = dummy_index
        unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
        new_levels = clevels
        new_names = cnames
        new_codes = recons_codes
    else:
        if isinstance(data.columns, MultiIndex):
            # Hierarchical columns: fall back to unstacking one level at a
            # time, renumbering the not-yet-processed targets each pass.
            result = data
            for i in range(len(clocs)):
                val = clocs[i]
                result = result.unstack(val)
                clocs = [v if i > v else v - 1 for v in clocs]
            return result
        dummy = data.copy()
        dummy.index = dummy_index
        unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
        if isinstance(unstacked, Series):
            unstcols = unstacked.index
        else:
            unstcols = unstacked.columns
        # Replace the placeholder level with the original unstacked levels.
        new_levels = [unstcols.levels[0]] + clevels
        new_names = [data.columns.name] + cnames
        new_codes = [unstcols.codes[0]]
        for rec in recons_codes:
            new_codes.append(rec.take(unstcols.codes[-1]))
    new_columns = MultiIndex(levels=new_levels, codes=new_codes,
                             names=new_names, verify_integrity=False)
    if isinstance(unstacked, Series):
        unstacked.index = new_columns
    else:
        unstacked.columns = new_columns
    return unstacked
def unstack(obj, level, fill_value=None):
    """Pivot index level(s) of a Series or DataFrame into columns.

    Dispatches to the multi-level, frame, extension-array or plain
    implementations depending on the shape of the input.
    """
    if isinstance(level, (tuple, list)):
        if len(level) == 1:
            # A one-element sequence behaves exactly like a scalar level;
            # _unstack_multiple only handles MultiIndexes and isn't needed.
            level = level[0]
        else:
            return _unstack_multiple(obj, level, fill_value=fill_value)
    if isinstance(obj, DataFrame):
        if isinstance(obj.index, MultiIndex):
            return _unstack_frame(obj, level, fill_value=fill_value)
        return obj.T.stack(dropna=False)
    if is_extension_array_dtype(obj.dtype):
        return _unstack_extension_series(obj, level, fill_value)
    return _Unstacker(obj.values, obj.index, level=level,
                      fill_value=fill_value,
                      constructor=obj._constructor_expanddim).get_result()
def _unstack_frame(obj, level, fill_value=None):
    """Unstack a MultiIndexed DataFrame, choosing the path by dtype mix."""
    if obj._is_mixed_type:
        # Mixed dtypes: let the BlockManager unstack block by block,
        # handing it a partially-applied _Unstacker factory.
        make_unstacker = partial(_Unstacker, index=obj.index,
                                 level=level, fill_value=fill_value)
        blocks = obj._data.unstack(make_unstacker, fill_value=fill_value)
        return obj._constructor(blocks)
    return _Unstacker(obj.values, obj.index, level=level,
                      value_columns=obj.columns,
                      fill_value=fill_value,
                      constructor=obj._constructor).get_result()
def _unstack_extension_series(series, level, fill_value):
    """
    Unstack an ExtensionArray-backed Series.

    The ExtensionDtype is preserved.

    Parameters
    ----------
    series : Series
        A Series with an ExtensionArray for values
    level : Any
        The level name or number.
    fill_value : Any
        The user-level (not physical storage) fill value to use for
        missing values introduced by the reshape. Passed to
        ``series.values.take``.

    Returns
    -------
    DataFrame
        Each column of the DataFrame will have the same dtype as
        the input Series.
    """
    # Strategy: unstack a dummy array of row positions, then take from the
    # original ExtensionArray column by column. Positions that the reshape
    # created (holes) show up as -1 in the dummy result and are filled
    # with ``fill_value`` by the take.
    from pandas.core.reshape.concat import concat
    positions = np.arange(len(series))
    reshaped = _Unstacker(positions, series.index,
                          level=level, fill_value=-1).get_result()
    arr = extract_array(series, extract_numpy=False)
    columns = [
        Series(arr.take(indexer.values, allow_fill=True,
                        fill_value=fill_value),
               name=label, index=reshaped.index)
        for label, indexer in reshaped.iteritems()
    ]
    return concat(columns, axis='columns', copy=False, keys=reshaped.columns)
def stack(frame, level=-1, dropna=True):
    """
    Convert DataFrame to Series with multi-level Index. Columns become the
    second level of the resulting hierarchical index

    Returns
    -------
    stacked : Series
    """
    def factorize(index):
        # Unique indexes already are their own categories; otherwise
        # factorize to (categories, codes).
        if index.is_unique:
            return index, np.arange(len(index))
        codes, categories = _factorize_from_iterable(index)
        return categories, codes
    N, K = frame.shape
    # Will also convert negative level numbers and check if out of bounds.
    level_num = frame.columns._get_level_number(level)
    if isinstance(frame.columns, MultiIndex):
        return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
    elif isinstance(frame.index, MultiIndex):
        # Append the column labels as one more level below the existing
        # row MultiIndex: each row label repeats K times, the column
        # labels tile N times.
        new_levels = list(frame.index.levels)
        new_codes = [lab.repeat(K) for lab in frame.index.codes]
        clev, clab = factorize(frame.columns)
        new_levels.append(clev)
        new_codes.append(np.tile(clab, N).ravel())
        new_names = list(frame.index.names)
        new_names.append(frame.columns.name)
        new_index = MultiIndex(levels=new_levels, codes=new_codes,
                               names=new_names, verify_integrity=False)
    else:
        # Flat index: the result is a two-level MultiIndex (rows, columns).
        levels, (ilab, clab) = zip(*map(factorize, (frame.index,
                                                    frame.columns)))
        codes = ilab.repeat(K), np.tile(clab, N).ravel()
        new_index = MultiIndex(levels=levels, codes=codes,
                               names=[frame.index.name, frame.columns.name],
                               verify_integrity=False)
    if frame._is_homogeneous_type:
        # For homogeneous EAs, frame.values will coerce to object. So
        # we concatenate instead.
        dtypes = list(frame.dtypes.values)
        dtype = dtypes[0]
        if is_extension_array_dtype(dtype):
            arr = dtype.construct_array_type()
            new_values = arr._concat_same_type([
                col._values for _, col in frame.iteritems()
            ])
            new_values = _reorder_for_extension_array_stack(new_values, N, K)
        else:
            # homogeneous, non-EA
            new_values = frame.values.ravel()
    else:
        # non-homogeneous
        new_values = frame.values.ravel()
    if dropna:
        mask = notna(new_values)
        new_values = new_values[mask]
        new_index = new_index[mask]
    return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
    """Stack several column levels in sequence.

    ``level`` must contain either all level names or all level numbers;
    a mixture is rejected because it would be ambiguous.
    """
    # If all passed levels match up to column names, there is no
    # ambiguity about what to do.
    if all(lev in frame.columns.names for lev in level):
        result = frame
        for lev in level:
            result = stack(result, lev, dropna=dropna)
    elif all(isinstance(lev, int) for lev in level):
        # Each stack removes one column level, so the numbers of the
        # not-yet-stacked targets shift down by one after every pass;
        # renumber the remaining targets accordingly.
        result = frame
        # _get_level_number() checks level numbers are in range and
        # converts negative numbers to positive.
        positions = [frame.columns._get_level_number(lev) for lev in level]
        for step in range(len(positions)):
            current = positions[step]
            result = stack(result, current, dropna=dropna)
            positions = [p - 1 if p > current else p for p in positions]
    else:
        raise ValueError("level should contain all level names or all level "
                         "numbers, not a mixture of the two.")
    return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
    # Stack one level of a MultiIndex column axis down into the row index.
    def _convert_level_number(level_num, columns):
        """
        Logic for converting the level number to something we can safely pass
        to swaplevel:
        We generally want to convert the level number into a level name, except
        when columns do not have names, in which case we must leave as a level
        number
        """
        if level_num in columns.names:
            return columns.names[level_num]
        else:
            if columns.names[level_num] is None:
                return level_num
            else:
                return columns.names[level_num]
    this = frame.copy()
    # this makes life much simpler
    if level_num != frame.columns.nlevels - 1:
        # roll levels to put selected level at end
        roll_columns = this.columns
        for i in range(level_num, frame.columns.nlevels - 1):
            # Need to check if the ints conflict with level names
            lev1 = _convert_level_number(i, roll_columns)
            lev2 = _convert_level_number(i + 1, roll_columns)
            roll_columns = roll_columns.swaplevel(lev1, lev2)
        this.columns = roll_columns
    if not this.columns.is_lexsorted():
        # Workaround the edge case where 0 is one of the column names,
        # which interferes with trying to sort based on the first
        # level
        level_to_sort = _convert_level_number(0, this.columns)
        this = this.sort_index(level=level_to_sort, axis=1)
    # tuple list excluding level for grouping columns
    if len(frame.columns.levels) > 2:
        tuples = list(zip(*[lev.take(level_codes) for lev, level_codes
                            in zip(this.columns.levels[:-1],
                                   this.columns.codes[:-1])]))
        unique_groups = [key for key, _ in itertools.groupby(tuples)]
        new_names = this.columns.names[:-1]
        new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
    else:
        new_columns = unique_groups = this.columns.levels[0]
    # time to ravel the values
    new_data = {}
    level_vals = this.columns.levels[-1]
    level_codes = sorted(set(this.columns.codes[-1]))
    level_vals_used = level_vals[level_codes]
    levsize = len(level_codes)
    drop_cols = []
    for key in unique_groups:
        try:
            loc = this.columns.get_loc(key)
        except KeyError:
            drop_cols.append(key)
            continue
        # can make more efficient?
        # we almost always return a slice
        # but if unsorted can get a boolean
        # indexer
        if not isinstance(loc, slice):
            slice_len = len(loc)
        else:
            slice_len = loc.stop - loc.start
        if slice_len != levsize:
            # Some labels of the stacked level are absent for this group:
            # reindex so every group contributes the same set of labels.
            chunk = this.loc[:, this.columns[loc]]
            chunk.columns = level_vals.take(chunk.columns.codes[-1])
            value_slice = chunk.reindex(columns=level_vals_used).values
        else:
            if (frame._is_homogeneous_type and
                    is_extension_array_dtype(frame.dtypes.iloc[0])):
                # Homogeneous extension arrays: concatenate columns and
                # reorder so values interleave row-major.
                dtype = this[this.columns[loc]].dtypes.iloc[0]
                subset = this[this.columns[loc]]
                value_slice = dtype.construct_array_type()._concat_same_type(
                    [x._values for _, x in subset.iteritems()]
                )
                N, K = this.shape
                idx = np.arange(N * K).reshape(K, N).T.ravel()
                value_slice = value_slice.take(idx)
            elif frame._is_mixed_type:
                value_slice = this[this.columns[loc]].values
            else:
                value_slice = this.values[:, loc]
        if value_slice.ndim > 1:
            # i.e. not extension
            value_slice = value_slice.ravel()
        new_data[key] = value_slice
    if len(drop_cols) > 0:
        new_columns = new_columns.difference(drop_cols)
    N = len(this)
    if isinstance(this.index, MultiIndex):
        new_levels = list(this.index.levels)
        new_names = list(this.index.names)
        new_codes = [lab.repeat(levsize) for lab in this.index.codes]
    else:
        new_levels = [this.index]
        new_codes = [np.arange(N).repeat(levsize)]
        new_names = [this.index.name]  # something better?
    new_levels.append(level_vals)
    new_codes.append(np.tile(level_codes, N))
    new_names.append(frame.columns.names[level_num])
    new_index = MultiIndex(levels=new_levels, codes=new_codes,
                           names=new_names, verify_integrity=False)
    result = frame._constructor(new_data, index=new_index, columns=new_columns)
    # more efficient way to go about this? can do the whole masking biz but
    # will only save a small amount of time...
    if dropna:
        result = result.dropna(axis=0, how='all')
    return result
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
                columns=None, sparse=False, drop_first=False, dtype=None):
    """
    Convert categorical variable into dummy/indicator variables.

    Parameters
    ----------
    data : array-like, Series, or DataFrame
        Data of which to get dummy indicators.
    prefix : str, list of str, or dict of str, default None
        String to append DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
    prefix_sep : str, default '_'
        If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
    dummy_na : bool, default False
        Add a column to indicate NaNs, if False NaNs are ignored.
    columns : list-like, default None
        Column names in the DataFrame to be encoded.
        If `columns` is None then all the columns with
        `object` or `category` dtype will be converted.
    sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
        a :class:`SparseArray` (True) or a regular NumPy array (False).
    drop_first : bool, default False
        Whether to get k-1 dummies out of k categorical levels by removing the
        first level.

        .. versionadded:: 0.18.0

    dtype : dtype, default np.uint8
        Data type for new columns. Only a single dtype is allowed.

        .. versionadded:: 0.23.0

    Returns
    -------
    DataFrame
        Dummy-coded data.

    See Also
    --------
    Series.str.get_dummies : Convert Series to dummy codes.

    Examples
    --------
    >>> s = pd.Series(list('abca'))

    >>> pd.get_dummies(s)
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0

    >>> s1 = ['a', 'b', np.nan]

    >>> pd.get_dummies(s1)
       a  b
    0  1  0
    1  0  1
    2  0  0

    >>> pd.get_dummies(s1, dummy_na=True)
       a  b  NaN
    0  1  0    0
    1  0  1    0
    2  0  0    1

    >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                    'C': [1, 2, 3]})

    >>> pd.get_dummies(df, prefix=['col1', 'col2'])
       C  col1_a  col1_b  col2_a  col2_b  col2_c
    0  1       1       0       0       1       0
    1  2       0       1       1       0       0
    2  3       1       0       0       0       1

    >>> pd.get_dummies(pd.Series(list('abcaa')))
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0
    4  1  0  0

    >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
       b  c
    0  0  0
    1  1  0
    2  0  1
    3  0  0
    4  0  0

    >>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
         a    b    c
    0  1.0  0.0  0.0
    1  0.0  1.0  0.0
    2  0.0  0.0  1.0
    """
    from pandas.core.reshape.concat import concat
    from itertools import cycle
    dtypes_to_encode = ['object', 'category']
    if isinstance(data, DataFrame):
        # determine columns being encoded
        if columns is None:
            data_to_encode = data.select_dtypes(
                include=dtypes_to_encode)
        else:
            data_to_encode = data[columns]
        # validate prefixes and separator to avoid silently dropping cols
        def check_len(item, name):
            len_msg = ("Length of '{name}' ({len_item}) did not match the "
                       "length of the columns being encoded ({len_enc}).")
            if is_list_like(item):
                if not len(item) == data_to_encode.shape[1]:
                    len_msg = len_msg.format(name=name, len_item=len(item),
                                             len_enc=data_to_encode.shape[1])
                    raise ValueError(len_msg)
        check_len(prefix, 'prefix')
        check_len(prefix_sep, 'prefix_sep')
        # Normalize scalar/dict prefixes to per-column iterables so the
        # zip() below pairs one prefix and separator with each column.
        if isinstance(prefix, str):
            prefix = cycle([prefix])
        if isinstance(prefix, dict):
            prefix = [prefix[col] for col in data_to_encode.columns]
        if prefix is None:
            prefix = data_to_encode.columns
        # validate separators
        if isinstance(prefix_sep, str):
            prefix_sep = cycle([prefix_sep])
        elif isinstance(prefix_sep, dict):
            prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
        if data_to_encode.shape == data.shape:
            # Encoding the entire df, do not prepend any dropped columns
            with_dummies = []
        elif columns is not None:
            # Encoding only cols specified in columns. Get all cols not in
            # columns to prepend to result.
            with_dummies = [data.drop(columns, axis=1)]
        else:
            # Encoding only object and category dtype columns. Get remaining
            # columns to prepend to result.
            with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
        for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
                                   prefix_sep):
            # col is (column_name, column), use just column data here
            dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
                                    dummy_na=dummy_na, sparse=sparse,
                                    drop_first=drop_first, dtype=dtype)
            with_dummies.append(dummy)
        result = concat(with_dummies, axis=1)
    else:
        result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
                                 sparse=sparse,
                                 drop_first=drop_first,
                                 dtype=dtype)
    return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
                    sparse=False, drop_first=False, dtype=None):
    # Single-column worker for get_dummies: factorize the values, then
    # expand the integer codes into indicator columns (dense or sparse).
    from pandas.core.reshape.concat import concat
    # Series avoids inconsistent NaN handling
    codes, levels = _factorize_from_iterable(Series(data))
    if dtype is None:
        dtype = np.uint8
    dtype = np.dtype(dtype)
    if is_object_dtype(dtype):
        raise ValueError("dtype=object is not a valid dtype for get_dummies")
    def get_empty_frame(data):
        # Nothing to encode: keep only the shape (index) of the input.
        if isinstance(data, Series):
            index = data.index
        else:
            index = np.arange(len(data))
        return DataFrame(index=index)
    # if all NaN
    if not dummy_na and len(levels) == 0:
        return get_empty_frame(data)
    codes = codes.copy()
    if dummy_na:
        # Map the -1 "missing" code to an extra trailing NaN level.
        codes[codes == -1] = len(levels)
        levels = np.append(levels, np.nan)
    # if dummy_na, we just fake a nan level. drop_first will drop it again
    if drop_first and len(levels) == 1:
        return get_empty_frame(data)
    number_of_cols = len(levels)
    if prefix is None:
        dummy_cols = levels
    else:
        # PY2 embedded unicode, gh-22084
        def _make_col_name(prefix, prefix_sep, level):
            fstr = '{prefix}{prefix_sep}{level}'
            return fstr.format(prefix=prefix,
                               prefix_sep=prefix_sep,
                               level=level)
        dummy_cols = [_make_col_name(prefix, prefix_sep, level)
                      for level in levels]
    if isinstance(data, Series):
        index = data.index
    else:
        index = None
    if sparse:
        # Sparse path: collect, per level, the row positions where that
        # level occurs, and build a SparseArray of ones for each.
        if is_integer_dtype(dtype):
            fill_value = 0
        elif dtype == bool:
            fill_value = False
        else:
            fill_value = 0.0
        sparse_series = []
        N = len(data)
        sp_indices = [[] for _ in range(len(dummy_cols))]
        mask = codes != -1
        codes = codes[mask]
        n_idx = np.arange(N)[mask]
        for ndx, code in zip(n_idx, codes):
            sp_indices[code].append(ndx)
        if drop_first:
            # remove first categorical level to avoid perfect collinearity
            # GH12042
            sp_indices = sp_indices[1:]
            dummy_cols = dummy_cols[1:]
        for col, ixs in zip(dummy_cols, sp_indices):
            sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
                               sparse_index=IntIndex(N, ixs),
                               fill_value=fill_value,
                               dtype=dtype)
            sparse_series.append(Series(data=sarr, index=index, name=col))
        out = concat(sparse_series, axis=1, copy=False)
        return out
    else:
        # Dense path: row i of the result is row codes[i] of the identity.
        dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
        if not dummy_na:
            # reset NaN GH4446
            dummy_mat[codes == -1] = 0
        if drop_first:
            # remove first GH12042
            dummy_mat = dummy_mat[:, 1:]
            dummy_cols = dummy_cols[1:]
        return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
    """
    Construct 1-0 dummy variables corresponding to designated axis
    labels.

    Parameters
    ----------
    frame : DataFrame
        Must have a MultiIndex with at least two levels.
    axis : {'major', 'minor'}, default 'minor'
        Which index level to encode (0 or 1); an explicit level number
        is also accepted.
    transform : function, default None
        Function to apply to axis labels first. For example, to
        get "day of week" dummies in a time series regression
        you might call::

            make_axis_dummies(panel, axis='major',
                              transform=lambda d: d.weekday())

    Returns
    -------
    dummies : DataFrame
        Column names taken from chosen axis
    """
    # Unrecognized axis strings fall through as an explicit level number.
    level_number = {'major': 0, 'minor': 1}.get(axis, axis)
    items = frame.index.levels[level_number]
    codes = frame.index.codes[level_number]
    if transform is not None:
        transformed = items.map(transform).take(codes)
        codes, items = _factorize_from_iterable(transformed)
    # Row i of the identity matrix selected by codes[i] is its dummy row.
    indicator = np.eye(len(items), dtype=float).take(codes, axis=0)
    return DataFrame(indicator, columns=items, index=frame.index)
def _reorder_for_extension_array_stack(arr, n_rows, n_columns):
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
| {
"repo_name": "cbertinato/pandas",
"path": "pandas/core/reshape/reshape.py",
"copies": "1",
"size": "36241",
"license": "bsd-3-clause",
"hash": -5374240042623319000,
"line_mean": 33.8806544755,
"line_max": 79,
"alpha_frac": 0.5767776827,
"autogenerated": false,
"ratio": 3.8603536429484446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49371313256484445,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import json
from os.path import join
from . import base
from buildercore import core, utils, project
from unittest import skip
from mock import patch
class SimpleCases(base.BaseCase):
    """Unit tests for pure-ish helpers in ``buildercore.core``:
    stackname parsing, hostname construction, region detection and
    SNS-subscription filtering."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_hostname_struct_no_subdomain(self):
        # A project with no 'subdomain' config yields None for every
        # derived hostname field.
        expected = {
            'domain': "example.org",
            'int_domain': "example.internal",
            'subdomain': None,
            'project_hostname': None,
            'int_project_hostname': None,
            'hostname': None,
            'full_hostname': None,
            'int_full_hostname': None,
        }
        stackname = 'dummy1--test'
        self.assertEqual(core.hostname_struct(stackname), expected)
    def test_hostname_struct_with_subdomain(self):
        # With a subdomain, internal/external and per-node hostname
        # templates are all derived from '<instance>--<subdomain>'.
        expected = {
            'domain': "example.org",
            'int_domain': "example.internal",
            'subdomain': 'dummy2',
            'hostname': 'ci--dummy2',
            'project_hostname': 'dummy2.example.org',
            'int_project_hostname': 'dummy2.example.internal',
            'full_hostname': 'ci--dummy2.example.org',
            'int_full_hostname': 'ci--dummy2.example.internal',
            'ext_node_hostname': 'ci--dummy2--%s.example.org',
            'int_node_hostname': 'ci--dummy2--%s.example.internal',
        }
        stackname = 'dummy2--ci'
        self.assertEqual(core.hostname_struct(stackname), expected)
    def test_project_name_from_stackname(self):
        # The project name is everything before the first '--'.
        expected = [
            ('elife-bot--2015-04-29', 'elife-bot'),
            ('elife-bot--2015-05-11v2', 'elife-bot'),
            ('elife-bot--large-gnott-again-2015-12-13', 'elife-bot'),
            ('elife-dashboard--2016-01-26', 'elife-dashboard'),
            ('elife-lax--2015-10-15-develop', 'elife-lax'),
            ('elife-metrics--2015-09-25', 'elife-metrics'),
            ('elife-metrics--prod-candidate', 'elife-metrics'),
            ('master-server--2014-12-24', 'master-server'),
        ]
        self.assertAllPairsEqual(core.project_name_from_stackname, expected)
    def test_parse_stackname(self):
        # basic
        expected = [
            ('lax--prod', ['lax', 'prod']),
            ('lax--prod--1', ['lax', 'prod--1']), # is this really what we're expecting?
            ('journal-cms--end2end', ['journal-cms', 'end2end']),
            ('journal-cms--end2end--2', ['journal-cms', 'end2end--2']), # again, really?
        ]
        self.assertAllPairsEqual(core.parse_stackname, expected)
        # extended
        expected = [
            ('lax--prod', ['lax', 'prod']),
            ('lax--prod--1', ['lax', 'prod', '1']),
            ('journal-cms--end2end', ['journal-cms', 'end2end']),
            ('journal-cms--end2end--2', ['journal-cms', 'end2end', '2']),
        ]
        self.assertAllPairsEqual(partial(core.parse_stackname, all_bits=True), expected)
        # as dict
        expected = [
            ('lax--prod', {"project_name": 'lax', "instance_id": 'prod'}),
            ('lax--prod--1', {"project_name": 'lax', "instance_id": 'prod', "cluster_id": '1'}),
            ('journal-cms--end2end', {"project_name": 'journal-cms', "instance_id": 'end2end'}),
            ('journal-cms--end2end--2', {"project_name": 'journal-cms', "instance_id": 'end2end', "cluster_id": '2'}),
        ]
        self.assertAllPairsEqual(partial(core.parse_stackname, all_bits=True, idx=True), expected)
    def test_master_server_stackname(self):
        self.assertTrue(core.is_master_server_stack('master-server--temp'))
        self.assertFalse(core.is_master_server_stack('master-some-project--end2end'))
        self.assertFalse(core.is_master_server_stack('lax--end2end'))
    def test_bad_pname_from_stackname(self):
        expected_error = [
            # project names by themselves. a stackname must be projectname + instance_id
            'elife-lax',
            # master server isn't special here
            'master-server',
            'asdf', # project name that doesn't exist
            # just bad values
            '', None, -1,
        ]
        for expected in expected_error:
            self.assertRaises(ValueError, core.project_name_from_stackname, expected)
    def test_master_server_identified(self):
        true_cases = [
            'master-server--master',
            'master-server--2016-01-01',
            'master-server--master--ci',
        ]
        results = list(map(core.is_master_server_stack, true_cases))
        self.assertTrue(all(results), "not all master servers identified: %r" % list(zip(true_cases, results)))
    def test_master_server_identified_false_cases(self):
        false_cases = [
            'master-server', # *stack* names not project names
            '', None, 123, {}, [], self
        ]
        results = list(map(core.is_master_server_stack, false_cases))
        self.assertFalse(all(results), "not all false cases identified: %r" % list(zip(false_cases, results)))
    def test_find_region(self):
        # Only one region is configured in the default test settings.
        self.assertEqual(core.find_region(), "us-east-1")
    def test_find_region_when_more_than_one_is_available(self):
        # Two fixture projects in different regions makes region choice
        # ambiguous; find_region() must refuse to pick one.
        try:
            base.switch_in_test_settings([
                'src/tests/fixtures/projects/dummy-project.yaml',
                'src/tests/fixtures/additional-projects/dummy-project-eu.yaml',
            ])
            core.find_region()
            self.fail("Shouldn't be able to choose a region")
        except core.MultipleRegionsError as e:
            self.assertCountEqual(["us-east-1", "eu-central-1"], e.regions())
        finally:
            base.switch_out_test_settings()
    def test_find_ec2_instances(self):
        self.assertEqual([], core.find_ec2_instances('dummy1--prod', allow_empty=True))
    def test_find_ec2_instances_requiring_a_non_empty_list(self):
        self.assertRaises(core.NoRunningInstances, core.find_ec2_instances, 'dummy1--prod', allow_empty=False)
    def test_all_sns_subscriptions_filters_correctly(self):
        cases = [
            ('lax--prod', []), # lax doesn't subscribe to anything
            ('observer--prod', ['bus-articles--prod', 'bus-metrics--prod']),
        ]
        fixture = json.load(open(join(self.fixtures_dir, 'sns_subscriptions.json'), 'r'))
        with patch('buildercore.core._all_sns_subscriptions', return_value=fixture):
            for stackname, expected_subs in cases:
                res = core.all_sns_subscriptions('someregion', stackname)
                actual_subs = [sub['Topic'] for sub in res]
                #self.assertItemsEqual(expected_subs, actual_subs)
                # https://bugs.python.org/issue17866
                self.assertCountEqual(expected_subs, actual_subs)
class Errors(base.BaseCase):
    """Error paths in ``core.stack_all_ec2_nodes`` when EC2 lookups
    come back empty or unusable."""
    @patch('buildercore.core.stack_data')
    def test_no_running_instances_found(self, stack_data):
        # With no instances at all, iterating the stack is a no-op.
        stack_data.return_value = []
        result = core.stack_all_ec2_nodes('dummy1--test', lambda: True)
        self.assertEqual(result, {})
    @patch('buildercore.core.stack_data')
    def test_no_public_ips_available(self, stack_data):
        # An instance without a public IP cannot be addressed, so the
        # call must raise rather than silently skip it.
        fake_instance = {'InstanceId': 'i-1', 'PublicIpAddress': None, 'Tags': []}
        stack_data.return_value = [fake_instance]
        self.assertRaises(
            core.NoPublicIps,
            core.stack_all_ec2_nodes, 'dummy1--test', lambda: True
        )
class TestCoreNewProjectData(base.BaseCase):
    """Checks generated project configuration against JSON fixture files."""
    def setUp(self):
        # One expected-output fixture per dummy project.
        self.dummy1_config = join(self.fixtures_dir, 'dummy1-project.json')
        self.dummy2_config = join(self.fixtures_dir, 'dummy2-project.json')
        self.dummy3_config = join(self.fixtures_dir, 'dummy3-project.json')
    def tearDown(self):
        pass
    def test_configurations(self):
        # project_data() output (minus OrderedDict wrappers) must match
        # the stored fixture exactly.
        expected = [
            ('dummy1', self.dummy1_config),
            ('dummy2', self.dummy2_config),
            ('dummy3', self.dummy3_config),
        ]
        for pname, expected_path in expected:
            expected_data = json.load(open(expected_path, 'r'))
            project_data = project.project_data(pname)
            project_data = utils.remove_ordereddict(project_data)
            self.assertEqual(expected_data, project_data)
    # snippets
    @skip("depends on old project config generation")
    def test_merge_default_snippet(self):
        "merging a snippet into the defaults ensures all projects get that new default"
        # all projects now get 999 cpus. perfectly sane requirement.
        project_data = project.project_data('dummy1')
        project_data = utils.remove_ordereddict(project_data)
        expected_data = json.load(open(self.dummy1_config, 'r'))
        expected_data['vagrant']['cpus'] = 999
        self.assertEqual(project_data, expected_data)
    @skip("depends on old project config generation")
    def test_merge_multiple_default_snippets(self):
        """merging multiple overlapping snippets into the defaults
        ensures all projects get the new defaults"""
        # all projects now get 999 cpus. perfectly sane requirement.
        project_data = project.project_data('dummy1')
        project_data = utils.remove_ordereddict(project_data)
        expected_data = json.load(open(self.dummy1_config, 'r'))
        expected_data['vagrant']['cpus'] = 999
        expected_data['vagrant']['cpucap'] = 111
        self.assertEqual(project_data, expected_data)
| {
"repo_name": "elifesciences/builder",
"path": "src/tests/test_buildercore_core.py",
"copies": "1",
"size": "9386",
"license": "mit",
"hash": 2394419617615650000,
"line_mean": 41.0896860987,
"line_max": 118,
"alpha_frac": 0.5950351587,
"autogenerated": false,
"ratio": 3.723125743752479,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818160902452479,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import json
from warnings import warn
import numpy as np
from pandas import Series, DataFrame
from pandas.core.indexing import _NDFrameIndexer
from pandas.util.decorators import cache_readonly
import pyproj
from shapely.geometry import box, shape, Polygon, Point
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform
from geopandas.plotting import plot_series
from geopandas.base import GeoPandasBase
OLD_PANDAS = issubclass(Series, np.ndarray)
def _is_empty(x):
try:
return x.is_empty
except:
return False
def _convert_array_args(args):
if len(args) == 1 and isinstance(args[0], BaseGeometry):
args = ([args[0]],)
return args
class _CoordinateIndexer(_NDFrameIndexer):
    """Spatially index a GeoSeries with ``[xmin:xmax, ymin:ymax]`` slices."""
    def _getitem_tuple(self, tup):
        geoseries = self.obj
        x_sel, y_sel = tup
        # Scalar coordinates act as degenerate (zero-width) intervals.
        if not isinstance(x_sel, slice):
            x_sel = slice(x_sel, x_sel)
        if not isinstance(y_sel, slice):
            y_sel = slice(y_sel, y_sel)
        # don't know how to handle step; should this raise?
        if x_sel.step is not None or y_sel.step is not None:
            warn("Ignoring step - full interval is used.")
        # Open-ended sides fall back to the layer's total bounds.
        xmin, ymin, xmax, ymax = geoseries.total_bounds
        window = box(x_sel.start or xmin,
                     y_sel.start or ymin,
                     x_sel.stop or xmax,
                     y_sel.stop or ymax)
        return geoseries[geoseries.intersects(window)]
class GeoSeries(GeoPandasBase, Series):
"""A Series object designed to store shapely geometry objects."""
_metadata = ['name', 'crs']
    def __new__(cls, *args, **kwargs):
        # 'crs' is consumed by __init__; it must not reach Series.__new__.
        kwargs.pop('crs', None)
        if OLD_PANDAS:
            # Pre-0.13 pandas constructs the ndarray subclass here.
            args = _convert_array_args(args)
            arr = Series.__new__(cls, *args, **kwargs)
        else:
            arr = Series.__new__(cls)
        if type(arr) is GeoSeries:
            return arr
        else:
            # Series.__new__ may hand back a plain Series; re-view it
            # as a GeoSeries.
            return arr.view(GeoSeries)
    def __init__(self, *args, **kwargs):
        """Initialize like a Series, additionally accepting a ``crs`` kwarg."""
        if not OLD_PANDAS:
            args = _convert_array_args(args)
        # Pop 'crs' before delegating: Series.__init__ does not accept it.
        crs = kwargs.pop('crs', None)
        super(GeoSeries, self).__init__(*args, **kwargs)
        self.crs = crs
        self._invalidate_sindex()
    def append(self, *args, **kwargs):
        # Delegate to Series.append but re-class the result as a GeoSeries.
        return self._wrapped_pandas_method('append', *args, **kwargs)
    @property
    def geometry(self):
        # A GeoSeries is its own geometry column.
        return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Alternate constructor to create a GeoSeries from a file
Parameters
----------
filename : str
File path or file handle to read from. Depending on which kwargs
are included, the content of filename may vary, see:
http://toblerity.github.io/fiona/README.html#usage
for usage details.
kwargs : key-word arguments
These arguments are passed to fiona.open, and can be used to
access multi-layer data, data stored within archives (zip files),
etc.
"""
import fiona
geoms = []
with fiona.open(filename, **kwargs) as f:
crs = f.crs
for rec in f:
geoms.append(shape(rec['geometry']))
g = GeoSeries(geoms)
g.crs = crs
return g
    @property
    def __geo_interface__(self):
        """Return the GeoSeries as a python feature collection
        (the standard ``__geo_interface__`` mapping)."""
        # Delegate to GeoDataFrame's implementation via a one-column frame.
        from geopandas import GeoDataFrame
        return GeoDataFrame({'geometry': self}).__geo_interface__
def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
from geopandas import GeoDataFrame
data = GeoDataFrame({"geometry": self,
"id":self.index.values},
index=self.index)
data.crs = self.crs
data.to_file(filename, driver, **kwargs)
#
# Implement pandas methods
#
@property
def _constructor(self):
return GeoSeries
def _wrapped_pandas_method(self, mtd, *args, **kwargs):
"""Wrap a generic pandas method to ensure it returns a GeoSeries"""
val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)
if type(val) == Series:
val.__class__ = GeoSeries
val.crs = self.crs
val._invalidate_sindex()
return val
def __getitem__(self, key):
return self._wrapped_pandas_method('__getitem__', key)
def sort_index(self, *args, **kwargs):
return self._wrapped_pandas_method('sort_index', *args, **kwargs)
def take(self, *args, **kwargs):
return self._wrapped_pandas_method('take', *args, **kwargs)
def select(self, *args, **kwargs):
return self._wrapped_pandas_method('select', *args, **kwargs)
@property
def _can_hold_na(self):
return False
def __finalize__(self, other, method=None, **kwargs):
""" propagate metadata from other to self """
# NOTE: backported from pandas master (upcoming v0.13)
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def copy(self, order='C'):
"""
Make a copy of this GeoSeries object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : GeoSeries
"""
# FIXME: this will likely be unnecessary in pandas >= 0.13
return GeoSeries(self.values.copy(order), index=self.index,
name=self.name).__finalize__(self)
def isnull(self):
"""Null values in a GeoSeries are represented by empty geometric objects"""
non_geo_null = super(GeoSeries, self).isnull()
val = self.apply(_is_empty)
return np.logical_or(non_geo_null, val)
def fillna(self, value=None, method=None, inplace=False,
**kwargs):
"""Fill NA/NaN values with a geometry (empty polygon by default).
"method" is currently not implemented for pandas <= 0.12.
"""
if value is None:
value = Point()
if not OLD_PANDAS:
return super(GeoSeries, self).fillna(value=value, method=method,
inplace=inplace, **kwargs)
else:
# FIXME: this is an ugly way to support pandas <= 0.12
if method is not None:
raise NotImplementedError('Fill method is currently not implemented for GeoSeries')
if isinstance(value, BaseGeometry):
result = self.copy() if not inplace else self
mask = self.isnull()
result[mask] = value
if not inplace:
return GeoSeries(result)
else:
raise ValueError('Non-geometric fill values not allowed for GeoSeries')
def align(self, other, join='outer', level=None, copy=True,
fill_value=None, **kwargs):
if fill_value is None:
fill_value = Point()
left, right = super(GeoSeries, self).align(other, join=join,
level=level, copy=copy,
fill_value=fill_value,
**kwargs)
if isinstance(other, GeoSeries):
return GeoSeries(left), GeoSeries(right)
else: # It is probably a Series, let's keep it that way
return GeoSeries(left), right
def __contains__(self, other):
"""Allow tests of the form "geom in s"
Tests whether a GeoSeries contains a geometry.
Note: This is not the same as the geometric method "contains".
"""
if isinstance(other, BaseGeometry):
return np.any(self.geom_equals(other))
else:
return False
def plot(self, *args, **kwargs):
return plot_series(self, *args, **kwargs)
plot.__doc__ = plot_series.__doc__
#
# Additional methods
#
def to_crs(self, crs=None, epsg=None):
"""Transform geometries to a new coordinate reference system
This method will transform all points in all objects. It has
no notion or projecting entire geometries. All segments
joining points are assumed to be lines in the current
projection, not geodesics. Objects crossing the dateline (or
other projection boundary) will have undesirable behavior.
`to_crs` passes the `crs` argument to the `Proj` function from the
`pyproj` library (with the option `preserve_units=True`). It can
therefore accept proj4 projections in any format
supported by `Proj`, including dictionaries, or proj4 strings.
"""
from fiona.crs import from_epsg
if self.crs is None:
raise ValueError('Cannot transform naive geometries. '
'Please set a crs on the object first.')
if crs is None:
try:
crs = from_epsg(epsg)
except TypeError:
raise TypeError('Must set either crs or epsg for output.')
proj_in = pyproj.Proj(self.crs, preserve_units=True)
proj_out = pyproj.Proj(crs, preserve_units=True)
project = partial(pyproj.transform, proj_in, proj_out)
result = self.apply(lambda geom: transform(project, geom))
result.__class__ = GeoSeries
result.crs = crs
result._invalidate_sindex()
return result
def to_json(self, **kwargs):
"""
Returns a GeoJSON string representation of the GeoSeries.
Parameters
----------
*kwargs* that will be passed to json.dumps().
"""
return json.dumps(self.__geo_interface__, **kwargs)
#
# Implement standard operators for GeoSeries
#
def __xor__(self, other):
"""Implement ^ operator as for builtin set type"""
return self.symmetric_difference(other)
def __or__(self, other):
"""Implement | operator as for builtin set type"""
return self.union(other)
def __and__(self, other):
"""Implement & operator as for builtin set type"""
return self.intersection(other)
def __sub__(self, other):
"""Implement - operator as for builtin set type"""
return self.difference(other)
# Register the coordinate-based indexer so `s.cx[xmin:xmax, ymin:ymax]`
# performs bounding-box selection (implemented by _CoordinateIndexer above).
GeoSeries._create_indexer('cx', _CoordinateIndexer)
| {
"repo_name": "IamJeffG/geopandas",
"path": "geopandas/geoseries.py",
"copies": "2",
"size": "10658",
"license": "bsd-3-clause",
"hash": -186227760433238620,
"line_mean": 32.9426751592,
"line_max": 99,
"alpha_frac": 0.5765622068,
"autogenerated": false,
"ratio": 4.318476499189627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5895038705989627,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import json
import feedparser
from feeds_repository import RssFeedsRepository
import os
from tornado.httpclient import AsyncHTTPClient
import tornado.web
from tornado.ioloop import IOLoop
from tornado.web import asynchronous
from tornado.gen import coroutine
from utils import json_encode, get_unread_entries
from lxml import etree
class RssFeedsHandler(tornado.web.RequestHandler):
    """REST endpoint for listing and subscribing to RSS feeds."""
    def initialize(self, rss_feeds):
        # Repository injected via the Application route definition.
        self.rss_feeds = rss_feeds
    def get(self):
        """Return the user's feed subscriptions as a JSON array."""
        feeds = self.rss_feeds.get_feeds('bruno')
        self.set_header('Content-Type', 'application/json; charset=utf-8')
        self.write(json.dumps(feeds))
    @asynchronous
    def post(self, *args, **kwargs):
        """Subscribe to the feed URL given in the JSON request body."""
        url = json.loads(self.request.body)['url']
        on_fetched = partial(self.handle_feed_insert, url)
        AsyncHTTPClient().fetch(url, callback=on_fetched)
    def handle_feed_insert(self, feed_url, response):
        """Parse the fetched feed document and persist its metadata."""
        parsed = feedparser.parse(response.body)
        self.rss_feeds.insert_feed('bruno', {'url': feed_url, 'main_url': parsed.feed.link, 'title': parsed.feed.title})
class OpmlHandler(tornado.web.RequestHandler):
    """Imports feed subscriptions from an uploaded OPML document."""
    def initialize(self, rss_feeds):
        self.rss_feeds = rss_feeds
    def post(self, *args, **kwargs):
        """Parse the uploaded OPML file and register every outline as a feed."""
        upload = self.request.files['opmlFile'][0]
        root = etree.fromstring(upload['body'].strip())
        for outline in root.iterfind(".//outline"):
            record = {'url': outline.get('xmlUrl'), 'main_url': outline.get('htmlUrl'), 'title': outline.get('title')}
            self.rss_feeds.insert_feed('bruno', record)
class ArticleHandler(tornado.web.RequestHandler):
    """Records that an article has been fetched by the user."""
    def initialize(self, rss_feeds):
        self.rss_feeds = rss_feeds
    def put(self, *args, **kwargs):
        """Persist the article described by the JSON request body."""
        payload = json.loads(self.request.body)
        self.rss_feeds.insert_fetched_article('bruno', payload)
class CheckRssFeedsHandlder(tornado.web.RequestHandler):
    # NOTE(review): class name is misspelled ("Handlder") but it is referenced
    # by the Application routing table below, so renaming must be coordinated.
    def initialize(self, rss_feeds):
        self.rss_feeds = rss_feeds
        # Server-Sent Events response: stream of "data: ..." frames.
        self.set_header('Content-Type', 'text/event-stream')
        self.set_header('Cache-Control', 'no-cache')
    @coroutine
    def get(self):
        """Fetch every subscribed feed concurrently, streaming unread
        entries to the client as SSE frames, then send a close event."""
        rssfeeds = self.rss_feeds.get_feeds('bruno')
        # Yielding the list waits for all fetches; each completed fetch
        # writes its own SSE frame via handle_feed_check.
        yield [AsyncHTTPClient().fetch(feed['url'], callback=partial(self.handle_feed_check, feed)) for feed in rssfeeds]
        self.write('event: close\ndata:\n\n')
    def handle_feed_check(self, rss_feed, response):
        """Emit one SSE frame with the feed's unread entries."""
        rss = feedparser.parse(response.body)
        read_articles = self.rss_feeds.get_feed_read_articles('bruno', rss_feed['id'])
        entries = get_unread_entries(rss.entries, read_articles)
        self.write('data: %s' % json.dumps({'id': rss_feed['id'], 'url': rss_feed['main_url'], 'entries': json_encode(entries)}))
        self.write('\n\n')
        # Flush so the browser receives each feed's frame as it completes.
        self.flush()
if __name__ == "__main__":
    # Single shared repository instance, injected into every handler
    # through the per-route initialize() kwargs below.
    rss_feeds_repository = RssFeedsRepository()
    application = tornado.web.Application([
        (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': os.path.join(os.getcwd() + '/static')}),
        (r'/rssfeeds', RssFeedsHandler, {'rss_feeds': rss_feeds_repository}),
        (r'/updatefeeds', CheckRssFeedsHandlder, {'rss_feeds': rss_feeds_repository}),
        (r'/opml/import', OpmlHandler, {'rss_feeds': rss_feeds_repository}),
        (r'/article', ArticleHandler, {'rss_feeds': rss_feeds_repository}),
        ])
    application.listen(8888)
    # Blocks forever serving requests.
    IOLoop.instance().start()
| {
"repo_name": "bamthomas/myrefs",
"path": "site/app_tornado.py",
"copies": "1",
"size": "3409",
"license": "mit",
"hash": -4828005539949403000,
"line_mean": 39.5833333333,
"line_max": 137,
"alpha_frac": 0.6661777647,
"autogenerated": false,
"ratio": 3.4574036511156185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9616996960922721,
"avg_score": 0.0013168909785795195,
"num_lines": 84
} |
from functools import partial
import json
import logging
import sys
import click
import fiona
from fiona.transform import transform_geom
from fiona.fio.cli import cli, obj_gen
def make_ld_context(context_items):
    """Returns a JSON-LD Context object.

    Parameters
    ----------
    context_items : iterable of str or None
        Extra ``"term=uri"`` mappings to add alongside the built-in
        ``"@context"`` entry. ``None`` (or empty) adds nothing.

    Returns
    -------
    dict
        A dict with the standard GeoJSON-LD ``"@context"`` plus any
        user-supplied term/URI pairs at the top level.

    See http://json-ld.org/spec/latest/json-ld."""
    ctx = {
      "@context": {
        "geojson": "http://ld.geojson.org/vocab#",
        "Feature": "geojson:Feature",
        "FeatureCollection": "geojson:FeatureCollection",
        "GeometryCollection": "geojson:GeometryCollection",
        "LineString": "geojson:LineString",
        "MultiLineString": "geojson:MultiLineString",
        "MultiPoint": "geojson:MultiPoint",
        "MultiPolygon": "geojson:MultiPolygon",
        "Point": "geojson:Point",
        "Polygon": "geojson:Polygon",
        "bbox": {
          "@container": "@list",
          "@id": "geojson:bbox"
        },
        "coordinates": "geojson:coordinates",
        "datetime": "http://www.w3.org/2006/time#inXSDDateTime",
        "description": "http://purl.org/dc/terms/description",
        "features": {
          "@container": "@set",
          "@id": "geojson:features"
        },
        "geometry": "geojson:geometry",
        "id": "@id",
        "properties": "geojson:properties",
        "start": "http://www.w3.org/2006/time#hasBeginning",
        "stop": "http://www.w3.org/2006/time#hasEnding",
        "title": "http://purl.org/dc/terms/title",
        "type": "@type",
        "when": "geojson:when"
      }
    }
    for item in context_items or []:
        # Split on the FIRST '=' only: URIs routinely contain '=' (query
        # strings, base64 fragments) and an unbounded split raised
        # "too many values to unpack" for such items.
        t, uri = item.split("=", 1)
        ctx[t.strip()] = uri.strip()
    return ctx
def id_record(rec):
    """Rewrite the record's ``id`` as a JSON-LD blank-node id (in place)
    and return the same record for convenient chaining."""
    rec['id'] = '_:f{0}'.format(rec['id'])
    return rec
# Cat command
@cli.command(short_help="Concatenate and print the features of datasets")
# One or more files.
@click.argument('input', nargs=-1, type=click.Path(exists=True))
# Coordinate precision option.
@click.option('--precision', type=int, default=-1, metavar="N",
              help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int, metavar="N",
              help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
              help="Use compact separators (',', ':').")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@click.option('--dst_crs', default=None, metavar="EPSG:NNNN",
              help="Destination CRS.")
# Use ASCII RS control code to signal a sequence item (False is default).
# See http://tools.ietf.org/html/draft-ietf-json-text-sequence-05.
# Experimental.
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=True,
              help="Use RS as text separator instead of LF. Experimental.")
@click.option('--bbox', default=None, metavar="w,s,e,n",
              help="filter for features intersecting a bounding box")
@click.pass_context
def cat(ctx, input, precision, indent, compact, ignore_errors, dst_crs,
        x_json_seq_rs, bbox):
    """Concatenate and print the features of input datasets as a
    sequence of GeoJSON features.

    One JSON feature is written per line (RS-prefixed when
    ``--x-json-seq-rs``). Exits 0 on success, 1 on any error.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    sink = click.get_text_stream('stdout')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '
    # Parse the bounding box ONCE, before the per-file loop. Previously it
    # was parsed inside the loop, so with two or more input files the
    # second iteration called .split() on an already-converted tuple and
    # crashed with AttributeError.
    if bbox:
        bbox = tuple(map(float, bbox.split(',')))
    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            for path in input:
                with fiona.open(path) as src:
                    for i, feat in src.items(bbox=bbox):
                        if dst_crs or precision > 0:
                            # Reproject and/or round coordinates; keep the
                            # bbox consistent with the new geometry.
                            g = transform_geom(
                                src.crs, dst_crs, feat['geometry'],
                                antimeridian_cutting=True,
                                precision=precision)
                            feat['geometry'] = g
                            feat['bbox'] = fiona.bounds(g)
                        if x_json_seq_rs:
                            # ASCII RS (0x1E) marks the start of each item.
                            sink.write(u'\u001e')
                        json.dump(feat, sink, **dump_kwds)
                        sink.write("\n")
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
# Collect command
@cli.command(short_help="Collect a sequence of features.")
# Coordinate precision option.
@click.option('--precision', type=int, default=-1, metavar="N",
              help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int, metavar="N",
              help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
              help="Use compact separators (',', ':').")
@click.option('--record-buffered/--no-record-buffered', default=False,
              help="Economical buffering of writes at record, not collection "
                   "(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@click.option('--src_crs', default=None, metavar="EPSG:NNNN",
              help="Source CRS.")
@click.option('--with-ld-context/--without-ld-context', default=False,
              help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
              help="map a term to a URI and add it to the output's JSON LD context.")
@click.pass_context
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
            src_crs, with_ld_context, add_ld_context_item):
    """Make a GeoJSON feature collection from a sequence of GeoJSON
    features and print it.

    Reads either an RS-delimited or LF-delimited JSON feature sequence
    from stdin (auto-detected from the first byte). With ``--src_crs``
    each geometry is reprojected to EPSG:4326. Exits 0 on success, 1 on
    any error.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    sink = click.get_text_stream('stdout')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '
    # With a source CRS, reproject every geometry to WGS84; otherwise
    # pass geometries through unchanged.
    if src_crs:
        transformer = partial(transform_geom, src_crs, 'EPSG:4326',
                              antimeridian_cutting=True, precision=precision)
    else:
        transformer = lambda x: x
    # NOTE(review): raises StopIteration on completely empty stdin — the
    # generic except below then exits 1. Confirm that is acceptable.
    first_line = next(stdin)
    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u'\x1e'):
        def feature_gen():
            # Accumulate text until the next RS marker, then parse the
            # buffered item as one feature.
            buffer = first_line.strip(u'\x1e')
            for line in stdin:
                if line.startswith(u'\x1e'):
                    if buffer:
                        feat = json.loads(buffer)
                        feat['geometry'] = transformer(feat['geometry'])
                        yield feat
                    buffer = line.strip(u'\x1e')
                else:
                    buffer += line
            else:
                # Flush the final buffered feature after stdin is drained.
                feat = json.loads(buffer)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    else:
        def feature_gen():
            # LF-delimited: one complete JSON feature per line.
            feat = json.loads(first_line)
            feat['geometry'] = transformer(feat['geometry'])
            yield feat
            for line in stdin:
                feat = json.loads(line)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    try:
        source = feature_gen()
        if record_buffered:
            # Buffer GeoJSON data at the feature level for smaller
            # memory footprint.
            indented = bool(indent)
            rec_indent = "\n" + " " * (2 * (indent or 0))
            collection = {
                'type': 'FeatureCollection',
                'features': [] }
            if with_ld_context:
                collection['@context'] = make_ld_context(
                    add_ld_context_item)
            # Serialize the (empty) collection once, then stream features
            # into the '[]' gap between head and tail.
            head, tail = json.dumps(collection, **dump_kwds).split('[]')
            sink.write(head)
            sink.write("[")
            # Try the first record.
            try:
                i, first = 0, next(source)
                if with_ld_context:
                    first = id_record(first)
                if indented:
                    sink.write(rec_indent)
                sink.write(
                    json.dumps(first, **dump_kwds
                        ).replace("\n", rec_indent))
            except StopIteration:
                pass
            except Exception as exc:
                # Ignoring errors is *not* the default.
                if ignore_errors:
                    logger.error(
                        "failed to serialize file record %d (%s), "
                        "continuing",
                        i, exc)
                else:
                    # Log error and close up the GeoJSON, leaving it
                    # more or less valid no matter what happens above.
                    logger.critical(
                        "failed to serialize file record %d (%s), "
                        "quiting",
                        i, exc)
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                    raise
            # Because trailing commas aren't valid in JSON arrays
            # we'll write the item separator before each of the
            # remaining features.
            for i, rec in enumerate(source, 1):
                try:
                    if with_ld_context:
                        rec = id_record(rec)
                    if indented:
                        sink.write(rec_indent)
                    sink.write(item_sep)
                    sink.write(
                        json.dumps(rec, **dump_kwds
                            ).replace("\n", rec_indent))
                except Exception as exc:
                    if ignore_errors:
                        logger.error(
                            "failed to serialize file record %d (%s), "
                            "continuing",
                            i, exc)
                    else:
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quiting",
                            i, exc)
                        sink.write("]")
                        sink.write(tail)
                        if indented:
                            sink.write("\n")
                        raise
            # Close up the GeoJSON after writing all features.
            sink.write("]")
            sink.write(tail)
            if indented:
                sink.write("\n")
        else:
            # Buffer GeoJSON data at the collection level. The default.
            collection = {'type': 'FeatureCollection'}
            if with_ld_context:
                collection['@context'] = make_ld_context(
                    add_ld_context_item)
                collection['features'] = [
                    id_record(rec) for rec in source]
            else:
                collection['features'] = list(source)
            json.dump(collection, sink, **dump_kwds)
            sink.write("\n")
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
# Distribute command
@cli.command(short_help="Distribute features from a collection")
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=False,
              help="Use RS as text separator instead of LF. "
                   "Experimental (default: no).")
@click.pass_context
def distrib(ctx, x_json_seq_rs):
    """Print the features of GeoJSON objects read from stdin.

    Feature collections are unwrapped: every member feature is written
    on its own line with a 'parent' reference to the collection.
    Exits 0 on success, 1 on any error.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    stdout = click.get_text_stream('stdout')
    try:
        source = obj_gen(stdin)
        for i, obj in enumerate(source):
            obj_id = obj.get('id', 'collection:' + str(i))
            # A bare feature (no 'features' key) is treated as a
            # single-member collection of itself.
            features = obj.get('features') or [obj]
            for j, feat in enumerate(features):
                if obj.get('type') == 'FeatureCollection':
                    feat['parent'] = obj_id
                # NOTE(review): default id uses the collection index ``i``,
                # so all features of one collection share a default id; the
                # feature index ``j`` is unused — possibly str(j) was
                # intended. Confirm before changing.
                feat_id = feat.get('id', 'feature:' + str(i))
                feat['id'] = feat_id
                stdout.write(json.dumps(feat))
                stdout.write('\n')
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
# Dump command
@cli.command(short_help="Dump a dataset to GeoJSON.")
@click.argument('input', type=click.Path(), required=True)
@click.option('--encoding', help="Specify encoding of the input file.")
# Coordinate precision option.
@click.option('--precision', type=int, default=-1,
              help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int,
              help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
              help="Use compact separators (',', ':').")
@click.option('--record-buffered/--no-record-buffered', default=False,
              help="Economical buffering of writes at record, not collection "
                   "(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@click.option('--with-ld-context/--without-ld-context', default=False,
              help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
              help="map a term to a URI and add it to the output's JSON LD context.")
@click.option('--x-json-seq/--x-json-obj', default=False,
              help="Write a LF-delimited JSON sequence (default is object). "
                   "Experimental.")
# Use ASCII RS control code to signal a sequence item (False is default).
# See http://tools.ietf.org/html/draft-ietf-json-text-sequence-05.
# Experimental.
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=True,
              help="Use RS as text separator. Experimental.")
@click.pass_context
def dump(ctx, input, encoding, precision, indent, compact, record_buffered,
         ignore_errors, with_ld_context, add_ld_context_item,
         x_json_seq, x_json_seq_rs):
    """Dump a dataset either as a GeoJSON feature collection (the default)
    or a sequence of GeoJSON features.

    Geometries are reprojected to EPSG:4326 on the way out. Exits 0 on
    success, 1 on any error.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    sink = click.get_text_stream('stdout')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '
    open_kwds = {}
    if encoding:
        open_kwds['encoding'] = encoding
    def transformer(crs, feat):
        # Reproject one feature's geometry from ``crs`` to WGS84 in place.
        tg = partial(transform_geom, crs, 'EPSG:4326',
                     antimeridian_cutting=True, precision=precision)
        feat['geometry'] = tg(feat['geometry'])
        return feat
    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            with fiona.open(input, **open_kwds) as source:
                meta = source.meta
                meta['fields'] = dict(source.schema['properties'].items())
                if x_json_seq:
                    for feat in source:
                        feat = transformer(source.crs, feat)
                        if x_json_seq_rs:
                            # ASCII RS (0x1E) marks the start of each item.
                            sink.write(u'\u001e')
                        json.dump(feat, sink, **dump_kwds)
                        sink.write("\n")
                elif record_buffered:
                    # Buffer GeoJSON data at the feature level for smaller
                    # memory footprint.
                    indented = bool(indent)
                    rec_indent = "\n" + " " * (2 * (indent or 0))
                    collection = {
                        'type': 'FeatureCollection',
                        'fiona:schema': meta['schema'],
                        'fiona:crs': meta['crs'],
                        'features': [] }
                    if with_ld_context:
                        collection['@context'] = make_ld_context(
                            add_ld_context_item)
                    # Serialize the (empty) collection once, then stream
                    # features into the '[]' gap between head and tail.
                    head, tail = json.dumps(collection, **dump_kwds).split('[]')
                    sink.write(head)
                    sink.write("[")
                    itr = iter(source)
                    # Try the first record.
                    try:
                        i, first = 0, next(itr)
                        # BUGFIX: transformer takes (crs, feat); it was
                        # called with only the feature, raising TypeError.
                        first = transformer(source.crs, first)
                        if with_ld_context:
                            first = id_record(first)
                        if indented:
                            sink.write(rec_indent)
                        sink.write(
                            json.dumps(first, **dump_kwds
                                ).replace("\n", rec_indent))
                    except StopIteration:
                        pass
                    except Exception as exc:
                        # Ignoring errors is *not* the default.
                        if ignore_errors:
                            logger.error(
                                "failed to serialize file record %d (%s), "
                                "continuing",
                                i, exc)
                        else:
                            # Log error and close up the GeoJSON, leaving it
                            # more or less valid no matter what happens above.
                            logger.critical(
                                "failed to serialize file record %d (%s), "
                                "quiting",
                                i, exc)
                            sink.write("]")
                            sink.write(tail)
                            if indented:
                                sink.write("\n")
                            raise
                    # Because trailing commas aren't valid in JSON arrays
                    # we'll write the item separator before each of the
                    # remaining features.
                    for i, rec in enumerate(itr, 1):
                        # BUGFIX: pass the source CRS (see transformer).
                        rec = transformer(source.crs, rec)
                        try:
                            if with_ld_context:
                                rec = id_record(rec)
                            if indented:
                                sink.write(rec_indent)
                            sink.write(item_sep)
                            sink.write(
                                json.dumps(rec, **dump_kwds
                                    ).replace("\n", rec_indent))
                        except Exception as exc:
                            if ignore_errors:
                                logger.error(
                                    "failed to serialize file record %d (%s), "
                                    "continuing",
                                    i, exc)
                            else:
                                logger.critical(
                                    "failed to serialize file record %d (%s), "
                                    "quiting",
                                    i, exc)
                                sink.write("]")
                                sink.write(tail)
                                if indented:
                                    sink.write("\n")
                                raise
                    # Close up the GeoJSON after writing all features.
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                else:
                    # Buffer GeoJSON data at the collection level. The default.
                    collection = {
                        'type': 'FeatureCollection',
                        'fiona:schema': meta['schema'],
                        'fiona:crs': meta['crs']}
                    if with_ld_context:
                        collection['@context'] = make_ld_context(
                            add_ld_context_item)
                        collection['features'] = [
                            id_record(transformer(source.crs, rec))
                            for rec in source]
                    else:
                        collection['features'] = [transformer(source.crs, rec) for rec in source]
                    json.dump(collection, sink, **dump_kwds)
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
| {
"repo_name": "johanvdw/Fiona",
"path": "fiona/fio/cat.py",
"copies": "1",
"size": "20823",
"license": "bsd-3-clause",
"hash": 7366666735776433000,
"line_mean": 39.4330097087,
"line_max": 97,
"alpha_frac": 0.4902271527,
"autogenerated": false,
"ratio": 4.555458324217896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5545685476917896,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import json
import mock
from flask import url_for
from app import db
from app.models.users import User
from common import BaseTest
class TestUsersApi(BaseTest):
    """
    Users API Functional Tests.

    Exercises registration, registration confirmation, token issuance and
    authenticated user retrieval. Assumes BaseTest provides ``self.client``,
    ``self.headers`` and the ``_headers_with_*`` helpers — TODO confirm.
    """
    # Shared fixture data reused across the test methods below.
    username = 'testuser'
    password = 'password'
    email = 'testuser@test.com'
    first_name = 'test'
    last_name = 'user'
    def setUp(self):
        # Pre-bind the registration POST so each test only supplies `data=`.
        self.register = partial(self.client.post,
                                url_for('users.register_user', _external=True),
                                headers=self.headers)
    def tearDown(self):
        # Wipe all users so tests stay independent.
        User.query.delete()
        db.session.commit()
    def _create_user(self):
        """Persist and return a user with the class fixture credentials."""
        user = User(self.username, self.password, self.email)
        db.session.add(user)
        db.session.commit()
        return user
    def test_registration_missing_username(self):
        """
        Asserts that missing username responds with 400.
        """
        data = json.dumps({
            'password': self.password,
        })
        resp = self.register(data=data)
        self.assertEquals(resp.status_code, 400)
    def test_registration_missing_password(self):
        """
        Assert that missing password responds with 400.
        """
        data = json.dumps({
            'username': self.username,
        })
        resp = self.register(data=data)
        self.assertEquals(resp.status_code, 400)
    def test_user_already_exists(self):
        """
        Assert that trying to register with a username that already exists
        responds with 409.
        """
        user = self._create_user()
        # NOTE(review): _create_user already adds and commits; this second
        # add/commit is redundant but harmless.
        db.session.add(user)
        db.session.commit()
        data = json.dumps({
            'username': self.username,
            'password': self.password,
            'email': self.email,
        })
        resp = self.register(data=data)
        self.assertEquals(resp.status_code, 409)
    @mock.patch('app.api.users.users.send_email')
    def test_registration_success(self, send_email):
        """
        Happy path user registration.
        """
        # Stub out the confirmation e-mail so no mail is actually sent.
        send_email.return_value = mock.MagicMock()
        user = User(self.username, self.password, self.email)
        data = json.dumps({
            'username': self.username,
            'password': self.password,
            'email': self.email,
            'firstName': self.first_name,
            'lastName': self.last_name
        })
        resp = self.register(data=data)
        self.assertEquals(resp.status_code, 200)
        # The response should mirror the serialized user plus its URI.
        expected_return_obj = user.serialize
        expected_return_obj['uri'] = url_for('users.get_user',
                                             username=self.username,
                                             _external=True)
        self.assertEquals(resp.json['username'],
                          expected_return_obj['username'])
        self.assertEquals(resp.json['firstName'],
                          expected_return_obj['firstName'])
        self.assertEquals(resp.json['lastName'],
                          expected_return_obj['lastName'])
        self.assertEquals(resp.json['uri'], expected_return_obj['uri'])
    def test_confirm_registration_success(self):
        """Happy Path user registration confirmation."""
        user = self._create_user()
        registration_code = user.registration_code
        data = json.dumps({
            'registrationCode': registration_code
        })
        resp = self.client.post(
            url_for('users.confirm_registration', username=self.username,
                    _external=True),
            headers=self._headers_with_username_password(
                username=self.username,
                password=self.password), data=data
        )
        self.assertEquals(resp.status_code, 200)
    def test_confirm_registration_wrong_auth(self):
        """Test that incorrect authentication for confirm registration
        responds with 401."""
        user = self._create_user()
        registration_code = user.registration_code
        data = json.dumps({
            'registrationCode': registration_code
        })
        resp = self.client.post(
            url_for('users.confirm_registration',
                    username=self.username,
                    _external=True),
            headers=self._headers_with_username_password(
                username=self.username,
                password='incorrect'), data=data
        )
        self.assertEquals(resp.status_code, 401)
    def test_confirm_registration_wrong_registration_code(self):
        """Confirm that providing the wrong registration code
        responds with 400."""
        self._create_user()
        # 1 is assumed never to be a valid registration code.
        data = json.dumps({
            'registrationCode': 1
        })
        resp = self.client.post(
            url_for('users.confirm_registration',
                    username=self.username,
                    _external=True),
            headers=self._headers_with_username_password(
                username=self.username,
                password=self.password), data=data
        )
        self.assertEquals(resp.status_code, 400)
    def test_get_auth_token_and_get_user(self):
        """A valid basic-auth token request yields a token usable for GETs."""
        self._create_user()
        resp = self.client.get(
            url_for('users.get_auth_token', _external=True),
            headers=self._headers_with_username_password(
                self.username, self.password)
        )
        self.assertEquals(resp.status_code, 200)
        token = resp.json['token']
        resp = self.client.get(
            url_for('users.get_user',
                    username=self.username,
                    _external=True),
            headers=self._headers_with_auth_token(token=token))
        self.assertEquals(resp.status_code, 200)
    def test_get_user_bad_auth(self):
        """An invalid token must be rejected with 401."""
        resp = self.client.get(
            url_for('users.get_user',
                    username=self.username,
                    _external=True),
            headers=self._headers_with_auth_token(token='bad_token'))
        self.assertEquals(resp.status_code, 401)
    def test_get_token_bad_password(self):
        """A wrong password must not yield a token (401)."""
        resp = self.client.get(
            url_for('users.get_auth_token', _external=True),
            headers=self._headers_with_username_password(
                self.username, 'bad_password')
        )
        self.assertEquals(resp.status_code, 401)
| {
"repo_name": "chamilto/flask_starter_pack",
"path": "tests/test_users.py",
"copies": "1",
"size": "6380",
"license": "mit",
"hash": 1967650177789387800,
"line_mean": 32.5789473684,
"line_max": 79,
"alpha_frac": 0.5631661442,
"autogenerated": false,
"ratio": 4.3758573388203015,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 190
} |
from functools import partial
import json
import tornado.web
from tornado.autoreload import _reload
import os
IMAGE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')
class JSONHandler(tornado.web.RequestHandler):
    """RequestHandler base class whose responses are always JSON."""
    def __init__(self, application, request, **kwargs):
        super(JSONHandler, self).__init__(application, request, **kwargs)
        self.set_header('Content-Type', 'application/json')
    def _get_remote_address(self):
        """Return the IP address the request came from."""
        return self.request.remote_ip
    def _get_request_args(self):
        """Decode each request argument's first value to utf-8, in place,
        and return the (mutated) arguments dict."""
        arguments = self.request.arguments
        for name in arguments:
            arguments[name] = arguments[name][0].decode("utf-8")
        return arguments
class BasicImageHandler(tornado.web.RequestHandler):
    """Serves a sample image matching the request's Accept header
    (or an explicitly requested content type)."""
    def _set_header(self, content_type=None):
        """Pick the response Content-Type and image path.

        With ``content_type=None`` the choice is negotiated from the
        Accept header (webp > jpeg > png); otherwise the given type is
        used directly. Sets status 404 when nothing matches.
        """
        headers = self.request.headers
        accept = headers['accept'].lower()
        # Maps a negotiable content type to its sample image file.
        self.content_type2img = {
            'image/webp': "wolf_1.webp",
            "image/jpeg": "jackal.jpg",
            "image/png": "pig_icon.png",
            "image/*": "pig_icon.png"
        }
        if content_type is None:
            if 'image/webp' in accept:
                self.set_header('Content-Type', 'image/webp')
                self.path = os.path.join(IMAGE_DIR, "wolf_1.webp")
            elif 'image/jpeg' in accept:
                self.set_header('Content-Type', 'image/jpeg')
                self.path = os.path.join(IMAGE_DIR, "jackal.jpg")
            elif 'image/png' in accept or 'image/*' in accept:
                # BUGFIX: this branch previously set the Content-Type to the
                # file name 'pig_icon.png' and never assigned self.path, so
                # _write crashed with AttributeError for png clients.
                self.set_header('Content-Type', 'image/png')
                self.path = os.path.join(IMAGE_DIR, "pig_icon.png")
            else:
                self.set_status(404)
        else:
            assert content_type in self.content_type2img
            self.set_header('Content-Type', content_type)
            self.path = os.path.join(IMAGE_DIR, self.content_type2img[content_type])
    def _write(self):
        """Send the chosen image bytes, or a 404 error page."""
        if self.get_status() != 404:
            self.write(open(self.path, 'rb').read())
        else:
            self.write_error(404)
def pretty_json(*args, **kwargs):
    """json.dumps preconfigured for human-readable output (4-space indent,
    ', '/': ' separators); call-site keyword arguments override the defaults."""
    options = {'indent': 4, 'separators': (',', ': ')}
    options.update(kwargs)
    return json.dumps(*args, **options)
def reload(signo, frame):
_reload() | {
"repo_name": "josephok/httpbin2",
"path": "utils.py",
"copies": "1",
"size": "2118",
"license": "bsd-2-clause",
"hash": 4601753203503716000,
"line_mean": 31.6,
"line_max": 84,
"alpha_frac": 0.581680831,
"autogenerated": false,
"ratio": 3.632933104631218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4714613935631218,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import json
from django.http import HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseBadRequest
from rip.django_adapter import django_response_builder, \
action_resolver
from rip.django_adapter import api_request_builder
def handle_api_call(http_request, url, api):
    """Dispatch a Django HTTP request to the matching rip API action.

    Returns 404 for unknown resources, 405 when no action matches the
    HTTP method, 400 for malformed request data; otherwise executes the
    resolved action and converts its response to a Django HttpResponse.
    """
    if not action_resolver.is_valid_resource(url, api):
        return HttpResponseNotFound()
    action = action_resolver.resolve_action(http_request, url, api)
    if action is None:
        # we could not resolve what action to call for this http request.
        # return method not allowed response
        return HttpResponseNotAllowed("%s:%s" % (url, http_request.method))
    request_body = http_request.read()
    request_data = api_request_builder.build_request_data(request_body, http_request.META)
    if request_data.get('error_message'):
        # Body could not be parsed; echo the error details back as JSON.
        return HttpResponseBadRequest(
            json.dumps(request_data), content_type='application/json')
    request = api_request_builder.build_request(http_request=http_request,
                                                url=url, api=api,
                                                request_data=request_data,
                                                request_body=request_body)
    response = action(request)
    http_response = django_response_builder.build_http_response(
        http_request, response)
    return http_response
def create_http_handler(api):
    """Return a Django view callable: ``handle_api_call`` with *api*
    pre-bound, so the urlconf can call it with just (http_request, url)."""
    return partial(handle_api_call, api=api)
| {
"repo_name": "Aplopio/rip",
"path": "rip/django_adapter/django_http_handler.py",
"copies": "2",
"size": "1501",
"license": "mit",
"hash": -956374395139568800,
"line_mean": 36.525,
"line_max": 92,
"alpha_frac": 0.6615589607,
"autogenerated": false,
"ratio": 4.363372093023256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00054354247902635,
"num_lines": 40
} |
from functools import partial
import json
import numpy as np
from pandas import Series
import pyproj
from shapely.geometry import shape, Point
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform
from geopandas.plotting import plot_series
from geopandas.base import GeoPandasBase, _series_unary_op, _CoordinateIndexer
def _is_empty(x):
    """Return the ``is_empty`` flag of a geometry-like object *x*.

    Objects without a usable ``is_empty`` attribute (None, NaN, plain
    scalars, ...) are reported as non-empty (False).
    """
    try:
        return x.is_empty
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any ordinary failure still means
        # "not an empty geometry".
        return False
class GeoSeries(GeoPandasBase, Series):
    """A Series object designed to store shapely geometry objects."""
    # Attributes that pandas propagates through operations (see
    # ``__finalize__`` below).
    _metadata = ['name', 'crs']
    def __new__(cls, *args, **kwargs):
        # ``crs`` is consumed by __init__; strip it here so Series.__new__
        # never sees an unknown keyword.
        kwargs.pop('crs', None)
        arr = Series.__new__(cls)
        if type(arr) is GeoSeries:
            return arr
        else:
            return arr.view(GeoSeries)
    def __init__(self, *args, **kwargs):
        # fix problem for scalar geometries passed, ensure the list of
        # scalars is of correct length if index is specified
        if len(args) == 1 and isinstance(args[0], BaseGeometry):
            # Broadcast the single geometry across the (optional) index.
            n = len(kwargs.get('index', [1]))
            args = ([args[0]] * n,)
        crs = kwargs.pop('crs', None)
        super(GeoSeries, self).__init__(*args, **kwargs)
        self.crs = crs
        self._invalidate_sindex()
    def append(self, *args, **kwargs):
        """Concatenate with another (Geo)Series; result is a GeoSeries."""
        return self._wrapped_pandas_method('append', *args, **kwargs)
    @property
    def geometry(self):
        # A GeoSeries is its own geometry column.
        return self
    @property
    def x(self):
        """Return the x location of point geometries in a GeoSeries"""
        if (self.geom_type == "Point").all():
            return _series_unary_op(self, 'x', null_value=np.nan)
        else:
            message = "x attribute access only provided for Point geometries"
            raise ValueError(message)
    @property
    def y(self):
        """Return the y location of point geometries in a GeoSeries"""
        if (self.geom_type == "Point").all():
            return _series_unary_op(self, 'y', null_value=np.nan)
        else:
            message = "y attribute access only provided for Point geometries"
            raise ValueError(message)
    @classmethod
    def from_file(cls, filename, **kwargs):
        """Alternate constructor to create a ``GeoSeries`` from a file.
        Can load a ``GeoSeries`` from a file from any format recognized by
        `fiona`. See http://toblerity.org/fiona/manual.html for details.
        Parameters
        ----------
        filename : str
            File path or file handle to read from. Depending on which kwargs
            are included, the content of filename may vary. See
            http://toblerity.org/fiona/README.html#usage for usage details.
        kwargs : key-word arguments
            These arguments are passed to fiona.open, and can be used to
            access multi-layer data, data stored within archives (zip files),
            etc.
        """
        import fiona
        geoms = []
        with fiona.open(filename, **kwargs) as f:
            crs = f.crs
            for rec in f:
                geoms.append(shape(rec['geometry']))
        g = GeoSeries(geoms)
        g.crs = crs
        return g
    @property
    def __geo_interface__(self):
        """Returns a ``GeoSeries`` as a python feature collection.
        Implements the `geo_interface`. The returned python data structure
        represents the ``GeoSeries`` as a GeoJSON-like ``FeatureCollection``.
        Note that the features will have an empty ``properties`` dict as they
        don't have associated attributes (geometry only).
        """
        from geopandas import GeoDataFrame
        return GeoDataFrame({'geometry': self}).__geo_interface__
    def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
        """Write this GeoSeries to *filename* by delegating to
        ``GeoDataFrame.to_file`` (the index is stored in an ``id`` column)."""
        from geopandas import GeoDataFrame
        data = GeoDataFrame({"geometry": self,
                             "id":self.index.values},
                            index=self.index)
        data.crs = self.crs
        data.to_file(filename, driver, **kwargs)
    #
    # Implement pandas methods
    #
    @property
    def _constructor(self):
        # Tells pandas which class to use when constructing results.
        return GeoSeries
    def _wrapped_pandas_method(self, mtd, *args, **kwargs):
        """Wrap a generic pandas method to ensure it returns a GeoSeries"""
        val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)
        if type(val) == Series:
            # Re-brand the plain Series result and carry the crs over.
            val.__class__ = GeoSeries
            val.crs = self.crs
            val._invalidate_sindex()
        return val
    def __getitem__(self, key):
        return self._wrapped_pandas_method('__getitem__', key)
    def sort_index(self, *args, **kwargs):
        return self._wrapped_pandas_method('sort_index', *args, **kwargs)
    def take(self, *args, **kwargs):
        return self._wrapped_pandas_method('take', *args, **kwargs)
    def select(self, *args, **kwargs):
        return self._wrapped_pandas_method('select', *args, **kwargs)
    @property
    def _can_hold_na(self):
        # Geometries have no native pandas NA representation; missing
        # values are modelled as empty geometries instead (see isna).
        return False
    def __finalize__(self, other, method=None, **kwargs):
        """ propagate metadata from other to self """
        # NOTE: backported from pandas master (upcoming v0.13)
        for name in self._metadata:
            object.__setattr__(self, name, getattr(other, name, None))
        return self
    def copy(self, order='C'):
        """
        Make a copy of this GeoSeries object
        Parameters
        ----------
        order : str, default 'C'
            Memory layout order passed to ``numpy.ndarray.copy`` for the
            underlying values.
        Returns
        -------
        copy : GeoSeries
        """
        # FIXME: this will likely be unnecessary in pandas >= 0.13
        return GeoSeries(self.values.copy(order), index=self.index,
                         name=self.name).__finalize__(self)
    def isna(self):
        """
        N/A values in a GeoSeries can be represented by empty geometric
        objects, in addition to standard representations such as None and
        np.nan.
        Returns
        -------
        A boolean pandas Series of the same size as the GeoSeries,
        True where a value is N/A.
        See Also
        --------
        GeoSeries.notna : inverse of isna
        """
        non_geo_null = super(GeoSeries, self).isnull()
        val = self.apply(_is_empty)
        return np.logical_or(non_geo_null, val)
    def isnull(self):
        """Alias for `isna` method. See `isna` for more detail."""
        return self.isna()
    def notna(self):
        """
        N/A values in a GeoSeries can be represented by empty geometric
        objects, in addition to standard representations such as None and
        np.nan.
        Returns
        -------
        A boolean pandas Series of the same size as the GeoSeries,
        False where a value is N/A.
        See Also
        --------
        GeoSeries.isna : inverse of notna
        """
        return ~self.isna()
    def notnull(self):
        """Alias for `notna` method. See `notna` for more detail."""
        return self.notna()
    def fillna(self, value=None, method=None, inplace=False,
               **kwargs):
        """Fill NA/NaN values with a geometry (empty polygon by default).
        "method" is currently not implemented for pandas <= 0.12.
        """
        if value is None:
            # BaseGeometry() acts as the "empty geometry" filler.
            value = BaseGeometry()
        return super(GeoSeries, self).fillna(value=value, method=method,
                                             inplace=inplace, **kwargs)
    def align(self, other, join='outer', level=None, copy=True,
              fill_value=None, **kwargs):
        """Align two series on their indexes, filling gaps with empty
        geometries by default (wraps pandas ``Series.align``)."""
        if fill_value is None:
            fill_value = BaseGeometry()
        left, right = super(GeoSeries, self).align(other, join=join,
                                                   level=level, copy=copy,
                                                   fill_value=fill_value,
                                                   **kwargs)
        if isinstance(other, GeoSeries):
            return GeoSeries(left), GeoSeries(right)
        else: # It is probably a Series, let's keep it that way
            return GeoSeries(left), right
    def __contains__(self, other):
        """Allow tests of the form "geom in s"
        Tests whether a GeoSeries contains a geometry.
        Note: This is not the same as the geometric method "contains".
        """
        if isinstance(other, BaseGeometry):
            return np.any(self.geom_equals(other))
        else:
            return False
    def plot(self, *args, **kwargs):
        """Generate a plot of the geometries in the ``GeoSeries``.
        Wraps the ``plot_series()`` function, and documentation is copied from
        there.
        """
        return plot_series(self, *args, **kwargs)
    plot.__doc__ = plot_series.__doc__
    #
    # Additional methods
    #
    def to_crs(self, crs=None, epsg=None):
        """Returns a ``GeoSeries`` with all geometries transformed to a new
        coordinate reference system.
        Transform all geometries in a GeoSeries to a different coordinate
        reference system. The ``crs`` attribute on the current GeoSeries must
        be set. Either ``crs`` in string or dictionary form or an EPSG code
        may be specified for output.
        This method will transform all points in all objects. It has no notion
        or projecting entire geometries. All segments joining points are
        assumed to be lines in the current projection, not geodesics. Objects
        crossing the dateline (or other projection boundary) will have
        undesirable behavior.
        Parameters
        ----------
        crs : dict or str
            Output projection parameters as string or in dictionary form.
        epsg : int
            EPSG code specifying output projection.
        """
        from fiona.crs import from_epsg
        if self.crs is None:
            raise ValueError('Cannot transform naive geometries. '
                             'Please set a crs on the object first.')
        if crs is None:
            try:
                crs = from_epsg(epsg)
            except TypeError:
                raise TypeError('Must set either crs or epsg for output.')
        proj_in = pyproj.Proj(self.crs, preserve_units=True)
        proj_out = pyproj.Proj(crs, preserve_units=True)
        # Re-project every coordinate of every geometry.
        project = partial(pyproj.transform, proj_in, proj_out)
        result = self.apply(lambda geom: transform(project, geom))
        result.__class__ = GeoSeries
        result.crs = crs
        result._invalidate_sindex()
        return result
    def to_json(self, **kwargs):
        """
        Returns a GeoJSON string representation of the GeoSeries.
        Parameters
        ----------
        *kwargs* that will be passed to json.dumps().
        """
        return json.dumps(self.__geo_interface__, **kwargs)
    #
    # Implement standard operators for GeoSeries
    #
    def __xor__(self, other):
        """Implement ^ operator as for builtin set type"""
        return self.symmetric_difference(other)
    def __or__(self, other):
        """Implement | operator as for builtin set type"""
        return self.union(other)
    def __and__(self, other):
        """Implement & operator as for builtin set type"""
        return self.intersection(other)
    def __sub__(self, other):
        """Implement - operator as for builtin set type"""
        return self.difference(other)
# Register the coordinate-based `.cx` indexer on GeoSeries via pandas'
# indexer-registration hook.
GeoSeries._create_indexer('cx', _CoordinateIndexer)
| {
"repo_name": "ozak/geopandas",
"path": "geopandas/geoseries.py",
"copies": "1",
"size": "11473",
"license": "bsd-3-clause",
"hash": -8676075371085869000,
"line_mean": 32.4489795918,
"line_max": 79,
"alpha_frac": 0.5814521049,
"autogenerated": false,
"ratio": 4.360699353857849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006596132646834882,
"num_lines": 343
} |
from functools import partial
import json
try:
from urllib.request import urlopen
from urllib.parse import urlencode
except ImportError:
from urllib2 import urlopen
from urllib import urlencode
def utf8_encode(s):
    """Return *s* as bytes, UTF-8-encoding it when it is a text string."""
    if isinstance(s, bytes):
        return s
    return s.encode('utf8')
def utf8_encode_dict_values(d):
    """Return a copy of *d* whose values have been passed through
    :func:`utf8_encode`; keys are kept as-is."""
    encoded = {}
    for key, value in d.items():
        encoded[key] = utf8_encode(value)
    return encoded
# Raised when the Etherpad API returns a non-zero code or a malformed payload.
class EtherpadException(Exception): pass
class EtherpadLiteClient(object):
    """Minimal client for the Etherpad Lite HTTP API.

    Unknown attribute lookups become API calls, e.g.
    ``client.createPad(padID='p1')`` issues a ``createPad`` request.
    """

    def __init__(self, base_params=None, base_url='http://140.114.79.181:9001/api', # https://localhost:9001/api
                 api_version='1.2.13', timeout=20):
        """
        :param base_params: Parameters (e.g. the API key) sent with every
            request. Defaults to no extra parameters.
        :param base_url: Root URL of the Etherpad API endpoint.
        :param api_version: Etherpad API version segment of the URL.
        :param timeout: Per-request timeout in seconds.
        """
        self.api_version = api_version
        # Default is None rather than ``{}`` to avoid the shared
        # mutable-default-argument pitfall.
        if base_params is None:
            base_params = {}
        self.base_params = utf8_encode_dict_values(base_params)
        self.base_url = base_url
        self.timeout = timeout

    def __call__(self, path, **params):
        """POST to ``<base_url>/<api_version>/<path>`` and return the
        response's ``data`` field.

        :raises EtherpadException: when the response is not a dict or the
            API reports a non-zero ``code``.
        """
        params = utf8_encode_dict_values(params)
        data = urlencode(dict(self.base_params, **params)).encode('ascii')
        url = '%s/%s/%s' % (self.base_url, self.api_version, path)
        r = json.loads(urlopen(url, data, self.timeout).read().decode('utf-8'))
        if not r or not isinstance(r, dict):
            raise EtherpadException('API returned: %s' % r)
        if r.get('code') != 0:
            raise EtherpadException(r.get('message', r))
        return r.get('data')

    def __getattr__(self, name):
        # Any unknown attribute becomes a bound API method.
        return partial(self, name)
| {
"repo_name": "CHI2017LS/CLC_prototyping",
"path": "etherpad_lite/__init__.py",
"copies": "1",
"size": "1397",
"license": "mit",
"hash": 49184047609643630,
"line_mean": 31.488372093,
"line_max": 110,
"alpha_frac": 0.6299212598,
"autogenerated": false,
"ratio": 3.483790523690773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46137117834907726,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
from enum import Enum
from types import SimpleNamespace
from biothings.hub.databuild.backend import create_backend
from biothings.utils.es import ESIndexer
try:
from biothings.utils.mongo import doc_feeder
except ImportError:
import biothings
biothings.config = SimpleNamespace()
biothings.config.DATA_SRC_DATABASE = 'biothings_src'
biothings.config.DATA_TARGET_DATABASE = 'biothings_build'
from biothings.utils.mongo import doc_feeder
def _get_es_client(*args, **kwargs):
    # Factory wrapper; arguments are forwarded verbatim to ESIndexer so the
    # client can be created lazily (see dispatch_task / IndexingTask).
    return ESIndexer(*args, **kwargs)
def _get_mongo_client(backend_url):
    # Resolve the backend URL to its underlying mongo target collection.
    return create_backend(backend_url).target_collection
def dispatch_task(backend_url, ids, mode, name, *esargs, **eskwargs):
    """Build an IndexingTask with lazily-created backend clients, label it
    with *name* for logging, and run it."""
    es_factory = partial(_get_es_client, *esargs, **eskwargs)
    mongo_factory = partial(_get_mongo_client, backend_url)
    indexing_task = IndexingTask(es_factory, mongo_factory, ids, mode)
    indexing_task.name = str(name)
    return indexing_task.dispatch()
class Mode(Enum):
    """How a batch of documents should be processed (values are the
    strings accepted by IndexingTask/dispatch_task)."""
    INDEX = 'index'
    PURGE = 'purge' # same as 'index' in this module
    MERGE = 'merge'
    RESUME = 'resume'
class IndexingTask():
    """Index, merge or resume a batch of documents (``ids``) from a mongo
    collection into an Elasticsearch index.

    ``es`` and ``mongo`` are zero-argument callables returning,
    respectively, an ESIndexer and a mongo collection; clients are built
    lazily so the task's construction stays cheap and picklable-friendly.
    """

    def __init__(self, es, mongo, ids, mode='index'):
        assert callable(es)
        assert callable(mongo)
        self.ids = ids
        self.mode = Mode(mode)
        # these are functions to create clients,
        # each also associated with an organizational
        # structure in the corresponding database,
        # functioning as the source or destination
        # of batch document manipulation.
        self.backend = SimpleNamespace()
        self.backend.es = es  # wrt an index
        self.backend.mongo = mongo  # wrt a collection
        self.logger = logging.getLogger(__name__)
        self.name = ""  # for logging only

    def _get_clients(self):
        """Instantiate the ES and mongo clients from their factories."""
        clients = SimpleNamespace()
        clients.es = self.backend.es()
        clients.mongo = self.backend.mongo()
        return clients

    def dispatch(self):
        """Run the operation selected by ``self.mode``."""
        try:
            if self.mode in (Mode.INDEX, Mode.PURGE):
                return self.index()
            elif self.mode == Mode.MERGE:
                return self.merge()
            elif self.mode == Mode.RESUME:
                return self.resume()
        except Exception:
            self.logger.error("Batch %s indexing failed.", self.name)
            raise

    def index(self):
        """Bulk-index all documents in ``self.ids`` from mongo into ES."""
        clients = self._get_clients()
        docs = doc_feeder(
            clients.mongo,
            step=len(self.ids),
            inbatch=False,
            query={'_id': {
                '$in': self.ids
            }})
        cnt = clients.es.index_bulk(docs)
        return cnt

    def merge(self):
        """Merge mongo documents into any already-indexed counterparts.

        Documents present in both stores are merged field-wise before
        re-indexing; documents only found in mongo are indexed as new.
        Returns a combined ``(count, errors)`` tuple.
        """
        clients = self._get_clients()
        docs_old = {}
        docs_new = {}
        # populate docs_old with what is already in the index.
        # BUG FIX: was `docs_old['_id'] = doc`, which stored only the last
        # document under the literal key '_id', so existing documents were
        # never matched below.
        for doc in clients.es.get_docs(self.ids):
            docs_old[doc['_id']] = doc
        # populate docs_new from the mongo collection
        for doc in doc_feeder(
                clients.mongo,
                step=len(self.ids),
                inbatch=False,
                query={'_id': {
                    '$in': self.ids
                }}):
            docs_new[doc['_id']] = doc
            doc.pop("_timestamp", None)
        # merge existing ids.
        # BUG FIX: iterate over a snapshot -- deleting from docs_new while
        # iterating it raises RuntimeError -- and merge into the matching
        # old document instead of into the outer docs_old dict itself.
        for key in list(docs_new):
            if key in docs_old:
                docs_old[key].update(docs_new[key])
                del docs_new[key]
        # updated docs (those existing in col *and* index)
        upd_cnt = clients.es.index_bulk(docs_old.values(), len(docs_old))
        self.logger.debug("%s documents updated in index", str(upd_cnt))
        # new docs (only in col, *not* in index)
        new_cnt = clients.es.index_bulk(docs_new.values(), len(docs_new))
        self.logger.debug("%s new documents in index", str(new_cnt))
        # need to return one: tuple(cnt,list)
        return (upd_cnt[0] + new_cnt[0], upd_cnt[1] + new_cnt[1])

    def resume(self):
        """Index only those ids not already present in the index."""
        clients = self._get_clients()
        missing_ids = [x[0] for x in clients.es.mexists(self.ids) if not x[1]]
        if missing_ids:
            self.ids = missing_ids
            return self.index()
        return (0, None)
def test_clients():
    """Build (es_factory, mongo_factory) against local dev services, for
    the manual smoke tests below. Requires a local ES and MongoDB."""
    from functools import partial
    from biothings.utils.es import ESIndexer
    from pymongo import MongoClient
    def _pymongo():
        # Target a fixed build collection on the default local mongod.
        client = MongoClient()
        database = client["biothings_build"]
        return database["mynews_202012280220_vsdevjdk"]
    return (
        partial(ESIndexer, "indexer-test"),
        _pymongo
    )
def test0():
    """Manual smoke test: plain indexing of three known document ids."""
    task = IndexingTask(*test_clients(), (
        "0999b13cb8026aba",
        "1111647aaf9c70b4",
        "1c9828073bad510c"))
    task.index()
def test1():
    """Manual smoke test: resume-mode indexing of five known document ids
    (only those missing from the index get indexed)."""
    task = IndexingTask(*test_clients(), (
        "0999b13cb8026aba",
        "1111647aaf9c70b4",
        "1c9828073bad510c",
        "1f447d7fc6dcc2cf",
        "27e81a308e4e04da"))
    task.resume()
if __name__ == '__main__':
    logging.basicConfig(level='DEBUG')
    # Manual smoke tests; they need a local ES + MongoDB, hence disabled.
    # test0()
    # test1()
| {
"repo_name": "biothings/biothings.api",
"path": "biothings/hub/dataindex/indexer_task.py",
"copies": "1",
"size": "5013",
"license": "apache-2.0",
"hash": 39704291526830370,
"line_mean": 27.6457142857,
"line_max": 78,
"alpha_frac": 0.5765010971,
"autogenerated": false,
"ratio": 3.741044776119403,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9808448981265369,
"avg_score": 0.0018193783908069625,
"num_lines": 175
} |
from functools import partial
import logging
import numpy as np
import time
from qcodes.instrument.parameter import ManualParameter
from qcodes import Instrument
from qcodes.utils import validators as vals
log = logging.getLogger(__name__)
class VirtualSIM928(Instrument):
    """
    A virtual driver, emulating the Stanford Research Systems SIM 928 DC source
    modules installed in a SIM900 mainframe. Output voltages are kept in an
    in-memory cache; no hardware communication takes place.
    Args:
        name (str): An identifier for this instrument, particularly for
            attaching it to a ``Station``.
        slot_names (Dict[int]): A dictionary that optionally maps slot numbers
            to user-defined module names. Default ``{}``.
        kw: Additional keyword arguments forwarded to the qcodes
            ``Instrument`` base class (e.g. ``metadata``).
    """
    def __init__(self, name, slot_names=None, **kw):
        super().__init__(name, **kw)
        if slot_names is None:
            self.slot_names = {}
        else:
            self.slot_names = slot_names
        # Reverse map: user-defined module name -> slot number.
        self.module_nr = {}
        for i in self.slot_names:
            if self.slot_names[i] in self.module_nr:
                raise ValueError('Duplicate names in slot_names')
            self.module_nr[self.slot_names[i]] = i
        self.modules = self.find_modules()
        # One IDN/volt/volt-step parameter triple per detected module.
        for i in self.modules:
            module_name = self.slot_names.get(i, i)
            self.add_parameter('IDN_{}'.format(module_name),
                               label="IDN of module {}".format(module_name),
                               get_cmd=partial(self.get_module_idn, i))
            self.add_parameter('volt_{}'.format(module_name), unit='V',
                               label="Output voltage of module "
                                     "{}".format(module_name),
                               vals=vals.Numbers(-20, 20),
                               get_cmd=partial(self.get_voltage, i),
                               set_cmd=partial(self.set_voltage, i))
            self.add_parameter('volt_{}_step'.format(module_name), unit='V',
                               label="Step size when changing the voltage "
                                     "smoothly on module "
                                     "{}".format(module_name),
                               parameter_class=ManualParameter,
                               vals=vals.Numbers(0, 20), initial_value=0.005)
        self.add_parameter('smooth_timestep', unit='s',
                           label="Delay between sending the write commands"
                                 "when changing the voltage smoothly",
                           parameter_class=ManualParameter,
                           vals=vals.Numbers(0, 1), initial_value=0.05)
        # In-memory voltage cache standing in for the hardware outputs.
        self._voltages = {i: 0 for i in self.modules}
    def get_module_idn(self, i):
        """
        Get the vendor, model, serial number and firmware version of a module.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module whose id is returned.
        Returns:
            A dict containing vendor, model, serial, and firmware.
        """
        if not isinstance(i, int):
            i = self.module_nr[i]
        return dict(vendor='SRS', model='virtual SIM928',
                    serial=i, firmware=None)
    def find_modules(self):
        """
        Query the SIM900 mainframe for which slots have a SIM928 module present.
        In the virtual version, all modules are present.
        Returns:
            A list of slot numbers where a SIM928 module is present (starting
            from 1)
        """
        return list(range(1, 10))
    def set_voltage(self, i, voltage):
        """
        Set the output voltage of a module.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module to set the voltage of.
            voltage (float): The value to set the voltage to.
        """
        if not isinstance(i, int):
            name = i
            i = self.module_nr[i]
        else:
            name = self.slot_names.get(i, i)
        self._voltages[i] = voltage
        # Keep the qcodes parameter cache in sync with the virtual output.
        self.parameters['volt_{}'.format(name)]._save_val(voltage)
    def get_voltage(self, i):
        """
        Get the output voltage of a module.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module to get the voltage of.
        Returns:
            The current voltage of module ``i`` as a ``float``.
        """
        if not isinstance(i, int):
            i = self.module_nr[i]
        return self._voltages[i]
    def set_smooth(self, voltagedict, equitime=False):
        """
        Set the voltages as specified in ``voltagedict` smoothly,
        by changing the output on each module at a rate
        ``volt_#_step/smooth_timestep``.
        Args:
            voltagedict (Dict[float]): A dictionary where keys are module slot
                numbers or names and values are the desired output voltages.
            equitime (bool): If ``True``, uses smaller step sizes for some of
                the modules so that all modules reach the desired value at the
                same time.
        """
        # convert voltagedict to contain module names only and validate inputs
        vdict = {}
        for i in voltagedict:
            if not isinstance(i, int):
                if self.module_nr[i] not in self.modules:
                    raise KeyError('There is no module named {}'.format(i))
                name = i
            else:
                if i not in self.modules:
                    raise KeyError('There is no module in slot {}'.format(i))
                name = self.slot_names.get(i, i)
            vdict[name] = voltagedict[i]
            self.parameters['volt_{}'.format(name)].validate(vdict[name])
        # `intermediate` is a list of per-step target dicts; it is then
        # replayed with a fixed delay between steps.
        intermediate = []
        if equitime:
            # Scale each module's step so all modules finish together.
            maxsteps = 0
            deltav = {}
            for i in vdict:
                deltav[i] = vdict[i]-self.get('volt_{}'.format(i))
                stepsize = self.get('volt_{}_step'.format(i))
                steps = abs(int(np.ceil(deltav[i]/stepsize)))
                if steps > maxsteps:
                    maxsteps = steps
            for s in range(maxsteps):
                intermediate.append({})
                for i in vdict:
                    intermediate[-1][i] = vdict[i] - \
                        deltav[i]*(maxsteps-s-1)/maxsteps
        else:
            # Each module ramps at its own step size; modules that reach
            # their target drop out of subsequent steps.
            done = []
            prevvals = {}
            for i in vdict:
                prevvals[i] = self.get('volt_{}'.format(i))
            while len(done) != len(vdict):
                intermediate.append({})
                for i in vdict:
                    if i in done:
                        continue
                    stepsize = self.get('volt_{}_step'.format(i))
                    deltav = vdict[i]-prevvals[i]
                    if abs(deltav) <= stepsize:
                        intermediate[-1][i] = vdict[i]
                        done.append(i)
                    elif deltav > 0:
                        intermediate[-1][i] = prevvals[i] + stepsize
                    else:
                        intermediate[-1][i] = prevvals[i] - stepsize
                    prevvals[i] = intermediate[-1][i]
        for voltages in intermediate:
            for i in voltages:
                self.set_voltage(i, voltages[i])
            time.sleep(self.smooth_timestep())
    def get_module_status(self, i):
        """
        Gets and clears the status bytes corresponding to the registers ESR,
        CESR and OVSR of module ``i``.
        The virtual driver never reports errors: all status bytes are zero.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module to get the status of.
        Returns:
            int, int, int: The bytes corresponding to standard event,
            communication error and overload statuses of module ``i``
        """
        stdevent = 0
        commerr = 0
        overload = 0
        return stdevent, commerr, overload
    def reset_module(self, i):
        """
        Sends the SIM Reset signal to module i.
        Causes a break signal (MARK level) to be asserted for 100 milliseconds
        to module i. Upon receiving the break signal the modul will flush its
        internal input buffer, reset its command parser, and default to 9600
        baud communications.
        No-op in the virtual driver.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module to reset.
        """
        pass
    def check_module_errors(self, i, raiseexc=True):
        """
        Check if any errors have occurred on module ``i`` and clear the status
        registers.
        Args:
            i (int/str): Slot number or module name (as in ``slot_names``)
                of the module to check the error of.
            raiseexc (bool): If true, raises an exception if any errors have
                occurred. Default ``True``.
        Returns:
            list[str]: A list of strings with the error messages that have
            occurred.
        """
        # NOTE(review): the virtual get_module_status always returns zeros,
        # so the branches below are effectively dead code kept for parity
        # with the real SIM928 driver. If a DDE/EXE/CME flag were ever set,
        # the `self.ask_module` calls would fail: that method is not defined
        # on this class -- confirm against the real (non-virtual) driver.
        stdevent, commerr, overload = self.get_module_status(i)
        OPC, INP, QYE, DDE, EXE, CME, URQ, PON \
            = self.byte_to_bits(int(stdevent))
        PARITY, FRAME, NOISE, HWOVRN, OVR, RTSH, CTSH, DCAS \
            = self.byte_to_bits(int(commerr))
        Overload, Overvoltage, BatSwitch, BatFault, _, _, _, _ \
            = self.byte_to_bits(int(overload))
        errors = []
        warnings = []
        if INP:
            errors.append('Input Buffer Error.')
        if QYE:
            errors.append('Query Error.')
        if DDE:
            code = self.ask_module(i, 'LDDE?')
            errors.append('Device Dependant Error: {}.'.format(code))
        if EXE:
            code = self.ask_module(i, 'LEXE?')
            msg = {0: 'No error',
                   1: 'Illegal value',
                   2: 'Wrong token',
                   3: 'Invalid bit'}.get(int(code), 'Unknown')
            if int(code) > 3 or int(code) == 0:
                warnings.append('Execution Error: {} ({}).'.format(msg, code))
            else:
                errors.append('Execution Error: {} ({}).'.format(msg, code))
        if CME:
            code = self.ask_module(i, 'LCME?')
            msg = {0: 'No error',
                   1: 'Illegal command',
                   2: 'Undefined command',
                   3: 'Illegal query',
                   4: 'Illegal set',
                   5: 'Missing parameter(s)',
                   6: 'Extra parameter(s)',
                   7: 'Null parameter(s)',
                   8: 'Parameter buffer overflow',
                   9: 'Bad floating-point',
                   10: 'Bad integer',
                   11: 'Bad integer token',
                   12: 'Bad token value',
                   13: 'Bad hex block',
                   14: 'Unknown token'}.get(int(code), 'Unknown')
            if int(code) > 14 or int(code) == 0:
                warnings.append('Command Error: {} ({}).'.format(msg, code))
            else:
                errors.append('Command Error: {} ({}).'.format(msg, code))
        if PARITY:
            errors.append('Parity Error.')
        if FRAME:
            errors.append('Framing Error.')
        if NOISE:
            errors.append('Noise Error.')
        if HWOVRN:
            errors.append('Hardware Overrun.')
        if OVR:
            errors.append('Input Buffer Overrun.')
        if RTSH:
            errors.append('Undefined Error (RTSH).')
        if CTSH:
            errors.append('Undefined Error (CTSH).')
        if Overload:
            errors.append('Current Overload.')
        if Overvoltage:
            errors.append('Voltage Overload.')
        if BatFault:
            errors.append('Battery Fault.')
        if raiseexc:
            if len(errors) != 0:
                raise Exception(' '.join(errors + warnings))
        return errors + warnings
    @staticmethod
    def byte_to_bits(x):
        """
        Convert an integer to a list of bits
        Args:
            x (int): The number to convert.
        Returns:
            list[bool]: A list of the lowest 8 bits of ``x`` where ``True``
            represents 1 and ``False`` 0.
        """
        bits = []
        for _ in range(8):
            if x & 1 != 0:
                bits.append(True)
            else:
                bits.append(False)
            x >>= 1
        return bits
| {
"repo_name": "QudevETH/PycQED_py3",
"path": "pycqed/instrument_drivers/virtual_instruments/virtual_SIM928.py",
"copies": "1",
"size": "12703",
"license": "mit",
"hash": 1046876763163879600,
"line_mean": 36.5828402367,
"line_max": 80,
"alpha_frac": 0.5061796426,
"autogenerated": false,
"ratio": 4.300270819228166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5306450461828166,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
import os
import sys
from prometheus_client import CollectorRegistry, Gauge, Histogram, Counter,\
Summary, pushadd_to_gateway, push_to_gateway
from ._timer import Timer
from ._util import maybe_labels, tags_to_labels
logger = logging.getLogger(__name__)
_version_info = sys.version_info
# True when async timing helpers are available (Python >= 3.5). Tuple
# comparison fixes the old `major >= 3 and minor >= 5` check, which would
# wrongly be False for a hypothetical version like 4.0.
_PY_35 = _version_info >= (3, 5)
class Stats:
    """A stats collector instance, used for collecting the metrics."""
    # Default pushgateway address; overridable per instance via the
    # constructor or globally via the environment variable.
    pushgateway_uri = os.environ.get(
        'PROMSTATS_PUSHGATEWAY_URI',
        'http://localhost:9112',
    )

    def __init__(
            self,
            job_name,
            pushgateway_handler=None,
            pushgateway_uri=None):
        """
        :param job_name (str): A Prometheus job name.
        :param pushgateway_handler (function): Optional custom handler
            for gateway push.
        :param pushgateway_uri: An optional pushgateway URI to use.
        """
        self.job_name = job_name
        # A private registry keeps this collector's metrics isolated from
        # other Stats instances and the process-global default registry.
        self.registry = CollectorRegistry()
        self.metrics = {}
        self.pushgateway_handler = pushgateway_handler
        if pushgateway_uri:
            self.pushgateway_uri = pushgateway_uri

    def _get_or_create_metric(self,
                              metric_name, metric_type, tags, verbose_name):
        """Get or create metric.
        :param metric_name (str): A metric name to use in scraper.
        :param metric_type (class): A metric type to instantiate.
        :param tags (list): An initial list of tags to pass to metric.
        :param verbose_name (str): A metric verbose name.
        """
        metric = self.metrics.get(metric_name)
        if metric is None:
            # Label *names* are fixed at creation time; the label *values*
            # are bound per call via maybe_labels().
            metric = metric_type(
                metric_name,
                verbose_name,
                tuple(tags_to_labels(tags).keys()),
                registry=self.registry,
            )
            self.metrics[metric_name] = metric
        return metric

    def increment(self, metric_name, tags=(), verbose_name='', value=1):
        """Increment the counter.
        :param metric_name (str): A metric name string.
        :param tags(list[str]): A list of tags to scrape.
        :param verbose_name: A metric verbose name.
        :param value: Amount to increment the counter by (default 1).
        """
        metric = self._get_or_create_metric(
            metric_name, Counter, tags, verbose_name)
        maybe_labels(metric, tags).inc(value)

    def gauge(self, metric_name, value, tags=(), verbose_name=''):
        """Register a gauge, an arbitrary numeric value.
        :param metric_name (str): A metric name string.
        :param tags(list[str]): A list of tags to scrape.
        :param verbose_name: A metric verbose name.
        """
        metric = self._get_or_create_metric(
            metric_name, Gauge, tags, verbose_name)
        maybe_labels(metric, tags).set(value)

    def summary(self, metric_name, value, tags=(), verbose_name=''):
        """Register a summary - an array of arbitrary numeric values.
        :param metric_name (str): A metric name string.
        :param tags(list[str]): A list of tags to scrape.
        :param verbose_name: A metric verbose name.
        """
        metric = self._get_or_create_metric(
            metric_name, Summary, tags, verbose_name)
        maybe_labels(metric, tags).observe(value)

    def histogram(self, metric_name, value, tags=(), verbose_name=''):
        """Register a histogram metric.
        :param metric_name (str): A metric name string.
        :param tags(list[str]): A list of tags to scrape.
        :param verbose_name: A metric verbose name.
        """
        metric = self._get_or_create_metric(
            metric_name, Histogram, tags, verbose_name)
        maybe_labels(metric, tags).observe(value)

    def timed(self, metric_name, tags=(), verbose_name=''):
        """A timing decorator/context manager, resulting into
        histogram of timings.
        :param metric_name (str): A metric name string.
        :param tags(list[str]): A list of tags to scrape.
        :param verbose_name: A metric verbose name.
        :returns Timer: A Timer instance, used for monitoring code execution.
        """
        metric = self._get_or_create_metric(
            metric_name, Histogram, tags, verbose_name)
        return Timer(metric, tags=tags)

    def _push(self, pusher_func):
        """Send this collector's registry to the gateway via *pusher_func*."""
        pusher = partial(pusher_func,
                         self.pushgateway_uri,
                         job=self.job_name,
                         registry=self.registry)
        if self.pushgateway_handler:
            pusher = partial(pusher, handler=self.pushgateway_handler)
        pusher()

    def push(self):
        """Push metrics to the gateway, replacing all metrics previously
        pushed for this job.
        """
        # BUG FIX: push() and pushadd() had their pusher functions swapped
        # (push used pushadd_to_gateway and vice versa).
        self._push(pusher_func=push_to_gateway)

    def pushadd(self):
        """PushAdd metrics to the gateway, merging with metrics previously
        pushed for this job.
        """
        self._push(pusher_func=pushadd_to_gateway)

    if _PY_35:
        def asynctimed(self, metric_name, tags=(), verbose_name=''):
            """An asynchronous timing decorator.
            Wrap it around awaitable function, to get timing of its usage.
            :param metric_name (str): A metric name string.
            :param tags(list[str]): A list of tags to scrape.
            :param verbose_name: A metric verbose name.
            :returns AsyncTimer: An async Timer instance,
                used for monitoring execution time of coroutines.
            """
            from ._async import AsyncTimer
            metric = self._get_or_create_metric(
                metric_name, Histogram, tags, verbose_name)
            return AsyncTimer(metric, tags=tags)
# Public API of the package.
__all__ = ['Stats', ]
| {
"repo_name": "Intel471/prom-stats",
"path": "promstats/__init__.py",
"copies": "1",
"size": "5649",
"license": "mit",
"hash": -6162724492865202000,
"line_mean": 34.753164557,
"line_max": 77,
"alpha_frac": 0.594795539,
"autogenerated": false,
"ratio": 4.12035010940919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215145648409191,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
import sys
import curio
from garage import asyncs
from garage.asyncs.utils import make_server_socket, serve
import http2
async def handle(sock, addr):
    """Serve one HTTP/2 connection: log each request and reply with the
    fixed body ``b'hello world'`` on every stream."""
    session = http2.Session(sock)
    # The session's serve() coroutine runs concurrently while we consume
    # its incoming streams; both are joined before returning.
    async with await asyncs.cancelling.spawn(session.serve()) as server:
        async for stream in session:
            logging.info('Request: %s %r',
                         stream.request.method.name,
                         stream.request.path.decode('utf8'))
            await stream.submit_response(http2.Response(body=b'hello world'))
        await server.join()
def main():
    """Entry point: ``prog port [server.crt server.key]``.
    When a certificate/key pair is given the server speaks TLS;
    otherwise it serves plain TCP.
    """
    if len(sys.argv) < 2:
        print('Usage: %s port [server.crt server.key]' % sys.argv[0])
        sys.exit(1)
    if len(sys.argv) >= 4:
        make_ssl_context = partial(
            http2.make_ssl_context, sys.argv[2], sys.argv[3])
    else:
        make_ssl_context = None
    curio.run(serve(
        asyncs.Event(),
        partial(make_server_socket, ('', int(sys.argv[1]))),
        handle,
        make_ssl_context=make_ssl_context,
    ))
if __name__ == '__main__':
    # Verbose logging is useful when debugging the HTTP/2 session.
    logging.basicConfig(level=logging.DEBUG)
    main()
| {
"repo_name": "clchiou/garage",
"path": "py/http2/examples/hello_world.py",
"copies": "1",
"size": "1162",
"license": "mit",
"hash": -5979846438749424000,
"line_mean": 26.023255814,
"line_max": 77,
"alpha_frac": 0.604130809,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4604130809,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
import threading
import time
class TimerTask(object):
    """A single scheduled job: wraps a callable (with bound arguments) and
    tracks whether it has been run."""

    def __init__(self, callable_, *args, **kwargs):
        # Bind the arguments now so run() needs no extra state.
        self._callable = partial(callable_, *args, **kwargs)
        self._finished = False

    def is_finished(self):
        """Return True once run() has been attempted (even if it failed)."""
        return self._finished

    def run(self):
        """Invoke the wrapped callable, logging (not propagating) failures."""
        try:
            self._callable()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            logging.exception('TimerTask failed')
        finally:
            self._finished = True
class Timer(threading.Thread):
"""An alternative to threading.Timer. Where threading.Timer spawns a
dedicated thread for each job, this class uses a single, long-lived thread
to process multiple jobs.
Jobs are scheduled with a delay value in seconds.
"""
def __init__(self, *args, **kwargs):
super(Timer, self).__init__(*args, **kwargs)
self.lock = threading.Condition()
self._jobs = []
self.die = False
def run_later(self, callable_, timeout, *args, **kwargs):
"""Schedules the specified callable for delayed execution.
Returns a TimerTask instance that can be used to cancel pending
execution.
"""
self.lock.acquire()
try:
if self.die:
raise RuntimeError('This timer has been shut down and '
'does not accept new jobs.')
job = TimerTask(callable_, *args, **kwargs)
self._jobs.append((job, time.time() + timeout))
self._jobs.sort(key=lambda j: j[1]) # sort on time
self.lock.notify()
return job
finally:
self.lock.release()
def cancel(self, timer_task):
self.lock.acquire()
try:
self._jobs = list(filter(lambda job: job[0] is not timer_task,
self._jobs))
self.lock.notify()
finally:
self.lock.release()
def shutdown(self, cancel_jobs=False):
self.lock.acquire()
try:
self.die = True
if cancel_jobs:
self._jobs = []
self.lock.notify()
finally:
self.lock.release()
def _get_sleep_time(self):
if not self._jobs:
return 0
else:
job, scheduled_at = self._jobs[0]
return scheduled_at - time.time()
def run(self):
while True:
self.lock.acquire()
job = None
try:
if not self._jobs:
if self.die:
break
else:
self.lock.wait()
elif self._get_sleep_time() > 0:
self.lock.wait(self._get_sleep_time())
else:
job, timeout = self._jobs.pop(0)
finally:
self.lock.release()
if job:
# invoke the task without holding the lock
job.run()
| {
"repo_name": "TurboGears/backlash",
"path": "backlash/tracing/slowrequests/timer.py",
"copies": "1",
"size": "3013",
"license": "mit",
"hash": 3507517143300797000,
"line_mean": 27.1588785047,
"line_max": 78,
"alpha_frac": 0.5074676402,
"autogenerated": false,
"ratio": 4.558245083207262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005841121495327102,
"num_lines": 107
} |
from functools import partial
import logging
import time
import requests
log = logging.getLogger(__name__)
DEFAULT_ROOT_RES_PATH = '/'
class HTTPResponse(object):
    """
    Thin wrapper over :class:`requests.Response`.

    Splits the ``Content-Type`` header on ``;`` and exposes the stripped
    fields as the :attr:`content_type` list; every other attribute access
    is delegated to the wrapped response.
    """

    def __init__(self, raw_response):
        self.raw_response = raw_response
        header_value = raw_response.headers.get('content-type', '')
        self.content_type = [field.strip()
                             for field in header_value.split(';')]

    def __getattr__(self, name):
        # Fall through to the underlying response object for anything not
        # defined on this wrapper.
        return getattr(self.raw_response, name)
class HTTPClient(object):
    """
    Convenience wrapper around Requests.

    :param str endpoint: URL for the API endpoint. E.g. ``https://blah.org``.
    :param dict extra_headers: When specified, these key-value pairs are added
        to the default HTTP headers passed in with each request.
    :param str oauth_path: Path used by :meth:`login` to refresh the OAuth
        token.
    :param str refresh_token: OAuth refresh token presented to the endpoint.
    """

    def __init__(
            self,
            endpoint='',
            extra_headers=None,
            oauth_path=None,
            refresh_token=None,
    ):
        self.endpoint = endpoint
        s = requests.session()

        # Disable keepalives. They're unsafe in threaded apps that potentially
        # re-use very old connection objects from the urllib3 connection pool.
        s.headers['Accept'] = 'application/json'
        s.headers['Connection'] = 'close'

        if extra_headers:
            s.headers.update(extra_headers)
        self.s = s

        # convenience methods
        self.delete = partial(self.request, 'delete')
        self.get = partial(self.request, 'get')
        self.head = partial(self.request, 'head')
        self.post = partial(self.request, 'post')
        self.put = partial(self.request, 'put')

        # keep track of when our auth token expires; None until the first
        # successful login() and treated as "expired" by request().
        self.oauth_path = oauth_path
        self.refresh_token = refresh_token
        self.auth_expires_at = None

    def login(self):
        """
        Gets and stores an OAUTH token from Rightscale.
        """
        log.debug('Logging into RightScale...')
        login_data = {
            'grant_type': 'refresh_token',
            'refresh_token': self.refresh_token,
        }
        response = self._request('post', self.oauth_path, data=login_data)
        raw_token = response.json()
        auth_token = "Bearer %s" % raw_token['access_token']
        self.s.headers['Authorization'] = auth_token

        # Generate an expiration time for our token of 60-seconds before the
        # standard time returned by RightScale. This will be used in the
        # self.client property to validate that our token is still usable on
        # every API call.
        log.debug('Auth Token expires in %s(s)' % raw_token['expires_in'])
        self.auth_expires_at = time.time() + int(raw_token['expires_in']) - 60

    def request(self, method, path='/', url=None, ignore_codes=(), **kwargs):
        """
        Wrapper for the ._request method that verifies if we're logged into
        RightScale before making a call, and sanity checks the oauth expiration
        time.

        :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)
        :param str path: A path component of the target URL. This will be
            appended to the value of ``self.endpoint``. If both :attr:`path`
            and :attr:`url` are specified, the value in :attr:`url` is used and
            the :attr:`path` is ignored.
        :param str url: The target URL (e.g. ``http://server.tld/somepath/``).
            If both :attr:`path` and :attr:`url` are specified, the value in
            :attr:`url` is used and the :attr:`path` is ignored.
        :param ignore_codes: Iterable of HTTP error codes (e.g. 404, 500) that
            should be ignored. If an HTTP error occurs and it is *not* in
            :attr:`ignore_codes`, then an exception is raised.
        :type ignore_codes: iterable of int
        :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

        Returns a :class:`requests.Response` object.
        """
        # On every call, check if we're both logged in, and if the token is
        # expiring. Bug fix: auth_expires_at starts as None, and on Python 3
        # comparing a float against None raises TypeError -- so treat None
        # explicitly as "never logged in".
        if self.auth_expires_at is None or time.time() > self.auth_expires_at:
            self.login()

        # Now make the actual API call
        return self._request(method, path, url, ignore_codes, **kwargs)

    def _request(self, method, path='/', url=None, ignore_codes=(), **kwargs):
        """
        Performs HTTP request.

        Same parameters as :meth:`request`, but performs no login/refresh
        handling. Note ``ignore_codes`` defaults to an immutable tuple
        (a mutable ``[]`` default is shared across calls).

        Returns an :class:`HTTPResponse` wrapping the raw response.
        """
        _url = url if url else (self.endpoint + path)
        r = self.s.request(method, _url, **kwargs)
        if not r.ok and r.status_code not in ignore_codes:
            r.raise_for_status()
        return HTTPResponse(r)
| {
"repo_name": "brantai/python-rightscale",
"path": "rightscale/httpclient.py",
"copies": "1",
"size": "5867",
"license": "mit",
"hash": 6036430246622865000,
"line_mean": 36.3694267516,
"line_max": 79,
"alpha_frac": 0.6001363559,
"autogenerated": false,
"ratio": 4.0184931506849315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5118629506584931,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
from django.conf import settings
from django.core.cache import caches
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from django.db import models
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from markitup.fields import MarkupField
from wafer.menu import refresh_menu_cache
logger = logging.getLogger(__name__)
class PageMarkupField(MarkupField):
    """MarkupField that uses our own render function"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # WAFER_PAGE_MARKITUP_FILTER is a (dotted_path, kwargs) pair: resolve
        # the dotted path to a callable and bind the configured kwargs.
        # (NB: this local ``kwargs`` shadows the constructor's ``kwargs``.)
        dotted_path, kwargs = settings.WAFER_PAGE_MARKITUP_FILTER
        module, func = dotted_path.rsplit('.', 1)
        func = getattr(__import__(module, {}, {}, [func]), func)
        self.render_func = partial(func, **kwargs)
    def pre_save(self, model_instance, add):
        # Render with our configured filter and store the result on the
        # companion rendered field that MarkupField maintains.
        value = super().pre_save(model_instance, add)
        rendered = self.render_func(value)
        rendered_field_name = getattr(model_instance, self.attname).rendered_field_name
        setattr(model_instance, rendered_field_name, rendered)
        return value
class File(models.Model):
    """A file for use in page markup."""
    # Field definitions are the DB schema and must stay as-is.
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    item = models.FileField(upload_to='pages_files')
    def __str__(self):
        return u'{0} ({1})'.format(self.name, self.item.url)
class Page(models.Model):
    """An extra page for the site."""
    name = models.CharField(max_length=255)
    slug = models.SlugField(help_text=_("Last component of the page URL"))
    parent = models.ForeignKey(
        'self', null=True, blank=True, on_delete=models.CASCADE, related_name="children")
    content = PageMarkupField(
        help_text=_("Markdown contents for the page."))
    include_in_menu = models.BooleanField(
        help_text=_("Whether to include in menus."),
        default=False)
    menu_order = models.PositiveSmallIntegerField(
        help_text=_("Ordering in the menu (smaller numbers come first)"),
        null=True,
        blank=True,
    )
    exclude_from_static = models.BooleanField(
        help_text=_("Whether to exclude this page from the static version of"
                    " the site (Container pages, etc.)"),
        default=False)
    files = models.ManyToManyField(
        File, related_name="pages", blank=True,
        help_text=_("Images and other files for use in"
                    " the content markdown field."))
    people = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='pages', blank=True,
        help_text=_("People associated with this page for display in the"
                    " schedule (Session chairs, panelists, etc.)"))
    cache_time = models.IntegerField(
        default=-1,
        help_text=_("Length of time (in seconds) to cache the page for "
                    "dynamic page content. A negative value means this page "
                    "is not dynamic and it will be not be regenerated "
                    "until it is next edited."))
    def __str__(self):
        return u'%s' % (self.name,)
    # Cache alias (from settings) used for rendered page content below.
    cache_name = settings.WAFER_CACHE
    def get_path(self):
        # Walk the parent chain to build [root_slug, ..., own_slug].
        path, parent = [self.slug], self.parent
        while parent is not None:
            path.insert(0, parent.slug)
            parent = parent.parent
        return path
    def get_absolute_url(self):
        # The root 'index' page maps to the bare page URL.
        if self.slug == 'index' and not self.parent:
            return reverse('wafer_page')
        url = "/".join(self.get_path())
        return reverse('wafer_page', args=(url,))
    def _cache_key(self):
        # Cache key is derived from the URL, so it is unique per page.
        return "wafer.pages:rendered:%s" % self.get_absolute_url()
    def cached_render(self):
        # Non-dynamic pages (cache_time < 0) serve the stored rendering.
        if self.cache_time < 0:
            return self.content.rendered
        cache = caches[self.cache_name]
        cache_key = self._cache_key()
        rendered = cache.get(cache_key)
        if rendered is None:
            # Re-render from the raw markdown via the field's filter.
            content_field = self._meta.get_field('content')
            rendered = content_field.render_func(self.content.raw)
            # Should reset the database copy, but this is enough for
            # now
            cache.set(cache_key, rendered, self.cache_time)
        return rendered
    def invalidate_cache(self):
        cache = caches[self.cache_name]
        cache.delete(self._cache_key())
    get_absolute_url.short_description = 'page url'
    def get_in_schedule(self):
        # True when any ScheduleItem references this page.
        if self.scheduleitem_set.all():
            return True
        return False
    def get_people_display_names(self):
        # "a", "a and b", or "a, b and c" style listing.
        names = [person.userprofile.display_name()
                 for person in self.people.all()]
        if len(names) > 2:
            comma_names = ', '.join(names[:-1])
            return comma_names + ' and ' + names[-1]
        else:
            return ' and '.join(names)
    get_in_schedule.short_description = 'Added to schedule'
    get_in_schedule.boolean = True
    get_people_display_names.short_description = 'People'
    # NOTE(review): this inner class looks like it was meant to be Django's
    # ``class Meta`` -- as written, ``unique_together`` here has no effect.
    # Uniqueness is instead enforced manually in validate_unique() below.
    # Renaming to Meta would require a migration; confirm intent first.
    class Model:
        unique_together = (('parent', 'slug'),)
    def clean(self):
        # Reject cycles in the parent chain (a page cannot be its own
        # ancestor).
        keys = [self.pk]
        parent = self.parent
        while parent is not None:
            if parent.pk in keys:
                raise ValidationError(
                    {
                        NON_FIELD_ERRORS: [
                            _("Circular reference in parent."),
                        ],
                    })
            keys.append(parent.pk)
            parent = parent.parent
        return super().clean()
    def validate_unique(self, exclude=None):
        # Manual (parent, slug) uniqueness check -- see NOTE above.
        existing = Page.objects.filter(slug=self.slug, parent=self.parent)
        # We could be updating the page, so don't fail if the existing
        # entry is this page.
        if existing.count() > 1 or (existing.count() == 1 and
                                    existing.first().pk != self.pk):
            raise ValidationError(
                {
                    NON_FIELD_ERRORS: [
                        _("Duplicate parent/slug combination."),
                    ],
                })
        return super().validate_unique(exclude)
    def save(self, *args, **kwargs):
        """Ensure we invalidate the cache after saving"""
        super().save(*args, **kwargs)
        self.invalidate_cache()
def page_menus(root_menu):
    """Add page menus."""
    top_level = Page.objects.filter(
        include_in_menu=True, parent=None,
    ).prefetch_related("children").order_by('menu_order')
    for page in top_level:
        children = page.children.filter(
            include_in_menu=True).order_by('menu_order')
        if children:
            # Page with visible children becomes a submenu.
            root_menu.add_menu(
                page.slug,
                page.name,
                [],
            )
            for child in children:
                root_menu.add_item(
                    child.name,
                    child.get_absolute_url(),
                    menu=page.slug,
                )
        else:
            # Childless page becomes a plain top-level entry.
            root_menu.add_item(
                page.name,
                page.get_absolute_url(),
            )
# Rebuild the cached menu whenever a Page is saved.
post_save.connect(refresh_menu_cache, sender=Page)
| {
"repo_name": "CTPUG/wafer",
"path": "wafer/pages/models.py",
"copies": "1",
"size": "7179",
"license": "isc",
"hash": 2232297504548192300,
"line_mean": 33.8495145631,
"line_max": 123,
"alpha_frac": 0.5804429586,
"autogenerated": false,
"ratio": 4.188448074679113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020535907517540131,
"num_lines": 206
} |
from functools import partial
import logging
from mlabns.db import model
from mlabns.util import constants
from mlabns.util import message
from google.appengine.api import memcache
def _filter_by_status(tools, address_family, status):
    """Filter sliver tools based on the status of their available interfaces.

    Args:
        tools: A list of sliver tools to filter by status.
        address_family: Address family of the interface to which the status
            parameter applies. If None, include sliver tools that have the given
            status on any interface.
        status: Sliver tool status to filter for (i.e. only return sliver tools
            with this status).

    Returns:
        A subset of the provided sliver tools, filtered by status.
    """
    if address_family == message.ADDRESS_FAMILY_IPv4:
        status_attrs = ('status_ipv4',)
    elif address_family == message.ADDRESS_FAMILY_IPv6:
        status_attrs = ('status_ipv6',)
    else:
        # When caller has not specified an address family, use any interface
        status_attrs = ('status_ipv4', 'status_ipv6')
    # A tool qualifies as soon as any selected interface matches the status.
    return [tool for tool in tools
            if any(getattr(tool, attr) == status for attr in status_attrs)]
def _filter_by_country(tools, country):
"""Filters sliver tools based on the tool's country."""
return filter(lambda t: t.country == country, tools)
def _filter_choose_one_host_per_site(tools):
"""Filters to make sure only one host is returned per site_id.
This filter should be run after _filter_by_status if you want to make sure
the chosen site is up.
Args:
tools: The list of sliver tools to filter.
Returns:
A list containing a unique sliver tool for each site.
"""
sites = {}
for tool in tools:
if tool.site_id not in sites:
sites[tool.site_id] = tool
else:
sites[tool.site_id] = min(sites[tool.site_id],
tool,
key=lambda t: t.fqdn)
return [tool for tool in sites.values()]
def _find_site_ids_for_metro(metro):
    """Determine which site IDs are present in a given metro.

    Args:
        metro: The metro for which to find site IDs.

    Returns:
        A list of site IDs for the given metro (empty when none match).
    """
    matches = model.Site.all().filter(
        'metro =', metro).fetch(constants.MAX_FETCHED_RESULTS)
    if not matches:
        logging.warning('No results found for metro %s.', metro)
        return []
    logging.info('Found %d results for metro %s.', len(matches), metro)
    return [site.site_id for site in matches]
class ToolProperties(object):
    """A set of criteria to specify matching SliverTool(s)."""

    def __init__(self,
                 tool_id,
                 status=None,
                 address_family=None,
                 metro=None,
                 country=None):
        self.tool_id = tool_id
        self.status = status
        self.address_family = address_family
        self.metro = metro
        self.country = country

    def __eq__(self, other):
        # Equal only when the other object is the same concrete type and
        # every attribute matches.
        if not isinstance(other, self.__class__):
            return False
        return vars(self) == vars(other)
class SliverToolFetcher(object):
    """Fetches SliverTools from AppEngine memcache and Datastore."""

    def __init__(self):
        self._memcache_fetcher = SliverToolFetcherMemcache()
        self._datastore_fetcher = SliverToolFetcherDatastore()

    def fetch(self, tool_properties):
        """Fetch SliverTool objects with specified criteria.

        Memcache is consulted first; on a miss the query falls back to the
        Datastore.

        Args:
            tool_properties: A set of criteria that specifies what subset of
                SliverTools to retrieve.

        Returns:
            A list of SliverTool objects that match the specified criteria.
        """
        cached = self._memcache_fetcher.fetch(tool_properties)
        if cached:
            return cached
        logging.info(
            'Sliver tools not found in memcache, falling back to data store.')
        return self._datastore_fetcher.fetch(tool_properties)
class SliverToolFetcherMemcache(object):
    """Fetches SliverTool objects from the AppEngine Memcache."""

    def fetch(self, tool_properties):
        """Fetch SliverTool objects from the Memcache with specified criteria.

        Args:
            tool_properties: A set of criteria that specifies what subset of
                SliverTools to retrieve from Memcache.

        Returns:
            A list of SliverTool objects that match the specified criteria.
        """
        if tool_properties.metro:
            # Can't filter by metro without hitting the Datastore because
            # Memcache does not have metro -> site ID mapping.
            return []
        pipeline = []
        if tool_properties.status:
            pipeline.append(partial(
                _filter_by_status,
                address_family=tool_properties.address_family,
                status=tool_properties.status))
        if tool_properties.country:
            pipeline.append(partial(_filter_by_country,
                                    country=tool_properties.country))
        pipeline.append(_filter_choose_one_host_per_site)
        candidates = memcache.get(
            tool_properties.tool_id,
            namespace=constants.MEMCACHE_NAMESPACE_TOOLS)
        if not candidates:
            return []
        logging.info('Sliver tools found in memcache (%d results).',
                     len(candidates))
        for apply_filter in pipeline:
            candidates = apply_filter(candidates)
        logging.info('After filtering, %d candidates match criteria.',
                     len(candidates))
        return candidates
class SliverToolFetcherDatastore(object):
    """Fetches SliverTool objects from the AppEngine Datastore."""
    def fetch(self, tool_properties):
        """Fetch SliverTool objects from the Datastore with specified criteria.

        Args:
            tool_properties: A set of criteria that specifies what subset of
                SliverTools to retrieve from the Datastore.

        Returns:
            A list of SliverTool objects that match the specified criteria.
        """
        gql_clauses = ['tool_id = :tool_id']
        if tool_properties.metro:
            # Metro is resolved to site IDs via a separate Site query first.
            site_ids = _find_site_ids_for_metro(tool_properties.metro)
            gql_clauses.append('site_id in :site_ids')
        else:
            site_ids = None
        if tool_properties.country:
            gql_clauses.append('country = :country')
        gql = 'WHERE ' + ' AND '.join(gql_clauses)
        # NOTE(review): ``status`` is bound below but no clause references
        # ``:status`` -- status filtering happens in Python (see below).
        # Presumably harmless; confirm GAE ignores unused bindings.
        gql_query = model.SliverTool.gql(gql,
                                         tool_id=tool_properties.tool_id,
                                         status=tool_properties.status,
                                         site_ids=site_ids,
                                         country=tool_properties.country)
        results = gql_query.fetch(constants.MAX_FETCHED_RESULTS)
        # GQL doesn't have an OR operator, which makes it impossible to write
        # GQL like (status_ipv4 = 'online' OR status_ipv6 = 'online') so we do
        # status filtering in application code.
        if tool_properties.status:
            results = _filter_by_status(results, tool_properties.address_family,
                                        tool_properties.status)
        results = _filter_choose_one_host_per_site(results)
        logging.info('%d sliver tools found in Datastore.', len(results))
        return results
| {
"repo_name": "fernandalavalle/mlab-ns",
"path": "server/mlabns/db/sliver_tool_fetcher.py",
"copies": "1",
"size": "8067",
"license": "apache-2.0",
"hash": 449270669259679500,
"line_mean": 34.3815789474,
"line_max": 80,
"alpha_frac": 0.6009669022,
"autogenerated": false,
"ratio": 4.351132686084142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5452099588284142,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
from nephila.xmlparser import HTMLParser
from nephila.agent import AsyncAgent
from nephila.utils import get_full_url
class Spider:
    """Crawls pages asynchronously, following a declarative sitemap route."""

    def __init__(self, sitemap, handler):
        self.metainfo = sitemap[0]
        self.route = sitemap[1:]
        self.start_url = self.metainfo["url"]
        self.handler = handler
        self.agent = AsyncAgent(self.metainfo["request"])
        self.logger = logging.getLogger(__name__)

    def start(self):
        """Kick off the crawl from the configured start URL."""
        self.agent.fetch(self.start_url, partial(self.crawl, route=self.route))

    def crawl(self, response, route):
        """Process one fetched page according to the current route step."""
        step, remaining = route[0], route[1:]
        encoding = self.metainfo.get("charset", 'utf-8')
        parser = HTMLParser(response.body.decode(encoding))

        # "child" links descend one level in the route.
        if step.get("child"):
            hrefs = parser.get_urls(step["child"])
            if hrefs is None:
                self.logger.error("Child urls is broken")
            else:
                for href in hrefs:
                    link = get_full_url(response.effective_url, href)
                    self.agent.fetch(link, partial(self.crawl, route=remaining))

        # "sibling" links (e.g. pagination) stay at the same route level.
        if step.get("sibling"):
            hrefs = parser.get_urls(step["sibling"])
            if hrefs is None:
                self.logger.debug("%s:no sibling or wrong rule", response.effective_url)
            else:
                for href in hrefs:
                    link = get_full_url(response.effective_url, href)
                    self.agent.fetch(link, partial(self.crawl, route=route))

        # "sendto" dispatches extracted data to named handler methods.
        if step.get("sendto"):
            data = (parser.get_data(step["data"]) or dict()) if step["data"] else dict()
            data["response"] = response
            for name, params in step["sendto"].items():
                handler = getattr(self.handler, name)
                handler(data=data, **params)
| {
"repo_name": "rydesun/nephila",
"path": "nephila/spider.py",
"copies": "1",
"size": "2158",
"license": "mit",
"hash": 6362652025662133000,
"line_mean": 38.2363636364,
"line_max": 88,
"alpha_frac": 0.5848007414,
"autogenerated": false,
"ratio": 3.988909426987061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008622102900806245,
"num_lines": 55
} |
from functools import partial
import logging
from scapy.all import ARP, Ether, sendp
import sleepproxy.manager
from sleepproxy.sniff import SnifferThread
_HOSTS = {}
def handle(othermac, addresses, mymac, iface):
    """Start sniffer threads that spoof ARP (or IPv6 NDP) replies for a
    sleeping host's addresses.

    :param othermac: MAC of the sleeping host (colon-free form)
    :param addresses: IPv4/IPv6 addresses to answer for
    :param mymac: our own MAC, used as the spoofed hardware address
    :param iface: network interface to sniff on
    """
    if othermac in _HOSTS:
        logging.info("I already seem to be managing %s, ignoring" % othermac)
        return
    logging.info('Now handling ARPs for %s:%s on %s' % (othermac, addresses, iface))
    for address in addresses:
        if ':' in address: #ipv6
            expr = "ip6 && icmp6 && (ip6[40] == 135 || ip6[40] == 136) and host %s" % (address) #ipv6 uses ndp, not arp
        else:
            expr = "arp host %s" % (address)
        # NOTE(review): each address overwrites _HOSTS[othermac], so only the
        # last sniffer thread is tracked -- forget() will stop just that one
        # and earlier threads leak. Consider storing a list per MAC.
        thread = SnifferThread( filterexp=expr, prn=partial(_handle_packet, address, mymac, othermac), iface=iface,) #using a callback, but not doing it async
        _HOSTS[othermac] = thread
        thread.start() #make this a greenlet?
def forget(mac):
    """Stop the ARP-spoofing sniffer for ``mac``, if one is tracked."""
    logging.info("Removing %s from ARP handler" % (mac, ))
    try:
        sniffer = _HOSTS[mac]
    except KeyError:
        logging.info("I don't seem to be managing %s" % (mac, ))
        return
    sniffer.stop()
    del _HOSTS[mac]
def _handle_packet(address, mac, sleeper, packet):
    """Answer a sniffed ARP who-has for ``address`` with our own MAC.

    :param address: IP address we answer for on the sleeper's behalf
    :param mac: our MAC address, used in the spoofed reply
    :param sleeper: MAC of the sleeping host (colon-free form)
    :param packet: sniffed scapy packet
    """
    if ARP not in packet:
        # I don't know how this happens, but I've seen it
        return
    if packet.hwsrc.replace(':','') == sleeper: #grat-arp from sleeper on wakeup
        logging.warning("sleeper[%s] has awakened, deregistering it" % sleeper)
        sleepproxy.manager.forget_host(sleeper)
        return
    # Only answer requests (who-has); ignore replies and other ops.
    if packet[ARP].op != ARP.who_has:
        return
    if packet[ARP].pdst != address:
        logging.debug("Skipping packet with pdst %s != %s" % (packet[ARP].pdst, address, ))
        return
    logging.debug(packet.display())
    ether = packet[Ether]
    arp = packet[ARP]
    # Craft an is-at reply claiming the sleeper's IP maps to our MAC, sent
    # back to whoever asked.
    reply = Ether(
        dst=ether.src, src=mac) / ARP(
        op="is-at",
        psrc=arp.pdst,
        pdst=arp.psrc,
        hwsrc=mac,
        hwdst=packet[ARP].hwsrc)
    logging.info("Spoofing ARP response for %s to %s" % (arp.pdst, packet[ARP].psrc))
    sendp(reply)
| {
"repo_name": "kfix/SleepProxyServer",
"path": "sleepproxy/arp.py",
"copies": "1",
"size": "2086",
"license": "bsd-2-clause",
"hash": 137844992970985340,
"line_mean": 33.7666666667,
"line_max": 158,
"alpha_frac": 0.610738255,
"autogenerated": false,
"ratio": 3.3269537480063796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44376920030063793,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
from scapy.all import IP, TCP
import sleepproxy.manager
from sleepproxy.sniff import SnifferThread
from sleepproxy.wol import wake
from time import sleep
_HOSTS = {}
def handle(mac, addresses, iface):
    """Start sniffer threads that watch for TCP SYNs addressed to a sleeping
    host, so it can be woken on demand.

    :param mac: MAC of the sleeping host
    :param addresses: IPv4/IPv6 addresses to watch
    :param iface: network interface to sniff on
    """
    if mac in _HOSTS:
        logging.debug("Ignoring already managed TCP host %s" % (mac, ))
        # Bug fix: previously fell through (cf. the matching check in
        # arp.py), replacing the tracked thread, leaking the old sniffer
        # and starting a duplicate.
        return
    logging.info("Now handling TCP SYNs for %s:%s on %s" % (mac, addresses, iface))
    for address in addresses:
        #we can be fancier, wake on port 22 with plain packets, not just syn
        #http://www.opensource.apple.com/source/mDNSResponder/mDNSResponder-522.1.11/mDNSCore/mDNS.c mDNSCoreReceiveRawTransportPacket()
        if ':' in address: #ipv6
            expr = "ip6[6]=6 && ip6[53]&4!=0 and ip6[6]=6 && ip6[53]&1=0 and dst host %s" % (address) #ipv6 can have multiple headers, so no tcp* shortcuts in pcap-filter
        else:
            expr = "tcp[tcpflags] & tcp-syn != 0 and tcp[tcpflags] & tcp-ack = 0 and dst host %s" % (address)
        # NOTE(review): like arp.handle, each address overwrites _HOSTS[mac];
        # only the last sniffer thread is tracked.
        thread = SnifferThread( filterexp=expr, prn=partial(_handle_packet, mac, address), iface=iface) #using a callback, but not doing it async
        _HOSTS[mac] = thread
        thread.start() #make this a greenlet?
def forget(mac):
    """Stop the TCP SYN sniffer for ``mac``, if one is tracked."""
    logging.info("Removing host %s from TCP handler" % (mac, ))
    try:
        sniffer = _HOSTS[mac]
    except KeyError:
        logging.info("I don't seem to know about %s, ignoring" % (mac, ))
        return
    sniffer.stop()
    del _HOSTS[mac]
def _handle_packet(mac, address, packet):
    """Do something with a SYN for the other machine!

    Wakes the sleeping host when a TCP SYN for one of its addresses is
    sniffed; the original sender's TCP stack is expected to retransmit the
    SYN once the host is awake.
    """
    if not (IP in packet and TCP in packet):
        return
    if packet[IP].dst != address:
        logging.debug("Sniffed a TCP SYN for the wrong address?: %s" % packet.show() )
        return
    #logging.debug(packet.display())
    # Drop our mDNS records for the sleeper before it wakes so its own
    # advertisements don't conflict with ours.
    sleepproxy.manager.mdns.forget(mac) # pre-emptively drop adv to keep the mac from de-colliding its name
    sleep(0.4)
    wake(mac) #retry=15?
    #sleepproxy.manager.forget_host(mac)
| {
"repo_name": "kfix/SleepProxyServer",
"path": "sleepproxy/tcp.py",
"copies": "1",
"size": "1991",
"license": "bsd-2-clause",
"hash": 6103448528637281000,
"line_mean": 38.82,
"line_max": 170,
"alpha_frac": 0.6579608237,
"autogenerated": false,
"ratio": 3.323873121869783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9331410580307331,
"avg_score": 0.030084673052490424,
"num_lines": 50
} |
from functools import partial
import logging
from six import iteritems
from bravado_core.docstring import docstring_property
from bravado_core.schema import SWAGGER_PRIMITIVES
log = logging.getLogger(__name__)
# Models in #/definitions are tagged with this key so that they can be
# differentiated from 'object' types.
MODEL_MARKER = 'x-model'
def tag_models(container, key, path, visited_models, swagger_spec):
    """Callback used during the swagger spec ingestion process to tag models
    with a 'x-model'. This is only done in the root document.

    A list of visited models is maintained to avoid duplication of tagging.

    :param container: container being visited
    :param key: attribute in container being visited as a string
    :param path: list of path segments to the key
    :type visited_models: dict (k,v) == (model_name, path)
    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :raises ValueError: when the same model name is defined twice
    """
    # Only dicts that live directly under #/definitions are candidates.
    if len(path) < 2 or path[-2] != 'definitions':
        return
    deref = swagger_spec.deref
    model_name = key
    model_spec = deref(container.get(key))
    if deref(model_spec.get('type')) != 'object':
        return
    # Already tagged (e.g. by an earlier traversal pass).
    if deref(model_spec.get(MODEL_MARKER)) is not None:
        return
    # Lazy %-formatting so the string is only built when DEBUG is enabled.
    log.debug('Found model: %s', model_name)
    if model_name in visited_models:
        raise ValueError(
            'Duplicate "{0}" model found at path {1}. '
            'Original "{0}" model at path {2}'
            .format(model_name, path, visited_models[model_name]))
    # Use the MODEL_MARKER constant instead of the literal 'x-model' for
    # consistency with the rest of this module.
    model_spec[MODEL_MARKER] = model_name
    visited_models[model_name] = path
def collect_models(container, key, path, models, swagger_spec):
    """Callback used during the swagger spec ingestion to collect all the
    tagged models and create appropriate python types for them.

    :param container: container being visited
    :param key: attribute in container being visited as a string
    :param path: list of path segments to the key
    :param models: created model types are placed here
    :type swagger_spec: :class:`bravado_core.spec.Spec`
    """
    # Only act when visiting the tag itself; the container is the model spec.
    if key != MODEL_MARKER:
        return
    deref = swagger_spec.deref
    model_name = deref(container.get(MODEL_MARKER))
    models[model_name] = create_model_type(swagger_spec, model_name, container)
def create_model_type(swagger_spec, model_name, model_spec):
    """Create a dynamic class from the model data defined in the swagger
    spec.

    The docstring for this class is dynamically generated because generating
    the docstring is relatively expensive, and would only be used in rare
    cases for interactive debugging in a REPL.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :param model_name: model name
    :param model_spec: json-like dict that describes a model.
    :returns: dynamic type created with attributes, docstrings attached
    :rtype: type
    """
    # Defer docstring generation until first attribute access.
    doc = docstring_property(partial(
        create_model_docstring, swagger_spec, model_spec))
    # Dunder methods delegate to the module-level helpers, closing over
    # model_spec so each generated type knows its own schema.
    methods = dict(
        __doc__=doc,
        __eq__=lambda self, other: compare(self, other),
        __init__=lambda self, **kwargs: model_constructor(self, model_spec,
                                                          kwargs),
        __repr__=lambda self: create_model_repr(self, model_spec),
        __dir__=lambda self: model_dir(self, model_spec),
    )
    # str() guards against a unicode model_name on Python 2.
    return type(str(model_name), (object,), methods)
def model_dir(model, model_spec):
    """Responsible for returning the names of the valid attributes on this
    model object. This includes any properties defined in this model's spec
    plus additional attributes that exist as `additionalProperties`.

    :param model: instance of a model
    :param model_spec: spec the passed in model in dict form
    :returns: list of str
    """
    declared = list(model_spec['properties'])
    return declared + model._additional_props
def compare(first, second):
    """Compares two model types for equivalence.

    TODO: If a type composes another model type, .__dict__ recurse on those
    and compare again on those dict values.

    :param first: generated model type
    :type first: type
    :param second: generated model type
    :type second: type
    :returns: True if equivalent, False otherwise
    """
    if not (hasattr(first, '__dict__') and hasattr(second, '__dict__')):
        return False

    # Compare attribute dicts with any '_raw' bookkeeping key stripped out.
    def strip_raw(attrs):
        return {k: v for k, v in attrs.items() if k != '_raw'}

    return strip_raw(first.__dict__) == strip_raw(second.__dict__)
def model_constructor(model, model_spec, constructor_kwargs):
    """Constructor for the given model instance. Just assigns kwargs as attrs
    on the model based on the 'properties' in the model specification.

    Declared properties missing from ``constructor_kwargs`` are initialised
    to ``None``; surplus kwargs are allowed unless the spec sets
    ``additionalProperties: false``.

    :param model: Instance of a model type
    :type model: type
    :param model_spec: model specification
    :type model_spec: dict
    :param constructor_kwargs: kwargs sent in to the constructor invocation
    :type constructor_kwargs: dict
    :raises: AttributeError on constructor_kwargs that don't exist in the
        model specification's list of properties
    """
    arg_names = list(constructor_kwargs.keys())
    # Iterate property names directly: the per-property sub-specs were never
    # used, and this removes the six.iteritems dependency here.
    for attr_name in model_spec['properties']:
        if attr_name in arg_names:
            attr_value = constructor_kwargs[attr_name]
            arg_names.remove(attr_name)
        else:
            attr_value = None
        setattr(model, attr_name, attr_value)

    if arg_names and not model_spec.get('additionalProperties', True):
        raise AttributeError(
            "Model {0} does not have attributes for: {1}"
            .format(type(model), arg_names))

    # we've got additionalProperties to set on the model
    for arg_name in arg_names:
        setattr(model, arg_name, constructor_kwargs[arg_name])

    # stash so that dir(model) works
    model._additional_props = arg_names
def create_model_repr(model, model_spec):
    """Generate the repr string for the model.

    Attributes are listed in sorted order for a stable output.

    :param model: Instance of a model
    :param model_spec: model specification
    :type model_spec: dict
    :returns: repr string for the model
    """
    parts = []
    for attr_name in sorted(model_spec['properties']):
        parts.append(
            "{0}={1!r}".format(attr_name, getattr(model, attr_name)))
    return "{0}({1})".format(model.__class__.__name__, ', '.join(parts))
def is_model(swagger_spec, schema_object_spec):
    """
    :param swagger_spec: :class:`bravado_core.spec.Spec`
    :param schema_object_spec: specification for a swagger object
    :type schema_object_spec: dict
    :return: True if the spec has been "marked" as a model type, false
        otherwise.
    """
    spec = swagger_spec.deref(schema_object_spec)
    marker_value = swagger_spec.deref(spec.get(MODEL_MARKER))
    return marker_value is not None
def create_model_docstring(swagger_spec, model_spec):
    """Build an 'Attributes:' docstring for a generated model type,
    listing each property with its (swagger) type and description.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :param model_spec: specification for a model in dict form
    :rtype: string or unicode
    """
    deref = swagger_spec.deref
    model_spec = deref(model_spec)

    s = 'Attributes:\n\n\t'
    # TODO: Add more stuff available in the spec - 'required', 'example', etc
    for attr_name, attr_spec in sorted(iteritems(model_spec['properties'])):
        attr_spec = deref(attr_spec)
        schema_type = deref(attr_spec['type'])

        if schema_type in SWAGGER_PRIMITIVES:
            # TODO: update to python types and take 'format' into account
            attr_type = schema_type
        elif schema_type == 'array':
            array_spec = deref(attr_spec['items'])
            if is_model(swagger_spec, array_spec):
                array_type = deref(array_spec[MODEL_MARKER])
            else:
                array_type = deref(array_spec['type'])
            attr_type = u'list of {0}'.format(array_type)
        elif is_model(swagger_spec, attr_spec):
            attr_type = deref(attr_spec[MODEL_MARKER])
        elif schema_type == 'object':
            attr_type = 'dict'
        else:
            # Fix: previously `attr_type` was left unbound (NameError) for
            # any other schema type (e.g. 'file'); fall back to the raw
            # schema type name.
            attr_type = schema_type

        s += u'{0}: {1}'.format(attr_name, attr_type)
        if deref(attr_spec.get('description')):
            s += u' - {0}'.format(deref(attr_spec['description']))

        s += '\n\t'
    return s
| {
"repo_name": "MphasisWyde/eWamSublimeAdaptor",
"path": "POC/v0_4_POC_with_generic_cmd_and_swagger/third-party/bravado_core/model.py",
"copies": "7",
"size": "8303",
"license": "mit",
"hash": 7042798429062460000,
"line_mean": 34.1822033898,
"line_max": 77,
"alpha_frac": 0.6495242683,
"autogenerated": false,
"ratio": 3.8529002320185617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 236
} |
from functools import partial
import logging
import lib.const as C
import lib.visit as v
from .. import util
from .. import sample
from ..meta import class_lookup
from ..meta.template import Template
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression, to_expression
class Singleton(object):
  """Visitor that rewrites @Singleton-annotated classes into the classic
  singleton pattern (private constructor, static holder field, static
  getter) and replaces @Singleton(C) annotation expressions with calls
  to the generated getter.

  Dispatch over node types is done by the ``lib.visit`` decorators: each
  ``@v.when(...)`` registers a handler for that node class.
  """

  @classmethod
  def find_singleton(cls):
    # Predicate for locating the @Singleton annotation among a node's annos.
    return lambda anno: anno.by_name(C.A.SINGLE)

  def __init__(self, smpls):
    # Sample logs, used below to recover the getter's observed name.
    self._smpls = smpls

  @v.on("node")
  def visit(self, node):
    """
    This is the generic method to initialize the dynamic dispatcher
    """

  ## @Singleton
  ## class C { ... }
  ## =>
  ## class C { ...
  ##   private C() { } // private constructor
  ##   private static C instance; // singleton holder
  ##   public static C getInstance() { // retriever
  ##     if (instance == null) { instance = new C(); }
  ##     return instance;
  ##   }
  ## }
  def rewrite(self, cls):
    cname = cls.name
    logging.debug("reducing: @{} class {} {{ ... }}".format(C.A.SINGLE, cname))

    # make the constructor(s) *private*
    inits = cls.inits
    if not inits: inits = [cls.add_default_init()]
    for init in inits:
      if C.mod.PR not in init.mods: init.mods.append(C.mod.PR)
      # rip off *public* modifier, if exists
      try: init.mods.remove(C.mod.PB)
      except ValueError: pass

    # Derive the holder field name from an existing get<Cname>() method,
    # falling back to the lower-cased class name.
    Fname = cname
    fname = cname.lower()
    for mtd in cls.mtds:
      mname = mtd.name
      if mname.startswith("get") and mname.endswith(cname):
        Fname = mname.replace("get",'')
        fname = Fname[:1].lower() + Fname[1:]
        break

    # add a static field to hold the singleton instance
    holder = cls.fld_by_name(fname)
    if not holder:
      holder = Field(clazz=cls, mods=[C.mod.PR, C.mod.ST], typ=cname, name=fname)
      logging.debug("adding field {0}.{1} of type {0}".format(cname, fname))
      cls.add_flds([holder])

    # retriever
    mname = sample.find_getter(self._smpls, [cname], Fname)
    mtd_g = cls.mtd_by_sig(mname)
    if not mtd_g:
      mtd_g = Method(clazz=cls, mods=[C.mod.PB, C.mod.ST], typ=cname, name=mname)
      logging.debug("adding method {}.{}".format(cname, mname))
      cls.add_mtds([mtd_g])

    # Lazy-initialization body for the getter (parsed as Java statements).
    body = u"""
      if ({fname} == null) {{ {fname} = new {cname}(); }}
      return {fname};
    """.format(**locals())
    logging.debug("filling getter {}.{}".format(cname, mname))
    mtd_g.body = to_statements(mtd_g, body)

    # to replace annotation @Singleton(Class) in expressions
    setattr(cls, "singleton", holder)
    setattr(holder, "getter", mtd_g)

  @v.when(Template)
  def visit(self, node):
    # Rewrite every class carrying the @Singleton annotation.
    for cls in node.classes:
      if util.exists(Singleton.find_singleton(), cls.annos):
        self.rewrite(cls)

  @v.when(Clazz)
  def visit(self, node): pass

  @v.when(Field)
  def visit(self, node): pass

  @v.when(Method)
  def visit(self, node): pass

  @v.when(Statement)
  def visit(self, node): return [node]

  ## @Singleton(C) => C.getInstance()
  @v.when(Expression)
  def visit(self, node):
    if node.kind == C.E.ANNO:
      _anno = node.anno
      if _anno.name == C.A.SINGLE:
        logging.debug("reducing: {}".format(str(_anno)))
        cls_s = class_lookup(_anno.cid)
        # `singleton`/`getter` were attached by rewrite() above.
        mtd_g = cls_s.singleton.getter
        return to_expression(u"{}.{}()".format(cls_s.name, mtd_g.name))
    return node
| {
"repo_name": "plum-umd/pasket",
"path": "pasket/rewrite/singleton_anno.py",
"copies": "1",
"size": "3474",
"license": "mit",
"hash": -612840833987537200,
"line_mean": 27.95,
"line_max": 81,
"alpha_frac": 0.6137017847,
"autogenerated": false,
"ratio": 3.2650375939849625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43787393786849627,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
import lib.const as C
import lib.visit as v
from .. import util
from ..meta.template import Template
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression
class Factory(object):
  """Visitor that fills in bodies for factory-style abstract methods,
  either by naming convention (create*/make*/new*) or via an explicit
  @Factory annotation.

  Dispatch over node types is done by the ``lib.visit`` decorators.
  """

  @classmethod
  def find_factory(cls):
    # Predicate for locating the @Factory annotation among a node's annos.
    return lambda anno: anno.by_name(C.A.FACTORY)

  def __init__(self, smpls):
    # Sample logs (kept for interface symmetry with the other rewriters).
    self._smpls = smpls

  @v.on("node")
  def visit(self, node):
    """
    This is the generic method to initialize the dynamic dispatcher
    """

  @v.when(Template)
  def visit(self, node): pass

  @v.when(Clazz)
  def visit(self, node): pass

  @v.when(Field)
  def visit(self, node): pass

  # Method-name prefixes that mark a factory method by convention.
  verbs = ["create", "make", "new"]
  @staticmethod
  def is_factory(mname):
    for v in Factory.verbs:
      if mname.startswith(v): return True
    return False

  ## [naming convention]
  ## X createX();
  ## =>
  ## X createX() {
  ##   X obj = new X(); // for cleaner log conformity check
  ##   return obj;
  ## }
  ## [explicit annotation]
  ## @Factory Y m1(); => Y m1() { return new Y(); }
  ## @Factory(C) I m2(); => I m2() { return new C(); }
  @v.when(Method)
  def visit(self, node):
    # Skip methods that already have a body, interface methods, and
    # void-typed methods (nothing to construct).
    if node.body or node.clazz.is_itf or node.typ in C.J.v: return
    mname = node.name
    factory = None
    if Factory.is_factory(mname):
      factory = node.typ
    elif util.exists(Factory.find_factory(), node.annos):
      _anno = util.find(Factory.find_factory(), node.annos)
      # @Factory(C) names the concrete class; bare @Factory uses the
      # method's own return type.
      if hasattr(_anno, "cid"): factory = _anno.cid
      else: factory = node.typ
    if factory:
      logging.debug("filling factory: {}.{}".format(node.clazz.name, mname))
      init_e = Clazz.call_init_if_instantiable(factory, node.params)
      # Body is parsed from Java source text into statements.
      body = u"""
        {0} __obj = {1};
        return __obj;
      """.format(factory, str(init_e))
      node.body = to_statements(node, body)

  @v.when(Statement)
  def visit(self, node): return [node]

  @v.when(Expression)
  def visit(self, node): return node
| {
"repo_name": "plum-umd/pasket",
"path": "pasket/rewrite/factory.py",
"copies": "1",
"size": "2087",
"license": "mit",
"hash": -7185121601809617000,
"line_mean": 24.1445783133,
"line_max": 76,
"alpha_frac": 0.6300910398,
"autogenerated": false,
"ratio": 3.2866141732283465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4416705213028347,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import logging
import six
import simplejson as json
from bravado_core import schema
from bravado_core.content_type import APP_JSON
from bravado_core.exception import SwaggerMappingError
from bravado_core.marshal import marshal_schema_object
from bravado_core.unmarshal import unmarshal_schema_object
from bravado_core.validate import validate_schema_object
log = logging.getLogger(__name__)

# Maps a swagger `collectionFormat` name to its element separator.
# 'multi' left out intentionally - http client lib should handle it
COLLECTION_FORMATS = {
    'csv': ',',
    'ssv': ' ',
    'tsv': '\t',
    'pipes': '|'
}
def stringify_body(value):
    """Serialize *value* to a JSON string.

    Falsy values and values that are already strings are returned
    untouched.
    """
    if value and not isinstance(value, six.string_types):
        return json.dumps(value)
    return value
class Param(object):
    """Convenience wrapper over a swagger parameter specification dict.

    Exposes the commonly requested fields of the spec as properties
    instead of raw dict lookups.
    """
    def __init__(self, swagger_spec, op, param_spec):
        """
        :type swagger_spec: :class:`bravado_core.spec.Spec`
        :type op: :class:`bravado_core.operation.Operation`
        :type param_spec: parameter specification in dict form
        """
        self.swagger_spec = swagger_spec
        self.op = op
        self.param_spec = param_spec

    @property
    def name(self):
        """Parameter name as declared in the spec."""
        return self.param_spec['name']

    @property
    def location(self):
        """Where the parameter lives ('path', 'query', 'header', ...).

        The spec key is 'in', which is a python keyword, hence the alias.
        """
        return self.param_spec['in']

    @property
    def description(self):
        """Optional human-readable description, or None."""
        return self.param_spec.get('description')

    @property
    def required(self):
        """True when the spec marks the parameter as required."""
        return self.param_spec.get('required', False)

    def has_default(self):
        """Return True when the spec supplies a default value."""
        return 'default' in self.param_spec

    @property
    def default(self):
        """The spec's default value (KeyError when absent)."""
        return self.param_spec['default']
def get_param_type_spec(param):
    """
    The spec for the parameter 'type' is not always in the same place for a
    parameter. The notable exception is when the location is 'body' and the
    schema for the type is in param_spec['schema']

    :rtype: dict
    :return: the param spec that contains 'type'
    :raises: SwaggerMappingError when param location is not valid
    """
    where = param.location
    if where == 'body':
        # Body params nest their type under 'schema'.
        return param.param_spec['schema']
    elif where in ('path', 'query', 'header', 'formData'):
        return param.param_spec
    raise SwaggerMappingError(
        "Don't know how to handle location {0}".format(where))
def marshal_param(param, value, request):
    """
    Given an operation parameter and its value, marshal the value and place it
    in the proper request destination.

    Destination is one of:
        - path - can accept primitive and array of primitive types
        - query - can accept primitive and array of primitive types
        - header - can accept primitive and array of primitive types
        - body - can accept any type
        - formData - can accept primitive and array of primitive types

    :type param: :class:`bravado_core.param.Param`
    :param value: The value to assign to the parameter
    :type request: dict
    """
    spec = get_param_type_spec(param)
    location = param.location
    value = marshal_schema_object(param.swagger_spec, spec, value)

    if param.swagger_spec.config['validate_requests']:
        validate_schema_object(spec, value)

    # Non-body arrays are serialized per the spec's collectionFormat.
    if spec['type'] == 'array' and location != 'body':
        value = marshal_collection_format(spec, value)

    if location == 'path':
        token = u'{%s}' % param.name
        # Don't do any escaping/encoding - http_client will take care of it
        request['url'] = request['url'].replace(token, six.text_type(value))
    elif location == 'query':
        request['params'][param.name] = value
    elif location == 'header':
        request['headers'][param.name] = value
    elif location == 'formData':
        if spec['type'] == 'file':
            add_file(param, value, request)
        else:
            request.setdefault('data', {})[param.name] = value
    elif location == 'body':
        request['headers']['Content-Type'] = APP_JSON
        request['data'] = json.dumps(value)
    else:
        raise SwaggerMappingError(
            "Don't know how to marshal_param with location {0}".
            format(location))
def unmarshal_param(param, request):
    """Unmarshal the given parameter from the passed in request like object.

    :type param: :class:`bravado_core.param.Param`
    :type request: :class:`bravado_core.request.RequestLike`
    """
    param_spec = get_param_type_spec(param)
    location = param.location
    # Partial bound to this param's declared type and name.
    cast_param = partial(cast_request_param, param_spec['type'], param.name)

    default_value = schema.get_default(param_spec)

    if location == 'path':
        raw_value = cast_param(request.path.get(param.name, None))
    elif location == 'query':
        raw_value = cast_param(request.query.get(param.name, default_value))
    elif location == 'header':
        raw_value = cast_param(request.headers.get(param.name, default_value))
    elif location == 'formData':
        if param_spec['type'] == 'file':
            # File content is passed through untouched.
            raw_value = request.files.get(param.name, None)
        else:
            raw_value = cast_param(request.form.get(param.name, default_value))
    elif location == 'body':
        # TODO: verify content-type header
        raw_value = request.json()
    else:
        raise SwaggerMappingError(
            "Don't know how to unmarshal_param with location {0}".
            format(location))

    # Non-body arrays arrive serialized per the spec's collectionFormat.
    if param_spec['type'] == 'array' and location != 'body':
        raw_value = unmarshal_collection_format(param_spec, raw_value)

    if param.swagger_spec.config['validate_requests']:
        validate_schema_object(param_spec, raw_value)

    value = unmarshal_schema_object(param.swagger_spec, param_spec, raw_value)
    return value
# Casters used by cast_request_param to coerce string request params
# into their schema-declared types.
CAST_TYPE_TO_FUNC = {
    'integer': int,
    'number': float,
    'boolean': bool
}
def cast_request_param(param_type, param_name, param_value):
    """Try to cast a request param (e.g. query arg, POST data) from a string to
    its specified type in the schema. This allows validating non-string params.

    :param param_type: name of the type to be casted to
    :type param_type: string
    :param param_name: param name
    :type param_name: string
    :param param_value: param value
    :type param_value: string
    :returns: the casted value, the original value when no caster is
        registered for ``param_type`` or the cast fails, or None for None.
    """
    if param_value is None:
        return None

    try:
        # Unknown types fall through via the identity function.
        return CAST_TYPE_TO_FUNC.get(param_type, lambda x: x)(param_value)
    except ValueError:
        # Fix: `Logger.warn` is a deprecated alias of `Logger.warning`.
        log.warning("Failed to cast %s value of %s to %s",
                    param_name, param_value, param_type)
        # Ignore type error, let jsonschema validation handle incorrect types
        return param_value
def add_file(param, value, request):
    """Attach a parameter of type 'file' to the given request dict.

    :type param: :class;`bravado_core.param.Param`
    :param value: The raw content of the file to be uploaded
    :type request: dict
    """
    # Lazily create the files list so several file params can accumulate.
    if request.get('files') is None:
        request['files'] = []

    # The http client is responsible for setting the content-type header
    # to 'multipart/form-data'; here we only verify that the swagger spec
    # is conformant.
    expected_mime_type = 'multipart/form-data'

    # TODO: Remove after https://github.com/Yelp/swagger_spec_validator/issues/22 is implemented # noqa
    if expected_mime_type not in param.op.consumes:
        raise SwaggerMappingError((
            "Mime-type '{0}' not found in list of supported mime-types for "
            "parameter '{1}' on operation '{2}': {3}").format(
                expected_mime_type,
                param.name,
                param.op.operation_id,
                param.op.consumes
        ))

    request['files'].append((param.name, (param.name, value)))
def marshal_collection_format(spec, value):
    """Serialize an array parameter using its swagger collectionFormat.

    :param spec: spec of the parameter with 'type': 'array'
    :param value: array value of the parameter
    :return: transformed value as a string
    """
    fmt = spec.get('collectionFormat', 'csv')
    if fmt == 'multi':
        # http client lib should handle it
        return value
    separator = COLLECTION_FORMATS[fmt]
    return separator.join(map(str, value))
def unmarshal_collection_format(spec, value):
    """
    For a non-body parameter of type array, unmarshal the value into an array
    of elements.

    Input:
        spec = {
            'name': 'status'
            'in': 'query',
            'collectionFormat': 'psv', # pipe separated value
            'type': 'array',
            'items': {
                'type': 'string',
            }
        }
        value="pending|completed|started"

    Output:
        ['pending', 'completed', 'started']

    :param spec: spec of the parameter with 'type': 'array'
    :type spec: dict
    :param value: parameter value
    :type value: string
    :rtype: list
    """
    fmt = spec.get('collectionFormat', 'csv')
    if fmt == 'multi':
        # http client lib should have already unmarshaled to an array
        return value

    separator = COLLECTION_FORMATS[fmt]
    item_type = spec['items']['type']
    elements = []
    for item in value.split(separator):
        elements.append(cast_request_param(item_type, spec['name'], item))
    return elements
| {
"repo_name": "admetricks/bravado-core",
"path": "bravado_core/param.py",
"copies": "1",
"size": "9655",
"license": "bsd-3-clause",
"hash": 3079083851122469400,
"line_mean": 31.7288135593,
"line_max": 108,
"alpha_frac": 0.6341791818,
"autogenerated": false,
"ratio": 3.968351829017674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5102531010817674,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import math
from turtle import Turtle
from domino_puzzle import Board, CaptureBoardGraph, Domino, Cell
from dominosa import PairState
DEFAULT_CELL_SIZE = 100
PIP_PATTERNS = """\
---+
|
|
|
---+
|
O |
|
---+
O |
|
O|
---+
O |
O |
O|
---+
O O|
|
O O|
---+
O O|
O |
O O|
---+
OOO|
|
OOO|
---+
"""
def draw_pips(turtle, pips, cell_size: float = DEFAULT_CELL_SIZE):
    """Draw the pips for one cell value, centred on the turtle's position.

    The turtle's position is restored via setpos before returning.
    """
    # Pick the 3-line ASCII grid for this pip count out of PIP_PATTERNS.
    pip_pattern = PIP_PATTERNS.splitlines()[pips*4+1:pips*4+4]
    pip_radius = cell_size*0.09
    turtle.up()
    pos = turtle.pos()
    # Move from the centre to the grid's starting corner.
    turtle.back(pip_radius*5)
    turtle.left(90)
    turtle.forward(pip_radius*4)
    turtle.right(90)
    for i in range(3):
        turtle.forward(pip_radius*2)
        turtle.right(90)
        turtle.forward(pip_radius)
        turtle.left(90)
        for j in range(3):
            # 'O' in the pattern marks a pip position.
            if pip_pattern[i][j] == 'O':
                turtle.dot(pip_radius*2)
            turtle.forward(pip_radius*3)
        # Return to the row start and step down to the next row.
        turtle.back(pip_radius*11)
        turtle.right(90)
        turtle.forward(pip_radius*2)
        turtle.left(90)
    turtle.setpos(pos)
def draw_domino(turtle, domino, cell_size=DEFAULT_CELL_SIZE):
    """Draw a 2x1 domino: outline, centre divider, and both pip grids.

    A head pip value of '#' draws a solid black blank instead.
    """
    start_fill = turtle.fillcolor()
    try:
        if domino.head.pips == '#':
            # Solid black tile, no divider or pips.
            draw_domino_outline(turtle, cell_size, fill='black', margin=0)
            return
        draw_domino_outline(turtle, cell_size)
    finally:
        # Always restore the caller's fill colour.
        turtle.fillcolor(start_fill)
    # Divider line between the two halves.
    turtle.up()
    turtle.forward(cell_size * 0.5)
    turtle.right(90)
    turtle.back(cell_size * 0.35)
    turtle.down()
    turtle.forward(cell_size*.7)
    turtle.up()
    turtle.back(cell_size*.35)
    turtle.left(90)
    turtle.forward(cell_size*.5)
    # Pips: tail half first, then back to the head half.
    draw_pips(turtle, domino.tail.pips, cell_size)
    turtle.back(cell_size)
    draw_pips(turtle, domino.head.pips, cell_size)
def draw_cell(turtle, cell, cell_size=DEFAULT_CELL_SIZE):
    """Draw a single unattached cell: a light-grey rounded square plus its
    pips. Restores the turtle's pen colour and position-relative state.
    """
    turtle.up()
    turtle.back(cell_size * 0.45)
    turtle.left(90)
    turtle.forward(cell_size * 0.45)
    turtle.right(90)
    r = cell_size * 0.1
    turtle.forward(r)
    turtle.down()
    turtle.fillcolor('white')
    old_colour = turtle.pencolor()
    turtle.pencolor('light grey')
    turtle.begin_fill()
    # Rounded square: four sides with pen-up corner arcs of radius r.
    for _ in range(4):
        turtle.forward(cell_size * 0.7)
        turtle.up()
        turtle.circle(-r, 90)
        turtle.down()
    turtle.end_fill()
    turtle.pencolor(old_colour)
    turtle.up()
    # Return to the cell centre before drawing pips.
    turtle.back(r)
    turtle.forward(cell_size * 0.45)
    turtle.right(90)
    turtle.forward(cell_size * 0.45)
    turtle.left(90)
    draw_pips(turtle, cell.pips, cell_size)
def draw_domino_outline(turtle,
                        cell_size=DEFAULT_CELL_SIZE,
                        fill='white',
                        margin=0.05):
    """Draw the filled rounded-rectangle outline of a 2x1 domino.

    margin is a fraction of cell_size shaved off each edge; the turtle's
    net position is restored by the closing moves.
    """
    turtle.up()
    r = cell_size * 0.05
    margin_size = cell_size * margin
    turtle.back(cell_size/2-r-margin_size)
    turtle.left(90)
    turtle.forward(cell_size/2-margin_size)
    turtle.right(90)
    turtle.down()
    turtle.fillcolor(fill)
    turtle.begin_fill()
    # Two long/short side pairs with rounded corners of radius r.
    for _ in range(2):
        turtle.forward(2*cell_size - 2*(margin_size + r))
        turtle.circle(-r, 90)
        turtle.forward(cell_size - 2*(margin_size + r))
        turtle.circle(-r, 90)
    turtle.end_fill()
    turtle.up()
    # Undo the initial repositioning.
    turtle.right(90)
    turtle.forward(cell_size/2-margin_size)
    turtle.left(90)
    turtle.forward(cell_size/2-r-margin_size)
def draw_die_outline(turtle,
                     die_size: float = DEFAULT_CELL_SIZE,
                     fill='white'):
    """Draw the filled rounded-square outline of a die, centred on the
    turtle's position; the closing moves restore the net position.
    """
    turtle.up()
    r = die_size / 12
    turtle.back(die_size / 2 - r)
    turtle.left(90)
    turtle.forward(die_size / 2)
    turtle.right(90)
    turtle.down()
    turtle.fillcolor(fill)
    turtle.begin_fill()
    # Four equal sides with rounded corners of radius r.
    for _ in range(4):
        turtle.forward(die_size - 2 * r)
        turtle.circle(-r, 90)
    turtle.end_fill()
    turtle.up()
    # Undo the initial repositioning.
    turtle.right(90)
    turtle.forward(die_size / 2)
    turtle.left(90)
    turtle.forward(die_size / 2 - r)
def draw_paths(turtle, board: Board, cell_size=DEFAULT_CELL_SIZE):
    """Draw grey connector lines between neighbouring cells whose pip
    counts differ by at most one (see draw_neighbour_path).

    Restores position, colour, and pen width before returning.
    """
    pos = turtle.pos()
    old_colour = turtle.color()
    old_width = turtle.width()
    turtle.width(2)
    turtle.color('grey')
    turtle.up()
    # Move to the centre of the bottom-left cell.
    turtle.forward(cell_size/2)
    turtle.left(90)
    turtle.back(cell_size*(board.height-0.5))
    for y in range(board.height):
        for x in range(board.width):
            cell: Cell = board[x][y]
            if y < board.height-1:
                # Connector to the cell above (same column).
                lower_neighbour = board[x][y + 1]
                draw_neighbour_path(turtle, cell, lower_neighbour, cell_size)
            turtle.right(90)
            if x < board.width-1:
                # Connector to the cell to the right (same row).
                draw_neighbour_path(turtle, cell, board[x+1][y], cell_size)
            turtle.forward(cell_size)
            turtle.left(90)
        # Return to the row start and move up one row.
        turtle.forward(cell_size)
        turtle.left(90)
        turtle.forward(cell_size*board.width)
        turtle.right(90)
    turtle.setpos(pos)
    turtle.color(old_colour)
    turtle.width(old_width)
def draw_neighbour_path(turtle, cell, neighbour, cell_size):
    """Draw a connector line of one cell length toward *neighbour* when its
    pip count differs from *cell*'s by at most one; the turtle ends where
    it started.
    """
    if abs(cell.pips - neighbour.pips) > 1:
        return
    turtle.down()
    turtle.forward(cell_size)
    turtle.up()
    turtle.back(cell_size)
def draw_board(turtle, board, cell_size=DEFAULT_CELL_SIZE):
    """Draw the whole board: grid of cells/dominoes, any offset dominoes,
    and marker dots. Restores position, colour, and heading at the end.
    """
    start_x, start_y = turtle.pos()
    start_colour = turtle.color()
    start_heading = turtle.heading()
    for y in range(board.height):
        for x in range(board.width):
            cell = board[x][y]
            if cell is not None:
                domino = cell.domino
                if domino is None:
                    # Loose cell, not part of a domino.
                    draw_cell(turtle, cell, cell_size)
                elif cell is domino.head:
                    # Draw each domino once, from its head cell, rotated
                    # to its orientation.
                    turtle.left(domino.degrees)
                    draw_domino(turtle, domino, cell_size)
                    turtle.right(domino.degrees)
            turtle.forward(cell_size)
        # Return to the row start and step up one row.
        turtle.up()
        turtle.back(cell_size*board.width)
        turtle.left(90)
        turtle.forward(cell_size)
        turtle.right(90)
    # Dominoes positioned outside the regular grid.
    for domino, x, y in board.offset_dominoes:
        turtle.setpos(start_x + x*cell_size, start_y + y*cell_size)
        turtle.left(domino.degrees)
        draw_domino(turtle, domino, cell_size)
        turtle.right(domino.degrees)
    # Marker dots with white text and a miniature pip grid.
    for (x, y), marker in board.markers.items():
        turtle.setheading(start_heading+90)
        turtle.setpos(start_x + x*cell_size, start_y + y*cell_size)
        turtle.color(start_colour)
        turtle.dot(0.75*cell_size)
        turtle.back(0.05*cell_size)
        turtle.color('white')
        turtle.write(marker,
                     align='center',
                     font=('Arial', 0.20*cell_size, 'normal'))
        turtle.back(0.10*cell_size)
        cell = board[x][y]
        domino = cell.domino
        # Orient the small pip grid to match the covered domino.
        turtle.setheading(domino.degrees)
        draw_pips(turtle, cell.pips, int(0.30*cell_size))
        turtle.color(start_colour)
    turtle.setpos((start_x, start_y))
    turtle.setheading(start_heading)
def draw_arrow(turtle, cell_size, rotation=0):
    """Draw a filled arrow glyph centred on the turtle's position, rotated
    by *rotation* degrees; position and heading are restored at the end.
    """
    pos = turtle.pos()
    turtle.left(rotation)
    turtle.back(cell_size*.2)
    turtle.down()
    turtle.left(90)
    turtle.begin_fill()
    # Shaft (one side).
    turtle.forward(cell_size*.05)
    turtle.right(90)
    turtle.forward(cell_size*.3)
    turtle.left(90)
    turtle.forward(cell_size*.1)
    # Triangular head.
    turtle.right(120)
    turtle.forward(cell_size*.3)
    turtle.right(120)
    turtle.forward(cell_size*.3)
    turtle.right(120)
    turtle.forward(cell_size*.1)
    # Shaft (other side) back to the start.
    turtle.left(90)
    turtle.forward(cell_size*.3)
    turtle.right(90)
    turtle.forward(cell_size*.05)
    turtle.end_fill()
    turtle.up()
    turtle.right(90)
    turtle.forward(cell_size*.2)
    turtle.setpos(pos)
    turtle.right(rotation)
def draw_cross(turtle, cell_size, rotation=0):
    """Draw a filled plus/cross glyph centred on the turtle's position;
    rotation=45 turns it into an 'x'. Position is restored at the end.
    """
    pos = turtle.pos()
    thickness = cell_size*.1
    length = cell_size*.15
    turtle.up()
    turtle.right(rotation-45)
    turtle.right(45)
    # Offset to the inner corner of the first arm.
    turtle.forward(thickness*.5)
    turtle.left(90)
    turtle.forward(thickness*.5)
    turtle.down()
    turtle.begin_fill()
    # Four arms, each an L-shaped step around the centre.
    for _ in range(4):
        turtle.forward(length)
        turtle.left(90)
        turtle.forward(thickness)
        turtle.left(90)
        turtle.forward(length)
        turtle.right(90)
    turtle.end_fill()
    turtle.right(45)
    turtle.left(rotation-45)
    turtle.up()
    turtle.setpos(pos)
def draw_move(turtle, cell_size, offset, domino, dx, dy, move_num, step_count):
    """Draw one solution move: a coloured direction line from the domino's
    centre plus a numbered circle. Colour shades from green (first move)
    to blue (last move) via the (0, 1-shade, shade) interpolation.
    """
    shade = (move_num-1) * 1.0/step_count
    rgb = (0, 1-shade, shade)
    # Walk to the domino's head cell (offset compensates for the border).
    turtle.forward((domino.head.x-offset[0]) * cell_size)
    turtle.left(90)
    turtle.forward((domino.head.y-offset[1]) * cell_size)
    turtle.right(90)
    # Step to the domino's centre.
    turtle.setheading(domino.degrees)
    turtle.forward(cell_size*.5)
    # Point along the move direction.
    turtle.setheading(math.atan2(dy, dx) * 180/math.pi)
    pen = turtle.pen()
    turtle.pencolor(rgb)
    circle_pos = turtle.pos()
    turtle.width(4)
    turtle.forward(cell_size*0.05)
    turtle.down()
    turtle.forward(cell_size*0.4)
    turtle.up()
    turtle.pen(pen)
    turtle.setpos(circle_pos)
    # Numbered circle at the move's origin.
    turtle.forward(8)
    turtle.setheading(270)
    turtle.forward(8)
    turtle.left(90)
    turtle.down()
    turtle.pencolor(rgb)
    turtle.fillcolor('white')
    turtle.begin_fill()
    turtle.circle(8)
    turtle.end_fill()
    turtle.pen(pen)
    turtle.write(move_num, align='center')
    turtle.up()
def draw_match(turtle, cell_size, offset, cell):
    """Mark a matching cell with a red double-circle (ring) marker."""
    # Walk to the cell (offset compensates for the border).
    turtle.forward((cell.x-offset[0]) * cell_size)
    turtle.left(90)
    turtle.forward((cell.y-offset[1]) * cell_size)
    turtle.right(90)
    pen = turtle.pen()
    turtle.color('red')
    turtle.up()
    turtle.back(10)
    turtle.right(90)
    # Outer circle filled, inner circle cut into it.
    turtle.begin_fill()
    turtle.circle(10)
    turtle.left(90)
    turtle.forward(5)
    turtle.right(90)
    turtle.circle(5)
    turtle.left(90)
    turtle.end_fill()
    turtle.pen(pen)
def draw_capture_circle(turtle,
                        cell_size,
                        offset,
                        domino,
                        move_num=None):
    """Draw a red circle at the domino's midpoint: solid red when
    move_num is None (uncaptured), otherwise white with the move number.
    """
    # Midpoint of the domino, adjusted for the border offset.
    x = (domino.head.x + domino.tail.x) * 0.5 - offset[0]
    y = (domino.head.y + domino.tail.y) * 0.5 - offset[1]
    pen = turtle.pen()
    turtle.forward(x*cell_size)
    turtle.left(90)
    turtle.forward(y*cell_size)
    turtle.right(90)
    turtle.setheading(270)
    turtle.forward(8)
    turtle.left(90)
    turtle.down()
    turtle.pencolor('red')
    turtle.fillcolor('red' if move_num is None else 'white')
    turtle.begin_fill()
    turtle.circle(8)
    turtle.end_fill()
    turtle.pen(pen)
    if move_num is not None:
        turtle.write(move_num, align='center')
def draw_fuji(turtle, num_dominoes, cell_size=DEFAULT_CELL_SIZE):
    """Draw three stacked empty domino outlines, horizontally centred for
    a board num_dominoes wide; the net turtle position is restored.
    """
    turtle.up()
    offset = (num_dominoes // 2 - 0.5) * cell_size
    turtle.forward(offset)
    turtle.right(90)
    turtle.forward(cell_size*0.5)
    turtle.left(90)
    turtle.down()
    for _ in range(3):
        draw_domino_outline(turtle, cell_size)
        turtle.up()
        turtle.right(90)
        turtle.forward(cell_size)
        turtle.left(90)
        turtle.down()
    # Undo the repositioning (three outlines moved us 3 cells down).
    turtle.up()
    turtle.back(offset)
    turtle.right(90)
    turtle.back(3*cell_size)
    turtle.left(90)
def draw_diagram(turtle,
                 state,
                 cell_size=DEFAULT_CELL_SIZE,
                 solution=False,
                 show_path=False,
                 board_class=Board):
    """Draw a full diagram from a text *state*: the board, any mark glyphs
    (arrows/crosses) embedded in the text, dominosa hints, dice, arrows,
    optional neighbour paths, and (optionally) the capture solution.
    """
    # Each mark character maps to a pre-bound drawing call.
    marks = {'>': partial(draw_arrow, turtle, cell_size),
             '^': partial(draw_arrow, turtle, cell_size, 90),
             '<': partial(draw_arrow, turtle, cell_size, 180),
             'v': partial(draw_arrow, turtle, cell_size, 270),
             '+': partial(draw_cross, turtle, cell_size, 0),
             '*': partial(draw_cross, turtle, cell_size, 45)}
    pos = turtle.pos()
    sections = state.split('\n---\n')
    lines = sections[0].splitlines()
    turtle.up()
    # Move to the drawing origin for the board.
    turtle.forward(cell_size*0.5)
    turtle.right(90)
    turtle.forward(cell_size*len(lines)*0.5)
    turtle.left(90)
    origin = turtle.pos()
    board = board_class.create(state)
    draw_board(turtle, board, cell_size)
    turtle.up()
    turtle.pencolor('white')
    # Scan the text for mark characters; each character is half a cell.
    for y, line in enumerate(reversed(lines)):
        for x, c in enumerate(line):
            if (x+y) % 2:
                mark = marks.get(c)
                if mark is not None:
                    mark()
            turtle.up()
            turtle.forward(cell_size*.5)
        turtle.back(cell_size*len(line)*.5)
        turtle.left(90)
        turtle.forward(cell_size*.5)
        turtle.right(90)
    turtle.setpos(pos)
    draw_dominosa_hints(turtle, board, cell_size)
    draw_dice(turtle, board, cell_size)
    draw_arrows(turtle, board, cell_size)
    if show_path:
        draw_paths(turtle, board, cell_size)
    if solution:
        # Replay the capture solution on a bordered copy of the board.
        border = 1
        offset = [border, border]
        board = Board.create(state, border=border)
        for cell in board.findMatches():
            turtle.setpos(origin)
            draw_match(turtle,
                       cell_size,
                       offset,
                       cell)
        graph = CaptureBoardGraph()
        graph.walk(board)
        solution = graph.get_solution(return_partial=True)
        step_count = max(len(solution)-1, 1)
        for move_num, move in enumerate(solution, 1):
            # A move is "<domino name><direction letter>".
            domino_name = move[:2]
            for domino in board.dominoes:
                if domino.get_name() == domino_name:
                    dx, dy = Domino.get_direction(move[-1])
                    turtle.setpos(origin)
                    draw_move(turtle,
                              cell_size,
                              offset,
                              domino,
                              dx,
                              dy,
                              move_num,
                              step_count)
                    old_offset = offset[:]
                    state = graph.move(domino, dx, dy, offset)
                    new_board = Board.create(state, border=border)
                    # Dominoes that disappeared (other than the mover)
                    # were captured this move.
                    captures = set(board.dominoes)
                    captures.difference_update(new_board.dominoes)
                    captures.discard(domino)
                    for capture in captures:
                        turtle.setpos(origin)
                        draw_capture_circle(turtle,
                                            cell_size,
                                            old_offset,
                                            capture,
                                            move_num)
                    offset[0] += border
                    offset[1] += border
                    board = new_board
                    break

        # Mark uncaptured dominoes
        for domino in board.dominoes:
            turtle.setpos(origin)
            draw_capture_circle(turtle, cell_size, offset, domino)
    turtle.setpos(pos)
def draw_dominosa_hints(turtle, board, cell_size):
    """Draw the black split bars between cell pairs marked SPLIT on a
    dominosa board. Boards without get_pair_state are skipped.

    Restores pen colour and size before returning.
    """
    if not hasattr(board, 'get_pair_state'):
        return
    old_color = turtle.pencolor()
    old_size = turtle.pensize()
    turtle.pencolor('black')
    turtle.pensize(cell_size*0.05)
    turtle.up()

    # Draw splits between cells in the same row.
    for x in range(1, board.width):
        turtle.forward(cell_size)
        turtle.right(90)
        for y in reversed(range(board.height)):
            pair_state = board.get_pair_state(x-1, y, x, y)
            if pair_state != PairState.SPLIT:
                turtle.forward(cell_size)
            else:
                draw_split(turtle, cell_size)
        turtle.back(cell_size*board.height)
        turtle.left(90)
    turtle.back(cell_size*(board.width-1))

    # Draw splits between cells in the same column.
    turtle.right(90)
    for y in reversed(range(1, board.height)):
        turtle.forward(cell_size)
        turtle.left(90)
        for x in range(board.width):
            try:
                pair_state = board.get_pair_state(x, y-1, x, y)
            except KeyError:
                # Missing pair data is treated as undecided (no bar).
                pair_state = PairState.UNDECIDED
            if pair_state != PairState.SPLIT:
                turtle.forward(cell_size)
            else:
                draw_split(turtle, cell_size)
        turtle.back(cell_size * board.width)
        turtle.right(90)
    turtle.back(cell_size*board.width)
    turtle.pencolor(old_color)
    turtle.pensize(old_size)
def draw_split(turtle, cell_size):
    """Draw one split bar (0.7 of a cell, with 0.15-cell margins on each
    side), advancing the turtle exactly one full cell in total.
    """
    margin = 0.15 * cell_size
    turtle.forward(margin)
    turtle.down()
    turtle.forward(0.7 * cell_size)
    turtle.up()
    turtle.forward(margin)
def draw_joined_block(turtle, width, height):
    """Fill a width x height rectangle clockwise from the turtle's current
    position and heading; the pen ends up, back at the start.
    """
    turtle.down()
    turtle.begin_fill()
    for side in (width, height, width, height):
        turtle.forward(side)
        turtle.right(90)
    turtle.end_fill()
    turtle.up()
def draw_dice(turtle: Turtle, board: Board, cell_size: int):
    """Draw any dice sitting on the board (board.dice_set), one die per
    occupied cell, rotating pips to match the underlying domino.
    """
    dice_set = board.dice_set
    if dice_set is None:
        return
    turtle.color('black', 'white')
    # Move to the centre of the top-left cell.
    turtle.right(90)
    turtle.forward(int(cell_size * (board.height-0.5)))
    turtle.left(90)
    turtle.forward(cell_size/2)
    die_size = cell_size * 0.6
    for y in range(board.height):
        for x in range(board.width):
            die_pips = dice_set[x, y]
            if die_pips is not None:
                draw_die_outline(turtle, die_size)
                cell = board[x][y]
                if cell is None or cell.domino is None:
                    dy = 0
                else:
                    dx, dy = cell.domino.direction
                # Rotate pips for vertically oriented dominoes.
                if dy:
                    turtle.left(90)
                draw_pips(turtle, die_pips, die_size)
                if dy:
                    turtle.right(90)
            turtle.forward(cell_size)
        # Return to the row start and step up one row.
        turtle.back(cell_size*board.width)
        turtle.left(90)
        turtle.forward(cell_size)
        turtle.right(90)
    # Undo the initial half-cell repositioning.
    turtle.right(90)
    turtle.forward(cell_size / 2)
    turtle.left(90)
    turtle.back(cell_size / 2)
def draw_arrows(turtle: Turtle, board: Board, cell_size: int):
    """Draw grey polyline arrows (board.arrows) across the board, each
    ending with a filled triangular head; restores position and heading.
    """
    arrows = board.arrows
    if arrows is None:
        return
    start_pos = turtle.pos()
    turtle.up()
    turtle.color('grey50')
    line_width = cell_size / 20
    # Move to the top-left cell centre; (x0, y0) is the arrow origin.
    turtle.right(90)
    turtle.forward(cell_size * (board.height - 0.5))
    turtle.left(90)
    turtle.forward(cell_size / 2)
    x0, y0 = turtle.pos()
    for arrow in arrows.positions:
        # Jump to the arrow's first waypoint.
        x2, y2 = arrow[0]
        x = x0 + x2*cell_size
        y = y0 + y2*cell_size
        turtle.goto(x, y)
        turtle.down()
        # noinspection PyTypeChecker
        turtle.width(line_width)
        # Trace the intermediate waypoints.
        for x2, y2 in arrow[1:-1]:
            x = x0 + x2*cell_size
            y = y0 + y2*cell_size
            turtle.goto(x, y)
        # Final segment stops short by line_width to make room for the head.
        x2, y2 = arrow[-1]
        x = x0 + x2*cell_size
        y = y0 + y2*cell_size
        turtle.setheading(turtle.towards(x, y))
        distance = max(abs(x - turtle.xcor()), abs(y - turtle.ycor()))
        turtle.forward(distance - line_width)
        turtle.up()
        turtle.forward(line_width)
        # Filled triangular arrow head.
        turtle.right(150)
        turtle.width(cell_size//100)
        turtle.down()
        turtle.begin_fill()
        for _ in range(3):
            turtle.forward(cell_size*0.15)
            turtle.right(120)
        turtle.end_fill()
        turtle.up()
    turtle.goto(x0, y0)
    turtle.setheading(0)
    turtle.goto(start_pos)
def draw_demo(turtle):
    """Draw a sample diagram sized to fit the turtle's window.

    Fills a white 8x7-cell background first; the hard-coded `demo_type`
    below selects which of three sample board states gets drawn on top.
    """
    width = turtle.screen.window_width()
    height = turtle.screen.window_height()
    # Leave a small margin around an 8-wide by 7-tall cell grid.
    cell_size = min(width/8.2, height/7.2)
    turtle.up()
    turtle.back(cell_size*4)
    turtle.left(90)
    turtle.forward(cell_size*3.5)
    turtle.right(90)
    turtle.down()
    # White background rectangle behind the diagram.
    turtle.fillcolor('white')
    turtle.begin_fill()
    for _ in range(2):
        turtle.forward(cell_size*8)
        turtle.right(90)
        turtle.forward(cell_size*7)
        turtle.right(90)
    turtle.end_fill()
    demo_state = """\
5 5 5 5 6 6 6 6
- - - - - - - -
1 2 3 4 4 3 2 1
1|2 3 0
v *
4 5+6 6
2|2
"""
    mountain_state = """\
0|1 2|1 0|4
2 1|5 4|1 4
- -
0 0|6 4|2 4
0|3 3|3 4|5
1 2 3|6 5|5
- -
3 2 1|6 5|6
"""
    dominosa_state = """\
0 1 2 3
-
4 5 6 0
1|2 3 4
"""
    # Switch to 'mountains' or 'dominosa' to preview the other demos.
    demo_type = 'demo'
    if demo_type == 'mountains':
        draw_diagram(turtle, mountain_state, cell_size, show_path=True)
    elif demo_type == 'dominosa':
        draw_diagram(turtle, dominosa_state, cell_size)
    else:
        draw_fuji(turtle, 8, cell_size)
        draw_diagram(turtle, demo_state, cell_size, solution=False)
| {
"repo_name": "donkirkby/domiculture",
"path": "diagram.py",
"copies": "2",
"size": "20479",
"license": "mit",
"hash": -6108718821538360000,
"line_mean": 27.1691884457,
"line_max": 79,
"alpha_frac": 0.5602324332,
"autogenerated": false,
"ratio": 3.3347988926884873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48950313258884876,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import math
import actions
from actions import _get_as_str
import call_definitions
from call_definitions import xpcom_constructor as xpcom_const, python_wrap
from entity_values import entity
import instanceactions
from jstypes import JSWrapper
from validator.compat import FX40_DEFINITION
from validator.constants import MDN_DOC
# A list of identifiers and member values that may not be used.
# Maps the banned identifier to the warning text shown to the author.
BANNED_IDENTIFIERS = {
    u'newThread':
        'Creating threads from JavaScript is a common cause '
        'of crashes and is unsupported in recent versions of the platform',
    u'processNextEvent':
        'Spinning the event loop with processNextEvent is a common cause of '
        'deadlocks, crashes, and other errors due to unintended reentrancy. '
        'Please use asynchronous callbacks instead wherever possible',
}

# Boilerplate appended to several of the signing-help messages below.
CUSTOMIZATION_API_HELP = (
    'We are currently working to provide libraries and APIs to allow '
    'extensions to modify these settings in ways that we can guarantee are '
    'in-policy. In the interim, we recommend that you avoid changing these '
    'settings altogether, if at all possible.')

# Canned warning dicts attached to preference branches/names in
# BANNED_PREF_BRANCHES below.
CUSTOMIZATION_PREF_MESSAGE = {
    'description': (
        'Extensions must not alter user preferences such as the current home '
        'page, new tab page, or search engine, without explicit user consent, '
        'in which a user takes a non-default action. Such changes must also '
        'be reverted when the extension is disabled or uninstalled.',
        'In nearly all cases, new values for these preferences should be '
        'set in the default preference branch, rather than the user branch.'),
    'signing_help':
        'Add-ons which directly change these preferences must undergo at '
        'manual code review for at least one submission. ' +
        CUSTOMIZATION_API_HELP,
    'signing_severity': 'high',
}

NETWORK_PREF_MESSAGE = {
    'description':
        'Changing network preferences may be dangerous, and often leads to '
        'performance costs.',
    'signing_help':
        'Changes to these preferences are strongly discouraged. If at all '
        'possible, you should remove any reference to them from '
        'your extension. Extensions which do modify these preferences '
        'must undergo light manual code review for at least one submission.',
    'signing_severity': 'low',
}

SEARCH_PREF_MESSAGE = {
    'description':
        'Search engine preferences may not be changed by add-ons directly. '
        'All such changes must be made only via the browser search service, '
        'and only after an explicit opt-in from the user. All such changes '
        'must be reverted when the extension is disabled or uninstalled.',
    'signing_help': (
        'You should remove all references to these preferences from your '
        'code, and interact with search settings only via the '
        '`Services.search` interface. Extensions which interact with these '
        'preferences directly are not acceptable within the Firefox add-on '
        'ecosystem.',
        'Note, however, that extensions which change search settings even via '
        'the search service must undergo manual code review for at least '
        'one submission. ' + CUSTOMIZATION_API_HELP),
    'signing_severity': 'high',
}

SECURITY_PREF_MESSAGE = {
    'description':
        'Changing this preference may have severe security implications, and '
        'is forbidden under most circumstances.',
    'editors_only': True,
    'signing_help': ('Extensions which alter these settings are allowed '
                     'within the Firefox add-on ecosystem by exception '
                     'only, and under extremely limited circumstances.',
                     'Please remove any reference to these preference names '
                     'from your add-on.'),
    'signing_severity': 'high',
}

# Warning for any reference to the Marionette automation service.
MARIONETTE_MESSAGE = {
    'warning': 'Marionette should not be accessed by extensions',
    'description': 'References to the Marionette service are not acceptable '
                   'in extensions. Please remove them.',
}
def fuel_error(traverse_node, err):
    """Emit a deprecation warning for a reference to the FUEL library.

    Used as a `dangerous_on_read` handler (see `Application` in
    GLOBAL_ENTITIES).  The warning is reported through the bound
    traverser, so the `err` argument is unused here.
    """
    message = ('The FUEL library is now deprecated. You should use the '
               'add-ons SDK or Services.jsm. See %s for more information.'
               % MDN_DOC % 'Mozilla/Tech/Toolkit_API/FUEL')
    traverser = traverse_node.im_self
    traverser.warning(
        err_id=('js', 'traverser', 'dangerous_global'),
        warning='The FUEL library is now deprecated.',
        description=message,
        for_appversions=FX40_DEFINITION,
        tier=5,
        compatibility_type='warning')
# Preference branches and names that add-ons may not change.  Each entry
# pairs a name prefix with either a canned warning dict or None (use the
# generic banned-preference warning).
BANNED_PREF_BRANCHES = (
    # Security and update preferences
    (u'app.update.', SECURITY_PREF_MESSAGE),
    (u'browser.addon-watch.', SECURITY_PREF_MESSAGE),
    (u'capability.policy.', None),
    (u'datareporting.', SECURITY_PREF_MESSAGE),
    (u'extensions.blocklist.', SECURITY_PREF_MESSAGE),
    (u'extensions.checkCompatibility', None),
    (u'extensions.getAddons.', SECURITY_PREF_MESSAGE),
    (u'extensions.update.', SECURITY_PREF_MESSAGE),
    # Let's see if we can get away with this...
    # Changing any preference in this branch should result in a
    # warning. However, this substring may turn out to be too
    # generic, and lead to spurious warnings, in which case we'll
    # have to single out sub-branches.
    (u'security.', SECURITY_PREF_MESSAGE),
    # Search, homepage, and customization preferences
    (u'browser.newtab.url', CUSTOMIZATION_PREF_MESSAGE),
    (u'browser.newtabpage.enabled', CUSTOMIZATION_PREF_MESSAGE),
    (u'browser.search.defaultenginename', SEARCH_PREF_MESSAGE),
    (u'browser.search.searchEnginesURL', SEARCH_PREF_MESSAGE),
    (u'browser.startup.homepage', CUSTOMIZATION_PREF_MESSAGE),
    (u'extensions.getMoreThemesURL', None),
    (u'keyword.URL', SEARCH_PREF_MESSAGE),
    (u'keyword.enabled', SEARCH_PREF_MESSAGE),
    # Network
    (u'network.proxy.autoconfig_url', {
        'description':
            'As many add-ons have reason to change the proxy autoconfig URL, '
            'and only one at a time may do so without conflict, extensions '
            'must make proxy changes using other mechanisms. Installing a '
            'proxy filter is the recommended alternative: '
            'https://developer.mozilla.org/en-US/docs/Mozilla/Tech/XPCOM/'
            'Reference/Interface/nsIProtocolProxyService#registerFilter()',
        'signing_help':
            'Dynamic proxy configuration should be implemented via proxy '
            'filters, as described above. This preference should not be '
            'set, except directly by end users.',
        'signing_severity': 'low'}),
    (u'network.proxy.', NETWORK_PREF_MESSAGE),
    (u'network.http.', NETWORK_PREF_MESSAGE),
    (u'network.websocket.', NETWORK_PREF_MESSAGE),
    # Other
    (u'browser.preferences.instantApply', None),
    (u'extensions.alwaysUnpack', None),
    (u'extensions.bootstrappedAddons', None),
    (u'extensions.dss.', None),
    (u'extensions.installCache', None),
    (u'extensions.lastAppVersion', None),
    (u'extensions.pendingOperations', None),
    (u'general.useragent.', None),
    (u'nglayout.debug.disable_xul_cache', None),
    # Marionette
    (u'marionette.force-local', MARIONETTE_MESSAGE),
    (u'marionette.defaultPrefs.enabled', MARIONETTE_MESSAGE),
    (u'marionette.defaultPrefs.port', MARIONETTE_MESSAGE),
)

# Regular expressions matched against full preference names.
BANNED_PREF_REGEXPS = [
    r'extensions\..*\.update\.(url|enabled|interval)',
]
def is_shared_scope(traverser, right=None, node_right=None):
    """Returns true if the traverser `t` is traversing code loaded into
    a shared scope, such as a browser window. Particularly used for
    detecting when global overwrite warnings should be issued."""
    # FIXME(Kris): This is not a great heuristic.
    # JavaScript modules always get a scope of their own.
    if traverser.is_jsm:
        return False
    # Bootstrapped (restartless) add-ons load code into private scopes.
    bootstrapped = traverser.err.get_resource('em:bootstrap') == 'true'
    return not bootstrapped
# See https://github.com/mattbasta/amo-validator/wiki/JS-Predefined-Entities
# for details on entity properties.

# Forward declaration; assigned for real after GLOBAL_ENTITIES is built
# at the bottom of this module.
CONTENT_DOCUMENT = None

# Member map for nsICategoryManager; also reused for
# XPCOMUtils.categoryManager in GLOBAL_ENTITIES.
CATEGORY_MANAGER = {
    u'addCategoryEntry':
        {'dangerous':
            lambda a, t, e:
                e.get_resource('em:bootstrap') and
                ('Bootstrapped add-ons may not create persistent category '
                 'entries.' if len(a) > 3 and t(a[3]).is_literal() else
                 'Authors of bootstrapped add-ons must take care to clean up '
                 'any added category entries at shutdown.')}}

# Shared entity for interfaces from the obsolete, pre-Firefox 4
# extension manager.
OBSOLETE_EXTENSION_MANAGER = {
    'value': {},
    'dangerous': 'This interface is part of the obsolete extension manager '
                 'interface, which is not available in any remotely modern '
                 'version of Firefox. It should not be referenced in any '
                 'code.'}

# Shared entity for AddonManager methods that install other add-ons.
ADDON_INSTALL_METHOD = {
    'value': {},
    'dangerous': {
        'description': (
            'Add-ons may install other add-ons only by user consent. Any '
            'such installations must be carefully reviewed to ensure '
            'their safety.'),
        'editors_only': True,
        'signing_help': (
            'Rather than directly install other add-ons, you should offer '
            'users the opportunity to install them via the normal web install '
            'process, using an install link or button connected to the '
            '`InstallTrigger` API: '
            'https://developer.mozilla.org/en-US/docs/Web/API/InstallTrigger',
            'Updates to existing add-ons should be provided via the '
            'install.rdf `updateURL` mechanism.'),
        'signing_severity': 'high'},
}

# Default texts used by search_warning() below.
SEARCH_MESSAGE = 'Potentially dangerous use of the search service'
SEARCH_DESCRIPTION = (
    'Changes to the default and currently-selected search engine settings '
    'may only take place after users have explicitly opted-in, by taking '
    'a non-default action. Any such changes must be reverted when the add-on '
    'making them is disabled or uninstalled.')
def search_warning(severity='medium', editors_only=False,
                   message=SEARCH_MESSAGE,
                   description=SEARCH_DESCRIPTION):
    """Build a warning dict for a dangerous use of the search service.

    The severity, reviewer-only flag, and texts are parameterized so the
    nsIBrowserSearchService members can share one factory.
    """
    signing_help = (
        'Add-ons which directly change search settings must undergo '
        'manual code review for at least one submission. ' +
        CUSTOMIZATION_API_HELP)
    warning = {
        'err_id': ('testcases_javascript_actions',
                   'search_service',
                   'changes'),
        'signing_help': signing_help,
        'signing_severity': severity,
        'editors_only': editors_only,
        'warning': message,
        'description': description,
    }
    return warning
# Warning attached to any method that writes to the Windows registry.
REGISTRY_WRITE = {'dangerous': {
    'err_id': ('testcases_javascript_actions',
               'windows_registry',
               'write'),
    'warning': 'Writes to the registry may be dangerous',
    'description': 'Writing to the registry can have many system-level '
                   'consequences and requires careful review.',
    'signing_help': (
        'Please store any settings relevant to your add-on within the '
        # BUG FIX: a space was missing between these two sentences,
        # producing "service.If you are" in the reviewer-facing message.
        'current Firefox profile, ideally using the preferences service. '
        'If you are intentionally changing system settings, consider '
        'searching for a Firefox API which has a similar effect. If no such '
        'API exists, we strongly discourage making any changes which affect '
        'the system outside of the browser.'),
    'signing_severity': 'medium',
    'editors_only': True}}


def registry_key(write=False):
    """Represents a function which returns a registry key object.

    The 'return' handler builds an object carrying the nsIWindowsRegKey
    member map, so chained registry calls such as
    ``key.createChild(...).writeStringValue(...)`` are still checked.
    When `write` is True, the REGISTRY_WRITE warning is attached too.
    """
    # BUG FIX: this previously built an 'nsIWindowMediator' object, which
    # gave chained registry-key calls the wrong member map.  A registry
    # method returns a registry key (nsIWindowsRegKey) -- see the
    # 'nsIWindowsRegKey' entry in INTERFACES, which routes createChild
    # and openChild through this factory.
    res = {'return': lambda wrapper, arguments, traverser: (
        build_quick_xpcom('createInstance', 'nsIWindowsRegKey',
                          traverser, wrapper=True))}
    if write:
        res.update(REGISTRY_WRITE)
    return res
# Entity maps for XPCOM interfaces.  'value' describes an interface's
# members; 'dangerous'/'readonly' handlers fire when add-on code touches
# the member.  Handler lambdas appear to receive (args, traverse-fn,
# err-bundle) -- confirm against the traverser's call sites.
INTERFACES = {
    u'nsISupports': {'value': {}},
    u'mozIStorageBaseStatement':
        {'value':
             {u'execute':
                  {'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION},
              u'executeStep':
                  {'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION}}},
    # Interfaces belonging to the obsolete, pre-Firefox 4 extension
    # manager all share one warning.
    u'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
    u'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
    u'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
    u'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
    u'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
    u'nsICategoryManager':
        {'value': CATEGORY_MANAGER},
    u'nsIAccessibleRetrieval':
        {'dangerous':
             'Using the nsIAccessibleRetrieval interface causes significant '
             'performance degradation in Gecko. It should only be used in '
             'accessibility-related add-ons.',
         'value': {}},
    u'nsIBrowserSearchService':
        {'value':
             {u'currentEngine':
                  {'readonly': search_warning(severity='high')},
              u'defaultEngine':
                  {'readonly': search_warning(severity='high')},
              u'addEngine':
                  {'dangerous': search_warning()},
              u'addEngineWithDetails':
                  {'dangerous': search_warning()},
              u'removeEngine':
                  {'dangerous': search_warning()},
              u'moveEngine':
                  {'dangerous': search_warning()}}},
    u'nsIComponentRegistrar':
        {'value':
             {u'autoRegister':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Bootstrapped add-ons may not register chrome '
                           'manifest files.'},
              u'registerFactory':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care to '
                           'clean up any component registrations at shutdown.'}}},
    u'nsIDNSService': {'value': {u'resolve': entity('nsIDNSService.resolve')}},
    u'nsIJSON':
        {'value':
             {u'encode':
                  {'return': call_definitions.nsIJSON_deprec},
              u'decode':
                  {'return': call_definitions.nsIJSON_deprec}}},
    u'nsIMsgDatabase':
        {'value':
             {u'forceFolderDBClosed': entity('nsIMsgDatabase.forceFolderDBClosed')}},
    u'nsIObserverService':
        {'value':
             {u'addObserver':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care '
                           'to remove any added observers '
                           'at shutdown.'}},
         # NOTE(review): this handler returns a *nested* lambda; a caller
         # invoking it as (a, t, e) gets back a (truthy) function object
         # rather than the message string below.  Confirm whether the
         # outer or the inner lambda is redundant.
         'dangerous': lambda a, t, e:
             lambda t, e: (
                 e.metadata.get('is_jetpack') and
                 'The observer service should not be used directly in SDK '
                 "add-ons. Please use the 'sdk/system/events' module "
                 'instead.')},
    u'nsIPrefBranch':
        {'value': dict(
            # Every mutating method funnels through set_preference and
            # every getter through get_preference.
            tuple((method, {'return': instanceactions.set_preference})
                  for method in (u'setBoolPref',
                                 u'setCharPref',
                                 u'setComplexValue',
                                 u'setIntPref',
                                 u'clearUserPref',
                                 u'deleteBranch',
                                 u'resetBranch')) +
            tuple((method, {'return': instanceactions.get_preference})
                  for method in (u'getBoolPref',
                                 u'getCharPref',
                                 u'getChildList',
                                 u'getComplexValue',
                                 u'getFloatPref',
                                 u'getIntPref',
                                 u'getPrefType',
                                 u'prefHasUserValue')))},
    u'nsIResProtocolHandler':
        {'value':
             {u'setSubstitution':
                  {'dangerous':
                       # Only warn when a non-null substitution is added.
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           a and len(a) > 1 and t(a[1]).get_literal_value() and
                           'Authors of bootstrapped add-ons must take care '
                           'to clean up any added resource substitutions '
                           'at shutdown.'}}},
    u'nsISound': {'value': {'play': entity('nsISound.play')}},
    u'nsIStringBundleService':
        {'value':
             {u'createStringBundle':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care '
                           'to flush the string bundle cache at shutdown.'},
              u'createExtensibleBundle':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care '
                           'to flush the string bundle cache at shutdown.'}}},
    u'nsIStyleSheetService':
        {'value':
             {u'loadAndRegisterSheet':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care to '
                           'unregister registered stylesheets at shutdown.'}}},
    u'nsITransferable':
        {'value':
             {u'init':
                  entity('nsITransferable.init')}},
    u'nsIWindowMediator':
        {'value':
             {'registerNotification':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care '
                           'to remove any added observers at shutdown.'}}},
    u'nsIWindowWatcher':
        {'value':
             {u'addListener':
                  {'dangerous':
                       lambda a, t, e:
                           e.get_resource('em:bootstrap') and
                           'Authors of bootstrapped add-ons must take care '
                           'to remove any added observers at shutdown.'},
              u'openWindow': entity('nsIWindowWatcher.openWindow')}},
    u'nsIProtocolProxyService': {'value': {
        u'registerFilter': {'dangerous': {
            'err_id': ('testcases_javascript_actions',
                       'predefinedentities', 'proxy_filter'),
            'description': (
                'Proxy filters can be used to direct arbitrary network '
                'traffic through remote servers, and may potentially '
                'be abused.',
                'Additionally, to prevent conflicts, the `applyFilter` '
                'method should always return its third argument in cases '
                'when it is not supplying a specific proxy.'),
            'editors_only': True,
            'signing_help': 'Due to the potential for unintended effects, '
                            'any add-on which uses this API must undergo '
                            'manual code review for at least one submission.',
            'signing_severity': 'low'}}}},
    u'nsIWebBrowserPersist':
        {'value':
             {u'saveChannel':
                  {'return': call_definitions.webbrowserpersist},
              u'saveURI':
                  {'return':
                       call_definitions.webbrowserpersist_saveuri},
              u'savePrivacyAwareURI':
                  {'return': call_definitions.webbrowserpersist}}},
    # Thunderbird / mail interfaces.
    u'nsIMsgCompose':
        {'value':
             {u'checkAndPopulateRecipients': entity('nsIMsgCompose.checkAndPopulateRecipients')}},
    u'nsIFolderLookupService':
        {'value':
             {u'getFolderById': entity('nsIFolderLookupService.getFolderById')}},
    u'nsIAbCard':
        {'value':
             {u'kAllowRemoteContentProperty': entity('nsIAbCard.kAllowRemoteContentProperty')}},
    u'nsIAddrDatabase':
        {'value':
             {u'addAllowRemoteContent': entity('nsIAddrDatabase.addAllowRemoteContent')}},
    # Windows registry access.  Child-key factories return further
    # registry keys so chained calls stay checked (see registry_key).
    'nsIWindowsRegKey': {'value': {u'create': REGISTRY_WRITE,
                                   u'createChild': registry_key(write=True),
                                   u'openChild': registry_key(),
                                   u'writeBinaryValue': REGISTRY_WRITE,
                                   u'writeInt64Value': REGISTRY_WRITE,
                                   u'writeIntValue': REGISTRY_WRITE,
                                   u'writeStringValue': REGISTRY_WRITE,
                                   }},
    'nsIPermissionManager': {'value': {
        'add': entity('nsIPermissionManager.add'),
        'addFromPrincipal': entity('nsIPermissionManager.addFromPrincipal'),
        'remove': entity('nsIPermissionManager.remove'),
        'removeFromPrincipal': entity('nsIPermissionManager.removeFromPrincipal'),
        'removeAll': entity('nsIPermissionManager.removeAll'),
        'testExactPermission': entity('nsIPermissionManager.testExactPermission'),
        'testExactPermissionFromPrincipal': entity('nsIPermissionManager.testExactPermissionFromPrincipal'),
        'testPermission': entity('nsIPermissionManager.testPermission'),
        'testPermissionFromPrincipal': entity('nsIPermissionManager.testPermissionFromPrincipal')}},
}
# Entities for interface *names* as they appear on Components.interfaces.
# 'xpcom_map' lazily resolves the member map; 'dangerous' fires as soon
# as the interface itself is referenced.
INTERFACE_ENTITIES = {u'nsIXMLHttpRequest':
                          {'xpcom_map':
                               lambda: GLOBAL_ENTITIES['XMLHttpRequest']},
                      u'nsIProcess': {'dangerous': {
                          'warning': 'The use of nsIProcess is potentially '
                                     'dangerous and requires careful review '
                                     'by an administrative reviewer.',
                          'editors_only': True,
                          'signing_help':
                              'Consider alternatives to directly launching '
                              'executables, such as loading a URL with an '
                              'appropriate external protocol handler, making '
                              'network requests to a local service, or using '
                              'the (as a last resort) `nsIFile.launch()` method '
                              'to open a file with the appropriate application.',
                          'signing_severity': 'high',
                      }},
                      u'nsIDOMGeoGeolocation': {'dangerous':
                          'Use of the geolocation API by add-ons requires '
                          'prompting users for consent.'},
                      u'nsIWindowsRegKey': {'dangerous': {
                          'signing_help':
                              'The information stored in many standard registry '
                              'keys is available via built-in Firefox APIs, '
                              'such as `Services.sysinfo`, `Services.dirsvc`, '
                              'and the environment service '
                              '(http://mzl.la/1OGgCF3). We strongly discourage '
                              'extensions from reading registry information '
                              'which is not available via other Firefox APIs.',
                          'signing_severity': 'low',
                          'editors_only': True,
                          'description': (
                              'Access to the registry is potentially '
                              'dangerous, and should be reviewed with special '
                              'care.')}},
                      }
# Shared warning for access to the certificate / trust databases.
DANGEROUS_CERT_DB = {
    'err_id': ('javascript', 'predefinedentities', 'cert_db'),
    'description': 'Access to the X509 certificate '
                   'database is potentially dangerous '
                   'and requires careful review by an '
                   'administrative reviewer.',
    'editors_only': True,
    'signing_help': 'Please avoid interacting with the certificate and trust '
                    'databases if at all possible. Any add-ons which interact '
                    'with these databases must undergo manual code review '
                    'prior to signing.',
    'signing_severity': 'high',
}

# Both the cert-DB interface names and their contract IDs carry the
# warning above.
INTERFACE_ENTITIES.update(
    (interface, {'dangerous': DANGEROUS_CERT_DB})
    for interface in ('nsIX509CertDB', 'nsIX509CertDB2', 'nsIX509CertList',
                      'nsICertOverrideService'))

CONTRACT_ENTITIES = {
    contract: DANGEROUS_CERT_DB
    for contract in ('@mozilla.org/security/x509certdb;1',
                     '@mozilla.org/security/x509certlist;1',
                     '@mozilla.org/security/certoverride;1')}
# Give every known interface an 'xpcom_map' entry so its member map can
# be resolved lazily from INTERFACE_ENTITIES.
for interface in INTERFACES:
    # The factory pins `interface` at call time; a plain lambda inside
    # this loop would late-bind and make every entry resolve to the last
    # interface iterated.
    def construct(interface):
        def wrap():
            return INTERFACES[interface]
        return wrap
    if interface not in INTERFACE_ENTITIES:
        INTERFACE_ENTITIES[interface] = {}
    INTERFACE_ENTITIES[interface]['xpcom_map'] = construct(interface)
def build_quick_xpcom(method, interface, traverser, wrapper=False):
    """A shortcut to quickly build XPCOM objects on the fly.

    `method` is the constructor name ('createInstance'/'getService');
    `interface` is an interface name, or a sequence whose first element
    is the primary interface and whose remaining elements are merged in
    as if QueryInterface had been called on the result.  Returns the raw
    value unless `wrapper` is True, in which case the JSWrapper is kept.
    """
    extra = ()
    if isinstance(interface, (list, tuple)):
        interface, extra = interface[0], interface[1:]

    def interface_obj(iface):
        # Unknown interfaces fall back to the bare nsISupports map.
        return traverser._build_global(
            name=method,
            entity={'xpcom_map':
                    lambda: INTERFACES.get(iface, INTERFACES['nsISupports'])})

    constructor = xpcom_const(method, pretraversed=True)
    obj = constructor(None, [interface_obj(interface)], traverser)

    for iface in extra:
        # `xpcom_constructor` really needs to be cleaned up so we can avoid
        # this duplication.
        iface = interface_obj(iface)
        iface = traverser._build_global('QueryInterface',
                                        iface.value['xpcom_map']())
        # Merge the extra interface's member map into the object, copying
        # first so shared entity dicts are not mutated.
        obj.value = obj.value.copy()
        value = obj.value['value'].copy()
        value.update(iface.value['value'])
        obj.value.update(iface.value)
        obj.value['value'] = value

    if isinstance(obj, JSWrapper) and not wrapper:
        obj = obj.value
    return obj
# Warning template for template-library methods that bypass output
# sanitization (used for Handlebars.SafeString and Angular's $sce).
UNSAFE_TEMPLATE_METHOD = (
    'The use of `%s` can lead to unsafe '
    'remote code execution, and therefore must be done with '
    'great care, and only with sanitized data.')

# Members of the `Services` object (Services.jsm), mapping each getter
# to the interface name(s) it exposes.
SERVICES = {
    'appinfo': ('nsIXULAppInfo', 'nsIXULRuntime'),
    'appShell': 'nsIAppShellService',
    'blocklist': 'nsIBlocklistService',
    'cache': 'nsICacheService',
    'cache2': 'nsICacheStorageService',
    'clipboard': 'nsIClipboard',
    'console': 'nsIConsoleService',
    'contentPrefs': 'nsIContentPrefService',
    'cookies': ('nsICookieManager', 'nsICookieManager2', 'nsICookieService'),
    'dirsvc': ('nsIDirectoryService', 'nsIProperties'),
    'DOMRequest': 'nsIDOMRequestService',
    'domStorageManager': 'nsIDOMStorageManager',
    'downloads': 'nsIDownloadManager',
    'droppedLinkHandler': 'nsIDroppedLinkHandler',
    'eTLD': 'nsIEffectiveTLDService',
    'focus': 'nsIFocusManager',
    'io': ('nsIIOService', 'nsIIOService2'),
    'locale': 'nsILocaleService',
    'logins': 'nsILoginManager',
    'obs': 'nsIObserverService',
    'perms': 'nsIPermissionManager',
    'prefs': ('nsIPrefBranch2', 'nsIPrefService', 'nsIPrefBranch'),
    'prompt': 'nsIPromptService',
    'scriptloader': 'mozIJSSubScriptLoader',
    'scriptSecurityManager': 'nsIScriptSecurityManager',
    'search': 'nsIBrowserSearchService',
    'startup': 'nsIAppStartup',
    'storage': 'mozIStorageService',
    'strings': 'nsIStringBundleService',
    'sysinfo': 'nsIPropertyBag2',
    'telemetry': 'nsITelemetry',
    'tm': 'nsIThreadManager',
    'uriFixup': 'nsIURIFixup',
    'urlFormatter': 'nsIURLFormatter',
    'vc': 'nsIVersionComparator',
    'wm': 'nsIWindowMediator',
    'ww': 'nsIWindowWatcher',
}

# Replace each interface name with a lazy getService() entity builder.
for key, value in SERVICES.items():
    SERVICES[key] = {'value': partial(build_quick_xpcom,
                                      'getService', value)}
# Warning shared by eval() and the Function constructor.
DANGEROUS_EVAL = {
    'err_id': ('javascript', 'dangerous_global', 'eval'),
    'description': ('Evaluation of strings as code can lead to security '
                    'vulnerabilities and performance issues, even in the '
                    'most innocuous of circumstances. Please avoid using '
                    '`eval` and the `Function` constructor when at all '
                    'possible.',
                    'Alternatives are available for most use cases. See '
                    'https://developer.mozilla.org/en-US/Add-ons/'
                    'Overlay_Extensions/XUL_School/'
                    'Appendix_C:_Avoid_using_eval_in_Add-ons '
                    'for more information.'),
    'signing_help':
        'Please try to avoid evaluating strings as code wherever possible. '
        'Read over the linked document for suggested alternatives. '
        'If you are referencing the `Function` constructor without calling '
        'it, and cannot avoid continuing to do so, consider alternatives '
        'such as calling `Object.getPrototypeOf` on an existing function '
        'object.',
    'signing_severity': 'high'}

# Signing help shared by APIs that expose chrome functions to content
# (Components.utils.cloneInto / exportFunction).
FUNCTION_EXPORT_HELP = (
    'Given the potential security risks of exposing APIs to unprivileged '
    'code, extensions which use these APIs must undergo manual review for at '
    'least one submission. If you are not using these APIs to interact with '
    'content code, please consider alternatives, such as built-in '
    'message passing functionality.')
# GLOBAL_ENTITIES is also representative of the `window` object.
#
# Each entry describes one global name: 'value' maps its members (or is
# a lazy callable taking the traverser), 'dangerous'/'dangerous_on_read'
# attach warnings, 'readonly' controls overwrite warnings, and 'return'
# handles calls to the global.
GLOBAL_ENTITIES = {
    u'window': {'value': lambda t: {'value': GLOBAL_ENTITIES}},
    u'null': {'literal': lambda t: JSWrapper(None, traverser=t)},
    u'Cc': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['classes']},
    u'Ci': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['interfaces']},
    u'Cu': {'readonly': False,
            'value':
                lambda t: GLOBAL_ENTITIES['Components']['value']['utils']},
    # From Services.jsm.
    u'Services': {'value': SERVICES},
    # From Preferences.jsm.
    # TODO: Support calls that return instances of this object which
    # operate on non-root branches.
    u'Preferences': {'value': {
        u'get': {'return': instanceactions.get_preference},
        u'reset': {'return': instanceactions.set_preference},
        u'resetBranch': {'return': instanceactions.set_preference},
        u'set': {'return': instanceactions.set_preference}}},
    u'AddonManager': {
        'readonly': False,
        'value': {
            u'autoUpdateDefault': {'readonly': SECURITY_PREF_MESSAGE},
            u'checkUpdateSecurity': {'readonly': SECURITY_PREF_MESSAGE},
            u'checkUpdateSecurityDefault': {'readonly': SECURITY_PREF_MESSAGE},
            u'updateEnabled': {'readonly': SECURITY_PREF_MESSAGE},
            u'getInstallForFile': ADDON_INSTALL_METHOD,
            u'getInstallForURL': ADDON_INSTALL_METHOD,
            u'installAddonsFromWebpage': ADDON_INSTALL_METHOD}},
    u'ctypes': {'dangerous': {
        'description': (
            'Insufficiently meticulous use of ctypes can lead to serious, '
            'and often exploitable, errors. The use of bundled binary code, '
            'or access to system libraries, may allow for add-ons to '
            'perform unsafe operations. All ctypes use must be carefully '
            'reviewed by a qualified reviewer.'),
        'editors_only': True,
        'signing_help': ('Please try to avoid interacting with or bundling '
                         'native binaries whenever possible. If you are '
                         'bundling binaries for performance reasons, please '
                         'consider alternatives such as Emscripten '
                         '(http://mzl.la/1KrSUh2), JavaScript typed arrays '
                         '(http://mzl.la/1Iw02sr), and Worker threads '
                         '(http://mzl.la/1OGfAcc).',
                         'Any code which makes use of the `ctypes` API '
                         'must undergo manual code review for at least one '
                         'submission.'),
        'signing_severity': 'high'}},
    u'document':
        {'value':
             {u'title':
                  {'overwriteable': True,
                   'readonly': False},
              u'defaultView':
                  {'value': lambda t: {'value': GLOBAL_ENTITIES}},
              u'loadOverlay':
                  # Overlays may only load from chrome: or resource: URLs.
                  {'dangerous':
                       lambda a, t, e:
                           not a or not _get_as_str(t(a[0])).lower()
                           .startswith(('chrome:', 'resource:'))},
              u'write': entity('document.write'),
              u'writeln': entity('document.write')}},
    # The nefarious timeout brothers!
    u'setTimeout': {'dangerous': actions._call_settimeout},
    u'setInterval': {'dangerous': actions._call_settimeout},
    u'require': {'dangerous': actions._call_require},
    u'encodeURI': {'readonly': True},
    u'decodeURI': {'readonly': True},
    u'encodeURIComponent': {'readonly': True},
    u'decodeURIComponent': {'readonly': True},
    u'escape': {'readonly': True},
    u'unescape': {'readonly': True},
    u'isFinite': {'readonly': True},
    u'isNaN': {'readonly': True},
    u'parseFloat': {'readonly': True},
    u'parseInt': {'readonly': True},
    u'eval': {'dangerous': DANGEROUS_EVAL},
    u'Function': {'dangerous': DANGEROUS_EVAL},
    u'Object':
        {'value':
             {u'prototype': {'readonly': is_shared_scope},
              u'constructor':  # Just an experiment for now
                  {'value': lambda t: GLOBAL_ENTITIES['Function']}}},
    u'String':
        {'value':
             {u'prototype': {'readonly': is_shared_scope}},
         'return': call_definitions.string_global},
    u'Array':
        {'value':
             {u'prototype': {'readonly': is_shared_scope}},
         'return': call_definitions.array_global},
    u'Number':
        {'value':
             {u'prototype':
                  {'readonly': is_shared_scope},
              u'POSITIVE_INFINITY':
                  {'value': lambda t: JSWrapper(float('inf'), traverser=t)},
              u'NEGATIVE_INFINITY':
                  {'value': lambda t: JSWrapper(float('-inf'), traverser=t)}},
         'return': call_definitions.number_global},
    u'Boolean':
        {'value':
             {u'prototype': {'readonly': is_shared_scope}},
         'return': call_definitions.boolean_global},
    u'RegExp': {'value': {u'prototype': {'readonly': is_shared_scope}}},
    u'Date': {'value': {u'prototype': {'readonly': is_shared_scope}}},
    u'File': {'value': {u'prototype': {'readonly': is_shared_scope}}},
    u'Math':
        {'value':
             {u'PI':
                  {'value': lambda t: JSWrapper(math.pi, traverser=t)},
              u'E':
                  {'value': lambda t: JSWrapper(math.e, traverser=t)},
              u'LN2':
                  {'value': lambda t: JSWrapper(math.log(2), traverser=t)},
              u'LN10':
                  {'value': lambda t: JSWrapper(math.log(10), traverser=t)},
              u'LOG2E':
                  {'value': lambda t: JSWrapper(math.log(math.e, 2),
                                                traverser=t)},
              u'LOG10E':
                  {'value': lambda t: JSWrapper(math.log10(math.e),
                                                traverser=t)},
              u'SQRT2':
                  {'value': lambda t: JSWrapper(math.sqrt(2), traverser=t)},
              u'SQRT1_2':
                  {'value': lambda t: JSWrapper(math.sqrt(1/2), traverser=t)},
              # NOTE(review): ('num', n) presumably means "coerce to
              # number, default n" -- confirm in python_wrap before
              # relying on it (e.g. atan2's defaults are (0, 1)).
              u'abs':
                  {'return': python_wrap(abs, [('num', 0)])},
              u'acos':
                  {'return': python_wrap(math.acos, [('num', 0)])},
              u'asin':
                  {'return': python_wrap(math.asin, [('num', 0)])},
              u'atan':
                  {'return': python_wrap(math.atan, [('num', 0)])},
              u'atan2':
                  {'return': python_wrap(math.atan2, [('num', 0),
                                                      ('num', 1)])},
              u'ceil':
                  {'return': python_wrap(math.ceil, [('num', 0)])},
              u'cos':
                  {'return': python_wrap(math.cos, [('num', 0)])},
              u'exp':
                  {'return': python_wrap(math.exp, [('num', 0)])},
              u'floor':
                  {'return': python_wrap(math.floor, [('num', 0)])},
              u'log':
                  {'return': call_definitions.math_log},
              u'max':
                  {'return': python_wrap(max, [('num', 0)], nargs=True)},
              u'min':
                  {'return': python_wrap(min, [('num', 0)], nargs=True)},
              u'pow':
                  {'return': python_wrap(math.pow, [('num', 0),
                                                    ('num', 0)])},
              u'random':  # Random always returns 0.5 in our fantasy land.
                  {'return': call_definitions.math_random},
              u'round':
                  {'return': call_definitions.math_round},
              u'sin':
                  {'return': python_wrap(math.sin, [('num', 0)])},
              u'sqrt':
                  {'return': python_wrap(math.sqrt, [('num', 1)])},
              u'tan':
                  {'return': python_wrap(math.tan, [('num', 0)])},
              }},
    u'netscape':
        {'value':
             {u'security':
                  {'value':
                       {u'PrivilegeManager':
                            {'value':
                                 {u'enablePrivilege':
                                      {'dangerous': {
                                          'signing_help':
                                              'Any references to this API must '
                                              'be removed from your extension. '
                                              'Add-ons using this API will not '
                                              'be accepted for signing.',
                                          'signing_severity': 'high',
                                          'description': (
                                              'enablePrivilege is extremely '
                                              'dangerous, and nearly always '
                                              'unnecessary. It should not be '
                                              'used under any circumstances.'),
                                      }}}}}}}},
    u'navigator':
        {'value': {u'wifi': {'dangerous': True},
                   u'geolocation': {'dangerous': True}}},
    u'Components':
        {'dangerous_on_read':
             lambda t, e: bool(e.metadata.get('is_jetpack')),
         'value':
             {u'classes':
                  {'xpcom_wildcard': True,
                   'value':
                       {u'createInstance':
                            {'return': xpcom_const('createInstance')},
                        u'getService':
                            {'return': xpcom_const('getService')}}},
              'utils':
                  {'value': {u'evalInSandbox':
                                 {'dangerous': {
                                     # NOTE(review): string 'true' where
                                     # sibling entries use the bool True;
                                     # both are truthy, so left as-is.
                                     'editors_only': 'true',
                                     'signing_help':
                                         DANGEROUS_EVAL['signing_help'],
                                     'signing_severity': 'low'}},
                             u'cloneInto':
                                 {'dangerous': {
                                     'editors_only': True,
                                     'signing_help': FUNCTION_EXPORT_HELP,
                                     'signing_severity': 'low',
                                     'description': (
                                         'Can be used to expose privileged '
                                         'functionality to unprivileged scopes. '
                                         'Care should be taken to ensure that '
                                         'this is done safely.')}},
                             u'exportFunction':
                                 {'dangerous': {
                                     'editors_only': True,
                                     'signing_help': FUNCTION_EXPORT_HELP,
                                     'signing_severity': 'low',
                                     'description': (
                                         'Can be used to expose privileged '
                                         'functionality to unprivileged scopes. '
                                         'Care should be taken to ensure that '
                                         'this is done safely.')}},
                             u'import':
                                 {'dangerous':
                                      lambda a, t, e:
                                          a and 'ctypes.jsm' in _get_as_str(t(a[0]))},
                             u'waiveXrays':
                                 {'return': call_definitions.js_unwrap}}},
              u'interfaces': {'value': INTERFACE_ENTITIES}}},
    u'extensions': {'dangerous': True},
    u'xpcnativewrappers': {'dangerous': True},
    u'XMLHttpRequest':
        {'value':
             {u'open':
                  # Ban synchronous XHR: flag calls whose third (async)
                  # argument is present and falsy.
                  {'dangerous':
                       lambda a, t, e:
                           a and len(a) >= 3 and
                           not t(a[2]).get_literal_value() and
                           'Synchronous HTTP requests can cause serious UI '
                           'performance problems, especially to users with '
                           'slow network connections.'}}},
    # Global properties are inherently read-only, though this formalizes it.
    u'Infinity':
        {'value':
             lambda t:
                 GLOBAL_ENTITIES[u'Number']['value'][u'POSITIVE_INFINITY']},
    u'NaN': {'readonly': True},
    u'undefined': {'readonly': True},
    u'innerHeight': {'readonly': False},
    u'innerWidth': {'readonly': False},
    u'width': {'readonly': False},
    u'height': {'readonly': False},
    u'top': {'readonly': actions._readonly_top},
    u'mozRequestAnimationFrame': {
        'value': actions._renamed_mozRequestAnimationFrame},
    u'content':
        {'context': 'content',
         'value':
             {u'document':
                  {'value': lambda t: GLOBAL_ENTITIES[u'document']}}},
    u'contentWindow':
        {'context': 'content',
         'value':
             lambda t: {'value': GLOBAL_ENTITIES}},
    u'_content': {'value': lambda t: GLOBAL_ENTITIES[u'content']},
    u'gBrowser':
        {'value':
             {u'contentDocument':
                  {'context': 'content',
                   'value': lambda t: CONTENT_DOCUMENT},
              u'contentWindow':
                  {'value':
                       lambda t: {'value': GLOBAL_ENTITIES}},
              u'selectedTab':
                  {'readonly': False}}},
    u'opener':
        {'value':
             lambda t: {'value': GLOBAL_ENTITIES}},
    u'XPCNativeWrapper':
        {'value':
             {u'unwrap':
                  {'return': call_definitions.js_unwrap}},
         'return': call_definitions.js_wrap},
    # Preference creation in pref defaults files
    u'pref': {'dangerous': actions._call_create_pref},
    u'user_pref': {'dangerous': actions._call_create_pref},
    u'unsafeWindow': {'dangerous': 'The use of unsafeWindow is insecure and '
                                   'should be avoided whenever possible. '
                                   'Consider using a different API if it is '
                                   'available in order to achieve similar '
                                   'functionality.'},
    u'XPCOMUtils':
        {'value': {u'categoryManager': {'value': CATEGORY_MANAGER}}},
    # Mail/news global helpers flagged via the entity machinery.
    # (BUG FIX: literal duplicate keys for updateCharsetPopupMenu and
    # EditorSetDocumentCharacterSet were removed.  In a dict literal the
    # repeated key silently wins, and the values were identical, so this
    # is behavior-preserving.)
    u'updateCharsetPopupMenu': entity('updateCharsetPopupMenu'),
    u'EditorSetDocumentCharacterSet': entity('EditorSetDocumentCharacterSet'),
    u'DisablePhishingWarning': entity('DisablePhishingWarning'),
    u'RoomInfo': entity('RoomInfo'),
    u'FillInHTMLTooltip': entity('FillInHTMLTooltip'),
    u'escapeXMLchars': entity('escapeXMLchars'),
    u'getNonHtmlRecipients': entity('getNonHtmlRecipients'),
    u'awArrowHit': entity('awArrowHit'),
    u'UpdateMailEditCharset': entity('UpdateMailEditCharset'),
    u'InitCharsetMenuCheckMark': entity('InitCharsetMenuCheckMark'),
    u'allowRemoteContentForSender': entity('allowRemoteContentForSender'),
    u'allowRemoteContentForSite': entity('allowRemoteContentForSite'),
    u'createNewHeaderView': entity('createNewHeaderView'),
    u'MarionetteComponent': {'dangerous_on_read': MARIONETTE_MESSAGE},
    u'MarionetteServer': {'dangerous_on_read': MARIONETTE_MESSAGE},
    'Application': {'dangerous_on_read': fuel_error},
    'NewTabURL': {'value': {'override': entity('NewTabURL.override')}},
    # Common third-party libraries
    'Handlebars': {
        'value': {
            'SafeString':
                {'dangerous':
                     UNSAFE_TEMPLATE_METHOD % 'Handlebars.SafeString'}}},
    # Angular
    '$sce': {
        'value': {
            'trustAs': {'dangerous':
                            UNSAFE_TEMPLATE_METHOD % '$sce.trustAs'},
            'trustAsHTML': {'dangerous':
                                UNSAFE_TEMPLATE_METHOD % '$sce.trustAsHTML'}}},
}

# `content.document` shares the entity map of the top-level `document`.
CONTENT_DOCUMENT = GLOBAL_ENTITIES[u'content']['value'][u'document']
| {
"repo_name": "magopian/amo-validator",
"path": "validator/testcases/javascript/predefinedentities.py",
"copies": "1",
"size": "46367",
"license": "bsd-3-clause",
"hash": -519853311760494660,
"line_mean": 43.4554170662,
"line_max": 132,
"alpha_frac": 0.5405352945,
"autogenerated": false,
"ratio": 4.401651794190241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011113497509362249,
"num_lines": 1043
} |
from functools import partial
import math
import requests
from . import csfd
COLORS = {
'20m': 'blue',
'30m': 'sky',
'45m': 'green',
'1h': 'lime',
'1.5h': 'yellow',
'2h': 'orange',
'2.5h': 'red',
'3+h': 'purple',
}
AEROVOD_LABEL = dict(name='Aerovod', color='black')
class InvalidUsernameError(ValueError):
pass
def create_session(token, key):
    """Create a requests session pre-configured for the Trello API.

    Every request made through the returned session gets its URL resolved
    against the Trello API root, has HTTP errors raised, and returns the
    decoded JSON body instead of a Response object. The token/key auth
    pair is sent as query parameters on every call.
    """
    session = requests.Session()
    session.params = dict(token=token, key=key)
    # Capture the bound method before replacing the attribute.
    original_request = session.request

    def api_request(method, url, *args, **kwargs):
        full_url = 'https://trello.com/1/' + url.lstrip('/')
        response = original_request(method, full_url, *args, **kwargs)
        response.raise_for_status()
        return response.json()

    session.request = api_request
    return session
def card_exists(cards, film):
    """Return the id of the first card matching *film*, or None.

    A card matches when the film title appears in its name or the film
    url appears in its description.
    """
    matching_ids = (
        card['id'] for card in cards
        if film['title'] in card['name'] or film['url'] in card['desc']
    )
    return next(matching_ids, None)
def get_inbox_id(lists):
    """Return the id of the board's first list, treated as the inbox."""
    inbox = lists[0]
    return inbox['id']
def prepare_card_data(list_id, film):
    """Build the Trello payload for creating a new card for *film*.

    The card is named after the film, carries the film url in its
    description, and is placed at the top of the list *list_id*.
    """
    return {
        'name': film['title'],
        'desc': film['url'],
        'idList': list_id,
        'pos': 'top',
    }
def prepare_updated_card_data():
    """Payload that bumps an existing card back to the top of its list."""
    return {'pos': 'top'}
def not_in_members(username, members):
    """True when *username* is not among the members' usernames."""
    return all(member['username'] != username for member in members)
def prepare_duration_labels(durations):
    """Yield a Trello label payload for each duration's bracket."""
    for bracket in map(get_duration_bracket, durations):
        yield {'name': bracket, 'color': COLORS[bracket]}
def get_duration_bracket(duration):
    """Map a film duration in minutes to its bracket name (a COLORS key).

    Durations up to an hour are bracketed by their exact value; longer
    ones are first rounded down to the nearest 10 minutes.
    """
    if duration <= 20:
        return '20m'
    if duration <= 30:
        return '30m'
    if duration <= 45:
        return '45m'
    if duration <= 60:
        return '1h'
    rounded_down = math.floor(duration / 10.0) * 10
    for limit, name in ((90, '1.5h'), (120, '2h'), (150, '2.5h')):
        if rounded_down <= limit:
            return name
    return '3+h'
def get_missing_labels(existing_labels, labels):
    """Return the labels whose names are absent from *existing_labels*."""
    known_names = {existing['name'] for existing in existing_labels}
    missing = []
    for label in labels:
        if label['name'] not in known_names:
            missing.append(label)
    return missing
def get_missing_attached_urls(existing_attachments, urls):
    """Return the urls not yet attached to the card.

    Comparison happens on CSFD-normalized urls; urls that fail to
    normalize are compared verbatim. Only plain url attachments
    (name == url) are considered already attached.
    """
    def safe_normalize(url):
        try:
            return csfd.normalize_url(url)
        except csfd.InvalidURLError:
            return url

    attached = set()
    for attachment in existing_attachments:
        if attachment['name'] == attachment['url']:
            attached.add(safe_normalize(attachment['name']))
    return [url for url in urls if safe_normalize(url) not in attached]
def has_poster(attachments):
    """True when any attachment carries at least one preview image."""
    return any(attachment['previews'] for attachment in attachments)
| {
"repo_name": "honzajavorek/film2trello",
"path": "film2trello/trello.py",
"copies": "1",
"size": "2717",
"license": "mit",
"hash": -8797006892851157000,
"line_mean": 21.8319327731,
"line_max": 75,
"alpha_frac": 0.6013986014,
"autogenerated": false,
"ratio": 3.514877102199224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9616275703599224,
"avg_score": 0,
"num_lines": 119
} |
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import AffineRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Registration callback: redraw both 3D point clouds on *ax*.

    Invoked by reg.register() in main() with the current iteration number
    and objective value; *X* is the fixed target cloud (red), *Y* the
    moving source cloud (blue). *ax* is pre-bound via functools.partial.
    """
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    ax.text2D(0.87, 0.92, 'Iteration: {:d}\nQ: {:06.4f}'.format(
        iteration, error), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    # Brief pause so the figure window refreshes between iterations.
    plt.pause(0.001)
def main():
    """Run 3D affine CPD on two stacked copies of the 2D fish clouds.

    Each 2D point set is lifted into 3D twice -- once in the z=0 plane
    and once in the z=1 plane -- before registration.
    """
    def stack_planes(points_2d):
        # Duplicate the cloud at z=0 and z=1.
        zeros = np.zeros((points_2d.shape[0], 1))
        ones = np.ones((points_2d.shape[0], 1))
        return np.vstack((np.hstack((points_2d, zeros)),
                          np.hstack((points_2d, ones))))

    X = stack_planes(np.loadtxt('data/fish_target.txt'))
    Y = stack_planes(np.loadtxt('data/fish_source.txt'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = AffineRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()


if __name__ == '__main__':
    main()
| {
"repo_name": "siavashk/pycpd",
"path": "examples/fish_affine_3D.py",
"copies": "1",
"size": "1451",
"license": "mit",
"hash": -3578270484543069700,
"line_mean": 31.9772727273,
"line_max": 128,
"alpha_frac": 0.6085458305,
"autogenerated": false,
"ratio": 2.8450980392156864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8952762967671994,
"avg_score": 0.00017618040873854828,
"num_lines": 44
} |
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import DeformableRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Registration callback: redraw both 3D point clouds on *ax*.

    Invoked by reg.register() in main(); *X* is the fixed target cloud
    (red), *Y* the moving source cloud (blue). *ax* is pre-bound via
    functools.partial. Only the iteration number is displayed here.
    """
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    ax.text2D(0.87, 0.92, 'Iteration: {:d}'.format(
        iteration), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    # Brief pause so the figure window refreshes between iterations.
    plt.pause(0.001)
def main():
    """Run 3D deformable CPD on two stacked copies of the 2D fish clouds.

    Each 2D point set is lifted into 3D twice -- once in the z=0 plane
    and once in the z=1 plane -- before registration.
    """
    def stack_planes(points_2d):
        # Duplicate the cloud at z=0 and z=1.
        zeros = np.zeros((points_2d.shape[0], 1))
        ones = np.ones((points_2d.shape[0], 1))
        return np.vstack((np.hstack((points_2d, zeros)),
                          np.hstack((points_2d, ones))))

    X = stack_planes(np.loadtxt('data/fish_target.txt'))
    Y = stack_planes(np.loadtxt('data/fish_source.txt'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = DeformableRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()


if __name__ == '__main__':
    main()
| {
"repo_name": "siavashk/pycpd",
"path": "examples/fish_deformable_3D.py",
"copies": "1",
"size": "1439",
"license": "mit",
"hash": -4401594938846702600,
"line_mean": 31.7045454545,
"line_max": 121,
"alpha_frac": 0.6115357887,
"autogenerated": false,
"ratio": 2.8722554890219563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.898285983211838,
"avg_score": 0.00018628912071535022,
"num_lines": 44
} |
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import RigidRegistration
import numpy as np
def visualize(iteration, error, X, Y, ax):
    """Registration callback: redraw both 3D point clouds on *ax*.

    Invoked by reg.register() in main() with the current iteration number
    and objective value; *X* is the fixed target cloud (red), *Y* the
    moving source cloud (blue). *ax* is pre-bound via functools.partial.
    """
    plt.cla()
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
    ax.text2D(0.87, 0.92, 'Iteration: {:d}\nQ: {:06.4f}'.format(
        iteration, error), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    # Brief pause so the figure window refreshes between iterations.
    plt.pause(0.001)
def main():
    """Run 3D rigid CPD on the 2D fish clouds lifted into the z=0 plane."""
    def lift_to_3d(points_2d):
        # Append a zero z-coordinate to every 2D point.
        return np.hstack((points_2d, np.zeros((points_2d.shape[0], 1))))

    X = lift_to_3d(np.loadtxt('data/fish_target.txt'))
    Y = lift_to_3d(np.loadtxt('data/fish_source.txt'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)
    reg = RigidRegistration(X=X, Y=Y)
    reg.register(callback)
    plt.show()


if __name__ == '__main__':
    main()
| {
"repo_name": "siavashk/pycpd",
"path": "examples/fish_rigid_3D.py",
"copies": "1",
"size": "1197",
"license": "mit",
"hash": -25547731728966140,
"line_mean": 30.5,
"line_max": 128,
"alpha_frac": 0.6207184628,
"autogenerated": false,
"ratio": 2.9776119402985075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40983304030985074,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
import re
import pandas as pd
class HyperparameterExplorer:
def __init__(self, X, y, classifier, score_name, primary_hyperparameter,
validation_split=0.1, test_X=None, test_y=None,
use_prev_best_weights=True, ):
# model_partial is a model that's missing one or more parameters.
self.all_training_X = X # reserved for final training after hyper sweep.
self.all_training_y = y # reserved for final training after hyper sweep.
self.N, self.d = X.shape
self.summary = pd.DataFrame()
self.primary_hyperparameter = primary_hyperparameter
# split off a validation set from X
split_index = int(self.N*(1-validation_split))
print("{} of {} points from training are reserved for "
"validation".format(self.N - split_index, self.N))
self.train_X = X[0:split_index, :]
self.validation_X = X[split_index:, :]
self.train_y = y[0:split_index]
self.validation_y = y[split_index:]
print('variances of all training data: {}'.format(np.var(y)))
print('variances of split-off training & validation '
'data: {}, {}'.format(np.var(self.train_y),
np.var(self.validation_y)))
if test_X is not None and test_y is not None:
self.test_X = test_X
self.test_y = test_y
self.model = partial(classifier,
X=self.train_X, y=self.train_y,
test_X=test_X, test_y=test_y)
else:
self.model = partial(classifier, X=self.train_X, y=self.train_y)
# keep track of model numbers.
self.num_models = 0
self.models = {}
# the score that will be used to determine which model is best.
self.training_score_name = score_name
self.validation_score_name = re.sub("training", "validation", score_name)
self.score_name = re.sub("training", "", score_name)
self.use_prev_best_weights = use_prev_best_weights
def train_model(self, kernel_kwargs, model_kwargs):
# train model
# check that model was made
try:
m = self.model(kernel_kwargs=kernel_kwargs, **model_kwargs)
# set weights to the best found so far
# Note: this is silly for non-iterative solvers like Ridge.
if self.use_prev_best_weights:
best_weights = self.best_weights_given_hyperparam(m.lam)
if "W" in m.__dict__.keys() and best_weights is not None:
# TODO: new fun best_weights_given_lam()
m.W = best_weights.copy()
if m.is_sparse():
m.W = sp.csc_matrix(m.W)
elif ("w" in m.__dict__.keys()) and best_weights is not None:
m.w = best_weights.copy()
except NameError:
print("model failed for {}".format(**model_kwargs))
self.num_models += 1
m.run()
# save outcome of fit. Includes training data 0/1 loss, etc.
self.models[self.num_models] = m
# get results
outcome = m.results_row()
if len(outcome) < 1:
print("model didn't work..?")
# Save the model number for so we can look up the model later
outcome['model number'] = [self.num_models]
print('{}:{}'.format(self.training_score_name,
outcome[self.training_score_name][0]))
validation_results = m.apply_model(X=self.validation_X,
y=self.validation_y,
data_name='validation')
validation_results = pd.DataFrame(validation_results)
v_columns = [c for c in validation_results.columns
if 'validation' in c or self.primary_hyperparameter == c]
outcome = pd.merge(pd.DataFrame(outcome),
validation_results[v_columns])
# Append this new model's findings onto the old model.
self.summary = pd.concat([self.summary, outcome])
# Oh, silly Pandas:
self.summary.reset_index(drop=True, inplace=True)
# Plot log loss vs time if applicable.
if "log loss" in self.summary.columns:
m.plot_test_and_train_log_loss_during_fitting()
m.plot_test_and_train_01_loss_during_fitting()
def best(self, value='model'):
"""
Find the best model according to the validation data
via the Pandas DataFrame.
:param value: a string describing what you want from the best model.
"""
# get the index of the model with the best score
i = self.summary[[self.validation_score_name]].idxmin()[0]
i = self.summary['model number'][i]
if value == 'model number':
return i
summary_row = self.summary[self.summary['model number'] == i]
if value == 'summary':
return summary_row.T
best_score = summary_row[self.validation_score_name]
if value == 'score':
return best_score
model = self.models[i]
if value == 'model':
return model
if value == 'weights':
return model.get_weights()
print("best {} = {}; found in model {}".format(
self.validation_score_name, best_score, i))
return model
def best_results_for_each_lambda(self):
"""
Group summary results by lambda and return a summary of the best
validation score result for each lambda tested so far.
"""
if self.summary.shape[0] == 0:
return None
# best losses at each lambda:
idx = self.summary.groupby(['lambda'])[self.validation_score_name].\
transform(min) == self.summary[self.validation_score_name]
return self.summary[idx]
def best_weights_given_hyperparam(self, hyperparam):
"""
Return the best weights seen for your lambda.
If your lambda hasn't been tested, return the best weights for the
closest lambda.
"""
best_scores = self.best_results_for_each_lambda()
if best_scores is None:
return None
# closest lambda value tested so far:
def closest_lambda(x):
""" lambda function: gets the most similar lambda in the dataframe"""
nonlocal lam
return abs(x-lam)
closest_lambda = min(best_scores['lambda'].reset_index(drop=True),
key=closest_lambda)
closest_score = \
best_scores[best_scores['lambda'] ==
closest_lambda][self.validation_score_name].reset_index(drop=True)[0]
print("returning best weights for lambda = {}. "
"Corresponded to {} = {}".format(
closest_lambda, self.validation_score_name, closest_score))
closest_row = \
self.summary[(self.summary['lambda'] == closest_lambda) &
(self.summary[self.validation_score_name] ==
closest_score)]
assert closest_row.shape[0] == 1
return closest_row['weights'].reset_index(drop=True)[0].copy()
def plot_fits(self, df = None, x='lambda',
y1=None, y2=None, filename=None, xlim=None, ylim=None,
logx=True):
if df is None:
df = self.summary
if y1 == None:
y1 = self.validation_score_name
if y2 == None:
y2 = self.training_score_name
fig, ax = plt.subplots(1, 1, figsize=(4, 3))
plot_data = df.sort(x)
if logx:
plt.semilogx(plot_data[x], plot_data[y1],
linestyle='--', marker='o', c='g')
plt.semilogx(plot_data[x], plot_data[y2],
linestyle='--', marker='o', c='grey')
else:
plt.plot(plot_data[x], plot_data[y1],
linestyle='--', marker='o', c='g')
plt.plot(plot_data[x], plot_data[y2],
linestyle='--', marker='o', c='grey')
plt.legend(loc='best')
plt.xlabel(x)
plt.ylabel(self.score_name)
ax.axhline(y=0, color='k')
if xlim:
ax.set_xlim([xlim[0],xlim[1]])
if ylim:
ax.set_ylim([ylim[0],ylim[1]])
plt.tight_layout()
if filename is not None:
fig.savefig(filename + '.pdf')
def plot_best_fits(self, y1=None, y2=None, logx=True):
df = self.best_results_for_each_lambda()
self.plot_fits(df=df, y1=y1, y2=y2, xlim=None, ylim=None, logx=logx)
def train_on_whole_training_set(self, max_steps=None, delta_percent=None):
# get the best model conditions from the hyperparameter exploration,
# and print it to ensure the user's hyperparameters match the best
# models's.:
#print("best cross-validation model's info:")
#print(self.best('summary'))
print("getting best model.")
self.final_model = self.best('model').copy()
print(self.final_model.results_row())
# replace the smaller training sets with the whole training set.
self.final_model.replace_X_and_y(self.all_training_X,
self.all_training_y)
if max_steps:
self.final_model.max_steps = max_steps
if delta_percent:
self.delta_percent = delta_percent
# find the best weights using all the data
self.final_model.run()
def evaluate_test_data(self):
test_results = self.final_model.apply_model(
X = self.test_X, y = self.test_y, data_name="test")
print(pd.DataFrame(test_results).T)
| {
"repo_name": "JanetMatsen/Machine_Learning_CSE_546",
"path": "HW3/code/hyperparameter_explorer_semi-orig.py",
"copies": "1",
"size": "9961",
"license": "mit",
"hash": -4863505734092000000,
"line_mean": 39.4918699187,
"line_max": 93,
"alpha_frac": 0.5583776729,
"autogenerated": false,
"ratio": 3.9512098373661244,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9979290213673626,
"avg_score": 0.00605945931849934,
"num_lines": 246
} |
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
import re
import pandas as pd
class HyperparameterExplorer:
def __init__(self, X, y, model, score_name, validation_split=0.1,
test_X=None, test_y=None, use_prev_best_weights=True):
# model_partial is a model that's missing one or more parameters.
self.all_training_X = X # reserved for final training after hyper sweep.
self.all_training_y = y # reserved for final training after hyper sweep.
self.N, self.d = X.shape
self.summary = pd.DataFrame()
# split off a validation set from X
split_index = int(self.N*(1-validation_split))
print("{} of {} points from training are reserved for "
"validation".format(self.N - split_index, self.N))
self.train_X = X[0:split_index, :]
self.validation_X = X[split_index:, :]
self.train_y = y[0:split_index]
self.validation_y = y[split_index:]
print('variances of all training data: {}'.format(np.var(y)))
print('variances of split-off training & validation '
'data: {}, {}'.format(np.var(self.train_y),
np.var(self.validation_y)))
self.model = partial(model, X=self.train_X, y=self.train_y)
if test_X is not None and test_y is not None:
self.test_X = test_X
self.test_y = test_y
self.model = partial(self.model, test_X=test_X, test_y=test_y)
# keep track of model numbers.
self.num_models = 0
self.models = {}
# the score that will be used to determine which model is best.
self.training_score_name = score_name
self.validation_score_name = re.sub("training", "validation", score_name)
self.score_name = re.sub("training", "", score_name)
self.use_prev_best_weights = use_prev_best_weights
def train_model(self, **kwargs):
# train model
# check that model was made
try:
m = self.model(**kwargs)
# set weights to the best found so far
# Note: this is silly for non-iterative solvers like Ridge.
if self.use_prev_best_weights:
best_weights = self.best_weights_given_lam(m.lam)
if "W" in m.__dict__.keys() and best_weights is not None:
# TODO: new fun best_weights_given_lam()
m.W = best_weights.copy()
if m.is_sparse():
m.W = sp.csc_matrix(m.W)
elif ("w" in m.__dict__.keys()) and best_weights is not None:
m.w = best_weights.copy()
except NameError:
print("model failed for {}".format(**kwargs))
self.num_models += 1
m.run()
# save outcome of fit. Includes training data 0/1 loss, etc.
self.models[self.num_models] = m
# get results
outcome = m.results_row()
if len(outcome) < 1:
print("model didn't work..?")
# Save the model number for so we can look up the model later
outcome['model number'] = [self.num_models]
print('{}:{}'.format(self.training_score_name,
outcome[self.training_score_name][0]))
validation_results = m.apply_model(X=self.validation_X,
y=self.validation_y,
data_name='validation')
validation_results = pd.DataFrame(validation_results)
v_columns = [c for c in validation_results.columns
if 'validation' in c or 'lambda' == c]
outcome = pd.merge(pd.DataFrame(outcome),
validation_results[v_columns])
# Append this new model's findings onto the old model.
self.summary = pd.concat([self.summary, outcome])
# Oh, silly Pandas:
self.summary.reset_index(drop=True, inplace=True)
# Plot log loss vs time if applicable.
if "log loss" in self.summary.columns:
m.plot_test_and_train_log_loss_during_fitting()
m.plot_test_and_train_01_loss_during_fitting()
def best(self, value='model'):
"""
Find the best model according to the validation data
via the Pandas DataFrame.
:param value: a string describing what you want from the best model.
"""
# get the index of the model with the best score
i = self.summary[[self.validation_score_name]].idxmin()[0]
i = self.summary['model number'][i]
if value == 'model number':
return i
summary_row = self.summary[self.summary['model number'] == i]
if value == 'summary':
return summary_row.T
best_score = summary_row[self.validation_score_name]
if value == 'score':
return best_score
model = self.models[i]
if value == 'model':
return model
if value == 'weights':
return model.get_weights()
print("best {} = {}; found in model {}".format(
self.validation_score_name, best_score, i))
return model
def best_results_for_each_lambda(self):
"""
Group summary results by lambda and return a summary of the best
validation score result for each lambda tested so far.
"""
if self.summary.shape[0] == 0:
return None
# best losses at each lambda:
idx = self.summary.groupby(['lambda'])[self.validation_score_name].\
transform(min) == self.summary[self.validation_score_name]
return self.summary[idx]
def best_weights_given_lam(self, lam):
"""
Return the best weights seen for your lambda.
If your lambda hasn't been tested, return the best weights for the
closest lambda.
"""
best_scores = self.best_results_for_each_lambda()
if best_scores is None:
return None
# closest lambda value tested so far:
#c = min(myList, key=lambda x:abs(x-myNumber))
def closest_lambda(x):
""" lambda function: gets the most similar lambda in the dataframe"""
nonlocal lam
return abs(x-lam)
closest_lambda = min(best_scores['lambda'].reset_index(drop=True),
key=closest_lambda)
closest_score = \
best_scores[best_scores['lambda'] ==
closest_lambda][self.validation_score_name].reset_index(drop=True)[0]
print("returning best weights for lambda = {}. "
"Corresponded to {} = {}".format(
closest_lambda, self.validation_score_name, closest_score))
closest_row = \
self.summary[(self.summary['lambda'] == closest_lambda) &
(self.summary[self.validation_score_name] ==
closest_score)]
assert closest_row.shape[0] == 1
return closest_row['weights'].reset_index(drop=True)[0].copy()
def plot_fits(self, df = None, x='lambda',
y1=None, y2=None, filename=None, xlim=None, ylim=None,
logx=True):
if df is None:
df = self.summary
if y1 == None:
y1 = self.validation_score_name
if y2 == None:
y2 = self.training_score_name
fig, ax = plt.subplots(1, 1, figsize=(4, 3))
plot_data = df.sort(x)
if logx:
plt.semilogx(plot_data[x], plot_data[y1],
linestyle='--', marker='o', c='g')
plt.semilogx(plot_data[x], plot_data[y2],
linestyle='--', marker='o', c='grey')
else:
plt.plot(plot_data[x], plot_data[y1],
linestyle='--', marker='o', c='g')
plt.plot(plot_data[x], plot_data[y2],
linestyle='--', marker='o', c='grey')
plt.legend(loc='best')
plt.xlabel(x)
plt.ylabel(self.score_name)
ax.axhline(y=0, color='k')
if xlim:
ax.set_xlim([xlim[0],xlim[1]])
if ylim:
ax.set_ylim([ylim[0],ylim[1]])
plt.tight_layout()
if filename is not None:
fig.savefig(filename + '.pdf')
def plot_best_fits(self, y1=None, y2=None, logx=True):
df = self.best_results_for_each_lambda()
self.plot_fits(df=df, y1=y1, y2=y2, xlim=None, ylim=None, logx=logx)
def train_on_whole_training_set(self, max_iter=None, delta_percent=None):
# get the best model conditions from the hyperparameter exploration,
# and print it to ensure the user's hyperparameters match the best
# models's.:
#print("best cross-validation model's info:")
#print(self.best('summary'))
print("getting best model.")
self.final_model = self.best('model').copy()
print(self.final_model.results_row())
# replace the smaller training sets with the whole training set.
self.final_model.replace_X_and_y(self.all_training_X,
self.all_training_y)
if max_iter:
self.final_model.max_iter = max_iter
if delta_percent:
self.delta_percent = delta_percent
# find the best weights using all the data
self.final_model.run()
def evaluate_test_data(self):
test_results = self.final_model.apply_model(
X = self.test_X, y = self.test_y, data_name="test")
print(pd.DataFrame(test_results).T)
| {
"repo_name": "JanetMatsen/Machine_Learning_CSE_546",
"path": "HW2/code/hyperparameter_explorer.py",
"copies": "1",
"size": "9683",
"license": "mit",
"hash": -8137249052814065000,
"line_mean": 39.0123966942,
"line_max": 93,
"alpha_frac": 0.5571620366,
"autogenerated": false,
"ratio": 3.9028617492946394,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9929239372326675,
"avg_score": 0.006156882713592819,
"num_lines": 242
} |
from functools import partial
import mimetypes
import os
import unittest
from tempfile import NamedTemporaryFile
from pydub import AudioSegment
from pydub.utils import (
db_to_float,
ratio_to_db,
make_chunks,
mediainfo,
get_encoder_name,
)
from pydub.exceptions import (
InvalidTag,
InvalidID3TagVersion,
InvalidDuration,
CouldntDecodeError,
)
from pydub.silence import (
detect_silence,
)
from pydub.generators import (
Sine,
Square,
Pulse,
Triangle,
Sawtooth,
WhiteNoise,
)
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class UtilityTests(unittest.TestCase):
    """Checks for the dB <-> ratio conversion helpers."""

    def test_db_float_conversions(self):
        # Spot values, amplitude mode (the default).
        self.assertEqual(db_to_float(20), 10)
        self.assertEqual(db_to_float(0), 1)
        self.assertEqual(ratio_to_db(1), 0)
        self.assertEqual(ratio_to_db(10), 20)
        # Spot values, power mode.
        self.assertEqual(db_to_float(10, using_amplitude=False), 10)
        self.assertEqual(ratio_to_db(10, using_amplitude=False), 10)
        # Round trips in both directions, amplitude mode.
        self.assertEqual(3, db_to_float(ratio_to_db(3)))
        self.assertEqual(12, ratio_to_db(db_to_float(12)))
        # Round trips, power mode.
        self.assertEqual(
            3, db_to_float(ratio_to_db(3, using_amplitude=False),
                           using_amplitude=False))
        self.assertEqual(
            12, ratio_to_db(db_to_float(12, using_amplitude=False),
                            using_amplitude=False))
class FileAccessTests(unittest.TestCase):
    """Audio must be loadable both from a path and from a file object."""

    def setUp(self):
        self.mp3_path = os.path.join(data_dir, 'test1.mp3')

    def test_audio_segment_from_mp3(self):
        """Loading by path and by open handle must give identical audio."""
        seg1 = AudioSegment.from_mp3(os.path.join(data_dir, 'test1.mp3'))
        # Bug fix: close the handle deterministically instead of leaking
        # it for the rest of the process (the original never closed it).
        with open(os.path.join(data_dir, 'test1.mp3'), 'rb') as mp3_file:
            seg2 = AudioSegment.from_mp3(mp3_file)
        self.assertEqual(len(seg1), len(seg2))
        self.assertTrue(seg1._data == seg2._data)
        self.assertTrue(len(seg1) > 0)
test1wav = test1 = test2 = test3 = testparty = None
class AudioSegmentTests(unittest.TestCase):
    def setUp(self):
        """Decode the mp3 fixtures once and cache them in module globals."""
        global test1, test2, test3, testparty
        # Decoding is slow, so only the first test that runs pays for it.
        if not test1:
            test1 = AudioSegment.from_mp3(os.path.join(data_dir, 'test1.mp3'))
            test2 = AudioSegment.from_mp3(os.path.join(data_dir, 'test2.mp3'))
            test3 = AudioSegment.from_mp3(os.path.join(data_dir, 'test3.mp3'))
            testparty = AudioSegment.from_mp3(
                os.path.join(data_dir, 'party.mp3'))
        self.seg1 = test1
        self.seg2 = test2
        self.seg3 = test3
        self.mp3_seg_party = testparty
        # Paths only; these files are decoded inside the tests that use them.
        self.ogg_file_path = os.path.join(data_dir, 'bach.ogg')
        self.mp4_file_path = os.path.join(data_dir, 'creative_common.mp4')
        self.mp3_file_path = os.path.join(data_dir, 'party.mp3')
def assertWithinRange(self, val, lower_bound, upper_bound):
self.assertTrue(lower_bound < val < upper_bound,
"%s is not in the acceptable range: %s - %s" %
(val, lower_bound, upper_bound))
def assertWithinTolerance(self, val, expected, tolerance=None,
percentage=None):
if percentage is not None:
tolerance = val * percentage
lower_bound = val - tolerance
upper_bound = val + tolerance
self.assertWithinRange(val, lower_bound, upper_bound)
    def test_concat(self):
        """`seg1 + seg2` should append one segment after the other."""
        catted_audio = self.seg1 + self.seg2
        expected = len(self.seg1) + len(self.seg2)
        self.assertWithinTolerance(len(catted_audio), expected, 1)
    def test_append(self):
        """append() with a crossfade overlaps the segments by that many ms."""
        merged1 = self.seg3.append(self.seg1, crossfade=100)
        merged2 = self.seg3.append(self.seg2, crossfade=100)
        self.assertEqual(len(merged1), len(self.seg1) + len(self.seg3) - 100)
        self.assertEqual(len(merged2), len(self.seg2) + len(self.seg3) - 100)
    def test_volume_with_add_sub(self):
        """`seg - n` / `seg + n` change the gain by n dB (checked via rms)."""
        quieter = self.seg1 - 6
        self.assertAlmostEqual(ratio_to_db(quieter.rms, self.seg1.rms),
                               -6,
                               places=2)
        louder = quieter + 2.5
        self.assertAlmostEqual(ratio_to_db(louder.rms, quieter.rms),
                               2.5,
                               places=2)
    def test_repeat_with_multiply(self):
        """`seg * int` repeats the audio; length may be off by ~a frame."""
        seg = self.seg1 * 3
        expected = len(self.seg1) * 3
        # Allow +/- 2 ms of slop from frame-boundary rounding.
        expected = (expected - 2, expected + 2)
        self.assertTrue(expected[0] < len(seg) < expected[1])
    def test_overlay(self):
        """`seg1 * seg2` must equal overlay(seg2, loop=True), byte for byte."""
        seg_mult = self.seg1[:5000] * self.seg2[:3000]
        seg_over = self.seg1[:5000].overlay(self.seg2[:3000], loop=True)
        self.assertEqual(len(seg_mult), len(seg_over))
        self.assertTrue(seg_mult._data == seg_over._data)
        # Overlaying never extends past the base segment.
        self.assertEqual(len(seg_mult), 5000)
        self.assertEqual(len(seg_over), 5000)
    def test_overlay_times(self):
        """overlay(times=n) must equal n manual overlays spaced 1000 ms apart.

        Loops past the end of the base segment are silently dropped, so
        very large `times` values behave the same as the maximum that fits.
        """
        # infinite
        seg_mult = self.seg1[:5000] * self.seg2[:3000]
        seg_over = self.seg1[:5000].overlay(self.seg2[:3000], times=99999999)
        self.assertEqual(len(seg_mult), len(seg_over))
        self.assertEqual(len(seg_over), 5000)
        self.assertTrue(seg_mult._data == seg_over._data)
        # no times, no-op
        piece = self.seg2[:1000]
        seg_manual = self.seg1[:4000]
        seg_over = self.seg1[:4000].overlay(piece, times=0)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # 1 loop
        seg_manual = self.seg1[:4000].overlay(piece, position=500)
        seg_over = self.seg1[:4000].overlay(piece, times=1)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # 2 loops
        seg_manual = self.seg1[:4000].overlay(piece, position=500) \
            .overlay(piece, position=1500)
        seg_over = self.seg1[:4000].overlay(piece, times=2)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # 3 loops
        seg_manual = self.seg1[:4000].overlay(piece, position=500) \
            .overlay(piece, position=1500).overlay(piece, position=2500)
        seg_over = self.seg1[:4000].overlay(piece, times=3)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # 4 loops (last will pass end)
        seg_manual = self.seg1[:4000].overlay(piece, position=500) \
            .overlay(piece, position=1500).overlay(piece, position=2500) \
            .overlay(piece, position=3500)
        seg_over = self.seg1[:4000].overlay(piece, times=4)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # 5 loops (last won't happen b/c past end)
        seg_manual = self.seg1[:4000].overlay(piece, position=500) \
            .overlay(piece, position=1500).overlay(piece, position=2500) \
            .overlay(piece, position=3500).overlay(piece, position=3500)
        seg_over = self.seg1[:4000].overlay(piece, times=5)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
        # ~infinite, same (as 4 and 5 really)
        seg_over = self.seg1[:4000].overlay(piece, times=999999999)
        self.assertEqual(len(seg_manual), len(seg_over))
        self.assertEqual(len(seg_over), 4000)
        self.assertFalse(seg_mult._data == seg_over._data)
    def test_slicing(self):
        """Millisecond slicing behaves like list slicing, clamped at the end."""
        empty = self.seg1[:0]
        second_long_slice = self.seg1[:1000]
        remainder = self.seg1[1000:]
        self.assertEqual(len(empty), 0)
        self.assertEqual(len(second_long_slice), 1000)
        self.assertEqual(len(remainder), len(self.seg1) - 1000)
        # Negative indices count back from the end, as with lists.
        last_5_seconds = self.seg1[-5000:]
        before = self.seg1[:-5000]
        self.assertEqual(len(last_5_seconds), 5000)
        self.assertEqual(len(before), len(self.seg1) - 5000)
        # Slicing past the end is clamped, not an error.
        past_end = second_long_slice[:1500]
        self.assertTrue(second_long_slice._data == past_end._data)
    def test_indexing(self):
        """Iterating a segment yields pieces that re-concatenate losslessly."""
        short = self.seg1[:100]
        rebuilt1 = self.seg1[:0]
        for part in short:
            rebuilt1 += part
        # Same rebuild via sum() with an empty-segment start value.
        rebuilt2 = sum([part for part in short], short[:0])
        self.assertTrue(short._data == rebuilt1._data)
        self.assertTrue(short._data == rebuilt2._data)
    def test_set_channels(self):
        """Channel/frame-rate conversions must preserve duration (~1%)."""
        mono = self.seg1.set_channels(1)
        stereo = mono.set_channels(2)
        self.assertEqual(len(self.seg1), len(mono))
        self.assertEqual(len(self.seg1), len(stereo))
        mono = self.seg2.set_channels(1)
        mono = mono.set_frame_rate(22050)
        self.assertEqual(len(mono), len(self.seg2))
        # Round trip through an mp3 export; mp3 framing adds slight slop.
        monomp3 = AudioSegment.from_mp3(mono.export())
        self.assertWithinTolerance(len(monomp3), len(self.seg2),
                                   percentage=0.01)
        merged = monomp3.append(stereo, crossfade=100)
        self.assertWithinTolerance(len(merged),
                                   len(self.seg1) + len(self.seg2) - 100,
                                   tolerance=1)
    def test_split_to_mono(self):
        """split_to_mono() yields per-channel segments of equal length/rate."""
        seg = self.seg1
        mono_segments = seg.split_to_mono()
        seg_lchannel = mono_segments[0]
        seg_rchannel = mono_segments[1]
        self.assertEqual(len(seg_lchannel), len(seg))
        self.assertEqual(len(seg_rchannel), len(seg))
        self.assertEqual(seg_lchannel.frame_rate, seg.frame_rate)
        self.assertEqual(seg_rchannel.frame_rate, seg.frame_rate)
        self.assertEqual(seg_lchannel.frame_count(), seg.frame_count())
        self.assertEqual(seg_rchannel.frame_count(), seg.frame_count())
    def test_apply_gain_stereo(self):
        """apply_gain_stereo() shifts each channel's dBFS independently."""
        seg = self.seg1
        orig_l, orig_r = seg.split_to_mono()
        orig_dbfs_l = orig_l.dBFS
        orig_dbfs_r = orig_r.dBFS
        # for readability: infinity
        inf = float("inf")

        def assertAlmostEqual(v1, v2, **kwargs):
            # exact compare for +/-inf (almost-equal math breaks on infinities)
            if v1 in (inf, -inf):
                self.assertEqual(v1, v2)
            else:
                self.assertAlmostEqual(v1, v2, **kwargs)

        def check_stereo_gain(left_dbfs_change, right_dbfs_change):
            panned = seg.apply_gain_stereo(left_dbfs_change, right_dbfs_change)
            self.assertEqual(panned.channels, 2)
            l, r = panned.split_to_mono()
            assertAlmostEqual(l.dBFS, orig_dbfs_l + left_dbfs_change, places=2)
            assertAlmostEqual(r.dBFS, orig_dbfs_r + right_dbfs_change, places=2)

        # hard left
        check_stereo_gain(0.0, -inf)
        check_stereo_gain(0.0, -6.0)
        check_stereo_gain(0.0, 0.0)
        check_stereo_gain(-6.0, 0.0)
        check_stereo_gain(-inf, 0.0)
    def test_pan(self):
        """pan() trades channel gain; a full pan gives +3 dB on the favored side."""
        seg = self.seg1
        orig_l, orig_r = seg.split_to_mono()
        orig_dbfs_l = orig_l.dBFS
        orig_dbfs_r = orig_r.dBFS
        # for readability: infinity
        inf = float("inf")

        def assertAlmostEqual(v1, v2, **kwargs):
            # exact compare for +/-inf (almost-equal math breaks on infinities)
            if v1 in (inf, -inf):
                self.assertEqual(v1, v2)
            else:
                self.assertAlmostEqual(v1, v2, **kwargs)

        def check_pan(pan, left_dbfs_change, right_dbfs_change):
            panned = seg.pan(pan)
            l, r = panned.split_to_mono()
            assertAlmostEqual(l.dBFS, orig_dbfs_l + left_dbfs_change, places=1)
            assertAlmostEqual(r.dBFS, orig_dbfs_r + right_dbfs_change, places=1)

        check_pan(-1.0, 3.0, -inf)
        check_pan(-0.5, 1.5, -4.65)
        check_pan(0.0, 0.0, 0.0)
        check_pan(0.5, -4.65, 1.5)
        check_pan(1.0, -inf, 3.0)
    def test_export_as_mp3(self):
        """Export to mp3 (the default format) and re-import with ~1% length drift."""
        seg = self.seg1
        exported_mp3 = seg.export()
        seg_exported_mp3 = AudioSegment.from_mp3(exported_mp3)
        self.assertWithinTolerance(len(seg_exported_mp3),
                                   len(seg),
                                   percentage=0.01)
    def test_export_as_wav(self):
        """Export to wav and re-import with ~1% length drift allowed."""
        seg = self.seg1
        exported_wav = seg.export(format='wav')
        seg_exported_wav = AudioSegment.from_wav(exported_wav)
        self.assertWithinTolerance(len(seg_exported_wav),
                                   len(seg),
                                   percentage=0.01)
    def test_export_as_raw(self):
        """Export to headerless raw; re-import requires explicit audio parameters."""
        seg = self.seg1
        exported_raw = seg.export(format='raw')
        seg_exported_raw = AudioSegment.from_raw(exported_raw, sample_width=seg.sample_width, frame_rate=seg.frame_rate, channels = seg.channels)
        self.assertWithinTolerance(len(seg_exported_raw),
                                   len(seg),
                                   percentage=0.01)
    def test_export_as_ogg(self):
        """Export to ogg and re-import with ~1% length drift allowed."""
        seg = self.seg1
        exported_ogg = seg.export(format='ogg')
        seg_exported_ogg = AudioSegment.from_ogg(exported_ogg)
        self.assertWithinTolerance(len(seg_exported_ogg),
                                   len(seg),
                                   percentage=0.01)
    def test_export_forced_codec(self):
        """Export with an explicitly forced codec (libvorbis) instead of the default."""
        seg = self.seg1 + self.seg2
        with NamedTemporaryFile('w+b', suffix='.ogg') as tmp_file:
            seg.export(tmp_file.name, 'ogg', codec='libvorbis')
            exported = AudioSegment.from_ogg(tmp_file.name)
            self.assertWithinTolerance(len(exported),
                                       len(seg),
                                       percentage=0.01)
    def test_fades(self):
        """Fades reduce rms without changing length; end=inf and end=-1 agree."""
        seg = self.seg1[:10000]
        # 1 ms difference in the position of the end of the fade out
        inf_end = seg.fade(start=0, end=float('inf'), to_gain=-120)
        negative_end = seg.fade(start=0, end=-1, to_gain=-120)
        self.assertWithinTolerance(inf_end.rms, negative_end.rms,
                                   percentage=0.001)
        self.assertTrue(negative_end.rms <= inf_end.rms)
        self.assertTrue(inf_end.rms < seg.rms)
        self.assertEqual(len(inf_end), len(seg))
        self.assertTrue(-6 < ratio_to_db(inf_end.rms, seg.rms) < -5)
        # use a slice out of the middle to make sure there is audio
        seg = self.seg2[2000:8000]
        fade_out = seg.fade_out(1000)
        fade_in = seg.fade_in(1000)
        self.assertTrue(0 < fade_out.rms < seg.rms)
        self.assertTrue(0 < fade_in.rms < seg.rms)
        self.assertEqual(len(fade_out), len(seg))
        self.assertEqual(len(fade_in), len(seg))
        # a fade-in is quieter at the start than at the end; fade-out the reverse
        db_at_beginning = ratio_to_db(fade_in[:1000].rms, seg[:1000].rms)
        db_at_end = ratio_to_db(fade_in[-1000:].rms, seg[-1000:].rms)
        self.assertTrue(db_at_beginning < db_at_end)
        db_at_beginning = ratio_to_db(fade_out[:1000].rms, seg[:1000].rms)
        db_at_end = ratio_to_db(fade_out[-1000:].rms, seg[-1000:].rms)
        self.assertTrue(db_at_end < db_at_beginning)
    def test_reverse(self):
        """reverse() keeps duration; reversing twice restores the original."""
        seg = self.seg1
        rseg = seg.reverse()
        # the reversed audio should be exactly equal in playback duration
        self.assertEqual(len(seg), len(rseg))
        r2seg = rseg.reverse()
        # if you reverse it twice you should get an identical AudioSegment
        self.assertEqual(seg, r2seg)
    def test_normalize(self):
        """normalize(0.0) raises rms and pushes the peak to max possible amplitude."""
        seg = self.seg1
        normalized = seg.normalize(0.0)
        self.assertEqual(len(normalized), len(seg))
        self.assertTrue(normalized.rms > seg.rms)
        self.assertWithinTolerance(
            normalized.max,
            normalized.max_possible_amplitude,
            percentage=0.0001
        )
    def test_for_accidental_shortening(self):
        """Repeated mp3 export/import cycles must not shrink the audio."""
        seg = self.mp3_seg_party
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            seg.export(tmp_mp3_file.name)
            # round-trip through mp3 three times
            for i in range(3):
                AudioSegment.from_mp3(tmp_mp3_file.name).export(tmp_mp3_file.name, "mp3")
            tmp_seg = AudioSegment.from_mp3(tmp_mp3_file.name)
            self.assertFalse(len(tmp_seg) < len(seg))
    def test_formats(self):
        """Smoke test: an m4a fixture loads and is non-empty."""
        seg_m4a = AudioSegment.from_file(
            os.path.join(data_dir, 'format_test.m4a'), "m4a")
        self.assertTrue(len(seg_m4a))
    def test_equal_and_not_equal(self):
        """A wav round-trip compares equal via == and not-unequal via !=."""
        wav_file = self.seg1.export(format='wav')
        wav = AudioSegment.from_wav(wav_file)
        self.assertTrue(self.seg1 == wav)
        self.assertFalse(self.seg1 != wav)
    def test_duration(self):
        """duration_seconds is ~10 s and survives a wav round-trip exactly."""
        self.assertEqual(int(self.seg1.duration_seconds), 10)
        wav_file = self.seg1.export(format='wav')
        wav = AudioSegment.from_wav(wav_file)
        self.assertEqual(wav.duration_seconds, self.seg1.duration_seconds)
    def test_autodetect_format(self):
        """Wrong explicit format fails; omitting the format auto-detects it."""
        aac_path = os.path.join(data_dir, 'wrong_extension.aac')
        fn = partial(AudioSegment.from_file, aac_path, "aac")
        self.assertRaises(CouldntDecodeError, fn)
        # Trying to auto detect input file format
        aac_file = AudioSegment.from_file(
            os.path.join(data_dir, 'wrong_extension.aac'))
        self.assertEqual(int(aac_file.duration_seconds), 9)
    def test_export_ogg_as_mp3(self):
        """Transcode ogg -> mp3 and check the resulting mimetype."""
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            AudioSegment.from_file(self.ogg_file_path).export(tmp_mp3_file,
                                                              format="mp3")
            tmp_file_type, _ = mimetypes.guess_type(tmp_mp3_file.name)
            self.assertEqual(tmp_file_type, 'audio/mpeg')
    def test_export_mp3_as_ogg(self):
        """Transcode mp3 -> ogg and check the resulting mimetype."""
        with NamedTemporaryFile('w+b', suffix='.ogg') as tmp_ogg_file:
            AudioSegment.from_file(self.mp3_file_path).export(tmp_ogg_file,
                                                              format="ogg")
            tmp_file_type, _ = mimetypes.guess_type(tmp_ogg_file.name)
            self.assertEqual(tmp_file_type, 'audio/ogg')
    def test_export_mp4_as_ogg(self):
        """Transcode mp4 -> ogg and check the resulting mimetype."""
        with NamedTemporaryFile('w+b', suffix='.ogg') as tmp_ogg_file:
            AudioSegment.from_file(self.mp4_file_path).export(tmp_ogg_file,
                                                              format="ogg")
            tmp_file_type, _ = mimetypes.guess_type(tmp_ogg_file.name)
            self.assertEqual(tmp_file_type, 'audio/ogg')
    def test_export_mp4_as_mp3(self):
        """Transcode mp4 -> mp3 and check the resulting mimetype."""
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            AudioSegment.from_file(self.mp4_file_path).export(tmp_mp3_file,
                                                              format="mp3")
            tmp_file_type, _ = mimetypes.guess_type(tmp_mp3_file.name)
            self.assertEqual(tmp_file_type, 'audio/mpeg')
def test_export_mp4_as_wav(self):
with NamedTemporaryFile('w+b', suffix='.wav') as tmp_wav_file:
AudioSegment.from_file(self.mp4_file_path).export(tmp_wav_file,
format="mp3")
tmp_file_type, _ = mimetypes.guess_type(tmp_wav_file.name)
self.assertEqual(tmp_file_type, 'audio/x-wav')
    def test_export_mp4_as_mp3_with_tags(self):
        """Export to mp3 with an ID3 tag dictionary; file must still be mp3."""
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            tags_dict = {
                'title': "The Title You Want",
                'artist': "Artist's name",
                'album': "Name of the Album"
            }
            AudioSegment.from_file(self.mp4_file_path).export(tmp_mp3_file,
                                                              format="mp3",
                                                              tags=tags_dict)
            tmp_file_type, _ = mimetypes.guess_type(tmp_mp3_file.name)
            self.assertEqual(tmp_file_type, 'audio/mpeg')
    def test_export_mp4_as_mp3_with_tags_raises_exception_when_tags_are_not_a_dictionary(self):
        """Passing tags as a JSON string (not a dict) must raise InvalidTag."""
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            json = '{"title": "The Title You Want", "album": "Name of the Album", "artist": "Artist\'s name"}'
            func = partial(
                AudioSegment.from_file(self.mp4_file_path).export, tmp_mp3_file,
                format="mp3", tags=json)
            self.assertRaises(InvalidTag, func)
    def test_export_mp4_as_mp3_with_tags_raises_exception_when_id3version_is_wrong(self):
        """An unsupported id3v2_version value must raise InvalidID3TagVersion."""
        tags = {'artist': 'Artist', 'title': 'Title'}
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            func = partial(
                AudioSegment.from_file(self.mp4_file_path).export,
                tmp_mp3_file,
                format="mp3",
                tags=tags,
                id3v2_version='BAD VERSION'
            )
            self.assertRaises(InvalidID3TagVersion, func)
    def test_export_mp3_with_tags(self):
        """Tags written during export must be readable back via mediainfo."""
        tags = {'artist': 'Mozart', 'title': 'The Magic Flute'}
        with NamedTemporaryFile('w+b', suffix='.mp3') as tmp_mp3_file:
            AudioSegment.from_file(self.mp4_file_path).export(tmp_mp3_file, format="mp3", tags=tags)
            info = mediainfo(filepath=tmp_mp3_file.name)
            info_tags = info["TAG"]
            self.assertEqual(info_tags["artist"], "Mozart")
            self.assertEqual(info_tags["title"], "The Magic Flute")
    def test_fade_raises_exception_when_duration_start_end_are_none(self):
        """Supplying start, end AND duration together is over-specified -> TypeError."""
        seg = self.seg1
        func = partial(seg.fade, start=1, end=1, duration=1)
        self.assertRaises(TypeError, func)
    def test_silent(self):
        """silent() segments have zero rms; `seg *= other` takes on other's stats."""
        seg = AudioSegment.silent(len(self.seg1))
        self.assertEqual(len(self.seg1), len(seg))
        self.assertEqual(seg.rms, 0)
        self.assertEqual(seg.frame_width, 2)
        # reducing sample width keeps the silence silent
        seg_8bit = seg.set_sample_width(1)
        self.assertEqual(seg_8bit.sample_width, 1)
        self.assertEqual(seg_8bit.frame_width, 1)
        self.assertEqual(seg_8bit.rms, 0)
        seg *= self.seg1
        self.assertEqual(seg.rms, self.seg1.rms)
        self.assertEqual(len(seg), len(self.seg1))
        self.assertEqual(seg.frame_width, self.seg1.frame_width)
        self.assertEqual(seg.frame_rate, self.seg1.frame_rate)
    def test_fade_raises_exception_when_duration_is_negative(self):
        """A negative fade duration must raise InvalidDuration."""
        seg = self.seg1
        func = partial(seg.fade,
                       to_gain=1,
                       from_gain=1,
                       start=None,
                       end=None,
                       duration=-1)
        self.assertRaises(InvalidDuration, func)
    def test_make_chunks(self):
        """make_chunks() pieces concatenate back to the original length."""
        seg = self.seg1
        chunks = make_chunks(seg, 100)
        seg2 = chunks[0]
        for chunk in chunks[1:]:
            seg2 += chunk
        self.assertEqual(len(seg), len(seg2))
    def test_empty(self):
        """AudioSegment.empty() is an additive identity for concatenation."""
        self.assertEqual(len(self.seg1), len(self.seg1 + AudioSegment.empty()))
        self.assertEqual(len(self.seg2), len(self.seg2 + AudioSegment.empty()))
        self.assertEqual(len(self.seg3), len(self.seg3 + AudioSegment.empty()))
    def test_speedup(self):
        """speedup(2.0) roughly halves the playback length (~1% tolerance)."""
        speedup_seg = self.seg1.speedup(2.0)
        self.assertWithinTolerance(
            len(self.seg1) / 2, len(speedup_seg), percentage=0.01)
    def test_dBFS(self):
        """dBFS matches known fixture values and is stable across sample widths."""
        seg_8bit = self.seg1.set_sample_width(1)
        self.assertWithinTolerance(seg_8bit.dBFS, -8.88, tolerance=0.01)
        self.assertWithinTolerance(self.seg1.dBFS, -8.88, tolerance=0.01)
        self.assertWithinTolerance(self.seg2.dBFS, -10.39, tolerance=0.01)
        self.assertWithinTolerance(self.seg3.dBFS, -6.47, tolerance=0.01)
    def test_compress(self):
        """compress_dynamic_range() lowers both the peak and the average level."""
        compressed = self.seg1.compress_dynamic_range()
        self.assertWithinTolerance(self.seg1.dBFS - compressed.dBFS,
                                   10.0,
                                   tolerance=10.0)
        # Highest peak should be lower
        self.assertTrue(compressed.max < self.seg1.max)
        # average volume should be reduced
        self.assertTrue(compressed.rms < self.seg1.rms)
    def test_exporting_to_ogg_uses_default_codec_when_codec_param_is_none(self):
        """Without an explicit codec, ogg export uses vorbis."""
        with NamedTemporaryFile('w+b', suffix='.ogg') as tmp_ogg_file:
            AudioSegment.from_file(self.mp4_file_path).export(tmp_ogg_file, format="ogg")
            info = mediainfo(filepath=tmp_ogg_file.name)
            self.assertEqual(info["codec_name"], "vorbis")
            self.assertEqual(info["format_name"], "ogg")
    def test_zero_length_segment(self):
        """An empty slice has zero length."""
        self.assertEqual(0, len(self.seg1[0:0]))
    def test_invert(self):
        """invert_phase() changes the data but not rms; double inversion restores."""
        s = Sine(100).to_audio_segment()
        s_inv = s.invert_phase()
        self.assertFalse(s == s_inv)
        self.assertTrue(s.rms == s_inv.rms)
        self.assertTrue(s == s_inv.invert_phase())
    def test_max_dBFS(self):
        """max_dBFS reflects the generator volume: 0 dBFS default, -3 when attenuated."""
        sine_0_dbfs = Sine(1000).to_audio_segment()
        sine_minus_3_dbfs = Sine(1000).to_audio_segment(volume=-3.0)
        self.assertAlmostEqual(-0.0, sine_0_dbfs.max_dBFS, 2)
        self.assertAlmostEqual(-3.0, sine_minus_3_dbfs.max_dBFS, 2)
class SilenceTests(unittest.TestCase):
    """Tests for detect_silence() on synthetic silence and a real wav fixture."""

    def setUp(self):
        # Lazily load and cache the wav fixture in the module-level global.
        global test1wav
        if not test1wav:
            test1wav = AudioSegment.from_wav(os.path.join(data_dir, 'test1.wav'))
        self.seg1 = test1wav

    def test_detect_completely_silent_segment(self):
        """A fully silent segment yields one range covering its whole span."""
        seg = AudioSegment.silent(5000)
        silent_ranges = detect_silence(seg, min_silence_len=1000, silence_thresh=-20)
        self.assertEqual(silent_ranges, [[0, 5000]])

    def test_detect_tight_silent_segment(self):
        """A silent segment exactly min_silence_len long is still detected."""
        seg = AudioSegment.silent(1000)
        silent_ranges = detect_silence(seg, min_silence_len=1000, silence_thresh=-20)
        self.assertEqual(silent_ranges, [[0, 1000]])

    def test_detect_too_long_silence(self):
        """Nothing is reported when the segment is shorter than min_silence_len."""
        seg = AudioSegment.silent(3000)
        silent_ranges = detect_silence(seg, min_silence_len=5000, silence_thresh=-20)
        self.assertEqual(silent_ranges, [])

    def test_detect_silence_seg1(self):
        """Known silent ranges for the test1.wav fixture."""
        silent_ranges = detect_silence(self.seg1, min_silence_len=500, silence_thresh=-20)
        self.assertEqual(silent_ranges, [[0, 775], [3141, 4033], [5516, 6051]])
class GeneratorTests(unittest.TestCase):
    """Tests for the signal generators (Sine, Square, noise, etc.)."""

    def test_with_smoke(self):
        """Smoke test: every generator can render an AudioSegment."""
        Sine(440).to_audio_segment()
        Square(440).to_audio_segment()
        Triangle(440).to_audio_segment()
        Pulse(440, duty_cycle=0.75).to_audio_segment()
        Sawtooth(440, duty_cycle=0.75).to_audio_segment()
        WhiteNoise().to_audio_segment()

    def test_loudness(self):
        """Expected dBFS: sine ~-3, square ~0, white noise ~-5."""
        sine_dbfs = Sine(440).to_audio_segment().dBFS
        square_dbfs = Square(440).to_audio_segment().dBFS
        white_noise_dbfs = WhiteNoise().to_audio_segment().dBFS
        self.assertAlmostEqual(sine_dbfs, -3.0, places=1)
        self.assertAlmostEqual(square_dbfs, 0.0, places=1)
        self.assertAlmostEqual(white_noise_dbfs, -5, places=0)

    def test_duration(self):
        """to_audio_segment(duration=...) honors the requested length in ms."""
        one_sec = Sine(440).to_audio_segment(duration=1000)
        five_sec = Sine(440).to_audio_segment(duration=5000)
        half_sec = Sine(440).to_audio_segment(duration=500)
        self.assertAlmostEqual(len(one_sec), 1000)
        self.assertAlmostEqual(len(five_sec), 5000)
        self.assertAlmostEqual(len(half_sec), 500)
class NoConverterTests(unittest.TestCase):
    """Behavior when no ffmpeg/avconv converter is available.

    setUp points AudioSegment.converter at a bogus path; wav and raw files
    must still work (no transcoding needed) while mp3 decoding must fail.
    """

    def setUp(self):
        self.wave_file = os.path.join(data_dir, 'test1.wav')
        self.mp3_file = os.path.join(data_dir, 'test1.mp3')
        self.raw_file = os.path.join(data_dir, 'test1.raw')
        AudioSegment.converter = "definitely-not-a-path-to-anything-asdjklqwop"

    def tearDown(self):
        # restore the real converter for subsequent test classes
        AudioSegment.converter = get_encoder_name()

    def test_opening_wav_file(self):
        """wav opens via every entry point without a converter."""
        seg = AudioSegment.from_wav(self.wave_file)
        self.assertTrue(len(seg) > 1000)
        seg = AudioSegment.from_file(self.wave_file)
        self.assertTrue(len(seg) > 1000)
        seg = AudioSegment.from_file(self.wave_file, "wav")
        self.assertTrue(len(seg) > 1000)
        seg = AudioSegment.from_file(self.wave_file, format="wav")
        self.assertTrue(len(seg) > 1000)

    def test_opening_raw_file(self):
        """raw opens without a converter when audio parameters are supplied."""
        seg = AudioSegment.from_raw(self.raw_file, sample_width=2, frame_rate=32000, channels=2)
        self.assertTrue(len(seg) > 1000)
        seg = AudioSegment.from_file(self.raw_file, "raw", sample_width=2, frame_rate=32000, channels=2)
        self.assertTrue(len(seg) > 1000)
        seg = AudioSegment.from_file(self.raw_file, format="raw", sample_width=2, frame_rate=32000, channels=2)
        self.assertTrue(len(seg) > 1000)

    def test_opening_raw_file_with_missing_args_fails(self):
        """raw without sample_width/frame_rate/channels raises KeyError."""
        func = partial(AudioSegment.from_raw, self.raw_file)
        self.assertRaises(KeyError, func)

    def test_opening_mp3_file_fails(self):
        """mp3 requires the converter, so every entry point raises OSError."""
        func = partial(AudioSegment.from_mp3, self.mp3_file)
        self.assertRaises(OSError, func)
        func = partial(AudioSegment.from_file, self.mp3_file)
        self.assertRaises(OSError, func)
        func = partial(AudioSegment.from_file, self.mp3_file, "mp3")
        self.assertRaises(OSError, func)
        func = partial(AudioSegment.from_file, self.mp3_file, format="mp3")
        self.assertRaises(OSError, func)

    def test_exporting(self):
        """wav export/import round-trip works without a converter."""
        seg = AudioSegment.from_wav(self.wave_file)
        exported = AudioSegment.from_wav(seg.export(format="wav"))
        self.assertEqual(len(exported), len(seg))
class FilterTests(unittest.TestCase):
    """Tests for high-pass and low-pass filtering."""

    def setUp(self):
        # Lazily load and cache the wav fixture in the module-level global.
        global test1wav
        if not test1wav:
            test1wav = AudioSegment.from_wav(os.path.join(data_dir, 'test1.wav'))
        self.seg1 = test1wav

    def test_highpass_works_on_multichannel_segments(self):
        """HPF applies to a stereo segment and reduces its level."""
        self.assertEqual(self.seg1.channels, 2)
        less_bass = self.seg1.high_pass_filter(800)
        self.assertTrue(less_bass.dBFS < self.seg1.dBFS)

    def test_highpass_filter_reduces_loudness(self):
        """HPF above the fundamental of a square wave reduces loudness."""
        s = Square(200).to_audio_segment()
        less_bass = s.high_pass_filter(400)
        self.assertTrue(less_bass.dBFS < s.dBFS)

    def test_highpass_filter_cutoff_frequency(self):
        # A Sine wave should not be affected by a HPF 3 octaves lower
        s = Sine(800).to_audio_segment()
        less_bass = s.high_pass_filter(100)
        self.assertAlmostEqual(less_bass.dBFS, s.dBFS, places=0)

    def test_lowpass_filter_reduces_loudness(self):
        """LPF below the harmonics of a square wave reduces loudness."""
        s = Square(200).to_audio_segment()
        less_treble = s.low_pass_filter(400)
        self.assertTrue(less_treble.dBFS < s.dBFS)

    def test_lowpass_filter_cutoff_frequency(self):
        # A Sine wave should not be affected by a LPF 3 octaves Higher
        s = Sine(100).to_audio_segment()
        less_treble = s.low_pass_filter(800)
        self.assertAlmostEqual(less_treble.dBFS, s.dBFS, places=0)
if __name__ == "__main__":
    import sys
    # Python >= 3.1 supports the warnings= kwarg; suppress ResourceWarnings
    # from the many temp files. Older interpreters lack the kwarg.
    if sys.version_info >= (3, 1):
        unittest.main(warnings="ignore")
    else:
        unittest.main()
| {
"repo_name": "sgml/pydub",
"path": "test/test.py",
"copies": "3",
"size": "30984",
"license": "mit",
"hash": -874881519336827500,
"line_mean": 36.9705882353,
"line_max": 145,
"alpha_frac": 0.5976633101,
"autogenerated": false,
"ratio": 3.355789017654067,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5453452327754066,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import mimetypes
import os
from celery.utils.log import get_task_logger
from girder.models.collection import Collection
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.user import User
from isic_archive.celery import app
from isic_archive.models import Batch, Dataset, Image
from isic_archive.upload import TempDir, ZipFileOpener
from isic_archive.utility.boto import s3
logger = get_task_logger(__name__)
def _uploadZipfileToGirder(requestSession, filePath, dataset):
    """Upload a local ZIP into Girder in chunks and attach it to *dataset*.

    The file is first uploaded into the 'Temporary ZIP Uploads' folder, then
    re-parented: its temporary item is removed and the file document is
    attached directly to the dataset.  Returns the saved Girder file document.
    """
    # Attach file to dataset
    uploadCollection = Collection().findOne({
        'name': 'Temporary ZIP Uploads'
    })
    uploadFolder = Folder().findOne({
        'name': 'Temporary ZIP Uploads',
        'baseParentId': uploadCollection['_id']
    })
    originalFileName = os.path.basename(filePath)
    fileSize = os.path.getsize(filePath)
    with open(filePath, 'rb') as fileStream:
        # Register the upload with Girder before sending any bytes.
        uploadFileResponse = requestSession.post(
            'file',
            params={
                'parentType': 'folder',
                'parentId': uploadFolder['_id'],
                'name': originalFileName,
                'size': fileSize,
                'mimeType': 'application/zip'
            },
            allow_redirects=False,
        )
        uploadFileResponse.raise_for_status()
        uploadId = uploadFileResponse.json()['_id']
        # Stream the payload in 50 MB chunks until read() returns b''.
        chunk_size = 1024 * 1024 * 50
        offset = 0
        for chunk in iter(partial(fileStream.read, chunk_size), b''):
            uploadChunkResponse = requestSession.post(
                'file/chunk',
                params={
                    'offset': offset,
                    'uploadId': uploadId
                },
                data=chunk,
                allow_redirects=False,
            )
            uploadChunkResponse.raise_for_status()
            offset += len(chunk)
    # NOTE(review): a zero-byte file would leave uploadChunkResponse unbound
    # here; the current caller always passes a non-empty S3 download.
    uploadFile = File().load(uploadChunkResponse.json()['_id'], force=True)
    uploadItem = Item().load(uploadFile['itemId'], force=True)
    # Detach the file from its temporary item and attach it to the dataset.
    uploadFile['itemId'] = None
    uploadFile['attachedToType'] = ['dataset', 'isic_archive']
    uploadFile['attachedToId'] = dataset['_id']
    uploadFile = File().save(uploadFile)
    # Undo the size the temporary item accrued before deleting it.
    File().propagateSizeChange(
        item=uploadItem,
        sizeIncrement=-uploadFile['size'],
        updateItemSize=False
    )
    Item().remove(uploadItem)
    return uploadFile
@app.task(bind=True)
def ingestBatchFromZipfile(self, batchId):
    """
    Ingest images from a ZIP file into a dataset.

    The ZIP is downloaded from S3, archived into Girder, and each contained
    image is extracted into a "Pre-review" folder within the dataset folder.
    Per-image failures are logged and skipped; an ingestImage task is queued
    for every successfully extracted image.
    """
    from isic_archive.tasks import ingestImage
    batch = Batch().load(batchId)
    dataset = Dataset().load(batch['datasetId'], force=True)
    user = User().load(batch['creatorId'], force=True)
    # Guard against double-processing of the same batch.
    if batch['ingestStatus'] != 'queued':
        raise Exception('Trying to ingest a non-queued batch')
    prereviewFolder = Folder().createFolder(
        parent=Dataset().imagesFolder(dataset),
        name='Pre-review',
        parentType='folder',
        creator=user,
        public=False,
        reuseExisting=True)
    # Get upload information stored on batch
    s3BucketName = batch.get('s3BucketName')
    s3ObjectKey = batch.get('s3ObjectKey')
    if not all([s3BucketName, s3ObjectKey]):
        raise Exception('Error retrieving upload information.')
    # Move file from S3 to the assetstore, attached to the dataset
    with TempDir() as tempDir:
        # Download file from S3 as upload user
        filePath = os.path.join(tempDir, f'{batch["_id"]}.zip')
        s3.download_file(
            Bucket=s3BucketName,
            Key=s3ObjectKey,
            Filename=filePath
        )
        uploadFile = _uploadZipfileToGirder(self.session, filePath, dataset)
        batch['ingestStatus'] = 'extracting'
        batch['uploadFileId'] = uploadFile['_id']
        batch = Batch().save(batch)
        # Process zip file
        with ZipFileOpener(filePath) as (fileList, fileCount):
            for originalFilePath, originalFileRelpath in fileList:
                originalFileName = os.path.basename(originalFileRelpath)
                with open(originalFilePath, 'rb') as originalFileStream:
                    # Create the image record first, then stream its bytes up.
                    image = Image().createEmptyImage(
                        originalFileRelpath=originalFileRelpath,
                        parentFolder=prereviewFolder,
                        creator=user,
                        dataset=dataset,
                        batch=batch
                    )
                    try:
                        resp = self.session.post(
                            'file',
                            params={
                                'parentType': 'item',
                                'parentId': image['_id'],
                                'name': '%s%s' % (
                                    image['name'],
                                    os.path.splitext(originalFileName)[1].lower()
                                ),
                                'size': os.path.getsize(originalFilePath),
                                'mimeType': mimetypes.guess_type(originalFileName)[0]
                            },
                            data=originalFileStream,
                            allow_redirects=False,
                        )
                        resp.raise_for_status()
                    except Exception:
                        # Best-effort per image: log and move on to the next one.
                        logger.exception('An individual image failed to be extracted')
                        continue
                    originalFile = File().load(resp.json()['_id'], force=True)
                    originalFile['imageRole'] = 'original'
                    File().updateFile(originalFile)
                    # Queue asynchronous processing of the new image.
                    ingestImage.delay(image['_id'])
    batch['ingestStatus'] = 'extracted'
    Batch().save(batch)
| {
"repo_name": "ImageMarkup/isic-archive",
"path": "isic_archive/tasks/zip.py",
"copies": "1",
"size": "5921",
"license": "apache-2.0",
"hash": 1670203386913984300,
"line_mean": 34.4550898204,
"line_max": 86,
"alpha_frac": 0.558013849,
"autogenerated": false,
"ratio": 4.5651503469545105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5623164195954511,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import multiprocessing
from collections import Counter
from functools32 import lru_cache
from itertools import combinations
from collections import namedtuple
import datetime
import hashlib
import sys
import time
import signal
import gzip
# Signal handler updates GLOBAL to stop processing
globalStop = False
LogLine = namedtuple('LogLine', ['ts', 'text'])
DataRecord = namedtuple('DataRecord', ['line', 'md5hash', 'stats'])
def openFile(name, mode):
    """Open *name* in *mode*; files ending in .gz (case-insensitive)
    are opened through gzip in binary mode."""
    if not name.lower().endswith('.gz'):
        return open(name, mode)
    # gzip streams are byte-oriented, so force binary mode.
    return gzip.open(name, mode + 'b')
# GOOD
def dataset_iterator(fIn, num_lines):
    '''
    Yield LogLine tuples parsed from a syslog-style file.

    Reads up to num_lines lines (-1 means read to EOF).  Each line's first
    14 characters are parsed as a "%b %d %H:%M:%S" timestamp; the year is
    hard-coded to 2015 because the syslog format carries none.  Lines that
    fail to parse are silently skipped.
    '''
    lines_read = 0
    success_full = 0
    while num_lines == -1 or lines_read < num_lines:
        lines_read += 1
        line = fIn.readline()
        if len(line) == 0:
            break
        else:
            try:
                # NOTE(review): debug print left in; noisy on large inputs
                print 'read', line
                ts = datetime.datetime.strptime(line[:14], '%b %d %H:%M:%S')
                yield LogLine(ts.replace(year=2015), line[15:].strip())
                success_full += 1
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            except:
                pass
def distribWork(C, G, denominator, X):
    """One unit of pool.map work: choose the best partition for record X.

    Parameters:
      C -- list of per-partition pair-histogram Counters
      G -- dict mapping md5hash -> current partition assignment
      denominator -- Counter of partition -> member count
      X -- DataRecord to (re)assign

    Returns (new partition index, X's pair histogram, {X.md5hash: index}),
    which the caller folds into its CNext/GNext accumulators.

    Fix: the original initialized throwaway Counter()/dict() values that
    were immediately overwritten; that dead code is removed.
    """
    j = argMaxPhiSimple(C, X, G, denominator)
    return (j, X.stats, {X.md5hash: j})
# stop processing if CTRL-C pressed
# GOOD
def signal_handler(signal, frame):
    # Flip the module-level stop flag so long-running loops can exit cleanly.
    # NOTE(review): the 'signal' parameter shadows the signal module inside
    # this function; harmless here, but renaming it to 'signum' would be safer.
    global globalStop
    globalStop = True
# return a md5 string representation of input string
# TODO lookup faster hashes
@lru_cache()
def makeHash(s):
    """Return the hex md5 digest of *s*, memoized per input."""
    # One-shot constructor instead of the create/update/hexdigest sequence.
    return hashlib.md5(s).hexdigest()
# make a concatenation of a tuple
# GOOD
# can make multiple things alias to the same comparison..
# 'a','aaa','aa','aa','aaa','a' all map to 'aaaa'
@lru_cache()
def tuple2Str(a):
    """Concatenate the two members of pair *a* into one string (memoized)."""
    first, second = a
    return '{0}{1}'.format(first, second)
# make a counter object from a string
# GOOD
@lru_cache()
def str2Counter(X):
    """Build a pair-membership Counter for one document string (memoized).

    The set() makes this a membership histogram: each token pair counts at
    most once per document, so summing across documents counts how many
    DOCUMENTS contain a pair, not how often it occurs within one.
    """
    unique_pairs = set(combinations(X.rstrip().split(), 2))
    return Counter(tuple2Str(pair) for pair in unique_pairs)
# calculate the best partition for X to be in
# using the cheat sum(p(r,Cdest))
# TODO update with results from email to paper authors
# TODO (global update, p(r,C) )
# @profile
def argMaxPhiSimple(C, X, G, denominator):
    """Return the partition index that maximizes the (simplified) phi score
    for record X: (sum of X's pair counts in the partition) / (partition size)^2.
    Ties and an all-zero score keep X's current partition."""
    numGroups = len(C)
    # see which group X should be in to maximize
    partition = G[X.md5hash]
    retScore = 0.0
    retval = partition
    for partition in range(numGroups):
        currentScore = 0.0
        numerator = 0.0
        for r in X.stats.iterkeys():
            numerator += C[partition].get(r, 0)
            # NOTE(review): this accumulation is dead code -- currentScore is
            # unconditionally overwritten after the loop below.
            currentScore += numerator * numerator
        # TODO make sure this is the correct way to calculate
        # tiny epsilon avoids division by zero for empty partitions
        d = denominator.get(partition, 0.000000000001)
        d = d * d
        currentScore = numerator / d
        # keep tabs of who is winning
        if retScore < currentScore:
            retScore = currentScore
            retval = partition
    return retval
# store the data histograms
# in each parition
# GOOD
def randomSeeds(D, k, G):
    """Round-robin the records in D across k partitions.

    Mutates G (md5hash -> partition index) in place and returns the list of
    k per-partition pair-histogram Counters.
    """
    C = [Counter() for _ in range(k)]
    partition = 0
    for d in D:
        # assigning groups to a message
        G[d.md5hash] = partition
        # Do things the Counter way
        C[partition].update(d.stats)
        partition = (partition + 1) % k
    print 'UniqLogLines', len(G)
    return C
# move X from partition i to partition j
# GOOD
def updatePartition(CNext, X, GNext, j):
    """Record that raw string X now belongs to partition j: update the
    assignment map GNext and fold X's pair histogram into CNext[j]."""
    GNext[makeHash(X)] = j
    # TODO would a binary version of this be sufficient?
    CNext[j].update(str2Counter(X))
# determine if array of dicts are equal
# GOOD
def partitionsNotEqual(C, CNext):
    """Return True when any corresponding pair of partition histograms
    differs (C and CNext are parallel, equal-length lists)."""
    return any(old != new for old, new in zip(C, CNext))
# D : log message set
# k : number of groups to partition
# returns: C: partitions
# GOOD
def logSig_localSearch(D, G, k, maxIter):
    """Iteratively reassign records to k partitions until the partition
    histograms stop changing, maxIter is reached, or CTRL-C sets globalStop.

    D is the list of DataRecords, G the (mutated in place) md5hash ->
    partition map.  Each iteration fans the reassignment work out over a
    fresh multiprocessing pool.  Returns the final list of k Counters.
    """
    global globalStop
    GNext = dict()
    CNext = [Counter() for _ in range(k)]
    C = randomSeeds(D, k, G)
    # per-partition member counts used by the scoring denominator
    denominator = Counter(G.itervalues())
    print "Starting Run\n"
    # TODO should this be an energy measure
    # instead of dict comp?
    limit = 0
    partitionsNotSame = True
    while partitionsNotSame and (limit < maxIter) and not globalStop:
        start = time.time()
        # a new pool every iteration so workers see the current C/G/denominator
        pool = multiprocessing.Pool()
        func = partial(distribWork, C, G, denominator)
        distribOut = pool.map(func, D, chunksize=2000)
        pool.close()
        pool.join()
        # fold the per-record results back into the next-state accumulators
        for d in distribOut:
            tempj, tempCNext, tempGNext = d
            CNext[tempj].update(tempCNext)
            GNext.update(tempGNext)
        limit += 1
        finish = time.time()
        # make sure to stop when partitions stable
        partitionsNotSame = partitionsNotEqual(C, CNext)
        # TODO is this the corret thing?
        C = CNext
        # update for passing back
        G.clear()
        G.update(GNext)
        CNext = [Counter() for _ in range(k)]
        GNext = dict()
        denominator = Counter(G.itervalues())
        print 'looping iteration %i time=%3.4f (sec)' % (limit, finish - start)
        sys.stdout.flush()
    # end while
    print '\niterated %i times' % (limit)
    return C
# GOOD
def main(argv):
    """CLI driver: argv = [input file, k, maxIter].

    Reads and hashes every log line, runs logSig_localSearch, then prints a
    partition-size histogram and every line grouped by its final partition.
    """
    totalS = time.time()
    print 'Attempting to open %s' % (argv[0])
    print 'k = %i' % int(argv[1])
    print 'maxIter = %i' % int(argv[2])
    a = openFile(argv[0], 'r')
    D = list()
    G = dict()
    readCount = 0
    # precompute each record's hash and pair histogram once up front
    for r in dataset_iterator(a, -1):
        h = makeHash(r.text)
        s = str2Counter(r.text)
        D.append(DataRecord(r, h, s))
        readCount += 1
    a.close()
    print 'Read %i items' % readCount
    logSig_localSearch(D, G, int(argv[1]), int(argv[2]))
    totalE = time.time()
    outHist = Counter(G.itervalues())
    partitions = sorted(set(G.itervalues()))
    # print a histogram of partition sizes
    print 'cluster, number'
    for p in partitions:
        print '%i, %i' % (p, outHist[p])
    print 'total execution time %s (sec)' % (totalE - totalS)
    print 'Partition | Logline'
    print '__________+__________________________________________'
    # print things in partition order at the expense of looping
    for p in partitions:
        for d in D:
            if p == G[d.md5hash]:
                print '      %03i | %s' % (G[d.md5hash], d.line.text)
if __name__ == "__main__":
    # install the signal handler so CTRL-C requests a clean stop
    signal.signal(signal.SIGINT, signal_handler)
    main(sys.argv[1:])
| {
"repo_name": "d-grossman/magichour",
"path": "deprecated/LogSig/multi/LogSigMulti.py",
"copies": "2",
"size": "6737",
"license": "apache-2.0",
"hash": 1261389348936189000,
"line_mean": 22.5559440559,
"line_max": 79,
"alpha_frac": 0.6007124833,
"autogenerated": false,
"ratio": 3.604601391118245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205313874418245,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import multiprocessing
import pandas as pd
from .synthesizer import synthesize, enable_logging
from . import categorizer as cat
def load_data(hh_marginal_file, person_marginal_file,
              hh_sample_file, person_sample_file):
    """
    Load and process data inputs from .csv files on disk

    Parameters
    ----------
    hh_marginal_file : string
        path to a csv file of household marginals
    person_marginal_file : string
        path to a csv file of person marginals
    hh_sample_file : string
        path to a csv file of sample household records to be drawn from
    person_sample_file : string
        path to a csv file of sample person records

    Returns
    -------
    hh_marg : pandas.DataFrame
        processed and properly indexed household marginals table
    p_marg : pandas.DataFrame
        processed and properly indexed person marginals table
    hh_sample : pandas.DataFrame
        household sample table
    p_sample : pandas.DataFrame
        person sample table
    xwalk : list of tuples
        list of marginal-to-sample geography crosswalks to iterate over
    """
    hh_sample = pd.read_csv(hh_sample_file)
    p_sample = pd.read_csv(person_sample_file)
    # Marginal csvs carry a two-row (category, value) column header,
    # indexed by marginal geography id.
    hh_marg = pd.read_csv(hh_marginal_file, header=[0, 1], index_col=0)
    hh_marg.columns.levels[0].name = 'cat_name'
    hh_marg.columns.levels[1].name = 'cat_values'
    # Pair each marginal geography with its sample geography, then drop the
    # bookkeeping 'sample_geog' column group from the marginals themselves.
    xwalk = list(zip(hh_marg.index, hh_marg.sample_geog.unstack().values))
    hh_marg = hh_marg.drop('sample_geog', axis=1, level=0)
    p_marg = pd.read_csv(person_marginal_file, header=[0, 1], index_col=0)
    p_marg.columns.levels[0].name = 'cat_name'
    p_marg.columns.levels[1].name = 'cat_values'
    return hh_marg, p_marg, hh_sample, p_sample, xwalk
def synthesize_all_zones(hh_marg, p_marg, hh_sample, p_sample, xwalk):
    """
    Iterate over a geography crosswalk list and synthesize in-line

    Parameters
    ----------
    hh_marg : pandas.DataFrame
        processed and properly indexed household marginals table
    p_marg : pandas.DataFrame
        processed and properly indexed person marginals table
    hh_sample : pandas.DataFrame
        household sample table
    p_sample : pandas.DataFrame
        person sample table
    xwalk : list of tuples
        list of marginal-to-sample geography crosswalks to iterate over

    Returns
    -------
    all_households : pandas.DataFrame
        synthesized household records
    all_persons : pandas.DataFrame
        synthesized person records
    all_stats : pandas.DataFrame
        chi-square and p-score values for each marginal geography drawn
    """
    # Fix: dropped the dead hh_index_start bookkeeping -- it was updated each
    # iteration but never used (synthesize_zone always starts ids at 1;
    # synch_hhids renumbers households globally afterwards).
    hh_list = []
    people_list = []
    stats_list = []
    for geogs in xwalk:
        households, people, stats = synthesize_zone(hh_marg, p_marg,
                                                    hh_sample, p_sample, geogs)
        stats_list.append(stats)
        hh_list.append(households)
        people_list.append(people)
    all_households = pd.concat(hh_list)
    all_persons = pd.concat(people_list)
    # Household ids restart at 1 in every zone; renumber them globally and
    # keep the person records in sync.
    all_households, all_persons = synch_hhids(all_households, all_persons)
    all_stats = pd.DataFrame(stats_list)
    return all_households, all_persons, all_stats
def synch_hhids(households, persons):
    """
    Renumber household ids globally and keep person records in sync.

    Both inputs arrive with per-zone household ids; this assigns a single
    sequential household_id across all zones and joins it onto the person
    table via the (geog, per-zone id) pair.  The households frame is
    modified in place; a new persons frame is returned.

    Parameters
    ----------
    households : pandas.DataFrame
        full households table with id values sequential by geog
    persons : pandas.DataFrame
        full persons table with id values sequential by geog

    Returns
    -------
    households : pandas.DataFrame
        households table with reindexed sequential household ids
    persons : pandas.DataFrame
        persons table synchronized with updated household ids
    """
    # Keep each household's per-zone id, then assign the global sequential id.
    households['hh_id'] = households.index
    households['household_id'] = range(1, len(households.index) + 1)
    # Propagate the global id onto person records via (geog, per-zone id).
    lookup = households[['household_id', 'geog', 'hh_id']]
    persons = pd.merge(persons, lookup, how='left',
                       on=['geog', 'hh_id'],
                       suffixes=('', '_x')).drop('hh_id', axis=1)
    # Re-key the household table on the new global id.
    households.set_index('household_id', inplace=True)
    households.drop('hh_id', axis=1, inplace=True)
    return households, persons
def synthesize_zone(hh_marg, p_marg, hh_sample, p_sample, xwalk):
    """
    Synthesize a single zone (used within multiprocessing synthesis).

    Parameters
    ----------
    hh_marg : pandas.DataFrame
        processed and properly indexed household marginals table
    p_marg : pandas.DataFrame
        processed and properly indexed person marginals table
    hh_sample : pandas.DataFrame
        household sample table
    p_sample : pandas.DataFrame
        person sample table
    xwalk : tuple
        (marginal geography id, sample geography id) crosswalk pair

    Returns
    -------
    households : pandas.DataFrame
        synthesized household records
    people : pandas.DataFrame
        synthesized person records
    stats : dict
        chi-square and p-score values for the marginal geography drawn
    """
    marg_geog, sample_geog = xwalk[0], xwalk[1]
    # Build joint distributions from the sample rows of this geography.
    hh_rows = hh_sample[hh_sample.sample_geog == sample_geog]
    hhs, hh_jd = cat.joint_distribution(
        hh_rows, cat.category_combinations(hh_marg.columns))
    p_rows = p_sample[p_sample.sample_geog == sample_geog]
    ps, p_jd = cat.joint_distribution(
        p_rows, cat.category_combinations(p_marg.columns))
    households, people, people_chisq, people_p = synthesize(
        hh_marg.loc[marg_geog], p_marg.loc[marg_geog], hh_jd, p_jd, hhs, ps,
        hh_index_start=1)
    # Tag every synthesized record with its marginal geography.
    households['geog'] = marg_geog
    people['geog'] = marg_geog
    stats = {'geog': marg_geog, 'chi-square': people_chisq,
             'p-score': people_p}
    return households, people, stats
def multiprocess_synthesize(hh_marg, p_marg, hh_sample,
                            p_sample, xwalk, cores=False):
    """
    Synthesize for a set of marginal geographies via multiprocessing.

    Parameters
    ----------
    hh_marg : pandas.DataFrame
        processed and properly indexed household marginals table
    p_marg : pandas.DataFrame
        processed and properly indexed person marginals table
    hh_sample : pandas.DataFrame
        household sample table
    p_sample : pandas.DataFrame
        person sample table
    xwalk : list of tuples
        list of marginal-to-sample geography crosswalks to iterate over
    cores : integer, optional
        number of cores to use in the multiprocessing pool. defaults to
        multiprocessing.cpu_count() - 1

    Returns
    -------
    all_persons : pandas.DataFrame
        synthesized person records
    all_households : pandas.DataFrame
        synthesized household records
    all_stats : pandas.DataFrame
        chi-square and p-score values for each marginal geography drawn

    Notes
    -----
    Unlike the sequential variant, this function returns persons BEFORE
    households (see the return statement) -- callers must unpack in that
    order.
    """
    cores = cores if cores else (multiprocessing.cpu_count() - 1)
    # Bind the shared tables; the pool maps over the crosswalk tuples only.
    part = partial(synthesize_zone, hh_marg, p_marg, hh_sample, p_sample)
    p = multiprocessing.Pool(cores)
    results = p.map(part, list(xwalk))
    p.close()
    p.join()
    # Each result is a (households, people, stats) triple per zone.
    hh_list = [result[0] for result in results]
    people_list = [result[1] for result in results]
    all_stats = pd.DataFrame([result[2] for result in results])
    all_households = pd.concat(hh_list)
    all_persons = pd.concat(people_list)
    # Re-number household ids globally and sync them onto persons.
    all_households, all_persons = synch_hhids(all_households, all_persons)
    return all_persons, all_households, all_stats
| {
"repo_name": "UDST/synthpop",
"path": "synthpop/zone_synthesizer.py",
"copies": "2",
"size": "7532",
"license": "bsd-3-clause",
"hash": -5054512268951539000,
"line_mean": 34.8666666667,
"line_max": 79,
"alpha_frac": 0.6549389272,
"autogenerated": false,
"ratio": 3.7012285012285013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 210
} |
from functools import partial
import numpy as np
from codim1.core import *
def test_quadratic_no_fnc_fail():
    """A degree-2 mapping without a boundary function must raise.

    Bug fix: the original wrapped ``assert(False)`` inside the ``try`` with a
    bare ``except:``, which also swallowed the AssertionError -- the test
    passed unconditionally. The assertion now lives in the ``else`` clause so
    it only fires when PolynomialMapping unexpectedly succeeds.
    """
    m = simple_line_mesh(2, (-1.0, 0.0), (1.0, 0.0))
    try:
        PolynomialMapping(m.elements[0], 2)
    except Exception:
        pass
    else:
        assert False, ("PolynomialMapping with degree=2 and no boundary "
                       "function should raise")
def test_coeffs():
    """Interpolation coefficients of a quadratic mapping over y=sin(x)."""
    mesh = simple_line_mesh(4)
    sine_boundary = lambda x: (x, np.sin(x))
    mapping = PolynomialMapping(mesh.elements[2], 2, sine_boundary)
    # Midpoint coefficient: x at the element midpoint and sin of it.
    assert mapping.coefficients[0, 1] == 0.25
    assert mapping.coefficients[1, 1] == np.sin(0.25)
def test_get_phys_pts():
    """Physical points of a quadratic sine-boundary mapping on element 2."""
    mesh = simple_line_mesh(4)
    mapping = PolynomialMapping(mesh.elements[2], 2,
                                lambda x: (x, np.sin(x)))
    # Element 2 should lie from 0 to 0.5; endpoints sit on y=0 and the
    # reference midpoint maps onto the sine interpolant.
    cases = [(0.0, 0.0, 0.0),
             (0.5, 0.25, np.sin(0.25)),
             (1.0, 0.5, 0.0)]
    for x_hat, expected_x, expected_y in cases:
        pts = mapping.get_physical_point(x_hat)
        np.testing.assert_almost_equal(pts[0], expected_x)
        np.testing.assert_almost_equal(pts[1], expected_y)
def test_higher_order_coeff_gen():
    """Coefficient generation for a quadratic mapping on a 2-element circle."""
    make_quad_map = partial(PolynomialMapping, degree=2)
    mesh = circular_mesh(2, 1.0, make_quad_map)
    expected = np.array([[1.0, 0.0, -1.0],
                         [0.0, 1.0, 0.0]])
    np.testing.assert_almost_equal(
        mesh.elements[0].mapping.coefficients, expected)
def test_higher_order_phys_pt():
    """Physical points of a quadratic mapping on the upper unit half-circle."""
    make_quad_map = partial(PolynomialMapping, degree=2)
    mesh = circular_mesh(2, 1.0, make_quad_map)
    mapping = mesh.elements[0].mapping
    for x_hat, expected in [(0.5, (0.0, 1.0)),
                            (0.25, (0.5, 0.75)),
                            (0.75, (-0.5, 0.75))]:
        np.testing.assert_almost_equal(
            mapping.get_physical_point(x_hat), expected)
def test_higher_order_jacobian():
    """Jacobian of the quadratic circle mapping at the reference midpoint.

    Cleanup: removed the unused ``x_hat = np.linspace(0, 1, 100)`` local that
    was never referenced.
    """
    quad_map_gen = partial(PolynomialMapping, degree=2)
    m = circular_mesh(2, 1.0, quad_map_gen)
    jacobian = m.elements[0].mapping.get_jacobian(0.5)
    np.testing.assert_almost_equal(jacobian, 2.0)
def test_higher_order_normal():
    """Outward normal of the quadratic circle mapping at the reference midpoint.

    Cleanup: removed the unused ``x_hat = np.linspace(0, 1, 100)`` local that
    was never referenced.
    """
    quad_map_gen = partial(PolynomialMapping, degree=2)
    m = circular_mesh(2, 1.0, quad_map_gen)
    normal = m.elements[0].mapping.get_normal(0.5)
    np.testing.assert_almost_equal(normal, (0.0, -1.0))
| {
"repo_name": "tbenthompson/codim1",
"path": "test/test_poly_mapping.py",
"copies": "1",
"size": "2541",
"license": "mit",
"hash": 7252302733068035000,
"line_mean": 35.8260869565,
"line_max": 70,
"alpha_frac": 0.6328217237,
"autogenerated": false,
"ratio": 2.7205567451820127,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8819071789871593,
"avg_score": 0.006861335802084082,
"num_lines": 69
} |
from functools import partial
import numpy as np
from gym.spaces import Box, Dict, Tuple
from scipy.stats import beta, norm
import tree
import unittest
from ray.rllib.models.tf.tf_action_dist import Beta, Categorical, \
DiagGaussian, GumbelSoftmax, MultiActionDistribution, MultiCategorical, \
SquashedGaussian
from ray.rllib.models.torch.torch_action_dist import TorchBeta, \
TorchCategorical, TorchDiagGaussian, TorchMultiActionDistribution, \
TorchMultiCategorical, TorchSquashedGaussian
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT, \
softmax, SMALL_NUMBER, LARGE_INTEGER
from ray.rllib.utils.test_utils import check, framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class TestDistributions(unittest.TestCase):
    """Tests ActionDistribution classes."""

    def _stability_test(self,
                        distribution_cls,
                        network_output_shape,
                        fw,
                        sess=None,
                        bounds=None,
                        extra_kwargs=None):
        """Feed extreme NN outputs and assert samples/logps stay finite.

        Bug fix: the original filled the inputs with a ``for``/``else``; the
        ``else`` clause of a for-loop runs ONCE after normal completion, so it
        only overwrote the last first-half element and left the entire second
        half (the log-stds for Gaussians) at zero. Two explicit loops now fill
        both halves as the original comment intended.
        """
        extreme_values = [
            0.0,
            float(LARGE_INTEGER),
            -float(LARGE_INTEGER),
            1.1e-34,
            1.1e34,
            -1.1e-34,
            -1.1e34,
            SMALL_NUMBER,
            -SMALL_NUMBER,
        ]
        inputs = np.zeros(shape=network_output_shape, dtype=np.float32)
        for batch_item in range(network_output_shape[0]):
            vec = inputs[batch_item]
            half = len(vec) // 2
            # First half: raw extreme values.
            for num in range(half):
                vec[num] = np.random.choice(extreme_values)
            # Second half: for Gaussians, these are log standard deviations,
            # and should therefore be the log of a positive number >= 1.
            for num in range(half, len(vec)):
                vec[num] = np.log(max(1, np.random.choice(extreme_values)))

        dist = distribution_cls(inputs, {}, **(extra_kwargs or {}))
        for _ in range(100):
            sample = dist.sample()
            if fw != "tf":
                sample_check = sample.numpy()
            else:
                sample_check = sess.run(sample)
            assert not np.any(np.isnan(sample_check))
            assert np.all(np.isfinite(sample_check))
            if bounds:
                assert np.min(sample_check) >= bounds[0]
                assert np.max(sample_check) <= bounds[1]
                # Make sure bounds make sense and are actually also being
                # sampled.
                if isinstance(bounds[0], int):
                    assert isinstance(bounds[1], int)
                    assert bounds[0] in sample_check
                    assert bounds[1] in sample_check
            logp = dist.logp(sample)
            if fw != "tf":
                logp_check = logp.numpy()
            else:
                logp_check = sess.run(logp)
            assert not np.any(np.isnan(logp_check))
            assert np.all(np.isfinite(logp_check))

    def test_categorical(self):
        """Tests the Categorical ActionDistribution for all frameworks."""
        batch_size = 10000
        num_categories = 4
        # Create categorical distribution with n categories.
        inputs_space = Box(-1.0, 2.0, shape=(batch_size, num_categories))
        values_space = Box(
            0, num_categories - 1, shape=(batch_size, ), dtype=np.int32)
        inputs = inputs_space.sample()
        for fw, sess in framework_iterator(session=True):
            # Create the correct distribution object.
            cls = Categorical if fw != "torch" else TorchCategorical
            categorical = cls(inputs, {})
            # Do a stability test using extreme NN outputs to see whether
            # sampling and logp'ing result in NaN or +/-inf values.
            self._stability_test(
                cls,
                inputs_space.shape,
                fw=fw,
                sess=sess,
                bounds=(0, num_categories - 1))
            # Batch of size=3 and deterministic (True).
            expected = np.transpose(np.argmax(inputs, axis=-1))
            # Sample, expect always max value
            # (max likelihood for deterministic draw).
            out = categorical.deterministic_sample()
            check(out, expected)
            # Batch of size=3 and non-deterministic -> expect roughly the mean.
            out = categorical.sample()
            check(
                tf.reduce_mean(out)
                if fw != "torch" else torch.mean(out.float()),
                1.0,
                decimals=0)
            # Test log-likelihood outputs.
            probs = softmax(inputs)
            values = values_space.sample()
            out = categorical.logp(values
                                   if fw != "torch" else torch.Tensor(values))
            expected = []
            for i in range(batch_size):
                expected.append(np.sum(np.log(np.array(probs[i][values[i]]))))
            check(out, expected, decimals=4)
            # Test entropy outputs.
            out = categorical.entropy()
            expected_entropy = -np.sum(probs * np.log(probs), -1)
            check(out, expected_entropy)

    def test_multi_categorical(self):
        """Tests the MultiCategorical ActionDistribution for all frameworks."""
        batch_size = 100
        num_categories = 3
        num_sub_distributions = 5
        # Create 5 categorical distributions of 3 categories each.
        inputs_space = Box(
            -1.0,
            2.0,
            shape=(batch_size, num_sub_distributions * num_categories))
        values_space = Box(
            0,
            num_categories - 1,
            shape=(num_sub_distributions, batch_size),
            dtype=np.int32)
        inputs = inputs_space.sample()
        input_lengths = [num_categories] * num_sub_distributions
        inputs_split = np.split(inputs, num_sub_distributions, axis=1)
        for fw, sess in framework_iterator(session=True):
            # Create the correct distribution object.
            cls = MultiCategorical if fw != "torch" else TorchMultiCategorical
            multi_categorical = cls(inputs, None, input_lengths)
            # Do a stability test using extreme NN outputs to see whether
            # sampling and logp'ing result in NaN or +/-inf values.
            self._stability_test(
                cls,
                inputs_space.shape,
                fw=fw,
                sess=sess,
                bounds=(0, num_categories - 1),
                extra_kwargs={"input_lens": input_lengths})
            # Batch of size=3 and deterministic (True).
            expected = np.transpose(np.argmax(inputs_split, axis=-1))
            # Sample, expect always max value
            # (max likelihood for deterministic draw).
            out = multi_categorical.deterministic_sample()
            check(out, expected)
            # Batch of size=3 and non-deterministic -> expect roughly the mean.
            out = multi_categorical.sample()
            check(
                tf.reduce_mean(out)
                if fw != "torch" else torch.mean(out.float()),
                1.0,
                decimals=0)
            # Test log-likelihood outputs.
            probs = softmax(inputs_split)
            values = values_space.sample()
            out = multi_categorical.logp(values if fw != "torch" else [
                torch.Tensor(values[i]) for i in range(num_sub_distributions)
            ])  # v in np.stack(values, 1)])
            expected = []
            for i in range(batch_size):
                expected.append(
                    np.sum(
                        np.log(
                            np.array([
                                probs[j][i][values[j][i]]
                                for j in range(num_sub_distributions)
                            ]))))
            check(out, expected, decimals=4)
            # Test entropy outputs.
            out = multi_categorical.entropy()
            expected_entropy = -np.sum(np.sum(probs * np.log(probs), 0), -1)
            check(out, expected_entropy)

    def test_squashed_gaussian(self):
        """Tests the SquashedGaussian ActionDistribution for all frameworks."""
        input_space = Box(-2.0, 2.0, shape=(2000, 10))
        low, high = -2.0, 1.0
        for fw, sess in framework_iterator(
                frameworks=("torch", "tf", "tfe"), session=True):
            cls = SquashedGaussian if fw != "torch" else TorchSquashedGaussian
            # Do a stability test using extreme NN outputs to see whether
            # sampling and logp'ing result in NaN or +/-inf values.
            self._stability_test(
                cls, input_space.shape, fw=fw, sess=sess, bounds=(low, high))
            # Batch of size=n and deterministic.
            inputs = input_space.sample()
            means, _ = np.split(inputs, 2, axis=-1)
            squashed_distribution = cls(inputs, {}, low=low, high=high)
            expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
            # Sample n times, expect always mean value (deterministic draw).
            out = squashed_distribution.deterministic_sample()
            check(out, expected)
            # Batch of size=n and non-deterministic -> expect roughly the mean.
            inputs = input_space.sample()
            means, log_stds = np.split(inputs, 2, axis=-1)
            squashed_distribution = cls(inputs, {}, low=low, high=high)
            expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
            values = squashed_distribution.sample()
            if sess:
                values = sess.run(values)
            else:
                values = values.numpy()
            self.assertTrue(np.max(values) <= high)
            self.assertTrue(np.min(values) >= low)
            check(np.mean(values), expected.mean(), decimals=1)
            # Test log-likelihood outputs.
            sampled_action_logp = squashed_distribution.logp(
                values if fw != "torch" else torch.Tensor(values))
            if sess:
                sampled_action_logp = sess.run(sampled_action_logp)
            else:
                sampled_action_logp = sampled_action_logp.numpy()
            # Convert to parameters for distr.
            stds = np.exp(
                np.clip(log_stds, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT))
            # Unsquash values, then get log-llh from regular gaussian.
            # atanh_in = np.clip((values - low) / (high - low) * 2.0 - 1.0,
            #    -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER)
            normed_values = (values - low) / (high - low) * 2.0 - 1.0
            save_normed_values = np.clip(normed_values, -1.0 + SMALL_NUMBER,
                                         1.0 - SMALL_NUMBER)
            unsquashed_values = np.arctanh(save_normed_values)
            log_prob_unsquashed = np.sum(
                np.log(norm.pdf(unsquashed_values, means, stds)), -1)
            log_prob = log_prob_unsquashed - np.sum(
                np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1)
            check(np.sum(sampled_action_logp), np.sum(log_prob), rtol=0.05)
            # NN output.
            means = np.array([[0.1, 0.2, 0.3, 0.4, 50.0],
                              [-0.1, -0.2, -0.3, -0.4, -1.0]])
            log_stds = np.array([[0.8, -0.2, 0.3, -1.0, 2.0],
                                 [0.7, -0.3, 0.4, -0.9, 2.0]])
            squashed_distribution = cls(
                inputs=np.concatenate([means, log_stds], axis=-1),
                model={},
                low=low,
                high=high)
            # Convert to parameters for distr.
            stds = np.exp(log_stds)
            # Values to get log-likelihoods for.
            values = np.array([[0.9, 0.2, 0.4, -0.1, -1.05],
                               [-0.9, -0.2, 0.4, -0.1, -1.05]])
            # Unsquash values, then get log-llh from regular gaussian.
            unsquashed_values = np.arctanh((values - low) /
                                           (high - low) * 2.0 - 1.0)
            log_prob_unsquashed = np.sum(
                np.log(norm.pdf(unsquashed_values, means, stds)), -1)
            log_prob = log_prob_unsquashed - np.sum(
                np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1)
            outs = squashed_distribution.logp(values if fw != "torch" else
                                              torch.Tensor(values))
            if sess:
                outs = sess.run(outs)
            check(outs, log_prob, decimals=4)

    def test_diag_gaussian(self):
        """Tests the DiagGaussian ActionDistribution for all frameworks."""
        input_space = Box(-2.0, 1.0, shape=(2000, 10))
        for fw, sess in framework_iterator(
                frameworks=("torch", "tf", "tfe"), session=True):
            cls = DiagGaussian if fw != "torch" else TorchDiagGaussian
            # Do a stability test using extreme NN outputs to see whether
            # sampling and logp'ing result in NaN or +/-inf values.
            self._stability_test(cls, input_space.shape, fw=fw, sess=sess)
            # Batch of size=n and deterministic.
            inputs = input_space.sample()
            means, _ = np.split(inputs, 2, axis=-1)
            diag_distribution = cls(inputs, {})
            expected = means
            # Sample n times, expect always mean value (deterministic draw).
            out = diag_distribution.deterministic_sample()
            check(out, expected)
            # Batch of size=n and non-deterministic -> expect roughly the mean.
            inputs = input_space.sample()
            means, log_stds = np.split(inputs, 2, axis=-1)
            diag_distribution = cls(inputs, {})
            expected = means
            values = diag_distribution.sample()
            if sess:
                values = sess.run(values)
            else:
                values = values.numpy()
            check(np.mean(values), expected.mean(), decimals=1)
            # Test log-likelihood outputs.
            sampled_action_logp = diag_distribution.logp(
                values if fw != "torch" else torch.Tensor(values))
            if sess:
                sampled_action_logp = sess.run(sampled_action_logp)
            else:
                sampled_action_logp = sampled_action_logp.numpy()
            # NN output.
            means = np.array(
                [[0.1, 0.2, 0.3, 0.4, 50.0], [-0.1, -0.2, -0.3, -0.4, -1.0]],
                dtype=np.float32)
            log_stds = np.array(
                [[0.8, -0.2, 0.3, -1.0, 2.0], [0.7, -0.3, 0.4, -0.9, 2.0]],
                dtype=np.float32)
            diag_distribution = cls(
                inputs=np.concatenate([means, log_stds], axis=-1), model={})
            # Convert to parameters for distr.
            stds = np.exp(log_stds)
            # Values to get log-likelihoods for.
            values = np.array([[0.9, 0.2, 0.4, -0.1, -1.05],
                               [-0.9, -0.2, 0.4, -0.1, -1.05]])
            # get log-llh from regular gaussian.
            log_prob = np.sum(np.log(norm.pdf(values, means, stds)), -1)
            outs = diag_distribution.logp(values if fw != "torch" else
                                          torch.Tensor(values))
            if sess:
                outs = sess.run(outs)
            check(outs, log_prob, decimals=4)

    def test_beta(self):
        """Tests the Beta ActionDistribution against scipy's beta."""
        input_space = Box(-2.0, 1.0, shape=(2000, 10))
        low, high = -1.0, 2.0
        plain_beta_value_space = Box(0.0, 1.0, shape=(2000, 5))
        for fw, sess in framework_iterator(session=True):
            cls = TorchBeta if fw == "torch" else Beta
            inputs = input_space.sample()
            beta_distribution = cls(inputs, {}, low=low, high=high)
            inputs = beta_distribution.inputs
            if sess:
                inputs = sess.run(inputs)
            else:
                inputs = inputs.numpy()
            alpha, beta_ = np.split(inputs, 2, axis=-1)
            # Mean for a Beta distribution: 1 / [1 + (beta/alpha)]
            expected = (1.0 / (1.0 + beta_ / alpha)) * (high - low) + low
            # Sample n times, expect always mean value (deterministic draw).
            out = beta_distribution.deterministic_sample()
            check(out, expected, rtol=0.01)
            # Batch of size=n and non-deterministic -> expect roughly the mean.
            values = beta_distribution.sample()
            if sess:
                values = sess.run(values)
            else:
                values = values.numpy()
            self.assertTrue(np.max(values) <= high)
            self.assertTrue(np.min(values) >= low)
            check(np.mean(values), expected.mean(), decimals=1)
            # Test log-likelihood outputs (against scipy).
            inputs = input_space.sample()
            beta_distribution = cls(inputs, {}, low=low, high=high)
            inputs = beta_distribution.inputs
            if sess:
                inputs = sess.run(inputs)
            else:
                inputs = inputs.numpy()
            alpha, beta_ = np.split(inputs, 2, axis=-1)
            values = plain_beta_value_space.sample()
            values_scaled = values * (high - low) + low
            if fw == "torch":
                values_scaled = torch.Tensor(values_scaled)
            out = beta_distribution.logp(values_scaled)
            check(
                out,
                np.sum(np.log(beta.pdf(values, alpha, beta_)), -1),
                rtol=0.01)
            # TODO(sven): Test entropy outputs (against scipy).

    def test_gumbel_softmax(self):
        """Tests the GumbelSoftmax ActionDistribution (tf + eager only)."""
        for fw, sess in framework_iterator(
                frameworks=["tf", "tfe"], session=True):
            batch_size = 1000
            num_categories = 5
            input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))
            # Batch of size=n and deterministic.
            inputs = input_space.sample()
            gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
            expected = softmax(inputs)
            # Sample n times, expect always mean value (deterministic draw).
            out = gumbel_softmax.deterministic_sample()
            check(out, expected)
            # Batch of size=n and non-deterministic -> expect roughly that
            # the max-likelihood (argmax) ints are output (most of the time).
            inputs = input_space.sample()
            gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
            expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32)
            outs = gumbel_softmax.sample()
            if sess:
                outs = sess.run(outs)
            check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)

    def test_multi_action_distribution(self):
        """Tests the MultiActionDistribution (across all frameworks)."""
        batch_size = 1000
        input_space = Tuple([
            Box(-10.0, 10.0, shape=(batch_size, 4)),
            Box(-2.0, 2.0, shape=(
                batch_size,
                6,
            )),
            Dict({
                "a": Box(-1.0, 1.0, shape=(batch_size, 4))
            }),
        ])
        std_space = Box(
            -0.05, 0.05, shape=(
                batch_size,
                3,
            ))
        low, high = -1.0, 1.0
        value_space = Tuple([
            Box(0, 3, shape=(batch_size, ), dtype=np.int32),
            Box(-2.0, 2.0, shape=(batch_size, 3), dtype=np.float32),
            Dict({
                "a": Box(0.0, 1.0, shape=(batch_size, 2), dtype=np.float32)
            })
        ])
        for fw, sess in framework_iterator(session=True):
            if fw == "torch":
                cls = TorchMultiActionDistribution
                child_distr_cls = [
                    TorchCategorical, TorchDiagGaussian,
                    partial(TorchBeta, low=low, high=high)
                ]
            else:
                cls = MultiActionDistribution
                child_distr_cls = [
                    Categorical,
                    DiagGaussian,
                    partial(Beta, low=low, high=high),
                ]
            inputs = list(input_space.sample())
            distr = cls(
                np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
                model={},
                action_space=value_space,
                child_distributions=child_distr_cls,
                input_lens=[4, 6, 4])
            # Adjust inputs for the Beta distr just as Beta itself does.
            inputs[2]["a"] = np.clip(inputs[2]["a"], np.log(SMALL_NUMBER),
                                     -np.log(SMALL_NUMBER))
            inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
            # Sample deterministically.
            expected_det = [
                np.argmax(inputs[0], axis=-1),
                inputs[1][:, :3],  # [:3]=Mean values.
                # Mean for a Beta distribution:
                # 1 / [1 + (beta/alpha)] * range + low
                (1.0 / (1.0 + inputs[2]["a"][:, 2:] / inputs[2]["a"][:, 0:2]))
                * (high - low) + low,
            ]
            out = distr.deterministic_sample()
            if sess:
                out = sess.run(out)
            check(out[0], expected_det[0])
            check(out[1], expected_det[1])
            check(out[2]["a"], expected_det[2])
            # Stochastic sampling -> expect roughly the mean.
            inputs = list(input_space.sample())
            # Fix categorical inputs (not needed for distribution itself, but
            # for our expectation calculations).
            inputs[0] = softmax(inputs[0], -1)
            # Fix std inputs (shouldn't be too large for this test).
            inputs[1][:, 3:] = std_space.sample()
            # Adjust inputs for the Beta distr just as Beta itself does.
            inputs[2]["a"] = np.clip(inputs[2]["a"], np.log(SMALL_NUMBER),
                                     -np.log(SMALL_NUMBER))
            inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
            distr = cls(
                np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
                model={},
                action_space=value_space,
                child_distributions=child_distr_cls,
                input_lens=[4, 6, 4])
            expected_mean = [
                np.mean(np.sum(inputs[0] * np.array([0, 1, 2, 3]), -1)),
                inputs[1][:, :3],  # [:3]=Mean values.
                # Mean for a Beta distribution:
                # 1 / [1 + (beta/alpha)] * range + low
                (1.0 / (1.0 + inputs[2]["a"][:, 2:] / inputs[2]["a"][:, :2])) *
                (high - low) + low,
            ]
            out = distr.sample()
            if sess:
                out = sess.run(out)
            out = list(out)
            if fw == "torch":
                out[0] = out[0].numpy()
                out[1] = out[1].numpy()
                out[2]["a"] = out[2]["a"].numpy()
            check(np.mean(out[0]), expected_mean[0], decimals=1)
            check(np.mean(out[1], 0), np.mean(expected_mean[1], 0), decimals=1)
            check(
                np.mean(out[2]["a"], 0),
                np.mean(expected_mean[2], 0),
                decimals=1)
            # Test log-likelihood outputs.
            # Make sure beta-values are within 0.0 and 1.0 for the numpy
            # calculation (which doesn't have scaling).
            inputs = list(input_space.sample())
            # Adjust inputs for the Beta distr just as Beta itself does.
            inputs[2]["a"] = np.clip(inputs[2]["a"], np.log(SMALL_NUMBER),
                                     -np.log(SMALL_NUMBER))
            inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
            distr = cls(
                np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
                model={},
                action_space=value_space,
                child_distributions=child_distr_cls,
                input_lens=[4, 6, 4])
            inputs[0] = softmax(inputs[0], -1)
            values = list(value_space.sample())
            log_prob_beta = np.log(
                beta.pdf(values[2]["a"], inputs[2]["a"][:, :2],
                         inputs[2]["a"][:, 2:]))
            # Now do the up-scaling for [2] (beta values) to be between
            # low/high.
            values[2]["a"] = values[2]["a"] * (high - low) + low
            inputs[1][:, 3:] = np.exp(inputs[1][:, 3:])
            expected_log_llh = np.sum(
                np.concatenate([
                    np.expand_dims(
                        np.log(
                            [i[values[0][j]]
                             for j, i in enumerate(inputs[0])]), -1),
                    np.log(
                        norm.pdf(values[1], inputs[1][:, :3],
                                 inputs[1][:, 3:])), log_prob_beta
                ], -1), -1)
            values[0] = np.expand_dims(values[0], -1)
            if fw == "torch":
                values = tree.map_structure(lambda s: torch.Tensor(s), values)
            # Test all flattened input.
            concat = np.concatenate(tree.flatten(values),
                                    -1).astype(np.float32)
            out = distr.logp(concat)
            if sess:
                out = sess.run(out)
            check(out, expected_log_llh, atol=15)
            # Test structured input.
            out = distr.logp(values)
            if sess:
                out = sess.run(out)
            check(out, expected_log_llh, atol=15)
            # Test flattened input.
            out = distr.logp(tree.flatten(values))
            if sess:
                out = sess.run(out)
            check(out, expected_log_llh, atol=15)
if __name__ == "__main__":
    import sys

    import pytest

    # Run only this test module, verbosely, and propagate pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_name": "robertnishihara/ray",
"path": "rllib/models/tests/test_distributions.py",
"copies": "1",
"size": "26060",
"license": "apache-2.0",
"hash": 10524478258828692,
"line_mean": 41.0322580645,
"line_max": 79,
"alpha_frac": 0.5,
"autogenerated": false,
"ratio": 3.934772761588404,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934772761588404,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from operator import itemgetter
import os
import subprocess
import yaml
def RunCommand(command):
    """Echo and run a shell command through bash; return (stdout, returncode).

    NOTE(review): ``"".join(command)`` is a no-op for a str but concatenates a
    list WITHOUT separators -- presumably callers pre-join with whitespace;
    verify.
    NOTE(review): no pipe is requested from Popen, so ``stdout`` here is
    always None -- looks like only the return code is meaningful; confirm.
    """
    command = "".join(command)
    print "[RUNNING COMMAND]: ", command
    # shell=True with an explicit bash so `source` works in callers' commands.
    p = subprocess.Popen(command, shell=True, executable='/bin/bash')
    stdout, stderr = p.communicate()
    return (stdout, p.returncode)
def RunCommandInWorkspace(workspace, command):
    """Source the workspace's devel/setup.sh, then run command in that shell."""
    if isinstance(command, list):
        command = "".join(command)
    setup_script = os.path.join(workspace, "devel/setup.sh")
    return RunCommand("source {0}; {1}".format(setup_script, command))
def RunCommandInWorkingDirectory(working_dir, workspace, command):
    """Run command inside the sourced workspace after cd'ing into working_dir."""
    assert os.path.isdir(working_dir)
    if isinstance(command, list):
        command = "".join(command)
    full_command = "cd {0};{1}".format(working_dir, command)
    return RunCommandInWorkspace(workspace, full_command)
def GetBuildSpaceForPackage(workspace, package_name):
    """Return the catkin build directory of package_name inside workspace."""
    return os.path.join(workspace, "build", package_name)
def RunAllBenchmarksOfPackage(workspace, package_name):
    """Invoke 'make run_benchmarks' inside the package's build directory."""
    build_dir = GetBuildSpaceForPackage(workspace, package_name)
    setup_script = os.path.join(workspace, "devel/setup.sh")
    full_command = "source {0}; cd {1}; make run_benchmarks".format(
        setup_script, build_dir)
    return RunCommand(full_command)
def GetAllBenchmarkingResultsOfPackage(workspace, package):
    """Collect absolute paths of 'benchmark-*' result files in the build space.

    Follows symlinks while walking; duplicates are removed (order of the
    returned list is unspecified).
    """
    result_prefix = "benchmark-"
    build_space = GetBuildSpaceForPackage(workspace, package)
    found = []
    for root, _, filenames in os.walk(build_space, followlinks=True):
        root_abs = os.path.abspath(root)
        for filename in filenames:
            if filename.startswith(result_prefix):
                found.append(os.path.join(root_abs, filename))
    return list(set(found))
def UnitToScaler(unit_string):
    """Return the factor converting a value in unit_string to seconds.

    Bug fix: the original ended with ``assert(false, "...")`` -- building
    that tuple evaluates the undefined name ``false`` and raises a confusing
    NameError (and an assert on a non-empty tuple could never fail anyway).
    Unknown units now raise a clear ValueError.

    Raises:
        ValueError: if unit_string is not one of 'ns', 'us', 'ms', 's'.
    """
    scalers = {
        "ns": 1.0e-9,
        "us": 1.0e-6,
        "ms": 1.0e-3,
        "s": 1.0,
    }
    if unit_string not in scalers:
        raise ValueError("Unhandled unit: " + unit_string)
    return scalers[unit_string]
"repo_name": "mfehr/voxblox",
"path": "htwfsc_benchmarks/python/htwfsc_benchmarks/helpers.py",
"copies": "1",
"size": "2198",
"license": "bsd-3-clause",
"hash": 4670776960359328000,
"line_mean": 35.0491803279,
"line_max": 82,
"alpha_frac": 0.7101910828,
"autogenerated": false,
"ratio": 3.6151315789473686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9675073245113155,
"avg_score": 0.030049883326842705,
"num_lines": 61
} |
from functools import partial
import numpy as np
from pandas import pandas as pd, DataFrame
from bs4 import BeautifulSoup
import requests
from util.utils import check_cached
wthr = ('https://www.wunderground.com/history/airport/KPDK/{yr}/{m}/1/'
'MonthlyHistory.html?req_city=Alpharetta&req_state=GA&'
'req_statename=Georgia&reqdb.zip=30022&reqdb.magic=1&reqdb.wmo=99999')
def clean_string_srs(s, nulls=('-',)):
    """Convert a string Series containing null-ish placeholders to float.

    Some Series come through as strings because of placeholder values such
    as '-'. Those placeholders are replaced by NaN and the result is cast
    to float.

    Fix: the default for `nulls` was a mutable list (`['-']`); it is now an
    equivalent immutable tuple. Passing a list still works.

    Parameters
    ----------
    s : pandas.Series
        series of numeric strings, possibly containing placeholder values
    nulls : sequence of str, optional
        values to treat as missing

    Returns
    -------
    pandas.Series of float
    """
    bm = s.isin(list(nulls))
    s2 = s.copy()
    s2.loc[bm] = np.nan
    return s2.astype(float)
def edit_html_table(tab: 'html') -> 'html':
    """Normalize the source's messed-up table format (a body in each row,
    2 cols) so pandas can parse it with a single header row.

    Replaces the header row with the first body row, then unwraps every
    <tbody> so all rows become direct children of the table.
    """
    t = BeautifulSoup(str(tab), "lxml")
    orig_head = t.select_one('thead')
    new_head = t.select_one('tbody')
    # Promote the first body row into the header.
    orig_head.tr.replace_with(new_head.tr)
    # Unwrap all <tbody> wrappers, leaving their rows in place.
    for body in t.find_all('tbody'):
        body.replace_with_children()
    return t
def check_max_min(df, msg='Uh oh'):
    """Assert that 'Hi'/'Lo' columns are the row-wise max/min of df.

    df is one second-level group of a 2-level-column frame, with columns
    'Hi', 'Avg', 'Lo'. Raises AssertionError (showing the offending rows)
    when 'Hi' is not the row max or 'Lo' is not the row min.
    """
    row_max = df.max(axis=1)
    row_min = df.min(axis=1)
    augmented = df.assign(Max=row_max, Min=row_min)
    hi_ok = augmented.Max == augmented.Hi
    assert hi_ok.all(), '{}\n\n{}'.format(msg, augmented[~hi_ok])
    lo_ok = augmented.Min == augmented.Lo
    assert lo_ok.all(), '{}\n\n{}'.format(msg, augmented[~lo_ok])
def check_max_min_df(df):
    """Run check_max_min on every top-level column group of a 2-level frame."""
    top_levels = {c for c, _ in df}
    for col in top_levels:
        check_max_min(df[col],
                      msg='Bad vals in {}: \n\n{}'.format(col, df[col]))
def process_wtrdf(df, fixer=None):
    """Reshape a raw monthly weather frame into (measure, Hi/Avg/Lo) columns.

    Parameters
    ----------
    df : pandas.DataFrame
        raw frame parsed from the wunderground daily table; the first 18
        columns are six measures x three statistics, plus a 'sum'
        precipitation column and a trailing events column.
    fixer : callable, optional
        applied to the reshaped frame (in place) to null out known-bad
        cells before the max/min sanity check.

    Returns
    -------
    pandas.DataFrame with a 2-level column index.
    """
    lvl_1_cols = 'Temp Dew Humidity Press Vis Wind Prec'.split()
    lvl_2_cols = 'Hi Avg Lo'.split()
    cols = pd.MultiIndex.from_product([lvl_1_cols, lvl_2_cols])
    # NOTE(review): module-level `wsub` looks like a debugging leftover --
    # confirm nothing else imports it, then drop the global.
    global wsub
    # Get subset of df w/ max/min/avg
    wsub = df.iloc[:, :18].copy()
    wsub.columns = cols[:18]
    # Fix null values
    str_cols = wsub.columns[wsub.dtypes == object]
    if len(str_cols):
        wsub[str_cols] = wsub[str_cols].apply(clean_string_srs)
    # Reorder cols for Wind: presumably the source's wind columns arrive in
    # Avg/Lo/Hi order; relabel then reassign so they line up with the other
    # measures' Hi/Avg/Lo layout -- TODO confirm against a raw page.
    wind = wsub['Wind'].copy()
    wind.columns = 'Avg Lo Hi'.split()
    wsub['Wind'] = wind
    # Fix and check data
    if fixer is not None:
        fixer(wsub)
    check_max_min_df(wsub.dropna(axis=0))
    # Add other cols
    wsub[('Prec', '')] = df['sum']
    wsub[('Event', '')] = df.iloc[:, -1]
    return wsub
def wtr_page2df(content: str, fixer=None) -> DataFrame:
    """Parse a wunderground monthly-history HTML page into a processed frame.

    Selects the first 'table.daily', normalizes it with edit_html_table,
    reads it via pandas.read_html, then reshapes with process_wtrdf.
    """
    # NOTE(review): module-level `df_orig` looks like a debugging leftover --
    # confirm nothing else reads it, then drop the global.
    global df_orig
    soup = BeautifulSoup(content, "lxml")
    tab_orig = soup.select('table.daily')[0]
    tab = edit_html_table(tab_orig)
    df_orig = pd.read_html(str(tab), header=0, index_col=0)[0]
    df = process_wtrdf(df_orig.copy(), fixer=fixer)
    return df
def fix_df_exceptions(df, yr, m):
    """Null out known-bad wind readings for specific (year, month) pages.

    Mutates df in place; months without a known exception are untouched.
    """
    bad_cells = {
        (2013, 9): [(17, ('Wind', 'Hi'))],
        (2014, 7): [(13, ('Wind', 'Hi'))],
        (2014, 12): [(2, ('Wind', 'Hi')), (2, ('Wind', 'Avg'))],
    }
    for row, col in bad_cells.get((yr, m), []):
        df.loc[row, col] = np.nan
def wtr_date2df(yr, month):
    """Download and parse the weather table for one year/month."""
    fixer = partial(fix_df_exceptions, yr=yr, m=month)
    url = wthr.format(yr=yr, m=month)
    response = requests.get(url)
    return wtr_page2df(response.content, fixer=fixer)
@check_cached(.5)
def add_dates(df, yr=None, m=None):
    """Attach Yr/M columns and turn the day index into a 'Day' column."""
    df.index.name = None
    with_dates = df.assign(Yr=yr, M=m).reset_index(drop=0)
    return with_dates.rename(columns={'index': 'Day'})
def wthr_data(yrmths, wtr_date2df=None):
    """Fetch and concatenate monthly weather frames for (yr, month) pairs.

    wtr_date2df is required (kwarg kept for call-site clarity) and is
    called once per pair; each result is date-stamped via add_dates.
    """
    assert wtr_date2df
    monthly_frames = []
    for yr, m in yrmths:
        monthly_frames.append(add_dates(wtr_date2df(yr, m), yr=yr, m=m))
    return pd.concat(monthly_frames, ignore_index=True)
| {
"repo_name": "d10genes/pollen",
"path": "util/wthr_utils.py",
"copies": "1",
"size": "3690",
"license": "mit",
"hash": -772470176764385900,
"line_mean": 27.1679389313,
"line_max": 85,
"alpha_frac": 0.5953929539,
"autogenerated": false,
"ratio": 2.7765237020316027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38719166559316026,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from scipy.sparse import bsr_matrix
from menpo.base import name_of_callable
from menpo.math import as_matrix
from menpo.shape import UndirectedGraph
from menpo.visualize import print_progress, bytes_str, print_dynamic
def _covariance_matrix_inverse(cov_mat, n_components):
    """Invert a covariance matrix, optionally via truncated SVD.

    If n_components is None, the plain inverse is returned. Otherwise an
    SVD-truncated (pseudo-)inverse keeping the top n_components singular
    values is computed; for a symmetric covariance U and V coincide, so
    U diag(1/s) Vt equals the (truncated) inverse.

    Bug fix: the fallback used a bare ``except:`` which silently swallowed
    every exception type; it now catches only numpy's LinAlgError (e.g. SVD
    non-convergence) and falls back to the direct inverse.
    """
    if n_components is None:
        return np.linalg.inv(cov_mat)
    else:
        try:
            s, v, d = np.linalg.svd(cov_mat)
            s = s[:, :n_components]
            v = v[:n_components]
            d = d[:n_components, :]
            return s.dot(np.diag(1 / v)).dot(d)
        except np.linalg.LinAlgError:
            return np.linalg.inv(cov_mat)
def _create_sparse_precision(
    X,
    graph,
    n_features,
    n_features_per_vertex,
    mode="concatenation",
    dtype=np.float32,
    n_components=None,
    bias=0,
    return_covariances=False,
    verbose=False,
):
    """Build the GMRF precision matrix as a `scipy.sparse.bsr_matrix`.

    For each edge (v1, v2) of `graph`, the covariance of the edge's feature
    vector -- the concatenation ``[x_v1; x_v2]`` or the difference
    ``x_v1 - x_v2``, depending on `mode` -- is estimated from the rows of
    `X` (shape ``(n_samples, n_features)``; each vertex owns a contiguous
    slice of `n_features_per_vertex` columns) and inverted with
    `_covariance_matrix_inverse`. Every inverse contributes four
    ``(n_features_per_vertex, n_features_per_vertex)`` blocks, at block
    positions (v1, v1), (v2, v2), (v1, v2) and (v2, v1), which are
    assembled into BSR format.

    If `return_covariances` is ``True``, the (uninverted) per-edge
    covariance matrices are returned as well, so that the model can later
    be updated incrementally.
    """
    # check mode argument
    if mode not in ["concatenation", "subtraction"]:
        raise ValueError(
            "mode must be either ''concatenation'' "
            "or ''subtraction''; {} is given.".format(mode)
        )
    # Initialize arrays
    all_blocks = np.zeros(
        (graph.n_edges * 4, n_features_per_vertex, n_features_per_vertex), dtype=dtype
    )
    if return_covariances:
        # In concatenation mode the edge covariance is over the stacked
        # feature pair, hence twice the per-vertex size.
        if mode == "concatenation":
            cov_shape = (
                graph.n_edges,
                2 * n_features_per_vertex,
                2 * n_features_per_vertex,
            )
        else:
            cov_shape = (graph.n_edges, n_features_per_vertex, n_features_per_vertex)
        all_covariances = np.zeros(cov_shape, dtype=dtype)
    # Block-row / block-column index of each entry of all_blocks.
    columns = np.zeros(graph.n_edges * 4)
    rows = np.zeros(graph.n_edges * 4)
    # Print information if asked
    if verbose:
        edges = print_progress(
            range(graph.n_edges),
            n_items=graph.n_edges,
            prefix="Precision per edge",
            end_with_newline=False,
        )
    else:
        edges = range(graph.n_edges)
    # Compute covariance matrix for each edge, invert it and store it
    count = -1
    for e in edges:
        # edge vertices
        v1 = graph.edges[e, 0]
        v2 = graph.edges[e, 1]
        # find indices in data matrix
        v1_from = v1 * n_features_per_vertex
        v1_to = (v1 + 1) * n_features_per_vertex
        v2_from = v2 * n_features_per_vertex
        v2_to = (v2 + 1) * n_features_per_vertex
        # data concatenation
        if mode == "concatenation":
            edge_data = X[:, list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
        else:
            edge_data = X[:, v1_from:v1_to] - X[:, v2_from:v2_to]
        # compute covariance matrix (rowvar=0: rows of X are observations)
        covmat = np.cov(edge_data, rowvar=0, bias=bias)
        if return_covariances:
            all_covariances[e] = covmat
        # invert it
        covmat = _covariance_matrix_inverse(covmat, n_components)
        # store it
        if mode == "concatenation":
            # The inverse is (2k, 2k); split it into its four (k, k)
            # quadrants and record each with its block coordinates.
            # v1, v1
            count += 1
            all_blocks[count] = covmat[:n_features_per_vertex, :n_features_per_vertex]
            rows[count] = v1
            columns[count] = v1
            # v2, v2
            count += 1
            all_blocks[count] = covmat[n_features_per_vertex::, n_features_per_vertex::]
            rows[count] = v2
            columns[count] = v2
            # v1, v2
            count += 1
            all_blocks[count] = covmat[:n_features_per_vertex, n_features_per_vertex::]
            rows[count] = v1
            columns[count] = v2
            # v2, v1
            count += 1
            all_blocks[count] = covmat[n_features_per_vertex::, :n_features_per_vertex]
            rows[count] = v2
            columns[count] = v1
        else:
            # Subtraction mode: the same (k, k) inverse goes on both
            # diagonal blocks and its negation on the off-diagonals.
            # v1, v1
            count += 1
            all_blocks[count] = covmat
            rows[count] = v1
            columns[count] = v1
            # v2, v2
            count += 1
            all_blocks[count] = covmat
            rows[count] = v2
            columns[count] = v2
            # v1, v2
            count += 1
            all_blocks[count] = -covmat
            rows[count] = v1
            columns[count] = v2
            # v2, v1
            count += 1
            all_blocks[count] = -covmat
            rows[count] = v2
            columns[count] = v1
    # sort rows, columns and all_blocks
    # BSR needs the entries grouped by block-row; sort everything by row.
    rows_arg_sort = rows.argsort()
    columns = columns[rows_arg_sort]
    all_blocks = all_blocks[rows_arg_sort]
    rows = rows[rows_arg_sort]
    # create indptr
    # indptr[i]:indptr[i + 1] delimits the slice of `columns`/`all_blocks`
    # belonging to block-row i (empty rows get a zero-length slice).
    n_rows = graph.n_vertices
    indptr = np.zeros(n_rows + 1)
    for i in range(n_rows):
        (inds,) = np.where(rows == i)
        if inds.size == 0:
            indptr[i + 1] = indptr[i]
        else:
            indptr[i] = inds[0]
            indptr[i + 1] = inds[-1] + 1
    # create block sparse matrix
    if return_covariances:
        return (
            bsr_matrix(
                (all_blocks, columns, indptr),
                shape=(n_features, n_features),
                dtype=dtype,
            ),
            all_covariances,
        )
    else:
        return bsr_matrix(
            (all_blocks, columns, indptr), shape=(n_features, n_features), dtype=dtype
        )
def _create_dense_precision(
    X,
    graph,
    n_features,
    n_features_per_vertex,
    mode="concatenation",
    dtype=np.float32,
    n_components=None,
    bias=0,
    return_covariances=False,
    verbose=False,
):
    """Build the GMRF precision matrix as a dense `ndarray`.

    Dense counterpart of `_create_sparse_precision`: for each edge
    (v1, v2) the covariance of the edge's feature vector (concatenation or
    subtraction of the two vertex slices of `X`, depending on `mode`) is
    estimated and inverted, then written onto the four corresponding
    blocks of the ``(n_features, n_features)`` precision matrix. Diagonal
    blocks are accumulated with ``+=`` because a vertex can participate in
    several edges; off-diagonal blocks are assigned directly (presumably
    each vertex pair appears in at most one edge -- a duplicate edge would
    overwrite rather than accumulate; verify against the graph type).
    """
    # check mode argument
    if mode not in ["concatenation", "subtraction"]:
        raise ValueError(
            "mode must be either ''concatenation'' "
            "or ''subtraction''; {} is given.".format(mode)
        )
    # Initialize precision
    precision = np.zeros((n_features, n_features), dtype=dtype)
    if return_covariances:
        if mode == "concatenation":
            cov_shape = (
                graph.n_edges,
                2 * n_features_per_vertex,
                2 * n_features_per_vertex,
            )
        else:
            cov_shape = (graph.n_edges, n_features_per_vertex, n_features_per_vertex)
        all_covariances = np.zeros(cov_shape, dtype=dtype)
    # Print information if asked
    if verbose:
        print_dynamic(
            "Allocated precision matrix of size {}".format(bytes_str(precision.nbytes))
        )
        edges = print_progress(
            range(graph.n_edges),
            n_items=graph.n_edges,
            prefix="Precision per edge",
            end_with_newline=False,
        )
    else:
        edges = range(graph.n_edges)
    # Compute covariance matrix for each edge, invert it and store it
    for e in edges:
        # edge vertices
        v1 = graph.edges[e, 0]
        v2 = graph.edges[e, 1]
        # find indices in data matrix
        v1_from = v1 * n_features_per_vertex
        v1_to = (v1 + 1) * n_features_per_vertex
        v2_from = v2 * n_features_per_vertex
        v2_to = (v2 + 1) * n_features_per_vertex
        # data concatenation
        if mode == "concatenation":
            edge_data = X[:, list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
        else:
            edge_data = X[:, v1_from:v1_to] - X[:, v2_from:v2_to]
        # compute covariance matrix (rowvar=0: rows of X are observations)
        covmat = np.cov(edge_data, rowvar=0, bias=bias)
        if return_covariances:
            all_covariances[e] = covmat
        # invert it
        covmat = _covariance_matrix_inverse(covmat, n_components)
        # store it
        if mode == "concatenation":
            # Quadrants of the (2k, 2k) inverse go to the four blocks.
            # v1, v1
            precision[v1_from:v1_to, v1_from:v1_to] += covmat[
                :n_features_per_vertex, :n_features_per_vertex
            ]
            # v2, v2
            precision[v2_from:v2_to, v2_from:v2_to] += covmat[
                n_features_per_vertex::, n_features_per_vertex::
            ]
            # v1, v2
            precision[v1_from:v1_to, v2_from:v2_to] = covmat[
                :n_features_per_vertex, n_features_per_vertex::
            ]
            # v2, v1
            precision[v2_from:v2_to, v1_from:v1_to] = covmat[
                n_features_per_vertex::, :n_features_per_vertex
            ]
        elif mode == "subtraction":
            # Same (k, k) inverse on both diagonal blocks, negated on
            # the off-diagonals.
            # v1, v2
            precision[v1_from:v1_to, v2_from:v2_to] = -covmat
            # v2, v1
            precision[v2_from:v2_to, v1_from:v1_to] = -covmat
            # v1, v1
            precision[v1_from:v1_to, v1_from:v1_to] += covmat
            # v2, v2
            precision[v2_from:v2_to, v2_from:v2_to] += covmat
    # return covariances
    if return_covariances:
        return precision, all_covariances
    else:
        return precision
def _create_sparse_diagonal_precision(
    X,
    graph,
    n_features,
    n_features_per_vertex,
    dtype=np.float32,
    n_components=None,
    bias=0,
    return_covariances=False,
    verbose=False,
):
    """Build a block-diagonal GMRF precision matrix in BSR format.

    Used when the graph has no edges (see `GMRFVectorModel.__init__`): one
    covariance matrix is estimated per vertex from that vertex's column
    slice of `X`, inverted with `_covariance_matrix_inverse`, and stored
    as the (v, v) diagonal block of the sparse precision matrix. If
    `return_covariances` is ``True``, the uninverted per-vertex
    covariances are returned as well (needed for incremental updates).
    """
    # initialize covariances matrix
    all_blocks = np.zeros(
        (graph.n_vertices, n_features_per_vertex, n_features_per_vertex), dtype=dtype
    )
    if return_covariances:
        all_covariances = np.zeros(
            (graph.n_vertices, n_features_per_vertex, n_features_per_vertex),
            dtype=dtype,
        )
    # Block coordinates: one diagonal block per vertex.
    columns = np.zeros(graph.n_vertices)
    rows = np.zeros(graph.n_vertices)
    # Print information if asked
    if verbose:
        vertices = print_progress(
            range(graph.n_vertices),
            n_items=graph.n_vertices,
            prefix="Precision per vertex",
            end_with_newline=False,
        )
    else:
        vertices = range(graph.n_vertices)
    # Compute covariance matrix for each patch
    for v in vertices:
        # find indices in target precision matrix
        i_from = v * n_features_per_vertex
        i_to = (v + 1) * n_features_per_vertex
        # compute covariance (rowvar=0: rows of X are observations)
        covmat = np.cov(X[:, i_from:i_to], rowvar=0, bias=bias)
        if return_covariances:
            all_covariances[v] = covmat
        # invert it
        all_blocks[v] = _covariance_matrix_inverse(covmat, n_components)
        # store the inverse covariance and its locations
        rows[v] = v
        columns[v] = v
    # sort rows, columns and all_blocks
    # (rows is already 0..n-1 here, but keep the generic BSR assembly.)
    rows_arg_sort = rows.argsort()
    columns = columns[rows_arg_sort]
    all_blocks = all_blocks[rows_arg_sort]
    rows = rows[rows_arg_sort]
    # create indptr
    # indptr[i]:indptr[i + 1] delimits block-row i's slice of `columns`.
    n_rows = graph.n_vertices
    indptr = np.zeros(n_rows + 1)
    for i in range(n_rows):
        (inds,) = np.where(rows == i)
        if inds.size == 0:
            indptr[i + 1] = indptr[i]
        else:
            indptr[i] = inds[0]
            indptr[i + 1] = inds[-1] + 1
    # create block sparse matrix
    if return_covariances:
        return (
            bsr_matrix(
                (all_blocks, columns, indptr),
                shape=(n_features, n_features),
                dtype=dtype,
            ),
            all_covariances,
        )
    else:
        return bsr_matrix(
            (all_blocks, columns, indptr), shape=(n_features, n_features), dtype=dtype
        )
def _create_dense_diagonal_precision(
    X,
    graph,
    n_features,
    n_features_per_vertex,
    dtype=np.float32,
    n_components=None,
    bias=0,
    return_covariances=False,
    verbose=False,
):
    """Build a dense, block-diagonal GMRF precision matrix.

    Used when the graph has no edges: one covariance matrix is estimated
    per vertex from its column slice of ``X``, inverted, and written onto
    the matching diagonal block of the ``(n_features, n_features)``
    precision matrix. When ``return_covariances`` is ``True``, the
    uninverted per-vertex covariances are returned alongside it.
    """
    Q = np.zeros((n_features, n_features), dtype=dtype)
    all_covariances = None
    if return_covariances:
        all_covariances = np.zeros(
            (graph.n_vertices, n_features_per_vertex, n_features_per_vertex),
            dtype=dtype,
        )
    if verbose:
        print_dynamic(
            "Allocated precision matrix of size {}".format(bytes_str(Q.nbytes))
        )
        vertex_iter = print_progress(
            range(graph.n_vertices),
            n_items=graph.n_vertices,
            prefix="Precision per vertex",
            end_with_newline=False,
        )
    else:
        vertex_iter = range(graph.n_vertices)
    for v in vertex_iter:
        # Column slice of X owned by vertex v.
        start = v * n_features_per_vertex
        stop = start + n_features_per_vertex
        covmat = np.cov(X[:, start:stop], rowvar=0, bias=bias)
        if return_covariances:
            all_covariances[v] = covmat
        # Inverted covariance becomes the (v, v) diagonal block.
        Q[start:stop, start:stop] = _covariance_matrix_inverse(covmat, n_components)
    if return_covariances:
        return Q, all_covariances
    return Q
def _increment_sparse_precision(
    X,
    mean_vector,
    covariances,
    n,
    graph,
    n_features,
    n_features_per_vertex,
    mode="concatenation",
    dtype=np.float32,
    n_components=None,
    bias=0,
    verbose=False,
):
    """Incrementally update the BSR precision matrix with new samples.

    Mirrors `_create_sparse_precision`, but instead of estimating each
    per-edge covariance from scratch it updates the stored `covariances`
    (previously computed from `n` samples with mean `mean_vector`) with
    the new data in `X` via `_increment_multivariate_gaussian_cov`, then
    rebuilds the block-sparse precision matrix from the re-inverted
    covariances.

    Returns the new precision matrix together with the covariance
    matrices, which are also updated in place.
    """
    # check mode argument
    if mode not in ["concatenation", "subtraction"]:
        raise ValueError(
            "mode must be either ''concatenation'' "
            "or ''subtraction''; {} is given.".format(mode)
        )
    # Initialize arrays
    all_blocks = np.zeros(
        (graph.n_edges * 4, n_features_per_vertex, n_features_per_vertex), dtype=dtype
    )
    columns = np.zeros(graph.n_edges * 4)
    rows = np.zeros(graph.n_edges * 4)
    # Print information if asked
    if verbose:
        edges = print_progress(
            range(graph.n_edges),
            n_items=graph.n_edges,
            prefix="Precision per edge",
            end_with_newline=False,
        )
    else:
        edges = range(graph.n_edges)
    # Compute covariance matrix for each edge, invert it and store it
    count = -1
    for e in edges:
        # edge vertices
        v1 = graph.edges[e, 0]
        v2 = graph.edges[e, 1]
        # find indices in data matrix
        v1_from = v1 * n_features_per_vertex
        v1_to = (v1 + 1) * n_features_per_vertex
        v2_from = v2 * n_features_per_vertex
        v2_to = (v2 + 1) * n_features_per_vertex
        # data concatenation
        if mode == "concatenation":
            edge_data = X[:, list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
            m = mean_vector[list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
        else:
            edge_data = X[:, v1_from:v1_to] - X[:, v2_from:v2_to]
            m = mean_vector[v1_from:v1_to] - mean_vector[v2_from:v2_to]
        # increment (updates covariances[e] in place with the new samples)
        _, covariances[e] = _increment_multivariate_gaussian_cov(
            edge_data, m, covariances[e], n, bias=bias
        )
        # invert it
        covmat = _covariance_matrix_inverse(covariances[e], n_components)
        # store it
        if mode == "concatenation":
            # Quadrants of the (2k, 2k) inverse -> four blocks.
            # v1, v1
            count += 1
            all_blocks[count] = covmat[:n_features_per_vertex, :n_features_per_vertex]
            rows[count] = v1
            columns[count] = v1
            # v2, v2
            count += 1
            all_blocks[count] = covmat[n_features_per_vertex::, n_features_per_vertex::]
            rows[count] = v2
            columns[count] = v2
            # v1, v2
            count += 1
            all_blocks[count] = covmat[:n_features_per_vertex, n_features_per_vertex::]
            rows[count] = v1
            columns[count] = v2
            # v2, v1
            count += 1
            all_blocks[count] = covmat[n_features_per_vertex::, :n_features_per_vertex]
            rows[count] = v2
            columns[count] = v1
        else:
            # Same inverse on diagonal blocks, negated off-diagonal.
            # v1, v1
            count += 1
            all_blocks[count] = covmat
            rows[count] = v1
            columns[count] = v1
            # v2, v2
            count += 1
            all_blocks[count] = covmat
            rows[count] = v2
            columns[count] = v2
            # v1, v2
            count += 1
            all_blocks[count] = -covmat
            rows[count] = v1
            columns[count] = v2
            # v2, v1
            count += 1
            all_blocks[count] = -covmat
            rows[count] = v2
            columns[count] = v1
    # sort rows, columns and all_blocks
    # BSR needs the entries grouped by block-row; sort everything by row.
    rows_arg_sort = rows.argsort()
    columns = columns[rows_arg_sort]
    all_blocks = all_blocks[rows_arg_sort]
    rows = rows[rows_arg_sort]
    # create indptr
    # indptr[i]:indptr[i + 1] delimits block-row i's slice of `columns`.
    n_rows = graph.n_vertices
    indptr = np.zeros(n_rows + 1)
    for i in range(n_rows):
        (inds,) = np.where(rows == i)
        if inds.size == 0:
            indptr[i + 1] = indptr[i]
        else:
            indptr[i] = inds[0]
            indptr[i + 1] = inds[-1] + 1
    # create block sparse matrix
    return (
        bsr_matrix(
            (all_blocks, columns, indptr), shape=(n_features, n_features), dtype=dtype
        ),
        covariances,
    )
def _increment_dense_precision(
    X,
    mean_vector,
    covariances,
    n,
    graph,
    n_features,
    n_features_per_vertex,
    mode="concatenation",
    dtype=np.float32,
    n_components=None,
    bias=0,
    verbose=False,
):
    """Incrementally update the dense precision matrix with new samples.

    Dense counterpart of `_increment_sparse_precision`: each per-edge
    covariance in `covariances` (previously computed from `n` samples
    with mean `mean_vector`) is updated with the new samples in `X`,
    re-inverted, and written onto the corresponding blocks of a freshly
    allocated ``(n_features, n_features)`` precision matrix. Diagonal
    blocks accumulate with ``+=`` (a vertex may belong to several edges);
    off-diagonal blocks are assigned directly.

    Returns the new precision matrix and the (in-place updated)
    covariance matrices.
    """
    # check mode argument
    if mode not in ["concatenation", "subtraction"]:
        raise ValueError(
            "mode must be either ''concatenation'' "
            "or ''subtraction''; {} is given.".format(mode)
        )
    # Initialize precision
    precision = np.zeros((n_features, n_features), dtype=dtype)
    # Print information if asked
    if verbose:
        print_dynamic(
            "Allocated precision matrix of size {}".format(bytes_str(precision.nbytes))
        )
        edges = print_progress(
            range(graph.n_edges),
            n_items=graph.n_edges,
            prefix="Precision per edge",
            end_with_newline=False,
        )
    else:
        edges = range(graph.n_edges)
    # Compute covariance matrix for each edge, invert it and store it
    for e in edges:
        # edge vertices
        v1 = graph.edges[e, 0]
        v2 = graph.edges[e, 1]
        # find indices in data matrix
        v1_from = v1 * n_features_per_vertex
        v1_to = (v1 + 1) * n_features_per_vertex
        v2_from = v2 * n_features_per_vertex
        v2_to = (v2 + 1) * n_features_per_vertex
        # data concatenation
        if mode == "concatenation":
            edge_data = X[:, list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
            m = mean_vector[list(range(v1_from, v1_to)) + list(range(v2_from, v2_to))]
        else:
            edge_data = X[:, v1_from:v1_to] - X[:, v2_from:v2_to]
            m = mean_vector[v1_from:v1_to] - mean_vector[v2_from:v2_to]
        # increment (updates covariances[e] in place with the new samples)
        _, covariances[e] = _increment_multivariate_gaussian_cov(
            edge_data, m, covariances[e], n, bias=bias
        )
        # invert it
        covmat = _covariance_matrix_inverse(covariances[e], n_components)
        # store it
        if mode == "concatenation":
            # v1, v1
            precision[v1_from:v1_to, v1_from:v1_to] += covmat[
                :n_features_per_vertex, :n_features_per_vertex
            ]
            # v2, v2
            precision[v2_from:v2_to, v2_from:v2_to] += covmat[
                n_features_per_vertex::, n_features_per_vertex::
            ]
            # v1, v2
            precision[v1_from:v1_to, v2_from:v2_to] = covmat[
                :n_features_per_vertex, n_features_per_vertex::
            ]
            # v2, v1
            precision[v2_from:v2_to, v1_from:v1_to] = covmat[
                n_features_per_vertex::, :n_features_per_vertex
            ]
        elif mode == "subtraction":
            # v1, v2
            precision[v1_from:v1_to, v2_from:v2_to] = -covmat
            # v2, v1
            precision[v2_from:v2_to, v1_from:v1_to] = -covmat
            # v1, v1
            precision[v1_from:v1_to, v1_from:v1_to] += covmat
            # v2, v2
            precision[v2_from:v2_to, v2_from:v2_to] += covmat
    # return covariances
    return precision, covariances
def _increment_sparse_diagonal_precision(
    X,
    mean_vector,
    covariances,
    n,
    graph,
    n_features,
    n_features_per_vertex,
    dtype=np.float32,
    n_components=None,
    bias=0,
    verbose=False,
):
    """Incrementally update the block-diagonal BSR precision matrix.

    Edge-less counterpart of `_increment_sparse_precision`: each
    per-vertex covariance in `covariances` (previously computed from `n`
    samples with mean `mean_vector`) is updated with the new samples in
    `X` and re-inverted into the (v, v) diagonal block of the BSR matrix.

    Returns the new precision matrix and the (in-place updated)
    covariance matrices.
    """
    # initialize covariances matrix
    all_blocks = np.zeros(
        (graph.n_vertices, n_features_per_vertex, n_features_per_vertex), dtype=dtype
    )
    # Block coordinates: one diagonal block per vertex.
    columns = np.zeros(graph.n_vertices)
    rows = np.zeros(graph.n_vertices)
    # Print information if asked
    if verbose:
        vertices = print_progress(
            range(graph.n_vertices),
            n_items=graph.n_vertices,
            prefix="Precision per vertex",
            end_with_newline=False,
        )
    else:
        vertices = range(graph.n_vertices)
    # Compute covariance matrix for each patch
    for v in vertices:
        # find indices in target precision matrix
        i_from = v * n_features_per_vertex
        i_to = (v + 1) * n_features_per_vertex
        # get data
        edge_data = X[:, i_from:i_to]
        m = mean_vector[i_from:i_to]
        # increment (updates covariances[v] in place with the new samples)
        _, covariances[v] = _increment_multivariate_gaussian_cov(
            edge_data, m, covariances[v], n, bias=bias
        )
        # invert it
        all_blocks[v] = _covariance_matrix_inverse(covariances[v], n_components)
        # store the inverse covariance and its locations
        rows[v] = v
        columns[v] = v
    # sort rows, columns and all_blocks
    # (rows is already 0..n-1 here, but keep the generic BSR assembly.)
    rows_arg_sort = rows.argsort()
    columns = columns[rows_arg_sort]
    all_blocks = all_blocks[rows_arg_sort]
    rows = rows[rows_arg_sort]
    # create indptr
    # indptr[i]:indptr[i + 1] delimits block-row i's slice of `columns`.
    n_rows = graph.n_vertices
    indptr = np.zeros(n_rows + 1)
    for i in range(n_rows):
        (inds,) = np.where(rows == i)
        if inds.size == 0:
            indptr[i + 1] = indptr[i]
        else:
            indptr[i] = inds[0]
            indptr[i + 1] = inds[-1] + 1
    # create block sparse matrix
    return (
        bsr_matrix(
            (all_blocks, columns, indptr), shape=(n_features, n_features), dtype=dtype
        ),
        covariances,
    )
def _increment_dense_diagonal_precision(
    X,
    mean_vector,
    covariances,
    n,
    graph,
    n_features,
    n_features_per_vertex,
    dtype=np.float32,
    n_components=None,
    bias=0,
    verbose=False,
):
    """Incrementally update a dense, block-diagonal GMRF precision matrix.

    Each per-vertex covariance in ``covariances`` (previously computed
    from ``n`` samples with mean ``mean_vector``) is updated in place with
    the new samples in ``X``, then re-inverted onto the matching diagonal
    block of a freshly allocated precision matrix. Returns the new
    precision matrix and the updated covariances.
    """
    Q = np.zeros((n_features, n_features), dtype=dtype)
    if verbose:
        print_dynamic(
            "Allocated precision matrix of size {}".format(bytes_str(Q.nbytes))
        )
        vertex_iter = print_progress(
            range(graph.n_vertices),
            n_items=graph.n_vertices,
            prefix="Precision per vertex",
            end_with_newline=False,
        )
    else:
        vertex_iter = range(graph.n_vertices)
    for v in vertex_iter:
        # Column slice of X owned by vertex v.
        start = v * n_features_per_vertex
        stop = start + n_features_per_vertex
        new_data = X[:, start:stop]
        old_mean = mean_vector[start:stop]
        # Fold the new samples into this vertex's covariance.
        _, covariances[v] = _increment_multivariate_gaussian_cov(
            new_data, old_mean, covariances[v], n, bias=bias
        )
        Q[start:stop, start:stop] = _covariance_matrix_inverse(
            covariances[v], n_components
        )
    return Q, covariances
def _increment_multivariate_gaussian_mean(X, m, n):
# Get new number of samples
new_n = X.shape[0]
# Update mean vector
# m_{new} = (n m + \sum_{i=1}^{n_{new}} x_i) / (n + n_{new})
# where: m -> old mean vector
# n_{new} -> new number of samples
# n -> old number of samples
# x_i -> new data vectors
return (n * m + np.sum(X, axis=0)) / (n + new_n)
def _increment_multivariate_gaussian_cov(X, m, S, n, bias=0):
# Get new number of samples
new_n = X.shape[0]
# Update mean vector
# m_{new} = (n m + \sum_{i=1}^{n_{new}} x_i) / (n + n_{new})
# where: m_{new} -> new mean vector
# m -> old mean vector
# n_{new} -> new number of samples
# n -> old number of samples
# x_i -> new data vectors
new_m = _increment_multivariate_gaussian_mean(X, m, n)
# Select the normalization value
if bias == 1:
k = n
elif bias == 0:
k = n - 1
else:
raise ValueError("bias must be either 0 or 1")
# Update covariance matrix
# S__{new} = (k S + n m^T m + X^T X - (n + n_{new}) m_{new}^T m_{new})
# / (k + n_{new})
m1 = n * m[None, :].T.dot(m[None, :])
m2 = (n + new_n) * new_m[None, :].T.dot(new_m[None, :])
new_S = (k * S + m1 + X.T.dot(X) - m2) / (k + new_n)
return new_m, new_S
class GMRFVectorModel(object):
    r"""
    Trains a Gaussian Markov Random Field (GMRF).
    Parameters
    ----------
    samples : `ndarray` or `list` or `iterable` of `ndarray`
        List or iterable of numpy arrays to build the model from, or an
        existing data matrix.
    graph : :map:`UndirectedGraph` or :map:`DirectedGraph` or :map:`Tree`
        The graph that defines the relations between the features.
    n_samples : `int`, optional
        If provided then ``samples`` must be an iterator that yields
        ``n_samples``. If not provided then samples has to be a `list` (so we
        know how large the data matrix needs to be).
    mode : ``{'concatenation', 'subtraction'}``, optional
        Defines the feature vector of each edge. Assuming that
        :math:`\mathbf{x}_i` and :math:`\mathbf{x}_j` are the feature vectors
        of two adjacent vertices (:math:`i,j:(v_i,v_j)\in E`), then the edge's
        feature vector in the case of ``'concatenation'`` is
        .. math::
           \left[{\mathbf{x}_i}^T, {\mathbf{x}_j}^T\right]^T
        and in the case of ``'subtraction'``
        .. math::
           \mathbf{x}_i - \mathbf{x}_j
    n_components : `int` or ``None``, optional
        When ``None`` (default), the covariance matrix of each edge is inverted
        using `np.linalg.inv`. If `int`, it is inverted using truncated SVD
        using the specified number of components.
    dtype : `numpy.dtype`, optional
        The data type of the GMRF's precision matrix. For example, it can be set
        to `numpy.float32` for single precision or to `numpy.float64` for double
        precision. Depending on the size of the precision matrix, this option
        can save you a lot of memory.
    sparse : `bool`, optional
        When ``True``, the GMRF's precision matrix has type
        `scipy.sparse.bsr_matrix`, otherwise it is a `numpy.array`.
    bias : `int`, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number of
        observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    incremental : `bool`, optional
        This argument must be set to ``True`` in case the user wants to
        incrementally update the GMRF. Note that if ``True``, the model
        occupies 2x memory.
    verbose : `bool`, optional
        If ``True``, the progress of the model's training is printed.
    Notes
    -----
    Let us denote a graph as :math:`G=(V,E)`, where
    :math:`V=\{v_i,v_2,\ldots, v_{|V|}\}` is the set of :math:`|V|` vertices and
    there is an edge :math:`(v_i,v_j)\in E` for each pair of connected vertices.
    Let us also assume that we have a set of random variables
    :math:`X=\{X_i\}, \forall i:v_i\in V`, which represent an abstract feature
    vector of length :math:`k` extracted from each vertex :math:`v_i`, i.e.
    :math:`\mathbf{x}_i,i:v_i\in V`.
    A GMRF is described by an undirected graph, where the vertexes stand for
    random variables and the edges impose statistical constraints on these
    random variables. Thus, the GMRF models the set of random variables with
    a multivariate normal distribution
    .. math::
       p(X=\mathbf{x}|G)\sim\mathcal{N}(\boldsymbol{\mu},\boldsymbol{\Sigma})
    We denote by :math:`\mathbf{Q}` the block-sparse precision matrix that is
    the inverse of the covariance matrix :math:`\boldsymbol{\Sigma}`, i.e.
    :math:`\mathbf{Q}=\boldsymbol{\Sigma}^{-1}`. By applying the GMRF we make
    the assumption that the random variables satisfy the three Markov
    properties (pairwise, local and global) and that the blocks of the
    precision matrix that correspond to disjoint vertexes are zero, i.e.
    .. math::
       \mathbf{Q}_{ij}=\mathbf{0}_{k\times k},\forall i,j:(v_i,v_j)\notin E
    References
    ----------
    .. [1] H. Rue, and L. Held. "Gaussian Markov random fields: theory and
       applications," CRC Press, 2005.
    .. [2] E. Antonakos, J. Alabort-i-Medina, and S. Zafeiriou. "Active
       Pictorial Structures", IEEE International Conference on Computer Vision
       & Pattern Recognition (CVPR), Boston, MA, USA, pp. 5435-5444, June 2015.
    """
    def __init__(
        self,
        samples,
        graph,
        n_samples=None,
        mode="concatenation",
        n_components=None,
        dtype=np.float64,
        sparse=True,
        bias=0,
        incremental=False,
        verbose=False,
    ):
        # Generate data matrix
        # (n_samples, n_features)
        data, self.n_samples = self._data_to_matrix(samples, n_samples)
        # n_features and n_features_per_vertex
        # NOTE(review): assumes n_features divides evenly by n_vertices --
        # i.e. every vertex owns the same number of features.
        self.n_features = data.shape[1]
        self.n_features_per_vertex = int(self.n_features / graph.n_vertices)
        # Assign arguments
        self.graph = graph
        self.mode = mode
        self.n_components = n_components
        self.sparse = sparse
        self.dtype = dtype
        self.bias = bias
        self.is_incremental = incremental
        # Compute mean vector
        self.mean_vector = np.mean(data, axis=0)
        # Select correct method to create the precision matrix based on the
        # graph type and the sparse flag
        # (an edge-less graph degenerates to a block-diagonal precision)
        if self.graph.n_edges == 0:
            if self.sparse:
                constructor = _create_sparse_diagonal_precision
            else:
                constructor = _create_dense_diagonal_precision
        else:
            if self.sparse:
                constructor = partial(_create_sparse_precision, mode=self.mode)
            else:
                constructor = partial(_create_dense_precision, mode=self.mode)
        # Create the precision matrix and optionally store the covariance
        # matrices
        if self.is_incremental:
            # Keep the per-edge/per-vertex covariances around -- they are
            # required later by increment() (hence the 2x memory note).
            self.precision, self._covariance_matrices = constructor(
                data,
                self.graph,
                self.n_features,
                self.n_features_per_vertex,
                dtype=self.dtype,
                n_components=self.n_components,
                bias=self.bias,
                return_covariances=self.is_incremental,
            verbose=verbose,
            )
        else:
            # return_covariances is False here, so the constructor returns
            # only the precision matrix.
            self._covariance_matrices = None
            self.precision = constructor(
                data,
                self.graph,
                self.n_features,
                self.n_features_per_vertex,
                dtype=self.dtype,
                n_components=self.n_components,
                bias=self.bias,
                return_covariances=self.is_incremental,
                verbose=verbose,
            )
    def _data_to_matrix(self, data, n_samples):
        """Coerce `data` into an ``(n_samples, n_features)`` `ndarray`.

        If `n_samples` is ``None`` it is taken as ``len(data)``; non-array
        inputs are converted and truncated to the first `n_samples` items.
        Returns the ``(matrix, n_samples)`` pair.
        """
        # build a data matrix from all the samples
        if n_samples is None:
            n_samples = len(data)
        # Assumed data is ndarray of (n_samples, n_features) or list of samples
        if not isinstance(data, np.ndarray):
            # Make sure we have an array, slice of the number of requested
            # samples
            data = np.array(data)[:n_samples]
        return data, n_samples
    def mean(self):
        r"""
        Return the mean of the model. For this model, returns the same result
        as ``mean_vector``.
        :type: `ndarray`
        """
        return self.mean_vector
    def increment(self, samples, n_samples=None, verbose=False):
        r"""
        Update the mean and precision matrix of the GMRF by updating the
        distributions of all the edges.
        Parameters
        ----------
        samples : `ndarray` or `list` or `iterable` of `ndarray`
            List or iterable of numpy arrays to build the model from, or an
            existing data matrix.
        n_samples : `int`, optional
            If provided then ``samples`` must be an iterator that yields
            ``n_samples``. If not provided then samples has to be a
            list (so we know how large the data matrix needs to be).
        verbose : `bool`, optional
            If ``True``, the progress of the model's incremental update is
            printed.
        """
        # Check if it can be incrementally updated
        if not self.is_incremental:
            raise ValueError("GMRF cannot be incrementally updated.")
        # Build a data matrix from the new samples
        data, _ = self._data_to_matrix(samples, n_samples)
        # Increment the model
        self._increment(data=data, verbose=verbose)
    def _increment(self, data, verbose):
        """Fold the rows of `data` into the model.

        Rebuilds the precision matrix from the incrementally-updated
        covariances, then updates ``mean_vector`` and ``n_samples``.
        """
        # Empty memory
        # (drop the reference so the old precision can be freed before the
        # new one is allocated)
        self.precision = 0
        # Select correct method to create the precision matrix based on the
        # graph type and the sparse flag
        if self.graph.n_edges == 0:
            if self.sparse:
                constructor = _increment_sparse_diagonal_precision
            else:
                constructor = _increment_dense_diagonal_precision
        else:
            if self.sparse:
                constructor = partial(_increment_sparse_precision, mode=self.mode)
            else:
                constructor = partial(_increment_dense_precision, mode=self.mode)
        # Create the precision matrix and optionally store the covariance
        # matrices
        self.precision, self._covariance_matrices = constructor(
            data,
            self.mean_vector,
            self._covariance_matrices,
            self.n_samples,
            self.graph,
            self.n_features,
            self.n_features_per_vertex,
            dtype=self.dtype,
            n_components=self.n_components,
            bias=self.bias,
            verbose=verbose,
        )
        # Update mean and number of samples
        # (must happen AFTER the constructor call, which needs the OLD mean
        # and OLD sample count)
        self.mean_vector = _increment_multivariate_gaussian_mean(
            data, self.mean_vector, self.n_samples
        )
        self.n_samples += data.shape[0]
    def mahalanobis_distance(self, samples, subtract_mean=True, square_root=False):
        r"""
        Compute the mahalanobis distance given a sample :math:`\mathbf{x}` or an
        array of samples :math:`\mathbf{X}`, i.e.
        .. math::
           \sqrt{(\mathbf{x}-\boldsymbol{\mu})^T \mathbf{Q} (\mathbf{x}-\boldsymbol{\mu})}
           \text{ or }
           \sqrt{(\mathbf{X}-\boldsymbol{\mu})^T \mathbf{Q} (\mathbf{X}-\boldsymbol{\mu})}
        Parameters
        ----------
        samples : `ndarray`
            A single data vector or an array of multiple data vectors.
        subtract_mean : `bool`, optional
            When ``True``, the mean vector is subtracted from the data vector.
        square_root : `bool`, optional
            If ``False``, the mahalanobis distance gets squared.
        Returns
        -------
        distance : `float` or ``(n_samples,)`` `ndarray`
            A scalar for a single sample, an array for multiple samples.
        """
        samples, _ = self._data_to_matrix(samples, None)
        # Promote a single 1D sample to a (1, n_features) matrix.
        if len(samples.shape) == 1:
            samples = samples[..., None].T
        return self._mahalanobis_distance(
            samples=samples, subtract_mean=subtract_mean, square_root=square_root
        )
    def _mahalanobis_distance(self, samples, subtract_mean, square_root):
        """Compute x^T Q x per row of `samples` (see public method above)."""
        # we assume that samples is an ndarray of n_samples x n_features
        # create data matrix
        if subtract_mean:
            n_samples = samples.shape[0]
            samples = samples - np.tile(self.mean_vector[..., None], n_samples).T
        # compute mahalanobis per sample
        if self.sparse:
            # if sparse, unfortunately the einstein sum is not implemented
            # NOTE(review): this materialises the full (n, n) product and
            # takes its diagonal -- wasteful for many samples.
            tmp = self.precision.dot(samples.T)
            d = samples.dot(tmp)
            d = np.diag(d)
        else:
            # if dense, then the einstein sum is much faster
            d = np.einsum("ij,ij->i", np.dot(samples, self.precision), samples)
        # if only one sample, then return a scalar
        if d.shape[0] == 1:
            d = d[0]
        # square root
        if square_root:
            return np.sqrt(d)
        else:
            return d
    def principal_components_analysis(self, max_n_components=None):
        r"""
        Returns a :map:`PCAVectorModel` with the Principal Components.
        Note that the eigenvalue decomposition is applied directly on the
        precision matrix and then the eigenvalues are inverted.
        Parameters
        ----------
        max_n_components : `int` or ``None``, optional
            The maximum number of principal components. If ``None``, all the
            components are returned.
        Returns
        -------
        pca : :map:`PCAVectorModel`
            The PCA model.
        """
        # local import to avoid a circular dependency with the pca module
        from .pca import PCAVectorModel
        return PCAVectorModel.init_from_covariance_matrix(
            C=self.precision,
            mean=self.mean_vector,
            n_samples=self.n_samples,
            centred=True,
            is_inverse=True,
            max_n_components=max_n_components,
        )
    @property
    def _str_title(self):
        r"""
        Returns a string containing the name of the model.
        :type: `str`
        """
        tmp = "a"
        if isinstance(self.graph, UndirectedGraph):
            tmp = "an"
        return "GMRF model on {} {}".format(tmp, self.graph)
    def __str__(self):
        # Assemble a human-readable summary of the model's configuration.
        incremental_str = (
            " - Can be incrementally updated."
            if self.is_incremental
            else " - Cannot be " "incrementally updated."
        )
        svd_str = (
            " - # SVD components: {}".format(self.n_components)
            if self.n_components is not None
            else " - No " "SVD used."
        )
        _Q_sparse = "scipy.sparse" if self.sparse else "numpy.array"
        q_str = " - Q is stored as {} with {} precision".format(
            _Q_sparse, name_of_callable(self.dtype)
        )
        mode_str = "concatenated" if self.mode == "concatenation" else "subtracted"
        str_out = (
            "Gaussian MRF Model \n"
            " - {}\n"
            " - The data of the vertexes of each edge are {}.\n"
            "{}\n"
            " - # variables (vertexes): {}\n"
            " - # features per variable: {}\n"
            " - # features in total: {}\n"
            "{}\n"
            " - # samples: {}\n"
            "{}\n".format(
                self.graph.__str__(),
                mode_str,
                q_str,
                self.graph.n_vertices,
                self.n_features_per_vertex,
                self.n_features,
                svd_str,
                self.n_samples,
                incremental_str,
            )
        )
        return str_out
class GMRFModel(GMRFVectorModel):
    r"""
    Trains a Gaussian Markov Random Field (GMRF).
    Parameters
    ----------
    samples : `list` or `iterable` of :map:`Vectorizable`
        List or iterable of samples to build the model from.
    graph : :map:`UndirectedGraph` or :map:`DirectedGraph` or :map:`Tree`
        The graph that defines the relations between the features.
    n_samples : `int`, optional
        If provided then ``samples`` must be an iterator that yields
        ``n_samples``. If not provided then samples has to be a `list` (so we
        know how large the data matrix needs to be).
    mode : ``{'concatenation', 'subtraction'}``, optional
        Defines the feature vector of each edge. Assuming that
        :math:`\mathbf{x}_i` and :math:`\mathbf{x}_j` are the feature vectors
        of two adjacent vertices (:math:`i,j:(v_i,v_j)\in E`), then the edge's
        feature vector in the case of ``'concatenation'`` is
        .. math::
           \left[{\mathbf{x}_i}^T, {\mathbf{x}_j}^T\right]^T
        and in the case of ``'subtraction'``
        .. math::
           \mathbf{x}_i - \mathbf{x}_j
    n_components : `int` or ``None``, optional
        When ``None`` (default), the covariance matrix of each edge is inverted
        using `np.linalg.inv`. If `int`, it is inverted using truncated SVD
        using the specified number of compnents.
    dtype : `numpy.dtype`, optional
        The data type of the GMRF's precision matrix. For example, it can be set
        to `numpy.float32` for single precision or to `numpy.float64` for double
        precision. Depending on the size of the precision matrix, this option can
        you a lot of memory.
    sparse : `bool`, optional
        When ``True``, the GMRF's precision matrix has type
        `scipy.sparse.bsr_matrix`, otherwise it is a `numpy.array`.
    bias : `int`, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number of
        observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    incremental : `bool`, optional
        This argument must be set to ``True`` in case the user wants to
        incrementally update the GMRF. Note that if ``True``, the model
        occupies 2x memory.
    verbose : `bool`, optional
        If ``True``, the progress of the model's training is printed.
    Notes
    -----
    Let us denote a graph as :math:`G=(V,E)`, where
    :math:`V=\{v_i,v_2,\ldots, v_{|V|}\}` is the set of :math:`|V|` vertices and
    there is an edge :math:`(v_i,v_j)\in E` for each pair of connected vertices.
    Let us also assume that we have a set of random variables
    :math:`X=\{X_i\}, \forall i:v_i\in V`, which represent an abstract feature
    vector of length :math:`k` extracted from each vertex :math:`v_i`, i.e.
    :math:`\mathbf{x}_i,i:v_i\in V`.
    A GMRF is described by an undirected graph, where the vertexes stand for
    random variables and the edges impose statistical constraints on these
    random variables. Thus, the GMRF models the set of random variables with
    a multivariate normal distribution
    .. math::
       p(X=\mathbf{x}|G)\sim\mathcal{N}(\boldsymbol{\mu},\boldsymbol{\Sigma})
    We denote by :math:`\mathbf{Q}` the block-sparse precision matrix that is
    the inverse of the covariance matrix :math:`\boldsymbol{\Sigma}`, i.e.
    :math:`\mathbf{Q}=\boldsymbol{\Sigma}^{-1}`. By applying the GMRF we make
    the assumption that the random variables satisfy the three Markov
    properties (pairwise, local and global) and that the blocks of the
    precision matrix that correspond to disjoint vertexes are zero, i.e.
    .. math::
       \mathbf{Q}_{ij}=\mathbf{0}_{k\times k},\forall i,j:(v_i,v_j)\notin E
    References
    ----------
    .. [1] H. Rue, and L. Held. "Gaussian Markov random fields: theory and
       applications," CRC Press, 2005.
    .. [2] E. Antonakos, J. Alabort-i-Medina, and S. Zafeiriou. "Active
       Pictorial Structures", IEEE International Conference on Computer Vision
       & Pattern Recognition (CVPR), Boston, MA, USA, pp. 5435-5444, June 2015.
    """
    def __init__(
        self,
        samples,
        graph,
        mode="concatenation",
        n_components=None,
        dtype=np.float64,
        sparse=True,
        n_samples=None,
        bias=0,
        incremental=False,
        verbose=False,
    ):
        # Build a data matrix from all the samples.  The template instance is
        # kept so that vector results can be rebuilt into the sample type.
        data, self.template_instance = as_matrix(
            samples, length=n_samples, return_template=True, verbose=verbose
        )
        # Recompute n_samples from the materialized matrix: the caller-provided
        # value (or None) may not match what the iterator actually yielded.
        n_samples = data.shape[0]
        # All the actual training happens in the vector-based parent class.
        GMRFVectorModel.__init__(
            self,
            data,
            graph,
            mode=mode,
            n_components=n_components,
            dtype=dtype,
            sparse=sparse,
            n_samples=n_samples,
            bias=bias,
            incremental=incremental,
            verbose=verbose,
        )
    def mean(self):
        r"""
        Return the mean of the model.
        :type: :map:`Vectorizable`
        """
        # Rebuild the stored mean vector into the original sample type.
        return self.template_instance.from_vector(self.mean_vector)
    def increment(self, samples, n_samples=None, verbose=False):
        r"""
        Update the mean and precision matrix of the GMRF by updating the
        distributions of all the edges.
        Parameters
        ----------
        samples : `list` or `iterable` of :map:`Vectorizable`
            List or iterable of samples to build the model from.
        n_samples : `int`, optional
            If provided then ``samples`` must be an iterator that yields
            ``n_samples``. If not provided then samples has to be a
            list (so we know how large the data matrix needs to be).
        verbose : `bool`, optional
            If ``True``, the progress of the model's incremental update is
            printed.

        Raises
        ------
        ValueError
            If the model was not built with ``incremental=True``.
        """
        # Check if it can be incrementally updated
        if not self.is_incremental:
            raise ValueError("GMRF cannot be incrementally updated.")
        # Build a data matrix from the new samples
        data = as_matrix(samples, length=n_samples, verbose=verbose)
        # Increment the model
        self._increment(data=data, verbose=verbose)
    def mahalanobis_distance(self, samples, subtract_mean=True, square_root=False):
        r"""
        Compute the mahalanobis distance given a sample :math:`\mathbf{x}` or an
        array of samples :math:`\mathbf{X}`, i.e.
        .. math::
           \sqrt{(\mathbf{x}-\boldsymbol{\mu})^T \mathbf{Q} (\mathbf{x}-\boldsymbol{\mu})}
           \text{ or }
           \sqrt{(\mathbf{X}-\boldsymbol{\mu})^T \mathbf{Q} (\mathbf{X}-\boldsymbol{\mu})}
        Parameters
        ----------
        samples : :map:`Vectorizable` or `list` of :map:`Vectorizable`
            The new data sample or a list of samples.
        subtract_mean : `bool`, optional
            When ``True``, the mean vector is subtracted from the data vector.
        square_root : `bool`, optional
            If ``False``, the mahalanobis distance gets squared.
        """
        if isinstance(samples, list):
            samples = as_matrix(
                samples, length=None, return_template=False, verbose=False
            )
        else:
            # Single sample: vectorize and reshape to a (1, n_features) matrix
            # so the parent implementation can treat both cases uniformly.
            samples = samples.as_vector()[..., None].T
        return self._mahalanobis_distance(
            samples=samples, subtract_mean=subtract_mean, square_root=square_root
        )
    def principal_components_analysis(self, max_n_components=None):
        r"""
        Returns a :map:`PCAModel` with the Principal Components.
        Note that the eigenvalue decomposition is applied directly on the
        precision matrix and then the eigenvalues are inverted.
        Parameters
        ----------
        max_n_components : `int` or ``None``, optional
            The maximum number of principal components. If ``None``, all the
            components are returned.
        Returns
        -------
        pca : :map:`PCAModel`
            The PCA model.
        """
        # Local import to avoid a circular dependency between model modules.
        from .pca import PCAModel

        # is_inverse=True tells PCAModel that self.precision is the inverse
        # covariance, so its eigenvalues must be inverted.
        return PCAModel.init_from_covariance_matrix(
            C=self.precision,
            mean=self.mean(),
            n_samples=self.n_samples,
            centred=True,
            is_inverse=True,
            max_n_components=max_n_components,
        )
| {
"repo_name": "patricksnape/menpo",
"path": "menpo/model/gmrf.py",
"copies": "2",
"size": "47953",
"license": "bsd-3-clause",
"hash": 7701037856883641000,
"line_mean": 32.3238359972,
"line_max": 90,
"alpha_frac": 0.5577753217,
"autogenerated": false,
"ratio": 3.7037923843361398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.526156770603614,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from violajones.HaarLikeFeature import HaarLikeFeature
from violajones.HaarLikeFeature import FeatureTypes
import progressbar
from multiprocessing import Pool
LOADING_BAR_LENGTH = 50
# TODO: select optimal threshold for each feature
# TODO: attentional cascading
def learn(positive_iis, negative_iis, num_classifiers=-1, min_feature_width=1, max_feature_width=-1, min_feature_height=1, max_feature_height=-1):
    """
    Selects a set of classifiers. Iteratively takes the best classifiers based
    on a weighted error (AdaBoost).
    :param positive_iis: List of positive integral image examples
    :type positive_iis: list[numpy.ndarray]
    :param negative_iis: List of negative integral image examples
    :type negative_iis: list[numpy.ndarray]
    :param num_classifiers: Number of classifiers to select, -1 will use all
    classifiers
    :type num_classifiers: int
    :param min_feature_width: Minimum feature width in pixels
    :param max_feature_width: Maximum feature width; -1 means "image width"
    :param min_feature_height: Minimum feature height in pixels
    :param max_feature_height: Maximum feature height; -1 means "image height"
    :return: List of selected features
    :rtype: list[violajones.HaarLikeFeature.HaarLikeFeature]
    """
    num_pos = len(positive_iis)
    num_neg = len(negative_iis)
    num_imgs = num_pos + num_neg
    img_height, img_width = positive_iis[0].shape

    # Maximum feature width and height default to image width and height
    max_feature_height = img_height if max_feature_height == -1 else max_feature_height
    max_feature_width = img_width if max_feature_width == -1 else max_feature_width

    # Create initial weights and labels: each class starts with half of the
    # total weight mass; positives are labelled +1, negatives -1.
    pos_weights = np.ones(num_pos) * 1. / (2 * num_pos)
    neg_weights = np.ones(num_neg) * 1. / (2 * num_neg)
    weights = np.hstack((pos_weights, neg_weights))
    labels = np.hstack((np.ones(num_pos), np.ones(num_neg) * -1))
    images = positive_iis + negative_iis

    # Create features for all sizes and locations
    features = _create_features(img_height, img_width, min_feature_width, max_feature_width, min_feature_height, max_feature_height)
    num_features = len(features)
    feature_indexes = list(range(num_features))
    num_classifiers = num_features if num_classifiers == -1 else num_classifiers

    print('Calculating scores for images..')
    votes = np.zeros((num_imgs, num_features))
    bar = progressbar.ProgressBar()
    # Use as many workers as there are CPUs.  Close/join the pool when done
    # so worker processes are not leaked (the original never released it).
    pool = Pool(processes=None)
    try:
        for i in bar(range(num_imgs)):
            votes[i, :] = np.array(list(pool.map(partial(_get_feature_vote, image=images[i]), features)))
    finally:
        pool.close()
        pool.join()

    # select classifiers
    classifiers = []
    print('Selecting classifiers..')
    bar = progressbar.ProgressBar()
    for _ in bar(range(num_classifiers)):
        classification_errors = np.zeros(len(feature_indexes))
        # normalize weights
        weights *= 1. / np.sum(weights)
        # select best classifier based on the weighted error
        for f in range(len(feature_indexes)):
            f_idx = feature_indexes[f]
            # classifier error is the sum of image weights where the
            # classifier is WRONG (its vote disagrees with the label)
            error = sum(map(lambda img_idx: weights[img_idx] if labels[img_idx] != votes[img_idx, f_idx] else 0, range(num_imgs)))
            classification_errors[f] = error

        # get best feature, i.e. with smallest error
        min_error_idx = np.argmin(classification_errors)
        best_error = classification_errors[min_error_idx]
        best_feature_idx = feature_indexes[min_error_idx]

        # set feature weight (the AdaBoost alpha).
        # NOTE(review): this divides by best_error, so a perfect classifier
        # (error == 0) would raise — confirm whether that can occur here.
        best_feature = features[best_feature_idx]
        feature_weight = 0.5 * np.log((1 - best_error) / best_error)
        best_feature.weight = feature_weight
        classifiers.append(best_feature)

        # update image weights: up-weight misclassified images and
        # down-weight correctly classified ones
        weights = np.array(list(map(lambda img_idx: weights[img_idx] * np.sqrt((1-best_error)/best_error) if labels[img_idx] != votes[img_idx, best_feature_idx] else weights[img_idx] * np.sqrt(best_error/(1-best_error)), range(num_imgs))))

        # remove feature (a feature can't be selected twice)
        feature_indexes.remove(best_feature_idx)
    return classifiers
def _get_feature_vote(feature, image):
return feature.get_vote(image)
def _create_features(img_height, img_width, min_feature_width, max_feature_width, min_feature_height, max_feature_height):
    """Enumerate every haar-like feature (type, size, position, polarity)
    that fits inside an `img_height` x `img_width` image, subject to the
    given size bounds."""
    print('Creating haar-like features..')
    features = []
    # FeatureTypes are just tuples: (base_width, base_height).  Feature sizes
    # grow in multiples of the base size.
    for base in FeatureTypes:
        width_lo = max(min_feature_width, base[0])
        height_lo = max(min_feature_height, base[1])
        for w in range(width_lo, max_feature_width, base[0]):
            for h in range(height_lo, max_feature_height, base[1]):
                for x in range(img_width - w):
                    for y in range(img_height - h):
                        # one feature per polarity (+1 and -1)
                        features.append(HaarLikeFeature(base, (x, y), w, h, 0, 1))
                        features.append(HaarLikeFeature(base, (x, y), w, h, 0, -1))
    print('..done. ' + str(len(features)) + ' features created.\n')
    return features
| {
"repo_name": "Ronneesley/redesocial",
"path": "pesquisas/Viola-Jones/recursos/codigo_python/Viola-Jones-master/violajones/AdaBoost.py",
"copies": "2",
"size": "5105",
"license": "mit",
"hash": 2471837154856277000,
"line_mean": 40.8442622951,
"line_max": 239,
"alpha_frac": 0.6738491675,
"autogenerated": false,
"ratio": 3.6594982078853047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333347375385304,
"avg_score": null,
"num_lines": null
} |
from functools import partial
#import numpy as np
import hyperopt
#from hyperopt import pyll
from hyperopt.fmin import fmin_pass_expr_memo_ctrl
from hpnnet.nips2011 import nnet1_preproc_space
#from hpnnet.skdata_learning_algo import PyllLearningAlgo
from hpnnet.skdata_learning_algo import eval_fn
from skdata.larochelle_etal_2007.view import RectanglesVectorXV
from skdata.larochelle_etal_2007.view \
import MNIST_RotatedBackgroundImages_VectorXV
def test_nnet_rectangles():
    """Smoke-test random search over the NNet space on the Rectangles task."""
    objective = partial(eval_fn, protocol_cls=RectanglesVectorXV)
    # Mark the objective so fmin passes expr/memo/ctrl through to it.
    fmin_pass_expr_memo_ctrl(objective)
    trials = hyperopt.Trials()
    search_space = nnet1_preproc_space(sup_min_epochs=20, sup_max_epochs=40)
    hyperopt.fmin(
        objective,
        space=search_space,
        max_evals=10,
        algo=hyperopt.rand.suggest,
        trials=trials,
    )
def test_nnet_mrbi():
    """Smoke-test random search over the NNet space on rotated-MNIST (MRBI)."""
    objective = partial(eval_fn, protocol_cls=MNIST_RotatedBackgroundImages_VectorXV)
    # Mark the objective so fmin passes expr/memo/ctrl through to it.
    fmin_pass_expr_memo_ctrl(objective)
    trials = hyperopt.Trials()
    search_space = nnet1_preproc_space(sup_min_epochs=20, sup_max_epochs=40)
    hyperopt.fmin(
        objective,
        space=search_space,
        max_evals=10,
        algo=hyperopt.rand.suggest,
        trials=trials,
    )
| {
"repo_name": "hyperopt/hyperopt-nnet",
"path": "hpnnet/tests/test_nips2011.py",
"copies": "1",
"size": "1286",
"license": "bsd-3-clause",
"hash": -7666882368194989000,
"line_mean": 26.3617021277,
"line_max": 72,
"alpha_frac": 0.7060653188,
"autogenerated": false,
"ratio": 3.0187793427230045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42248446615230045,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import menpo.io as mio
def bbox_overlap_area(a, b):
    """Area of the intersection of the bounding boxes of point sets `a`, `b`.

    Each argument is an array of points; the axis-aligned box is defined by
    its per-axis min/max corners. Returns 0 when the boxes do not intersect.
    """
    hi = np.min([a.max(axis=0), b.max(axis=0)], axis=0)
    lo = np.max([a.min(axis=0), b.min(axis=0)], axis=0)
    extent = hi - lo
    if np.any(extent < 0):
        return 0
    return extent.prod()
def bbox_proportion_overlap(a, b):
    """Fraction of box `a`'s area covered by its intersection with box `b`."""
    shared = bbox_overlap_area(a, b)
    return shared / bbox_area(a)
def bbox_area(b):
    """Area (product of per-axis extents) of the bounding box of points `b`."""
    extent = b.max(axis=0) - b.min(axis=0)
    return extent.prod()
def bbox_area_ratio(a, b):
    """Ratio of box `a`'s area to box `b`'s area."""
    area_a = bbox_area(a)
    area_b = bbox_area(b)
    return area_a / area_b
def bbox_overlap_acceptable(gt, d):
    """True when detection `d` overlaps ground truth `gt` acceptably:
    more than half of `gt` is covered AND the boxes are of comparable size."""
    enough_overlap = bbox_proportion_overlap(gt, d) > 0.5
    comparable_size = bbox_area_ratio(gt, d) > 0.5
    return enough_overlap and comparable_size
def load_dlib_detector():
    """Load dlib's frontal face detector, pre-bound to accept colour images."""
    from menpodetect import load_dlib_frontal_face_detector
    dlib_detect = load_dlib_frontal_face_detector()
    return partial(dlib_detect, greyscale=False)
# Module-level default detector used by detect_and_check when det=None.
# NOTE(review): this loads dlib eagerly at import time of this module.
detector = load_dlib_detector()
def load_opencv_detector():
    """Load OpenCV's frontal face detector, pre-bound to accept colour images."""
    from menpodetect import load_opencv_frontal_face_detector
    opencv_detect = load_opencv_frontal_face_detector()
    return partial(opencv_detect, greyscale=False)
def load_pico_detector():
    """Load the pico frontal face detector, pre-bound to accept colour images."""
    from menpodetect import load_pico_frontal_face_detector
    pico_detect = load_pico_frontal_face_detector()
    return partial(pico_detect, greyscale=False)
def detect_and_check(img, det=None, group=None):
    """Run a face detector on `img` and return the first detection that
    acceptably overlaps the ground-truth landmarks, or None if none does.

    Parameters
    ----------
    img : image carrying landmarks under `group`.
    det : callable detector; defaults to the module-level `detector`.
    group : landmark group name holding the ground-truth annotation.
    """
    if det is None:
        det = detector
    gt = img.landmarks[group].lms.bounding_box()
    # Bug fix: actually use the supplied detector `det`.  The original always
    # called the module-level `detector`, silently ignoring the argument.
    # (Also dropped the unused `bad_fits` list.)
    for detection in det(img):
        if bbox_overlap_acceptable(gt.points, detection.points):
            return detection
    return None
def normalize(gt):
    """Return the transform that maps `gt` to a unit-range box centred at
    the origin (translate centre to 0, then scale by the inverse range)."""
    from menpo.transform import Translation, NonUniformScale
    recentre = Translation(gt.centre()).pseudoinverse()
    rescale = NonUniformScale(gt.range()).pseudoinverse()
    return recentre.compose_before(rescale)
def random_instance(pca):
    """Draw a random instance from `pca`, sampling component weights from a
    zero-mean Gaussian with the model's per-component eigenvalue variances."""
    mean = np.zeros_like(pca.eigenvalues)
    cov = np.diag(pca.eigenvalues)
    weights = np.random.multivariate_normal(mean, cov)
    return pca.instance(weights)
# Registry mapping a detector name to its loader function; used by
# load_n_create_generator to resolve detectors by name.
_DETECTORS = {
    'dlib': load_dlib_detector,
    'pico': load_pico_detector,
    'opencv': load_opencv_detector
}
def synthesize_detection(pca_model, lms):
    """Synthesizes a bounding box for a particular detector.

    Args:
        pca_model: A menpo PCAModel instance of normalized detection boxes.
        lms: Ground-truth landmarks (anything with a `bounding_box()` method).

    Returns:
        A randomly sampled detection box, mapped from the normalized frame
        back into the frame of the ground-truth bounding box of `lms`.
    """
    gt_bb = lms.bounding_box()
    instance = random_instance(pca_model)
    return normalize(gt_bb).pseudoinverse().apply(instance)
def create_generator(shapes, detections):
    """Build a PCA model over detection boxes, each normalized against the
    bounding box of its paired ground-truth shape.

    Parameters
    ----------
    shapes : list of point clouds with `bounding_box()` (ground truths).
    detections : list of detection boxes, paired element-wise with `shapes`.

    Returns
    -------
    A menpo `PCAModel` of the normalized detections.
    """
    # Removed unused local imports (`menpo.io`, `LandmarkGroup`).
    from menpo.model import PCAModel

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(lms.bounding_box()).apply(det)
        for lms, det in zip(shapes, detections)
    ]

    # build a PCA model from good detections
    return PCAModel(normed_detections)
def load_n_create_generator(pattern, detector_name,
                            group=None, overwrite=False):
    """Run a named detector over all images matching `pattern`, build a PCA
    model of the acceptable detections and pickle it to disk as
    '<detector_name>_gen.pkl'.

    Parameters
    ----------
    pattern : glob pattern of images to import (must carry `group` landmarks).
    detector_name : one of the keys of `_DETECTORS` ('dlib', 'pico', 'opencv').
    group : landmark group holding the ground-truth annotation.
    overwrite : passed through to `mio.export_pickle`.

    Raises
    ------
    ValueError : if `detector_name` is not a recognised detector.
    """
    import menpo.io as mio
    from menpo.model import PCAModel

    try:
        detector = _DETECTORS[detector_name]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))

    print('Running {} detector on {}'.format(detector_name, pattern))
    bboxes = [(img, detect_and_check(img, detector, group=group))
              for img in mio.import_images(pattern, normalise=False,
                                           verbose=True)]

    # find all the detections that did not fail.
    # Bug fix: materialize as a list — on Python 3, filter() returns an
    # iterator, so the len() below would raise TypeError.
    detections = [x for x in bboxes if x[1] is not None]
    print('Creating a model out of {} detections.'.format(len(detections)))

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(im.landmarks[group].lms.bounding_box()).apply(det)
        for im, det in detections
    ]

    # build a PCA model from good detections
    pca = PCAModel(normed_detections)

    mio.export_pickle(pca, '{}_gen.pkl'.format(detector_name), overwrite=overwrite)
if __name__ == '__main__':
    path = '/Users/gtrigeo/db/lfpw/trainset/*.png'
    # Bug fix: `create_generator` takes (shapes, detections) lists and has no
    # `group` parameter; the glob-pattern + detector-name + group signature
    # belongs to `load_n_create_generator`.
    load_n_create_generator(path, 'dlib', group='PTS')
| {
"repo_name": "trigeorgis/mdm",
"path": "detect.py",
"copies": "1",
"size": "4300",
"license": "bsd-3-clause",
"hash": -8842400370397917000,
"line_mean": 28.8611111111,
"line_max": 83,
"alpha_frac": 0.6602325581,
"autogenerated": false,
"ratio": 3.377847604084839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45380801621848393,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pandas as pd
import traceback
import easysparql
import data_extraction
import learning
from models import MLModel, PredictionRun, Membership
def get_classes(endpoint=None):
if endpoint is None:
print "get_classes> endpoint should not be None"
return []
return easysparql.get_classes(endpoint=endpoint)
def explore_and_train_tbox(endpoint=None, model_id=None):
    # Discover all numerical class/property pairs at a SPARQL endpoint
    # (T-Box driven), extract their values, train a model and save it.
    # Progress/state is reported via the MLModel row identified by model_id.
    if endpoint is None:
        print "explore_and_train_tbox> endpoint is None"
        return
    if model_id is None:
        print "explore_and_train_tbox> model_id should not be None"
        return
    try:
        # Bind model_id so helpers can report progress via a 1-arg callback.
        update_progress_func = partial(update_model_progress_for_partial, model_id)
        update_model_state(model_id=model_id, new_state=MLModel.RUNNING, new_progress=0,
                           new_notes="Extracting numerical class/property combinations")
        # Safe function
        classes_properties_uris = easysparql.get_all_classes_properties_numerical(endpoint=endpoint)
        update_model_state(model_id=model_id, new_progress=0,
                           new_notes="extracting values from gathered class/property")
        data, meta_data = data_extraction.data_and_meta_from_class_property_uris(
            endpoint=endpoint, class_property_uris=classes_properties_uris, update_func=update_progress_func,
            isnumericfilter=True)
        update_model_state(model_id=model_id, new_progress=0, new_notes="training the model")
        if data is None:
            # Nothing extracted: stop the run and record why.
            update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED,
                               new_notes="No data is extracted from the endpoint")
            return
        # Diagnostic only: NaNs are reported but training proceeds regardless.
        if np.any(np.isnan(data)):
            print "explore_and_train_tbox> there is a nan in the data"
            print "**************************"
        else:
            print "explore_and_train_tbox> no nans in the data"
        model = learning.train_with_data_and_meta(data=data, meta_data=meta_data, update_func=update_progress_func)
        update_model_state(model_id=model_id, new_progress=0, new_notes="organizing the clusters")
        # NOTE(review): the returned meta_with_clusters is never used here —
        # presumably called for its side effects; confirm against `learning`.
        meta_with_clusters = learning.get_cluster_for_meta(training_meta=meta_data, testing_meta=meta_data,
                                                           update_func=update_progress_func)
        update_model_state(model_id=model_id, new_progress=0, new_notes="Saving the model data")
        # Persist the model; the file name embeds the model id for lookup.
        model_file_name = data_extraction.save_model(model=model, meta_data=meta_data, file_name=str(model_id) + " - ")
        if model_file_name is not None:
            # Attach the saved file name to the MLModel row, if it still exists.
            m = MLModel.objects.filter(id=model_id)
            if len(m) == 1:
                m = m[0]
                m.file_name = model_file_name
                m.save()
                update_model_state(model_id=model_id, new_progress=100, new_state=MLModel.COMPLETE, new_notes="Completed")
            else:
                update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED, new_notes="model is deleted")
        else:
            update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED, new_notes="Error Saving the model")
    except Exception as e:
        # Catch-all so a failed run is always reflected in the model state.
        print "explore_and_train_tbox> Exception %s" % str(e)
        traceback.print_exc()
        update_model_state(model_id=model_id, new_state=MLModel.STOPPED, new_notes="Not captured error: " + str(e))
def explore_and_train_abox(endpoint=None, model_id=None, classes_uris=[], min_num_of_objects=90):
    # A-Box variant of explore_and_train_tbox: instead of discovering classes,
    # it enumerates the properties of the caller-supplied `classes_uris` from
    # instance data, then extracts values and trains a model the same way.
    # NOTE(review): `classes_uris=[]` is a mutable default argument — safe
    # only because it is never mutated here.
    if endpoint is None:
        print "explore_and_train_abox> endpoint is None"
        return
    if model_id is None:
        print "explore_and_train_abox> model_id should not be None"
        return
    try:
        # Bind model_id so helpers can report progress via a 1-arg callback.
        update_progress_func = partial(update_model_progress_for_partial, model_id)
        update_model_state(model_id=model_id, new_state=MLModel.RUNNING, new_progress=0,
                           new_notes="Extracting numerical class/property combinations")
        # Collect (class, property) pairs from instance data for each class.
        classes_properties_uris = []
        for idx, class_uri in enumerate(classes_uris):
            update_progress_func(int(idx * 1.0 / len(classes_uris) * 100))
            properties = easysparql.get_properties_for_class_abox(endpoint=endpoint, class_uri=class_uri,
                                                                  raiseexception=True)
            for prop in properties:
                classes_properties_uris.append((class_uri, prop))
        update_progress_func(100)
        update_model_state(model_id=model_id, new_progress=0,
                           new_notes="extracting values from gathered class/property")
        data, meta_data = data_extraction.data_and_meta_from_class_property_uris(
            endpoint=endpoint, class_property_uris=classes_properties_uris, update_func=update_progress_func,
            isnumericfilter=False, min_num_of_objects=min_num_of_objects)
        update_model_state(model_id=model_id, new_progress=0, new_notes="training the model")
        if data is None:
            update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED,
                               new_notes="No data is extracted from the endpoint")
            return
        # Diagnostic only: NaNs are reported but training proceeds regardless.
        if np.any(np.isnan(data)):
            print "explore_and_train_abox> there is a nan in the data"
            print "**************************"
        else:
            print "explore_and_train_abox> no nans in the data"
        model = learning.train_with_data_and_meta(data=data, meta_data=meta_data, update_func=update_progress_func)
        if model is None:
            update_model_state(model_id=model_id, new_state=MLModel.STOPPED,
                               new_notes="leaning failed as model is None")
            return
        update_model_state(model_id=model_id, new_progress=0, new_notes="organizing the clusters")
        # NOTE(review): the returned meta_with_clusters is never used here —
        # presumably called for its side effects; confirm against `learning`.
        meta_with_clusters = learning.get_cluster_for_meta(training_meta=meta_data, testing_meta=meta_data,
                                                           update_func=update_progress_func)
        update_model_state(model_id=model_id, new_progress=0, new_notes="Saving the model data")
        # Persist the model; the file name embeds the model id for lookup.
        model_file_name = data_extraction.save_model(model=model, meta_data=meta_data, file_name=str(model_id) + " - ")
        if model_file_name is not None:
            # Attach the saved file name to the MLModel row, if it still exists.
            m = MLModel.objects.filter(id=model_id)
            if len(m) == 1:
                m = m[0]
                m.file_name = model_file_name
                m.save()
                update_model_state(model_id=model_id, new_progress=100, new_state=MLModel.COMPLETE, new_notes="Completed")
            else:
                update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED, new_notes="model is deleted")
        else:
            update_model_state(model_id=model_id, new_progress=0, new_state=MLModel.STOPPED, new_notes="Error Saving the model")
    except Exception as e:
        # Catch-all so a failed run is always reflected in the model state.
        print "explore_and_train_abox> Exception %s" % str(e)
        traceback.print_exc()
        update_model_state(model_id=model_id, new_state=MLModel.STOPPED, new_notes="Raised error: " + str(e))
def predict_files(predictionrun_id=None, model_dir=None, files=[], original_uploaded_filenames=[], has_header=False):
    """
    Predict the type of every column in each uploaded file using a trained
    FCM model, storing the resulting memberships on the PredictionRun row.
    :param predictionrun_id: id of the PredictionRun used to track progress
        and collect membership results
    :param model_dir: the dir of the FCM model csv file abs dir
    :param files: list of files to be predicted (paths on disk)
    :param original_uploaded_filenames: the user-facing names paired
        element-wise with `files`
    :param has_header: whether the files' first row is a header
    :return: None; all results are written through the PredictionRun model
    """
    if predictionrun_id is None:
        print "predict_files> predictionrun_id should not be None"
        return
    if model_dir is None:
        print "predict_files> model_dir should not be None"
        return
    if len(files) != len(original_uploaded_filenames):
        print "predict_files> number of files (%d) does not equal original_uploaded_filenames (%d)" % \
              (len(files), len(original_uploaded_filenames))
        return
    print "original uploaded files:"
    print original_uploaded_filenames
    # Bind the run id so helpers can report progress via a 1-arg callback.
    update_func = partial(update_predictionrun_progress_for_partial, predictionrun_id)
    update_predictionrun_state(predictionrun_id=predictionrun_id, new_progress=0, new_state=PredictionRun.RUNNING)
    model, types = learning.load_model(model_dir)
    num_of_files = len(files)
    for idx, fname in enumerate(files):
        update_predictionrun_state(predictionrun_id=predictionrun_id,
                                   new_notes='predicting columns in file: ' + fname.split('/')[-1].strip()[:-4])
        data, meta_data = data_extraction.data_and_meta_from_a_mixed_file(file_name=fname, has_header=has_header,
                                                                          original_file_name=original_uploaded_filenames[idx])
        print "predict_files> extracted data shape is %s " % str(data.shape)
        # u holds the fuzzy membership values for each extracted column.
        u = learning.predict(model=model, data=data, meta_data=meta_data, update_func=update_func)
        predictionrun = PredictionRun.objects.filter(id=predictionrun_id)
        if len(predictionrun) == 1:
            predictionrun = predictionrun[0]
            # meta "type" entries are formatted "<file_name> , <column_no>".
            file_column_list = [{"file_name": fc["type"].split(' , ')[0], "column_no": fc["type"].split(' , ')[1]}
                                for fc in meta_data]
            predictionrun.add_memberships(u, file_column_list)
        else:
            # The run row disappeared mid-flight; abort.
            update_predictionrun_state(predictionrun_id=predictionrun_id,
                                       new_notes="predictionrun_id is not longer exists",
                                       new_state=PredictionRun.STOPPED)
            return
    predictionrun = PredictionRun.objects.filter(id=predictionrun_id)
    if len(predictionrun) == 1:
        predictionrun = predictionrun[0]
        predictionrun.set_types(types)
        print "setting types"
        print types
    else:
        update_predictionrun_state(predictionrun_id=predictionrun_id,
                                   new_notes="predictionrun_id is not longer exists",
                                   new_state=PredictionRun.STOPPED)
        return
    update_predictionrun_state(predictionrun_id=predictionrun_id, new_progress=100, new_state=PredictionRun.COMPLETE,
                               new_notes='')
def get_types_and_membership(predictionrun_id=None, top_k_candidates=5, model_dir=None):
    # For each stored Membership of the run, return its file/column plus the
    # top-k candidate types with membership scores expressed as percentages.
    if model_dir is None:
        print 'get_types_and_membership> model_dir should not be None'
        return []
    if predictionrun_id is None:
        print 'get_types_and_membership> predictionrun_id should not be None'
        return []
    predictionrun = PredictionRun.objects.filter(id=predictionrun_id)
    if len(predictionrun) != 1:
        print 'get_types_and_membership> predictionrun_id is not longer exists'
        return []
    predictionrun = predictionrun[0]
    # The model itself is unused here; only the type labels are needed.
    model, types = learning.load_model(model_dir)
    types = np.array(types)
    list_of_mem_with_types = []
    print 'mem with types'
    for m in Membership.objects.filter(prediction_run=predictionrun):
        mem_with_types = {}
        mems = m.get_values_as_numpy()
        mems_idxs = mems.argsort()[::-1][:top_k_candidates]  # idxs sorted from largest (value not largest index) to smallest
        mems = mems[mems_idxs]
        # Scale fuzzy memberships (0..1) to percentages.
        mems *= 100
        mem_with_types["typesscores"] = zip(mems.tolist(), types[mems_idxs].tolist())
        mem_with_types["column_no"] = m.column_no
        mem_with_types["file_name"] = m.file_name
        list_of_mem_with_types.append(mem_with_types)
    return list_of_mem_with_types
####################################################################
# State update functions #
####################################################################
def update_model_progress_for_partial(model_id, new_progress):
    """Progress-only wrapper around update_model_state, shaped for
    functools.partial binding of model_id (leaving a 1-arg callback)."""
    result = update_model_state(model_id=model_id, new_progress=new_progress)
    return result
def update_model_state(model_id=None, new_state=None, new_notes=None, new_progress=None):
    """Update any subset of state/notes/progress on the MLModel with the
    given id. Only non-None arguments are applied.

    Returns the saved model instance, or None when exactly one matching
    row is not found.
    """
    matches = MLModel.objects.filter(id=model_id)
    if len(matches) != 1:
        return None
    model = matches[0]
    if new_state is not None:
        model.state = new_state
    if new_notes is not None:
        model.notes = new_notes
    if new_progress is not None:
        model.progress = new_progress
    model.save()
    return model
def update_predictionrun_progress_for_partial(predictionrun_id, new_progress):
    """Progress-only wrapper around update_predictionrun_state, shaped for
    functools.partial binding of predictionrun_id (leaving a 1-arg callback)."""
    result = update_predictionrun_state(predictionrun_id=predictionrun_id, new_progress=new_progress)
    return result
def update_predictionrun_state(predictionrun_id=None, new_state=None, new_notes=None, new_progress=None):
    """Update any subset of state/notes/progress on the PredictionRun with
    the given id. Only non-None arguments are applied.

    Returns the saved run instance, or None when exactly one matching row
    is not found.
    """
    matches = PredictionRun.objects.filter(id=predictionrun_id)
    if len(matches) != 1:
        return None
    run = matches[0]
    if new_state is not None:
        run.state = new_state
    if new_notes is not None:
        run.notes = new_notes
    if new_progress is not None:
        run.progress = new_progress
    run.save()
    return run
| {
"repo_name": "ahmad88me/tada",
"path": "tadacode/tadaa/core.py",
"copies": "1",
"size": "12923",
"license": "mit",
"hash": 5943542223786269000,
"line_mean": 48.3244274809,
"line_max": 128,
"alpha_frac": 0.6164977172,
"autogenerated": false,
"ratio": 3.7534127214638398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9839227090953268,
"avg_score": 0.006136669542114396,
"num_lines": 262
} |
from functools import partial
import numpy as np
import pandas as pd
from .utils import isstr, aggregate_common_doc, funcs_no_separate_nan
from .utils_numpy import allnan, anynan, check_dtype
from .aggregate_numpy import _aggregate_base
def _wrapper(group_idx, a, size, fill_value, func='sum', dtype=None, ddof=0, **kwargs):
    # Bridge a single aggregate() call onto pandas' groupby machinery.
    funcname = func.__name__ if callable(func) else func
    # NOTE(review): incoming **kwargs are deliberately discarded here; only
    # ddof (for var/std) is forwarded to pandas.
    kwargs = dict()
    if funcname in ('var', 'std'):
        kwargs['ddof'] = ddof
    df = pd.DataFrame({'group_idx': group_idx, 'a': a})
    if func == "sort":
        grouped = df.groupby('group_idx', sort=True)
    else:
        grouped = df.groupby('group_idx', sort=False).aggregate(func, **kwargs)
    dtype = check_dtype(dtype, getattr(func, '__name__', funcname), a, size)
    if funcname.startswith('cum'):
        # Cumulative functions yield one value per input element, so the
        # result is returned as-is (no fill/scatter step).
        ret = grouped.values[:, 0]
    else:
        # One value per group: scatter results into a fill_value-initialized
        # output of length `size`, indexed by group id.
        ret = np.full(size, fill_value, dtype=dtype)
        ret[grouped.index] = grouped.values[:, 0]
    return ret
# Aggregation names that pandas' groupby can evaluate directly.
_supported_funcs = 'sum prod all any min max mean var std first last cumsum cumprod cummax cummin'.split()
_impl_dict = {fn: partial(_wrapper, func=fn) for fn in _supported_funcs}
# nan-variants map onto the same pandas functions (pandas skips NaNs itself),
# except for the functions that have no separate nan flavour.
_impl_dict.update(('nan' + fn, partial(_wrapper, func=fn))
                  for fn in _supported_funcs
                  if fn not in funcs_no_separate_nan)
# Special cases whose public name differs from the pandas counterpart
# (e.g. len -> 'count', argmax -> 'idxmax'); 'generic' handles callables.
_impl_dict.update(allnan=partial(_wrapper, func=allnan),
                  anynan=partial(_wrapper, func=anynan),
                  len=partial(_wrapper, func='count'),
                  nanlen=partial(_wrapper, func='count'),
                  argmax=partial(_wrapper, func='idxmax'),
                  argmin=partial(_wrapper, func='idxmin'),
                  generic=_wrapper)
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
              dtype=None, axis=None, **kwargs):
    # 'nan'-prefixed string function names request that NaNs be squeezed
    # out of the input before aggregation.
    if isstr(func) and func.startswith('nan'):
        nansqueeze = True
    else:
        nansqueeze = False
    return _aggregate_base(group_idx, a, size=size, fill_value=fill_value,
                           order=order, dtype=dtype, func=func, axis=axis,
                           _impl_dict=_impl_dict, _nansqueeze=nansqueeze,
                           **kwargs)
aggregate.__doc__ = """
This is the pandas implementation of aggregate. It makes use of
`pandas`'s groupby machienery and is mainly used for reference
and benchmarking.
""" + aggregate_common_doc
| {
"repo_name": "ml31415/numpy-groupies",
"path": "numpy_groupies/aggregate_pandas.py",
"copies": "1",
"size": "2320",
"license": "bsd-2-clause",
"hash": -8894907731305714000,
"line_mean": 40.4285714286,
"line_max": 106,
"alpha_frac": 0.6202586207,
"autogenerated": false,
"ratio": 3.6535433070866143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9769595950439818,
"avg_score": 0.0008411954693591078,
"num_lines": 56
} |
from functools import partial
import numpy as np
import theano
import theano.tensor as tt
from scipy import stats
import warnings
from pymc3.util import get_variable_name
from .dist_math import bound, factln, binomln, betaln, logpow
from .distribution import Discrete, draw_values, generate_samples, reshape_sampled
from pymc3.math import tround
from ..math import logaddexp
__all__ = ['Binomial', 'BetaBinomial', 'Bernoulli', 'DiscreteWeibull',
'Poisson', 'NegativeBinomial', 'ConstantDist', 'Constant',
'ZeroInflatedPoisson', 'ZeroInflatedBinomial', 'ZeroInflatedNegativeBinomial',
'DiscreteUniform', 'Geometric', 'Categorical']
class Binomial(Discrete):
    R"""
    Binomial log-likelihood.
    The discrete probability distribution of the number of successes
    in a sequence of n independent yes/no experiments, each of which
    yields success with probability p.
    .. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}
    ========  ==========================================
    Support   :math:`x \in \{0, 1, \ldots, n\}`
    Mean      :math:`n p`
    Variance  :math:`n p (1 - p)`
    ========  ==========================================
    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    p : float
        Probability of success in each trial (0 < p < 1).
    """
    def __init__(self, n, p, *args, **kwargs):
        super(Binomial, self).__init__(*args, **kwargs)
        # Wrap parameters as theano tensors so symbolic expressions work.
        self.n = n = tt.as_tensor_variable(n)
        self.p = p = tt.as_tensor_variable(p)
        # The mode of a Binomial is near n*p; round and cast to the
        # distribution's (integer) dtype for use as a starting value.
        self.mode = tt.cast(tround(n * p), self.dtype)
    def random(self, point=None, size=None, repeat=None):
        """Draw random samples via scipy.stats.binom."""
        n, p = draw_values([self.n, self.p], point=point)
        return generate_samples(stats.binom.rvs, n=n, p=p,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        """Symbolic log-probability of `value`; -inf outside the support."""
        n = self.n
        p = self.p
        # bound() returns -inf whenever any of the trailing conditions fail.
        return bound(
            binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),
            0 <= value, value <= n,
            0 <= p, p <= 1)
    def _repr_latex_(self, name=None, dist=None):
        """LaTeX representation used by Jupyter display hooks."""
        if dist is None:
            dist = self
        n = dist.n
        p = dist.p
        return r'${} \sim \text{{Binomial}}(\mathit{{n}}={}, \mathit{{p}}={})$'.format(name,
                                                get_variable_name(n),
                                                get_variable_name(p))
class BetaBinomial(Discrete):
    R"""
    Beta-binomial log-likelihood.
    Equivalent to binomial random variable with success probability
    drawn from a beta distribution.
    .. math::
       f(x \mid \alpha, \beta, n) =
           \binom{n}{x}
           \frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}
    ======== =================================================================
    Support :math:`x \in \{0, 1, \ldots, n\}`
    Mean :math:`n \dfrac{\alpha}{\alpha + \beta}`
    Variance :math:`n \dfrac{\alpha \beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
    ======== =================================================================
    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    alpha : float
        alpha > 0.
    beta : float
        beta > 0.
    """
    def __init__(self, alpha, beta, n, *args, **kwargs):
        super(BetaBinomial, self).__init__(*args, **kwargs)
        # Keep parameters as theano tensors so they may be symbolic.
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.beta = beta = tt.as_tensor_variable(beta)
        self.n = n = tt.as_tensor_variable(n)
        self.mode = tt.cast(tround(alpha / (alpha + beta)), 'int8')
    def _random(self, alpha, beta, n, size=None):
        # Sample p ~ Beta(alpha, beta), then x ~ Binomial(n, p).
        size = size or 1
        p = np.atleast_1d(stats.beta.rvs(a=alpha, b=beta, size=np.prod(size)))
        # Sometimes scipy.beta returns nan. Ugh.
        while np.any(np.isnan(p)):
            i = np.isnan(p)
            p[i] = stats.beta.rvs(a=alpha, b=beta, size=np.sum(i))
        # Sigh...
        _n, _p, _size = np.atleast_1d(n).flatten(), p.flatten(), np.prod(size)
        samples = np.reshape(stats.binom.rvs(n=_n, p=_p, size=_size), size)
        return samples
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via the beta-binomial mixture above.
        alpha, beta, n = \
            draw_values([self.alpha, self.beta, self.n], point=point)
        return generate_samples(self._random, alpha=alpha, beta=beta, n=n,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        # Symbolic log-pmf; -inf outside 0 <= value <= n, alpha > 0, beta > 0.
        alpha = self.alpha
        beta = self.beta
        return bound(binomln(self.n, value)
                     + betaln(value + alpha, self.n - value + beta)
                     - betaln(alpha, beta),
                     value >= 0, value <= self.n,
                     alpha > 0, beta > 0)
    def _repr_latex_(self, name=None, dist=None):
        if dist is None:
            dist = self
        alpha = dist.alpha
        beta = dist.beta
        # BUG FIX: this previously rendered the name "NegativeBinomial"
        # for a BetaBinomial variable.
        return r'${} \sim \text{{BetaBinomial}}(\mathit{{alpha}}={}, \mathit{{beta}}={})$'.format(name,
                                                get_variable_name(alpha),
                                                get_variable_name(beta))
class Bernoulli(Discrete):
    R"""Bernoulli log-likelihood
    The Bernoulli distribution describes the probability of successes
    (x=1) and failures (x=0).
    .. math:: f(x \mid p) = p^{x} (1-p)^{1-x}
    ======== ======================
    Support :math:`x \in \{0, 1\}`
    Mean :math:`p`
    Variance :math:`p (1 - p)`
    ======== ======================
    Parameters
    ----------
    p : float
        Probability of success (0 < p < 1).
    """
    def __init__(self, p, *args, **kwargs):
        super(Bernoulli, self).__init__(*args, **kwargs)
        self.p = p = tt.as_tensor_variable(p)
        # Mode is the rounded probability: 1 if p > 0.5 else 0.
        self.mode = tt.cast(tround(p), 'int8')
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via scipy.stats.bernoulli.
        p = draw_values([self.p], point=point)[0]
        return generate_samples(stats.bernoulli.rvs, p,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        # log(p) for successes, log(1 - p) for failures;
        # -inf outside value in {0, 1} or p outside [0, 1].
        p = self.p
        return bound(
            tt.switch(value, tt.log(p), tt.log(1 - p)),
            value >= 0, value <= 1,
            p >= 0, p <= 1)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        p = dist.p
        return r'${} \sim \text{{Bernoulli}}(\mathit{{p}}={})$'.format(name,
                                                get_variable_name(p))
class DiscreteWeibull(Discrete):
    R"""Discrete Weibull log-likelihood
    The discrete Weibull distribution is a flexible model of count data that
    can handle both over- and under-dispersion.
    .. math:: f(x \mid q, \beta) = q^{x^{\beta}} - q^{(x + 1)^{\beta}}
    ======== ======================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`\mu = \sum_{x = 1}^{\infty} q^{x^{\beta}}`
    Variance :math:`2 \sum_{x = 1}^{\infty} x q^{x^{\beta}} - \mu - \mu^2`
    ======== ======================
    Parameters
    ----------
    q : float
        0 < q < 1.
    beta : float
        beta > 0.
    """
    def __init__(self, q, beta, *args, **kwargs):
        # Use the median as the default test value (no closed-form mean).
        super(DiscreteWeibull, self).__init__(*args, defaults=['median'], **kwargs)
        self.q = q = tt.as_tensor_variable(q)
        self.beta = beta = tt.as_tensor_variable(beta)
        self.median = self._ppf(0.5)
    def logp(self, value):
        # P(X = x) = q**(x**beta) - q**((x+1)**beta);
        # -inf outside value >= 0, 0 < q < 1, beta > 0.
        q = self.q
        beta = self.beta
        return bound(tt.log(tt.power(q, tt.power(value, beta)) - tt.power(q, tt.power(value + 1, beta))),
                     0 <= value,
                     0 < q, q < 1,
                     0 < beta)
    def _ppf(self, p):
        """
        The percentile point function (the inverse of the cumulative
        distribution function) of the discrete Weibull distribution.
        """
        q = self.q
        beta = self.beta
        return (tt.ceil(tt.power(tt.log(1 - p) / tt.log(q), 1. / beta)) - 1).astype('int64')
    def _random(self, q, beta, size=None):
        # Inverse-transform sampling: numpy analogue of _ppf applied to
        # uniform draws.
        p = np.random.uniform(size=size)
        return np.ceil(np.power(np.log(1 - p) / np.log(q), 1. / beta)) - 1
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via inverse-transform sampling.
        q, beta = draw_values([self.q, self.beta], point=point)
        return generate_samples(self._random, q, beta,
                                dist_shape=self.shape,
                                size=size)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        q = dist.q
        beta = dist.beta
        return r'${} \sim \text{{DiscreteWeibull}}(\mathit{{q}}={}, \mathit{{beta}}={})$'.format(name,
                                                get_variable_name(q),
                                                get_variable_name(beta))
class Poisson(Discrete):
    R"""
    Poisson log-likelihood.
    Often used to model the number of events occurring in a fixed period
    of time when the times at which events occur are independent.
    .. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}
    ======== ==========================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`\mu`
    Variance :math:`\mu`
    ======== ==========================
    Parameters
    ----------
    mu : float
        Expected number of occurrences during the given interval
        (mu >= 0).
    Notes
    -----
    The Poisson distribution can be derived as a limiting case of the
    binomial distribution.
    """
    def __init__(self, mu, *args, **kwargs):
        super(Poisson, self).__init__(*args, **kwargs)
        self.mu = mu = tt.as_tensor_variable(mu)
        # Mode of a Poisson is floor(mu).
        self.mode = tt.floor(mu).astype('int32')
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via scipy.stats.poisson.
        mu = draw_values([self.mu], point=point)[0]
        return generate_samples(stats.poisson.rvs, mu,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        # Symbolic log-pmf; -inf when mu < 0 or value < 0.
        mu = self.mu
        log_prob = bound(
            logpow(mu, value) - factln(value) - mu,
            mu >= 0, value >= 0)
        # Return zero when mu and value are both zero
        return tt.switch(tt.eq(mu, 0) * tt.eq(value, 0),
                         0, log_prob)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        mu = dist.mu
        return r'${} \sim \text{{Poisson}}(\mathit{{mu}}={})$'.format(name,
                                                get_variable_name(mu))
class NegativeBinomial(Discrete):
    R"""
    Negative binomial log-likelihood.
    The negative binomial distribution describes a Poisson random variable
    whose rate parameter is gamma distributed.
    .. math::
       f(x \mid \mu, \alpha) =
           \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)}
           (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
    ======== ==========================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`\mu`
    ======== ==========================
    Parameters
    ----------
    mu : float
        Poission distribution parameter (mu > 0).
    alpha : float
        Gamma distribution parameter (alpha > 0).
    """
    def __init__(self, mu, alpha, *args, **kwargs):
        super(NegativeBinomial, self).__init__(*args, **kwargs)
        self.mu = mu = tt.as_tensor_variable(mu)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.mode = tt.floor(mu).astype('int32')
    def random(self, point=None, size=None, repeat=None):
        # Sample via the gamma-Poisson mixture representation:
        # rate ~ Gamma(alpha, mu / alpha), then x ~ Poisson(rate).
        mu, alpha = draw_values([self.mu, self.alpha], point=point)
        g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,
                             dist_shape=self.shape,
                             size=size)
        g[g == 0] = np.finfo(float).eps  # Just in case
        return reshape_sampled(stats.poisson.rvs(g), size, self.shape)
    def logp(self, value):
        # Symbolic log-pmf; -inf outside value >= 0, mu > 0, alpha > 0.
        mu = self.mu
        alpha = self.alpha
        negbinom = bound(binomln(value + alpha - 1, value)
                         + logpow(mu / (mu + alpha), value)
                         + logpow(alpha / (mu + alpha), alpha),
                         value >= 0, mu > 0, alpha > 0)
        # Return Poisson when alpha gets very large.
        return tt.switch(tt.gt(alpha, 1e10),
                         Poisson.dist(self.mu).logp(value),
                         negbinom)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        mu = dist.mu
        alpha = dist.alpha
        return r'${} \sim \text{{NegativeBinomial}}(\mathit{{mu}}={}, \mathit{{alpha}}={})$'.format(name,
                                                get_variable_name(mu),
                                                get_variable_name(alpha))
class Geometric(Discrete):
    R"""
    Geometric log-likelihood.
    The probability that the first success in a sequence of Bernoulli
    trials occurs on the x'th trial.
    .. math:: f(x \mid p) = p(1-p)^{x-1}
    ======== =============================
    Support :math:`x \in \mathbb{N}_{>0}`
    Mean :math:`\dfrac{1}{p}`
    Variance :math:`\dfrac{1 - p}{p^2}`
    ======== =============================
    Parameters
    ----------
    p : float
        Probability of success on an individual trial (0 < p <= 1).
    """
    def __init__(self, p, *args, **kwargs):
        super(Geometric, self).__init__(*args, **kwargs)
        self.p = p = tt.as_tensor_variable(p)
        # The mode of the geometric distribution is always its smallest
        # supported value, 1.
        self.mode = 1
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via numpy's geometric sampler.
        p = draw_values([self.p], point=point)[0]
        return generate_samples(np.random.geometric, p,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        # Symbolic log-pmf; -inf outside 0 <= p <= 1 or value < 1.
        p = self.p
        return bound(tt.log(p) + logpow(1 - p, value - 1),
                     0 <= p, p <= 1, value >= 1)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        p = dist.p
        return r'${} \sim \text{{Geometric}}(\mathit{{p}}={})$'.format(name,
                                                get_variable_name(p))
class DiscreteUniform(Discrete):
    R"""
    Discrete uniform distribution.
    .. math:: f(x \mid lower, upper) = \frac{1}{upper-lower}
    ======== ===============================================
    Support :math:`x \in {lower, lower + 1, \ldots, upper}`
    Mean :math:`\dfrac{lower + upper}{2}`
    Variance :math:`\dfrac{(upper - lower)^2}{12}`
    ======== ===============================================
    Parameters
    ----------
    lower : int
        Lower limit.
    upper : int
        Upper limit (upper > lower).
    """
    def __init__(self, lower, upper, *args, **kwargs):
        super(DiscreteUniform, self).__init__(*args, **kwargs)
        # Bounds are floored to integers so float inputs are accepted.
        self.lower = tt.floor(lower).astype('int32')
        self.upper = tt.floor(upper).astype('int32')
        # Midpoint of the support, clipped to be at least the lower bound.
        self.mode = tt.maximum(
            tt.floor((upper + lower) / 2.).astype('int32'), self.lower)
    def _random(self, lower, upper, size=None):
        # This way seems to be the only to deal with lower and upper
        # as array-like.
        # The eps subtraction keeps the continuous draws strictly below
        # upper so that flooring never yields upper + 1.
        samples = stats.uniform.rvs(lower, upper - lower - np.finfo(float).eps,
                                    size=size)
        return np.floor(samples).astype('int32')
    def random(self, point=None, size=None, repeat=None):
        # Draw concrete samples via the floored-uniform trick above.
        lower, upper = draw_values([self.lower, self.upper], point=point)
        return generate_samples(self._random,
                                lower, upper,
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        # Uniform mass over upper - lower + 1 integers; -inf outside bounds.
        upper = self.upper
        lower = self.lower
        return bound(-tt.log(upper - lower + 1),
                     lower <= value, value <= upper)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        lower = dist.lower
        upper = dist.upper
        return r'${} \sim \text{{DiscreteUniform}}(\mathit{{lower}}={}, \mathit{{upper}}={})$'.format(name,
                                                get_variable_name(lower),
                                                get_variable_name(upper))
class Categorical(Discrete):
    R"""
    Categorical log-likelihood.
    The most general discrete distribution.
    .. math:: f(x \mid p) = p_x
    ======== ===================================
    Support :math:`x \in \{0, 1, \ldots, |p|-1\}`
    ======== ===================================
    Parameters
    ----------
    p : array of floats
        p > 0 and the elements of p must sum to 1. They will be automatically
        rescaled otherwise.
    """
    def __init__(self, p, *args, **kwargs):
        super(Categorical, self).__init__(*args, **kwargs)
        try:
            # Prefer a concrete category count from the test value if the
            # symbolic shape carries one.
            self.k = tt.shape(p)[-1].tag.test_value
        except AttributeError:
            self.k = tt.shape(p)[-1]
        self.p = p = tt.as_tensor_variable(p)
        # Normalize p along the last axis so the weights sum to 1.
        self.p = (p.T / tt.sum(p, -1)).T
        # Mode is computed on the unnormalized p (argmax is unaffected
        # by positive rescaling).
        self.mode = tt.argmax(p)
    def random(self, point=None, size=None, repeat=None):
        # np.random.choice only accepts a 1-d probability vector, so for
        # batched p draw one category per row.
        def random_choice(k, *args, **kwargs):
            if len(kwargs['p'].shape) > 1:
                return np.asarray(
                    [np.random.choice(k, p=p)
                     for p in kwargs['p']]
                )
            else:
                return np.random.choice(k, *args, **kwargs)
        p, k = draw_values([self.p, self.k], point=point)
        return generate_samples(partial(random_choice, np.arange(k)),
                                p=p,
                                broadcast_shape=p.shape[:-1] or (1,),
                                dist_shape=self.shape,
                                size=size)
    def logp(self, value):
        p = self.p
        k = self.k
        # Clip values before using them for indexing
        value_clip = tt.clip(value, 0, k - 1)
        # Sanity condition (without gradient): the weights must sum to 1.
        sumto1 = theano.gradient.zero_grad(
            tt.le(abs(tt.sum(p, axis=-1) - 1), 1e-5))
        if p.ndim > 1:
            a = tt.log(p[tt.arange(p.shape[0]), value_clip])
        else:
            a = tt.log(p[value_clip])
        return bound(a, value >= 0, value <= (k - 1), sumto1)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        p = dist.p
        return r'${} \sim \text{{Categorical}}(\mathit{{p}}={})$'.format(name,
                                                get_variable_name(p))
class Constant(Discrete):
    """
    Constant log-likelihood.
    Parameters
    ----------
    value : float or int
        Constant parameter.
    """
    def __init__(self, c, *args, **kwargs):
        # BUG FIX: the deprecation message misspelled "Deterministic".
        warnings.warn("Constant has been deprecated. We recommend using a Deterministic object instead.",
                      DeprecationWarning)
        super(Constant, self).__init__(*args, **kwargs)
        # A point mass: mean, median and mode all equal the constant c.
        self.mean = self.median = self.mode = self.c = c = tt.as_tensor_variable(c)
    def random(self, point=None, size=None, repeat=None):
        # Samples are simply arrays filled with the constant value.
        c = draw_values([self.c], point=point)[0]
        dtype = np.array(c).dtype
        def _random(c, dtype=dtype, size=None):
            return np.full(size, fill_value=c, dtype=dtype)
        return generate_samples(_random, c=c, dist_shape=self.shape,
                                size=size).astype(dtype)
    def logp(self, value):
        # log-probability is 0 where value == c and -inf elsewhere.
        c = self.c
        return bound(0, tt.eq(value, c))
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        return r'${} \sim \text{{Constant}}()$'.format(name)
# Backwards-compatible alias; ``Constant`` is the canonical name.
ConstantDist = Constant
class ZeroInflatedPoisson(Discrete):
    R"""
    Zero-inflated Poisson log-likelihood.
    Often used to model the number of events occurring in a fixed period
    of time when the times at which events occur are independent.
    .. math::
        f(x \mid \psi, \theta) = \left\{ \begin{array}{l}
            (1-\psi) + \psi e^{-\theta}, \text{if } x = 0 \\
            \psi \frac{e^{-\theta}\theta^x}{x!}, \text{if } x=1,2,3,\ldots
            \end{array} \right.
    ======== ==========================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`\psi\theta`
    Variance :math:`\theta + \frac{1-\psi}{\psi}\theta^2`
    ======== ==========================
    Parameters
    ----------
    psi : float
        Expected proportion of Poisson variates (0 < psi < 1)
    theta : float
        Expected number of occurrences during the given interval
        (theta >= 0).
    """
    def __init__(self, psi, theta, *args, **kwargs):
        super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)
        self.theta = theta = tt.as_tensor_variable(theta)
        self.psi = psi = tt.as_tensor_variable(psi)
        # Delegate the Poisson part of the mixture to a plain Poisson dist.
        self.pois = Poisson.dist(theta)
        self.mode = self.pois.mode
    def random(self, point=None, size=None, repeat=None):
        # Draw Poisson samples, then zero them out with probability 1 - psi.
        theta, psi = draw_values([self.theta, self.psi], point=point)
        g = generate_samples(stats.poisson.rvs, theta,
                             dist_shape=self.shape,
                             size=size)
        sampled = g * (np.random.random(np.squeeze(g.shape)) < psi)
        return reshape_sampled(sampled, size, self.shape)
    def logp(self, value):
        # Mixture log-pmf: log(psi) + Poisson logp for positive counts,
        # logaddexp of the two ways of producing a zero otherwise.
        psi = self.psi
        theta = self.theta
        logp_val = tt.switch(
            tt.gt(value, 0),
            tt.log(psi) + self.pois.logp(value),
            logaddexp(tt.log1p(-psi), tt.log(psi) - theta))
        return bound(
            logp_val,
            0 <= value,
            0 <= psi, psi <= 1,
            0 <= theta)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        theta = dist.theta
        psi = dist.psi
        return r'${} \sim \text{{ZeroInflatedPoisson}}(\mathit{{theta}}={}, \mathit{{psi}}={})$'.format(name,
                                                get_variable_name(theta),
                                                get_variable_name(psi))
class ZeroInflatedBinomial(Discrete):
    R"""
    Zero-inflated Binomial log-likelihood.
    .. math::
        f(x \mid \psi, n, p) = \left\{ \begin{array}{l}
            (1-\psi) + \psi (1-p)^{n}, \text{if } x = 0 \\
            \psi {n \choose x} p^x (1-p)^{n-x}, \text{if } x=1,2,3,\ldots,n
            \end{array} \right.
    ======== ==========================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`(1 - \psi) n p`
    Variance :math:`(1-\psi) n p [1 - p(1 - \psi n)].`
    ======== ==========================
    Parameters
    ----------
    psi : float
        Expected proportion of Binomial variates (0 < psi < 1)
    n : int
        Number of Bernoulli trials (n >= 0).
    p : float
        Probability of success in each trial (0 < p < 1).
    """
    def __init__(self, psi, n, p, *args, **kwargs):
        super(ZeroInflatedBinomial, self).__init__(*args, **kwargs)
        self.n = n = tt.as_tensor_variable(n)
        self.p = p = tt.as_tensor_variable(p)
        self.psi = psi = tt.as_tensor_variable(psi)
        # Delegate the binomial part of the mixture to a plain Binomial dist.
        self.bin = Binomial.dist(n, p)
        self.mode = self.bin.mode
    def random(self, point=None, size=None, repeat=None):
        # Draw binomial samples, then zero them out with probability 1 - psi.
        n, p, psi = draw_values([self.n, self.p, self.psi], point=point)
        g = generate_samples(stats.binom.rvs, n, p,
                             dist_shape=self.shape,
                             size=size)
        sampled = g * (np.random.random(np.squeeze(g.shape)) < psi)
        return reshape_sampled(sampled, size, self.shape)
    def logp(self, value):
        # Mixture log-pmf: log(psi) + Binomial logp for positive counts,
        # logaddexp of the two ways of producing a zero otherwise.
        psi = self.psi
        p = self.p
        n = self.n
        logp_val = tt.switch(
            tt.gt(value, 0),
            tt.log(psi) + self.bin.logp(value),
            logaddexp(tt.log1p(-psi), tt.log(psi) + n * tt.log1p(-p)))
        return bound(
            logp_val,
            0 <= value, value <= n,
            0 <= psi, psi <= 1,
            0 <= p, p <= 1)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        n = dist.n
        p = dist.p
        psi = dist.psi
        name_n = get_variable_name(n)
        name_p = get_variable_name(p)
        name_psi = get_variable_name(psi)
        return (r'${} \sim \text{{ZeroInflatedBinomial}}'
                r'(\mathit{{n}}={}, \mathit{{p}}={}, '
                r'\mathit{{psi}}={})$'
                .format(name, name_n, name_p, name_psi))
class ZeroInflatedNegativeBinomial(Discrete):
    R"""
    Zero-Inflated Negative binomial log-likelihood.
    The Zero-inflated version of the Negative Binomial (NB).
    The NB distribution describes a Poisson random variable
    whose rate parameter is gamma distributed.
    .. math::
       f(x \mid \psi, \mu, \alpha) = \left\{
         \begin{array}{l}
           (1-\psi) + \psi \left (
             \frac{\alpha}{\alpha+\mu}
           \right) ^\alpha, \text{if } x = 0 \\
           \psi \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} \left (
             \frac{\alpha}{\mu+\alpha}
           \right)^\alpha \left(
             \frac{\mu}{\mu+\alpha}
           \right)^x, \text{if } x=1,2,3,\ldots
         \end{array}
       \right.
    ======== ==========================
    Support :math:`x \in \mathbb{N}_0`
    Mean :math:`\psi\mu`
    Var :math:`\psi\mu + \left (1 + \frac{\mu}{\alpha} + \frac{1-\psi}{\mu} \right)`
    ======== ==========================
    Parameters
    ----------
    psi : float
        Expected proportion of NegativeBinomial variates (0 < psi < 1)
    mu : float
        Poission distribution parameter (mu > 0).
    alpha : float
        Gamma distribution parameter (alpha > 0).
    """
    def __init__(self, psi, mu, alpha, *args, **kwargs):
        super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)
        self.mu = mu = tt.as_tensor_variable(mu)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.psi = psi = tt.as_tensor_variable(psi)
        # Delegate the NB part of the mixture to a plain NegativeBinomial.
        self.nb = NegativeBinomial.dist(mu, alpha)
        self.mode = self.nb.mode
    def random(self, point=None, size=None, repeat=None):
        # Sample via the gamma-Poisson mixture, then zero out draws with
        # probability 1 - psi.
        mu, alpha, psi = draw_values(
            [self.mu, self.alpha, self.psi], point=point)
        g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,
                             dist_shape=self.shape,
                             size=size)
        g[g == 0] = np.finfo(float).eps  # Just in case
        sampled = stats.poisson.rvs(g) * (np.random.random(np.squeeze(g.shape)) < psi)
        return reshape_sampled(sampled, size, self.shape)
    def logp(self, value):
        # Mixture log-pmf: log(psi) + NB logp for positive counts,
        # logaddexp of the two ways of producing a zero otherwise.
        alpha = self.alpha
        mu = self.mu
        psi = self.psi
        logp_other = tt.log(psi) + self.nb.logp(value)
        logp_0 = logaddexp(
            tt.log1p(-psi),
            tt.log(psi) + alpha * (tt.log(alpha) - tt.log(alpha + mu)))
        logp_val = tt.switch(
            tt.gt(value, 0),
            logp_other,
            logp_0)
        return bound(
            logp_val,
            0 <= value,
            0 <= psi, psi <= 1,
            mu > 0, alpha > 0)
    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used for notebook display.
        if dist is None:
            dist = self
        mu = dist.mu
        alpha = dist.alpha
        psi = dist.psi
        name_mu = get_variable_name(mu)
        name_alpha = get_variable_name(alpha)
        name_psi = get_variable_name(psi)
        return (r'${} \sim \text{{ZeroInflatedNegativeBinomial}}'
                r'(\mathit{{mu}}={}, \mathit{{alpha}}={}, '
                r'\mathit{{psi}}={})$'
                .format(name, name_mu, name_alpha, name_psi))
| {
"repo_name": "springcoil/pymc3",
"path": "pymc3/distributions/discrete.py",
"copies": "1",
"size": "27864",
"license": "apache-2.0",
"hash": -275463274920102980,
"line_mean": 32.8155339806,
"line_max": 109,
"alpha_frac": 0.4938271605,
"autogenerated": false,
"ratio": 3.63096168881939,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.462478884931939,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import z5py
import nifty.graph.rag as nrag
from cluster_tools.utils.segmentation_utils import mutex_watershed_with_seeds, mutex_watershed
# from cluster_tools.utils.segmentation_utils import compute_grid_graph
from two_pass_agglomeration import two_pass_agglomeration
from cremi_tools.viewer.volumina import view
# TODO this should also take strides and randomize_strides
def compute_state(affs, seg, offsets, n_attractive):
    """Compute the edge state (uv_ids, weights) of a segmentation.

    The first ``n_attractive`` affinity channels are accumulated as
    attractive edge weights over the region adjacency graph, the
    remaining channels as repulsive ones; edges where the repulsive
    evidence dominates get the (negated) repulsive weight.
    """
    # with affogato TODO debug this
    # FIXME the uv ids don't make sense!
    # grid_graph = compute_grid_graph(segmentation.shape)
    # uvs, weights, attractive = grid_graph.compute_state_for_segmentation(affs, segmentation, offsets,
    #                                                                      n_attractive_channels=3,
    #                                                                      ignore_label=False)
    # weights[np.logical_not(attractive)] *= -1
    # state = (uvs, weights)
    # with nifty
    rag = nrag.gridRag(seg, numberOfLabels=int(seg.max() + 1),
                       numberOfThreads=1)
    uv_ids = rag.uvIds()
    affs_attractive = affs[:n_attractive]
    # -2 corresponds to max value
    weights_attractive = nrag.accumulateAffinityStandartFeatures(rag, affs_attractive, offsets,
                                                                 numberOfThreads=1)[:, -2]
    # NOTE(review): np.require enforces C-contiguity, presumably required
    # by the nifty accumulation call — confirm.
    affs_repulsive = np.require(affs[n_attractive:], requirements='C')
    weights_repulsive = nrag.accumulateAffinityStandartFeatures(rag, affs_repulsive, offsets,
                                                                numberOfThreads=1)[:, -2]
    # Start from the attractive weights, then flip the sign wherever the
    # repulsive evidence is stronger.
    weights = weights_attractive
    repulsive = weights_repulsive > weights_attractive
    weights[repulsive] = -1*weights_repulsive[repulsive]
    return uv_ids, weights
def mws_agglomerator(affs, offsets, previous_segmentation=None,
                     previous_edges=None, previous_weights=None, return_state=False,
                     strides=None, randomize_strides=True):
    """Run a mutex watershed, optionally seeded with a previous result.

    Without a previous segmentation this is a plain mutex watershed.
    With one, the previous edges/weights are split into attractive
    (weight >= 0) and repulsive (weight < 0) sets and handed over as the
    seed state. When ``return_state`` is set, the edge state of the new
    segmentation is returned as well.
    """
    if previous_segmentation is None:
        segmentation = mutex_watershed(affs, offsets, strides,
                                       randomize_strides=randomize_strides)
    else:
        assert previous_edges is not None
        assert previous_weights is not None
        assert len(previous_edges) == len(previous_weights), "%i, %i" % (len(previous_edges),
                                                                         len(previous_weights))
        # Transform the seed state into the format expected by
        # mutex_watershed_with_seeds: repulsive edges are passed with
        # positive magnitude in their own entry.
        is_repulsive = previous_weights < 0
        is_attractive = np.logical_not(is_repulsive)
        seed_state = {
            'attractive': (previous_edges[is_attractive], previous_weights[is_attractive]),
            'repulsive': (previous_edges[is_repulsive], np.abs(previous_weights[is_repulsive])),
        }
        segmentation = mutex_watershed_with_seeds(affs, offsets, seeds=previous_segmentation,
                                                  strides=strides, randomize_strides=randomize_strides,
                                                  seed_state=seed_state)
    if not return_state:
        return segmentation
    # Three attractive (nearest-neighbor) channels are assumed here.
    return segmentation, compute_state(affs, segmentation, offsets, 3)
def test_tp():
    """Smoke test: run two-pass agglomeration on a local test volume and view it."""
    path = '/home/pape/Work/data/cluster_tools_test_data/test_data.n5'
    aff_key = '/volumes/full_affinities'
    # Load the full affinity volume with multi-threaded reading.
    dataset = z5py.File(path)[aff_key]
    dataset.n_threads = 8
    affs = dataset[:]
    # affs = affs[:, :10, :256]
    # affs = affs[:, :20, :256]
    print(affs.shape)
    # Direct neighbors, diagonal neighbors and two rings of
    # long-range offsets.
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
               [-1, -1, -1], [-1, 1, 1], [-1, -1, 1], [-1, 1, -1],
               [0, -9, 0], [0, 0, -9],
               [0, -9, -9], [0, 9, -9], [0, -9, -4], [0, -4, -9], [0, 4, -9], [0, 9, -4],
               [0, -27, 0], [0, 0, -27]]
    block_shape = [10, 256, 256]
    halo = [2, 32, 32]
    print("Start agglomeration")
    agglomerator = partial(mws_agglomerator, strides=[2, 10, 10], randomize_strides=True)
    segmentation = two_pass_agglomeration(affs, offsets, agglomerator, block_shape, halo, 4)
    print(segmentation.shape)
    view([affs[1], segmentation])
# Script entry point: run the two-pass agglomeration smoke test.
if __name__ == '__main__':
    test_tp()
| {
"repo_name": "DerThorsten/nifty",
"path": "test_two_pass.py",
"copies": "1",
"size": "4253",
"license": "mit",
"hash": 4510196145845485000,
"line_mean": 39.8942307692,
"line_max": 103,
"alpha_frac": 0.5767693393,
"autogenerated": false,
"ratio": 3.5265339966832503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9593538633939137,
"avg_score": 0.0019529404088226912,
"num_lines": 104
} |
from functools import partial
import numpy as np
def _obj_wrapper(func, args, kwargs, x):
return func(x, *args, **kwargs)
def _is_feasible_wrapper(func, x):
return np.all(func(x)>=0)
def _cons_none_wrapper(x):
return np.array([0])
def _cons_ieqcons_wrapper(ieqcons, args, kwargs, x):
return np.array([y(x, *args, **kwargs) for y in ieqcons])
def _cons_f_ieqcons_wrapper(f_ieqcons, args, kwargs, x):
return np.array(f_ieqcons(x, *args, **kwargs))
def pso(func, lb, ub, ieqcons=None, f_ieqcons=None, args=(), kwargs=None,
        swarmsize=100, omega=0.5, phip=0.5, phig=0.5, maxiter=100,
        minstep=1e-8, minfunc=1e-8, debug=False, processes=1,
        particle_output=False):
    """
    Perform a particle swarm optimization (PSO)
    Parameters
    ==========
    func : function
        The function to be minimized
    lb : array
        The lower bounds of the design variable(s)
    ub : array
        The upper bounds of the design variable(s)
    Optional
    ========
    ieqcons : list
        A list of functions of length n such that ieqcons[j](x,*args) >= 0.0 in
        a successfully optimized problem (Default: None, treated as no
        constraints)
    f_ieqcons : function
        Returns a 1-D array in which each element must be greater or equal
        to 0.0 in a successfully optimized problem. If f_ieqcons is specified,
        ieqcons is ignored (Default: None)
    args : tuple
        Additional arguments passed to objective and constraint functions
        (Default: empty tuple)
    kwargs : dict
        Additional keyword arguments passed to objective and constraint
        functions (Default: None, treated as empty dict)
    swarmsize : int
        The number of particles in the swarm (Default: 100)
    omega : scalar
        Particle velocity scaling factor (Default: 0.5)
    phip : scalar
        Scaling factor to search away from the particle's best known position
        (Default: 0.5)
    phig : scalar
        Scaling factor to search away from the swarm's best known position
        (Default: 0.5)
    maxiter : int
        The maximum number of iterations for the swarm to search (Default: 100)
    minstep : scalar
        The minimum stepsize of swarm's best position before the search
        terminates (Default: 1e-8)
    minfunc : scalar
        The minimum change of swarm's best objective value before the search
        terminates (Default: 1e-8)
    debug : boolean
        If True, progress statements will be displayed every iteration
        (Default: False)
    processes : int
        The number of processes to use to evaluate objective function and
        constraints (default: 1)
    particle_output : boolean
        Whether to include the best per-particle position and the objective
        values at those.
    Returns
    =======
    g : array
        The swarm's best known position (optimal design)
    f : scalar
        The objective value at ``g``
    p : array
        The best known position per particle
    pf: arrray
        The objective values at each position in p
    """
    # BUG FIX: the defaults were the mutable objects [] and {}; use None
    # sentinels instead (backward compatible for all callers).
    ieqcons = [] if ieqcons is None else ieqcons
    kwargs = {} if kwargs is None else kwargs

    assert len(lb) == len(ub), 'Lower- and upper-bounds must be the same length'
    assert hasattr(func, '__call__'), 'Invalid function handle'
    lb = np.array(lb)
    ub = np.array(ub)
    assert np.all(ub > lb), 'All upper-bound values must be greater than lower-bound values'

    vhigh = np.abs(ub - lb)
    vlow = -vhigh

    # Initialize objective function
    obj = partial(_obj_wrapper, func, args, kwargs)

    # Check for constraint function(s) #########################################
    if f_ieqcons is None:
        if not len(ieqcons):
            if debug:
                print('No constraints given.')
            cons = _cons_none_wrapper
        else:
            if debug:
                print('Converting ieqcons to a single constraint function')
            cons = partial(_cons_ieqcons_wrapper, ieqcons, args, kwargs)
    else:
        if debug:
            print('Single constraint function given in f_ieqcons')
        cons = partial(_cons_f_ieqcons_wrapper, f_ieqcons, args, kwargs)
    is_feasible = partial(_is_feasible_wrapper, cons)

    # Initialize the multiprocessing module if necessary
    mp_pool = None
    if processes > 1:
        import multiprocessing
        mp_pool = multiprocessing.Pool(processes)

    def _evaluate(positions):
        # Evaluate objective and feasibility for every particle position,
        # either in parallel or in a plain loop.
        if mp_pool is not None:
            fx = np.array(mp_pool.map(obj, positions))
            fs = np.array(mp_pool.map(is_feasible, positions))
        else:
            count = len(positions)
            fx = np.zeros(count)
            fs = np.zeros(count, dtype=bool)
            for i in range(count):
                fx[i] = obj(positions[i, :])
                fs[i] = is_feasible(positions[i, :])
        return fx, fs

    try:
        # Initialize the particle swarm ########################################
        S = swarmsize
        D = len(lb)  # the number of dimensions each particle has
        x = np.random.rand(S, D)  # particle positions
        v = np.zeros_like(x)  # particle velocities
        p = np.zeros_like(x)  # best particle positions
        fp = np.ones(S)*np.inf  # best particle function values
        g = []  # best swarm position
        fg = np.inf  # best swarm position starting value

        # Initialize the particle's position
        x = lb + x*(ub - lb)

        # Calculate objective and constraints for each particle
        fx, fs = _evaluate(x)

        # Store particle's best position (if constraints are satisfied)
        i_update = np.logical_and((fx < fp), fs)
        p[i_update, :] = x[i_update, :].copy()
        fp[i_update] = fx[i_update]

        # Update swarm's best position
        i_min = np.argmin(fp)
        if fp[i_min] < fg:
            fg = fp[i_min]
            g = p[i_min, :].copy()
        else:
            # At the start, there may not be any feasible starting point, so
            # just give it a temporary "best" point since it's likely to change
            g = x[0, :].copy()

        # Initialize the particle's velocity
        v = vlow + np.random.rand(S, D)*(vhigh - vlow)

        # Iterate until termination criterion met ##############################
        it = 1
        while it <= maxiter:
            rp = np.random.uniform(size=(S, D))
            rg = np.random.uniform(size=(S, D))

            # Update the particles velocities
            v = omega*v + phip*rp*(p - x) + phig*rg*(g - x)
            # Update the particles' positions
            x = x + v
            # Correct for bound violations
            maskl = x < lb
            masku = x > ub
            x = x*(~np.logical_or(maskl, masku)) + lb*maskl + ub*masku

            # Update objectives and constraints
            fx, fs = _evaluate(x)

            # Store particle's best position (if constraints are satisfied)
            i_update = np.logical_and((fx < fp), fs)
            p[i_update, :] = x[i_update, :].copy()
            fp[i_update] = fx[i_update]

            # Compare swarm's best position with global best position
            i_min = np.argmin(fp)
            if fp[i_min] < fg:
                if debug:
                    print('New best for swarm at iteration {:}: {:} {:}'\
                        .format(it, p[i_min, :], fp[i_min]))

                p_min = p[i_min, :].copy()
                stepsize = np.sqrt(np.sum((g - p_min)**2))

                if np.abs(fg - fp[i_min]) <= minfunc:
                    print('Stopping search: Swarm best objective change less than {:}'\
                        .format(minfunc))
                    if particle_output:
                        return p_min, fp[i_min], p, fp
                    else:
                        return p_min, fp[i_min]
                elif stepsize <= minstep:
                    print('Stopping search: Swarm best position change less than {:}'\
                        .format(minstep))
                    if particle_output:
                        return p_min, fp[i_min], p, fp
                    else:
                        return p_min, fp[i_min]
                else:
                    g = p_min.copy()
                    fg = fp[i_min]

            if debug:
                print('Best after iteration {:}: {:} {:}'.format(it, g, fg))
            it += 1

        print('Stopping search: maximum iterations reached --> {:}'.format(maxiter))

        if not is_feasible(g):
            print("However, the optimization couldn't find a feasible design. Sorry")
        if particle_output:
            return g, fg, p, fp
        else:
            return g, fg
    finally:
        # BUG FIX: the worker pool was previously never closed,
        # leaking worker processes on every call with processes > 1.
        if mp_pool is not None:
            mp_pool.close()
            mp_pool.join()
"repo_name": "sujithvm/skynet",
"path": "code/pso.py",
"copies": "1",
"size": "8374",
"license": "mit",
"hash": -9109218231293582000,
"line_mean": 34.7905982906,
"line_max": 90,
"alpha_frac": 0.5695008359,
"autogenerated": false,
"ratio": 3.790855590765052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48603564266650523,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from optimize_utils import *
def initial_population(domain, size):
    """Create a random starting population of the given shape for a domain."""
    if domain == "binary":
        return np.random.randint(2, size=size)
    if domain == "gaussian":
        return np.random.randn(*size)
    raise ValueError("Unknown domain: %s" % domain)
def child_generation(p1, p2):
    """Uniform crossover: each gene is picked from p1 or p2 at random."""
    take_first = np.random.randint(2, size=p1.shape)
    take_second = 1 - take_first
    return take_first * p1 + take_second * p2
def binary_mutation(population, mutation_rate):
    """Flip each bit independently with probability ``mutation_rate``.

    Note: mutates ``population`` in place (via XOR) and also returns it.
    """
    mutation_matrix = np.random.uniform(size=population.shape)
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
    # NumPy >= 1.24; use the builtin int instead.
    mutation_matrix = (mutation_matrix < mutation_rate).astype(int)
    population ^= mutation_matrix
    return population
def gaussian_mutation(population, mutation_rate):
    """Perturb every gene with additive Gaussian noise scaled by the
    mutation rate (does not modify the input array)."""
    noise = np.random.randn(*population.shape)
    return population + mutation_rate * noise
# Dispatch table from search-domain name to its mutation operator.
MUTATIONS = {
    "binary": binary_mutation,
    "gaussian": gaussian_mutation,
}
def mutation_function(domain, mutation_rate):
    """Look up the mutation operator for ``domain`` with the rate pre-bound."""
    return partial(MUTATIONS[domain], mutation_rate=mutation_rate)
def _minimize_genetic(fun,
                      x0,  # only used to get number of features
                      args=(),
                      callback=None,
                      cutoff_percentile=0.2,
                      decrease_percentile=True,
                      n_iter=100,
                      domain="binary",
                      mutation_rate=0.05,
                      batch_size=100,
                      maxiter=None,
                      keep_best=True):
    """Minimize ``fun`` with a simple generational genetic algorithm.

    Each generation draws ``batch_size`` candidates (initially at random,
    afterwards by uniform crossover of two randomly chosen surviving
    parents), mutates them, scores them via ``score_multi`` and keeps the
    best ``percentile`` fraction as the next parent pool.

    When ``decrease_percentile`` is True the survival cutoff is annealed
    linearly towards 0 so selection pressure increases over the run.

    TODO
    - have convergence criteria
    """
    if maxiter is not None:
        n_iter = int(maxiter / batch_size)
    best = None
    best_score = float("inf")
    population = None
    mutate = mutation_function(domain, mutation_rate)
    for percentile in np.linspace(cutoff_percentile, 0, n_iter):
        if population is None:
            population = initial_population(domain, (batch_size, len(x0)))
        else:
            # create child generation by crossing two random parents
            p1 = parents[np.random.choice(len(parents), batch_size, True)]
            p2 = parents[np.random.choice(len(parents), batch_size, True)]
            population = child_generation(p1, p2)
        # perform mutation
        population = mutate(population)
        # score population
        scores = score_multi(fun, population, args, callback)
        if not decrease_percentile:
            percentile = cutoff_percentile
        # find parent generation (boolean mask of survivors)
        cutoff = np.percentile(scores, percentile * 100)
        idx = scores <= cutoff
        parents = population[idx]
        # store best seen across all generations
        if keep_best:
            for trial, score in zip(population, scores):
                if score <= best_score:
                    best_score = score
                    best = trial
    if keep_best:
        x = best
        fval = best_score
    else:
        x = parents[0]
        # BUG FIX: was `scores[idx[0]]`, which indexes with a boolean
        # *scalar* (idx is a mask) and does not select the score of
        # parents[0]; index the masked array instead.
        fval = scores[idx][0]
    nfev = n_iter * batch_size
    return to_result(x=x, fun=fval, niter=n_iter, nfev=nfev)
| {
"repo_name": "diogo149/simbo",
"path": "optimize/genetic.py",
"copies": "1",
"size": "3059",
"license": "mit",
"hash": 3406762692997374000,
"line_mean": 30.5360824742,
"line_max": 74,
"alpha_frac": 0.5845047401,
"autogenerated": false,
"ratio": 4.196159122085048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5280663862185048,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from config import *
def position_encoding_init(n_position, d_pos_vec):
    """
    Generate the initial values for the sinusoid position encoding table.

    Row `pos` holds pos / 10000^(2*(j//2)/d_pos_vec) for channel j, with
    sin applied to even channels and cos to odd channels; row 0 is all
    zeros. Returned as float32 with shape [n_position, d_pos_vec].
    """
    positions = np.arange(n_position).reshape(-1, 1)
    channels = np.arange(d_pos_vec)
    # pos == 0 yields an all-zero row automatically (0 / anything == 0).
    table = positions / np.power(10000, 2 * (channels // 2) / d_pos_vec)
    table[1:, 0::2] = np.sin(table[1:, 0::2])  # dim 2i
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # dim 2i+1
    return table.astype("float32")
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         pre_softmax_shape=None,
                         post_softmax_shape=None,
                         cache=None):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing softmax activiation to mask certain selected positions so that
    they will not considered in attention weights.

    When `cache` is given (step-by-step inference), keys/values of previous
    time steps are concatenated in so decoding can proceed incrementally.
    """
    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")

    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      bias_attr=False,
                      num_flatten_dims=2)
        k = layers.fc(input=keys,
                      size=d_key * n_head,
                      bias_attr=False,
                      num_flatten_dims=2)
        v = layers.fc(input=values,
                      size=d_value * n_head,
                      bias_attr=False,
                      num_flatten_dims=2)
        return q, k, v

    def __split_heads(x, n_head):
        """
        Reshape the last dimension of inpunt tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        if n_head == 1:
            return x
        hidden_size = x.shape[-1]
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped = layers.reshape(
            x=x, shape=[0, 0, n_head, hidden_size // n_head])
        # permuate the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])

    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of inpunt tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        # BUG FIX: under Python 3, map() returns an iterator; the reshape op
        # expects a concrete list of ints, so wrap it in list().
        return layers.reshape(
            x=trans_x,
            shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])))

    def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
        """
        Scaled Dot-Product Attention
        """
        scaled_q = layers.scale(x=q, scale=d_model**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        # Reshape to 2-D for softmax, then back; actual_shape tensors handle
        # the dynamic batch/sequence sizes.
        weights = layers.reshape(
            x=layers.elementwise_add(
                x=product, y=attn_bias) if attn_bias else product,
            shape=[-1, product.shape[-1]],
            actual_shape=pre_softmax_shape,
            act="softmax")
        weights = layers.reshape(
            x=weights, shape=product.shape, actual_shape=post_softmax_shape)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                seed=ModelHyperParams.dropout_seed,
                is_test=False)
        out = layers.matmul(weights, v)
        return out

    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)

    if cache is not None:  # use cache and concat time steps
        k = cache["k"] = layers.concat([cache["k"], k], axis=1)
        v = cache["v"] = layers.concat([cache["v"], v], axis=1)

    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)

    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
                                                  dropout_rate)

    out = __combine_heads(ctx_multiheads)

    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         bias_attr=False,
                         num_flatten_dims=2)
    return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid):
    """
    Position-wise Feed-Forward Networks.

    Two linear transformations with a ReLU in between, applied to every
    position separately and identically.
    """
    inner = layers.fc(input=x,
                      size=d_inner_hid,
                      num_flatten_dims=2,
                      act="relu")
    return layers.fc(input=inner, size=d_hid, num_flatten_dims=2)
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
    """Apply the operations named in ``process_cmd`` to ``out``, one
    character at a time: "a" adds a residual connection from ``prev_out``,
    "n" applies layer normalization, "d" applies dropout. Used around the
    attention and feed-forward sub-layers.
    """
    for op in process_cmd:
        if op == "a":
            # Residual connection; skipped when no previous output is given.
            if prev_out:
                out = out + prev_out
        elif op == "n":
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.initializer.Constant(1.),
                bias_attr=fluid.initializer.Constant(0.))
        elif op == "d" and dropout_rate:
            out = layers.dropout(
                out,
                dropout_prob=dropout_rate,
                seed=ModelHyperParams.dropout_seed,
                is_test=False)
    return out
# pre_process_layer fixes prev_out=None, so its "a" (residual) command is a
# no-op; post_process_layer keeps the full four-argument signature.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder(src_word,
                    src_pos,
                    src_vocab_size,
                    src_emb_dim,
                    src_max_len,
                    dropout_rate=0.,
                    src_data_shape=None,
                    word_emb_param_name=None,
                    pos_enc_param_name=None):
    """Add word embeddings and position encodings.
    The output tensor has a shape of:
    [batch_size, max_src_length_in_batch, d_model].
    This module is used at the bottom of the encoder stacks.
    """
    src_word_emb = layers.embedding(
        src_word,
        size=[src_vocab_size, src_emb_dim],
        param_attr=fluid.ParamAttr(
            name=word_emb_param_name,
            initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
    # Scale embeddings by sqrt(d) before adding the position encoding.
    src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5)
    # The position table is a fixed (non-trainable) sinusoid lookup.
    src_pos_enc = layers.embedding(
        src_pos,
        size=[src_max_len, src_emb_dim],
        param_attr=fluid.ParamAttr(
            name=pos_enc_param_name, trainable=False))
    enc_input = src_word_emb + src_pos_enc
    # NOTE(review): batch_size and seq_len are not defined in this function;
    # presumably they come from the `from config import *` star-import at the
    # top of the file -- confirm. actual_shape overrides them at runtime.
    enc_input = layers.reshape(
        x=enc_input,
        shape=[batch_size, seq_len, src_emb_dim],
        actual_shape=src_data_shape)
    return layers.dropout(
        enc_input,
        dropout_prob=dropout_rate,
        seed=ModelHyperParams.dropout_seed,
        is_test=False) if dropout_rate else enc_input
# Bind the encoder/decoder position-encoding parameter names. Note that
# prepare_decoder wraps the *already-partial* prepare_encoder; the keyword
# supplied here overrides the one bound on the line above (functools.partial
# merges keywords with the outer binding taking precedence).
prepare_encoder = partial(
    prepare_encoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
    prepare_encoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  dropout_rate=0.,
                  pre_softmax_shape=None,
                  post_softmax_shape=None):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consits of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and droput.
    """
    # Self-attention: queries, keys and values all come from enc_input.
    attn_output = multi_head_attention(
        enc_input, enc_input, enc_input, attn_bias, d_key, d_value, d_model,
        n_head, dropout_rate, pre_softmax_shape, post_softmax_shape)
    # "dan" = dropout, add residual, layer-normalize (in that order).
    attn_output = post_process_layer(enc_input, attn_output, "dan",
                                     dropout_rate)
    ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
    return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input, attn_bias, n_layer, n_head, d_key, d_value,
            d_model, d_inner_hid, dropout_rate=0.,
            pre_softmax_shape=None, post_softmax_shape=None):
    """Stack ``n_layer`` identical encoder layers (see encoder_layer);
    each layer's output feeds the next."""
    for _ in range(n_layer):
        enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key,
                                   d_value, d_model, d_inner_hid,
                                   dropout_rate, pre_softmax_shape,
                                   post_softmax_shape)
        enc_input = enc_output
    return enc_output
def decoder_layer(dec_input,
                  enc_output,
                  slf_attn_bias,
                  dec_enc_attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  dropout_rate=0.,
                  slf_attn_pre_softmax_shape=None,
                  slf_attn_post_softmax_shape=None,
                  src_attn_pre_softmax_shape=None,
                  src_attn_post_softmax_shape=None,
                  cache=None):
    """ The layer to be stacked in decoder part.
    The structure of this module is similar to that in the encoder part except
    a multi-head attention is added to implement encoder-decoder attention.
    """
    # Masked self-attention over the (shifted) target sequence; `cache`
    # holds previous-step keys/values during incremental inference.
    slf_attn_output = multi_head_attention(
        dec_input,
        dec_input,
        dec_input,
        slf_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        dropout_rate,
        slf_attn_pre_softmax_shape,
        slf_attn_post_softmax_shape,
        cache, )
    slf_attn_output = post_process_layer(
        dec_input,
        slf_attn_output,
        "dan",  # "dan": dropout + residual connection + layer normalization
        dropout_rate, )
    # Encoder-decoder attention: queries from the decoder, keys/values
    # from the encoder output.
    enc_attn_output = multi_head_attention(
        slf_attn_output,
        enc_output,
        enc_output,
        dec_enc_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        dropout_rate,
        src_attn_pre_softmax_shape,
        src_attn_post_softmax_shape, )
    enc_attn_output = post_process_layer(
        slf_attn_output,
        enc_attn_output,
        "dan",  # "dan": dropout + residual connection + layer normalization
        dropout_rate, )
    ffd_output = positionwise_feed_forward(
        enc_attn_output,
        d_inner_hid,
        d_model, )
    dec_output = post_process_layer(
        enc_attn_output,
        ffd_output,
        "dan",  # "dan": dropout + residual connection + layer normalization
        dropout_rate, )
    return dec_output
def decoder(dec_input,
            enc_output,
            dec_slf_attn_bias,
            dec_enc_attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            dropout_rate=0.,
            slf_attn_pre_softmax_shape=None,
            slf_attn_post_softmax_shape=None,
            src_attn_pre_softmax_shape=None,
            src_attn_post_softmax_shape=None,
            caches=None):
    """Stack ``n_layer`` identical decoder layers; when ``caches`` is
    given, layer i receives caches[i] for incremental inference."""
    for layer_idx in range(n_layer):
        layer_cache = None if caches is None else caches[layer_idx]
        dec_output = decoder_layer(
            dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias,
            n_head, d_key, d_value, d_model, d_inner_hid, dropout_rate,
            slf_attn_pre_softmax_shape, slf_attn_post_softmax_shape,
            src_attn_pre_softmax_shape, src_attn_post_softmax_shape,
            layer_cache)
        dec_input = dec_output
    return dec_output
def make_all_inputs(input_fields):
    """
    Define the input data layers for the transformer model.

    Each field name is looked up in ``input_descs`` (shape, dtype and an
    optional lod level) and turned into a fluid data layer.
    """
    def _data_layer(field):
        desc = input_descs[field]
        lod = desc[2] if len(desc) == 3 else 0
        return layers.data(
            name=field,
            shape=desc[0],
            dtype=desc[1],
            lod_level=lod,
            append_batch_size=False)
    return [_data_layer(field) for field in input_fields]
def transformer(
        src_vocab_size,
        trg_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        weight_sharing,
        label_smooth_eps, ):
    """Build the full training network: encoder + decoder + weighted
    cross-entropy loss with optional label smoothing.

    Returns (sum_cost, avg_cost, predict, token_num).
    """
    if weight_sharing:
        # BUG FIX: the original asserted src_vocab_size == src_vocab_size
        # (always true); embedding weight sharing requires the source and
        # *target* vocabularies to match.
        assert src_vocab_size == trg_vocab_size, (
            "Vocabularies in source and target should be same for weight sharing."
        )
    enc_inputs = make_all_inputs(encoder_data_input_fields +
                                 encoder_util_input_fields)

    enc_output = wrap_encoder(
        src_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        weight_sharing,
        enc_inputs, )

    dec_inputs = make_all_inputs(decoder_data_input_fields[:-1] +
                                 decoder_util_input_fields)

    predict = wrap_decoder(
        trg_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        weight_sharing,
        dec_inputs,
        enc_output, )

    # Padding index do not contribute to the total loss. The weights is used to
    # cancel padding index in calculating the loss.
    label, weights = make_all_inputs(label_data_input_fields)
    if label_smooth_eps:
        label = layers.label_smooth(
            label=layers.one_hot(
                input=label, depth=trg_vocab_size),
            epsilon=label_smooth_eps)

    cost = layers.softmax_with_cross_entropy(
        logits=predict,
        label=label,
        soft_label=True if label_smooth_eps else False)
    weighted_cost = cost * weights
    sum_cost = layers.reduce_sum(weighted_cost)
    token_num = layers.reduce_sum(weights)
    avg_cost = sum_cost / token_num
    # NOTE(review): stop_gradient is set on avg_cost rather than token_num --
    # confirm that training optimizes sum_cost, otherwise no gradients flow.
    avg_cost.stop_gradient = True
    return sum_cost, avg_cost, predict, token_num
def wrap_encoder(src_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 dropout_rate,
                 weight_sharing,
                 enc_inputs=None):
    """
    The wrapper assembles together all needed layers for the encoder.

    When enc_inputs is None, the input data layers are created here too,
    which allows building a standalone encoder program for inference.
    """
    if enc_inputs is None:
        # This is used to implement independent encoder program in inference.
        src_word, src_pos, src_slf_attn_bias, src_data_shape, \
        slf_attn_pre_softmax_shape, slf_attn_post_softmax_shape = \
            make_all_inputs(encoder_data_input_fields +
                encoder_util_input_fields)
    else:
        src_word, src_pos, src_slf_attn_bias, src_data_shape, \
        slf_attn_pre_softmax_shape, slf_attn_post_softmax_shape = \
            enc_inputs
    # Word embedding + fixed position encoding; the embedding parameter name
    # is shared with the decoder when weight sharing is enabled.
    enc_input = prepare_encoder(
        src_word,
        src_pos,
        src_vocab_size,
        d_model,
        max_length,
        dropout_rate,
        src_data_shape,
        word_emb_param_name=word_emb_param_names[0])
    enc_output = encoder(
        enc_input,
        src_slf_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        slf_attn_pre_softmax_shape,
        slf_attn_post_softmax_shape, )
    return enc_output
def wrap_decoder(trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 dropout_rate,
                 weight_sharing,
                 dec_inputs=None,
                 enc_output=None,
                 caches=None):
    """
    The wrapper assembles together all needed layers for the decoder.

    When dec_inputs is None, the input data layers (including enc_output)
    are created here, allowing a standalone decoder program for inference.
    """
    if dec_inputs is None:
        # This is used to implement independent decoder program in inference.
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, \
        enc_output, trg_data_shape, slf_attn_pre_softmax_shape, \
        slf_attn_post_softmax_shape, src_attn_pre_softmax_shape, \
        src_attn_post_softmax_shape = make_all_inputs(
            decoder_data_input_fields + decoder_util_input_fields)
    else:
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, \
        trg_data_shape, slf_attn_pre_softmax_shape, \
        slf_attn_post_softmax_shape, src_attn_pre_softmax_shape, \
        src_attn_post_softmax_shape = dec_inputs
    # Target embedding + position encoding; reuse the source embedding
    # parameter when weight sharing is enabled.
    dec_input = prepare_decoder(
        trg_word,
        trg_pos,
        trg_vocab_size,
        d_model,
        max_length,
        dropout_rate,
        trg_data_shape,
        word_emb_param_name=word_emb_param_names[0]
        if weight_sharing else word_emb_param_names[1])
    dec_output = decoder(
        dec_input,
        enc_output,
        trg_slf_attn_bias,
        trg_src_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        slf_attn_pre_softmax_shape,
        slf_attn_post_softmax_shape,
        src_attn_pre_softmax_shape,
        src_attn_post_softmax_shape,
        caches, )
    # Return logits for training and probs for inference.
    if weight_sharing:
        # Tie the output projection to the shared embedding matrix.
        predict = layers.reshape(
            x=layers.matmul(
                x=dec_output,
                y=fluid.get_var(word_emb_param_names[0]),
                transpose_y=True),
            shape=[-1, trg_vocab_size],
            act="softmax" if dec_inputs is None else None)
    else:
        predict = layers.reshape(
            x=layers.fc(input=dec_output,
                        size=trg_vocab_size,
                        bias_attr=False,
                        num_flatten_dims=2),
            shape=[-1, trg_vocab_size],
            act="softmax" if dec_inputs is None else None)
    return predict
def fast_decode(
        src_vocab_size,
        trg_vocab_size,
        max_in_len,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        weight_sharing,
        beam_size,
        max_out_len,
        eos_idx, ):
    """
    Use beam search to decode. Caches will be used to store states of history
    steps which can make the decoding faster.

    Returns (finished_ids, finished_scores) produced by beam_search_decode.
    """
    enc_output = wrap_encoder(src_vocab_size, max_in_len, n_layer, n_head,
                              d_key, d_value, d_model, d_inner_hid,
                              dropout_rate, weight_sharing)
    start_tokens, init_scores, trg_src_attn_bias, trg_data_shape, \
        slf_attn_pre_softmax_shape, slf_attn_post_softmax_shape, \
        src_attn_pre_softmax_shape, src_attn_post_softmax_shape, \
        attn_pre_softmax_shape_delta, attn_post_softmax_shape_delta = \
        make_all_inputs(fast_decoder_data_input_fields +
                        fast_decoder_util_input_fields)

    def beam_search():
        # Loop bound and step counter live as 1-element tensors so they can
        # drive the fluid While op.
        max_len = layers.fill_constant(
            shape=[1], dtype=start_tokens.dtype, value=max_out_len)
        step_idx = layers.fill_constant(
            shape=[1], dtype=start_tokens.dtype, value=0)
        cond = layers.less_than(x=step_idx, y=max_len)
        while_op = layers.While(cond)
        # array states will be stored for each step.
        ids = layers.array_write(start_tokens, step_idx)
        scores = layers.array_write(init_scores, step_idx)
        # cell states will be overwrited at each step.
        # caches contains states of history steps to reduce redundant
        # computation in decoder.
        caches = [{
            "k": layers.fill_constant_batch_size_like(
                input=start_tokens,
                shape=[-1, 0, d_model],
                dtype=enc_output.dtype,
                value=0),
            "v": layers.fill_constant_batch_size_like(
                input=start_tokens,
                shape=[-1, 0, d_model],
                dtype=enc_output.dtype,
                value=0)
        } for i in range(n_layer)]
        with while_op.block():
            pre_ids = layers.array_read(array=ids, i=step_idx)
            pre_scores = layers.array_read(array=scores, i=step_idx)
            # sequence_expand can gather sequences according to lod thus can be
            # used in beam search to sift states corresponding to selected ids.
            pre_src_attn_bias = layers.sequence_expand(
                x=trg_src_attn_bias, y=pre_scores)
            pre_enc_output = layers.sequence_expand(x=enc_output, y=pre_scores)
            pre_caches = [{
                "k": layers.sequence_expand(
                    x=cache["k"], y=pre_scores),
                "v": layers.sequence_expand(
                    x=cache["v"], y=pre_scores),
            } for cache in caches]
            # Every surviving beam entry decodes the same position, so the
            # position input is just (step_idx + 1) broadcast over the batch.
            pre_pos = layers.elementwise_mul(
                x=layers.fill_constant_batch_size_like(
                    input=pre_enc_output,  # can't use pre_ids here since it has lod
                    value=1,
                    shape=[-1, 1],
                    dtype=pre_ids.dtype),
                y=layers.increment(
                    x=step_idx, value=1.0, in_place=False),
                axis=0)
            logits = wrap_decoder(
                trg_vocab_size,
                max_in_len,
                n_layer,
                n_head,
                d_key,
                d_value,
                d_model,
                d_inner_hid,
                dropout_rate,
                weight_sharing,
                dec_inputs=(
                    pre_ids, pre_pos, None, pre_src_attn_bias, trg_data_shape,
                    slf_attn_pre_softmax_shape, slf_attn_post_softmax_shape,
                    src_attn_pre_softmax_shape, src_attn_post_softmax_shape),
                enc_output=pre_enc_output,
                caches=pre_caches)
            # Accumulate log-probabilities of the top-k expansions onto the
            # parent beam scores.
            topk_scores, topk_indices = layers.topk(
                input=layers.softmax(logits), k=beam_size)
            accu_scores = layers.elementwise_add(
                x=layers.log(topk_scores),
                y=layers.reshape(
                    pre_scores, shape=[-1]),
                axis=0)
            # beam_search op uses lod to distinguish branches.
            topk_indices = layers.lod_reset(topk_indices, pre_ids)
            selected_ids, selected_scores = layers.beam_search(
                pre_ids=pre_ids,
                pre_scores=pre_scores,
                ids=topk_indices,
                scores=accu_scores,
                beam_size=beam_size,
                end_id=eos_idx)

            layers.increment(x=step_idx, value=1.0, in_place=True)
            # update states
            layers.array_write(selected_ids, i=step_idx, array=ids)
            layers.array_write(selected_scores, i=step_idx, array=scores)
            layers.assign(pre_src_attn_bias, trg_src_attn_bias)
            layers.assign(pre_enc_output, enc_output)
            for i in range(n_layer):
                layers.assign(pre_caches[i]["k"], caches[i]["k"])
                layers.assign(pre_caches[i]["v"], caches[i]["v"])
            # Grow the softmax reshape targets by one time step.
            layers.assign(
                layers.elementwise_add(
                    x=slf_attn_pre_softmax_shape,
                    y=attn_pre_softmax_shape_delta),
                slf_attn_pre_softmax_shape)
            layers.assign(
                layers.elementwise_add(
                    x=slf_attn_post_softmax_shape,
                    y=attn_post_softmax_shape_delta),
                slf_attn_post_softmax_shape)
            # Continue while below the length limit and some beam is alive.
            length_cond = layers.less_than(x=step_idx, y=max_len)
            finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))
            layers.logical_and(x=length_cond, y=finish_cond, out=cond)

        finished_ids, finished_scores = layers.beam_search_decode(
            ids, scores, beam_size=beam_size, end_id=eos_idx)
        return finished_ids, finished_scores

    finished_ids, finished_scores = beam_search()
    return finished_ids, finished_scores
| {
"repo_name": "lcy-seso/models",
"path": "fluid/neural_machine_translation/transformer/model.py",
"copies": "1",
"size": "26190",
"license": "apache-2.0",
"hash": 662389911366689800,
"line_mean": 34.2016129032,
"line_max": 85,
"alpha_frac": 0.5373043146,
"autogenerated": false,
"ratio": 3.7366243401341133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4773928654734113,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from config import *
def wrap_layer_with_block(layer, block_idx):
    """
    Make layer define support indicating block, by which we can add layers
    to other blocks within current block. This will make it easy to define
    cache among while loop.
    """

    class BlockGuard(object):
        """
        BlockGuard class.

        BlockGuard class is used to switch to the given block in a program by
        using the Python `with` keyword.
        """

        def __init__(self, block_idx=None, main_program=None):
            self.main_program = fluid.default_main_program(
            ) if main_program is None else main_program
            # Remember where we came from so __exit__ can restore it.
            self.old_block_idx = self.main_program.current_block().idx
            self.new_block_idx = block_idx

        def __enter__(self):
            self.main_program.current_block_idx = self.new_block_idx

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.main_program.current_block_idx = self.old_block_idx
            if exc_type is not None:
                return False  # re-raise exception
            return True

    # Every call to the wrapped layer builds its ops/vars inside block_idx.
    def layer_wrapper(*args, **kwargs):
        with BlockGuard(block_idx):
            return layer(*args, **kwargs)

    return layer_wrapper
def position_encoding_init(n_position, d_pos_vec):
    """
    Generate the initial values for the sinusoid position encoding table.

    Follows the tensor2tensor timing-signal formulation: the first half of
    the channels holds sin(pos * inv_timescale), the second half
    cos(pos * inv_timescale), with timescales spaced geometrically from
    1 to 1e4. Returns float32 of shape [n_position, d_pos_vec].
    """
    channels = d_pos_vec
    position = np.arange(n_position)
    num_timescales = channels // 2
    log_timescale_increment = (np.log(float(1e4) / float(1)) /
                               (num_timescales - 1))
    # BUG FIX: the negated increment must sit *inside* the exponential
    # (exp(-i * log_inc)). The previous np.exp(np.arange(...)) * -log_inc
    # produced large negative "timescales" and a meaningless encoding.
    inv_timescales = np.exp(
        np.arange(num_timescales) * -log_timescale_increment)
    scaled_time = np.expand_dims(position, 1) * np.expand_dims(inv_timescales,
                                                               0)
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    # Zero-pad one trailing channel when d_pos_vec is odd.
    signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant')
    position_enc = signal
    return position_enc.astype("float32")
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         cache=None,
                         gather_idx=None,
                         static_kv=False):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing softmax activiation to mask certain selected positions so that
    they will not considered in attention weights.

    For inference, `cache` carries decoder key/value state: self-attention
    caches grow by one step each call, while encoder-decoder attention
    (static_kv=True) keys/values are computed once and reused.
    """
    # None keys/values mean self-attention on `queries`.
    keys = queries if keys is None else keys
    values = keys if values is None else values

    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")

    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      bias_attr=False,
                      num_flatten_dims=2)
        # For encoder-decoder attention in inference, insert the ops and vars
        # into global block to use as cache among beam search.
        fc_layer = wrap_layer_with_block(
            layers.fc, fluid.default_main_program().current_block()
            .parent_idx) if cache is not None and static_kv else layers.fc
        k = fc_layer(
            input=keys,
            size=d_key * n_head,
            bias_attr=False,
            num_flatten_dims=2)
        v = fc_layer(
            input=values,
            size=d_value * n_head,
            bias_attr=False,
            num_flatten_dims=2)
        return q, k, v

    def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Reshape input tensors at the last dimension to split multi-heads
        and then transpose. Specifically, transform the input tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] to the output tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped_q = layers.reshape(
            x=queries, shape=[0, 0, n_head, d_key], inplace=True)
        # permuate the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
        # For encoder-decoder attention in inference, insert the ops and vars
        # into global block to use as cache among beam search.
        reshape_layer = wrap_layer_with_block(
            layers.reshape,
            fluid.default_main_program().current_block()
            .parent_idx) if cache is not None and static_kv else layers.reshape
        transpose_layer = wrap_layer_with_block(
            layers.transpose,
            fluid.default_main_program().current_block().
            parent_idx) if cache is not None and static_kv else layers.transpose
        reshaped_k = reshape_layer(
            x=keys, shape=[0, 0, n_head, d_key], inplace=True)
        k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])
        reshaped_v = reshape_layer(
            x=values, shape=[0, 0, n_head, d_value], inplace=True)
        v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])

        if cache is not None:  # only for faster inference
            if static_kv:  # For encoder-decoder attention in inference
                cache_k, cache_v = cache["static_k"], cache["static_v"]
                # To init the static_k and static_v in cache.
                # Maybe we can use condition_op(if_else) to do these at the first
                # step in while loop to replace these, however it might be less
                # efficient.
                static_cache_init = wrap_layer_with_block(
                    layers.assign,
                    fluid.default_main_program().current_block().parent_idx)
                static_cache_init(k, cache_k)
                static_cache_init(v, cache_v)
            else:  # For decoder self-attention in inference
                cache_k, cache_v = cache["k"], cache["v"]
            # gather cell states corresponding to selected parent
            select_k = layers.gather(cache_k, index=gather_idx)
            select_v = layers.gather(cache_v, index=gather_idx)
            if not static_kv:
                # For self attention in inference, use cache and concat time steps.
                select_k = layers.concat([select_k, k], axis=2)
                select_v = layers.concat([select_v, v], axis=2)
            # update cell states(caches) cached in global block
            layers.assign(select_k, cache_k)
            layers.assign(select_v, cache_v)
            return q, select_k, select_v
        return q, k, v

    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of inpunt tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")

        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        return layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=True)

    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention
        """
        # matmul's alpha fuses the 1/sqrt(d) scaling into the product.
        product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                seed=ModelHyperParams.dropout_seed,
                is_test=False)
        out = layers.matmul(weights, v)
        return out

    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
    q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)

    # NOTE(review): d_model is passed where the inner function names the
    # parameter d_key, so the attention logits are scaled by d_model**-0.5
    # rather than d_key**-0.5 -- confirm this is intended.
    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
                                                  dropout_rate)

    out = __combine_heads(ctx_multiheads)

    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         bias_attr=False,
                         num_flatten_dims=2)
    return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid, dropout_rate):
    """
    Position-wise Feed-Forward Networks.

    Two linear transformations with a ReLU in between (and optional
    dropout after the first), applied to each position independently.
    """
    inner = layers.fc(input=x,
                      size=d_inner_hid,
                      num_flatten_dims=2,
                      act="relu")
    if dropout_rate:
        inner = layers.dropout(
            inner,
            dropout_prob=dropout_rate,
            seed=ModelHyperParams.dropout_seed,
            is_test=False)
    return layers.fc(input=inner, size=d_hid, num_flatten_dims=2)
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):
    """Run ``out`` through the command string ``process_cmd``: each "a"
    adds a residual from ``prev_out``, each "n" layer-normalizes and each
    "d" applies dropout. Wraps the attention/FFN sub-layers."""
    for command in process_cmd:
        if command == "a":
            # Residual add, skipped when there is no previous output.
            if prev_out:
                out = out + prev_out
        elif command == "n":
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.initializer.Constant(1.),
                bias_attr=fluid.initializer.Constant(0.))
        elif command == "d":
            # Dropout only when a non-zero rate was configured.
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    seed=ModelHyperParams.dropout_seed,
                    is_test=False)
    return out
# pre_process_layer fixes prev_out=None, so its "a" (residual) command is a
# no-op; post_process_layer keeps the full four-argument signature.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder_decoder(src_word,
                            src_pos,
                            src_vocab_size,
                            src_emb_dim,
                            src_max_len,
                            dropout_rate=0.,
                            word_emb_param_name=None,
                            pos_enc_param_name=None):
    """Add word embeddings and position encodings.
    The output tensor has a shape of:
    [batch_size, max_src_length_in_batch, d_model].
    This module is used at the bottom of the encoder stacks.
    """
    src_word_emb = layers.embedding(
        src_word,
        size=[src_vocab_size, src_emb_dim],
        padding_idx=ModelHyperParams.bos_idx,  # set embedding of bos to 0
        param_attr=fluid.ParamAttr(
            name=word_emb_param_name,
            initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
    # Scale embeddings by sqrt(d) before adding the position encoding.
    src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5)
    src_pos_enc = layers.embedding(
        src_pos,
        size=[src_max_len, src_emb_dim],
        param_attr=fluid.ParamAttr(
            name=pos_enc_param_name, trainable=False))
    # The sinusoid table is fixed; make sure no gradient flows into it.
    src_pos_enc.stop_gradient = True
    enc_input = src_word_emb + src_pos_enc
    return layers.dropout(
        enc_input,
        dropout_prob=dropout_rate,
        seed=ModelHyperParams.dropout_seed,
        is_test=False) if dropout_rate else enc_input
# Specialize the shared embedding/position routine for each side of the
# model, binding the matching position-encoding parameter name.
prepare_encoder = partial(
    prepare_encoder_decoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
    prepare_encoder_decoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  preprocess_cmd="n",
                  postprocess_cmd="da"):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consits of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and droput.
    """
    # Pre-norm self-attention (keys/values None => use the queries tensor).
    attn_output = multi_head_attention(
        pre_process_layer(enc_input, preprocess_cmd,
                          prepostprocess_dropout), None, None, attn_bias, d_key,
        d_value, d_model, n_head, attention_dropout)
    # Default postprocess_cmd "da": dropout then residual add.
    attn_output = post_process_layer(enc_input, attn_output, postprocess_cmd,
                                     prepostprocess_dropout)
    ffd_output = positionwise_feed_forward(
        pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
        d_inner_hid, d_model, relu_dropout)
    return post_process_layer(attn_output, ffd_output, postprocess_cmd,
                              prepostprocess_dropout)
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            preprocess_cmd="n",
            postprocess_cmd="da"):
    """Stack ``n_layer`` identical encoder_layer blocks.

    Each layer consumes the previous layer's output; a final
    pre_process_layer pass (normalization per ``preprocess_cmd``) is applied
    to the last layer's output before returning it.
    """
    layer_out = enc_input
    for _ in range(n_layer):
        layer_out = encoder_layer(
            layer_out,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            preprocess_cmd,
            postprocess_cmd, )
    return pre_process_layer(layer_out, preprocess_cmd, prepostprocess_dropout)
def decoder_layer(dec_input,
                  enc_output,
                  slf_attn_bias,
                  dec_enc_attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  preprocess_cmd,
                  postprocess_cmd,
                  cache=None,
                  gather_idx=None):
    """One decoder block, stackable to form a deep decoder.

    Mirrors the encoder block but inserts an encoder-decoder attention
    sub-layer between the (masked) self-attention and the position-wise
    feed-forward network.  ``cache``/``gather_idx`` are only used by the
    incremental beam-search decoding path.
    """
    # Masked self-attention over the (pre-processed) decoder input; None
    # keys/values select self-attention mode.
    normed_input = pre_process_layer(dec_input, preprocess_cmd,
                                     prepostprocess_dropout)
    slf_attn_output = multi_head_attention(
        normed_input,
        None,
        None,
        slf_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        cache=cache,
        gather_idx=gather_idx)
    slf_attn_output = post_process_layer(dec_input, slf_attn_output,
                                         postprocess_cmd,
                                         prepostprocess_dropout)
    # Encoder-decoder attention; static_kv allows the key/value projections
    # of enc_output to be computed once and reused across decoding steps.
    normed_slf_attn = pre_process_layer(slf_attn_output, preprocess_cmd,
                                        prepostprocess_dropout)
    enc_attn_output = multi_head_attention(
        normed_slf_attn,
        enc_output,
        enc_output,
        dec_enc_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        cache=cache,
        gather_idx=gather_idx,
        static_kv=True)
    enc_attn_output = post_process_layer(slf_attn_output, enc_attn_output,
                                         postprocess_cmd,
                                         prepostprocess_dropout)
    # Position-wise feed-forward network.
    normed_enc_attn = pre_process_layer(enc_attn_output, preprocess_cmd,
                                        prepostprocess_dropout)
    ffd_output = positionwise_feed_forward(normed_enc_attn, d_inner_hid,
                                           d_model, relu_dropout)
    return post_process_layer(enc_attn_output, ffd_output, postprocess_cmd,
                              prepostprocess_dropout)
def decoder(dec_input,
            enc_output,
            dec_slf_attn_bias,
            dec_enc_attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            preprocess_cmd,
            postprocess_cmd,
            caches=None,
            gather_idx=None):
    """Stack ``n_layer`` identical decoder_layer blocks.

    Each layer gets its own entry of ``caches`` (when provided by the
    incremental decoding path); a final pre_process_layer pass is applied to
    the last layer's output before returning it.
    """
    layer_out = dec_input
    for layer_id in range(n_layer):
        layer_cache = None if caches is None else caches[layer_id]
        layer_out = decoder_layer(
            layer_out,
            enc_output,
            dec_slf_attn_bias,
            dec_enc_attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            preprocess_cmd,
            postprocess_cmd,
            cache=layer_cache,
            gather_idx=gather_idx)
    return pre_process_layer(layer_out, preprocess_cmd, prepostprocess_dropout)
def make_all_inputs(input_fields):
    """Create one ``layers.data`` input variable per named field.

    Shape, dtype and (optional) LoD level for each field are looked up in
    the global ``input_descs`` table; the returned list preserves the order
    of ``input_fields``.
    """
    inputs = []
    for field in input_fields:
        desc = input_descs[field]
        lod_level = desc[2] if len(desc) == 3 else 0
        inputs.append(
            layers.data(
                name=field,
                shape=desc[0],
                dtype=desc[1],
                lod_level=lod_level,
                append_batch_size=False))
    return inputs
def make_all_py_reader_inputs(input_fields, is_test=False):
    """Build a py_reader that feeds every field and unpack its outputs.

    Returns ``(input_vars, reader)``; the reader name distinguishes the
    train and test programs so both can coexist.
    """
    descs = [input_descs[field] for field in input_fields]
    reader = layers.py_reader(
        capacity=20,
        name="test_reader" if is_test else "train_reader",
        shapes=[d[0] for d in descs],
        dtypes=[d[1] for d in descs],
        lod_levels=[d[2] if len(d) == 3 else 0 for d in descs])
    return layers.read_file(reader), reader
def transformer(src_vocab_size,
                trg_vocab_size,
                max_length,
                n_layer,
                n_head,
                d_key,
                d_value,
                d_model,
                d_inner_hid,
                prepostprocess_dropout,
                attention_dropout,
                relu_dropout,
                preprocess_cmd,
                postprocess_cmd,
                weight_sharing,
                label_smooth_eps,
                use_py_reader=False,
                is_test=False):
    """
    Build the full training network: encoder, decoder and the weighted
    (optionally label-smoothed) cross-entropy loss.

    Returns (sum_cost, avg_cost, predict, token_num, reader); reader is the
    py_reader when use_py_reader is True, otherwise None.
    """
    if weight_sharing:
        assert src_vocab_size == trg_vocab_size, (
            "Vocabularies in source and target should be same for weight sharing."
        )
    # decoder_data_input_fields[:-1]: the final decoder field (enc_output,
    # see wrap_decoder's inference path) is not fed during training.
    data_input_names = encoder_data_input_fields + \
                decoder_data_input_fields[:-1] + label_data_input_fields
    if use_py_reader:
        all_inputs, reader = make_all_py_reader_inputs(data_input_names,
                                                       is_test)
    else:
        all_inputs = make_all_inputs(data_input_names)
    # Slice the flat input list back into encoder inputs, decoder inputs and
    # the trailing (label, weights) pair.
    enc_inputs_len = len(encoder_data_input_fields)
    dec_inputs_len = len(decoder_data_input_fields[:-1])
    enc_inputs = all_inputs[0:enc_inputs_len]
    dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len + dec_inputs_len]
    label = all_inputs[-2]
    weights = all_inputs[-1]
    enc_output = wrap_encoder(
        src_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd,
        weight_sharing,
        enc_inputs, )
    predict = wrap_decoder(
        trg_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd,
        weight_sharing,
        dec_inputs,
        enc_output, )
    # Padding index do not contribute to the total loss. The weights is used to
    # cancel padding index in calculating the loss.
    if label_smooth_eps:
        # Smoothed one-hot targets become soft labels for the loss below.
        label = layers.label_smooth(
            label=layers.one_hot(
                input=label, depth=trg_vocab_size),
            epsilon=label_smooth_eps)
    cost = layers.softmax_with_cross_entropy(
        logits=predict,
        label=label,
        soft_label=True if label_smooth_eps else False)
    weighted_cost = cost * weights
    sum_cost = layers.reduce_sum(weighted_cost)
    token_num = layers.reduce_sum(weights)
    # The token count is only a normalizer; no gradient should flow through it.
    token_num.stop_gradient = True
    avg_cost = sum_cost / token_num
    return sum_cost, avg_cost, predict, token_num, reader if use_py_reader else None
def wrap_encoder(src_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 enc_inputs=None):
    """
    The wrapper assembles together all needed layers for the encoder.

    If enc_inputs is None the (src_word, src_pos, src_slf_attn_bias) input
    variables are created here, which allows building a stand-alone encoder
    program for inference; otherwise the provided triple is used.
    """
    if enc_inputs is None:
        # This is used to implement independent encoder program in inference.
        src_word, src_pos, src_slf_attn_bias = make_all_inputs(
            encoder_data_input_fields)
    else:
        src_word, src_pos, src_slf_attn_bias = enc_inputs
    # Word embedding plus fixed positional encoding; word_emb_param_names[0]
    # is the table also reused by the decoder and the pre-softmax projection
    # when weight sharing is enabled (see wrap_decoder).
    enc_input = prepare_encoder(
        src_word,
        src_pos,
        src_vocab_size,
        d_model,
        max_length,
        prepostprocess_dropout,
        word_emb_param_name=word_emb_param_names[0])
    enc_output = encoder(
        enc_input,
        src_slf_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd, )
    return enc_output
def wrap_decoder(trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 dec_inputs=None,
                 enc_output=None,
                 caches=None,
                 gather_idx=None):
    """
    The wrapper assembles together all needed layers for the decoder.

    When dec_inputs is None all decoder inputs (including enc_output) are
    created here, producing a stand-alone decoder program for inference and
    returning softmax probabilities; otherwise raw logits are returned.
    caches/gather_idx are only used by the incremental beam-search path
    (see fast_decode).
    """
    if dec_inputs is None:
        # This is used to implement independent decoder program in inference.
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, enc_output = \
            make_all_inputs(decoder_data_input_fields)
    else:
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs
    # With weight sharing the decoder reuses the encoder's embedding table.
    dec_input = prepare_decoder(
        trg_word,
        trg_pos,
        trg_vocab_size,
        d_model,
        max_length,
        prepostprocess_dropout,
        word_emb_param_name=word_emb_param_names[0]
        if weight_sharing else word_emb_param_names[1])
    dec_output = decoder(
        dec_input,
        enc_output,
        trg_slf_attn_bias,
        trg_src_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd,
        caches=caches,
        gather_idx=gather_idx)
    # Reshape to 2D tensor to use GEMM instead of BatchedGEMM
    dec_output = layers.reshape(
        dec_output, shape=[-1, dec_output.shape[-1]], inplace=True)
    if weight_sharing:
        # Tie the pre-softmax projection to the shared embedding matrix.
        predict = layers.matmul(
            x=dec_output,
            y=fluid.default_main_program().global_block().var(
                word_emb_param_names[0]),
            transpose_y=True)
    else:
        predict = layers.fc(input=dec_output,
                            size=trg_vocab_size,
                            bias_attr=False)
    if dec_inputs is None:
        # Return probs for independent decoder program.
        predict = layers.softmax(predict)
    return predict
def fast_decode(src_vocab_size,
                trg_vocab_size,
                max_in_len,
                n_layer,
                n_head,
                d_key,
                d_value,
                d_model,
                d_inner_hid,
                prepostprocess_dropout,
                attention_dropout,
                relu_dropout,
                preprocess_cmd,
                postprocess_cmd,
                weight_sharing,
                beam_size,
                max_out_len,
                eos_idx,
                use_py_reader=False):
    """
    Use beam search to decode. Caches will be used to store states of history
    steps which can make the decoding faster.

    Returns (finished_ids, finished_scores, reader); reader is the py_reader
    when use_py_reader is True, otherwise None.
    """
    data_input_names = encoder_data_input_fields + fast_decoder_data_input_fields
    if use_py_reader:
        all_inputs, reader = make_all_py_reader_inputs(data_input_names)
    else:
        all_inputs = make_all_inputs(data_input_names)
    # Slice the flat input list into encoder and fast-decoder inputs.
    enc_inputs_len = len(encoder_data_input_fields)
    dec_inputs_len = len(fast_decoder_data_input_fields)
    enc_inputs = all_inputs[0:enc_inputs_len]
    dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len + dec_inputs_len]
    enc_output = wrap_encoder(
        src_vocab_size,
        max_in_len,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd,
        weight_sharing,
        enc_inputs, )
    start_tokens, init_scores, parent_idx, trg_src_attn_bias = dec_inputs
    def beam_search():
        # Build the step-by-step decoding loop as a fluid While block.
        max_len = layers.fill_constant(
            shape=[1],
            dtype=start_tokens.dtype,
            value=max_out_len,
            force_cpu=True)
        step_idx = layers.fill_constant(
            shape=[1], dtype=start_tokens.dtype, value=0, force_cpu=True)
        cond = layers.less_than(x=step_idx, y=max_len)  # default force_cpu=True
        while_op = layers.While(cond)
        # array states will be stored for each step.
        ids = layers.array_write(
            layers.reshape(start_tokens, (-1, 1)), step_idx)
        scores = layers.array_write(init_scores, step_idx)
        # cell states will be overwrited at each step.
        # caches contains states of history steps in decoder self-attention
        # and static encoder output projections in encoder-decoder attention
        # to reduce redundant computation.
        caches = [
            {
                "k":  # for self attention
                layers.fill_constant_batch_size_like(
                    input=start_tokens,
                    shape=[-1, n_head, 0, d_key],
                    dtype=enc_output.dtype,
                    value=0),
                "v":  # for self attention
                layers.fill_constant_batch_size_like(
                    input=start_tokens,
                    shape=[-1, n_head, 0, d_value],
                    dtype=enc_output.dtype,
                    value=0),
                "static_k":  # for encoder-decoder attention
                layers.create_tensor(dtype=enc_output.dtype),
                "static_v":  # for encoder-decoder attention
                layers.create_tensor(dtype=enc_output.dtype)
            } for i in range(n_layer)
        ]
        with while_op.block():
            pre_ids = layers.array_read(array=ids, i=step_idx)
            # Since beam_search_op dosen't enforce pre_ids' shape, we can do
            # inplace reshape here which actually change the shape of pre_ids.
            pre_ids = layers.reshape(pre_ids, (-1, 1, 1), inplace=True)
            pre_scores = layers.array_read(array=scores, i=step_idx)
            # gather cell states corresponding to selected parent
            pre_src_attn_bias = layers.gather(
                trg_src_attn_bias, index=parent_idx)
            # Current position ids: broadcast step_idx over the batch.
            pre_pos = layers.elementwise_mul(
                x=layers.fill_constant_batch_size_like(
                    input=pre_src_attn_bias,  # cann't use lod tensor here
                    value=1,
                    shape=[-1, 1, 1],
                    dtype=pre_ids.dtype),
                y=step_idx,
                axis=0)
            logits = wrap_decoder(
                trg_vocab_size,
                max_in_len,
                n_layer,
                n_head,
                d_key,
                d_value,
                d_model,
                d_inner_hid,
                prepostprocess_dropout,
                attention_dropout,
                relu_dropout,
                preprocess_cmd,
                postprocess_cmd,
                weight_sharing,
                dec_inputs=(pre_ids, pre_pos, None, pre_src_attn_bias),
                enc_output=enc_output,
                caches=caches,
                gather_idx=parent_idx)
            # intra-beam topK
            topk_scores, topk_indices = layers.topk(
                input=layers.softmax(logits), k=beam_size)
            # Accumulate log-probs with the beam's running scores.
            accu_scores = layers.elementwise_add(
                x=layers.log(topk_scores), y=pre_scores, axis=0)
            # beam_search op uses lod to differentiate branches.
            topk_indices = layers.lod_reset(topk_indices, pre_ids)
            # topK reduction across beams, also contain special handle of
            # end beams and end sentences(batch reduction)
            selected_ids, selected_scores, gather_idx = layers.beam_search(
                pre_ids=pre_ids,
                pre_scores=pre_scores,
                ids=topk_indices,
                scores=accu_scores,
                beam_size=beam_size,
                end_id=eos_idx,
                return_parent_idx=True)
            layers.increment(x=step_idx, value=1.0, in_place=True)
            # cell states(caches) have been updated in wrap_decoder,
            # only need to update beam search states here.
            layers.array_write(selected_ids, i=step_idx, array=ids)
            layers.array_write(selected_scores, i=step_idx, array=scores)
            layers.assign(gather_idx, parent_idx)
            layers.assign(pre_src_attn_bias, trg_src_attn_bias)
            # Continue while under the length limit and some beam is alive.
            length_cond = layers.less_than(x=step_idx, y=max_len)
            finish_cond = layers.logical_not(layers.is_empty(x=selected_ids))
            layers.logical_and(x=length_cond, y=finish_cond, out=cond)
        finished_ids, finished_scores = layers.beam_search_decode(
            ids, scores, beam_size=beam_size, end_id=eos_idx)
        return finished_ids, finished_scores
    finished_ids, finished_scores = beam_search()
    return finished_ids, finished_scores, reader if use_py_reader else None
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleNLP/neural_machine_translation/transformer/model.py",
"copies": "1",
"size": "32421",
"license": "apache-2.0",
"hash": 3025806600157278000,
"line_mean": 34.8243093923,
"line_max": 84,
"alpha_frac": 0.5460658215,
"autogenerated": false,
"ratio": 3.9595749877870055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5005640809287005,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import paddle.v2 as paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from config import TrainTaskConfig, input_data_names, pos_enc_param_names
# FIXME(guosheng): Remove out the batch_size from the model.
# The static program description below is built against this fixed batch
# size (see the reshape calls in multi_head_attention / prepare_encoder),
# so the network cannot run with a different batch size at inference time.
batch_size = TrainTaskConfig.batch_size
def position_encoding_init(n_position, d_pos_vec):
    """
    Generate the initial values for the sinusoid position encoding table.

    Position 0 is reserved (all zeros).  For positions >= 1, even channels
    hold sin(pos / 10000^(2*(i//2)/d_pos_vec)) and odd channels the cosine
    of the same angle.
    """
    channels = np.arange(d_pos_vec)
    denominators = np.power(10000, 2 * (channels // 2) / d_pos_vec)
    table = np.arange(n_position)[:, None] / denominators[None, :]
    table[1:, 0::2] = np.sin(table[1:, 0::2])  # dim 2i
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # dim 2i+1
    # Row 0 is already all zeros: position 0 divided by anything is 0 and
    # sin/cos are only applied from row 1 onwards.
    return table.astype("float32")
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         num_heads=1,
                         dropout_rate=0.):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing the softmax activation, to mask certain selected positions so
    that they will not be considered in attention weights.

    Args:
        queries/keys/values: 3-D tensors [batch, seq_len, hidden].
        attn_bias: additive mask applied to the attention logits.
        d_key/d_value: per-head key/value projection sizes.
        d_model: size of the final output projection.
        num_heads: number of attention heads.
        dropout_rate: dropout applied to the attention weights.

    Raises:
        ValueError: if any of queries/keys/values is not 3-D.
    """
    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")

    def __compute_qkv(queries, keys, values, num_heads, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * num_heads,
                      bias_attr=False,
                      num_flatten_dims=2)
        k = layers.fc(input=keys,
                      size=d_key * num_heads,
                      bias_attr=False,
                      num_flatten_dims=2)
        v = layers.fc(input=values,
                      size=d_value * num_heads,
                      bias_attr=False,
                      num_flatten_dims=2)
        return q, k, v

    def __split_heads(x, num_heads):
        """
        Reshape the last dimension of input tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, num_heads * hidden_dim] then output a tensor
        with shape [bs, num_heads, max_sequence_length, hidden_dim].
        """
        if num_heads == 1:
            return x
        hidden_size = x.shape[-1]
        # FIXME(guosheng): Decouple the program desc with batch_size.
        reshaped = layers.reshape(
            x=x, shape=[batch_size, -1, num_heads, hidden_size // num_heads])
        # permute the dimensions into:
        # [batch_size, num_heads, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])

    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of input tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # FIXME(guosheng): Decouple the program desc with batch_size.
        # BUGFIX: wrap map() in list() -- under Python 3, map() returns a
        # lazy iterator, which the reshape op cannot consume as a shape.
        return layers.reshape(
            x=trans_x,
            shape=list(
                map(int,
                    [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])))

    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention
        """
        # FIXME(guosheng): Optimize the shape in reshape_op or softmax_op.
        # The current implementation of softmax_op only supports 2D tensor,
        # consequently it cannot be directly used here.
        # If to use the reshape_op, Besides, the shape of product inferred in
        # compile-time is not the actual shape in run-time. It cann't be used
        # to set the attribute of reshape_op.
        # So, here define the softmax for temporary solution.

        def __softmax(x, eps=1e-9):
            # NOTE(review): ``eps`` is currently unused; kept for interface
            # stability of this private helper.
            exp_out = layers.exp(x=x)
            sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
            return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)

        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
        if dropout_rate:
            weights = layers.dropout(
                weights, dropout_prob=dropout_rate, is_test=False)
        out = layers.matmul(weights, v)
        return out

    q, k, v = __compute_qkv(queries, keys, values, num_heads, d_key, d_value)
    q = __split_heads(q, num_heads)
    k = __split_heads(k, num_heads)
    v = __split_heads(v, num_heads)
    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)
    out = __combine_heads(ctx_multiheads)
    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         bias_attr=False,
                         num_flatten_dims=2)
    return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid):
    """
    Position-wise Feed-Forward Networks.

    Two linear transformations with a ReLU activation in between, applied to
    each position separately and identically: x -> fc(relu(fc(x))).
    """
    projected = layers.fc(input=x,
                          size=d_inner_hid,
                          num_flatten_dims=2,
                          act="relu")
    return layers.fc(input=projected, size=d_hid, num_flatten_dims=2)
def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.):
    """
    Add residual connection, layer normalization and dropout to the out
    tensor, optionally, according to the value of process_cmd.

    This will be used before or after multi-head attention and position-wise
    feed-forward networks.

    Args:
        prev_out: tensor for the residual connection ("a"); None to skip it.
        out: the tensor being processed.
        process_cmd: string of single-letter commands applied in order:
            "a" residual add, "n" layer normalization, "d" dropout.
        dropout: dropout probability; 0. disables the "d" command.
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            # BUGFIX: compare against None explicitly -- relying on the
            # truthiness of a fluid Variable is fragile (newer paddle
            # versions refuse to convert a Variable to bool).
            out = out + prev_out if prev_out is not None else out
        elif cmd == "n":  # add layer normalization
            out = layers.layer_norm(out, begin_norm_axis=len(out.shape) - 1)
        elif cmd == "d":  # add dropout
            if dropout:
                out = layers.dropout(out, dropout_prob=dropout, is_test=False)
    return out
# ``pre_process_layer`` fixes prev_out=None (no residual source before a
# sub-layer); ``post_process_layer`` is just an alias used after sub-layers.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder(src_word,
                    src_pos,
                    src_vocab_size,
                    src_emb_dim,
                    src_pad_idx,
                    src_max_len,
                    dropout=0.,
                    pos_pad_idx=0,
                    pos_enc_param_name=None):
    """Add word embeddings and position encodings.
    The output tensor has a shape of:
    [batch_size, max_src_length_in_batch, d_model].
    This module is used at the bottom of the encoder stacks.

    Args:
        src_word / src_pos: word-id and position-id input tensors.
        src_vocab_size / src_emb_dim: word embedding table dimensions.
        src_pad_idx / pos_pad_idx: padding ids whose embeddings stay zero.
        src_max_len: number of rows in the position-encoding table.
        dropout: dropout probability applied to the summed embeddings.
        pos_enc_param_name: name of the non-trainable position-encoding
            parameter (bound via functools.partial below).
    """
    src_word_emb = layers.embedding(
        src_word, size=[src_vocab_size, src_emb_dim], padding_idx=src_pad_idx)
    # Position encodings come from a fixed (non-trainable) lookup table.
    src_pos_enc = layers.embedding(
        src_pos,
        size=[src_max_len, src_emb_dim],
        padding_idx=pos_pad_idx,
        param_attr=fluid.ParamAttr(
            name=pos_enc_param_name, trainable=False))
    enc_input = src_word_emb + src_pos_enc
    # FIXME(guosheng): Decouple the program desc with batch_size.
    enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim])
    return layers.dropout(
        enc_input, dropout_prob=dropout,
        is_test=False) if dropout else enc_input
# Bind the stack-specific position-encoding table names.  NOTE: by the time
# the second partial is created, ``prepare_encoder`` already refers to the
# first partial; this still works because keyword arguments supplied at call
# construction override the inner partial's stored ``pos_enc_param_name``.
prepare_encoder = partial(
    prepare_encoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
    prepare_encoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  dropout_rate=0.):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consits of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and droput.
    """
    # Self-attention: queries, keys and values all come from enc_input.
    attn_output = multi_head_attention(enc_input, enc_input, enc_input,
                                       attn_bias, d_key, d_value, d_model,
                                       n_head, dropout_rate)
    # "dan": dropout -> residual add -> layer normalization
    # (see pre_post_process_layer for the command letters).
    attn_output = post_process_layer(enc_input, attn_output, "dan",
                                     dropout_rate)
    ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
    return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            dropout_rate=0.):
    """Stack ``n_layer`` identical encoder_layer blocks, feeding each
    layer's output into the next, and return the last layer's output."""
    for depth in range(n_layer):
        enc_output = encoder_layer(
            enc_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            dropout_rate, )
        enc_input = enc_output
    return enc_output
def decoder_layer(dec_input,
                  enc_output,
                  slf_attn_bias,
                  dec_enc_attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  dropout_rate=0.):
    """ The layer to be stacked in decoder part.
    The structure of this module is similar to that in the encoder part except
    a multi-head attention is added to implement encoder-decoder attention.
    """
    # Self-attention over the decoder input (queries = keys = values);
    # slf_attn_bias also masks subsequent positions (see its construction
    # in transformer()).
    slf_attn_output = multi_head_attention(
        dec_input,
        dec_input,
        dec_input,
        slf_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        dropout_rate, )
    slf_attn_output = post_process_layer(
        dec_input,
        slf_attn_output,
        "dan",  # residual connection + dropout + layer normalization
        dropout_rate, )
    # Encoder-decoder attention: queries from the decoder, keys/values from
    # the encoder output.
    enc_attn_output = multi_head_attention(
        slf_attn_output,
        enc_output,
        enc_output,
        dec_enc_attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        dropout_rate, )
    enc_attn_output = post_process_layer(
        slf_attn_output,
        enc_attn_output,
        "dan",  # residual connection + dropout + layer normalization
        dropout_rate, )
    ffd_output = positionwise_feed_forward(
        enc_attn_output,
        d_inner_hid,
        d_model, )
    dec_output = post_process_layer(
        enc_attn_output,
        ffd_output,
        "dan",  # residual connection + dropout + layer normalization
        dropout_rate, )
    return dec_output
def decoder(dec_input,
            enc_output,
            dec_slf_attn_bias,
            dec_enc_attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            dropout_rate=0.):
    """Stack ``n_layer`` identical decoder_layer blocks, feeding each
    layer's output into the next, and return the last layer's output."""
    for depth in range(n_layer):
        dec_output = decoder_layer(dec_input, enc_output, dec_slf_attn_bias,
                                   dec_enc_attn_bias, n_head, d_key, d_value,
                                   d_model, d_inner_hid, dropout_rate)
        dec_input = dec_output
    return dec_output
def transformer(
        src_vocab_size,
        trg_vocab_size,
        max_length,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate,
        src_pad_idx,
        trg_pad_idx,
        pos_pad_idx, ):
    """
    Build the whole Transformer training network and return the weighted
    sum of the per-token cross-entropy cost.

    All input layers are declared here with fixed
    [batch_size * max_length, ...] compile-time shapes; the actual run-time
    shapes are documented inline next to each declaration.
    """
    # The shapes here act as placeholder.
    # The shapes set here is to pass the infer-shape in compile time. The actual
    # shape of src_word in run time is:
    # [batch_size * max_src_length_in_a_batch, 1].
    src_word = layers.data(
        name=input_data_names[0],
        shape=[batch_size * max_length, 1],
        dtype="int64",
        append_batch_size=False)
    # The actual shape of src_pos in runtime is:
    # [batch_size * max_src_length_in_a_batch, 1].
    src_pos = layers.data(
        name=input_data_names[1],
        shape=[batch_size * max_length, 1],
        dtype="int64",
        append_batch_size=False)
    # The actual shape of trg_word is in runtime is:
    # [batch_size * max_trg_length_in_a_batch, 1].
    trg_word = layers.data(
        name=input_data_names[2],
        shape=[batch_size * max_length, 1],
        dtype="int64",
        append_batch_size=False)
    # The actual shape of trg_pos in runtime is:
    # [batch_size * max_trg_length_in_a_batch, 1].
    trg_pos = layers.data(
        name=input_data_names[3],
        shape=[batch_size * max_length, 1],
        dtype="int64",
        append_batch_size=False)
    # The actual shape of src_slf_attn_bias in runtime is:
    # [batch_size, n_head, max_src_length_in_a_batch, max_src_length_in_a_batch].
    # This input is used to remove attention weights on paddings.
    src_slf_attn_bias = layers.data(
        name=input_data_names[4],
        shape=[batch_size, n_head, max_length, max_length],
        dtype="float32",
        append_batch_size=False)
    # The actual shape of trg_slf_attn_bias in runtime is:
    # [batch_size, n_head, max_trg_length_in_batch, max_trg_length_in_batch].
    # This is used to remove attention weights on paddings and subsequent words.
    trg_slf_attn_bias = layers.data(
        name=input_data_names[5],
        shape=[batch_size, n_head, max_length, max_length],
        dtype="float32",
        append_batch_size=False)
    # The actual shape of trg_src_attn_bias in runtime is:
    # [batch_size, n_head, max_trg_length_in_batch, max_src_length_in_batch].
    # This is used to remove attention weights on paddings.
    trg_src_attn_bias = layers.data(
        name=input_data_names[6],
        shape=[batch_size, n_head, max_length, max_length],
        dtype="float32",
        append_batch_size=False)
    # Embedding + positional encoding for both stacks, then the encoder and
    # decoder towers.
    enc_input = prepare_encoder(
        src_word,
        src_pos,
        src_vocab_size,
        d_model,
        src_pad_idx,
        max_length,
        dropout_rate, )
    enc_output = encoder(
        enc_input,
        src_slf_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate, )
    dec_input = prepare_decoder(
        trg_word,
        trg_pos,
        trg_vocab_size,
        d_model,
        trg_pad_idx,
        max_length,
        dropout_rate, )
    dec_output = decoder(
        dec_input,
        enc_output,
        trg_slf_attn_bias,
        trg_src_attn_bias,
        n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        dropout_rate, )
    # TODO(guosheng): Share the weight matrix between the embedding layers and
    # the pre-softmax linear transformation.
    predict = layers.reshape(
        x=layers.fc(input=dec_output,
                    size=trg_vocab_size,
                    bias_attr=False,
                    num_flatten_dims=2),
        shape=[-1, trg_vocab_size],
        act="softmax")
    # The actual shape of gold in runtime is:
    # [batch_size * max_trg_length_in_a_batch, 1].
    gold = layers.data(
        name=input_data_names[7],
        shape=[batch_size * max_length, 1],
        dtype="int64",
        append_batch_size=False)
    cost = layers.cross_entropy(input=predict, label=gold)
    # The actual shape of weights in runtime is:
    # [batch_size * max_trg_length_in_a_batch, 1].
    # Padding index do not contribute to the total loss. This Weight is used to
    # cancel padding index in calculating the loss.
    weights = layers.data(
        name=input_data_names[8],
        shape=[batch_size * max_length, 1],
        dtype="float32",
        append_batch_size=False)
    weighted_cost = cost * weights
    return layers.reduce_sum(weighted_cost)
| {
"repo_name": "Superjom/models-1",
"path": "fluid/transformer/model.py",
"copies": "1",
"size": "16695",
"license": "apache-2.0",
"hash": 7324230512600988000,
"line_mean": 33.2813141684,
"line_max": 81,
"alpha_frac": 0.5635220126,
"autogenerated": false,
"ratio": 3.6191198786039456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9681746420324668,
"avg_score": 0.0001790941758554987,
"num_lines": 487
} |
from functools import partial
import numpy as np
def bbox_overlap_area(a, b):
    """Area of the axis-aligned overlap between the bounding boxes of the
    point sets ``a`` and ``b`` (0 if they do not intersect)."""
    upper = numpy.minimum(a.max(axis=0), b.max(axis=0))
    lower = numpy.maximum(a.min(axis=0), b.min(axis=0))
    extent = upper - lower
    if (extent < 0).any():
        return 0
    return extent.prod()
def bbox_proportion_overlap(a, b):
    """Fraction of bounding box ``a``'s area covered by its overlap with ``b``."""
    return bbox_overlap_area(a, b) / bbox_area(a)
def bbox_area(b):
    """Area of the axis-aligned bounding box of the point set ``b``
    (product of the per-axis extents)."""
    extents = b.max(axis=0) - b.min(axis=0)
    return extents.prod()
def bbox_area_ratio(a, b):
    """Ratio of bounding box ``a``'s area to bounding box ``b``'s area."""
    area_a = bbox_area(a)
    area_b = bbox_area(b)
    return area_a / area_b
def bbox_overlap_acceptable(gt, d):
    """A detection ``d`` is acceptable when it covers more than half of the
    ground truth ``gt`` and its area is more than half of gt's."""
    if bbox_proportion_overlap(gt, d) <= 0.5:
        return False
    return bbox_area_ratio(gt, d) > 0.5
def detect_and_check(img, detector, group=None):
    """Run ``detector`` on ``img`` and accept the first detection that
    sufficiently overlaps the ground-truth bounding box.

    Args:
        img: menpo image carrying landmarks for ``group``.
        detector: callable returning an iterable of detections for an image.
        group: landmark group name holding the ground-truth annotation.

    Returns:
        dict with 'd' (the accepted detection, or None if every detection
        was rejected) and 'gt' (the ground-truth bounding box).
    """
    gt = img.landmarks[group].lms.bounding_box()
    for detection in detector(img):
        if bbox_overlap_acceptable(gt.points, detection.points):
            return {'d': detection, 'gt': gt}
    # No detection passed the overlap test.  (The previous version also
    # formatted per-detection diagnostics into a local list here, but never
    # used them; that dead work has been removed.)
    return {'d': None, 'gt': gt}
def normalize(gt):
    """Transform mapping the bounding box ``gt`` to a unit box centred on
    the origin: translate its centre to 0, then scale its range to [1, 1]."""
    from menpo.transform import Translation, NonUniformScale
    recentre = Translation(gt.centre()).pseudoinverse()
    rescale = NonUniformScale(gt.range()).pseudoinverse()
    return recentre.compose_before(rescale)
def random_instance(pca):
    """Sample a random instance from ``pca`` by drawing component weights
    from a zero-mean Gaussian with variance equal to the eigenvalues."""
    mean = numpy.zeros_like(pca.eigenvalues)
    covariance = numpy.diag(pca.eigenvalues)
    weights = numpy.random.multivariate_normal(mean, covariance)
    return pca.instance(weights)
def load_dlib_detector():
    """Load the dlib frontal face detector, preset to accept colour images."""
    from menpodetect import load_dlib_frontal_face_detector
    return partial(load_dlib_frontal_face_detector(), greyscale=False)
def load_opencv_detector():
    """Load the OpenCV frontal face detector, preset to accept colour images."""
    from menpodetect import load_opencv_frontal_face_detector
    return partial(load_opencv_frontal_face_detector(), greyscale=False)
def load_pico_detector():
    """Load the pico frontal face detector, preset to accept colour images."""
    from menpodetect import load_pico_frontal_face_detector
    return partial(load_pico_frontal_face_detector(), greyscale=False)
# Registry mapping a detector-type name to a zero-argument loader function.
# The pico and opencv entries are deliberately disabled here -- presumably
# unreliable or unavailable in this environment (TODO confirm before
# re-enabling).
_DETECTORS = {
    'dlib': load_dlib_detector,
    # 'pico': load_pico_detector,
    # 'opencv': load_opencv_detector
}
def save_bounding_boxes(pattern, detector_type, group=None,
                        sythesize_problematic=False, overwrite=False):
    """Detect objects on every image matching ``pattern`` and export the
    accepted bounding boxes as per-image ljson landmark files.

    Args:
        pattern: glob/path pattern understood by ``mio.import_images``.
        detector_type: key into the ``_DETECTORS`` registry (e.g. 'dlib').
        group: landmark group holding the ground-truth annotations.
        sythesize_problematic: if True, learn a PCA model from the
            successful detections and sample plausible boxes for images
            where the detector failed.  (Parameter name kept, typo and all,
            for backward compatibility.)
        overwrite: overwrite existing exported landmark files.

    Raises:
        ValueError: if ``detector_type`` is not a known detector.
    """
    import menpo.io as mio
    from menpo.landmark import LandmarkGroup
    from menpo.model import PCAModel
    try:
        detector = _DETECTORS[detector_type]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))
    print('Running {} detector on {}'.format(detector_type, pattern))
    bboxes = {img.path: detect_and_check(img, detector, group=group)
              for img in mio.import_images(pattern, normalise=False,
                                           verbose=True)}

    # find all the detections that failed
    # BUGFIX: materialize as a list -- under Python 3 ``filter`` returns a
    # lazy iterator, so ``len(problematic)`` would raise a TypeError and a
    # second iteration would silently yield nothing.
    problematic = [x for x in bboxes.items() if x[1]['d'] is None]
    print('Failed to detect {} objects'.format(len(problematic)))
    if len(problematic) > 0 and sythesize_problematic:
        print('Learning detector traits and sythesizing fits for {} '
              'images'.format(len(problematic)))
        # get the good detections (as a list, for the same Python 3 reason)
        detections = [r for r in bboxes.values() if r['d'] is not None]
        # normalize these to size [1, 1], centred on origin
        normed_detections = [normalize(r['gt']).apply(r['d'])
                             for r in detections]
        # build a PCA model from good detections
        pca = PCAModel(normed_detections)

        for p, r in problematic:
            # generate a new bbox offset in the normalized space by using
            # our learnt PCA basis
            d = random_instance(pca)
            # apply an inverse transform to place it on the image
            bboxes[p]['d'] = normalize(r['gt']).pseudoinverse().apply(d)
    to_save = len(bboxes)
    if not sythesize_problematic:
        to_save = to_save - len(problematic)
    print('Saving out {} {} detections'.format(to_save, detector_type))
    # All done, save out results
    for p, r in bboxes.items():
        if r['d'] is not None:
            lg = LandmarkGroup.init_with_all_label(r['d'])
            mio.export_landmark_file(lg, p.parent /
                                     (p.stem + '_{}.ljson'.format(detector_type)),
                                     overwrite=overwrite)
| {
"repo_name": "nontas/menpobench",
"path": "menpobench/bbox.py",
"copies": "2",
"size": "4629",
"license": "bsd-3-clause",
"hash": 7501703693361502000,
"line_mean": 35.1640625,
"line_max": 82,
"alpha_frac": 0.6208684381,
"autogenerated": false,
"ratio": 3.5362872421695952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5157155680269595,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy
from skimage import transform
# Threshold below which an accumulated probability mass is treated as zero.
EPS = 1e-66
# Grid cell size of the discretized [0, 1] x [0, 1] sampling domain.
RESOLUTION = 0.001
# Number of grid cells per axis (1/RESOLUTION, rounded to nearest integer).
num_grids = int(1/RESOLUTION+0.5)
def generate_lut(img):
    """
    linear approximation of CDF & marginal

    Builds inverse-CDF lookup tables for sampling (x, y) from the grey-level
    density image: a marginal CDF over y, and one conditional CDF over x for
    each y bin.
    :param density_img:
    :return: lut_y, lut_x
    """
    density_img = transform.resize(img, (num_grids, num_grids))
    # Unnormalized marginal of y: sum the density along x.  Rows are
    # accumulated bottom-up (image row 0 is the top), so sampled y grows
    # upwards in image terms.
    x_accumlation = numpy.sum(density_img, axis=1)
    sum_xy = numpy.sum(x_accumlation)
    y_cdf_of_accumulated_x = [[0., 0.]]
    accumulated = 0
    for ir, i in enumerate(range(num_grids-1, -1, -1)):
        accumulated += x_accumlation[i]
        if accumulated == 0:
            # Leading empty rows: slide the CDF origin forward instead of
            # adding duplicate zero-probability knots.
            y_cdf_of_accumulated_x[0][0] = float(ir+1)/float(num_grids)
        elif EPS < accumulated < sum_xy - EPS:
            y_cdf_of_accumulated_x.append([float(ir+1)/float(num_grids), accumulated/sum_xy])
        else:
            # Remaining mass is negligible; the closing knot below pins the
            # CDF at 1.
            break
    y_cdf_of_accumulated_x.append([float(ir+1)/float(num_grids), 1.])
    y_cdf_of_accumulated_x = numpy.array(y_cdf_of_accumulated_x)
    # Conditional CDFs of x for every y bin (None for rows with ~no mass).
    x_cdfs = []
    for j in range(num_grids):
        x_freq = density_img[num_grids-j-1]
        sum_x = numpy.sum(x_freq)
        x_cdf = [[0., 0.]]
        accumulated = 0
        for i in range(num_grids):
            accumulated += x_freq[i]
            if accumulated == 0:
                x_cdf[0][0] = float(i+1) / float(num_grids)
            elif EPS < accumulated < sum_xy - EPS:
                # NOTE(review): the upper bound compares against the global
                # total ``sum_xy`` rather than the row total ``sum_x`` --
                # possibly a bug; verify against the y-loop above.
                x_cdf.append([float(i+1)/float(num_grids), accumulated/sum_x])
            else:
                break
        x_cdf.append([float(i+1)/float(num_grids), 1.])
        if accumulated > EPS:
            x_cdf = numpy.array(x_cdf)
            x_cdfs.append(x_cdf)
        else:
            # Row with (near) zero mass: no conditional CDF can be built.
            x_cdfs.append(None)
    # Inverse-CDF lookups realised via linear interpolation: map a uniform
    # sample (probability) back to a coordinate in [0, 1].
    y_lut = partial(numpy.interp, xp=y_cdf_of_accumulated_x[:, 1], fp=y_cdf_of_accumulated_x[:, 0])
    x_luts = [partial(numpy.interp, xp=x_cdfs[i][:, 1], fp=x_cdfs[i][:, 0]) if x_cdfs[i] is not None else None for i in range(num_grids)]
    return y_lut, x_luts
def sample_2d(lut, N):
    """
    Draw N samples from the 2D distribution described by ``lut`` (as
    produced by generate_lut) via inverse-transform sampling: invert the
    marginal CDF in y first, then the conditional CDF in x for that row.
    """
    y_lut, x_luts = lut
    uniforms = numpy.random.random((N, 2))
    samples = numpy.zeros(uniforms.shape)
    for idx in range(N):
        u_x, u_y = uniforms[idx]
        sample_y = y_lut(u_y)
        # Pick the x-lut of the grid row that this y value falls into.
        row = int(sample_y / RESOLUTION)
        sample_x = x_luts[row](u_x)
        samples[idx][0] = sample_x
        samples[idx][1] = sample_y
    return samples
if __name__ == '__main__':
    # Demo: sample 10k points from the density encoded by a grayscale
    # image, then show the source density next to the resulting scatter.
    from skimage import io
    density_img = io.imread('inputs/random.jpg', True)
    lut_2d = generate_lut(density_img)
    samples = sample_2d(lut_2d, 10000)
    from matplotlib import pyplot
    fig, (ax0, ax1) = pyplot.subplots(ncols=2, figsize=(9, 4))
    # NOTE(review): FigureCanvas.set_window_title was removed in newer
    # matplotlib; may need fig.canvas.manager.set_window_title instead.
    fig.canvas.set_window_title('Test 2D Sampling')
    ax0.imshow(density_img, cmap='gray')
    # Hide axis ticks on the density image.
    ax0.xaxis.set_major_locator(pyplot.NullLocator())
    ax0.yaxis.set_major_locator(pyplot.NullLocator())
    ax1.axis('equal')
    ax1.axis([0, 1, 0, 1])
    # 'k,' = black pixel markers, so 10k points stay readable.
    ax1.plot(samples[:, 0], samples[:, 1], 'k,')
    pyplot.show()
| {
"repo_name": "frombeijingwithlove/dlcv_for_beginners",
"path": "random_bonus/gan_n_cgan_2d_example/sampler.py",
"copies": "1",
"size": "2899",
"license": "bsd-3-clause",
"hash": -8451797065231053000,
"line_mean": 32.7093023256,
"line_max": 137,
"alpha_frac": 0.5774404967,
"autogenerated": false,
"ratio": 2.8646245059288535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8933234872776522,
"avg_score": 0.0017660259704663035,
"num_lines": 86
} |
from functools import partial
import numpy
import os
import re
import random
import signal
from collections import OrderedDict
from scipy.misc import imread
from unicsv import DictUnicodeReader
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
from scipy.ndimage.interpolation import zoom
class AbstractSegmentation:
    '''
    Interface for segmentation datasets.  Subclasses override the
    metadata accessors (all_names, size, filename, metadata,
    resolve_segmentation) and inherit default name/segmentation_data
    logic built on top of them.
    '''
    def all_names(self, category, j):
        # All English synonyms for label j (within the given category).
        raise NotImplementedError
    def size(self, split=None):
        # Number of images available (optionally within one split).
        return 0
    def filename(self, i):
        # Path of the ith original image.
        raise NotImplementedError
    def metadata(self, i):
        # Pickleable metadata for image i; by default just the filename.
        return self.filename(i)
    @classmethod
    def resolve_segmentation(cls, m):
        # Resolve metadata into {category: data}, possibly in a worker
        # process.  NOTE(review): segmentation_data below passes a
        # categories= keyword, which this base signature does not accept;
        # subclasses apparently must accept it -- confirm overrides match.
        return {}
    def name(self, category, i):
        '''
        Default implementation of name, utilizing all_names.
        '''
        all_names = self.all_names(category, i)
        return all_names[0] if len(all_names) else ''
    def segmentation_data(self, category, i, c=0, full=False):
        '''
        Default implementation of segmentation_data,
        utilizing metadata and resolve_segmentation.
        '''
        segs = self.resolve_segmentation(
            self.metadata(i), categories=[category])
        if category not in segs:
            return 0
        data = segs[category]
        # Unless the caller asks for all channels, return the first one.
        if not full and len(data.shape) >= 3:
            return data[0]
        return data
class SegmentationData(AbstractSegmentation):
    '''
    Represents and loads a multi-channel segmentation represented with
    a series of csv files: index.csv lists the images together with
    any label data avilable in each category; category.csv lists
    the categories of segmentations available; and label.csv lists the
    numbers used to describe each label class. In addition, the categories
    each have a separate c_*.csv file describing a dense coding of labels.
    '''
    def __init__(self, directory, categories=None, require_all=False):
        '''
        directory: dataset root (contains the csv files and images/).
        categories: optional whitelist of category names to keep.
        require_all: if True, keep only images labeled in every category.
        '''
        directory = os.path.expanduser(directory)
        self.directory = directory
        with open(os.path.join(directory, 'index.csv')) as f:
            self.image = [decode_index_dict(r) for r in DictUnicodeReader(f)]
        with open(os.path.join(directory, 'category.csv')) as f:
            self.category = OrderedDict()
            for row in DictUnicodeReader(f):
                self.category[row['name']] = row
        with open(os.path.join(directory, 'label.csv')) as f:
            label_data = [decode_label_dict(r) for r in DictUnicodeReader(f)]
        self.label = build_dense_label_array(label_data)
        if categories is not None:
            # Filter out unused categories
            categories = set([c for c in categories if c in self.category])
            # Iterate over a copy: deleting keys while iterating a dict
            # view raises RuntimeError on Python 3.
            for cat in list(self.category.keys()):
                if cat not in categories:
                    del self.category[cat]
        else:
            categories = self.category.keys()
        # Filter out images with insufficient data
        filter_fn = partial(
            index_has_all_data if require_all else index_has_any_data,
            categories=categories)
        self.image = [row for row in self.image if filter_fn(row)]
        # Build dense remapping arrays for labels, so that you can
        # get dense ranges of labels for each category.
        self.category_map = {}
        self.category_unmap = {}
        self.category_label = {}
        for cat in self.category:
            with open(os.path.join(directory, 'c_%s.csv' % cat)) as f:
                c_data = [decode_label_dict(r) for r in DictUnicodeReader(f)]
            self.category_unmap[cat], self.category_map[cat] = (
                build_numpy_category_map(c_data))
            self.category_label[cat] = build_dense_label_array(
                c_data, key='code')
    def all_names(self, category, j):
        '''All English synonyms for the given label'''
        if category is not None:
            j = self.category_unmap[category][j]
        # Fix: the synonym list must come from the same label entry as the
        # name (the original indexed the syns with an undefined/stale `i`).
        return [self.label[j]['name']] + self.label[j]['syns']
    def size(self, split=None):
        '''The number of images in this data set.'''
        if split is None:
            return len(self.image)
        return len([im for im in self.image if im['split'] == split])
    def filename(self, i):
        '''The filename of the ith jpeg (original image).'''
        return os.path.join(self.directory, 'images', self.image[i]['image'])
    def split(self, i):
        '''Which split contains item i.'''
        return self.image[i]['split']
    def metadata(self, i):
        '''Extract metadata for image i, For efficient data loading.'''
        return self.directory, self.image[i]
    # Bookkeeping keys in index rows that are not segmentation categories.
    meta_categories = ['image', 'split', 'ih', 'iw', 'sh', 'sw']
    @classmethod
    def resolve_segmentation(cls, m, categories=None):
        '''
        Resolves a full segmentation, potentially in a differenct process,
        for efficient multiprocess data loading.
        '''
        directory, row = m
        result = {}
        for cat, d in row.items():
            if cat in cls.meta_categories:
                continue
            if not wants(cat, categories):
                continue
            if all(isinstance(data, int) for data in d):
                # Whole-image labels need no pixel data.
                result[cat] = d
                continue
            out = numpy.empty(
                (len(d), row['sh'], row['sw']), dtype=numpy.int16)
            for i, channel in enumerate(d):
                if isinstance(channel, int):
                    out[i] = channel
                else:
                    # Label numbers are encoded in the R and G png channels.
                    rgb = imread(os.path.join(directory, 'images', channel))
                    out[i] = rgb[:, :, 0] + rgb[:, :, 1] * 256
            result[cat] = out
        return result, (row['sh'], row['sw'])
    def label_size(self, category=None):
        '''
        Returns the number of distinct labels (plus zero), i.e., one
        more than the maximum label number. If a category is specified,
        returns the number of distinct labels within that category.
        '''
        if category is None:
            return len(self.label)
        else:
            return len(self.category_unmap[category])
    def name(self, category, j):
        '''
        Returns an English name for the jth label. If a category is
        specified, returns the name for the category-specific nubmer j.
        If category=None, then treats j as a fully unified index number.
        '''
        if category is not None:
            j = self.category_unmap[category][j]
        return self.label[j]['name']
    def frequency(self, category, j):
        '''
        Returns the number of images for which the label appears.
        '''
        if category is not None:
            return self.category_label[category][j]['frequency']
        return self.label[j]['frequency']
    def coverage(self, category, j):
        '''
        Returns the pixel coverage of the label in units of whole-images.
        '''
        if category is not None:
            return self.category_label[category][j]['coverage']
        return self.label[j]['coverage']
    def category_names(self):
        '''
        Returns the set of category names.
        '''
        return self.category.keys()
    def category_frequency(self, category):
        '''
        Returns the number of images touched by a category.
        '''
        return float(self.category[category]['frequency'])
    def primary_categories_per_index(self, categories=None):
        '''
        Returns an array of primary category numbers for each label, where
        catagories are indexed according to the list of categories passed,
        or self.category_names() if none.
        '''
        if categories is None:
            categories = self.category_names()
        # Make lists which are nonzero for labels in a category
        catmap = {}
        for cat in categories:
            imap = self.category_index_map(cat)
            if len(imap) < self.label_size(None):
                # Pad with zeros so every category map has uniform length.
                imap = numpy.concatenate((imap, numpy.zeros(
                    self.label_size(None) - len(imap), dtype=imap.dtype)))
            catmap[cat] = imap
        # For each label, find the category with maximum coverage.
        result = []
        for i in range(self.label_size(None)):
            maxcov, maxcat = max(
                (self.coverage(cat, catmap[cat][i])
                    if catmap[cat][i] else 0, ic)
                for ic, cat in enumerate(categories))
            result.append(maxcat)
        # Return the max-coverage cateogry for each label.
        return numpy.array(result)
    def segmentation_data(self, category, i, c=0, full=False, out=None):
        '''
        Returns a 2-d numpy matrix with segmentation data for the ith image,
        restricted to the given category. By default, maps all label numbers
        to the category-specific dense mapping described in the c_*.csv
        listing; but can be asked to expose the fully unique indexing by
        using full=True.
        '''
        row = self.image[i]
        data_channels = row.get(category, ())
        if c >= len(data_channels):
            channel = 0  # Deal with unlabeled data in this category
        else:
            channel = data_channels[c]
        if out is None:
            out = numpy.empty((row['sh'], row['sw']), dtype=numpy.int16)
        if isinstance(channel, int):
            if not full:
                channel = self.category_map[category][channel]
            out[:, :] = channel  # Single-label for the whole image
            return out
        png = imread(os.path.join(self.directory, 'images', channel))
        if full:
            # Full case: just combine png channels.
            out[...] = png[:, :, 0] + png[:, :, 1] * 256
        else:
            # Dense case: combine png channels and apply the category map.
            catmap = self.category_map[category]
            out[...] = catmap[png[:, :, 0] + png[:, :, 1] * 256]
        return out
    def full_segmentation_data(self, i,
                               categories=None, max_depth=None, out=None):
        '''
        Returns a 3-d numpy tensor with segmentation data for the ith image,
        with multiple layers represnting multiple lables for each pixel.
        The depth is variable depending on available data but can be
        limited to max_depth.
        '''
        row = self.image[i]
        if categories:
            groups = [d for cat, d in row.items() if cat in categories and d]
        else:
            groups = [d for cat, d in row.items() if d and (
                cat not in self.meta_categories)]
        depth = sum(len(c) for c in groups)
        if max_depth is not None:
            depth = min(depth, max_depth)
        # Allocate an array if not already allocated.
        if out is None:
            out = numpy.empty((depth, row['sh'], row['sw']), dtype=numpy.int16)
        i = 0
        # Stack up the result segmentation one channel at a time
        for group in groups:
            for channel in group:
                if isinstance(channel, int):
                    out[i] = channel
                else:
                    png = imread(
                        os.path.join(self.directory, 'images', channel))
                    out[i] = png[:, :, 0] + png[:, :, 1] * 256
                i += 1
                if i == depth:
                    return out
        # Return above when we get up to depth
        assert False
    def category_index_map(self, category):
        # Dense code -> unified label number map as a fresh numpy array.
        return numpy.array(self.category_map[category])
def build_dense_label_array(label_data, key='number', allow_none=False):
    '''
    Input: set of rows with 'number' fields (or another field name key).
    Output: array such that a[number] = the row with the given number.

    Gaps are filled with empty placeholder rows (same keys, zero-valued
    fields) unless allow_none is True, in which case they stay None.
    '''
    result = [None] * (max([d[key] for d in label_data]) + 1)
    for d in label_data:
        result[d[key]] = d
    # Fill in none
    if not allow_none:
        example = label_data[0]
        def make_empty(k):
            # Fix: compare keys by equality, not identity -- `c is key`
            # only worked by accident of small-string interning.
            return dict((c, k if c == key else type(v)())
                        for c, v in example.items())
        for i, d in enumerate(result):
            if d is None:
                result[i] = dict(make_empty(i))
    return result
def build_numpy_category_map(map_data, key1='code', key2='number'):
    '''
    Input: set of rows with key1 and key2 fields (default 'code' and
    'number').
    Output: a pair of dense int16 arrays (forward, backward) such that
    forward[row[key1]] == row[key2] and backward[row[key2]] == row[key1].
    (The original docstring was a copy-paste from another function.)
    '''
    results = list(numpy.zeros((max([d[key] for d in map_data]) + 1),
                               dtype=numpy.int16) for key in (key1, key2))
    for d in map_data:
        results[0][d[key1]] = d[key2]
        results[1][d[key2]] = d[key1]
    return results
def decode_label_dict(row):
    """Convert the raw string fields of a label.csv row into typed values:
    'category' becomes {name: count}, 'syns' a list, integer/float-looking
    strings become numbers, and everything else stays a string."""
    decoded = {}
    for field, raw in row.items():
        if field == 'category':
            pairs = (re.match(r'^([^(]*)\(([^)]*)\)$', part).groups()
                     for part in raw.split(';'))
            decoded[field] = {cat: int(count) for cat, count in pairs}
        elif field == 'name':
            decoded[field] = raw
        elif field == 'syns':
            decoded[field] = raw.split(';')
        elif re.match(r'^\d+$', raw):
            decoded[field] = int(raw)
        elif re.match(r'^\d+\.\d*$', raw):
            decoded[field] = float(raw)
        else:
            decoded[field] = raw
    return decoded
def decode_index_dict(row):
    """Convert the raw string fields of an index.csv row into typed
    values: size fields become ints, and category fields become lists of
    filenames / whole-image label numbers (empty tokens dropped)."""
    decoded = {}
    for field, raw in row.items():
        if field in ('image', 'split'):
            decoded[field] = raw
        elif field in ('sw', 'sh', 'iw', 'ih'):
            decoded[field] = int(raw)
        else:
            decoded[field] = [
                int(token) if re.match(r'^\d+$', token) else token
                for token in raw.split(';') if token]
    return decoded
def index_has_any_data(row, categories):
    """True if the row has at least one truthy data entry in any of the
    given categories."""
    return any(data for c in categories for data in row[c])
def index_has_all_data(row, categories):
    """True only if every listed category has at least one truthy data
    entry for this row."""
    return all(any(data for data in row[c]) for c in categories)
class SegmentationPrefetcher:
    '''
    SegmentationPrefetcher will prefetch a bunch of segmentation
    images using a multiprocessing pool, so you do not have to wait
    around while the files get opened and decoded. Just request
    batches of images and segmentations calling fetch_batch().
    '''
    def __init__(self, segmentation, split=None, randomize=False,
                 segmentation_shape=None, categories=None, once=False,
                 start=None, end=None, batch_size=4, ahead=24, thread=False):
        '''
        Constructor arguments:
        segmentation: The AbstractSegmentation to load.
        split: None for no filtering, or 'train' or 'val' etc.
        randomize: True to randomly shuffle order, or a random seed.
        categories: a list of categories to include in each batch.
        batch_size: number of data items for each batch.
        ahead: the number of data items to prefetch ahead.
        thread: use a thread pool instead of a process pool.
        '''
        self.segmentation = segmentation
        self.split = split
        self.randomize = randomize
        self.random = random.Random()
        if randomize is not True:
            # A non-True truthy value is treated as a reproducible seed.
            self.random.seed(randomize)
        self.categories = categories
        self.once = once
        self.batch_size = batch_size
        self.ahead = ahead
        # Initialize the multiprocessing pool
        n_procs = cpu_count()
        if thread:
            self.pool = ThreadPool(processes=n_procs)
        else:
            # Workers ignore SIGINT so Ctrl-C is handled by the parent.
            original_sigint_handler = setup_sigint()
            self.pool = Pool(processes=n_procs, initializer=setup_sigint)
            restore_sigint(original_sigint_handler)
        # Prefilter the image indexes of interest
        if start is None:
            start = 0
        if end is None:
            end = segmentation.size()
        # Fix: materialize as a list; random.shuffle cannot shuffle a
        # range object on Python 3.
        self.indexes = list(range(start, end))
        if split:
            self.indexes = [i for i in self.indexes
                            if segmentation.split(i) == split]
        if self.randomize:
            self.random.shuffle(self.indexes)
        self.index = 0
        self.result_queue = []
        self.segmentation_shape = segmentation_shape
        # Get dense catmaps
        self.catmaps = [
            segmentation.category_index_map(cat) if cat != 'image' else None
            for cat in categories]
    def next_job(self):
        '''Produce the next worker job tuple, or None when exhausted.'''
        if self.index < 0:
            # Sentinel set after a full pass when once=True.
            return None
        j = self.indexes[self.index]
        result = (j,
                  self.segmentation.__class__,
                  self.segmentation.metadata(j),
                  self.segmentation.filename(j),
                  self.categories,
                  self.segmentation_shape)
        self.index += 1
        if self.index >= len(self.indexes):
            if self.once:
                self.index = -1
            else:
                self.index = 0
                if self.randomize:
                    # Reshuffle every time through
                    self.random.shuffle(self.indexes)
        return result
    def batches(self):
        '''Iterator for all batches'''
        while True:
            batch = self.fetch_batch()
            if batch is None:
                # Fix: return ends the generator cleanly; raising
                # StopIteration inside a generator is a RuntimeError on
                # modern Python (PEP 479).
                return
            yield batch
    def fetch_batch(self):
        '''Returns a single batch as an array of dictionaries.'''
        try:
            self.refill_tasks()
            if len(self.result_queue) == 0:
                return None
            result = self.result_queue.pop(0)
            # Long explicit timeout (1 year) keeps .get interruptible.
            return result.get(31536000)
        except KeyboardInterrupt:
            print("Caught KeyboardInterrupt, terminating workers")
            self.pool.terminate()
            raise
    def fetch_tensor_batch(self, bgr_mean=None, global_labels=False):
        '''Returns a single batch as an array of tensors, one per category.'''
        batch = self.fetch_batch()
        return self.form_caffe_tensors(batch, bgr_mean, global_labels)
    def tensor_batches(self, bgr_mean=None, global_labels=False):
        '''Iterator for batches as arrays of tensors.'''
        while True:
            batch = self.fetch_tensor_batch(
                bgr_mean=bgr_mean, global_labels=global_labels)
            if batch is None:
                # Fix: see batches() -- PEP 479.
                return
            yield batch
    def form_caffe_tensors(self, batch, bgr_mean=None, global_labels=False):
        # Assemble a batch in [{'cat': data,..},..] format into
        # an array of batch tensors, the first for the image, and the
        # remaining for each category in self.categories, in order.
        if batch is None:
            return None
        batches = [[] for c in self.categories]
        for record in batch:
            default_shape = (1, record['sh'], record['sw'])
            for c, cat in enumerate(self.categories):
                if cat == 'image':
                    # Normalize image with right RGB order and mean
                    batches[c].append(normalize_image(
                        record[cat], bgr_mean))
                elif global_labels:
                    batches[c].append(normalize_label(
                        record[cat], default_shape, flatten=True))
                else:
                    # Remap unified label numbers to the category's
                    # dense coding.
                    catmap = self.catmaps[c]
                    batches[c].append(catmap[normalize_label(
                        record[cat], default_shape, flatten=True)])
        return [numpy.concatenate(tuple(m[numpy.newaxis] for m in b))
                for b in batches]
    def refill_tasks(self):
        # It will call the sequencer to ask for a sequence
        # of batch_size jobs (indexes with categories)
        # Then it will call pool.map_async
        while len(self.result_queue) < self.ahead:
            data = []
            while len(data) < self.batch_size:
                job = self.next_job()
                if job is None:
                    break
                data.append(job)
            if len(data) == 0:
                return
            self.result_queue.append(
                self.pool.map_async(prefetch_worker, data))
    def close(self):
        '''Drain pending results and shut the worker pool down.'''
        # Fix: original signature omitted `self`, and referenced a
        # misspelled `self.poool` attribute, so this method could never
        # run successfully.
        while len(self.result_queue):
            result = self.result_queue.pop(0)
            if result is not None:
                result.wait(0.001)
        self.pool.close()
        # Pool has no cancel_join_thread (that is a Queue method);
        # join() is the standard companion to close().
        self.pool.join()
def prefetch_worker(d):
    """Pool worker: resolve one job tuple into a segmentation record dict.

    d is the (index, segmentation class, metadata, filename, categories,
    target shape) tuple produced by SegmentationPrefetcher.next_job,
    or None (propagated unchanged).
    """
    if d is None:
        return None
    j, typ, m, fn, categories, segmentation_shape = d
    segs, shape = typ.resolve_segmentation(m, categories=categories)
    if segmentation_shape is not None:
        # Rescale every channel stack to the requested output shape.
        for k, v in segs.items():
            segs[k] = scale_segmentation(v, segmentation_shape)
        shape = segmentation_shape
    # Some additional metadata to provide
    segs['sh'], segs['sw'] = shape
    segs['i'] = j
    segs['fn'] = fn
    if categories is None or 'image' in categories:
        segs['image'] = imread(fn)
    return segs
def scale_segmentation(segmentation, dims, crop=False):
    '''
    Zooms a 2d or 3d segmentation to the given dims, using nearest neighbor.

    segmentation: 2d (y, x) or 3d (channels, y, x) label array.
    dims: target (height, width).
    crop: if True, scale uniformly and center-crop instead of stretching.
    '''
    shape = numpy.shape(segmentation)
    if len(shape) < 2 or shape[-2:] == dims:
        return segmentation
    # 2d input gets a temporary leading channel axis, removed at the end.
    peel = (len(shape) == 2)
    if peel:
        segmentation = segmentation[numpy.newaxis]
    levels = segmentation.shape[0]
    result = numpy.zeros((levels, ) + dims,
                         dtype=segmentation.dtype)
    ratio = (1,) + tuple(res / float(orig)
                         for res, orig in zip(result.shape[1:], segmentation.shape[1:]))
    if not crop:
        safezoom(segmentation, ratio, output=result, order=0)
    else:
        ratio = max(ratio[1:])
        height = int(round(dims[0] / ratio))
        # Fix: margins must be computed on the spatial axes (1 and 2);
        # the original read axis 0 (channels) for the height margin and
        # reused `height` on axis 1 for the width margin.
        hmargin = (segmentation.shape[1] - height) // 2
        width = int(round(dims[1] / ratio))
        wmargin = (segmentation.shape[2] - width) // 2
        safezoom(segmentation[:, hmargin:hmargin + height,
                              wmargin:wmargin + width],
                 (1, ratio, ratio), output=result, order=0)
    if peel:
        result = result[0]
    return result
def safezoom(array, ratio, output=None, order=0):
    '''Like numpy.zoom, but does not crash when the first dimension
    of the array is of size 1, as happens often with segmentations'''
    original_dtype = array.dtype
    # scipy's zoom cannot operate on float16 directly.
    if array.dtype == numpy.float16:
        array = array.astype(numpy.float32)
    if array.shape[0] == 1:
        # Zoom the single 2d plane and re-add the leading axis.
        target = output[0, ...] if output is not None else None
        zoomed = zoom(array[0, ...], ratio[1:], output=target, order=order)
        if output is None:
            output = zoomed[numpy.newaxis]
    else:
        zoomed = zoom(array, ratio, output=output, order=order)
        if output is None:
            output = zoomed
    return output.astype(original_dtype)
def setup_sigint():
    """Make this process ignore SIGINT (only meaningful from the main
    thread); returns the previous handler, or None if not applicable."""
    import threading
    in_main_thread = isinstance(
        threading.current_thread(), threading._MainThread)
    if not in_main_thread:
        # Signal handlers may only be installed from the main thread.
        return None
    previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
    return previous
def restore_sigint(original):
    """Reinstall a SIGINT handler saved by setup_sigint (default handler
    if None); no-op when called off the main thread."""
    import threading
    if not isinstance(threading.current_thread(), threading._MainThread):
        return
    handler = signal.SIG_DFL if original is None else original
    signal.signal(signal.SIGINT, handler)
def wants(what, option):
    """Membership test where option=None means 'everything'."""
    return option is None or what in option
def normalize_image(rgb_image, bgr_mean):
    """
    Load input image and preprocess for Caffe:
    - cast to float
    - switch channels RGB -> BGR
    - subtract mean
    - transpose to channel x height x width order
    """
    bgr = numpy.array(rgb_image, dtype=numpy.float32)
    if bgr.ndim == 2:
        # Grayscale: replicate the single plane into three channels.
        bgr = numpy.repeat(bgr[:, :, None], 3, axis=2)
    # Reverse the channel axis: RGB -> BGR.
    bgr = bgr[:, :, ::-1]
    if bgr_mean is not None:
        bgr -= bgr_mean
    return bgr.transpose((2, 0, 1))
def normalize_label(label_data, shape, flatten=False):
    """
    Given a 0, 1, 2, or 3-dimensional label_data and a default
    shape of the form (1, y, x), returns a 3d tensor: scalars and label
    vectors are broadcast to fill the shape; with flatten=True only the
    first label/channel of multi-label data is kept.
    """
    dims = len(numpy.shape(label_data))
    if dims <= 2:
        # Scalar data on this channel: fill shape
        if dims == 1:
            if flatten:
                # Reduce a label vector to its first entry (0 if empty).
                label_data = label_data[0] if len(label_data) else 0
            else:
                # One full-size constant channel per label in the vector.
                return (numpy.ones(shape, dtype=numpy.int16) *
                        numpy.asarray(label_data, dtype=numpy.int16)
                        [:, numpy.newaxis, numpy.newaxis])
        return numpy.full(shape, label_data, dtype=numpy.int16)
    else:
        if dims == 3:
            if flatten:
                # Keep only the first channel of the channel stack.
                label_data = label_data[0]
            else:
                return label_data
        # 2d (or flattened 3d) pixel map: add a leading channel axis.
        return label_data[numpy.newaxis]
if __name__ == '__main__':
    # Smoke test (Python 2 syntax): exercises dataset loading and
    # prefetching against a small local Broden-style dataset copy.
    import numpy
    ds = SegmentationData('~/bulk/small_test')
    print 'sample has size', ds.size()
    i = ds.size() - 1
    print 'last filename is', ds.filename(i)
    print 'image shape read was', imread(ds.filename(i)).shape
    sd = ds.segmentation_data('texture', i)
    print 'texture seg shape read was', sd.shape
    fs = ds.full_segmentation_data(i)
    print 'texture seg datatype', sd.dtype
    # Labels present in the last image's texture segmentation.
    found = list(numpy.bincount(sd.ravel()).nonzero()[0])
    print 'unique textures seen', ';'.join(
        ds.name('texture', f) for f in found)
    fs, shape = ds.resolve_segmentation(ds.metadata(i))
    print 'full segmentation', shape, ', '.join(fs.keys())
    # Exercise the multiprocess prefetcher for a few batches.
    pf = SegmentationPrefetcher(ds, categories=['texture'],
                                segmentation_shape=(96, 96), once=True)
    print ds.size()
    for i, dat in enumerate(pf.batches()):
        for d in dat:
            print i, d['i'], d.keys()
        if i >= 5:
            break
    for i, dat in enumerate(pf.tensor_batches()):
        for d in dat:
            print i, numpy.shape(d), numpy.max(d)
        if i >= 5:
            break
    print 'count of object labels', ds.label_size('object')
    for j in range(min(10, ds.label_size('object'))):
        print ds.name('object', j), 'freq', ds.frequency('object', j), (
            'cov'), ds.coverage('object', j)
| {
"repo_name": "bonyuta0204/NetDissec",
"path": "src/loadseg.py",
"copies": "1",
"size": "26369",
"license": "mit",
"hash": 4619950005472423000,
"line_mean": 35.5221606648,
"line_max": 88,
"alpha_frac": 0.564716144,
"autogenerated": false,
"ratio": 4.1240225211135435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00025669462913002685,
"num_lines": 722
} |
from functools import partial
import operator
import os
import shutil
import tempfile
import time
import numpy
from numpy.testing import assert_raises, assert_equal
from six.moves import range, cPickle
from fuel import config
from fuel.iterator import DataIterator
from fuel.utils import do_not_pickle_attributes, find_in_data_path, Subset
from fuel.utils.parallel import producer_consumer
class TestSubset(object):
    """Unit tests for fuel.utils.Subset: constructor validation,
    list/slice canonicalization, addition of subsets, indexing within a
    subset, and the internal sanity-check helpers."""
    def test_raises_value_error_on_negative_indices(self):
        # Subset should not support lists with negative elements.
        assert_raises(ValueError, Subset, [0, -1], 2)
    def test_raises_value_error_on_too_large_indices(self):
        # Subset should not support lists with indices greater or equal to
        # the original number of examples.
        assert_raises(ValueError, Subset, [0, 10], 2)
    def test_raises_value_error_on_negative_slices(self):
        # Subset should not support slices with negative start, stop or step.
        assert_raises(ValueError, Subset, slice(-1, None, None), 2)
        assert_raises(ValueError, Subset, slice(None, -1, None), 2)
        assert_raises(ValueError, Subset, slice(None, None, -1), 2)
    def test_raises_value_error_on_slice_step_gt_1(self):
        assert_raises(ValueError, Subset, slice(0, 4, 2), 4)
    def test_raises_value_error_on_slice_out_of_bound(self):
        assert_raises(ValueError, Subset, slice(None, 10, None), 2)
        assert_raises(ValueError, Subset, slice(13, 18, None), 10)
        assert_raises(ValueError, Subset, slice(10, 10, None), 10)
    def test_raises_value_error_on_slice_start_gt_stop(self):
        assert_raises(ValueError, Subset, slice(11, 10, None), 15)
    def test_raises_value_error_on_slice_step_gt_one(self):
        assert_raises(ValueError, Subset, slice(5, 10, 2), 15)
    def test_list_num_examples(self):
        assert_equal(Subset([0, 3, 8, 13], 15).num_examples, 4)
    def test_slice_num_examples(self):
        assert_equal(Subset(slice(3, 18, 1), 50).num_examples, 15)
    def test_is_list_property(self):
        assert not Subset(slice(None, None, None), 2).is_list
        assert Subset([0, 1, 3], 4).is_list
    def test_lists_are_unique_and_sorted(self):
        assert_equal(Subset([0, 3, 3, 5], 10).list_or_slice, [0, 3, 5])
        assert_equal(Subset([0, 3, 1, 5], 10).list_or_slice, [0, 1, 3, 5])
    def test_contiguous_lists_are_transformed_into_slices(self):
        assert_equal(Subset([1, 2, 3], 10).list_or_slice, slice(1, 4, None))
    # Indexing within a subset, for each combination of subset backing
    # (list/slice) and request type (list/slice).
    def test_none_slice_request(self):
        assert_equal(Subset([1, 3, 5, 7], 8)[slice(None)], [1, 3, 5, 7])
        assert_equal(Subset(slice(0, 8, 1), 8)[slice(None)], slice(0, 8, 1))
    def test_list_subset_list_request(self):
        assert_equal(Subset([0, 2, 5, 7, 10, 15], 16)[[3, 2, 4]], [7, 5, 10])
    def test_list_subset_slice_request(self):
        assert_equal(Subset([0, 2, 5, 7, 10, 15], 16)[slice(1, 4, 2)], [2, 7])
    def test_slice_subset_list_request(self):
        assert_equal(Subset(slice(1, 14), 16)[[3, 2, 4]], [4, 3, 5])
    def test_slice_subset_slice_request(self):
        assert_equal(Subset(slice(1, 14), 16)[slice(1, 4, 2)],
                     slice(2, 5, 2))
    def test_add_raises_value_error_when_incompatible(self):
        # Adding two Subset instances should only work when they have the same
        # number of original examples.
        assert_raises(
            ValueError, operator.add, Subset([1, 3], 10), Subset([2, 4], 11))
    def test_add_list_list(self):
        assert_equal((Subset([0, 3, 2, 8], 10) +
                      Subset([0, 4, 5], 10)).list_or_slice,
                     [0, 2, 3, 4, 5, 8])
    def test_add_list_slice(self):
        assert_equal((Subset([0, 3, 2, 8], 10) +
                      Subset(slice(1, 5), 10)).list_or_slice,
                     [0, 1, 2, 3, 4, 8])
    def test_add_slice_list(self):
        assert_equal((Subset(slice(1, 5), 10) +
                      Subset([0, 3, 2, 8], 10)).list_or_slice,
                     [0, 1, 2, 3, 4, 8])
    def test_add_contiguous_single_step_slice_slice(self):
        assert_equal((Subset(slice(0, 4, 1), 10) +
                      Subset(slice(4, 7, 1), 10)).list_or_slice,
                     slice(0, 7, 1))
        assert_equal((Subset(slice(4, 7, 1), 10) +
                      Subset(slice(0, 4, 1), 10)).list_or_slice,
                     slice(0, 7, 1))
    def test_add_overlapping_single_step_slice_slice(self):
        assert_equal((Subset(slice(0, 6, 1), 10) +
                      Subset(slice(4, 7, 1), 10)).list_or_slice,
                     slice(0, 7, 1))
        assert_equal((Subset(slice(4, 7, 1), 10) +
                      Subset(slice(0, 6, 1), 10)).list_or_slice,
                     slice(0, 7, 1))
    def test_adding_slice_slice_falls_back_to_list(self):
        # If Subset can't find a way to add two slices together, it must
        # return a list-based Subset.
        assert_equal((Subset(slice(0, 4), 20) +
                      Subset(slice(12, 16), 20)).list_or_slice,
                     [0, 1, 2, 3, 12, 13, 14, 15])
    def test_safe_sorted_fancy_indexing_1(self):
        indexable = numpy.arange(10)
        assert_equal(Subset.sorted_fancy_indexing(indexable, [0]), [0])
    def test_safe_sorted_fancy_indexing_gt_1(self):
        indexable = numpy.arange(10)
        assert_equal(Subset.sorted_fancy_indexing(indexable, [0, 5, 2]),
                     [0, 5, 2])
    def test_list_request_sanity_check_raises_error_on_empty_list(self):
        assert_raises(ValueError, Subset([0], 8)._list_request_sanity_check,
                      [], 1)
    def test_list_request_sanity_check_raises_error_on_negative_index(self):
        assert_raises(ValueError, Subset([0], 8)._list_request_sanity_check,
                      [-1], 1)
    def test_list_request_sanity_check_raises_error_on_index_geq_num_ex(self):
        assert_raises(ValueError, Subset([0], 8)._list_request_sanity_check,
                      [1], 1)
        assert_raises(ValueError, Subset([0], 8)._list_request_sanity_check,
                      [2], 1)
    def test_slice_request_sanity_check_raises_error_on_negative_attr(self):
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(-1, None, None), 1)
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(None, -1, None), 1)
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(None, None, -1), 1)
    def test_slice_request_sanity_check_raises_error_on_stop_gt_num_ex(self):
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(None, 2), 1)
    def test_slice_request_sanity_check_raises_error_on_start_geq_num_ex(self):
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(1, None), 1)
        assert_raises(ValueError, Subset([0], 8)._slice_request_sanity_check,
                      slice(2, None), 1)
    def test_slice_request_sanity_check_raises_error_on_start_geq_stop(self):
        assert_raises(ValueError,
                      Subset([0, 1, 2], 8)._slice_request_sanity_check,
                      slice(1, 1), 3)
        assert_raises(ValueError,
                      Subset([0, 1, 2], 8)._slice_request_sanity_check,
                      slice(2, 1), 3)
    def test_raises_value_error_on_indexing_empty_subset(self):
        assert_raises(
            ValueError, Subset([], 2).index_within_subset, [1, 2], [1])
        assert_raises(
            ValueError, Subset([], 2).index_within_subset, [1, 2], slice(1, 2))
        assert_raises(
            ValueError, Subset(slice(0, 0), 2).index_within_subset,
            [1, 2], [1])
        assert_raises(
            ValueError, Subset(slice(0, 0), 2).index_within_subset,
            [1, 2], slice(1, 2))
@do_not_pickle_attributes("non_picklable", "bulky_attr")
class DummyClass(object):
    """Fixture whose listed attributes are rebuilt by load() rather than
    pickled (one is large, one is an unpicklable lambda)."""
    def __init__(self):
        self.load()
    def load(self):
        # Recreate the attributes that the decorator excludes from pickling.
        self.bulky_attr = list(range(100))
        self.non_picklable = lambda x: x
class FaultyClass(object):
    """Fixture lacking the load() method that do_not_pickle_attributes
    requires; used to test the decorator's validation."""
    pass
@do_not_pickle_attributes("iterator")
class UnpicklableClass(object):
    """Fixture whose excluded attribute is itself unpicklable; pickling
    should still fail, exercising the decorator's error path."""
    def __init__(self):
        self.load()
    def load(self):
        self.iterator = DataIterator(None)
@do_not_pickle_attributes("attribute")
class NonLoadingClass(object):
    """Fixture whose load() never sets the excluded attribute; accessing
    it should raise ValueError."""
    def load(self):
        pass
class TestFindInDataPath(object):
    """Tests for fuel.utils.find_in_data_path using a temporary two-entry
    data path with partially overlapping file names."""
    def setUp(self):
        # Build tempdir/dir1 and tempdir/dir2 and point config.data_path
        # at them (dir1 first, so it wins for duplicated names).
        self.tempdir = tempfile.mkdtemp()
        os.mkdir(os.path.join(self.tempdir, 'dir1'))
        os.mkdir(os.path.join(self.tempdir, 'dir2'))
        self.original_data_path = config.data_path
        config.data_path = os.path.pathsep.join(
            [os.path.join(self.tempdir, 'dir1'),
             os.path.join(self.tempdir, 'dir2')])
        # file_1.txt exists in both directories; file_2.txt only in dir2.
        with open(os.path.join(self.tempdir, 'dir1', 'file_1.txt'), 'w'):
            pass
        with open(os.path.join(self.tempdir, 'dir2', 'file_1.txt'), 'w'):
            pass
        with open(os.path.join(self.tempdir, 'dir2', 'file_2.txt'), 'w'):
            pass
    def tearDown(self):
        # Restore the global config and remove the temporary tree.
        config.data_path = self.original_data_path
        shutil.rmtree(self.tempdir)
    def test_returns_file_path(self):
        assert_equal(find_in_data_path('file_2.txt'),
                     os.path.join(self.tempdir, 'dir2', 'file_2.txt'))
    def test_returns_first_file_found(self):
        assert_equal(find_in_data_path('file_1.txt'),
                     os.path.join(self.tempdir, 'dir1', 'file_1.txt'))
    def test_raises_error_on_file_not_found(self):
        assert_raises(IOError, find_in_data_path, 'dummy.txt')
class TestDoNotPickleAttributes(object):
    """Tests for the do_not_pickle_attributes decorator, using the
    fixture classes defined above."""
    def test_load(self):
        # A pickle round-trip must rebuild the excluded attributes.
        instance = cPickle.loads(cPickle.dumps(DummyClass()))
        assert_equal(instance.bulky_attr, list(range(100)))
        assert instance.non_picklable is not None
    def test_value_error_no_load_method(self):
        assert_raises(ValueError, do_not_pickle_attributes("x"), FaultyClass)
    def test_value_error_iterator(self):
        assert_raises(ValueError, cPickle.dumps, UnpicklableClass())
    def test_value_error_attribute_non_loaded(self):
        assert_raises(ValueError, getattr, NonLoadingClass(), 'attribute')
def send_integers(socket, n):
    """Producer half of the squaring test: announce the count, then
    stream the squares 0**2 .. (n-1)**2 over the socket."""
    socket.send_pyobj(n)
    for value in range(n):
        socket.send_pyobj(value ** 2)
        # This works around strange bug in (probably) libzmq on
        # OS X 10.9 which one of the pyzmq developers couldn't reproduce
        # with all the same library versions (albeit OS X 10.10)... real
        # workers will never be this trivial.
        time.sleep(1e-6)
def receive_integers(socket):
    """Consumer half of the squaring test: read the announced count, then
    return the sum of that many received values."""
    count = socket.recv_pyobj()
    return sum(socket.recv_pyobj() for _ in range(count))
def test_producer_consumer():
    # End-to-end check of fuel's producer_consumer: the consumer's sum of
    # received squares must equal the directly computed sum.
    assert (producer_consumer(partial(send_integers, n=2000),
                              receive_integers)
            == sum(i ** 2 for i in range(2000)))
| {
"repo_name": "markusnagel/fuel",
"path": "tests/test_utils.py",
"copies": "1",
"size": "11136",
"license": "mit",
"hash": 6970396646501087000,
"line_mean": 37.6666666667,
"line_max": 79,
"alpha_frac": 0.5915948276,
"autogenerated": false,
"ratio": 3.341134113411341,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9432728941011341,
"avg_score": 0,
"num_lines": 288
} |
from functools import partial
import operator
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Series,
isna,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
import pandas.core.nanops as nanops
use_bn = nanops._USE_BOTTLENECK
has_c16 = hasattr(np, "complex128")
@pytest.fixture(params=[True, False])
def skipna(request):
"""
Fixture to pass skipna to nanops functions.
"""
return request.param
class TestnanopsDataFrame:
def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
arr_shape = (11, 7)
self.arr_float = np.random.randn(*arr_shape)
self.arr_float1 = np.random.randn(*arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, arr_shape)
self.arr_bool = np.random.randint(0, 2, arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype("S")
self.arr_utf = np.abs(self.arr_float).astype("U")
self.arr_date = np.random.randint(0, 20000, arr_shape).astype("M8[ns]")
self.arr_tdelta = np.random.randint(0, 20000, arr_shape).astype("m8[ns]")
self.arr_nan = np.tile(np.nan, arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])
self.arr_obj = np.vstack(
[
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
self.arr_complex.astype("O"),
self.arr_str.astype("O"),
self.arr_utf.astype("O"),
self.arr_date.astype("O"),
self.arr_tdelta.astype("O"),
]
)
with np.errstate(invalid="ignore"):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])
self.arr_float_2d = self.arr_float
self.arr_float1_2d = self.arr_float1
self.arr_nan_2d = self.arr_nan
self.arr_float_nan_2d = self.arr_float_nan
self.arr_float1_nan_2d = self.arr_float1_nan
self.arr_nan_float1_2d = self.arr_nan_float1
self.arr_float_1d = self.arr_float[:, 0]
self.arr_float1_1d = self.arr_float1[:, 0]
self.arr_nan_1d = self.arr_nan[:, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, "asm8", res)
if (
axis != 0
and hasattr(targ, "shape")
and targ.ndim
and targ.shape != res.shape
):
res = np.split(res, [targ.shape[0]], axis=0)[0]
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except AssertionError:
# handle timedelta dtypes
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
raise
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == "O":
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
cast_dtype = "c16" if has_c16 else "f8"
res = res.astype(cast_dtype)
targ = targ.astype(cast_dtype)
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
raise
tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)
tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)
def check_fun_data(
self,
testfunc,
targfunc,
testarval,
targarval,
skipna,
check_dtype=True,
empty_targfunc=None,
**kwargs,
):
for axis in list(range(targarval.ndim)) + [None]:
targartempval = targarval if skipna else testarval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if testarval.ndim <= 1:
return
# Recurse on lower-dimension
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
self.check_fun_data(
testfunc,
targfunc,
testarval2,
targarval2,
skipna=skipna,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs,
)
def check_fun(
self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs
):
targar = testar
if testar.endswith("_nan") and hasattr(self, testar[:-4]):
targar = testar[:-4]
testarval = getattr(self, testar)
targarval = getattr(self, targar)
self.check_fun_data(
testfunc,
targfunc,
testarval,
targarval,
skipna=skipna,
empty_targfunc=empty_targfunc,
**kwargs,
)
def check_funs(
self,
testfunc,
targfunc,
skipna,
allow_complex=True,
allow_all_nan=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs,
):
self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs)
objs = [
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
]
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_complex_nan", skipna, **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs)
objs += [self.arr_complex.astype("O")]
if allow_date:
targfunc(self.arr_date)
self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs)
objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs)
objs += [self.arr_tdelta.astype("O")]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == "convert":
targfunc = partial(
self._badobj_wrap, func=targfunc, allow_complex=allow_complex
)
self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs)
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == "O":
if allow_complex:
value = value.astype("c16")
else:
value = value.astype("f8")
return func(value, **kwargs)
@pytest.mark.xfail(reason="GH12863: numpy result won't match for object type")
@pytest.mark.parametrize(
"nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)]
)
def test_nan_funcs(self, nan_op, np_op, skipna):
self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False)
def test_nansum(self, skipna):
self.check_funs(
nanops.nansum,
np.sum,
skipna,
allow_date=False,
check_dtype=False,
empty_targfunc=np.nansum,
)
def test_nanmean(self, skipna):
self.check_funs(
nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False
)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
for a in [2 ** 55, -(2 ** 55), 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
assert result.dtype == np.float64
@pytest.mark.parametrize(
"dtype",
[
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
getattr(np, "float128", None),
],
)
def test_returned_dtype(self, dtype):
if dtype is None:
# no float128 available
return
s = Series(range(10), dtype=dtype)
group_a = ["mean", "std", "var", "skew", "kurt"]
group_b = ["min", "max"]
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
assert result.dtype == np.float64
else:
assert result.dtype == dtype
def test_nanmedian(self, skipna):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self.check_funs(
nanops.nanmedian,
np.median,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanvar(self, ddof, skipna):
self.check_funs(
nanops.nanvar,
np.var,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanstd(self, ddof, skipna):
self.check_funs(
nanops.nanstd,
np.std,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
ddof=ddof,
)
@td.skip_if_no_scipy
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof, skipna):
from scipy.stats import sem
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nansem,
sem,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize(
"nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
)
def test_nanops_with_warnings(self, nan_op, np_op, skipna):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self.check_funs(nan_op, np_op, skipna, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (
hasattr(nullnan, "all")
and nullnan.all()
or not hasattr(nullnan, "all")
and nullnan
):
res = -1
return res
def test_nanargmax(self, skipna):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False)
def test_nanargmin(self, skipna):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmin)
self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype("f8")
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.0
return result
@td.skip_if_no_scipy
def test_nanskew(self, skipna):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nanskew,
func,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
)
@td.skip_if_no_scipy
def test_nankurt(self, skipna):
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nankurt,
func,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanprod(self, skipna):
self.check_funs(
nanops.nanprod,
np.prod,
skipna,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)
res11 = checkfun(
self.arr_float_nan_2d,
self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)
res24 = checkfun(
self.arr_float_nan_2d,
self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
res25 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1,
**kwargs,
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)
res11 = checkfun(
self.arr_float_nan_1d,
self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)
res24 = checkfun(
self.arr_float_nan_1d,
self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
res25 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1,
**kwargs,
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson")
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
@td.skip_if_no_scipy
def test_nancorr_kendall(self):
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
@td.skip_if_no_scipy
def test_nancorr_spearman(self):
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
@td.skip_if_no_scipy
def test_invalid_method(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
with pytest.raises(ValueError, match=msg):
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo")
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
# Lower dimension for next step in the loop
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
@pytest.mark.parametrize(
"op,nanop",
[
(operator.eq, nanops.naneq),
(operator.ne, nanops.nanne),
(operator.gt, nanops.nangt),
(operator.ge, nanops.nange),
(operator.lt, nanops.nanlt),
(operator.le, nanops.nanle),
],
)
def test_nan_comparison(self, op, nanop):
targ0 = op(self.arr_float, self.arr_float1)
self.check_nancomp(nanop, targ0)
def check_bool(self, func, value, correct):
while getattr(value, "ndim", True):
res0 = func(value)
if correct:
assert res0
else:
assert not res0
if not hasattr(value, "ndim"):
break
# Reduce dimension for next step in the loop
value = np.take(value, 0, axis=-1)
def test__has_infs(self):
pairs = [
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex", False),
("arr_complex_nan", False),
("arr_nan_nanj", False),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
]
pairs_float = [
("arr_float", False),
("arr_nan", False),
("arr_float_nan", False),
("arr_nan_nan", False),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
]
for arr, correct in pairs:
val = getattr(self, arr)
self.check_bool(nanops._has_infs, val, correct)
for arr, correct in pairs_float:
val = getattr(self, arr)
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype("f4"), correct)
self.check_bool(nanops._has_infs, val.astype("f2"), correct)
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_complex.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_int.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_bool.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_str.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_utf.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_date.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, "test")
class TestEnsureNumeric:
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(["1", "2", "3"], dtype=object)
assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(["foo", "bar", "baz"], dtype=object)
msg = r"Could not convert .* to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric(s_values)
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric("1"), 1.0)
assert np.allclose(nanops._ensure_numeric("1.1"), 1.1)
assert np.allclose(nanops._ensure_numeric("1+1j"), 1 + 1j)
def test_non_convertable_values(self):
msg = "Could not convert foo to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric("foo")
# with the wrong type, python raises TypeError for us
msg = "argument must be a string or a number"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric({})
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric([])
class TestNanvarFixedValues:
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance, rtol=1e-2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance, rtol=1e-2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5, rtol=1e-2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(
actual_variance, np.array([self.variance, 1.0 / 12]), rtol=1e-2
)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
tm.assert_almost_equal(variance_1, var, rtol=1e-2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2)
# The overestimated variance.
tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array(
[
[0.97303362, 0.21869576, 0.55560287],
[0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292],
]
)
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array(
[
[
[0.13762259, 0.05619224, 0.11568816],
[0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449],
],
[
[0.09519783, 0.16435395, 0.05082054],
[0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163],
],
]
)
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
assert result == 0.0
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
assert skew == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(right_tailed) > 0
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
tm.assert_almost_equal(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
assert kurt == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(right_tailed) > 0
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
tm.assert_almost_equal(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
class TestDatetime64NaNOps:
# Enabling mean changes the behavior of DataFrame.mean
# See https://github.com/pandas-dev/pandas/issues/24752
def test_nanmean(self):
dti = pd.date_range("2016-01-01", periods=3)
expected = dti[1]
for obj in [dti, DatetimeArray(dti), Series(dti)]:
result = nanops.nanmean(obj)
assert result == expected
dti2 = dti.insert(1, pd.NaT)
for obj in [dti2, DatetimeArray(dti2), Series(dti2)]:
result = nanops.nanmean(obj)
assert result == expected
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_nanmean_skipna_false(self, dtype):
arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3)
arr[-1, -1] = "NaT"
result = nanops.nanmean(arr, skipna=False)
assert result is pd.NaT
result = nanops.nanmean(arr, axis=0, skipna=False)
expected = np.array([4, 5, "NaT"], dtype=arr.dtype)
tm.assert_numpy_array_equal(result, expected)
result = nanops.nanmean(arr, axis=1, skipna=False)
expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]])
tm.assert_numpy_array_equal(result, expected)
def test_use_bottleneck():
if nanops._BOTTLENECK_INSTALLED:
pd.set_option("use_bottleneck", True)
assert pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", False)
assert not pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", use_bn)
@pytest.mark.parametrize(
"numpy_op, expected",
[
(np.sum, 10),
(np.nansum, 10),
(np.mean, 2.5),
(np.nanmean, 2.5),
(np.median, 2.5),
(np.nanmedian, 2.5),
(np.min, 1),
(np.max, 4),
(np.nanmin, 1),
(np.nanmax, 4),
],
)
def test_numpy_ops(numpy_op, expected):
# GH8383
result = numpy_op(Series([1, 2, 3, 4]))
assert result == expected
@pytest.mark.parametrize(
"operation",
[
nanops.nanany,
nanops.nanall,
nanops.nansum,
nanops.nanmean,
nanops.nanmedian,
nanops.nanstd,
nanops.nanvar,
nanops.nansem,
nanops.nanargmax,
nanops.nanargmin,
nanops.nanmax,
nanops.nanmin,
nanops.nanskew,
nanops.nankurt,
nanops.nanprod,
],
)
def test_nanops_independent_of_mask_param(operation):
# GH22764
s = Series([1, 2, np.nan, 3, np.nan, 4])
mask = s.isna()
median_expected = operation(s)
median_result = operation(s, mask=mask)
assert median_expected == median_result
| {
"repo_name": "datapythonista/pandas",
"path": "pandas/tests/test_nanops.py",
"copies": "2",
"size": "38532",
"license": "bsd-3-clause",
"hash": -6876011179655665000,
"line_mean": 34.3829201102,
"line_max": 88,
"alpha_frac": 0.5658413786,
"autogenerated": false,
"ratio": 3.2885550908935732,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9852596642386484,
"avg_score": 0.00035996542141779813,
"num_lines": 1089
} |
from functools import partial
import operator
def compare(op, expected_val, err_str):
def check(obj):
if not op(obj, expected_val):
return [err_str]
else:
return []
return check
equals = partial(compare, operator.eq)
not_equals = partial(compare, operator.ne)
is_ = partial(compare, operator.is_)
is_not = partial(compare, operator.is_not)
def attribute(attr, check_func):
def check(obj):
original_obj = obj
attributes = attr.split('.')
try:
for name in attributes:
obj = getattr(obj, name)
return check_func(obj)
except AttributeError:
return ["Attribute {} from {} for obj {} raised AttributeError".format(name, attr, original_obj)]
return check
def key(key, check_func):
def check(obj):
try:
return check_func(obj[key])
except KeyError:
return ["{} has no key {}".format(obj, key)]
return check
def compose(*checks):
def check(obj):
errors = []
for check in checks:
errors += check(obj)
return errors
return check
| {
"repo_name": "AbletonAG/abl.util",
"path": "abl/util/checks.py",
"copies": "1",
"size": "1162",
"license": "mit",
"hash": 3175455239623694300,
"line_mean": 21.7843137255,
"line_max": 109,
"alpha_frac": 0.578313253,
"autogenerated": false,
"ratio": 4.091549295774648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006684491978609625,
"num_lines": 51
} |
from functools import partial
import operator
from peewee import *
from playhouse.db_url import connect as db_url_connect
from huey.api import Huey
from huey.constants import EmptyData
from huey.exceptions import ConfigurationError
from huey.storage import BaseStorage
class BytesBlobField(BlobField):
def python_value(self, value):
return value if isinstance(value, bytes) else bytes(value)
class SqlStorage(BaseStorage):
    """Huey storage backend persisting queue, schedule and results in SQL.

    State lives in three peewee models (Task, Schedule, KV) created on
    demand against the supplied database.  Multiple huey queues can share
    one database: every row is tagged with the queue ``name``.
    """

    def __init__(self, name='huey', database=None, **kwargs):
        super(SqlStorage, self).__init__(name)
        if database is None:
            raise ConfigurationError('Use of SqlStorage requires a '
                                     'database= argument, which should be a '
                                     'peewee database or a connection string.')
        if isinstance(database, Database):
            self.database = database
        else:
            # Treat database argument as a URL connection string.
            self.database = db_url_connect(database)
        self.KV, self.Schedule, self.Task = self.create_models()
        self.create_tables()

    def create_models(self):
        """Declare the three peewee models bound to this storage's database."""
        class Base(Model):
            class Meta:
                database = self.database

        class KV(Base):
            # Result-store entries: one value per (queue, key).
            queue = CharField()
            key = CharField()
            value = BytesBlobField()

            class Meta:
                primary_key = CompositeKey('queue', 'key')

        class Schedule(Base):
            # Tasks scheduled for later execution, read back by timestamp.
            queue = CharField()
            data = BytesBlobField()
            timestamp = TimestampField(resolution=1000)

            class Meta:
                indexes = ((('queue', 'timestamp'), False),)

        class Task(Base):
            # Pending tasks, consumed highest-priority first, then FIFO.
            queue = CharField()
            data = BytesBlobField()
            priority = FloatField(default=0.0)
        Task.add_index(Task.priority.desc(), Task.id)

        return (KV, Schedule, Task)

    def create_tables(self):
        with self.database:
            self.database.create_tables([self.KV, self.Schedule, self.Task])

    def drop_tables(self):
        with self.database:
            self.database.drop_tables([self.KV, self.Schedule, self.Task])

    def close(self):
        return self.database.close()

    # Base queries scoped to this storage's queue name.
    def tasks(self, *columns):
        return self.Task.select(*columns).where(self.Task.queue == self.name)

    def schedule(self, *columns):
        return (self.Schedule.select(*columns)
                .where(self.Schedule.queue == self.name))

    def kv(self, *columns):
        return self.KV.select(*columns).where(self.KV.queue == self.name)

    def enqueue(self, data, priority=None):
        self.Task.create(queue=self.name, data=data, priority=priority or 0)

    def dequeue(self):
        """Atomically claim and delete the next task; returns its data.

        Uses SELECT ... FOR UPDATE on backends that support it so that
        concurrent consumers cannot claim the same row.
        """
        query = (self.tasks(self.Task.id, self.Task.data)
                 .order_by(self.Task.priority.desc(), self.Task.id)
                 .limit(1))
        if self.database.for_update:
            query = query.for_update()
        with self.database.atomic():
            try:
                task = query.get()
            except self.Task.DoesNotExist:
                return
            nrows = self.Task.delete().where(self.Task.id == task.id).execute()
            # If another consumer deleted the row first, return nothing
            # and let the caller poll again.
            if nrows == 1:
                return task.data

    def queue_size(self):
        return self.tasks().count()

    def enqueued_items(self, limit=None):
        """Return pending task payloads in dequeue order (non-destructive)."""
        query = self.tasks(self.Task.data).order_by(self.Task.priority.desc(),
                                                    self.Task.id)
        if limit is not None:
            query = query.limit(limit)
        return list(map(operator.itemgetter(0), query.tuples()))

    def flush_queue(self):
        self.Task.delete().where(self.Task.queue == self.name).execute()

    def add_to_schedule(self, data, timestamp, utc):
        # NOTE: the `utc` flag is accepted for API compatibility but not
        # used; timestamps are stored as given.
        self.Schedule.create(queue=self.name, data=data, timestamp=timestamp)

    def read_schedule(self, timestamp):
        """Destructively read all schedule entries due at or before *timestamp*."""
        query = (self.schedule(self.Schedule.id, self.Schedule.data)
                 .where(self.Schedule.timestamp <= timestamp)
                 .tuples())
        if self.database.for_update:
            query = query.for_update()
        with self.database.atomic():
            results = list(query)
            if not results:
                return []
            id_list, data = zip(*results)
            (self.Schedule
             .delete()
             .where(self.Schedule.id.in_(id_list))
             .execute())
            return list(data)

    def schedule_size(self):
        return self.schedule().count()

    def scheduled_items(self):
        """Return scheduled payloads ordered by due time (non-destructive)."""
        tasks = (self.schedule(self.Schedule.data)
                 .order_by(self.Schedule.timestamp)
                 .tuples())
        return list(map(operator.itemgetter(0), tasks))

    def flush_schedule(self):
        (self.Schedule
         .delete()
         .where(self.Schedule.queue == self.name)
         .execute())

    def put_data(self, key, value, is_result=False):
        """Upsert a (queue, key) -> value entry in the result store."""
        if isinstance(self.database, PostgresqlDatabase):
            # Postgres needs an explicit ON CONFLICT ... DO UPDATE;
            # other backends support REPLACE semantics directly.
            (self.KV
             .insert(queue=self.name, key=key, value=value)
             .on_conflict(conflict_target=[self.KV.queue, self.KV.key],
                          preserve=[self.KV.value])
             .execute())
        else:
            self.KV.replace(queue=self.name, key=key, value=value).execute()

    def peek_data(self, key):
        """Read a value without removing it; EmptyData if absent."""
        try:
            kv = self.kv(self.KV.value).where(self.KV.key == key).get()
        except self.KV.DoesNotExist:
            return EmptyData
        else:
            return kv.value

    def pop_data(self, key):
        """Atomically read and delete a value; EmptyData if absent or raced."""
        query = self.kv().where(self.KV.key == key)
        if self.database.for_update:
            query = query.for_update()
        with self.database.atomic():
            try:
                kv = query.get()
            except self.KV.DoesNotExist:
                return EmptyData
            else:
                dq = self.KV.delete().where(
                    (self.KV.queue == self.name) &
                    (self.KV.key == key))
                return kv.value if dq.execute() == 1 else EmptyData

    def has_data_for_key(self, key):
        return self.kv().where(self.KV.key == key).exists()

    def put_if_empty(self, key, value):
        """Insert only if the key is absent; False when it already exists.

        Relies on the (queue, key) primary key raising IntegrityError on
        duplicates, which is what makes this usable as a lock primitive.
        """
        try:
            with self.database.atomic():
                self.KV.insert(queue=self.name, key=key, value=value).execute()
        except IntegrityError:
            return False
        else:
            return True

    def result_store_size(self):
        return self.kv().count()

    def result_items(self):
        query = self.kv(self.KV.key, self.KV.value).tuples()
        return dict((k, v) for k, v in query.iterator())

    def flush_results(self):
        self.KV.delete().where(self.KV.queue == self.name).execute()
SqlHuey = partial(Huey, storage_class=SqlStorage)
| {
"repo_name": "coleifer/huey",
"path": "huey/contrib/sql_huey.py",
"copies": "2",
"size": "6855",
"license": "mit",
"hash": -1680070728710509000,
"line_mean": 31.1830985915,
"line_max": 79,
"alpha_frac": 0.5622173596,
"autogenerated": false,
"ratio": 4.034726309593879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00039123630672926443,
"num_lines": 213
} |
from functools import partial
import os
from ..core.compute import Compute
from ..taxbrain.mock_compute import (MockCompute,
MockFailedCompute,
NodeDownCompute,
)
import requests_mock
# requests_mock patches methods whose names start with this prefix
# (instead of the default 'test') — presumably so the dropq_* helpers
# get mocked in tests; TODO confirm against the test suite.
requests_mock.Mocker.TEST_PREFIX = 'dropq'
# Comma-separated worker hostnames from the environment.  Note that an
# unset/empty variable yields [''] (a single empty string), not [].
btax_workers = os.environ.get('BTAX_WORKERS', '')
BTAX_WORKERS = btax_workers.split(",")
def package_up_vars(self, user_mods, first_budget_year):
    """Keep only btax_*/start_year entries, unwrapping single-item sequences.

    Values that support indexing (lists, tuples, ...) are replaced by
    their first element.  ``first_budget_year`` is currently unused
    (TODO: is it needed here?).
    """
    packed = {}
    for name, value in user_mods.items():
        if not name.startswith(('btax_', 'start_year')):
            continue
        if hasattr(value, '__getitem__'):
            value = value[0]
        packed[name] = value
    return packed
def mock_submit_calculation(self, *args, **kwargs):
    """Test stand-in for submit_calculation: echo the args with a fake job id."""
    return list(args), 1
def mock_dropq_results_ready(arg, self, *args, **kwargs):
    # Test stand-in: meant to be used via functools.partial with a canned
    # status string bound as `arg` (e.g. "YES" or "FAIL").
    return [arg]
def mock_dropq_get_results(is_error, self, *args, **kwargs):
    """Test stand-in for dropq result fetching.

    Bound via functools.partial with *is_error*; any truthy value yields
    the canned error payload, otherwise a result dict whose table entries
    are all None.
    """
    if is_error:
        return {0: 'Error expected in test'}
    table_keys = ('mY_dec', 'mX_dec', 'df_dec', 'pdf_dec', 'cdf_dec',
                  'mY_bin', 'mX_bin', 'df_bin', 'pdf_bin', 'cdf_bin',
                  'fiscal_tot_diffs', 'fiscal_tot_base', 'fiscal_tot_ref')
    return {0: {name: None for name in table_keys}}
class DropqComputeBtax(Compute):
    """Compute subclass that submits B-Tax jobs to the dropq workers."""

    num_budget_years = 1
    package_up_vars = package_up_vars

    def submit_btax_calculation(self, user_mods, first_budget_year):
        """Package user_mods and POST a single job; False if nothing to send."""
        packed = self.package_up_vars(user_mods, first_budget_year)
        if not packed:
            return False
        data = {'user_mods': {first_budget_year: packed},
                'start_year': int(first_budget_year)}
        print('submitting btax data:', data)
        return self.submit([data], "http://{hn}/btax_start_job",
                           increment_counter=False,
                           use_wnc_offset=False)

    def btax_get_results(self, job_ids, job_failure=False):
        """Fetch results for the given job ids via the shared base helper."""
        return self._get_results_base(job_ids, job_failure=job_failure)
class MockComputeBtax(MockCompute, DropqComputeBtax):
    """Happy-path mock: submission echoes args, jobs report ready."""
    num_budget_years = 1
    package_up_vars = package_up_vars
    # NOTE(review): 'YES' is truthy, so mock_dropq_get_results takes its
    # error branch and returns {0: 'Error expected in test'} — confirm
    # this payload is intended for the success mock.
    dropq_get_results = partial(mock_dropq_get_results, 'YES')
    submit_calculation = mock_submit_calculation
    dropq_results_ready = partial(mock_dropq_results_ready, "YES")
class MockFailedComputeBtax(MockFailedCompute, DropqComputeBtax):
    """Failure-path mock: jobs report FAIL and results carry an error string."""
    num_budget_years = 1
    package_up_vars = package_up_vars
    dropq_get_results = partial(mock_dropq_get_results, 'Failure message')
    submit_calculation = mock_submit_calculation
    dropq_results_ready = partial(mock_dropq_results_ready, "FAIL")
class NodeDownComputeBtax(NodeDownCompute, DropqComputeBtax):
    """Mock simulating an unreachable worker node (see NodeDownCompute)."""
    num_budget_years = 1
    package_up_vars = package_up_vars
    dropq_get_results = partial(mock_dropq_get_results, 'Failure message')
    submit_calculation = mock_submit_calculation
    dropq_results_ready = partial(mock_dropq_results_ready, "FAIL")
| {
"repo_name": "OpenSourcePolicyCenter/PolicyBrain",
"path": "webapp/apps/btax/compute.py",
"copies": "2",
"size": "3307",
"license": "mit",
"hash": 6849730317821656000,
"line_mean": 34.1808510638,
"line_max": 74,
"alpha_frac": 0.6011490777,
"autogenerated": false,
"ratio": 3.514346439957492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 94
} |
from functools import partial
import os
from datetime import datetime, timedelta
import numpy as np
import torch
import neptune
from torch.autograd import Variable
from torch.optim.lr_scheduler import ExponentialLR
from tempfile import TemporaryDirectory
from steppy.base import Step, IdentityOperation
from steppy.adapter import Adapter, E
from toolkit.pytorch_transformers.utils import Averager, persist_torch_model
from toolkit.pytorch_transformers.validation import score_model
from .utils import (
get_logger,
sigmoid,
softmax,
make_apply_transformer,
read_masks,
get_list_of_image_predictions,
)
from .metrics import intersection_over_union, intersection_over_union_thresholds
from .postprocessing import crop_image, resize_image, binarize
logger = get_logger()
# Column in the validation metadata frame holding ground-truth mask paths.
Y_COLUMN = "file_path_mask"
# Original image resolution that predictions are mapped back to.
ORIGINAL_SIZE = (101, 101)
# Probability cutoff used when binarizing predicted masks.
THRESHOLD = 0.5
class Callback:
    """Base class for training hooks.

    Subclasses override the ``on_*`` methods; the trainer calls
    ``set_params`` once before training to wire in the model, optimizer,
    loss functions and validation data generator.
    """

    def __init__(self):
        self.epoch_id = None
        self.batch_id = None
        self.model = None
        self.optimizer = None
        self.loss_function = None
        self.output_names = None
        self.validation_datagen = None
        self.lr_scheduler = None

    def set_params(self, transformer, validation_datagen, *args, **kwargs):
        # Copy the pieces the hooks need off the (project) transformer.
        self.model = transformer.model
        self.optimizer = transformer.optimizer
        self.loss_function = transformer.loss_function
        self.output_names = transformer.output_names
        self.validation_datagen = validation_datagen
        self.transformer = transformer

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0

    def on_train_end(self, *args, **kwargs):
        pass

    def on_epoch_begin(self, *args, **kwargs):
        pass

    def on_epoch_end(self, *args, **kwargs):
        self.epoch_id += 1

    def training_break(self, *args, **kwargs):
        # Return True to ask the training loop to stop early.
        return False

    def on_batch_begin(self, *args, **kwargs):
        pass

    def on_batch_end(self, *args, **kwargs):
        self.batch_id += 1

    def get_validation_loss(self):
        # Evaluate validation losses at most once per epoch, caching the
        # result on the transformer so multiple callbacks share one pass.
        if self.epoch_id not in self.transformer.validation_loss.keys():
            self.transformer.validation_loss[self.epoch_id] = score_model(
                self.model, self.loss_function, self.validation_datagen
            )
        return self.transformer.validation_loss[self.epoch_id]
class CallbackList:
    """Fans every callback hook out to a collection of Callback objects."""

    def __init__(self, callbacks=None):
        if callbacks is None:
            self.callbacks = []
        elif isinstance(callbacks, Callback):
            # A single callback is wrapped in a one-element list.
            self.callbacks = [callbacks]
        else:
            self.callbacks = callbacks

    def __len__(self):
        return len(self.callbacks)

    def set_params(self, *args, **kwargs):
        for member in self.callbacks:
            member.set_params(*args, **kwargs)

    def on_train_begin(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_train_begin(*args, **kwargs)

    def on_train_end(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_train_end(*args, **kwargs)

    def on_epoch_begin(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_epoch_begin(*args, **kwargs)

    def on_epoch_end(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_epoch_end(*args, **kwargs)

    def training_break(self, *args, **kwargs):
        # Poll every member (no short-circuit) before aggregating, so all
        # callbacks get their hook invoked.
        votes = [member.training_break(*args, **kwargs)
                 for member in self.callbacks]
        return any(votes)

    def on_batch_begin(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_batch_begin(*args, **kwargs)

    def on_batch_end(self, *args, **kwargs):
        for member in self.callbacks:
            member.on_batch_end(*args, **kwargs)
class TrainingMonitor(Callback):
    """Logs rolling training-loss averages per batch and per epoch.

    ``epoch_every`` / ``batch_every`` control logging frequency; passing 0
    disables that level entirely (stored internally as False).
    """

    def __init__(self, epoch_every=None, batch_every=None):
        super().__init__()
        self.epoch_loss_averagers = {}  # loss name -> Averager
        if epoch_every == 0:
            self.epoch_every = False
        else:
            self.epoch_every = epoch_every
        if batch_every == 0:
            self.batch_every = False
        else:
            self.batch_every = batch_every

    def on_train_begin(self, *args, **kwargs):
        self.epoch_loss_averagers = {}
        self.epoch_id = 0
        self.batch_id = 0

    def on_epoch_end(self, *args, **kwargs):
        # Log (and reset) each loss's running average for the epoch.
        for name, averager in self.epoch_loss_averagers.items():
            epoch_avg_loss = averager.value
            averager.reset()
            if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
                logger.info(
                    "epoch {0} {1}: {2:.5f}".format(
                        self.epoch_id, name, epoch_avg_loss
                    )
                )
        self.epoch_id += 1

    def on_batch_end(self, metrics, *args, **kwargs):
        for name, loss in metrics.items():
            # NOTE(review): `.data.cpu().numpy()[0]` is pre-0.4 PyTorch
            # scalar extraction; on modern torch use `loss.item()`.
            loss = loss.data.cpu().numpy()[0]
            if name in self.epoch_loss_averagers.keys():
                self.epoch_loss_averagers[name].send(loss)
            else:
                self.epoch_loss_averagers[name] = Averager()
                self.epoch_loss_averagers[name].send(loss)
            if self.batch_every and ((self.batch_id % self.batch_every) == 0):
                logger.info(
                    "epoch {0} batch {1} {2}: {3:.5f}".format(
                        self.epoch_id, self.batch_id, name, loss
                    )
                )
        self.batch_id += 1
class ExponentialLRScheduler(Callback):
    """Decays the learning rate by ``gamma`` on an epoch or batch cadence.

    A frequency of 0 disables that cadence (stored internally as False).
    """

    def __init__(self, gamma, epoch_every=1, batch_every=None):
        super().__init__()
        self.gamma = gamma
        if epoch_every == 0:
            self.epoch_every = False
        else:
            self.epoch_every = epoch_every
        if batch_every == 0:
            self.batch_every = False
        else:
            self.batch_every = batch_every

    def set_params(self, transformer, validation_datagen, *args, **kwargs):
        # Overrides the base to additionally build the torch scheduler
        # around the transformer's optimizer.
        self.validation_datagen = validation_datagen
        self.model = transformer.model
        self.optimizer = transformer.optimizer
        self.loss_function = transformer.loss_function
        self.lr_scheduler = ExponentialLR(self.optimizer, self.gamma, last_epoch=-1)

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0
        logger.info(
            "initial lr: {0}".format(
                self.optimizer.state_dict()["param_groups"][0]["initial_lr"]
            )
        )

    def on_epoch_end(self, *args, **kwargs):
        # Uses (epoch_id + 1) so e.g. epoch_every=2 steps after epochs 2, 4, ...
        if self.epoch_every and (((self.epoch_id + 1) % self.epoch_every) == 0):
            self.lr_scheduler.step()
            logger.info(
                "epoch {0} current lr: {1}".format(
                    self.epoch_id + 1,
                    self.optimizer.state_dict()["param_groups"][0]["lr"],
                )
            )
        self.epoch_id += 1

    def on_batch_end(self, *args, **kwargs):
        if self.batch_every and ((self.batch_id % self.batch_every) == 0):
            self.lr_scheduler.step()
            logger.info(
                "epoch {0} batch {1} current lr: {2}".format(
                    self.epoch_id + 1,
                    self.batch_id + 1,
                    self.optimizer.state_dict()["param_groups"][0]["lr"],
                )
            )
        self.batch_id += 1
class ExperimentTiming(Callback):
    """Logs wall-clock timing: per-epoch duration and mean batch time."""

    def __init__(self, epoch_every=None, batch_every=None):
        super().__init__()
        if epoch_every == 0:
            self.epoch_every = False
        else:
            self.epoch_every = epoch_every
        if batch_every == 0:
            self.batch_every = False
        else:
            self.batch_every = batch_every
        self.batch_start = None   # timestamp of the current batch's start
        self.epoch_start = None   # timestamp of the current epoch's start
        self.current_sum = None   # accumulated batch durations this epoch
        self.current_mean = None  # running mean batch duration this epoch

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0
        logger.info("starting training...")

    def on_train_end(self, *args, **kwargs):
        logger.info("training finished")

    def on_epoch_begin(self, *args, **kwargs):
        # Log the previous epoch's duration (string sliced to drop the
        # microseconds), then reset the per-epoch accumulators.
        if self.epoch_id > 0:
            epoch_time = datetime.now() - self.epoch_start
            if self.epoch_every:
                if (self.epoch_id % self.epoch_every) == 0:
                    logger.info(
                        "epoch {0} time {1}".format(
                            self.epoch_id - 1, str(epoch_time)[:-7]
                        )
                    )
        self.epoch_start = datetime.now()
        self.current_sum = timedelta()
        self.current_mean = timedelta()
        logger.info("epoch {0} ...".format(self.epoch_id))

    def on_batch_begin(self, *args, **kwargs):
        # Accumulate the previous batch's duration before timing this one.
        if self.batch_id > 0:
            current_delta = datetime.now() - self.batch_start
            self.current_sum += current_delta
            self.current_mean = self.current_sum / self.batch_id
        if self.batch_every:
            if self.batch_id > 0 and (((self.batch_id - 1) % self.batch_every) == 0):
                logger.info(
                    "epoch {0} average batch time: {1}".format(
                        self.epoch_id, str(self.current_mean)[:-5]
                    )
                )
        if self.batch_every:
            if self.batch_id == 0 or self.batch_id % self.batch_every == 0:
                logger.info(
                    "epoch {0} batch {1} ...".format(self.epoch_id, self.batch_id)
                )
        self.batch_start = datetime.now()
class NeptuneMonitor(Callback):
    """Streams batch/epoch train losses and epoch validation losses to Neptune."""

    def __init__(self, image_nr, image_resize, model_name):
        super().__init__()
        self.model_name = model_name
        self.ctx = neptune.Context()
        # NOTE(review): this single averager appears unused — the per-name
        # dict built in on_train_begin is what accumulates losses.
        self.epoch_loss_averager = Averager()
        # NOTE(review): image_nr / image_resize are stored but not read in
        # this class — possibly kept for an image-channel feature; confirm.
        self.image_nr = image_nr
        self.image_resize = image_resize

    def on_train_begin(self, *args, **kwargs):
        self.epoch_loss_averagers = {}
        self.epoch_id = 0
        self.batch_id = 0

    def on_batch_end(self, metrics, *args, **kwargs):
        for name, loss in metrics.items():
            # Pre-0.4 PyTorch scalar extraction (`loss.item()` on modern torch).
            loss = loss.data.cpu().numpy()[0]
            if name in self.epoch_loss_averagers.keys():
                self.epoch_loss_averagers[name].send(loss)
            else:
                self.epoch_loss_averagers[name] = Averager()
                self.epoch_loss_averagers[name].send(loss)
            self.ctx.channel_send(
                "{} batch {} loss".format(self.model_name, name),
                x=self.batch_id,
                y=loss,
            )
        self.batch_id += 1

    def on_epoch_end(self, *args, **kwargs):
        self._send_numeric_channels()
        self.epoch_id += 1

    def _send_numeric_channels(self, *args, **kwargs):
        # Per-epoch averaged training losses...
        for name, averager in self.epoch_loss_averagers.items():
            epoch_avg_loss = averager.value
            averager.reset()
            self.ctx.channel_send(
                "{} epoch {} loss".format(self.model_name, name),
                x=self.epoch_id,
                y=epoch_avg_loss,
            )
        # ...followed by validation losses (model switched to eval mode
        # around the shared, cached validation pass).
        self.model.eval()
        val_loss = self.get_validation_loss()
        self.model.train()
        for name, loss in val_loss.items():
            loss = loss.data.cpu().numpy()[0]
            self.ctx.channel_send(
                "{} epoch_val {} loss".format(self.model_name, name),
                x=self.epoch_id,
                y=loss,
            )
class ValidationMonitor(Callback):
    """Runs the full validation pass: loss plus IOU and IOUT metrics.

    Raw predictions are pushed through a simplified postprocessing
    pipeline (resize/crop + binarize) before scoring against the masks
    read from disk in set_params.
    """

    def __init__(self, data_dir, loader_mode, epoch_every=None, batch_every=None):
        super().__init__()
        if epoch_every == 0:
            self.epoch_every = False
        else:
            self.epoch_every = epoch_every
        if batch_every == 0:
            self.batch_every = False
        else:
            self.batch_every = batch_every
        self.data_dir = data_dir
        self.validation_pipeline = postprocessing_pipeline_simplified
        self.loader_mode = loader_mode
        self.meta_valid = None
        self.y_true = None
        self.activation_func = None

    def set_params(
        self, transformer, validation_datagen, meta_valid=None, *args, **kwargs
    ):
        # Extends the base set_params: additionally loads ground-truth
        # masks and records the model's final activation function name.
        self.model = transformer.model
        self.optimizer = transformer.optimizer
        self.loss_function = transformer.loss_function
        self.output_names = transformer.output_names
        self.validation_datagen = validation_datagen
        self.meta_valid = meta_valid
        self.y_true = read_masks(self.meta_valid[Y_COLUMN].values)
        self.activation_func = transformer.activation_func
        self.transformer = transformer

    def get_validation_loss(self):
        # Bypass the base score_model path; _get_validation_loss handles
        # caching on the transformer itself (via setdefault).
        return self._get_validation_loss()

    def on_epoch_end(self, *args, **kwargs):
        if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
            self.model.eval()
            val_loss = self.get_validation_loss()
            self.model.train()
            for name, loss in val_loss.items():
                loss = loss.data.cpu().numpy()[0]  # pre-0.4 torch scalar
                logger.info(
                    "epoch {0} validation {1}: {2:.5f}".format(
                        self.epoch_id, name, loss
                    )
                )
        self.epoch_id += 1

    def _get_validation_loss(self):
        """Run inference + postprocessing, then score IOU / IOUT."""
        output, epoch_loss = self._transform()
        y_pred = self._generate_prediction(output)
        logger.info("Calculating IOU and IOUT Scores")
        iou_score = intersection_over_union(self.y_true, y_pred)
        iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
        logger.info("IOU score on validation is {}".format(iou_score))
        logger.info("IOUT score on validation is {}".format(iout_score))
        if not self.transformer.validation_loss:
            self.transformer.validation_loss = {}
        # Scores wrapped in Variables so every consumer can uniformly call
        # .data.cpu().numpy()[0] on them.
        self.transformer.validation_loss.setdefault(
            self.epoch_id,
            {
                "sum": epoch_loss,
                "iou": Variable(torch.Tensor([iou_score])),
                "iout": Variable(torch.Tensor([iout_score])),
            },
        )
        return self.transformer.validation_loss[self.epoch_id]

    def _transform(self):
        """Run the model over the validation generator.

        Returns (outputs, average_losses): outputs maps
        '<name>_prediction' to a list of per-image activation maps.
        """
        self.model.eval()
        batch_gen, steps = self.validation_datagen
        partial_batch_losses = []
        outputs = {}
        for batch_id, data in enumerate(batch_gen):
            X = data[0]
            targets_tensors = data[1:]
            # volatile=True is the pre-0.4 PyTorch no-grad inference mode.
            if torch.cuda.is_available():
                X = Variable(X, volatile=True).cuda()
                targets_var = []
                for target_tensor in targets_tensors:
                    targets_var.append(Variable(target_tensor, volatile=True).cuda())
            else:
                X = Variable(X, volatile=True)
                targets_var = []
                for target_tensor in targets_tensors:
                    targets_var.append(Variable(target_tensor, volatile=True))
            outputs_batch = self.model(X)
            if len(self.output_names) == 1:
                for (name, loss_function_one, weight), target in zip(
                    self.loss_function, targets_var
                ):
                    loss_sum = loss_function_one(outputs_batch, target) * weight
                    outputs.setdefault(self.output_names[0], []).append(
                        outputs_batch.data.cpu().numpy()
                    )
            else:
                # NOTE(review): this multi-output branch looks broken —
                # partial_batch_losses is a list, so the .setdefault call
                # below would raise AttributeError if this path ever ran.
                batch_losses = []
                for (name, loss_function_one, weight), output, target in zip(
                    self.loss_function, outputs_batch, targets_var
                ):
                    loss = loss_function_one(output, target) * weight
                    batch_losses.append(loss)
                    partial_batch_losses.setdefault(name, []).append(loss)
                    output_ = output.data.cpu().numpy()
                    outputs.setdefault(name, []).append(output_)
                loss_sum = sum(batch_losses)
            partial_batch_losses.append(loss_sum)
            if batch_id == steps:
                break
        self.model.train()
        average_losses = sum(partial_batch_losses) / steps
        # Flatten the per-batch arrays into per-image predictions and
        # re-key as '<name>_prediction'.
        outputs = {
            "{}_prediction".format(name): get_list_of_image_predictions(outputs_)
            for name, outputs_ in outputs.items()
        }
        # Apply the model's final activation on the raw maps.
        for name, prediction in outputs.items():
            if self.activation_func == "softmax":
                outputs[name] = [
                    softmax(single_prediction, axis=0)
                    for single_prediction in prediction
                ]
            elif self.activation_func == "sigmoid":
                outputs[name] = [sigmoid(np.squeeze(mask)) for mask in prediction]
            else:
                raise Exception("Only softmax and sigmoid activations are allowed")
        return outputs, average_losses

    def _generate_prediction(self, outputs):
        """Postprocess raw activation maps into binary masks (y_pred)."""
        data = {
            "callback_input": {"meta": self.meta_valid, "meta_valid": None},
            "unet_output": {**outputs},
        }
        with TemporaryDirectory() as cache_dirpath:
            pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode)
            output = pipeline.transform(data)
            y_pred = output["y_pred"]
            return y_pred
class ModelCheckpoint(Callback):
    """Persists the model whenever the monitored validation metric improves.

    ``minimize=True`` keeps the lowest observed value of ``metric_name``;
    the model is always saved after epoch 0.
    """

    def __init__(self, filepath, metric_name="sum", epoch_every=1, minimize=True):
        # NOTE(review): does not call super().__init__(); base attributes
        # (model, transformer, ...) are only populated via set_params.
        self.filepath = filepath
        self.minimize = minimize
        self.best_score = None
        if epoch_every == 0:
            self.epoch_every = False
        else:
            self.epoch_every = epoch_every
        self.metric_name = metric_name

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0
        os.makedirs(os.path.dirname(self.filepath), exist_ok=True)

    def on_epoch_end(self, *args, **kwargs):
        if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):
            self.model.eval()
            val_loss = self.get_validation_loss()
            loss_sum = val_loss[self.metric_name]
            loss_sum = loss_sum.data.cpu().numpy()[0]  # pre-0.4 torch scalar
            self.model.train()
            if self.best_score is None:
                self.best_score = loss_sum
            # Save on any improvement, and unconditionally after epoch 0.
            if (
                (self.minimize and loss_sum < self.best_score)
                or (not self.minimize and loss_sum > self.best_score)
                or (self.epoch_id == 0)
            ):
                self.best_score = loss_sum
                persist_torch_model(self.model, self.filepath)
                logger.info(
                    "epoch {0} model saved to {1}".format(self.epoch_id, self.filepath)
                )
        self.epoch_id += 1
class EarlyStopping(Callback):
    """Stops training when the monitored validation metric stops improving.

    Tracks the best value of ``metric_name`` from the per-epoch validation
    losses; once it has not improved for more than ``patience`` epochs,
    ``training_break`` starts returning True and the training loop exits.
    """

    def __init__(self, metric_name="sum", patience=1000, minimize=True):
        super().__init__()
        self.patience = patience
        self.minimize = minimize  # True: lower is better; False: higher is better
        self.best_score = None
        self.epoch_since_best = 0
        self._training_break = False
        self.metric_name = metric_name

    def training_break(self, *args, **kwargs):
        return self._training_break

    def on_epoch_end(self, *args, **kwargs):
        self.model.eval()
        val_loss = self.get_validation_loss()
        loss_sum = val_loss[self.metric_name]
        loss_sum = loss_sum.data.cpu().numpy()[0]  # pre-0.4 torch scalar
        self.model.train()
        # BUG FIX: was `if not self.best_score:`, which also fires when the
        # best score is exactly 0.0 and silently resets the baseline every
        # epoch.  Only initialize when no score has been seen yet.
        if self.best_score is None:
            self.best_score = loss_sum
        if (self.minimize and loss_sum < self.best_score) or (
            not self.minimize and loss_sum > self.best_score
        ):
            self.best_score = loss_sum
            self.epoch_since_best = 0
        else:
            self.epoch_since_best += 1
        if self.epoch_since_best > self.patience:
            self._training_break = True
        self.epoch_id += 1
def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):
    """Build the steppy pipeline turning raw mask predictions into binary
    masks at the original image resolution.

    loader_mode selects how network output is mapped back to
    ORIGINAL_SIZE: 'resize_and_pad' crops, 'resize' rescales; any other
    value raises NotImplementedError.
    """
    if loader_mode == "resize_and_pad":
        size_adjustment_function = partial(crop_image, target_size=ORIGINAL_SIZE)
    elif loader_mode == "resize":
        size_adjustment_function = partial(resize_image, target_size=ORIGINAL_SIZE)
    else:
        raise NotImplementedError
    # Step 1: bring each predicted mask back to the original size.
    mask_resize = Step(
        name="mask_resize",
        transformer=make_apply_transformer(
            size_adjustment_function, output_name="resized_images", apply_on=["images"]
        ),
        input_data=["unet_output"],
        adapter=Adapter({"images": E("unet_output", "mask_prediction")}),
        experiment_directory=cache_dirpath,
    )
    # Step 2: threshold probabilities into {0, 1} masks.
    binarizer = Step(
        name="binarizer",
        transformer=make_apply_transformer(
            partial(binarize, threshold=THRESHOLD),
            output_name="binarized_images",
            apply_on=["images"],
        ),
        input_steps=[mask_resize],
        adapter=Adapter({"images": E(mask_resize.name, "resized_images")}),
        experiment_directory=cache_dirpath,
    )
    # Step 3: expose the binarized masks under the conventional 'y_pred' key.
    output = Step(
        name="output",
        transformer=IdentityOperation(),
        input_steps=[binarizer],
        adapter=Adapter({"y_pred": E(binarizer.name, "binarized_images")}),
        experiment_directory=cache_dirpath,
    )
    return output
| {
"repo_name": "Diyago/Machine-Learning-scripts",
"path": "DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/common_blocks/callbacks.py",
"copies": "1",
"size": "21327",
"license": "apache-2.0",
"hash": 3932554816071721000,
"line_mean": 33.7345276873,
"line_max": 87,
"alpha_frac": 0.5541801472,
"autogenerated": false,
"ratio": 3.944331422230442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4998511569430442,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import logging
import pandas as pd
from .example_filetype_format import FileTypeFormat
from . import process_functions
logger = logging.getLogger(__name__)
def validateSymbol(x, bedDf, returnMappedDf=True):
    """Check x['HUGO_SYMBOL'] against the center's bed file.

    Symbols present in bedDf['Hugo_Symbol'] are valid as-is; symbols found
    only in bedDf['ID'] are remapped in place to the bed Hugo_Symbol (the
    row is left flagged invalid).  Returns the (possibly remapped) row when
    returnMappedDf is True, otherwise the validity flag.
    """
    gene = x['HUGO_SYMBOL']
    valid = bool((bedDf['Hugo_Symbol'] == gene).sum() > 0)
    if not valid and (bedDf['ID'] == gene).sum() > 0:
        matches = bedDf[bedDf['ID'] == gene]
        matches.drop_duplicates(inplace=True)
        remapped = matches['Hugo_Symbol'].values[0]
        logger.info("%s will be remapped to %s" % (gene, remapped))
        x['HUGO_SYMBOL'] = remapped
    if returnMappedDf:
        return x
    return valid
# Rewrite gene symbols inside a fusion-description column.
def remapFusion(gene_dict, DF, col):
    """Replace old gene symbols with remapped ones inside DF[col].

    gene_dict maps old symbol -> new symbol (or False when unmappable).
    Returns (DF with the column rewritten, list of unmappable symbols —
    note each unmappable symbol is appended once per row).
    """
    nonmapped = []
    rewritten = []
    for entry in DF[col]:
        for old, new in gene_dict.items():
            if new == False:
                nonmapped.append(old)
            else:
                entry = entry.replace("%s-" % old, "%s-" % new)
                entry = entry.replace("-%s fusion" % old, "-%s fusion" % new)
        rewritten.append(entry)
    DF[col] = rewritten
    return (DF, nonmapped)
class fusions(FileTypeFormat):
    """GENIE handler for center fusion files (data_fusions_<CENTER>.txt)."""

    _fileType = "fusions"
    # Keyword arguments the framework forwards to process_steps / _validate.
    _process_kwargs = ["newPath", "databaseSynId", "databaseToSynIdMappingDf"]
    _validation_kwargs = ['nosymbol_check', 'project_id']

    # VALIDATE FILENAME
    def _validateFilename(self, filePath):
        # The file must be named exactly data_fusions_<CENTER>.txt.
        assert os.path.basename(filePath[0]) == "data_fusions_%s.txt" % self.center

    def _process(self, fusion, databaseToSynIdMappingDf):
        """Normalize a fusion dataframe for database upload.

        Uppercases headers, stamps the center, prefixes tumor sample
        barcodes with the GENIE id, fills missing ENTREZ_GENE_IDs with 0,
        remaps Hugo symbols via the center's bed table, and rewrites
        FUSION strings to use the remapped symbols.
        """
        fusion.columns = [col.upper() for col in fusion.columns]
        fusion['CENTER'] = self.center
        newsamples = [process_functions.checkGenieId(i, self.center) for i in fusion['TUMOR_SAMPLE_BARCODE']]
        fusion['TUMOR_SAMPLE_BARCODE'] = newsamples
        fusion['ENTREZ_GENE_ID'] = fusion['ENTREZ_GENE_ID'].fillna(0)
        fusion = fusion.drop_duplicates()
        # Preserve the original symbol in ID so post-remap rows whose
        # symbol changed can be identified below.
        fusion['ID'] = fusion['HUGO_SYMBOL'].copy()
        bedSynId = process_functions.getDatabaseSynId(self.syn, "bed",
                                                     databaseToSynIdMappingDf=databaseToSynIdMappingDf)
        bed = self.syn.tableQuery("select Hugo_Symbol, ID from %s where CENTER = '%s'" % (bedSynId, self.center))
        bedDf = bed.asDataFrame()
        fusion = fusion.apply(lambda x: validateSymbol(x, bedDf), axis=1)
        # Build a {old_symbol: new_symbol} mapping from rows whose symbol
        # was remapped (HUGO_SYMBOL differs from the saved ID).
        temp = fusion[fusion['HUGO_SYMBOL'] != fusion['ID']]
        foo = temp[~temp.HUGO_SYMBOL.isnull()]
        temp = foo[['HUGO_SYMBOL', 'ID']]
        temp.drop_duplicates(inplace=True)
        temp.index = temp.ID
        del temp['ID']
        fusion['FUSION'] = fusion['FUSION'].fillna("")
        fusion, nonmapped = remapFusion(temp.to_dict()['HUGO_SYMBOL'], fusion, "FUSION")
        # ENTREZ ids may have become floats via fillna; coerce back to int.
        fusion['ENTREZ_GENE_ID'] = [int(float(i)) for i in fusion['ENTREZ_GENE_ID']]
        return (fusion)

    # PROCESSING
    def process_steps(self, fusion, databaseSynId, newPath, databaseToSynIdMappingDf):
        """Process the dataframe, upload it to the table, write the flat file."""
        fusion = self._process(fusion, databaseToSynIdMappingDf)
        process_functions.updateData(self.syn, databaseSynId, fusion, self.center, toDelete=True)
        fusion.to_csv(newPath, sep="\t", index=False)
        return (newPath)

    def _validate(self, fusionDF, nosymbol_check, project_id):
        """Validate a fusion dataframe; returns (error_text, warning_text)."""
        total_error = ""
        warning = ""
        fusionDF.columns = [col.upper() for col in fusionDF.columns]
        REQUIRED_HEADERS = pd.Series(['HUGO_SYMBOL','ENTREZ_GENE_ID','CENTER','TUMOR_SAMPLE_BARCODE','FUSION','DNA_SUPPORT','RNA_SUPPORT','METHOD','FRAME'])
        if fusionDF.get("COMMENTS") is None:
            fusionDF['COMMENTS'] = float('nan')
        if not all(REQUIRED_HEADERS.isin(fusionDF.columns)):
            total_error += "Your fusion file must at least have these headers: %s.\n" % ",".join(REQUIRED_HEADERS[~REQUIRED_HEADERS.isin(fusionDF.columns)])
        if process_functions.checkColExist(fusionDF, "HUGO_SYMBOL") and not nosymbol_check:
            databaseToSynIdMappingDf = process_functions.get_synid_database_mappingdf(self.syn, project_id)
            bedSynId = process_functions.getDatabaseSynId(self.syn, "bed",
                                                         databaseToSynIdMappingDf=databaseToSynIdMappingDf)
            bed = self.syn.tableQuery("select Hugo_Symbol, ID from %s where CENTER = '%s'" % (bedSynId, self.center))
            # NOTE(review): bedDf is fetched but never used — the symbol
            # validation it fed was disabled; consider dropping the query
            # (it costs a Synapse round-trip per validation).
            bedDf = bed.asDataFrame()
            if fusionDF["HUGO_SYMBOL"].isnull().any():
                total_error += "Your fusion file should not have any NA/blank Hugo Symbols.\n"
        # NOTE(review): checks for DNA_SUPPORT / RNA_SUPPORT / FRAME value
        # domains were present here but are currently disabled.
        return (total_error, warning)
| {
"repo_name": "thomasyu888/Genie",
"path": "genie/fusions.py",
"copies": "1",
"size": "6813",
"license": "mit",
"hash": -7618228003008167000,
"line_mean": 46.6433566434,
"line_max": 169,
"alpha_frac": 0.6186701893,
"autogenerated": false,
"ratio": 3.3561576354679805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9422838422362065,
"avg_score": 0.010397880481183053,
"num_lines": 143
} |
from functools import partial
import os
import re
import sys
import time
import threading
import sublime
import sublime_plugin
from sublimelinter.loader import Loader
from sublimelinter.modules.base_linter import INPUT_METHOD_FILE
# Module-level caches; the per-view ones are keyed by view id.
LINTERS = {} # mapping of language name to linter module
QUEUE = {} # views waiting to be processed by linter
ERRORS = {} # error messages on given line obtained from linter; they are
# displayed in the status bar when cursor is on line with error
VIOLATIONS = {} # violation messages, they are displayed in the status bar
WARNINGS = {} # warning messages, they are displayed in the status bar
UNDERLINES = {} # underline regions related to each lint message
TIMES = {} # collects how long it took the linting to complete
# NOTE(review): the ST3 port of this file passes the plugin directory here;
# os.getcwdu() only matches when the cwd happens to be the plugin dir — confirm.
MOD_LOAD = Loader(os.getcwdu(), LINTERS) # utility to load (and reload
# if necessary) linter modules [useful when working on plugin]
# For snappier linting, different delays are used for different linting times:
# (linting time in ms, (timeout, busy_timeout) in ms)
DELAYS = (
    (50, (50, 100)),
    (100, (100, 300)),
    (200, (200, 500)),
    (400, (400, 1000)),
    (800, (800, 2000)),
    (1600, (1600, 3000)),
)
# Select one of the predefined gutter mark themes, the options are:
# "alpha", "bright", "dark", "hard" and "simple"
MARK_THEMES = ('alpha', 'bright', 'dark', 'hard', 'simple')
# The path to the built-in gutter mark themes
MARK_THEMES_PATH = os.path.join('..', 'SublimeLinter', 'gutter_mark_themes')
# The original theme for anyone interested the previous minimalist approach
ORIGINAL_MARK_THEME = {
    'violation': 'dot',
    'warning': 'dot',
    'illegal': 'circle'
}
# All available settings for SublimeLinter;
# only these are inherited from SublimeLinter.sublime-settings
ALL_SETTINGS = [
    'annotations',
    'csslint_options',
    'gjslint_ignore',
    'gjslint_options',
    'javascript_linter',
    'jshint_options',
    'jslint_options',
    'pep8',
    'pep8_ignore',
    'perl_linter',
    'pyflakes_ignore',
    'pyflakes_ignore_import_*',
    'sublimelinter',
    'sublimelinter_delay',
    'sublimelinter_disable',
    'sublimelinter_executable_map',
    'sublimelinter_fill_outlines',
    'sublimelinter_gutter_marks',
    'sublimelinter_gutter_marks_theme',
    'sublimelinter_mark_style',
    'sublimelinter_notes',
    'sublimelinter_objj_check_ascii',
    'sublimelinter_popup_errors_on_save',
    'sublimelinter_syntax_map',
    'sublimelinter_wrap_find',
]
# Matches leading whitespace when jumping to a line's first code character.
WHITESPACE_RE = re.compile(r'\s+')
def get_delay(t, view):
    '''Return the (timeout, busy_timeout) pair for a lint time of `t` ms.

    A user-configured `sublimelinter_delay` larger than the built-in delay
    means "only lint when idle", so existing marks are erased immediately
    and the user delay is used for both timeouts.
    '''
    chosen = None
    for threshold, pair in DELAYS:
        if threshold > t:
            break
        chosen = pair
    if chosen is None:
        chosen = DELAYS[0][1]
    user_ms = int(view.settings().get('sublimelinter_delay', 0) * 1000)
    if user_ms > chosen[1]:
        erase_lint_marks(view)
        return (user_ms, user_ms)
    return chosen
def last_selected_lineno(view):
    '''Return the 0-based row of the end of the first selection, or None.'''
    regions = view.sel()
    if regions:
        row, _ = view.rowcol(regions[0].end())
        return row
    return None
def update_statusbar(view):
    '''Show the lint messages for the caret's line in the status bar.'''
    vid = view.id()
    lineno = last_selected_lineno(view)
    messages = []
    if lineno is not None:
        # Gather messages in severity order: errors, violations, warnings.
        for store in (ERRORS, VIOLATIONS, WARNINGS):
            per_line = store.get(vid)
            if per_line and lineno in per_line:
                messages.extend(per_line[lineno])
    if messages:
        view.set_status('Linter', '; '.join(messages))
    else:
        view.erase_status('Linter')
def run_once(linter, view, **kwargs):
    '''run a linter on a given view regardless of user setting'''
    if not linter:
        return
    vid = view.id()
    # Reset this view's caches before the run; linter.run() replaces them.
    ERRORS[vid] = {}
    VIOLATIONS[vid] = {}
    WARNINGS[vid] = {}
    start = time.time()
    text = view.substr(sublime.Region(0, view.size())).encode('utf-8')
    # linter.run() returns a 7-tuple: outline lines, three underline region
    # lists, and three {lineno: [messages]} maps stored straight into caches.
    lines, error_underlines, violation_underlines, warning_underlines, ERRORS[vid], VIOLATIONS[vid], WARNINGS[vid] = linter.run(view, text, (view.file_name() or '').encode('utf-8'))
    # Keep one flat list of every underline for next/previous navigation.
    UNDERLINES[vid] = error_underlines[:]
    UNDERLINES[vid].extend(violation_underlines)
    UNDERLINES[vid].extend(warning_underlines)
    add_lint_marks(view, lines, error_underlines, violation_underlines, warning_underlines)
    if view.settings().get('sublimelinter_notes'):
        highlight_notes(view)
    update_statusbar(view)
    end = time.time()
    TIMES[vid] = (end - start) * 1000 # Keep how long it took to lint
    if kwargs.get('event', None) == 'on_post_save' and view.settings().get('sublimelinter_popup_errors_on_save'):
        popup_error_list(view)
def popup_error_list(view):
    '''Show every lint message for the view in a quick panel.

    Selecting an entry moves the caret to the first non-whitespace
    character of the offending line.
    '''
    vid = view.id()
    # Merge violations and warnings into a copy of the error map.
    errors = ERRORS[vid].copy()
    for message_map in [VIOLATIONS[vid], WARNINGS[vid]]:
        for line, messages in message_map.items():
            if line in errors:
                errors[line].extend(messages)
            else:
                errors[line] = messages
    # Flatten the errors into a list
    error_list = []
    for line in sorted(errors.keys()):
        for index, message in enumerate(errors[line]):
            error_list.append({'line': line, 'message': message})
    panel_items = []
    for error in error_list:
        line_text = view.substr(view.full_line(view.text_point(error['line'], 0)))
        item = [error['message'], u'{0}: {1}'.format(error['line'] + 1, line_text.strip())]
        panel_items.append(item)

    def on_done(selected_item):
        # selected_item is the panel index, or -1 when the panel is cancelled.
        if selected_item == -1:
            return
        selected = view.sel()
        selected.clear()
        error = error_list[selected_item]
        region_begin = view.text_point(error['line'], 0)
        # Go to the first non-whitespace character of the line
        line_text = view.substr(view.full_line(region_begin))
        match = WHITESPACE_RE.match(line_text)
        if (match):
            region_begin += len(match.group(0))
        selected.add(sublime.Region(region_begin, region_begin))
        # We have to force a move to update the cursor position
        view.run_command('move', {'by': 'characters', 'forward': True})
        view.run_command('move', {'by': 'characters', 'forward': False})
        view.show_at_center(region_begin)

    view.window().show_quick_panel(panel_items, on_done)
def add_lint_marks(view, lines, error_underlines, violation_underlines, warning_underlines):
    '''Adds lint marks to view.'''
    vid = view.id()
    erase_lint_marks(view)
    # Character-level underline regions, one set per severity.
    types = {'warning': warning_underlines, 'violation': violation_underlines, 'illegal': error_underlines}
    for type_name, underlines in types.items():
        if underlines:
            view.add_regions('lint-underline-' + type_name, underlines, 'sublimelinter.underline.' + type_name, sublime.DRAW_EMPTY_AS_OVERWRITE)
    if lines:
        outline_style = view.settings().get('sublimelinter_mark_style', 'outline')
        # This test is for the legacy "fill" setting; it will be removed
        # in a future version (likely v1.7).
        if view.settings().get('sublimelinter_fill_outlines', False):
            outline_style = 'fill'
        gutter_mark_enabled = True if view.settings().get('sublimelinter_gutter_marks', False) else False
        gutter_mark_theme = view.settings().get('sublimelinter_gutter_marks_theme', 'simple')
        # Full-line outline regions, built from the per-line message maps.
        outlines = {'warning': [], 'violation': [], 'illegal': []}
        for line in ERRORS[vid]:
            outlines['illegal'].append(view.full_line(view.text_point(line, 0)))
        for line in WARNINGS[vid]:
            outlines['warning'].append(view.full_line(view.text_point(line, 0)))
        for line in VIOLATIONS[vid]:
            outlines['violation'].append(view.full_line(view.text_point(line, 0)))
        for lint_type in outlines:
            if outlines[lint_type]:
                # Positional args for view.add_regions: key, regions, scope,
                # then optionally icon and flags.
                args = [
                    'lint-outlines-{0}'.format(lint_type),
                    outlines[lint_type],
                    'sublimelinter.outline.{0}'.format(lint_type)
                ]
                gutter_mark_image = ''
                if gutter_mark_enabled:
                    if gutter_mark_theme == 'original':
                        gutter_mark_image = ORIGINAL_MARK_THEME[lint_type]
                    elif gutter_mark_theme in MARK_THEMES:
                        gutter_mark_image = os.path.join(MARK_THEMES_PATH, gutter_mark_theme + '-' + lint_type)
                    else:
                        # Unknown theme name: treat it as a user-supplied prefix.
                        gutter_mark_image = gutter_mark_theme + '-' + lint_type
                args.append(gutter_mark_image)
                if outline_style == 'none':
                    args.append(sublime.HIDDEN)
                elif outline_style == 'fill':
                    pass # outlines are filled by default
                else:
                    args.append(sublime.DRAW_OUTLINED)
                view.add_regions(*args)
def erase_lint_marks(view):
    '''Remove every lint-related region set from the view.'''
    region_keys = (
        'lint-underline-illegal',
        'lint-underline-violation',
        'lint-underline-warning',
        'lint-outlines-illegal',
        'lint-outlines-violation',
        'lint-outlines-warning',
        'lint-annotations',
    )
    for key in region_keys:
        view.erase_regions(key)
def get_lint_regions(view, reverse=False, coalesce=False):
    '''Return all lint regions for the view, sorted by start position.

    With coalesce=True, adjacent single-character underline points are
    merged into one region. Outline (whole-line) regions are included only
    when they do not contain an underline.
    '''
    vid = view.id()
    underlines = UNDERLINES.get(vid, [])[:]
    if (coalesce):
        # Each of these regions is one character, so transform it into the character points
        points = sorted([region.begin() for region in underlines])
        # Now coalesce adjacent characters into a single region
        underlines = []
        last_point = -999
        for point in points:
            if point != last_point + 1:
                underlines.append(sublime.Region(point, point))
            else:
                region = underlines[-1]
                underlines[-1] = sublime.Region(region.begin(), point)
            last_point = point
    # Now get all outlines, which includes the entire line where underlines are
    outlines = view.get_regions('lint-outlines-illegal')
    outlines.extend(view.get_regions('lint-outlines-violation'))
    outlines.extend(view.get_regions('lint-outlines-warning'))
    outlines.extend(view.get_regions('lint-annotations'))
    # If an outline region contains an underline region, use only the underline
    # NOTE(review): `regions` aliases `underlines`, so outlines appended below
    # also participate in later containment checks — looks like accidental
    # dedup of duplicate outlines; confirm before "fixing".
    regions = underlines
    for outline in outlines:
        contains_underlines = False
        for underline in underlines:
            if outline.contains(underline):
                contains_underlines = True
                break
        if not contains_underlines:
            regions.append(outline)
    return sorted(regions, key=lambda x: x.begin(), reverse=reverse)
def select_lint_region(view, region):
    '''Move the selection into `region`, preferring an underline inside it.'''
    target = find_underline_within(view, region)
    if target is None:
        # No underline in this region: park the caret at its start.
        start = region.begin()
        target = sublime.Region(start, start)
    sel = view.sel()
    sel.clear()
    sel.add(target)
    view.show(target, True)
def find_underline_within(view, region):
    '''Return the first lint underline contained in `region`, else None.'''
    marks = []
    for severity in ('illegal', 'violation', 'warning'):
        marks.extend(view.get_regions('lint-underline-' + severity))
    for mark in sorted(marks, key=lambda r: r.begin()):
        if region.contains(mark):
            return mark
    return None
def syntax_name(view):
    '''Return the view's syntax-file base name without its extension.'''
    path = view.settings().get('syntax')
    base = os.path.basename(path)
    return os.path.splitext(base)[0]
def select_linter(view, ignore_disabled=False):
    '''Return the linter module for the view's language, or None.

    The syntax name is first mapped through the user's
    `sublimelinter_syntax_map`, then matched case-insensitively against the
    loaded LINTERS. Linters listed in `sublimelinter_disable` are skipped
    unless `ignore_disabled` is True.
    '''
    syntax = syntax_name(view)
    lc_syntax = syntax.lower()
    language = None
    linter = None
    syntaxMap = view.settings().get('sublimelinter_syntax_map', {})
    if syntax in syntaxMap:
        language = syntaxMap.get(syntax, '').lower()
    elif lc_syntax in syntaxMap:
        language = syntaxMap.get(lc_syntax, '').lower()
    elif lc_syntax in LINTERS:
        language = lc_syntax
    if language:
        if ignore_disabled:
            disabled = []
        else:
            disabled = view.settings().get('sublimelinter_disable', [])
        if language not in disabled:
            linter = LINTERS.get(language)
        # If the enabled state is False, it must be checked.
        # Enabled checking has to be deferred to first view use because
        # user settings cannot be loaded during plugin startup.
        if linter is not None and not linter.enabled:
            enabled, message = linter.check_enabled(view)
            # print() with a single argument behaves identically on the
            # Python 2.6 runtime ST2 ships; the old print statement was a
            # syntax error under Python 3 tooling.
            print('SublimeLinter: {0} {1} ({2})'.format(language, 'enabled' if enabled else 'disabled', message))
            if not enabled:
                # Was `del LINTERS['' + language]`; the empty-string concat
                # was a no-op.
                del LINTERS[language]
                linter = None
    return linter
def highlight_notes(view):
    '''Highlight user-specified annotations (notes) in the view.'''
    view.erase_regions('lint-annotations')
    buffer_text = view.substr(sublime.Region(0, view.size()))
    found = LINTERS['annotations'].built_in_check(view, buffer_text, '')
    if found:
        view.add_regions('lint-annotations', found,
                         'sublimelinter.annotations',
                         sublime.DRAW_EMPTY_AS_OVERWRITE)
def _update_view(view, filename, **kwargs):
    '''Lint `view` if it is still alive and still shows `filename`.

    By the time the queued callback runs, the original file may no longer
    be displayed in the view, or the view may be gone entirely — e.g. when
    files are previewed by single-clicking in the sidebar or via the file
    palette — in which case linting is skipped.
    '''
    valid_view = False
    view_id = view.id()
    for window in sublime.windows():
        for v in window.views():
            if v.id() == view_id:
                valid_view = True
                break
    if not valid_view or view.is_loading() or (view.file_name() or '').encode('utf-8') != filename:
        return
    try:
        run_once(select_linter(view), view, **kwargs)
    except RuntimeError as ex:
        # `except RuntimeError, ex` / `print ex` were Python-2-only syntax;
        # these forms behave identically on the ST2 (2.6) runtime.
        print(ex)
def queue_linter(linter, view, timeout=-1, preemptive=False, event=None):
    '''Put the current view in a queue to be examined by a linter'''
    if linter is None:
        erase_lint_marks(view) # may have changed file type and left marks behind
        # No point in queuing anything if no linters will run
        if not view.settings().get('sublimelinter_notes'):
            return
    # preemptive=True lints immediately; timeout=-1 means "derive the delay
    # from how long this view's last lint took".
    if preemptive:
        timeout = busy_timeout = 0
    elif timeout == -1:
        timeout, busy_timeout = get_delay(TIMES.get(view.id(), 100), view)
    else:
        busy_timeout = timeout
    kwargs = {'timeout': timeout, 'busy_timeout': busy_timeout, 'preemptive': preemptive, 'event': event}
    # The filename is captured now so the callback can verify the view still
    # shows the same file when it finally runs.
    queue(view, partial(_update_view, view, (view.file_name() or '').encode('utf-8'), **kwargs), kwargs)
def _callback(view, filename, kwargs):
kwargs['callback'](view, filename, **kwargs)
def background_linter():
    '''Drain the queue and schedule each pending lint on the main thread.

    Runs on the background queue thread; sublime.set_timeout marshals each
    callback back onto Sublime's main thread.
    '''
    __lock_.acquire()
    try:
        callbacks = QUEUE.values()
        QUEUE.clear()
    finally:
        __lock_.release()
    for callback in callbacks:
        sublime.set_timeout(callback, 0)
################################################################################
# Queue dispatcher system:
queue_dispatcher = background_linter  # function the queue thread runs on wake-up
queue_thread_name = 'background linter'
MAX_DELAY = 10  # seconds; cap on how long dispatch may keep being postponed
def queue_loop():
    '''An infinite loop running the linter in a background thread meant to
    update the view after user modifies it and then does no further
    modifications for some time as to not slow down the UI with linting.'''
    global __signaled_, __signaled_first_
    while __loop_:
        # Block until queue()/_delay_queue() releases the semaphore.
        #print 'acquire...'
        __semaphore_.acquire()
        __signaled_first_ = 0
        __signaled_ = 0
        #print 'DISPATCHING!', len(QUEUE)
        queue_dispatcher()
def queue(view, callback, kwargs):
    '''Register `callback` for the view and (re)arm the dispatch timer.'''
    global __signaled_, __signaled_first_
    now = time.time()
    __lock_.acquire()
    try:
        # One pending callback per view; a newer edit replaces the older one.
        QUEUE[view.id()] = callback
        timeout = kwargs['timeout']
        busy_timeout = kwargs['busy_timeout']
        # If signals are arriving rapidly, switch to the longer busy timeout.
        if now < __signaled_ + timeout * 4:
            timeout = busy_timeout or timeout
        __signaled_ = now
        _delay_queue(timeout, kwargs['preemptive'])
        if not __signaled_first_:
            __signaled_first_ = __signaled_
            #print 'first',
        #print 'queued in', (__signaled_ - now)
    finally:
        __lock_.release()
def _delay_queue(timeout, preemptive):
    '''Push the dispatch signal `timeout` ms into the future.

    MAX_DELAY caps how long dispatch can keep being deferred by a stream of
    edits; preemptive requests bypass the rapid-requeue guard.
    '''
    global __signaled_, __queued_
    now = time.time()
    if not preemptive and now <= __queued_ + 0.01:
        return # never delay queues too fast (except preemptively)
    __queued_ = now
    _timeout = float(timeout) / 1000
    if __signaled_first_:
        # Clamp so the total deferral since the first signal stays <= MAX_DELAY.
        if MAX_DELAY > 0 and now - __signaled_first_ + _timeout > MAX_DELAY:
            _timeout -= now - __signaled_first_
            if _timeout < 0:
                _timeout = 0
            timeout = int(round(_timeout * 1000, 0))
    new__signaled_ = now + _timeout - 0.01
    if __signaled_ >= now - 0.01 and (preemptive or new__signaled_ >= __signaled_ - 0.01):
        __signaled_ = new__signaled_
        #print 'delayed to', (preemptive, __signaled_ - now)

        def _signal():
            # A later _delay_queue call may have pushed the deadline further;
            # only release the worker when this wake-up is still current.
            if time.time() < __signaled_:
                return
            __semaphore_.release()

        sublime.set_timeout(_signal, timeout)
def delay_queue(timeout):
    '''Thread-safe, non-preemptive wrapper around _delay_queue().'''
    with __lock_:
        _delay_queue(timeout, False)
# only start the thread once - otherwise the plugin will get laggy
# when saving it often.
__semaphore_ = threading.Semaphore(0)
__lock_ = threading.Lock()
__queued_ = 0      # last time a dispatch delay was requested
__signaled_ = 0    # time at which the dispatch signal is due
__signaled_first_ = 0  # time of the first signal in the current burst
# First finalize old standing threads:
__loop_ = False
__pre_initialized_ = False


def queue_finalize(timeout=None):
    '''Stop a queue thread left over from a previous plugin (re)load.'''
    global __pre_initialized_
    for thread in threading.enumerate():
        if thread.isAlive() and thread.name == queue_thread_name:
            __pre_initialized_ = True
            # Wake the old thread so its loop notices __loop_ is False.
            thread.__semaphore_.release()
            thread.join(timeout)

queue_finalize()
# Initialize background thread:
__loop_ = True
__active_linter_thread = threading.Thread(target=queue_loop, name=queue_thread_name)
# Stash the semaphore on the thread so a future reload can find and wake it.
__active_linter_thread.__semaphore_ = __semaphore_
__active_linter_thread.start()
################################################################################
UNRECOGNIZED = '''
* Unrecognized option * : %s
==============================================
'''
def view_in_tab(view, title, text, file_type):
    '''Helper function to display information in a tab.

    Opens a new scratch tab titled `title` containing `text`, applying the
    `file_type` syntax file. Returns (tab, buffer_id).
    '''
    tab = view.window().new_file()
    tab.set_name(title)
    _id = tab.buffer_id()
    # set_scratch() takes a boolean; the original passed the buffer id,
    # which only worked because any non-zero id is truthy.
    tab.set_scratch(True)
    tab.settings().set('gutter', True)
    tab.settings().set('line_numbers', False)
    tab.set_syntax_file(file_type)
    ed = tab.begin_edit()
    tab.insert(ed, 0, text)
    tab.end_edit(ed)
    return tab, _id
def lint_views(linter):
    '''Re-lint every open view currently handled by `linter`.'''
    if not linter:
        return
    pending = [view
               for window in sublime.windows()
               for view in window.views()
               if select_linter(view) == linter]
    for view in pending:
        queue_linter(linter, view, preemptive=True)
def reload_view_module(view):
    '''If the saved file is a loaded linter module, reload it and re-lint.

    Compares the saved file's path against each linter module's __file__;
    on a match the module is reloaded and all views using it are re-linted.
    '''
    for name, linter in LINTERS.items():
        module = sys.modules[linter.__module__]
        if module.__file__.encode('utf-8') == (view.file_name() or '').encode('utf-8'):
            # %-formatting keeps the Python 2 output identical while fixing
            # the print statement, which was a syntax error under Python 3.
            print('SublimeLinter: reloading language: %s' % linter.language)
            MOD_LOAD.reload_module(module)
            lint_views(linter)
            break
def settings_changed():
    '''Propagate changed user settings to every view that has a linter.'''
    for window in sublime.windows():
        for view in window.views():
            if select_linter(view):
                reload_settings(view)
def reload_settings(view):
    '''Restores user settings.

    Copies every known SublimeLinter setting into the view's settings and
    re-registers the on-change hook so later edits to the settings file
    trigger settings_changed().
    '''
    settings = sublime.load_settings(__name__ + '.sublime-settings')
    settings.clear_on_change(__name__)
    settings.add_on_change(__name__, settings_changed)
    for setting in ALL_SETTINGS:
        # `is not None` replaces `!= None`: identity, not equality, is the
        # correct (PEP 8) test for None.
        if settings.get(setting) is not None:
            view.settings().set(setting, settings.get(setting))
    if view.settings().get('sublimelinter') is None:
        view.settings().set('sublimelinter', True)
class LintCommand(sublime_plugin.TextCommand):
    '''Text command used to drive the linters ("lint" + an action string).'''

    def __init__(self, view):
        self.view = view
        self.help_called = False

    def run_(self, action):
        '''Dispatch the action string to the matching handler, or run the
        named linter if the action is a known language.'''
        if not action:
            return
        try:
            lc_action = action.lower()
        except AttributeError:
            return
        handlers = {
            'reset': self.reset,
            'on': self.on,
            'load-save': self.enable_load_save,
            'save-only': self.enable_save_only,
            'off': self.off,
        }
        handler = handlers.get(lc_action)
        if handler is not None:
            handler()
        elif lc_action in LINTERS:
            self._run(lc_action)

    def reset(self):
        '''Removes existing lint marks and restores user settings.'''
        erase_lint_marks(self.view)
        reload_settings(self.view)

    def on(self):
        '''Turns background linting on.'''
        self.view.settings().set('sublimelinter', True)
        queue_linter(select_linter(self.view), self.view, preemptive=True)

    def enable_load_save(self):
        '''Turns load-save linting on.'''
        self.view.settings().set('sublimelinter', 'load-save')
        erase_lint_marks(self.view)

    def enable_save_only(self):
        '''Turns save-only linting on.'''
        self.view.settings().set('sublimelinter', 'save-only')
        erase_lint_marks(self.view)

    def off(self):
        '''Turns background linting off.'''
        self.view.settings().set('sublimelinter', False)
        erase_lint_marks(self.view)

    def _run(self, name):
        '''Runs the named linter once on this view.'''
        run_once(LINTERS[name.lower()], self.view)
class BackgroundLinter(sublime_plugin.EventListener):
    '''This plugin controls a linter meant to work in the background
    to provide interactive feedback as a file is edited. It can be
    turned off via a setting.
    '''

    def __init__(self):
        super(BackgroundLinter, self).__init__()
        # Caret row from the previous selection event; -1 forces a refresh.
        self.lastSelectedLineNo = -1

    def on_modified(self, view):
        if view.is_scratch():
            return
        # Any value other than True (False, 'load-save', 'save-only')
        # disables background linting on modification.
        if view.settings().get('sublimelinter') != True:
            erase_lint_marks(view)
            return
        linter = select_linter(view)
        # File-based linters are not invoked during a modify
        if linter and linter.input_method == INPUT_METHOD_FILE:
            erase_lint_marks(view)
            return
        # Reset the last selected line number so that the current line will show error messages
        # when update_statusbar is called.
        self.lastSelectedLineNo = -1
        queue_linter(linter, view)

    def on_load(self, view):
        reload_settings(view)
        sublimelinter_setting = view.settings().get('sublimelinter')
        if view.is_scratch() or sublimelinter_setting == False or sublimelinter_setting == 'save-only':
            return
        queue_linter(select_linter(view), view, event='on_load')

    def on_post_save(self, view):
        sublimelinter_setting = view.settings().get('sublimelinter')
        if sublimelinter_setting == None:
            reload_settings(view)
        if view.is_scratch() or sublimelinter_setting == False:
            return
        # If the saved file is itself a linter module, reload it first.
        reload_view_module(view)
        queue_linter(select_linter(view), view, preemptive=True, event='on_post_save')

    def on_selection_modified(self, view):
        if view.is_scratch():
            return
        delay_queue(1000) # on movement, delay queue (to make movement responsive)
        # We only display errors in the status bar for the last line in the current selection.
        # If that line number has not changed, there is no point in updating the status bar.
        lastSelectedLineNo = last_selected_lineno(view)
        if lastSelectedLineNo != self.lastSelectedLineNo:
            self.lastSelectedLineNo = lastSelectedLineNo
            update_statusbar(view)
class FindLintErrorCommand(sublime_plugin.TextCommand):
    '''This command is just a superclass for other commands, it is never enabled.'''

    def is_enabled(self):
        return select_linter(self.view) is not None

    def find_lint_error(self, forward):
        '''Move the caret to the next/previous lint region in the view.

        Re-lints first so regions are current, then walks the coalesced
        regions in the requested direction, wrapping around unless the
        `sublimelinter_wrap_find` setting forbids it. Returns the selected
        region, or None when no move was possible.
        '''
        linter = select_linter(self.view, ignore_disabled=True)
        if not linter:
            return
        self.view.run_command('lint', linter.language)
        regions = get_lint_regions(self.view, reverse=not forward, coalesce=True)
        if len(regions) == 0:
            sublime.error_message('No lint errors.')
            return
        selected = self.view.sel()
        point = selected[0].begin() if forward else selected[-1].end()
        regionToSelect = None
        # If going forward, find the first region beginning after the point.
        # If going backward, find the first region ending before the point.
        # If nothing is found in the given direction, wrap to the first/last region.
        if forward:
            for index, region in enumerate(regions):
                if point < region.begin():
                    regionToSelect = region
                    break
        else:
            for index, region in enumerate(regions):
                if point > region.end():
                    regionToSelect = region
                    break
        # If there is only one error line and the cursor is in that line, we cannot move.
        # Otherwise wrap to the first/last error line unless settings disallow that.
        if regionToSelect is None and (len(regions) > 1 or not regions[0].contains(point)):
            if self.view.settings().get('sublimelinter_wrap_find', True):
                regionToSelect = regions[0]
        if regionToSelect is not None:
            select_lint_region(self.view, regionToSelect)
        else:
            sublime.error_message('No {0} lint errors.'.format('next' if forward else 'previous'))
        return regionToSelect
class FindNextLintErrorCommand(FindLintErrorCommand):
    def run(self, edit):
        '''
        Move the cursor to the next lint error in the current view.
        The search wraps to the top unless the sublimelinter_wrap_find
        setting is set to false.
        '''
        self.find_lint_error(forward=True)
class FindPreviousLintErrorCommand(FindLintErrorCommand):
    def run(self, edit):
        '''
        Move the cursor to the previous lint error in the current view.
        The search wraps to the bottom unless the sublimelinter_wrap_find
        setting is set to false.
        '''
        self.find_lint_error(forward=False)
class SublimelinterWindowCommand(sublime_plugin.WindowCommand):
    '''Base window command; enabled only for non-scratch active views.'''

    def is_enabled(self):
        '''True when there is an active view that is not a scratch buffer.'''
        view = self.window.active_view()
        # Flattened from a nested if/else that returned literal booleans;
        # same truthiness test on `view` as the original.
        return bool(view) and not view.is_scratch()

    def run_(self, args):
        '''No-op; subclasses override.'''
        pass
class SublimelinterAnnotationsCommand(SublimelinterWindowCommand):
    '''Extracts annotations and displays them in a new tab.'''

    def run_(self, args):
        linter = LINTERS.get('annotations', None)
        if linter is None:
            return
        view = self.window.active_view()
        if not view:
            return
        buffer_text = view.substr(sublime.Region(0, view.size())).encode('utf-8')
        filename = (view.file_name() or '').encode('utf-8')
        notes = linter.extract_annotations(buffer_text, view, filename)
        _, filename = os.path.split(filename)
        view_in_tab(view, 'Annotations from {0}'.format(filename), notes, '')
class SublimelinterCommand(SublimelinterWindowCommand):
    '''Window command that lints the active view on demand.'''

    def is_enabled(self):
        '''Enabled when the base check passes and a linter exists for the view.'''
        enabled = super(SublimelinterCommand, self).is_enabled()
        if not enabled:
            return False
        linter = select_linter(self.window.active_view(), ignore_disabled=True)
        return linter is not None

    def run_(self, args=None):
        # `args=None` replaces the mutable default `args={}`; defaults are
        # evaluated once, so a shared dict is an accident waiting to happen.
        if args is None:
            args = {}
        view = self.window.active_view()
        action = args.get('action', '')
        if view and action:
            if action == 'lint':
                self.lint_view(view, show_popup_list=args.get('show_popup', False))
            else:
                view.run_command('lint', action)

    def lint_view(self, view, show_popup_list):
        '''Lint `view`; report results via the popup list or message dialogs.'''
        linter = select_linter(view, ignore_disabled=True)
        if linter:
            view.run_command('lint', linter.language)
            regions = get_lint_regions(view, coalesce=True)
            if regions:
                if show_popup_list:
                    popup_error_list(view)
                else:
                    sublime.error_message('{0} lint error{1}.'.format(len(regions), 's' if len(regions) != 1 else ''))
            else:
                sublime.error_message('No lint errors.')
        else:
            syntax = syntax_name(view)
            sublime.error_message('No linter for the syntax "{0}"'.format(syntax))
class SublimelinterLintCommand(SublimelinterCommand):
    '''Manual lint command; disabled while background linting is fully on.'''

    def is_enabled(self):
        if not super(SublimelinterLintCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        # `== True` is deliberate: the setting may also hold the strings
        # 'load-save' or 'save-only'.
        return not (view and view.settings().get('sublimelinter') == True)
class SublimelinterShowErrorsCommand(SublimelinterCommand):
    '''Shows the lint-error popup list for the active view.'''

    def is_enabled(self):
        # NOTE(review): this override only delegates to the superclass and
        # looks redundant — confirm before removing it.
        return super(SublimelinterShowErrorsCommand, self).is_enabled()
class SublimelinterEnableLoadSaveCommand(SublimelinterCommand):
    '''Disabled while load-save linting is already the selected mode.'''

    def is_enabled(self):
        if not super(SublimelinterEnableLoadSaveCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') == 'load-save')
class SublimelinterEnableSaveOnlyCommand(SublimelinterCommand):
    '''Disabled while save-only linting is already the selected mode.'''

    def is_enabled(self):
        if not super(SublimelinterEnableSaveOnlyCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') == 'save-only')
class SublimelinterDisableCommand(SublimelinterCommand):
    '''Disabled while linting is already turned off.'''

    def is_enabled(self):
        if not super(SublimelinterDisableCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        # `== False` is deliberate: the setting may also hold mode strings.
        return not (view and view.settings().get('sublimelinter') == False)
| {
"repo_name": "uschmidt83/SublimeLinter-for-ST2",
"path": "SublimeLinter.py",
"copies": "5",
"size": "31563",
"license": "mit",
"hash": 1890449192952797000,
"line_mean": 30.8496468214,
"line_max": 181,
"alpha_frac": 0.6115071444,
"autogenerated": false,
"ratio": 3.8765659543109803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009053814632195292,
"num_lines": 991
} |
from functools import partial
import os
import re
import sys
import time
import threading
import sublime
import sublime_plugin
from .sublimelinter.loader import Loader
from .sublimelinter.modules.base_linter import INPUT_METHOD_FILE
# Module-level caches; the per-view ones are keyed by view id.
LINTERS = {} # mapping of language name to linter module
QUEUE = {} # views waiting to be processed by linter
ERRORS = {} # error messages on given line obtained from linter; they are
# displayed in the status bar when cursor is on line with error
VIOLATIONS = {} # violation messages, they are displayed in the status bar
WARNINGS = {} # warning messages, they are displayed in the status bar
UNDERLINES = {} # underline regions related to each lint message
TIMES = {} # collects how long it took the linting to complete
# Absolute directory of this plugin, used as the linter-module search path.
PLUGIN_PATH = os.path.abspath(os.path.dirname(__file__))
MOD_LOAD = Loader(PLUGIN_PATH, LINTERS) # utility to load (and reload
# if necessary) linter modules [useful when working on plugin]
# For snappier linting, different delays are used for different linting times:
# (linting time in ms, (timeout, busy_timeout) in ms)
DELAYS = (
    (50, (50, 100)),
    (100, (100, 300)),
    (200, (200, 500)),
    (400, (400, 1000)),
    (800, (800, 2000)),
    (1600, (1600, 3000)),
)
# Select one of the predefined gutter mark themes, the options are:
# "alpha", "bright", "dark", "hard" and "simple"
MARK_THEMES = ('alpha', 'bright', 'dark', 'hard', 'simple')
# The path to the built-in gutter mark themes
MARK_THEMES_PATH = os.path.join('Packages', 'SublimeLinter', 'gutter_mark_themes')
# The original theme for anyone interested the previous minimalist approach
ORIGINAL_MARK_THEME = {
    'violation': 'dot',
    'warning': 'dot',
    'illegal': 'circle'
}
# All available settings for SublimeLinter;
# only these are inherited from SublimeLinter.sublime-settings
ALL_SETTINGS = [
    'annotations',
    'csslint_options',
    'gjslint_ignore',
    'gjslint_options',
    'javascript_linter',
    'jshint_options',
    'jslint_options',
    'pep8',
    'pep8_ignore',
    'perl_linter',
    'pyflakes_ignore',
    'pyflakes_ignore_import_*',
    'sublimelinter',
    'sublimelinter_delay',
    'sublimelinter_disable',
    'sublimelinter_executable_map',
    'sublimelinter_fill_outlines',
    'sublimelinter_gutter_marks',
    'sublimelinter_gutter_marks_theme',
    'sublimelinter_mark_style',
    'sublimelinter_notes',
    'sublimelinter_objj_check_ascii',
    'sublimelinter_popup_errors_on_save',
    'sublimelinter_syntax_map',
    'sublimelinter_wrap_find',
]
# Matches leading whitespace when jumping to a line's first code character.
WHITESPACE_RE = re.compile(r'\s+')
def get_delay(t, view):
    '''Return the (timeout, busy_timeout) pair for a lint time of `t` ms.

    A user-configured `sublimelinter_delay` larger than the built-in delay
    means "only lint when idle", so existing marks are erased immediately
    and the user delay is used for both timeouts.
    '''
    chosen = None
    for threshold, pair in DELAYS:
        if threshold > t:
            break
        chosen = pair
    if chosen is None:
        chosen = DELAYS[0][1]
    user_ms = int(view.settings().get('sublimelinter_delay', 0) * 1000)
    if user_ms > chosen[1]:
        erase_lint_marks(view)
        return (user_ms, user_ms)
    return chosen
def last_selected_lineno(view):
    '''Return the 0-based row of the end of the first selection, or None.'''
    regions = view.sel()
    if regions:
        row, _ = view.rowcol(regions[0].end())
        return row
    return None
def update_statusbar(view):
    '''Show the lint messages for the caret's line in the status bar.'''
    vid = view.id()
    lineno = last_selected_lineno(view)
    messages = []
    if lineno is not None:
        # Gather messages in severity order: errors, violations, warnings.
        for store in (ERRORS, VIOLATIONS, WARNINGS):
            per_line = store.get(vid)
            if per_line and lineno in per_line:
                messages.extend(per_line[lineno])
    if messages:
        view.set_status('Linter', '; '.join(messages))
    else:
        view.erase_status('Linter')
def run_once(linter, view, **kwargs):
    '''run a linter on a given view regardless of user setting'''
    if not linter:
        return
    vid = view.id()
    # Reset this view's caches before the run; linter.run() replaces them.
    ERRORS[vid] = {}
    VIOLATIONS[vid] = {}
    WARNINGS[vid] = {}
    start = time.time()
    text = view.substr(sublime.Region(0, view.size()))
    # linter.run() returns a 7-tuple: outline lines, three underline region
    # lists, and three {lineno: [messages]} maps stored straight into caches.
    lines, error_underlines, violation_underlines, warning_underlines, ERRORS[vid], VIOLATIONS[vid], WARNINGS[vid] = linter.run(view, text, view.file_name() or '')
    # Keep one flat list of every underline for next/previous navigation.
    UNDERLINES[vid] = error_underlines[:]
    UNDERLINES[vid].extend(violation_underlines)
    UNDERLINES[vid].extend(warning_underlines)
    add_lint_marks(view, lines, error_underlines, violation_underlines, warning_underlines)
    if view.settings().get('sublimelinter_notes'):
        highlight_notes(view)
    update_statusbar(view)
    end = time.time()
    TIMES[vid] = (end - start) * 1000 # Keep how long it took to lint
    if kwargs.get('event', None) == 'on_post_save' and view.settings().get('sublimelinter_popup_errors_on_save'):
        popup_error_list(view)
def popup_error_list(view):
    '''Show every lint message for the view in a quick panel.

    Selecting an entry moves the caret to the first non-whitespace
    character of the offending line.
    '''
    vid = view.id()
    # Merge violations and warnings into a copy of the error map.
    errors = ERRORS[vid].copy()
    for message_map in [VIOLATIONS[vid], WARNINGS[vid]]:
        for line, messages in list(message_map.items()):
            if line in errors:
                errors[line].extend(messages)
            else:
                errors[line] = messages
    # Flatten the errors into a list
    error_list = []
    for line in sorted(errors.keys()):
        for index, message in enumerate(errors[line]):
            error_list.append({'line': line, 'message': message})
    panel_items = []
    for error in error_list:
        line_text = view.substr(view.full_line(view.text_point(error['line'], 0)))
        item = [error['message'], '{0}: {1}'.format(error['line'] + 1, line_text.strip())]
        panel_items.append(item)

    def on_done(selected_item):
        # selected_item is the panel index, or -1 when the panel is cancelled.
        if selected_item == -1:
            return
        selected = view.sel()
        selected.clear()
        error = error_list[selected_item]
        region_begin = view.text_point(error['line'], 0)
        # Go to the first non-whitespace character of the line
        line_text = view.substr(view.full_line(region_begin))
        match = WHITESPACE_RE.match(line_text)
        if (match):
            region_begin += len(match.group(0))
        selected.add(sublime.Region(region_begin, region_begin))
        # We have to force a move to update the cursor position
        view.run_command('move', {'by': 'characters', 'forward': True})
        view.run_command('move', {'by': 'characters', 'forward': False})
        view.show_at_center(region_begin)

    view.window().show_quick_panel(panel_items, on_done)
def add_lint_marks(view, lines, error_underlines, violation_underlines, warning_underlines):
    '''Adds lint marks to view.

    Draws per-character underline regions for each severity, then (when any
    lines are flagged) whole-line outline regions plus optional gutter
    icons, styled according to the user's settings.
    '''
    vid = view.id()
    erase_lint_marks(view)
    types = {'warning': warning_underlines, 'violation': violation_underlines, 'illegal': error_underlines}
    # Underlines: one (possibly empty) region per flagged character.
    for type_name, underlines in list(types.items()):
        if underlines:
            view.add_regions('lint-underline-' + type_name, underlines, 'sublimelinter.underline.' + type_name, flags=sublime.DRAW_EMPTY_AS_OVERWRITE)
    if lines:
        outline_style = view.settings().get('sublimelinter_mark_style', 'outline')
        # This test is for the legacy "fill" setting; it will be removed
        # in a future version (likely v1.7).
        if view.settings().get('sublimelinter_fill_outlines', False):
            outline_style = 'fill'
        gutter_mark_enabled = True if view.settings().get('sublimelinter_gutter_marks', False) else False
        gutter_mark_theme = view.settings().get('sublimelinter_gutter_marks_theme', 'simple')
        # Build full-line regions per severity from the recorded line maps.
        outlines = {'warning': [], 'violation': [], 'illegal': []}
        for line in ERRORS[vid]:
            outlines['illegal'].append(view.full_line(view.text_point(line, 0)))
        for line in WARNINGS[vid]:
            outlines['warning'].append(view.full_line(view.text_point(line, 0)))
        for line in VIOLATIONS[vid]:
            outlines['violation'].append(view.full_line(view.text_point(line, 0)))
        for lint_type in outlines:
            if outlines[lint_type]:
                args = [
                    'lint-outlines-{0}'.format(lint_type),
                    outlines[lint_type],
                    'sublimelinter.outline.{0}'.format(lint_type)
                ]
                gutter_mark_image = ''
                # Resolve the gutter icon for the configured theme; an
                # unrecognized theme name is treated as a custom icon prefix.
                if gutter_mark_enabled:
                    if gutter_mark_theme == 'original':
                        gutter_mark_image = ORIGINAL_MARK_THEME[lint_type]
                    elif gutter_mark_theme in MARK_THEMES:
                        gutter_mark_image = os.path.join(MARK_THEMES_PATH, gutter_mark_theme + '-' + lint_type)
                    else:
                        gutter_mark_image = gutter_mark_theme + '-' + lint_type
                    gutter_mark_image += '.png'
                args.append(gutter_mark_image)
                if outline_style == 'none':
                    args.append(sublime.HIDDEN)
                elif outline_style == 'fill':
                    pass  # outlines are filled by default
                else:
                    args.append(sublime.DRAW_OUTLINED)
                view.add_regions(*args)
def erase_lint_marks(view):
    '''Remove every lint-related region this plugin may have added.'''
    region_keys = (
        'lint-underline-illegal',
        'lint-underline-violation',
        'lint-underline-warning',
        'lint-outlines-illegal',
        'lint-outlines-violation',
        'lint-outlines-warning',
        'lint-annotations',
    )
    for key in region_keys:
        view.erase_regions(key)
def get_lint_regions(view, reverse=False, coalesce=False):
    '''Return all lint regions in the view, sorted by position.

    With ``coalesce=True``, runs of adjacent one-character underline regions
    are merged into single regions. Whole-line outline regions are included
    only when they contain no underline, so navigation prefers precise spans.
    '''
    vid = view.id()
    underlines = UNDERLINES.get(vid, [])[:]
    if (coalesce):
        # Each of these regions is one character, so transform it into the character points
        points = sorted([region.begin() for region in underlines])
        # Now coalesce adjacent characters into a single region
        underlines = []
        last_point = -999
        for point in points:
            if point != last_point + 1:
                # Gap before this point: start a fresh single-point region.
                underlines.append(sublime.Region(point, point))
            else:
                # Adjacent to the previous point: grow the last region.
                region = underlines[-1]
                underlines[-1] = sublime.Region(region.begin(), point)
            last_point = point
    # Now get all outlines, which includes the entire line where underlines are
    outlines = view.get_regions('lint-outlines-illegal')
    outlines.extend(view.get_regions('lint-outlines-violation'))
    outlines.extend(view.get_regions('lint-outlines-warning'))
    outlines.extend(view.get_regions('lint-annotations'))
    # If an outline region contains an underline region, use only the underline
    # NOTE: 'regions' aliases 'underlines', so outlines appended below also
    # join the containment candidates — duplicate identical outlines from
    # different severities therefore get suppressed on later iterations.
    regions = underlines
    for outline in outlines:
        contains_underlines = False
        for underline in underlines:
            if outline.contains(underline):
                contains_underlines = True
                break
        if not contains_underlines:
            regions.append(outline)
    return sorted(regions, key=lambda x: x.begin(), reverse=reverse)
def select_lint_region(view, region):
    '''Move the selection into ``region``: prefer the first underline found
    inside it, otherwise collapse the cursor to the region's start.'''
    target = find_underline_within(view, region)
    if target is None:
        # No underline inside the region: put the cursor at its beginning.
        target = sublime.Region(region.begin(), region.begin())
    sel = view.sel()
    sel.clear()
    sel.add(target)
    view.show(target, True)
def find_underline_within(view, region):
    '''Return the left-most lint underline contained in ``region``, or
    None when the region holds no underlines.'''
    candidates = []
    for kind in ('illegal', 'violation', 'warning'):
        candidates.extend(view.get_regions('lint-underline-' + kind))
    candidates.sort(key=lambda r: r.begin())
    return next((u for u in candidates if region.contains(u)), None)
def syntax_name(view):
    '''Return the view's syntax name: the syntax file's base name with
    its path and extension stripped.'''
    syntax_path = view.settings().get('syntax')
    base = os.path.basename(syntax_path)
    return os.path.splitext(base)[0]
def select_linter(view, ignore_disabled=False):
    '''selects the appropriate linter to use based on language in current view

    Returns the linter object, or None if no (enabled) linter matches the
    view's syntax. With ignore_disabled=True, the user's
    'sublimelinter_disable' setting is not consulted.
    '''
    syntax = syntax_name(view)
    lc_syntax = syntax.lower()
    language = None
    linter = None
    syntaxMap = view.settings().get('sublimelinter_syntax_map', {})
    if syntax in syntaxMap:
        language = syntaxMap.get(syntax, '').lower()
    elif lc_syntax in syntaxMap:
        # BUG FIX: syntaxMap is a dict, so it must be looked up via .get();
        # the original code *called* the dict, raising TypeError whenever a
        # lowercase syntax name matched a map entry.
        language = syntaxMap.get(lc_syntax, '').lower()
    elif lc_syntax in LINTERS:
        language = lc_syntax
    if language:
        if ignore_disabled:
            disabled = []
        else:
            disabled = view.settings().get('sublimelinter_disable', [])
        if language not in disabled:
            linter = LINTERS.get(language)
            # If the enabled state is False, it must be checked.
            # Enabled checking has to be deferred to first view use because
            # user settings cannot be loaded during plugin startup.
            if linter is not None and not linter.enabled:
                enabled, message = linter.check_enabled(view)
                print('SublimeLinter: {0} {1} ({2})'.format(language, 'enabled' if enabled else 'disabled', message))
                if not enabled:
                    # Drop permanently-disabled linters so we don't re-check.
                    del LINTERS[language]
                    linter = None
    return linter
def highlight_notes(view):
    '''Highlight user-specified annotations in a file.'''
    view.erase_regions('lint-annotations')
    buffer_text = view.substr(sublime.Region(0, view.size()))
    found = LINTERS['annotations'].built_in_check(view, buffer_text, '')
    if found:
        view.add_regions('lint-annotations', found,
                         'sublimelinter.annotations',
                         flags=sublime.DRAW_EMPTY_AS_OVERWRITE)
def _update_view(view, filename, **kwargs):
    '''Run the linter on ``view`` if it is still alive and still showing
    ``filename``.

    By the time the queue fires, the original file may no longer be
    displayed in the view, or the view may be gone. This happens especially
    when viewing files temporarily by single-clicking on a filename in the
    sidebar or when selecting a file through the choose file palette.
    '''
    target_id = view.id()
    is_valid = any(
        v.id() == target_id
        for window in sublime.windows()
        for v in window.views()
    )
    if not is_valid or view.is_loading() or view.file_name() != filename:
        return
    try:
        run_once(select_linter(view), view, **kwargs)
    except RuntimeError as ex:
        print(ex)
def queue_linter(linter, view, timeout=-1, preemptive=False, event=None):
    '''Put the current view in a queue to be examined by a linter'''
    if linter is None:
        erase_lint_marks(view)  # may have changed file type and left marks behind
        # No point in queuing anything if no linters will run
        if not view.settings().get('sublimelinter_notes'):
            return
    # Work out how long to wait before the queued lint actually fires.
    if preemptive:
        timeout = busy_timeout = 0
    elif timeout == -1:
        timeout, busy_timeout = get_delay(TIMES.get(view.id(), 100), view)
    else:
        busy_timeout = timeout
    kwargs = {
        'timeout': timeout,
        'busy_timeout': busy_timeout,
        'preemptive': preemptive,
        'event': event,
    }
    queue(view, partial(_update_view, view, view.file_name(), **kwargs), kwargs)
def _callback(view, filename, kwargs):
kwargs['callback'](view, filename, **kwargs)
def background_linter():
    '''Flush the lint queue: snapshot and clear QUEUE under the lock, then
    hand each queued callback to sublime.set_timeout so it executes on
    Sublime's API thread rather than this background thread.'''
    __lock_.acquire()
    try:
        # Snapshot + clear atomically so new work can be queued immediately.
        callbacks = list(QUEUE.values())
        QUEUE.clear()
    finally:
        __lock_.release()
    for callback in callbacks:
        sublime.set_timeout(callback, 0)
################################################################################
# Queue dispatcher system:

# Callable the background thread invokes each time the queue is flushed.
queue_dispatcher = background_linter
# Thread name used to find and finalize the worker across plugin reloads.
queue_thread_name = 'background linter'
# Maximum total postponement (seconds) of a queued lint; see _delay_queue.
MAX_DELAY = 10
def queue_loop():
    '''An infinite loop running the linter in a background thread meant to
    update the view after user modifies it and then does no further
    modifications for some time as to not slow down the UI with linting.'''
    global __signaled_, __signaled_first_
    while __loop_:
        # Block until _signal() releases the semaphore, i.e. the most
        # recently scheduled delay deadline has passed.
        #print 'acquire...'
        __semaphore_.acquire()
        # Reset signal bookkeeping before dispatching the queued work.
        __signaled_first_ = 0
        __signaled_ = 0
        #print 'DISPATCHING!', len(QUEUE)
        queue_dispatcher()
def queue(view, callback, kwargs):
    '''Register ``callback`` to lint ``view`` after a delay.

    Only one callback per view is kept (keyed by view id), so rapid
    successive edits coalesce into a single lint run.
    '''
    global __signaled_, __signaled_first_
    now = time.time()
    __lock_.acquire()
    try:
        QUEUE[view.id()] = callback
        timeout = kwargs['timeout']
        busy_timeout = kwargs['busy_timeout']
        # Signaled again very recently -> the user is actively typing;
        # switch to the busy timeout when one was provided.
        if now < __signaled_ + timeout * 4:
            timeout = busy_timeout or timeout
        __signaled_ = now
        _delay_queue(timeout, kwargs['preemptive'])
        # Remember when the current signal burst started (for MAX_DELAY).
        if not __signaled_first_:
            __signaled_first_ = __signaled_
            #print 'first',
        #print 'queued in', (__signaled_ - now)
    finally:
        __lock_.release()
def _delay_queue(timeout, preemptive):
    '''Push the pending signal deadline ``timeout`` milliseconds into the
    future and schedule a timer to release the worker semaphore.

    Callers hold __lock_ (see queue()/delay_queue()). Multiple timers may
    be scheduled; each one re-checks the (possibly further postponed)
    deadline before releasing, so stale timers become no-ops.
    '''
    global __signaled_, __queued_
    now = time.time()
    if not preemptive and now <= __queued_ + 0.01:
        return  # never delay queues too fast (except preemptively)
    __queued_ = now
    _timeout = float(timeout) / 1000
    if __signaled_first_:
        # Cap total postponement at MAX_DELAY seconds past the first signal.
        if MAX_DELAY > 0 and now - __signaled_first_ + _timeout > MAX_DELAY:
            _timeout -= now - __signaled_first_
            if _timeout < 0:
                _timeout = 0
            timeout = int(round(_timeout * 1000, 0))
    new__signaled_ = now + _timeout - 0.01
    if __signaled_ >= now - 0.01 and (preemptive or new__signaled_ >= __signaled_ - 0.01):
        __signaled_ = new__signaled_
        #print 'delayed to', (preemptive, __signaled_ - now)

        def _signal():
            # Fire only if the deadline has actually passed; earlier
            # (superseded) timers return without releasing.
            if time.time() < __signaled_:
                return
            __semaphore_.release()

        sublime.set_timeout(_signal, timeout)
def delay_queue(timeout):
    '''Thread-safe wrapper: postpone the queued lint by ``timeout`` ms.'''
    with __lock_:
        _delay_queue(timeout, False)
# only start the thread once - otherwise the plugin will get laggy
# when saving it often.
# Synchronization primitives shared between the Sublime API thread and the
# background linter thread.
__semaphore_ = threading.Semaphore(0)
__lock_ = threading.Lock()
# time.time() timestamps used by the queue-delay bookkeeping above.
__queued_ = 0
__signaled_ = 0
__signaled_first_ = 0
# First finalize old standing threads:
__loop_ = False
__pre_initialized_ = False
def queue_finalize(timeout=None):
    '''Stop any background linter thread left over from a previous load of
    this module (Sublime reloads plugins in-place) and join it.

    Sets __pre_initialized_ when an old thread was found so later setup can
    distinguish a fresh start from a reload.
    '''
    global __pre_initialized_
    for thread in threading.enumerate():
        # FIX: Thread.isAlive() was a deprecated camelCase alias and was
        # removed in Python 3.9; is_alive() works on Python 2.6+ and 3.x.
        if thread.is_alive() and thread.name == queue_thread_name:
            __pre_initialized_ = True
            # Wake the old worker (it blocks on its semaphore) so it can
            # observe __loop_ == False and exit, then wait for it.
            thread.__semaphore_.release()
            thread.join(timeout)
queue_finalize()
# Initialize background thread:
__loop_ = True
__active_linter_thread = threading.Thread(target=queue_loop, name=queue_thread_name)
# Stash the semaphore on the thread object so queue_finalize() in a future
# reload of this module can release it.
__active_linter_thread.__semaphore_ = __semaphore_
__active_linter_thread.start()
################################################################################
# Message template used to report an option nobody recognizes.
UNRECOGNIZED = '''
* Unrecognized option * : %s
==============================================
'''
def view_in_tab(view, title, text, file_type):
    '''Helper function to display information in a tab.

    Creates a new scratch tab in the view's window, fills it with ``text``
    and applies the given syntax file. Returns (tab, buffer_id).
    '''
    tab = view.window().new_file()
    tab.set_name(title)
    _id = tab.buffer_id()
    # FIX: set_scratch() takes a boolean; the original passed the buffer id,
    # which only behaved correctly because a non-zero id is truthy.
    tab.set_scratch(True)
    tab.settings().set('gutter', True)
    tab.settings().set('line_numbers', False)
    tab.set_syntax_file(file_type)
    ed = tab.begin_edit()
    tab.insert(ed, 0, text)
    tab.end_edit(ed)
    return tab, _id
def lint_views(linter):
    '''Preemptively re-lint every open view whose selected linter is
    ``linter``. No-op when linter is falsy.'''
    if not linter:
        return
    matching = [
        view
        for window in sublime.windows()
        for view in window.views()
        if select_linter(view) == linter
    ]
    for view in matching:
        queue_linter(linter, view, preemptive=True)
def reload_view_module(view):
    '''If the saved view is a linter's own module file, hot-reload that
    module and re-lint all views that use it.'''
    saved_path = view.file_name()
    for name, linter in list(LINTERS.items()):
        module = sys.modules[linter.__module__]
        if module.__file__ != saved_path:
            continue
        print('SublimeLinter: reloading language:', linter.language)
        MOD_LOAD.reload_module(module)
        lint_views(linter)
        break
def settings_changed():
    '''Settings-file change hook: re-apply user settings to every open
    view that currently has a linter.'''
    for window in sublime.windows():
        for view in window.views():
            if select_linter(view):
                reload_settings(view)
def reload_settings(view):
    '''Restores user settings.

    Copies every recognized SublimeLinter setting from the global
    SublimeLinter.sublime-settings file into the view's local settings and
    re-registers the change listener so future edits propagate.
    '''
    settings_name = 'SublimeLinter'
    settings = sublime.load_settings(settings_name + '.sublime-settings')
    # Clear first so repeated calls don't stack duplicate listeners.
    settings.clear_on_change(settings_name)
    settings.add_on_change(settings_name, settings_changed)
    for setting in ALL_SETTINGS:
        if settings.get(setting) is not None:
            view.settings().set(setting, settings.get(setting))
    # NOTE(review): this forces any non-None 'sublimelinter' value to True,
    # which would clobber 'load-save'/'save-only' string modes just copied
    # above — verify this normalization is intentional.
    if view.settings().get('sublimelinter') is not None:
        view.settings().set('sublimelinter', True)
class LintCommand(sublime_plugin.TextCommand):
    '''command to interact with linters'''

    def __init__(self, view):
        self.view = view
        self.help_called = False

    def run(self, edit, action=''):
        '''Entry point used via view.run_command; dispatches ``action`` to
        the matching handler, or runs the named linter directly.'''
        if not action:
            return
        try:
            lc_action = action.lower()
        except AttributeError:
            # Non-string action: nothing sensible to do.
            return
        handlers = {
            'reset': self.reset,
            'on': self.on,
            'load-save': self.enable_load_save,
            'save-only': self.enable_save_only,
            'off': self.off,
        }
        handler = handlers.get(lc_action)
        if handler is not None:
            handler()
        elif lc_action in LINTERS:
            self._run(lc_action)

    def reset(self):
        '''Removes existing lint marks and restores user settings.'''
        erase_lint_marks(self.view)
        reload_settings(self.view)

    def on(self):
        '''Turns background linting on.'''
        self.view.settings().set('sublimelinter', True)
        queue_linter(select_linter(self.view), self.view, preemptive=True)

    def enable_load_save(self):
        '''Turns load-save linting on.'''
        self.view.settings().set('sublimelinter', 'load-save')
        erase_lint_marks(self.view)

    def enable_save_only(self):
        '''Turns save-only linting on.'''
        self.view.settings().set('sublimelinter', 'save-only')
        erase_lint_marks(self.view)

    def off(self):
        '''Turns background linting off.'''
        self.view.settings().set('sublimelinter', False)
        erase_lint_marks(self.view)

    def _run(self, name):
        '''runs an existing linter'''
        run_once(LINTERS[name.lower()], self.view)
class BackgroundLinter(sublime_plugin.EventListener):
    '''This plugin controls a linter meant to work in the background
    to provide interactive feedback as a file is edited. It can be
    turned off via a setting.
    '''

    def __init__(self):
        super(BackgroundLinter, self).__init__()
        # Last line whose messages were shown in the status bar; -1 forces
        # the next selection change to refresh the status bar.
        self.lastSelectedLineNo = -1

    def on_modified(self, view):
        if view.is_scratch():
            return
        if view.settings().get('sublimelinter') is not True:
            erase_lint_marks(view)
            return
        linter = select_linter(view)
        # File-based linters are not invoked during a modify
        if linter and linter.input_method == INPUT_METHOD_FILE:
            erase_lint_marks(view)
            return
        # Reset the last selected line number so that the current line will show error messages
        # when update_statusbar is called.
        self.lastSelectedLineNo = -1
        queue_linter(linter, view)

    def on_load(self, view):
        reload_settings(view)
        sublimelinter_setting = view.settings().get('sublimelinter')
        if view.is_scratch() or sublimelinter_setting is False or sublimelinter_setting == 'save-only':
            return
        queue_linter(select_linter(view), view, event='on_load')

    def on_post_save(self, view):
        sublimelinter_setting = view.settings().get('sublimelinter')
        # FIX: compare against None with 'is' (PEP 8); '== None' can misfire
        # on objects implementing a custom __eq__.
        if sublimelinter_setting is None:
            reload_settings(view)
        if view.is_scratch() or sublimelinter_setting is False:
            return
        reload_view_module(view)
        queue_linter(select_linter(view), view, preemptive=True, event='on_post_save')

    def on_selection_modified(self, view):
        if view.is_scratch():
            return
        delay_queue(1000)  # on movement, delay queue (to make movement responsive)
        # We only display errors in the status bar for the last line in the current selection.
        # If that line number has not changed, there is no point in updating the status bar.
        lastSelectedLineNo = last_selected_lineno(view)
        if lastSelectedLineNo != self.lastSelectedLineNo:
            self.lastSelectedLineNo = lastSelectedLineNo
            update_statusbar(view)
class FindLintErrorCommand(sublime_plugin.TextCommand):
    '''This command is just a superclass for other commands, it is never enabled.'''

    def is_enabled(self):
        return select_linter(self.view) is not None

    def find_lint_error(self, forward):
        '''Move the caret to the next (forward=True) or previous lint error
        region, wrapping around unless disabled by settings.'''
        linter = select_linter(self.view, ignore_disabled=True)
        if not linter:
            return
        # Re-lint so the region list reflects the current buffer contents.
        self.view.run_command('lint', {'action': linter.language})
        regions = get_lint_regions(self.view, reverse=not forward, coalesce=True)
        if not regions:
            sublime.error_message('No lint errors.')
            return
        sel = self.view.sel()
        point = sel[0].begin() if forward else sel[-1].end()
        # The regions are already ordered for the travel direction, so pick
        # the first one strictly past the caret.
        if forward:
            target = next((r for r in regions if point < r.begin()), None)
        else:
            target = next((r for r in regions if point > r.end()), None)
        # If there is only one error line and the cursor is in that line, we
        # cannot move. Otherwise wrap to the first/last error line unless
        # settings disallow that.
        if target is None and (len(regions) > 1 or not regions[0].contains(point)):
            if self.view.settings().get('sublimelinter_wrap_find', True):
                target = regions[0]
        if target is not None:
            select_lint_region(self.view, target)
        else:
            sublime.error_message('No {0} lint errors.'.format('next' if forward else 'previous'))
        return target
class FindNextLintErrorCommand(FindLintErrorCommand):
    def run(self, edit, **args):
        '''
        Move the cursor to the next lint error in the current view.

        The search will wrap to the top unless the sublimelinter_wrap_find
        setting is set to false.
        '''
        self.find_lint_error(forward=True)
class FindPreviousLintErrorCommand(FindLintErrorCommand):
    def run(self, edit, **args):
        '''
        Move the cursor to the previous lint error in the current view.

        The search will wrap to the bottom unless the sublimelinter_wrap_find
        setting is set to false.
        '''
        self.find_lint_error(forward=False)
class SublimelinterWindowCommand(sublime_plugin.WindowCommand):
    '''Base window command: enabled only when there is an active view and
    it is not a scratch buffer.'''

    def is_enabled(self):
        view = self.window.active_view()
        return bool(view) and not view.is_scratch()

    def run(self, **args):
        # Base implementation is a no-op; subclasses provide behavior.
        pass
class SublimelinterAnnotationsCommand(SublimelinterWindowCommand):
    '''Commands to extract annotations and display them in
    a file
    '''

    def run(self, **args):
        annotations_linter = LINTERS.get('annotations', None)
        if annotations_linter is None:
            return
        view = self.window.active_view()
        if not view:
            return
        buffer_text = view.substr(sublime.Region(0, view.size()))
        path = view.file_name()
        notes = annotations_linter.extract_annotations(buffer_text, view, path)
        # Show the extracted notes in a fresh scratch tab named after the file.
        base_name = os.path.split(path)[1]
        view_in_tab(view, 'Annotations from {0}'.format(base_name), notes, '')
class SublimelinterCommand(SublimelinterWindowCommand):
    '''Window command that runs a linter action against the active view.'''

    def is_enabled(self):
        if not super(SublimelinterCommand, self).is_enabled():
            return False
        linter = select_linter(self.window.active_view(), ignore_disabled=True)
        return linter is not None

    def run(self, **args):
        view = self.window.active_view()
        action = args.get('action', '')
        if not (view and action):
            return
        if action == 'lint':
            self.lint_view(view, show_popup_list=args.get('show_popup', False))
        else:
            view.run_command('lint', {'action': action})

    def lint_view(self, view, show_popup_list):
        '''Lint the view and report the result count (dialog or popup).'''
        linter = select_linter(view, ignore_disabled=True)
        if not linter:
            syntax = syntax_name(view)
            sublime.error_message('No linter for the syntax "{0}"'.format(syntax))
            return
        view.run_command('lint', {'action': linter.language})
        regions = get_lint_regions(view, coalesce=True)
        if not regions:
            sublime.error_message('No lint errors.')
            return
        if show_popup_list:
            popup_error_list(view)
        else:
            count = len(regions)
            sublime.error_message('{0} lint error{1}.'.format(count, 's' if count != 1 else ''))
class SublimelinterLintCommand(SublimelinterCommand):
    '''Manual lint command; disabled while background linting is active
    (linting already happens on every change, so it would be redundant).'''

    def is_enabled(self):
        if not super(SublimelinterLintCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') is True)
class SublimelinterShowErrorsCommand(SublimelinterCommand):
    # NOTE(review): this override simply delegates to the inherited method
    # and could be deleted; kept byte-identical to avoid surprises.
    def is_enabled(self):
        return super(SublimelinterShowErrorsCommand, self).is_enabled()
class SublimelinterEnableLoadSaveCommand(SublimelinterCommand):
    '''Switch to load-save linting; disabled when already in that mode.'''

    def is_enabled(self):
        if not super(SublimelinterEnableLoadSaveCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') == 'load-save')
class SublimelinterEnableSaveOnlyCommand(SublimelinterCommand):
    '''Switch to save-only linting; disabled when already in that mode.'''

    def is_enabled(self):
        if not super(SublimelinterEnableSaveOnlyCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') == 'save-only')
class SublimelinterDisableCommand(SublimelinterCommand):
    '''Turn linting off; disabled when linting is already off.'''

    def is_enabled(self):
        if not super(SublimelinterDisableCommand, self).is_enabled():
            return False
        view = self.window.active_view()
        return not (view and view.settings().get('sublimelinter') is False)
| {
"repo_name": "benesch/sublime-linter",
"path": "SublimeLinter.py",
"copies": "1",
"size": "31667",
"license": "mit",
"hash": 612613231846596000,
"line_mean": 30.8581488934,
"line_max": 163,
"alpha_frac": 0.612814602,
"autogenerated": false,
"ratio": 3.8821870785828123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9991093936078378,
"avg_score": 0.0007815489008869164,
"num_lines": 994
} |
from functools import partial
import os
import shutil
from subprocess import check_call
import sys
from nose.plugins import Plugin
from funfactory import manage
# Repository root: one directory above this tests package.
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Scratch directory holding the cloned playdoh project used by the tests.
PLAYDOH_ROOT = '.playdoh'
PLAYDOH = os.path.join(ROOT, PLAYDOH_ROOT, 'funtestapp')
# Records which remote/branch the last clone came from (see begin()).
ENVIRONMENT_NOTE = os.path.join(ROOT, PLAYDOH_ROOT, 'last-env.txt')
# Shorthand: run a command string through the shell, failing on error.
shell = partial(check_call, shell=True)
# Database and playdoh source settings, overridable via FF_* env vars.
DB_USER = os.environ.get('FF_DB_USER', 'root')
DB_PASS = os.environ.get('FF_DB_PASS', '')
DB_HOST = os.environ.get('FF_DB_HOST', '')
DB_NAME = os.environ.get('FF_DB_NAME', '_funfactory_test')
FF_PLAYDOH_REMOTE = os.environ.get('FF_PLAYDOH_REMOTE',
                                   'git://github.com/mozilla/playdoh.git')
FF_PLAYDOH_BRANCH = os.environ.get('FF_PLAYDOH_BRANCH', 'master')
def test_root():
    '''Sanity check: ROOT must point at the repo root (contains setup.py).'''
    setup_py = os.path.join(ROOT, 'setup.py')
    assert os.path.exists(setup_py), (
        'This does not appear to be the root dir: %s' % ROOT)
class FunFactoryTests(Plugin):
    """Enables the fun factory test suite.

    A nose plugin that, before tests run, clones (or updates) a playdoh
    project into .playdoh/, regenerates its local settings with test DB
    credentials, creates the MySQL database, runs syncdb, and boots the
    Django environment in-process.
    """
    __test__ = False  # Nose: do not collect as test
    name = 'ff-tests'
    score = 999  # needs to execute early

    def options(self, parser, env=os.environ):
        super(FunFactoryTests, self).options(parser, env=env)
        self.parser = parser

    def configure(self, options, conf):
        super(FunFactoryTests, self).configure(options, conf)
        self.enabled = True  # Enables the plugin without a cmd line flag
        self.options = options

    def _write_last_environment(self):
        # Record remote+branch so the next run can detect a changed source.
        with open(ENVIRONMENT_NOTE, 'w') as f:
            f.write(self._this_environment())

    def _read_last_environment(self):
        # NOTE(review): file handle is not explicitly closed here.
        return open(ENVIRONMENT_NOTE).read()

    def _this_environment(self):
        return FF_PLAYDOH_REMOTE + '\n' + FF_PLAYDOH_BRANCH + '\n'

    def begin(self):
        """One-time setup before any test runs (nose Plugin hook)."""
        # If the remote/branch changed since last run, re-clone from scratch.
        if os.path.exists(ENVIRONMENT_NOTE):
            if self._read_last_environment() != self._this_environment():
                shutil.rmtree(PLAYDOH)
        if not os.path.exists(PLAYDOH):
            container = os.path.abspath(os.path.join(PLAYDOH, '..'))
            if not os.path.exists(container):
                os.mkdir(container)
            check_call(['git', 'clone', '--recursive',
                        '--branch', FF_PLAYDOH_BRANCH,
                        FF_PLAYDOH_REMOTE,
                        PLAYDOH])
        else:
            # Existing clone: fast-forward it and sync its submodules.
            proj_sh = partial(shell, cwd=PLAYDOH)
            proj_sh('git pull origin %s' % FF_PLAYDOH_BRANCH)
            proj_sh('git submodule sync -q')
            proj_sh('git submodule update --init --recursive')
        self._write_last_environment()
        # Regenerate settings/local.py from the -dist template each run.
        st = os.path.join(PLAYDOH, 'project', 'settings', 'local.py')
        if os.path.exists(st):
            os.unlink(st)
        shutil.copy(os.path.join(PLAYDOH, 'project', 'settings',
                                 'local.py-dist'),
                    st)
        with open(st, 'r') as f:
            new_st = f.read()
        # Inject DB credentials and test-friendly values into the template.
        new_st = new_st.replace("'USER': 'root'",
                                "'USER': '%s'" % DB_USER)
        new_st = new_st.replace("'PASSWORD': ''",
                                "'PASSWORD': '%s'" % DB_PASS)
        new_st = new_st.replace("'HOST': ''",
                                "'HOST': '%s'" % DB_HOST)
        new_st = new_st.replace("'NAME': 'playdoh_app'",
                                "'NAME': '%s'" % DB_NAME)
        new_st = new_st.replace("SECRET_KEY = ''",
                                "SECRET_KEY = 'testinglolz'")
        new_st = new_st + "\nfrom . import base\nINSTALLED_APPS = list(base.INSTALLED_APPS) + " \
                          "['django.contrib.admin']\n"
        new_st = new_st + "\nSITE_URL = ''\n"
        with open(st, 'w') as f:
            f.write(new_st)
        # Create the test database (idempotent) and sync the schema.
        extra = ''
        if DB_PASS:
            extra = '--password=%s' % DB_PASS
        if DB_HOST:
            extra += ' -h %s' % DB_HOST
        shell('mysql -u %s %s -e "create database if not exists %s"'
              % (DB_USER, extra, DB_NAME))
        check_call([sys.executable, 'manage.py', 'syncdb', '--noinput'],
                   cwd=PLAYDOH)
        # For in-process tests:
        wd = os.getcwd()
        os.chdir(PLAYDOH)  # Simulate what happens in a real app.
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
        try:
            manage.setup_environ(os.path.join(PLAYDOH, 'manage.py'))
        finally:
            os.chdir(wd)
        # Puts path back to this dev version of funfactory:
        sys.path.insert(0, ROOT)
        # simulate what django does, which is to import the root urls.py
        # once everything has been set up (e.g. setup_environ())
        from funfactory.monkeypatches import patch
        patch()
| {
"repo_name": "mozilla/funfactory",
"path": "tests/__init__.py",
"copies": "1",
"size": "4838",
"license": "bsd-3-clause",
"hash": 8374660565356384000,
"line_mean": 36.796875,
"line_max": 101,
"alpha_frac": 0.5438197602,
"autogenerated": false,
"ratio": 3.570479704797048,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4614299464997048,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import sys
import textwrap
from .vendor import six
from .context import Context
from .loader import FilesystemLoader, DEFAULT_COLLECTION_NAME
from .parser import Parser, Context as ParserContext, Argument
from .executor import Executor
from .exceptions import Failure, CollectionNotFound, ParseError, Exit
from .util import debug, pty_size, enable_logging
from ._version import __version__
def task_name_to_key(x):
    '''Sort key for task names: namespace depth first (number of dots),
    then the name itself alphabetically.'''
    depth = x.count('.')
    return (depth, x)
# Sort task names shallowest-namespace-first, then alphabetically.
sort_names = partial(sorted, key=task_name_to_key)
# Indentation used throughout help/listing output.
indent_num = 2
indent = " " * indent_num
def print_help(tuples):
    """
    Print tabbed columns from (name, help) tuples.

    Useful for listing tasks + docstrings, flags + help strings, etc.
    """
    padding = 3
    # Column layout: names are never wrapped; descriptions get whatever
    # terminal width remains after the name column + indent + padding.
    name_width = max(len(name) for name, _ in tuples)
    desc_width = pty_size()[0] - name_width - indent_num - padding - 1
    wrapper = textwrap.TextWrapper(width=desc_width)
    for name, help_str in tuples:
        chunks = wrapper.wrap(help_str)
        # Left column: indent + name padded out + inter-column padding.
        spec = indent + name + ' ' * (name_width - len(name)) + ' ' * padding
        if chunks:
            print(spec + chunks[0])
            continuation = ' ' * len(spec)
            for chunk in chunks[1:]:
                print(continuation + chunk)
        else:
            print(spec.rstrip())
    print('')
def parse_gracefully(parser, argv):
    """
    Run ``parser.parse_argv(argv)`` & gracefully handle ``ParseError``.

    'Gracefully' meaning to print a useful human-facing error message instead
    of a traceback; the program will still exit if an error is raised.

    If no error is raised, returns the result of the ``parse_argv`` call.
    """
    try:
        return parser.parse_argv(argv)
    except ParseError as e:
        # sys.exit with a string prints it to stderr and exits with code 1.
        sys.exit(str(e))
def parse(argv, collection=None, version=None):
    """
    Parse ``argv`` list-of-strings into useful core & per-task structures.

    :returns:
        Three-tuple of ``args`` (core, non-task `.Argument` objects),
        ``collection`` (compiled `.Collection` of tasks, using defaults or core
        arguments affecting collection generation) and ``tasks`` (a list of
        `~.parser.context.Context` objects representing the requested task
        executions).

    Raises `.Exit` for informational flags (``--version``, ``--help``,
    ``--list``) and exits via ``sys.exit`` when parsing fails or the task
    collection cannot be found.
    """
    # Initial/core parsing (core options can affect the rest of the parsing)
    initial_context = ParserContext(args=(
        Argument(
            names=('collection', 'c'),
            help="Specify collection name to load."
        ),
        Argument(
            names=('root', 'r'),
            help="Change root directory used for finding task modules."
        ),
        Argument(
            names=('help', 'h'),
            optional=True,
            help="Show core or per-task help and exit."
        ),
        Argument(
            names=('version', 'V'),
            kind=bool,
            default=False,
            help="Show version and exit."
        ),
        Argument(
            names=('list', 'l'),
            kind=bool,
            default=False,
            help="List available tasks."
        ),
        Argument(
            names=('no-dedupe',),
            kind=bool,
            default=False,
            help="Disable task deduplication."
        ),
        Argument(
            names=('echo', 'e'),
            kind=bool,
            default=False,
            help="Echo executed commands before running.",
        ),
        Argument(
            names=('warn-only', 'w'),
            kind=bool,
            default=False,
            help="Warn, instead of failing, when shell commands fail.",
        ),
        Argument(
            names=('pty', 'p'),
            kind=bool,
            default=False,
            help="Use a pty when executing shell commands.",
        ),
        Argument(
            names=('hide', 'H'),
            help="Set default value of run()'s 'hide' kwarg.",
        ),
        Argument(
            names=('debug', 'd'),
            kind=bool,
            default=False,
            help="Enable debug output.",
        ),
    ))
    # 'core' will result an .unparsed attribute with what was left over.
    debug("Parsing initial context (core args)")
    parser = Parser(initial=initial_context, ignore_unknown=True)
    core = parse_gracefully(parser, argv[1:])
    debug("After core-args pass, leftover argv: %r" % (core.unparsed,))
    args = core[0].args
    # Enable debugging from here on out, if debug flag was given.
    if args.debug.value:
        enable_logging()
    # Print version & exit if necessary
    if args.version.value:
        if version:
            print(version)
        else:
            print("Invoke %s" % __version__)
        raise Exit
    # Core (no value given) --help output
    # NOTE: '--help' is declared optional=True above, so a bare '--help'
    # parses to the literal True while '--help <task>' yields the task-name
    # string (handled further below) — hence the '== True' comparison.
    # TODO: if this wants to display context sensitive help (e.g. a combo help
    # and available tasks listing; or core flags modified by plugins/task
    # modules) it will have to move farther down.
    if args.help.value == True:
        program_name = os.path.basename(argv[0])
        if program_name == 'invoke' or program_name == 'inv':
            program_name = 'inv[oke]'
        print("Usage: {0} [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]".format(program_name))
        print("")
        print("Core options:")
        print_help(initial_context.help_tuples())
        raise Exit
    # Load collection (default or specified) and parse leftovers
    start = args.root.value
    loader = FilesystemLoader(start=start)
    coll_name = args.collection.value
    try:
        collection = loader.load(coll_name) if coll_name else loader.load()
    except CollectionNotFound:
        # TODO: improve sys.exit mocking in tests so we can just raise
        # Exit(msg)
        name = coll_name or DEFAULT_COLLECTION_NAME
        six.print_(
            "Can't find any collection named {0!r}!".format(name),
            file=sys.stderr
        )
        raise Exit(1)
    parser = Parser(contexts=collection.to_contexts())
    debug("Parsing tasks against collection %r" % collection)
    tasks = parse_gracefully(parser, core.unparsed)
    # Per-task help. Use the parser's contexts dict as that's the easiest way
    # to obtain Context objects here - which are what help output needs.
    name = args.help.value
    if name in parser.contexts:
        # Setup
        ctx = parser.contexts[name]
        tuples = ctx.help_tuples()
        docstring = collection[name].__doc__
        header = "Usage: inv[oke] [--core-opts] %s %%s[other tasks here ...]" % name
        print(header % ("[--options] " if tuples else ""))
        print("")
        print("Docstring:")
        if docstring:
            # Really wish textwrap worked better for this.
            doclines = textwrap.dedent(docstring.lstrip('\n').rstrip()+'\n').splitlines()
            for line in doclines:
                if line.strip():
                    print(indent + line)
                else:
                    print("")
            print("")
        else:
            print(indent + "none")
            print("")
        print("Options:")
        if tuples:
            print_help(tuples)
        else:
            print(indent + "none")
            print("")
        raise Exit
    # Print discovered tasks if necessary
    if args.list.value:
        # Sort in depth, then alpha, order
        task_names = collection.task_names
        # Short circuit if no tasks to show
        if not task_names:
            msg = "No tasks found in collection '{0}'!"
            print(msg.format(collection.name))
            raise Exit
        pairs = []
        for primary in sort_names(task_names.keys()):
            # Add aliases
            aliases = sort_names(task_names[primary])
            name = primary
            if aliases:
                name += " (%s)" % ', '.join(aliases)
            # Add docstring 1st lines
            task = collection[primary]
            help_ = ""
            if task.__doc__:
                help_ = task.__doc__.lstrip().splitlines()[0]
            pairs.append((name, help_))
        # Print
        print("Available tasks:\n")
        print_help(pairs)
        raise Exit
    # Return to caller so they can handle the results
    return args, collection, tasks
def derive_opts(args):
    '''Translate parsed core CLI flags into the 'run' options dict used to
    seed the Context handed to the Executor.'''
    run_opts = {}
    if args['warn-only'].value:
        run_opts['warn'] = True
    if args.pty.value:
        run_opts['pty'] = True
    hide = args.hide.value
    if hide:
        run_opts['hide'] = hide
    if args.echo.value:
        run_opts['echo'] = True
    return {'run': run_opts}
def dispatch(argv, version=None):
    """Parse ``argv`` and execute the requested tasks.

    Exits the process when parsing raises ``Exit`` or a task raises
    ``Failure``; otherwise returns whatever the executor returns.
    """
    try:
        args, collection, parser_contexts = parse(argv, version=version)
    except Exit as e:
        # 'return' here is mostly a concession to testing. Meh :(
        # TODO: probably restructure things better so we don't need this?
        return sys.exit(e.code)
    executor = Executor(collection, Context(**derive_opts(args)))
    try:
        # Each parsed per-task context becomes a (name, kwargs) pair.
        tasks = [(ctx.name, ctx.as_kwargs) for ctx in parser_contexts]
        return executor.execute(*tasks, dedupe=not args['no-dedupe'].value)
    except Failure as f:
        sys.exit(f.result.exited)
def main():
    """Command-line entry point: parse ``sys.argv`` and dispatch tasks."""
    # Parse command line
    debug("Base argv from sys: %r" % (sys.argv[1:],))
    dispatch(sys.argv)
| {
"repo_name": "ericholscher/invoke",
"path": "invoke/cli.py",
"copies": "1",
"size": "9613",
"license": "bsd-2-clause",
"hash": -5503793811804124000,
"line_mean": 31.4763513514,
"line_max": 108,
"alpha_frac": 0.5674607303,
"autogenerated": false,
"ratio": 4.201486013986014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5268946744286014,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import sys
try:
from configparser import ConfigParser # Python 3
except:
from ConfigParser import ConfigParser # Python 2
from .config import USER_CONFIG
from .error import GeosupportError
from .function_info import FUNCTIONS, function_help, list_functions, input_help
from .io import format_input, parse_output, set_mode
# Module-level handle to the currently loaded Geosupport shared library.
# Kept so a later Geosupport() construction can unload the previous
# DLL/.so (FreeLibrary / dlclose in __init__) before loading another one.
GEOLIB = None
class Geosupport(object):
    """ctypes wrapper around the NYC Geosupport desktop library.

    On construction the platform-appropriate shared library (NYCGEO.dll
    on Windows, libgeo.so on Linux) is loaded; any copy loaded earlier in
    this process is unloaded first so switching versions works. Each
    Geosupport function listed in ``FUNCTIONS`` is exposed as a callable
    attribute (see ``__getattr__``) or via ``call()``.
    """

    def __init__(self, geosupport_path=None, geosupport_version=None):
        """Load the Geosupport shared library.

        :param geosupport_path: Directory of a Geosupport install
            (Windows only); its ``Fls`` and ``bin`` folders are wired into
            the environment before loading.
        :param geosupport_version: Named version from the ``[versions]``
            section of the user config file; resolved to a path.
        :raises GeosupportError: on unsupported OS, Linux path arguments,
            or a failed library load (e.g. 32/64-bit mismatch).
        """
        global GEOLIB
        self.py_version = sys.version_info[0]
        self.platform = sys.platform
        # 64-bit interpreters report sys.maxsize > 2**32.
        self.py_bit = '64' if (sys.maxsize > 2 ** 32) else '32'
        if geosupport_version is not None:
            # Resolve a named version to an install path via the user config.
            config = ConfigParser()
            config.read(os.path.expanduser(USER_CONFIG))
            versions = dict(config.items('versions'))
            geosupport_path = versions[geosupport_version.lower()]
        if geosupport_path is not None:
            if self.platform.startswith('linux'):
                raise GeosupportError(
                    "geosupport_path and geosupport_version not valid with "
                    "linux. You must set LD_LIBRARY_PATH and GEOFILES "
                    "before running python."
                )
            # Point Geosupport at its data files and make sure only this
            # install's bin directory is on PATH.
            os.environ['GEOFILES'] = os.path.join(geosupport_path, 'Fls\\')
            os.environ['PATH'] = ';'.join([
                i for i in os.environ['PATH'].split(';') if
                'GEOSUPPORT' not in i.upper()
            ])
            os.environ['PATH'] += ';' + os.path.join(geosupport_path, 'bin')
        try:
            if self.platform == 'win32':
                from ctypes import windll, cdll, WinDLL, wintypes
                if GEOLIB is not None:
                    # Explicitly free the previously loaded DLL so the new
                    # (possibly different-version) one takes effect.
                    kernel32 = WinDLL('kernel32')
                    kernel32.FreeLibrary.argtypes = [wintypes.HMODULE]
                    kernel32.FreeLibrary(GEOLIB._handle)
                # 64-bit loads via cdll (cdecl), 32-bit via windll (stdcall).
                if self.py_bit == '64':
                    self.geolib = cdll.LoadLibrary("NYCGEO.dll")
                else:
                    self.geolib = windll.LoadLibrary("NYCGEO.dll")
            elif self.platform.startswith('linux'):
                from ctypes import cdll
                if GEOLIB is not None:
                    # dlclose() the old handle before loading again.
                    cdll.LoadLibrary('libdl.so').dlclose(GEOLIB._handle)
                self.geolib = cdll.LoadLibrary("libgeo.so")
            else:
                raise GeosupportError(
                    'This Operating System is currently not supported.'
                )
            GEOLIB = self.geolib
        except OSError as e:
            raise GeosupportError(
                '%s\n'
                'You are currently using a %s-bit Python interpreter. '
                'Is the installed version of Geosupport %s-bit?' % (
                    e, self.py_bit, self.py_bit
                )
            )

    def _call_geolib(self, wa1, wa2):
        """
        Calls the Geosupport libs & encodes/decodes strings for Python 3.

        The two "work area" strings are passed to the native entry point
        and returned (as text on Python 3).
        """
        # encode: the native library expects byte strings on Python 3
        if self.py_version == 3:
            wa1 = bytes(str(wa1), 'utf8')
            wa2 = bytes(str(wa2), 'utf8')
        # Call Geosupport libs
        if self.platform == 'win32':
            self.geolib.NYCgeo(wa1, wa2)  # windows
        else:
            self.geolib.geo(wa1, wa2)  # linux
        # decode back to text for downstream parsing
        if self.py_version == 3:
            wa1 = str(wa1, 'utf8')
            wa2 = str(wa2, 'utf8')
        return wa1, wa2

    def call(self, kwargs_dict=None, mode=None, **kwargs):
        """Call a Geosupport function and return the parsed result dict.

        Keyword arguments (or ``kwargs_dict``) are formatted into the two
        Geosupport work areas.

        :raises GeosupportError: when the Geosupport Return Code is
            non-numeric or greater than 1 (codes 0/1 are accepted).
        """
        if kwargs_dict is None:
            kwargs_dict = {}
        kwargs_dict.update(kwargs)
        kwargs_dict.update(set_mode(mode))
        flags, wa1, wa2 = format_input(kwargs_dict)
        wa1, wa2 = self._call_geolib(wa1, wa2)
        result = parse_output(flags, wa1, wa2)
        return_code = result['Geosupport Return Code (GRC)']
        if not return_code.isdigit() or int(return_code) > 1:
            raise GeosupportError(
                result['Message'] + ' ' + result['Message 2'],
                result
            )
        return result

    def __getattr__(self, name):
        """Expose each known Geosupport function as a bound callable.

        The returned partial also carries a ``.help`` callable for that
        function's documentation.
        """
        if name in FUNCTIONS:
            p = partial(self.call, function=name)
            p.help = partial(function_help, name)
            return p
        raise AttributeError("'%s' object has no attribute '%s'" % (
            self.__class__.__name__, name
        ))

    def __getitem__(self, name):
        """Dictionary-style access; delegates to ``__getattr__``."""
        return self.__getattr__(name)

    def help(self, name=None, return_as_string=False):
        """Show help for a function, for 'input', or list all functions.

        :param name: Function name, or 'input' for input-field help; when
            omitted, all available functions are listed.
        :param return_as_string: Return the help text instead of printing.
        """
        if name:
            if name.upper() == 'INPUT':
                # Bug fix: previously this result was unconditionally
                # overwritten by the function_help() lookup below, so
                # help('input') never showed the input help.
                # NOTE(review): input_help() is called without
                # return_as_string — confirm whether it should honor it.
                return_val = input_help()
            else:
                try:
                    return_val = function_help(name, return_as_string)
                except KeyError:
                    return_val = "Function '%s' does not exist." % name
        else:
            return_val = list_functions()
        if return_as_string:
            return return_val
        elif return_val:
            print(return_val)
| {
"repo_name": "ishiland/python-geosupport",
"path": "geosupport/geosupport.py",
"copies": "1",
"size": "4974",
"license": "mit",
"hash": -1025265138202885400,
"line_mean": 33.0684931507,
"line_max": 79,
"alpha_frac": 0.5341777242,
"autogenerated": false,
"ratio": 3.957040572792363,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4991218296992363,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import tempfile
import unittest
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from nose.tools import eq_
from amo.storage_utils import (walk_storage, copy_stored_file,
move_stored_file, rm_stored_dir)
from amo.utils import rm_local_tmp_dir
def test_storage_walk():
    """Nose generator test: walk_storage() mirrors os.walk over storage.

    Builds a small tree through the storage API, then yields one
    (eq_, actual, expected) check per directory visited, in sorted order.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)  # shorthand: join under the temp root
    try:
        storage.save(jn('file1.txt'), ContentFile(''))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/file2.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile(''))
        storage.save(jn('one/three/file1.txt'), ContentFile(''))
        storage.save(jn('four/five/file1.txt'), ContentFile(''))
        storage.save(jn(u'four/kristi\u2603/kristi\u2603.txt'),
                     ContentFile(''))
        # Subdir/file lists become sets: only membership matters, not order.
        results = [(dir, set(subdirs), set(files))
                   for dir, subdirs, files in sorted(walk_storage(tmp))]
        # Each pop(0) consumes the next directory in sorted-walk order.
        yield (eq_, results.pop(0), (tmp, set(['four', 'one']), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('four'),
                                     set(['five', 'kristi\xe2\x98\x83']), set([])))
        yield (eq_, results.pop(0), (jn('four/five'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('four/kristi\xe2\x98\x83'), set([]),
                                     set(['kristi\xe2\x98\x83.txt'])))
        yield (eq_, results.pop(0), (jn('one'), set(['three', 'two']),
                                     set(['file1.txt', 'file2.txt'])))
        yield (eq_, results.pop(0), (jn('one/three'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('one/two'), set([]), set(['file1.txt'])))
        # Nothing should remain after the expected seven directories.
        yield (eq_, len(results), 0)
    finally:
        rm_local_tmp_dir(tmp)
def test_rm_stored_dir():
    """Nose generator test: rm_stored_dir() removes a tree recursively.

    Only the targeted directory (including non-ASCII entries) disappears;
    siblings outside it survive.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)  # shorthand: join under the temp root
    try:
        storage.save(jn('file1.txt'), ContentFile('<stuff>'))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile('moar stuff'))
        storage.save(jn(u'one/kristi\u0107/kristi\u0107.txt'),
                     ContentFile(''))
        rm_stored_dir(jn('one'))
        # Everything at or below one/ must be gone...
        yield (eq_, storage.exists(jn('one')), False)
        yield (eq_, storage.exists(jn('one/file1.txt')), False)
        yield (eq_, storage.exists(jn('one/two')), False)
        yield (eq_, storage.exists(jn('one/two/file1.txt')), False)
        yield (eq_, storage.exists(jn(u'one/kristi\u0107/kristi\u0107.txt')),
               False)
        # ...but the sibling file outside it is untouched.
        yield (eq_, storage.exists(jn('file1.txt')), True)
    finally:
        rm_local_tmp_dir(tmp)
class TestFileOps(unittest.TestCase):
    """Exercise copy_stored_file() / move_stored_file() on local storage."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        rm_local_tmp_dir(self.tmp)

    def path(self, path):
        # Anchor a relative name under this test's temp directory.
        return os.path.join(self.tmp, path)

    def contents(self, path):
        # Read back the raw bytes of a stored file.
        with storage.open(path, 'rb') as stored:
            return stored.read()

    def newfile(self, name, contents):
        # Create a stored file with the given contents; return its path.
        created = self.path(name)
        storage.save(created, ContentFile(contents))
        return created

    def test_copy(self):
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dest.txt')
        copy_stored_file(source, target)
        eq_(self.contents(target), '<contents>')

    def test_self_copy(self):
        # Copying a file onto itself must not clobber its contents.
        source = self.newfile('src.txt', '<contents>')
        target = self.path('src.txt')
        copy_stored_file(source, target)
        eq_(self.contents(target), '<contents>')

    def test_move(self):
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dest.txt')
        move_stored_file(source, target)
        eq_(self.contents(target), '<contents>')
        # A move also removes the original.
        eq_(storage.exists(source), False)

    def test_non_ascii(self):
        # Non-ASCII names and contents survive the round trip.
        source = self.newfile(u'kristi\u0107.txt',
                              u'ivan kristi\u0107'.encode('utf8'))
        target = self.path(u'somedir/kristi\u0107.txt')
        copy_stored_file(source, target)
        eq_(self.contents(target), 'ivan kristi\xc4\x87')

    def test_copy_chunking(self):
        # A 1-byte chunk size forces the chunked copy path.
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dest.txt')
        copy_stored_file(source, target, chunk_size=1)
        eq_(self.contents(target), '<contents>')

    def test_move_chunking(self):
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dest.txt')
        move_stored_file(source, target, chunk_size=1)
        eq_(self.contents(target), '<contents>')
        eq_(storage.exists(source), False)
| {
"repo_name": "jinankjain/zamboni",
"path": "apps/amo/tests/test_storage_utils.py",
"copies": "7",
"size": "4706",
"license": "bsd-3-clause",
"hash": 8351431824970932000,
"line_mean": 36.0551181102,
"line_max": 84,
"alpha_frac": 0.5665108372,
"autogenerated": false,
"ratio": 3.3566333808844506,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.742314421808445,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import tempfile
import unittest
from django.core.files.base import ContentFile
from django.test.utils import override_settings
from nose.tools import eq_
from mkt.site.storage_utils import (copy_stored_file, get_private_storage,
get_public_storage, move_stored_file,
private_storage, storage_is_remote,
walk_storage)
from mkt.site.tests import TestCase
from mkt.site.utils import rm_local_tmp_dir
def test_storage_walk():
    """Nose generator test: walk_storage() mirrors os.walk over a backend.

    Builds a small tree via private_storage, then yields one
    (eq_, actual, expected) check per directory visited, in sorted order.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)  # shorthand: join under the temp root
    try:
        private_storage.save(jn('file1.txt'), ContentFile(''))
        private_storage.save(jn('one/file1.txt'), ContentFile(''))
        private_storage.save(jn('one/file2.txt'), ContentFile(''))
        private_storage.save(jn('one/two/file1.txt'), ContentFile(''))
        private_storage.save(jn('one/three/file1.txt'), ContentFile(''))
        private_storage.save(jn('four/five/file1.txt'), ContentFile(''))
        private_storage.save(jn(u'four/kristi\u2603/kristi\u2603.txt'),
                             ContentFile(''))
        # Subdir/file lists become sets: only membership matters, not order.
        results = [(dir, set(subdirs), set(files))
                   for dir, subdirs, files in
                   sorted(walk_storage(tmp, storage=private_storage))]
        # Each pop(0) consumes the next directory in sorted-walk order.
        yield (eq_, results.pop(0),
               (tmp, set(['four', 'one']), set(['file1.txt'])))
        yield (eq_, results.pop(0),
               (jn('four'), set(['five', 'kristi\xe2\x98\x83']), set([])))
        yield (eq_, results.pop(0),
               (jn('four/five'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0),
               (jn('four/kristi\xe2\x98\x83'), set([]),
                set(['kristi\xe2\x98\x83.txt'])))
        yield (eq_, results.pop(0),
               (jn('one'), set(['three', 'two']),
                set(['file1.txt', 'file2.txt'])))
        yield (eq_, results.pop(0),
               (jn('one/three'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0),
               (jn('one/two'), set([]), set(['file1.txt'])))
        # Nothing should remain after the expected seven directories.
        yield (eq_, len(results), 0)
    finally:
        rm_local_tmp_dir(tmp)
class TestFileOps(unittest.TestCase):
    """Exercise copy/move between explicitly named storage backends."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        rm_local_tmp_dir(self.tmp)

    def path(self, path):
        # Anchor a relative name under this test's temp directory.
        return os.path.join(self.tmp, path)

    def contents(self, path):
        # Read back the raw bytes of a file in private storage.
        with private_storage.open(path, 'rb') as stored:
            return stored.read()

    def newfile(self, name, contents):
        # Create a file in private storage; return its full path.
        created = self.path(name)
        private_storage.save(created, ContentFile(contents))
        return created

    def test_copy(self):
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dst.txt')
        copy_stored_file(
            source, target,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(target), '<contents>')

    def test_self_copy(self):
        # Copying a file onto itself must not clobber its contents.
        source = self.newfile('src.txt', '<contents>')
        target = self.path('src.txt')
        copy_stored_file(
            source, target,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(target), '<contents>')

    def test_move(self):
        source = self.newfile('src.txt', '<contents>')
        target = self.path('somedir/dst.txt')
        move_stored_file(
            source, target,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(target), '<contents>')
        # A move also removes the original.
        eq_(private_storage.exists(source), False)

    def test_non_ascii(self):
        # Non-ASCII names and contents survive the round trip.
        source = self.newfile(u'kristi\u0107.txt',
                              u'ivan kristi\u0107'.encode('utf8'))
        target = self.path(u'somedir/kristi\u0107.txt')
        copy_stored_file(
            source, target,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(target), 'ivan kristi\xc4\x87')
class TestStorageClasses(TestCase):
    """The storage getters honour the configured DEFAULT_FILE_STORAGE."""

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.S3BotoPrivateStorage')
    def test_get_storage_remote(self):
        # With an S3 backend configured, the helpers report "remote" and
        # hand back the corresponding S3 storage classes.
        assert storage_is_remote()
        eq_(get_private_storage().__class__.__name__, 'S3BotoPrivateStorage')
        eq_(get_public_storage().__class__.__name__, 'S3BotoPublicStorage')

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
    def test_get_storage_local(self):
        # With local file storage, both getters return the local class.
        assert not storage_is_remote()
        eq_(get_private_storage().__class__.__name__, 'LocalFileStorage')
        eq_(get_public_storage().__class__.__name__, 'LocalFileStorage')
| {
"repo_name": "tsl143/zamboni",
"path": "mkt/site/tests/test_storage_utils.py",
"copies": "1",
"size": "4662",
"license": "bsd-3-clause",
"hash": 1015515176527736400,
"line_mean": 36,
"line_max": 77,
"alpha_frac": 0.5720720721,
"autogenerated": false,
"ratio": 3.613953488372093,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9686025560472094,
"avg_score": 0,
"num_lines": 126
} |
from functools import partial
import os
import tempfile
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
import pytest
from nose.tools import eq_
from olympia.amo.storage_utils import (walk_storage, copy_stored_file,
move_stored_file, rm_stored_dir)
from olympia.amo.tests import BaseTestCase
from olympia.amo.utils import rm_local_tmp_dir
# Give every test in this module database access (pytest-django marker);
# the storage backends exercised here are configured through Django.
pytestmark = pytest.mark.django_db
def test_storage_walk():
    """Generator test: walk_storage() mirrors os.walk over stored files.

    Builds a small tree through the storage API, then yields one
    (eq_, actual, expected) check per directory visited, in sorted order.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)  # shorthand: join under the temp root
    try:
        storage.save(jn('file1.txt'), ContentFile(''))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/file2.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile(''))
        storage.save(jn('one/three/file1.txt'), ContentFile(''))
        storage.save(jn('four/five/file1.txt'), ContentFile(''))
        storage.save(jn(u'four/kristi\u2603/kristi\u2603.txt'),
                     ContentFile(''))
        # Subdir/file lists become sets: only membership matters, not order.
        results = [(dir, set(subdirs), set(files))
                   for dir, subdirs, files in sorted(walk_storage(tmp))]
        # Each pop(0) consumes the next directory in sorted-walk order.
        yield (eq_, results.pop(0),
               (tmp, set(['four', 'one']), set(['file1.txt'])))
        yield (eq_, results.pop(0),
               (jn('four'), set(['five', 'kristi\xe2\x98\x83']), set([])))
        yield (eq_, results.pop(0),
               (jn('four/five'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0), (jn('four/kristi\xe2\x98\x83'), set([]),
                                     set(['kristi\xe2\x98\x83.txt'])))
        yield (eq_, results.pop(0), (jn('one'), set(['three', 'two']),
                                     set(['file1.txt', 'file2.txt'])))
        yield (eq_, results.pop(0),
               (jn('one/three'), set([]), set(['file1.txt'])))
        yield (eq_, results.pop(0),
               (jn('one/two'), set([]), set(['file1.txt'])))
        # Nothing should remain after the expected seven directories.
        yield (eq_, len(results), 0)
    finally:
        rm_local_tmp_dir(tmp)
def test_rm_stored_dir():
    """Generator test: rm_stored_dir() removes a tree recursively.

    Only the targeted directory (including non-ASCII entries) disappears;
    siblings outside it survive.
    """
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)  # shorthand: join under the temp root
    try:
        storage.save(jn('file1.txt'), ContentFile('<stuff>'))
        storage.save(jn('one/file1.txt'), ContentFile(''))
        storage.save(jn('one/two/file1.txt'), ContentFile('moar stuff'))
        storage.save(jn(u'one/kristi\u0107/kristi\u0107.txt'),
                     ContentFile(''))
        rm_stored_dir(jn('one'))
        # Everything at or below one/ must be gone...
        yield (eq_, storage.exists(jn('one')), False)
        yield (eq_, storage.exists(jn('one/file1.txt')), False)
        yield (eq_, storage.exists(jn('one/two')), False)
        yield (eq_, storage.exists(jn('one/two/file1.txt')), False)
        yield (eq_, storage.exists(jn(u'one/kristi\u0107/kristi\u0107.txt')),
               False)
        # ...but the sibling file outside it is untouched.
        yield (eq_, storage.exists(jn('file1.txt')), True)
    finally:
        rm_local_tmp_dir(tmp)
class TestFileOps(BaseTestCase):
    """Exercise copy_stored_file() / move_stored_file() on local storage."""

    def setUp(self):
        super(TestFileOps, self).setUp()
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        rm_local_tmp_dir(self.tmp)
        super(TestFileOps, self).tearDown()

    def path(self, path):
        # Anchor a relative name under this test's temp directory.
        return os.path.join(self.tmp, path)

    def contents(self, path):
        # Read back the raw bytes of a stored file.
        with storage.open(path, 'rb') as fp:
            return fp.read()

    def newfile(self, name, contents):
        # Create a stored file with the given contents; return its path.
        src = self.path(name)
        storage.save(src, ContentFile(contents))
        return src

    def test_copy(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')

    def test_self_copy(self):
        # Copying a file onto itself must not clobber its contents.
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('src.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')

    def test_move(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        move_stored_file(src, dest)
        eq_(self.contents(dest), '<contents>')
        # A move also removes the original.
        eq_(storage.exists(src), False)

    def test_non_ascii(self):
        # Non-ASCII names and contents survive the round trip.
        src = self.newfile(u'kristi\u0107.txt',
                           u'ivan kristi\u0107'.encode('utf8'))
        dest = self.path(u'somedir/kristi\u0107.txt')
        copy_stored_file(src, dest)
        eq_(self.contents(dest), 'ivan kristi\xc4\x87')

    def test_copy_chunking(self):
        # A 1-byte chunk size forces the chunked copy path.
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        copy_stored_file(src, dest, chunk_size=1)
        eq_(self.contents(dest), '<contents>')

    def test_move_chunking(self):
        src = self.newfile('src.txt', '<contents>')
        dest = self.path('somedir/dest.txt')
        move_stored_file(src, dest, chunk_size=1)
        eq_(self.contents(dest), '<contents>')
        eq_(storage.exists(src), False)
| {
"repo_name": "jpetto/olympia",
"path": "src/olympia/amo/tests/test_storage_utils.py",
"copies": "1",
"size": "4926",
"license": "bsd-3-clause",
"hash": 6795467902090702000,
"line_mean": 34.9562043796,
"line_max": 77,
"alpha_frac": 0.5667884693,
"autogenerated": false,
"ratio": 3.371663244353183,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9438451713653183,
"avg_score": 0,
"num_lines": 137
} |
from functools import (partial)
import os
import time
import boto.ec2
from fabric.api import (
abort,
env,
hide,
run,
sudo
)
from fabric.context_managers import (
quiet,
settings
)
# cloud-init user-data handed to new instances: authorizes the operator's
# public SSH key ({0}, filled in by _make_cloud_init) and installs git plus
# Docker from the official get.docker.io apt repository on first boot.
CLOUD_INIT_TMPL = """#cloud-config
ssh_authorized_keys:
- {0}
packages:
- git
runcmd:
- apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
- sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
- apt-get update
- apt-get install -y lxc-docker
"""
# This is the official Ubuntu 14.04 LTS AMI, PV edition for us-west-2
# TODO: Make a map here of the appropriate AMI's for all regions
AMI = "ami-5b58266b"
# All remote commands run as the stock 'ubuntu' cloud user; warn_only keeps
# a failing remote command from aborting the whole fab run.
remote_settings = partial(settings, user="ubuntu", warn_only=True)
"""Determine if an instance is a running tc-builder"""
def _running_tc_builder(instance):
tc_inst = instance.tags.get("Name") == "tc-builder"
tc_running = instance.state == "running"
return tc_inst and tc_running
def _make_cloud_init():
    """Build the cloud-init user-data, embedding the local public SSH key.

    Aborts the fab run when neither id_dsa.pub nor id_rsa.pub exists in
    the operator's ~/.ssh directory.
    """
    ssh_dir = os.path.expanduser("~/.ssh/")
    candidates = [entry for entry in os.listdir(ssh_dir)
                  if entry in ("id_dsa.pub", "id_rsa.pub")]
    if not candidates:
        abort("No public ssh identity to use on the provisioned host.")
    with open(os.path.join(ssh_dir, candidates[0])) as key_file:
        ssh_key = key_file.readline().strip()
    return CLOUD_INIT_TMPL.format(ssh_key)
"""Locates a running tc builder instance"""
def _locate_running_tc_builder(conn):
instances = conn.get_only_instances()
return filter(_running_tc_builder, instances)
"""Verify a running instance"""
def _verify_running_tc_builder(conn):
instances = conn.get_only_instances()
inst = filter(_running_tc_builder, instances)
if not inst:
abort("Failure to find a tc-builder instance running.")
return inst[0]
"""Provisions a Ubuntu 14.04 AWS instance for container building."""
def provision(region="us-west-2"):
conn = boto.ec2.connect_to_region(region)
# Verify whether a tc-builder is running yet, if it is, abort
if _locate_running_tc_builder(conn):
abort("Found tc-builder instance running.")
# Verify the security group exists, make it if it doesn't
if not filter(lambda x: x.name == "tc-builder",
conn.get_all_security_groups()):
tc_sec = conn.create_security_group(
"tc-builder",
"Travis Container Builder Policy",
)
# Add ssh
tc_sec.authorize("tcp", "22", "22", "0.0.0.0/0")
user_data = _make_cloud_init()
print "Provisioning..."
# Create our instance, and save the instance id
res = conn.run_instances(
AMI,
user_data = user_data,
instance_type = "t1.micro",
security_groups = ["tc-builder"]
)
inst = res.instances[0]
print "Allocated, waiting for running state..."
while inst.update() != 'running':
time.sleep(5)
inst.add_tag("Name", "tc-builder")
print "Running. Checking for SSH availability..."
retry = True
count = 0
while retry and count < 500:
try:
with settings(hide('everything'),
host_string=inst.ip_address,
warn_only=True):
result = run("which -a docker")
if result == "/usr/bin/docker":
retry = False
except:
pass
finally:
count += 1
time.sleep(5)
if count >= 500:
abort("Unable to ssh in.")
print "Fully available."
"""Removes a running tc-builder if one is found."""
def unprovision(region="us-west-2"):
conn = boto.ec2.connect_to_region(region)
# Locate the tc-builder instance
tc_instances = _locate_running_tc_builder(conn)
if not tc_instances:
abort("No tc-builder instance running.")
tc_instances[0].terminate()
"""Checks out a repo on a remote host and updates it to a specific
commit
This runs on a remote host that is assumed to have been configured
and running by ``provision``.
"""
def checkout(repo, commit, region="us-west-2"):
conn = boto.ec2.connect_to_region(region)
# Ensure we have pulled the latest travis-run on the remote host
inst = _verify_running_tc_builder(conn)
with remote_settings(host_string=inst.ip_address):
| {
"repo_name": "loads/tc-builder",
"path": "fabfile.py",
"copies": "1",
"size": "4398",
"license": "apache-2.0",
"hash": 9067656159679115000,
"line_mean": 26.835443038,
"line_max": 110,
"alpha_frac": 0.6252842201,
"autogenerated": false,
"ratio": 3.5640194489465156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9672876229522357,
"avg_score": 0.003285487904831631,
"num_lines": 158
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.