text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import partial
from httplib import HTTPConnection, HTTPSConnection
from itertools import chain
from urlparse import urlparse
import multipart
def request(method, url, data, headers, callback=None):
    """Send an HTTP(S) request with a multipart-encoded body.

    :param method: HTTP verb, e.g. 'GET' or 'POST'.
    :param url: full URL; the scheme selects HTTP vs HTTPS.
    :param data: payload handed to ``multipart.encode``.
    :param headers: dict of extra headers to send alongside the
        multipart headers.
    :param callback: optional progress callable, invoked as
        ``callback(bytes_sent, bytes_total)`` after each chunk.
    :returns: ``(status, body)`` tuple of the response.
    """
    url = urlparse(url)
    # Connect; pick the connection class from the URL scheme.
    # NOTE: renamed from `request` — the original local shadowed this
    # function's own name.
    if url.scheme == 'https':
        connection = HTTPSConnection(url.netloc)
    else:
        connection = HTTPConnection(url.netloc)
    connection.connect()
    try:
        # Initiate request
        connection.putrequest(method, url.path)
        encoded_data = multipart.encode(data)
        encoded_data_headers = encoded_data.get_headers()
        all_headers = chain(
            encoded_data_headers.iteritems(),
            headers.iteritems()
        )
        # Send headers
        for name, value in all_headers:
            connection.putheader(name, value)
        connection.endheaders()
        # Send body chunk by chunk, reporting upload progress if a
        # callback was supplied.
        bytes_sent = 0
        bytes_total = int(encoded_data_headers['Content-Length'])
        for chunk in encoded_data:
            connection.send(chunk)
            bytes_sent += len(chunk)
            if callable(callback):
                callback(bytes_sent, bytes_total)
        # TODO: Wrap the response in a container to allow chunked reading.
        response = connection.getresponse()
        response_status = response.status
        response_data = response.read()
    finally:
        # Always release the socket, even if encoding, sending or
        # reading fails (the original leaked the connection on error).
        connection.close()
    return response_status, response_data
# Convenience wrappers binding the HTTP verb; call signature is
# (url, data, headers, callback=None), same as `request` minus `method`.
get = partial(request, 'GET')
post = partial(request, 'POST')
| {
"repo_name": "jhaals/filebutler-upload",
"path": "filebutler_upload/httputils.py",
"copies": "1",
"size": "1372",
"license": "bsd-3-clause",
"hash": 2907101702800709000,
"line_mean": 23.0701754386,
"line_max": 70,
"alpha_frac": 0.6712827988,
"autogenerated": false,
"ratio": 4.170212765957447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5341495564757447,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from ... import documentation_helpers
from ...component_index import component_index
from . import cfc_utils
# Accent color string used as the documentation popup's side bar color.
SIDE_COLOR = "color(var(--bluish) blend(var(--background) 60%))"
def get_inline_documentation(cfml_view, doc_type):
    """Resolve inline documentation for the CFC reference at the cursor.

    Returns a ``cfml_view.Documentation`` for a method (when the method is
    found in the component's extended metadata), for a dot path, or for the
    component file itself; ``None`` when nothing resolvable is under the
    cursor. ``doc_type`` is currently unused.
    """
    if not cfml_view.project_name:
        return None

    cfc_path, file_path, dot_path, function_name, regions = cfc_utils.find_cfc(
        cfml_view, cfml_view.position
    )

    if not file_path:
        return None

    if dot_path:
        if function_name:
            metadata = component_index.get_extended_metadata_by_file_path(
                cfml_view.project_name, file_path
            )
            if function_name in metadata["functions"]:
                content, on_nav = component_index.get_method_documentation(
                    cfml_view.view,
                    cfml_view.project_name,
                    file_path,
                    function_name,
                    dot_path.rsplit(".", 1)[-1],
                    metadata["functions"][function_name]["name"],
                )
                return cfml_view.Documentation(regions, content, on_nav, 2)
        # No method match — fall back to component-level documentation.
        content, on_nav = component_index.get_documentation(
            cfml_view.view, cfml_view.project_name, file_path, dot_path
        )
        return cfml_view.Documentation(regions, content, on_nav, 2)

    content, on_nav = get_documentation(
        cfml_view.view,
        file_path,
        documentation_helpers.span_wrap(cfc_path, "entity.name.class"),
    )
    return cfml_view.Documentation(regions, content, on_nav, 2)
def get_method_preview(cfml_view):
    """Build a method preview for the CFC method reference at the cursor.

    Returns ``cfml_view.MethodPreview`` when a component file, dot path and
    method name can all be resolved from the cursor position, else ``None``.
    """
    if not cfml_view.project_name:
        return None

    cfc_path, file_path, dot_path, function_name, regions = cfc_utils.find_cfc(
        cfml_view, cfml_view.position
    )

    if not (file_path and dot_path and function_name):
        return None

    content, on_nav = component_index.get_method_preview(
        cfml_view.view, cfml_view.project_name, file_path, function_name
    )
    return cfml_view.MethodPreview(regions, content, on_nav, 2)
def get_goto_cfml_file(cfml_view):
    """Resolve a goto-definition target for the CFC reference at the cursor.

    Without a method name, the target is the component file itself. With a
    method name, the target is the file recorded for that method in the
    component's extended metadata; an unknown method yields ``None``.
    """
    if not cfml_view.project_name:
        return None

    cfc_path, file_path, dot_path, function_name, region = cfc_utils.find_cfc(
        cfml_view, cfml_view.position
    )

    if not file_path:
        return None

    if not function_name:
        return cfml_view.GotoCfmlFile(file_path, None)

    metadata = component_index.get_extended_metadata_by_file_path(
        cfml_view.project_name, file_path
    )
    if function_name in metadata["functions"]:
        return cfml_view.GotoCfmlFile(
            metadata["function_file_map"][function_name],
            metadata["functions"][function_name]["name"],
        )
    return None
def get_completions_doc(cfml_view):
    """Build parameter documentation for an in-progress CFC method call.

    Requires an active ``function_call_params`` with a method receiver and
    exactly one element of dot context; returns ``cfml_view.CompletionDoc``
    when the called function is found in the component's metadata, else
    ``None``.
    """
    params = cfml_view.function_call_params
    if not cfml_view.project_name or not params or not params.method:
        return None
    if len(params.dot_context) != 1:
        return None

    start_pt = params.dot_context[0].name_region.begin()
    cfc_path, file_path, dot_path, temp_function_name, region = cfc_utils.find_cfc(
        cfml_view, start_pt
    )
    if not file_path:
        return None

    function_name = params.function_name
    metadata = component_index.get_extended_metadata_by_file_path(
        cfml_view.project_name, file_path
    )
    if not metadata or function_name not in metadata["functions"]:
        return None

    content, on_nav = component_index.get_function_call_params_doc(
        cfml_view.project_name,
        file_path,
        params,
        dot_path.rsplit(".", 1)[-1],
        metadata["functions"][function_name]["name"],
    )
    return cfml_view.CompletionDoc(None, content, on_nav)
def on_navigate(view, file_path, href):
    """Popup link handler: open the component's file (``href`` is ignored)."""
    view.window().open_file(file_path)
def get_documentation(view, file_path, header):
    """Assemble the documentation popup model for a component file.

    Returns a ``(doc_dict, callback)`` pair: the dict carries side color,
    header and an HTML body linking the component's path via the
    ``__go_to_component`` href, and the callback (bound ``on_navigate``)
    opens the file when that link is clicked.
    """
    cfc_doc = {"side_color": SIDE_COLOR, "html": {}}
    cfc_doc["html"]["links"] = []
    cfc_doc["html"]["header"] = header
    cfc_doc["html"][
        "body"
    ] = """
    <div class="path">
        <strong>path</strong>: <a href="__go_to_component">{}</a>
    </div>
    """.strip().format(
        file_path
    )
    callback = partial(on_navigate, view, file_path)
    return cfc_doc, callback
| {
"repo_name": "thomasrotter/sublimetext-cfml",
"path": "src/plugins_/dotpaths/documentation.py",
"copies": "2",
"size": "4669",
"license": "mit",
"hash": -7729470483098628000,
"line_mean": 30.5472972973,
"line_max": 85,
"alpha_frac": 0.5776397516,
"autogenerated": false,
"ratio": 3.7322142286171065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002424212695530525,
"num_lines": 148
} |
from functools import partial
from importlib import import_module
import inspect
from flask.ext.restful import Api, abort
import six
from flask_presst.schema import Schema
from flask_presst.resources import PresstResource, ModelResource
from flask_presst.utils.routes import route_from
class PresstApi(Api):
    """Flask-RESTful :class:`Api` subclass aware of Presst resources.

    Tracks registered :class:`PresstResource` classes by endpoint name and,
    for :class:`ModelResource` subclasses, by model class; wires up item,
    collection and nested-type routes, and can expose a schema view.
    """

    def __init__(self, *args, **kwargs):
        super(PresstApi, self).__init__(*args, **kwargs)
        self._presst_resources = {}  # endpoint name -> resource class
        self._presst_resource_insts = {}  # resource instances (not populated here)
        self._model_resource_map = {}  # model class -> resource class
        self.has_schema = False

    def _init_app(self, app):
        super(PresstApi, self)._init_app(app)
        # Expose this API instance on the Flask app for later lookup.
        app.presst = self

    def get_resource_class(self, reference, module_name=None):
        """
        Accepts a reference of a resource and returns the matching :class:`PresstResource`.
        References can be one of:
        - a :class:`PresstResource`
        - an endpoint name for the resource
        - the full class path of the resource (or class name if :attr:`module` is set)
        - the :class:`Model` class of a :class:`ModelResource`
        :param reference: The resource reference
        :param module_name: module name for lazy loading of class.
        :return: :class:`PresstResource`
        """
        if isinstance(reference, PresstResource):  # pragma: no cover
            return reference.__class__
        elif inspect.isclass(reference) and issubclass(reference, PresstResource):
            return reference
        elif reference in self._model_resource_map:
            return self._model_resource_map[reference]
        elif isinstance(reference, six.string_types):
            # Endpoint names are stored lowercase; fall back to importing
            # the class from its dotted path (or `module_name`).
            if reference.lower() in self._presst_resources:
                return self._presst_resources[reference.lower()]
            else:
                if not module_name or ('.' in reference):
                    module_name, class_name = reference.rsplit('.', 1)
                else:
                    class_name = reference
                module = import_module(module_name)
                return getattr(module, class_name)  # TODO check if this is actually a `Resource`

    def parse_resource_uri(self, uri):
        """Map a resource URI to ``(resource_class, id)``; abort(400) on failure."""
        if not uri.startswith(self.prefix):
            abort(400, message='Resource URI {} does not begin with API prefix'.format(uri))
        endpoint, args = route_from(uri)
        try:
            return self._presst_resources[endpoint], args['id']
        except KeyError:
            abort(400, message='Resource {} is not defined'.format(uri))

    def get_item_for_resource_uri(self, uri, expected_resource=None):
        """Resolve *uri* to an item, aborting 400 if it belongs to a different resource."""
        resource_class, id_ = self.parse_resource_uri(uri)
        if expected_resource != resource_class:
            abort(400, message='Wrong resource item type, expected {0}, got {1}'.format(
                expected_resource.resource_name,
                resource_class.resource_name
            ))
        return resource_class.get_item_for_id(id_)

    def get_resource_for_model(self, model):
        """Return the resource class registered for *model*, or ``None``."""
        try:
            return self._model_resource_map[model]
        except KeyError:
            return None

    def enable_schema(self):
        """Expose the schema view at the API root (idempotent)."""
        if not self.has_schema:
            self.has_schema = True
            self.app.add_url_rule(self._complete_url('/', ''),
                                  view_func=self.output(Schema.as_view('schema', self)),
                                  endpoint='schema',
                                  methods=['GET'])

    def add_resource(self, resource, *urls, **kwargs):
        """Register *resource*, wiring up collection, item and nested routes.

        Non-Presst resources are delegated to the plain Flask-RESTful
        implementation. Registering the same Presst resource twice is a no-op.
        """
        # fallback to Flask-RESTful `add_resource` implementation with regular resources:
        if not issubclass(resource, PresstResource):
            # BUGFIX: must return here — everything below requires
            # PresstResource attributes (resource_name, _meta, nested_types)
            # and would also register the resource a second time.
            return super(PresstApi, self).add_resource(resource, *urls, **kwargs)

        # skip resources that may have previously been (auto-)imported.
        if resource in self._presst_resources.values():
            return

        resource.api = self
        resource_name = resource.resource_name
        pk_converter = resource._meta.get('pk_converter', 'int')

        # Collection and item routes; any caller-supplied `urls` are ignored
        # in favor of the canonical Presst routes.
        urls = [
            '/{0}'.format(resource_name),
            '/{0}/<{1}:id>'.format(resource_name, pk_converter),
        ]

        self._presst_resources[resource_name] = resource

        if issubclass(resource, ModelResource):
            self._model_resource_map[resource.get_model()] = resource

        # Register one rule per nested type, collection-style or per-item.
        for name, child in six.iteritems(resource.nested_types):
            if child.collection:
                url = '/{0}/{1}'.format(resource_name, name)
            else:
                url = '/{0}/<{1}:parent_id>/{2}'.format(resource_name, pk_converter, name)
            child_endpoint = '{0}_{1}_{2}'.format(resource_name, name, child.__class__.__name__.lower())
            child_view_func = self.output(child.view_factory(child_endpoint, resource))
            # FIXME routing for blueprints; also needs tests
            rule = self._complete_url(url, '')
            self.app.add_url_rule(rule,
                                  view_func=child_view_func,
                                  endpoint=child_endpoint,
                                  methods=child.methods, **kwargs)

        super(PresstApi, self).add_resource(resource, *urls, endpoint=resource_name, **kwargs)
| {
"repo_name": "svenstaro/flask-presst",
"path": "flask_presst/api.py",
"copies": "1",
"size": "5264",
"license": "mit",
"hash": 1913830259911611600,
"line_mean": 35.8111888112,
"line_max": 104,
"alpha_frac": 0.5873860182,
"autogenerated": false,
"ratio": 4.238325281803543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325711300003543,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from importlib import import_module
def import_string(dotted_path, dotted_attributes=None):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. When a dotted attribute path is also provided, the
    dotted attribute path would be applied to the attribute/class retrieved from
    the first step, and return the corresponding value designated by the
    attribute path. Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit(".", 1)
    except ValueError:
        raise ImportError("%s doesn't look like a module path" % dotted_path)

    module = import_module(module_path)

    try:
        result = getattr(module, class_name)
    except AttributeError:
        raise ImportError(
            'Module "%s" does not define a "%s" attribute/class'
            % (module_path, class_name)
        )

    if not dotted_attributes:
        return result

    # Walk the attribute chain, remembering how far we got so the error
    # message can name the exact path that failed.
    traveled = []
    for attribute in dotted_attributes.split("."):
        traveled.append(attribute)
        try:
            result = getattr(result, attribute)
        except AttributeError:
            raise ImportError(
                'Module "%s" does not define a "%s" attribute inside attribute/class "%s"'
                % (module_path, ".".join(traveled), class_name)
            )
    return result
def lazy_import(dotted_path, dotted_attributes=None):
    """Return a zero-argument callable that resolves the path via import_string when invoked."""
    return partial(import_string, dotted_path, dotted_attributes)
| {
"repo_name": "graphql-python/graphene",
"path": "graphene/utils/module_loading.py",
"copies": "1",
"size": "1645",
"license": "mit",
"hash": 2134580447413049000,
"line_mean": 34.7608695652,
"line_max": 90,
"alpha_frac": 0.6431610942,
"autogenerated": false,
"ratio": 4.620786516853933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5763947611053933,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
# Directory containing this test module.
HERE = os.path.dirname(__file__)
@pytest.fixture(
    params=[
        "chinese_utf-16.html",
        "chinese_utf-32.html",
        "chinese_utf-8.html",
        "letz_latin1.html",
    ]
)
def html_encoding_file(request, datapath):
    """Parametrized fixture for HTML encoding test filenames.

    Resolves each parametrized filename under the io/data/html_encoding
    data directory.
    """
    return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """Assert the two lists hold pairwise-equal, non-empty DataFrames.

    Extra positional/keyword arguments are forwarded to
    ``tm.assert_frame_equal``.
    """
    assert len(list1) == len(list2), (
        "lists are not of equal size "
        f"len(list1) == {len(list1)}, "
        f"len(list2) == {len(list2)}"
    )
    msg = "not all list elements are DataFrames"
    both_frames = all(
        isinstance(left, DataFrame) and isinstance(right, DataFrame)
        for left, right in zip(list1, list2)
    )
    assert both_frames, msg
    for left, right in zip(list1, list2):
        tm.assert_frame_equal(left, right, *args, **kwargs)
        assert not left.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
    # read_html with flavor="bs4" enforces a minimum BeautifulSoup version;
    # spoofing an old __version__ must make it raise ImportError.
    import bs4
    monkeypatch.setattr(bs4, "__version__", "4.2")
    with pytest.raises(ImportError, match="Pandas requires version"):
        read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
    """read_html must reject an unknown flavor string with ValueError."""
    bad_flavor = "invalid flavor"
    expected = r"\{" + bad_flavor + r"\} is not a valid set of flavors"
    with pytest.raises(ValueError, match=expected):
        read_html("google.com", match="google", flavor=bad_flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
    # Both parser backends should extract the same tables in the same order.
    filename = datapath("io", "data", "html", "valid_markup.html")
    dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
    dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
    assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("bs4")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
with tm.assert_produces_warning(FutureWarning):
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
    def test_wikipedia_states_multiindex(self, datapath):
        """Same fixture as above, but letting read_html infer a 2-level header."""
        data = datapath("io", "data", "html", "wikipedia_states.html")
        result = self.read_html(data, match="Arizona", index_col=0)[0]
        assert result.shape == (60, 11)
        assert "Unnamed" in result.columns[-1][1]
        assert result.columns.nlevels == 2
        assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
    def test_parser_error_on_empty_header_row(self):
        """Requesting more header rows than the table provides raises ParserError."""
        msg = (
            r"Passed header=\[0,1\] are too many "
            r"rows for this multi_index of columns"
        )
        with pytest.raises(ParserError, match=msg):
            self.read_html(
                """
                <table>
                    <thead>
                        <tr><th></th><th></tr>
                        <tr><th>A</th><th>B</th></tr>
                    </thead>
                    <tbody>
                        <tr><td>a</td><td>b</td></tr>
                    </tbody>
                </table>
            """,
                header=[0, 1],
            )
    def test_decimal_rows(self):
        """A custom ``decimal`` separator is honored when coercing float cells."""
        # GH 12907
        result = self.read_html(
            """<html>
            <body>
             <table>
                <thead>
                    <tr>
                        <th>Header</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td>1100#101</td>
                    </tr>
                </tbody>
            </table>
            </body>
        </html>""",
            decimal="#",
        )[0]
        expected = DataFrame(data={"Header": 1100.101}, index=[0])
        assert result["Header"].dtype == np.dtype("float64")
        tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
    def test_converters(self):
        """Per-column converters are applied instead of the default dtype inference."""
        # GH 13461
        result = self.read_html(
            """<table>
                 <thead>
                   <tr>
                     <th>a</th>
                    </tr>
                  </thead>
                  <tbody>
                    <tr>
                      <td> 0.763</td>
                    </tr>
                    <tr>
                      <td> 0.244</td>
                    </tr>
                  </tbody>
                </table>""",
            converters={"a": str},
        )[0]
        expected = DataFrame({"a": ["0.763", "0.244"]})
        tm.assert_frame_equal(result, expected)
    def test_na_values(self):
        """Values listed in ``na_values`` are converted to NaN on read."""
        # GH 13461
        result = self.read_html(
            """<table>
                 <thead>
                   <tr>
                     <th>a</th>
                   </tr>
                 </thead>
                 <tbody>
                   <tr>
                     <td> 0.763</td>
                   </tr>
                   <tr>
                     <td> 0.244</td>
                   </tr>
                 </tbody>
               </table>""",
            na_values=[0.244],
        )[0]
        expected = DataFrame({"a": [0.763, np.nan]})
        tm.assert_frame_equal(result, expected)
    def test_keep_default_na(self):
        """keep_default_na toggles whether "N/A"/"NA" strings become NaN."""
        html_data = """<table>
                        <thead>
                            <tr>
                            <th>a</th>
                            </tr>
                        </thead>
                        <tbody>
                            <tr>
                            <td> N/A</td>
                            </tr>
                            <tr>
                            <td> NA</td>
                            </tr>
                        </tbody>
                    </table>"""
        expected_df = DataFrame({"a": ["N/A", "NA"]})
        html_df = self.read_html(html_data, keep_default_na=False)[0]
        tm.assert_frame_equal(expected_df, html_df)
        expected_df = DataFrame({"a": [np.nan, np.nan]})
        html_df = self.read_html(html_data, keep_default_na=True)[0]
        tm.assert_frame_equal(expected_df, html_df)
    def test_preserve_empty_rows(self):
        """A body row with empty cells survives as a row of NaNs."""
        result = self.read_html(
            """
            <table>
                <tr>
                    <th>A</th>
                    <th>B</th>
                </tr>
                <tr>
                    <td>a</td>
                    <td>b</td>
                </tr>
                <tr>
                    <td></td>
                    <td></td>
                </tr>
            </table>
        """
        )[0]
        expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
        tm.assert_frame_equal(result, expected)
    def test_ignore_empty_rows_when_inferring_header(self):
        """An empty all-<th> row is skipped when inferring the header rows."""
        result = self.read_html(
            """
            <table>
                <thead>
                    <tr><th></th><th></tr>
                    <tr><th>A</th><th>B</th></tr>
                    <tr><th>a</th><th>b</th></tr>
                </thead>
                <tbody>
                    <tr><td>1</td><td>2</td></tr>
                </tbody>
            </table>
        """
        )[0]
        columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
        expected = DataFrame(data=[[1, 2]], columns=columns)
        tm.assert_frame_equal(result, expected)
    def test_multiple_header_rows(self):
        """A two-level column header round-trips through to_html/read_html."""
        # Issue #13434
        expected_df = DataFrame(
            data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
        )
        expected_df.columns = [
            ["Unnamed: 0_level_0", "Age", "Party"],
            ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
        ]
        html = expected_df.to_html(index=False)
        html_df = self.read_html(html)[0]
        tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
    @pytest.mark.slow
    def test_fallback_success(self, datapath):
        """When the first flavor fails, read_html falls back to the next one."""
        banklist_data = datapath("io", "data", "html", "banklist.html")
        self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
    @pytest.mark.parametrize(
        "displayed_only,exp0,exp1",
        [
            (True, DataFrame(["foo"]), None),
            (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
        ],
    )
    def test_displayed_only(self, displayed_only, exp0, exp1):
        """displayed_only controls whether display:none elements are parsed."""
        # GH 20027
        data = StringIO(
            """<html>
          <body>
            <table>
              <tr>
                <td>
                  foo
                  <span style="display:none;text-align:center">bar</span>
                  <span style="display:none">baz</span>
                  <span style="display: none">qux</span>
                </td>
              </tr>
            </table>
            <table style="display: none">
              <tr>
                <td>foo</td>
              </tr>
            </table>
          </body>
        </html>"""
        )
        dfs = self.read_html(data, displayed_only=displayed_only)
        tm.assert_frame_equal(dfs[0], exp0)
        if exp1 is not None:
            tm.assert_frame_equal(dfs[1], exp1)
        else:
            assert len(dfs) == 1  # Should not parse hidden table
    def test_encode(self, html_encoding_file):
        """An explicit ``encoding`` gives identical results for bytes, buffer and path input."""
        # Fixture file names encode the codec after an underscore, e.g. "x_utf-8.html".
        base_path = os.path.basename(html_encoding_file)
        root = os.path.splitext(base_path)[0]
        _, encoding = root.split("_")
        try:
            with open(html_encoding_file, "rb") as fobj:
                from_string = self.read_html(
                    fobj.read(), encoding=encoding, index_col=0
                ).pop()
            with open(html_encoding_file, "rb") as fobj:
                from_file_like = self.read_html(
                    BytesIO(fobj.read()), encoding=encoding, index_col=0
                ).pop()
            from_filename = self.read_html(
                html_encoding_file, encoding=encoding, index_col=0
            ).pop()
            tm.assert_frame_equal(from_string, from_file_like)
            tm.assert_frame_equal(from_string, from_filename)
        except Exception:
            # seems utf-16/32 fail on windows
            if is_platform_windows():
                if "16" in encoding or "32" in encoding:
                    pytest.skip()
            raise
    def test_parse_failure_unseekable(self):
        """A non-rewindable stream raises a clear error on the retry parse."""
        # Issue #17975
        if self.read_html.keywords.get("flavor") == "lxml":
            pytest.skip("Not applicable for lxml")

        class UnseekableStringIO(StringIO):
            # Pretend the buffer cannot be rewound so read_html must error
            # instead of retrying with another parser.
            def seekable(self):
                return False

        bad = UnseekableStringIO(
            """
            <table><tr><td>spam<foobr />eggs</td></tr></table>"""
        )
        assert self.read_html(bad)
        with pytest.raises(ValueError, match="passed a non-rewindable file object"):
            self.read_html(bad)
    def test_parse_failure_rewinds(self):
        """The parser seeks back to the start before retrying with another flavor."""
        # Issue #17975
        class MockFile:
            # One-shot readable that can be rewound via seek().
            def __init__(self, data):
                self.data = data
                self.at_end = False

            def read(self, size=None):
                data = "" if self.at_end else self.data
                self.at_end = True
                return data

            def seek(self, offset):
                self.at_end = False

            def seekable(self):
                return True

        good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
        bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
        assert self.read_html(good)
        assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
# force import check by reinitalising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
| {
"repo_name": "jreback/pandas",
"path": "pandas/tests/io/test_html.py",
"copies": "1",
"size": "39911",
"license": "bsd-3-clause",
"hash": 325153387759483500,
"line_mean": 30.7257551669,
"line_max": 88,
"alpha_frac": 0.4731778207,
"autogenerated": false,
"ratio": 3.6733548090197883,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9645127079862936,
"avg_score": 0.00028110997137020935,
"num_lines": 1258
} |
from functools import partial
from . import Path, PathOp, op
__all__ = [
"difference",
"intersection",
"reverse_difference",
"union",
"xor",
]
def _draw(contours):
    """Replay every contour into a fresh Path and return it."""
    result = Path()
    result_pen = result.getPen()
    for shape in contours:
        shape.draw(result_pen)
    return result
def union(
    contours,
    outpen,
    fix_winding=True,
    keep_starting_points=True,
    clockwise=False,
):
    """Boolean-union *contours* and replay the result into *outpen*.

    The combined path is simplified (overlaps and self-intersections
    removed) before drawing. No-op for an empty contour sequence.
    """
    if not contours:
        return
    path = _draw(contours)
    path.simplify(
        fix_winding=fix_winding,
        keep_starting_points=keep_starting_points,
        clockwise=clockwise,
    )
    path.draw(outpen)
def _do(
    operator,
    subject_contours,
    clip_contours,
    outpen,
    fix_winding=True,
    keep_starting_points=True,
    clockwise=False,
):
    """Apply boolean *operator* to subject vs. clip contours, drawing to *outpen*."""
    one = _draw(subject_contours)
    two = _draw(clip_contours)
    result = op(
        one,
        two,
        operator,
        fix_winding=fix_winding,
        keep_starting_points=keep_starting_points,
        clockwise=clockwise,
    )
    result.draw(outpen)
# generate self-similar operations
# For every PathOp operator except UNION (defined above with different
# empty-input semantics), expose a module-level function named after it,
# e.g. difference(...) == _do(PathOp.DIFFERENCE, ...).
for operation in PathOp:
    if operation == PathOp.UNION:
        continue
    globals()[operation.name.lower()] = partial(_do, operation)
| {
"repo_name": "fonttools/skia-pathops",
"path": "src/python/pathops/operations.py",
"copies": "1",
"size": "1241",
"license": "bsd-3-clause",
"hash": 3766193227984432600,
"line_mean": 17.803030303,
"line_max": 63,
"alpha_frac": 0.6091861402,
"autogenerated": false,
"ratio": 3.485955056179775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4595141196379775,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from inject import assign_injectables
from getters import with_getters_for
""" Data access objects, representing rows in the database tables. """
class NoneId(Exception):
  """Raised when get_id is called on an object not yet saved to the database."""
  pass
def get_id_if_not_none(self):
  """
  Modified ID getter function that raises an Exception instead of
  silently returning None if called on an object not yet in the database.
  """
  identifier = self.id
  if identifier is None:
    raise NoneId
  return identifier
class DatabaseObject(object):
  """Base class for DAO rows; binds a get_id that raises NoneId when unset."""
  def __init__(self):
    # Bound as an attribute (not a method) so it shadows nothing in subclasses.
    self.get_id = partial(get_id_if_not_none, self)
    super(DatabaseObject, self).__init__()
class User(DatabaseObject):
  """A registered user row: username plus salted password hash."""
  def __init__(self, username, hashed_password, salt, id=None):
    assign_injectables(self, locals())
    super(User, self).__init__()

  @classmethod
  def from_row(clazz, row):
    """Builds a User from an (id, username, hashed_password, salt) row tuple."""
    (id, username, hashed_password, salt) = row
    return clazz(username, hashed_password, salt, id=id)
with_getters_for(User, 'username', 'hashed_password', 'salt')
class Player(DatabaseObject):
  """A player row: owning user and the room the player currently occupies."""
  def __init__(self, created_by_user, currently_in_room, id=None):
    assign_injectables(self, locals())
    super(Player, self).__init__()

  @classmethod
  def from_row(clazz, row):
    """Builds a Player from an (id, created_by_user, currently_in_room) row tuple."""
    (id, created_by_user, currently_in_room) = row
    return clazz(created_by_user, currently_in_room, id=id)
with_getters_for(Player, 'created_by_user', 'currently_in_room')
class GameEntity(DatabaseObject):
  """
  Abstract base class for objects in the game with names and descriptions.
  Subclasses set ``name`` and ``description``; getters are generated below.
  """
  def __init__(self):
    super(GameEntity, self).__init__()
with_getters_for(GameEntity, 'name', 'description')
class Room(GameEntity):
  """A game room; ``final_room`` marks the winning location."""
  def __init__(self, name, description, final_room=False, id=None):
    assign_injectables(self, locals())
    super(Room, self).__init__()

  def is_final_room(self):
    # Boolean getter: reads more naturally than a generated get_final_room().
    return self.final_room

  @classmethod
  def from_row(clazz, row):
    """Builds a Room from an (id, name, description, final_room) row tuple."""
    (id, name, description, final_room) = row
    return clazz(name, description, final_room, id=id)
# GameEntity already implements all the necessary getters
class Exit(GameEntity):
  """A directed connection between two rooms, possibly locked."""
  def __init__(self, name, description, from_room, to_room, locked, id=None):
    assign_injectables(self, locals())
    super(Exit, self).__init__()

  def is_locked(self):
    """
    Getter for locked; with_getters_for would call the function
    get_locked which is less natural-sounding for a boolean.
    """
    return self.locked

  @classmethod
  def from_row(clazz, row):
    """Builds an Exit from an (id, name, description, from_room, to_room, locked) row."""
    (id, name, description, from_room, to_room, locked) = row
    return clazz(name, description, from_room, to_room, locked, id=id)
with_getters_for(Exit, 'from_room', 'to_room')
class Item(GameEntity):
  """
  Abstract base class for objects which players can add to their inventory.
  Subclasses are expected to set ``locked``, ``use_message``,
  ``owned_by_player`` and ``in_room`` (via assign_injectables).
  """
  def __init__(self):
    super(Item, self).__init__()

  def is_locked(self):
    """
    An Item is locked if the player can not pick it up without
    doing some other action first.
    """
    return self.locked
with_getters_for(Item, 'use_message', 'owned_by_player', 'in_room')
class ItemUnlockingItem(Item):
  """An item whose use unlocks another item (referenced by unlocks_item)."""
  def __init__(self, name, description, use_message,
      owned_by_player, in_room, locked, unlocks_item, id=None):
    assign_injectables(self, locals())
    super(ItemUnlockingItem, self).__init__()

  @classmethod
  def from_row(clazz, row):
    """Builds an ItemUnlockingItem from its full database row tuple."""
    (id, name, description, use_message, owned_by_player,
        in_room, locked, unlocks_item) = row
    return clazz(name, description, use_message, owned_by_player,
        in_room, locked, unlocks_item, id=id)
with_getters_for(ItemUnlockingItem, 'unlocks_item')
class ExitUnlockingItem(Item):
  """An item whose use unlocks an exit (referenced by unlocks_exit)."""
  def __init__(self, name, description, use_message,
      owned_by_player, in_room, locked, unlocks_exit, id=None):
    assign_injectables(self, locals())
    super(ExitUnlockingItem, self).__init__()

  @classmethod
  def from_row(clazz, row):
    """Builds an ExitUnlockingItem from its full database row tuple."""
    (id, name, description, use_message, owned_by_player,
        in_room, locked, unlocks_exit) = row
    return clazz(name, description, use_message, owned_by_player,
        in_room, locked, unlocks_exit, id=id)
with_getters_for(ExitUnlockingItem, 'unlocks_exit')
| {
"repo_name": "rahulraj/web_projects",
"path": "assignment6/advgame/dataaccess.py",
"copies": "1",
"size": "4098",
"license": "mit",
"hash": -3650735342452284000,
"line_mean": 31.5238095238,
"line_max": 77,
"alpha_frac": 0.6852122987,
"autogenerated": false,
"ratio": 3.412156536219817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.949966654002673,
"avg_score": 0.01954045897861741,
"num_lines": 126
} |
from functools import partial
from inspect import getargspec
from django import template
from django.template import Context
from django.template.base import Node, TemplateSyntaxError
from ..compat import generic_tag_compiler
register = template.Library()
def lazy_tag(self, func=None, takes_context=None, name=None, node_class=None):
    """
    A tag function decorator, injected on Django's template tag library, similar to simple_tag().
    The decorated function gets called when the template node tree is built and should return
    another function, responsible for the output, that later will be called within the rendering phase.
    Note: if decorated with takes_context=True, context will not be available in the init phase.

        @register.lazy_tag(takes_context=True)
        def x(context, a, b, c=True, d=False):
            # Init phase (no context)
            def render(context):
                # Render phase
                return u'Content of argument a: %s' % a
            return render
    """
    def dec(func):
        params, varargs, varkw, defaults = getargspec(func)

        class SimpleNode(Node):
            def __init__(self, takes_context, args, kwargs):
                self.takes_context = takes_context
                self.args = args
                self.kwargs = kwargs
                # Arguments are resolved once at parse time against an empty
                # Context, so only literal/static values are usable here.
                resolved_args, resolved_kwargs = self.get_resolved_arguments(Context({}))
                self.resolved_args = resolved_args
                self.resolved_kwargs = resolved_kwargs
                # func runs in the init phase and must return the callable
                # that will be invoked on every render().
                self.render_func = func(*resolved_args, **resolved_kwargs)

            def get_resolved_arguments(self, context):
                # Resolve template Variable objects against the given context.
                resolved_args = [var.resolve(context) for var in self.args]
                if self.takes_context:
                    resolved_args = [context] + resolved_args
                resolved_kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.items())
                return resolved_args, resolved_kwargs

            def render(self, context):
                return self.render_func(context)

        function_name = (name or
                         getattr(func, '_decorated_function', func).__name__)
        # Reuse Django's generic tag compiler so the tag's template-side
        # arguments are parsed according to func's own signature.
        compile_func = partial(generic_tag_compiler,
                               params=params, varargs=varargs, varkw=varkw,
                               defaults=defaults, name=function_name,
                               takes_context=takes_context, node_class=node_class or SimpleNode)
        compile_func.__doc__ = func.__doc__
        self.tag(function_name, compile_func)
        return func

    if func is None:
        return dec  # @register.lazy_tag(...)
    elif callable(func):
        return dec(func)  # @register.lazy_tag
    else:
        raise TemplateSyntaxError("Invalid arguments provided to lazy_tag")


# Monkey-patch: expose lazy_tag as a method on Django's template Library.
template.Library.lazy_tag = lazy_tag
| {
"repo_name": "andreif/djedi-cms",
"path": "djedi/templatetags/template.py",
"copies": "1",
"size": "2832",
"license": "bsd-3-clause",
"hash": 5295261771052345000,
"line_mean": 36.76,
"line_max": 103,
"alpha_frac": 0.615819209,
"autogenerated": false,
"ratio": 4.495238095238095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009680546479891573,
"num_lines": 75
} |
from functools import partial
from inspect import getargspec
from django import template
from django.template import Node, TemplateSyntaxError, generic_tag_compiler
register = template.Library()
def lazy_tag(self, func=None, takes_context=None, name=None, node_class=None):
    """
    A tag function decorator, injected on Django's template tag library, similar to simple_tag().
    The decorated function gets called when the template node tree is built and should return
    another function, responsible for the output, that later will be called within the rendering phase.
    Note: if decorated with takes_context=True, context will not be available in the init phase.

        @register.lazy_tag(takes_context=True)
        def x(context, a, b, c=True, d=False):
            # Init phase (no context)
            def render(context):
                # Render phase
                return u'Content of argument a: %s' % a
            return render
    """
    def dec(func):
        params, varargs, varkw, defaults = getargspec(func)

        class SimpleNode(Node):
            def __init__(self, takes_context, args, kwargs):
                self.takes_context = takes_context
                self.args = args
                self.kwargs = kwargs
                # Arguments are resolved once at parse time against a bare
                # dict standing in for an empty context.
                # NOTE(review): a plain {} (not a Context) is passed here —
                # presumably Variable.resolve tolerates dicts; verify.
                resolved_args, resolved_kwargs = self.get_resolved_arguments({})
                self.resolved_args = resolved_args
                self.resolved_kwargs = resolved_kwargs
                # func runs in the init phase and must return the callable
                # that will be invoked on every render().
                self.render_func = func(*resolved_args, **resolved_kwargs)

            def get_resolved_arguments(self, context):
                # Resolve template Variable objects against the given context.
                resolved_args = [var.resolve(context) for var in self.args]
                if self.takes_context:
                    resolved_args = [context] + resolved_args
                resolved_kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.items())
                return resolved_args, resolved_kwargs

            def render(self, context):
                return self.render_func(context)

        function_name = (name or
                         getattr(func, '_decorated_function', func).__name__)
        # Reuse Django's generic tag compiler so the tag's template-side
        # arguments are parsed according to func's own signature.
        compile_func = partial(generic_tag_compiler,
                               params=params, varargs=varargs, varkw=varkw,
                               defaults=defaults, name=function_name,
                               takes_context=takes_context, node_class=node_class or SimpleNode)
        compile_func.__doc__ = func.__doc__
        self.tag(function_name, compile_func)
        return func

    if func is None:
        return dec  # @register.lazy_tag(...)
    elif callable(func):
        return dec(func)  # @register.lazy_tag
    else:
        raise TemplateSyntaxError("Invalid arguments provided to lazy_tag")


# Monkey-patch: expose lazy_tag as a method on Django's template Library.
template.Library.lazy_tag = lazy_tag
| {
"repo_name": "chrippa/djedi-cms",
"path": "djedi/templatetags/template.py",
"copies": "2",
"size": "2761",
"license": "bsd-3-clause",
"hash": 1413380331979321600,
"line_mean": 37.3472222222,
"line_max": 103,
"alpha_frac": 0.6106483158,
"autogenerated": false,
"ratio": 4.504078303425775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010255370347280746,
"num_lines": 72
} |
from functools import partial
from inspect import getfullargspec
from typing import Callable, Iterable, List, Union
from prompt_toolkit.completion import (  # type: ignore
    CompleteEvent,
    Completer,
    Completion,
    WordCompleter,
)
from prompt_toolkit.document import Document  # type: ignore
class ArgsCompleter(Completer):
    """Offer per-positional-argument completion choices for a repl command.

    Each entry in ``args`` describes the choices for one argument position:
    an iterable of strings, a callable producing choices, or None (no
    completion for that position).
    """

    def __init__(self, repl, args: List[Union[Iterable[str], Callable, None]]):
        # Bind callables to the instance of Repl, if necessary.
        self.args = [
            partial(arg, repl) if (callable(arg) and getfullargspec(arg).args) else arg
            for arg in args
        ]

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Get completion choices for the current argument: the argument
        # index is the number of space-separated parts before the cursor.
        parts = document.text_before_cursor.lstrip().split(" ")
        try:
            completion = self.args[len(parts) - 1]
        except IndexError:
            completion = None
        if completion is None:
            # Bare return ends the generator; the previous `return []`
            # misleadingly suggested a list return from a generator.
            return

        completer = WordCompleter(completion, ignore_case=True)
        # WordCompleter yields Completion objects, not strings — the
        # original Iterable[str] annotation was incorrect.
        yield from completer.get_completions(document, complete_event)
| {
"repo_name": "AndreaOrru/Gilgamesh",
"path": "gilgamesh/repl/completers.py",
"copies": "1",
"size": "1181",
"license": "bsd-2-clause",
"hash": -1378940996398734000,
"line_mean": 32.7428571429,
"line_max": 87,
"alpha_frac": 0.6477561389,
"autogenerated": false,
"ratio": 4.310218978102189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.545797511700219,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from inspect import getmembers
from mock import patch, ANY, call
from os import path
from time import sleep
from unittest import TestCase
from fixture.io import TempIO
from pyinotify import WatchManager, Notifier
from testrunner import default_config
from testrunner.configurator import Config
from testrunner.watcher import FileChangeHandler
class TestFunctional(TestCase):
    """End-to-end tests: synthetic file-system events drive the watcher and
    the configured shell runner; output files prove the runner executed.

    NOTE(review): Python 2 code (``unicode``, ``mock`` package).
    """

    @staticmethod
    def _copy_default_config(mocked_config):
        # Mirror every UPPER_CASE constant from the real default_config
        # onto the autospec'd mock so Config sees sane defaults.
        for name, value in getmembers(default_config):
            if not name.isupper():
                continue
            setattr(mocked_config, name, value)

    @staticmethod
    def _event_generator(events, notifier):
        # Notifier loop callback: emit one queued (dir, name, data) file
        # event per iteration; returning True (queue exhausted) stops the loop.
        if not events:
            return True
        path_obj, file_name, data = events.pop(0)
        path_obj.putfile(file_name, data)
        sleep(0.2)

    def setUp(self):
        # Temporary project layout: config package + watched src directory.
        self.tmp = TempIO(deferred=True)
        self.tmp.conf = "config"
        self.tmp.conf.putfile("__init__.py", "#")
        self.tmp.conf.putfile("config.py", "# config")
        self.tmp.src = "src"
        self.tmp.src.putfile("test.me", "data")
        self.tmp_output = TempIO(deferred=True)
        self.config_file = self.tmp.conf.join("config.py")

    @patch.object(FileChangeHandler, "show_notification", autospec=True)
    @patch("testrunner.configurator.default_config", autospec=True)
    def test_update_code(self, default_config, show_notification):
        """Creating watched source files runs the command once per event."""
        out_file = self.tmp_output.join("out.log")
        command_args = [
            "-c", self.config_file,
            "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
            "-d", unicode(self.tmp.src),
        ]
        events = [
            (self.tmp.src, "test_1.me", "some new data"),
            (self.tmp.src, "test_2.me", "some new data"),
        ]
        self._copy_default_config(default_config)
        default_config.RUNNER_DELAY = -1
        wm = WatchManager()
        config = Config(watch_manager=wm, command_args=command_args)
        handler = FileChangeHandler(config=config)
        notifier = Notifier(wm, handler)
        notifier.loop(callback=partial(self._event_generator, events))
        # There are some stupid race conditions (possibly due to the callbacks)
        # Sleep time allows to execute all needed code
        sleep(0.2)
        self.assertTrue(path.exists(self.tmp.src.join("test_1.me")))
        self.assertTrue(path.exists(self.tmp.src.join("test_2.me")))
        self.assertTrue(path.exists(out_file))
        self.assertEqual(show_notification.call_count, 2)
        show_notification.assert_has_calls([call(handler, True, ANY)]*2)

    @patch.object(FileChangeHandler, "show_notification", autospec=True)
    @patch("testrunner.configurator.default_config", autospec=True)
    def test_update_filtered(self, default_config, show_notification):
        """Filtered file types (.pyc, .tmp, dotfiles) trigger no runs."""
        out_file = self.tmp_output.join("out.log")
        command_args = [
            "-c", self.config_file,
            "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
            "-d", unicode(self.tmp.src),
        ]
        events = [
            (self.tmp.src, "filtered_1.pyc", "some new data"),
            (self.tmp.src, "filtered_2.tmp", "some new data"),
            (self.tmp.src, ".hidden", "some new data"),
        ]
        self._copy_default_config(default_config)
        default_config.RUNNER_DELAY = -1
        wm = WatchManager()
        config = Config(watch_manager=wm, command_args=command_args)
        handler = FileChangeHandler(config=config)
        notifier = Notifier(wm, handler)
        notifier.loop(callback=partial(self._event_generator, events))
        # There are some stupid race conditions (possibly due to the callbacks)
        # Sleep time allows to execute all needed code
        sleep(0.2)
        self.assertTrue(path.exists(self.tmp.src.join("filtered_1.pyc")))
        self.assertTrue(path.exists(self.tmp.src.join("filtered_2.tmp")))
        self.assertTrue(path.exists(self.tmp.src.join(".hidden")))
        self.assertFalse(path.exists(out_file))
        self.assertFalse(show_notification.called)

    @patch.object(FileChangeHandler, "show_notification", autospec=True)
    @patch("testrunner.configurator.default_config", autospec=True)
    def test_update_conf(self, default_config, show_notification):
        """Rewriting the config file reloads it and still runs the command."""
        conf_time_1 = path.getmtime(self.tmp.conf.join("config.py"))
        out_file = self.tmp_output.join("out.log")
        command_args = [
            "-c", self.config_file,
            "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
            "-d", unicode(self.tmp.src),
        ]
        events = [
            (self.tmp.conf, "config.py", "# some new data"),
            (self.tmp.conf, "config.py", "# some new data"),
        ]
        self._copy_default_config(default_config)
        default_config.RUNNER_DELAY = -1
        wm = WatchManager()
        config = Config(watch_manager=wm, command_args=command_args)
        handler = FileChangeHandler(config=config)
        notifier = Notifier(wm, handler, timeout=1000)
        notifier.loop(callback=partial(self._event_generator, events))
        # There are some stupid race conditions (possibly due to the callbacks)
        # Sleep time allows to execute all needed code
        sleep(0.2)
        conf_time_2 = path.getmtime(self.tmp.conf.join("config.py"))
        self.assertNotEqual(conf_time_1, conf_time_2)
        self.assertTrue(path.exists(out_file))
        self.assertEqual(show_notification.call_count, 2)

    def tearDown(self):
        # TempIO(deferred=True) cleans up on garbage collection.
        del self.tmp
        del self.tmp_output
| {
"repo_name": "mrfuxi/testrunner",
"path": "testrunner/tests/test_functional.py",
"copies": "1",
"size": "5619",
"license": "mit",
"hash": -2518644679485369300,
"line_mean": 35.9671052632,
"line_max": 79,
"alpha_frac": 0.6243103755,
"autogenerated": false,
"ratio": 3.7015810276679844,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48258914031679845,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from inspect import isclass
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceOptions
from ..types.utils import get_type
def is_node(objecttype):
    """
    Check if the given objecttype has Node as an interface.

    Returns False for non-classes and for classes that are not ObjectType
    subclasses; otherwise True iff any declared interface subclasses Node.
    """
    if not isclass(objecttype):
        return False

    if not issubclass(objecttype, ObjectType):
        return False

    # any() replaces the manual search loop with the idiomatic equivalent.
    return any(issubclass(i, Node) for i in objecttype._meta.interfaces)
class GlobalID(Field):
    """ID field that encodes a type-local id as a relay global ID string."""

    def __init__(self, node=None, parent_type=None, required=True, *args, **kwargs):
        super(GlobalID, self).__init__(ID, required=required, *args, **kwargs)
        # Default to the generic Node interface when no node type is given.
        self.node = node or Node
        self.parent_type_name = parent_type._meta.name if parent_type else None

    @staticmethod
    def id_resolver(parent_resolver, node, root, info, parent_type_name=None, **args):
        # Resolve the raw type-local id, then combine it with the parent
        # type name into a global ID via the node's encoder.
        type_id = parent_resolver(root, info, **args)
        parent_type_name = parent_type_name or info.parent_type.name
        return node.to_global_id(parent_type_name, type_id)  # root._meta.name

    def wrap_resolve(self, parent_resolver):
        # Curry the static resolver with this field's node and type name.
        return partial(
            self.id_resolver,
            parent_resolver,
            self.node,
            parent_type_name=self.parent_type_name,
        )
class NodeField(Field):
    """Root field that fetches any object implementing Node by its global ID."""

    def __init__(self, node, type_=False, **kwargs):
        assert issubclass(node, Node), "NodeField can only operate in Nodes"
        self.node_type = node
        self.field_type = type_

        super(NodeField, self).__init__(
            # If we don's specify a type, the field type will be the node
            # interface
            type_ or node,
            id=ID(required=True, description="The ID of the object"),
            **kwargs,
        )

    def wrap_resolve(self, parent_resolver):
        # Delegates to the node interface's resolver, constrained to the
        # concrete field type when one was specified.
        return partial(self.node_type.node_resolver, get_type(self.field_type))
class AbstractNode(Interface):
    """Abstract Node interface contributing only the global ``id`` field."""

    class Meta:
        abstract = True

    @classmethod
    def __init_subclass_with_meta__(cls, **options):
        # Every concrete Node subclass exposes exactly one field: its global ID.
        _meta = InterfaceOptions(cls)
        _meta.fields = {"id": GlobalID(cls, description="The ID of the object")}
        super(AbstractNode, cls).__init_subclass_with_meta__(_meta=_meta, **options)
class Node(AbstractNode):
    """An object with an ID"""

    @classmethod
    def Field(cls, *args, **kwargs):  # noqa: N802
        # Convenience constructor for a root "node(id:)" field.
        return NodeField(cls, *args, **kwargs)

    @classmethod
    def node_resolver(cls, only_type, root, info, id):
        return cls.get_node_from_global_id(info, id, only_type=only_type)

    @classmethod
    def get_node_from_global_id(cls, info, global_id, only_type=None):
        """Decode *global_id* and fetch the object via its type's get_node.

        Raises if the id cannot be decoded, the type is unknown, does not
        match ``only_type``, or does not implement this Node interface.
        Returns None when the type defines no ``get_node``.
        """
        try:
            _type, _id = cls.from_global_id(global_id)
        except Exception as e:
            raise Exception(
                (
                    f'Unable to parse global ID "{global_id}". '
                    'Make sure it is a base64 encoded string in the format: "TypeName:id". '
                    f"Exception message: {str(e)}"
                )
            )

        graphene_type = info.schema.get_type(_type)
        if graphene_type is None:
            raise Exception(f'Relay Node "{_type}" not found in schema')

        graphene_type = graphene_type.graphene_type

        if only_type:
            assert (
                graphene_type == only_type
            ), f"Must receive a {only_type._meta.name} id."

        # We make sure the ObjectType implements the "Node" interface
        if cls not in graphene_type._meta.interfaces:
            raise Exception(
                f'ObjectType "{_type}" does not implement the "{cls}" interface.'
            )

        get_node = getattr(graphene_type, "get_node", None)
        if get_node:
            return get_node(info, _id)

    @classmethod
    def from_global_id(cls, global_id):
        # Thin wrapper over graphql_relay; overridable by subclasses.
        return from_global_id(global_id)

    @classmethod
    def to_global_id(cls, type_, id):
        # Thin wrapper over graphql_relay; overridable by subclasses.
        return to_global_id(type_, id)
| {
"repo_name": "graphql-python/graphene",
"path": "graphene/relay/node.py",
"copies": "1",
"size": "4099",
"license": "mit",
"hash": 3513349553710882300,
"line_mean": 30.7751937984,
"line_max": 92,
"alpha_frac": 0.6011222249,
"autogenerated": false,
"ratio": 3.848826291079812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9947223044903305,
"avg_score": 0.0005450942153013745,
"num_lines": 129
} |
from functools import partial
from inspect import isfunction
from .mock import Mock
from .regex import isregex
from .mock_engine import MockEngine
from .exceptions import PookNoMatches, PookExpiredMock
class Engine(object):
"""
Engine represents the mock interceptor and matcher engine responsible
of triggering interceptors and match outgoing HTTP traffic.
Arguments:
network (bool, optional): enables/disables real networking mode.
Attributes:
debug (bool): enables/disables debug mode.
active (bool): stores the current engine activation status.
networking (bool): stores the current engine networking mode status.
mocks (list[pook.Mock]): stores engine mocks.
filters (list[function]): stores engine-level mock filter functions.
mappers (list[function]): stores engine-level mock mapper functions.
interceptors (list[pook.BaseInterceptor]): stores engine-level HTTP
traffic interceptors.
unmatched_reqs (list[pook.Request]): stores engine-level unmatched
outgoing HTTP requests.
network_filters (list[function]): stores engine-level real
networking mode filters.
"""
    def __init__(self, network=False):
        """
        Arguments:
            network (bool, optional): enables/disables real networking mode.
        """
        # Enables/Disables debug mode.
        self.debug = True
        # Store the engine enable/disable status
        self.active = False
        # Enables/Disables real networking
        self.networking = network
        # Stores mocks
        self.mocks = []
        # Store engine-level global filters
        self.filters = []
        # Store engine-level global mappers
        self.mappers = []
        # Store unmatched requests.
        self.unmatched_reqs = []
        # Store network filters used to determine when a request
        # should be filtered or not.
        self.network_filters = []
        # Built-in mock engine to be used
        self.mock_engine = MockEngine(self)
def set_mock_engine(self, engine):
"""
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
"""
if not engine:
raise TypeError('engine must be a valid object')
# Instantiate mock engine
mock_engine = engine(self)
# Validate minimum viable interface
methods = ('activate', 'disable')
if not all([hasattr(mock_engine, method) for method in methods]):
raise NotImplementedError('engine must implementent the '
'required methods')
# Use the custom mock engine
self.mock_engine = mock_engine
# Enable mock engine, if needed
if self.active:
self.mock_engine.activate()
def enable_network(self, *hostnames):
"""
Enables real networking mode, optionally passing one or multiple
hostnames that would be used as filter.
If at least one hostname matches with the outgoing traffic, the
request will be executed via the real network.
Arguments:
*hostnames: optional list of host names to enable real network
against them. hostname value can be a regular expression.
"""
def hostname_filter(hostname, req):
if isregex(hostname):
return hostname.match(req.url.hostname)
return req.url.hostname == hostname
for hostname in hostnames:
self.use_network_filter(partial(hostname_filter, hostname))
self.networking = True
def disable_network(self):
"""
Disables real networking mode.
"""
self.networking = False
def use_network_filter(self, *fn):
"""
Adds network filters to determine if certain outgoing unmatched
HTTP traffic can stablish real network connections.
Arguments:
*fn (function): variadic function filter arguments to be used.
"""
self.network_filters.extend(fn)
def flush_network_filters(self):
"""
Flushes registered real networking filters in the current
mock engine.
"""
self.network_filters = []
def mock(self, url=None, **kw):
"""
Creates and registers a new HTTP mock in the current engine.
Arguments:
url (str): request URL to mock.
activate (bool): force mock engine activation.
Defaults to ``False``.
**kw (mixed): variadic keyword arguments for ``Mock`` constructor.
Returns:
pook.Mock: new mock instance.
"""
# Activate mock engine, if explicitly requested
if kw.get('activate'):
kw.pop('activate')
self.activate()
# Create the new HTTP mock expectation
mock = Mock(url=url, **kw)
# Expose current engine instance via mock
mock._engine = self
# Register the mock in the current engine
self.add_mock(mock)
# Return it for consumer satisfaction
return mock
def add_mock(self, mock):
"""
Adds a new mock instance to the current engine.
Arguments:
mock (pook.Mock): mock instance to add.
"""
self.mocks.append(mock)
def remove_mock(self, mock):
"""
Removes a specific mock instance by object reference.
Arguments:
mock (pook.Mock): mock instance to remove.
"""
self.mocks = [m for m in self.mocks if m is not mock]
def flush_mocks(self):
"""
Flushes the current mocks.
"""
self.mocks = []
def _engine_proxy(self, method, *args, **kw):
engine_method = getattr(self.mock_engine, method, None)
if not engine_method:
raise NotImplementedError('current mock engine does not implements'
' required "{}" method'.format(method))
return engine_method(self.mock_engine, *args, **kw)
def add_interceptor(self, *interceptors):
"""
Adds one or multiple HTTP traffic interceptors to the current
mocking engine.
Interceptors are typically HTTP client specific wrapper classes that
implements the pook interceptor interface.
Note: this method is may not be implemented if using a custom mock
engine.
Arguments:
interceptors (pook.interceptors.BaseInterceptor)
"""
self._engine_proxy('add_interceptor', *interceptors)
def flush_interceptors(self):
"""
Flushes registered interceptors in the current mocking engine.
This method is low-level. Only call it if you know what you are doing.
Note: this method is may not be implemented if using a custom mock
engine.
"""
self._engine_proxy('flush_interceptors')
def remove_interceptor(self, name):
"""
Removes a specific interceptor by name.
Note: this method is may not be implemented if using a custom mock
engine.
Arguments:
name (str): interceptor name to disable.
Returns:
bool: `True` if the interceptor was disabled, otherwise `False`.
"""
return self._engine_proxy('remove_interceptor', name)
def activate(self):
"""
Activates the registered interceptors in the mocking engine.
This means any HTTP traffic captures by those interceptors will
trigger the HTTP mock matching engine in order to determine if a given
HTTP transaction should be mocked out or not.
"""
if self.active:
return None
# Activate mock engine
self.mock_engine.activate()
# Enable engine state
self.active = True
def disable(self):
"""
Disables interceptors and stops intercepting any outgoing HTTP traffic.
"""
if not self.active:
return None
# Disable current mock engine
self.mock_engine.disable()
# Disable engine state
self.active = False
    def reset(self):
        """
        Resets and flushes engine state and mocks to defaults.
        """
        # Reset engine
        # Re-running the initializer restores every default attribute (mocks,
        # filters, mappers, unmatched requests, a fresh MockEngine) while
        # preserving the current networking mode.
        Engine.__init__(self, network=self.networking)
def unmatched_requests(self):
"""
Returns a ``tuple`` of unmatched requests.
Unmatched requests will be registered only if ``networking`` mode
has been enabled.
Returns:
list: unmatched intercepted requests.
"""
return [mock for mock in self.unmatched_reqs]
def unmatched(self):
"""
Returns the total number of unmatched requests intercepted by pook.
Unmatched requests will be registered only if ``networking`` mode
has been enabled.
Returns:
int: total number of unmatched requests.
"""
return len(self.unmatched_requests())
def isunmatched(self):
"""
Returns ``True`` if there are unmatched requests. Otherwise ``False``.
Unmatched requests will be registered only if ``networking`` mode
has been enabled.
Returns:
bool
"""
return len(self.unmatched()) > 0
def pending(self):
"""
Returns the number of pending mocks to be matched.
Returns:
int: number of pending mocks.
"""
return len(self.pending_mocks())
def pending_mocks(self):
"""
Returns a ``tuple`` of pending mocks to be matched.
Returns:
tuple: pending mock instances.
"""
return [mock for mock in self.mocks if not mock.isdone()]
def ispending(self):
"""
Returns the ``True`` if the engine has pending mocks to be matched.
Otherwise ``False``.
Returns:
bool
"""
return len(self.pending_mocks())
    def isactive(self):
        """
        Returns the current engine enabled/disabled status.

        Returns:
            bool: ``True`` if the engine is active. Otherwise ``False``.
        """
        # ``active`` is toggled by ``activate()`` / ``disable()``.
        return self.active
def isdone(self):
"""
Returns True if all the registered mocks has been triggered.
Returns:
bool: True is all the registered mocks are gone, otherwise False.
"""
return all(mock.isdone() for mock in self.mocks)
def _append(self, target, *fns):
(target.append(fn) for fn in fns if isfunction(fn))
    def filter(self, *filters):
        """
        Append engine-level HTTP request filter functions.

        Registered filters are evaluated for every intercepted request in
        ``match()``; a filter returning a falsy value vetoes the matching
        process.

        Arguments:
            *filters: variadic filter functions to be added.
        """
        self._append(self.filters, *filters)
    def map(self, *mappers):
        """
        Append engine-level HTTP request mapper functions.

        Mappers are applied to every intercepted request in ``match()`` and
        must return a request object.

        Arguments:
            *mappers: variadic mapper functions to be added.
        """
        self._append(self.mappers, *mappers)
def should_use_network(self, request):
"""
Verifies if real networking mode should be used for the given
request, passing it to the registered network filters.
Arguments:
request (pook.Request): outgoing HTTP request to test.
Returns:
bool
"""
return (self.networking and
all((fn(request) for fn in self.network_filters)))
    def match(self, request):
        """
        Matches a given Request instance contract against the registered mocks.

        If a mock passes all the matchers, its response will be returned.

        Arguments:
            request (pook.Request): Request contract to match.

        Raises:
            pook.PookNoMatches: if networking is disabled and no mock matches
                with the given request contract.
            ValueError: if a registered mapper does not return a request.

        Returns:
            pook.Response: the mock response to be used by the interceptor.
        """
        # Trigger engine-level request filters
        for test in self.filters:
            if not test(request, self):
                return False
        # Trigger engine-level request mappers
        for mapper in self.mappers:
            request = mapper(request, self)
            if not request:
                raise ValueError('map function must return a request object')
        # Store list of mock matching errors for further debugging
        match_errors = []
        # Try to match the request against registered mock definitions.
        # Iterate over a shallow copy, since expired mocks are removed from
        # ``self.mocks`` while iterating.
        for mock in self.mocks[:]:
            try:
                # Return the first matched HTTP request mock
                matches, errors = mock.match(request.copy())
                if len(errors):
                    match_errors += errors
                if matches:
                    return mock
            except PookExpiredMock:
                # Remove the mock if already expired
                self.mocks.remove(mock)
        # Validate that we have a mock
        if not self.should_use_network(request):
            msg = 'pook error!\n\n'
            msg += (
                '=> Cannot match any mock for the '
                'following request:\n{}'.format(request)
            )
            # Compose unmatch error details, if debug mode is enabled
            if self.debug:
                err = '\n\n'.join([str(err) for err in match_errors])
                if err:
                    msg += '\n\n=> Detailed matching errors:\n{}\n'.format(err)
            # Raise no matches exception
            raise PookNoMatches(msg)
        # Register unmatched request; falling through implicitly returns
        # ``None`` so the interceptor lets the request reach the real network.
        self.unmatched_reqs.append(request)
| {
"repo_name": "h2non/pook",
"path": "pook/engine.py",
"copies": "1",
"size": "13979",
"license": "mit",
"hash": 8089552689612547000,
"line_mean": 30.3430493274,
"line_max": 79,
"alpha_frac": 0.5922455111,
"autogenerated": false,
"ratio": 4.843728343728344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5935973854828345,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from io import BytesIO
from operator import attrgetter
import unicodecsv as csv
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_text
except ImportError: # Django < 1.4
from django.utils.encoding import force_unicode as force_text
def get_pretty_name(accessor):
    """Turn a dotted/underscored accessor string into a readable title."""
    words = accessor.replace('_', ' ').replace('.', ' ')
    return words.capitalize()
def Getter(accessor, normalizer=lambda x: x):
    """
    Builds a function that extracts a value from an object.

    ``accessor`` may be an attribute path (string) or a callable. If the
    extracted value is itself callable (e.g. ``get_absolute_url``), it is
    called. ``normalizer`` is applied to the final value.
    """
    if callable(accessor):
        short_description = getattr(accessor, 'short_description', None)
    else:
        short_description = get_pretty_name(accessor)
        accessor = attrgetter(accessor)

    def getter(obj):
        value = accessor(obj)
        # handle things like get_absolute_url
        if callable(value):
            value = value()
        return normalizer(value)

    if short_description:
        getter.short_description = short_description
    return getter
# Should these be i18nized?
bool2string_map = {True: 'Yes', False: 'No'}
# Getter preset rendering boolean values as 'Yes'/'No' strings.
BooleanGetter = partial(Getter, normalizer=bool2string_map.get)
def DisplayGetter(accessor, *args, **kwargs):
    """
    Builds a Getter that reads the display name of a model field with
    choices, via Django's ``get_<field>_display`` accessor.
    """
    title = get_pretty_name(accessor)
    getter = Getter('get_%s_display' % accessor, *args, **kwargs)
    getter.short_description = title
    return getter
class ColumnSerializer(object):
    """Serializes an iterable of objects to CSV, one column per accessor."""
    # Whether the first CSV row should contain the column titles.
    output_headers = True
    def __init__(self, columns, **kwargs):
        # ``columns`` is a list of accessors or (accessor, header) pairs.
        self.output_headers = kwargs.get('output_headers', self.output_headers)
        self.normalized_columns = list(map(self._normalize_column, columns))
    def __call__(self, queryset, file=None):
        """
        Serializes a queryset to CSV. If you pass in a file, it will write to
        that file, otherwise, it will just return a string.
        """
        if file is None:
            output = BytesIO()
            writer = csv.writer(output)
        else:
            writer = csv.writer(file)
        if self.output_headers:
            writer.writerow(self.get_header_row())
        for obj in queryset:
            writer.writerow(self.get_row(obj))
        if file is None:
            # No file given: hand the CSV back as a decoded string.
            output.seek(0)
            return output.read().decode('utf-8')
    def format_header(self, column):
        # Only callables need an explicit title: string accessors get one
        # generated by Getter. Returns None when headers are disabled.
        if self.output_headers:
            try:
                return column.short_description
            except AttributeError:
                raise ImproperlyConfigured(
                    "If you pass a function as an accessor,"
                    " please provide a column title."
                )
    def get_header_row(self):
        # Row of column titles (second element of each normalized pair).
        return [force_text(c[1]) for c in self.normalized_columns]
    def get_row(self, obj):
        # Row of values extracted from ``obj`` by each column getter.
        return [force_text(c[0](obj)) for c in self.normalized_columns]
    def _normalize_column(self, column):
        # column can either be a 2-tuple of (accessor, header), or just an
        # accessor. accessor will be passed to Getter, and we will get the
        # header off of the Getter. Returns a 2-tuple of (Getter, header).
        if isinstance(column, (tuple, list)):
            column = self._normalize_getter(column[0]), column[1]
        else:
            column = self._normalize_getter(column)
            column = (column, self.format_header(column))
        return column
    # Class-level cache shared by all instances/subclasses: maps accessors
    # to their Getter so repeated columns reuse the same callable.
    _getter_cache = {}
    def _normalize_getter(self, getter):
        if getter not in self._getter_cache:
            self._getter_cache[getter] = Getter(getter)
        return self._getter_cache[getter]
| {
"repo_name": "fusionbox/django-separated",
"path": "separated/utils.py",
"copies": "1",
"size": "3832",
"license": "bsd-2-clause",
"hash": -3443047417813258000,
"line_mean": 30.1544715447,
"line_max": 79,
"alpha_frac": 0.624217119,
"autogenerated": false,
"ratio": 4.183406113537118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5307623232537118,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from io import BytesIO
from os import path
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.db.models import signals
from django.forms import Textarea, ValidationError
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import smart_str
from django.utils.safestring import SafeData, mark_safe
from django.utils.translation import gettext_lazy as _
from machina.conf import settings as machina_settings
_rendered_field_name = lambda name: '_{}_rendered'.format(name)
def _get_markup_widget():
    """
    Import and return the form widget class configured through the
    ``MACHINA_MARKUP_WIDGET`` setting, falling back to Django's plain
    ``Textarea`` when the setting is unset.
    """
    dotted_path = machina_settings.MARKUP_WIDGET
    try:
        # The assert doubles as the "setting is unset" branch selector.
        assert dotted_path is not None
        module, widget = dotted_path.rsplit('.', 1)
        module, widget = smart_str(module), smart_str(widget)
        widget = getattr(__import__(module, {}, {}, [widget]), widget)
        return widget
    except ImportError as e:
        raise ImproperlyConfigured(
            _('Could not import MACHINA_MARKUP_WIDGET {}: {}').format(
                machina_settings.MARKUP_WIDGET, e
            )
        )
    except AssertionError:
        return Textarea
# Resolved once at import time and reused by MarkupTextField.formfield().
MarkupTextFieldWidget = _get_markup_widget()
def _get_render_function(dotted_path, kwargs):
    """Import the render callable at ``dotted_path`` and bind ``kwargs`` to it."""
    module, func = dotted_path.rsplit('.', 1)
    module, func = smart_str(module), smart_str(func)
    func = getattr(__import__(module, {}, {}, [func]), func)
    return partial(func, **kwargs)
# Resolve the markup render function once at import time from the
# MACHINA_MARKUP_LANGUAGE setting; a falsy setting means raw text is
# passed through unchanged.
try:
    markup_lang = machina_settings.MARKUP_LANGUAGE
    render_func = (
        _get_render_function(markup_lang[0], markup_lang[1]) if markup_lang
        else lambda text: text
    )
except ImportError as e:
    raise ImproperlyConfigured(
        _('Could not import MACHINA_MARKUP_LANGUAGE {}: {}').format(
            machina_settings.MARKUP_LANGUAGE, e,
        )
    )
except AttributeError:
    # Raised when the settings object has no MARKUP_LANGUAGE attribute.
    raise ImproperlyConfigured(_('MACHINA_MARKUP_LANGUAGE setting is required'))
class MarkupText(SafeData):
    """
    Proxy object giving access to both the raw markup and its rendered
    counterpart for a given model instance field.
    """
    def __init__(self, instance, field_name, rendered_field_name):
        # Stores a reference to the instance along with field names
        # to make assignment possible.
        self.instance = instance
        self.field_name = field_name
        self.rendered_field_name = rendered_field_name
    # raw is read/write
    def _get_raw(self):
        return self.instance.__dict__[self.field_name]
    def _set_raw(self, val):
        setattr(self.instance, self.field_name, val)
    raw = property(_get_raw, _set_raw)
    # rendered is a read only property
    def _get_rendered(self):
        return mark_safe(getattr(self.instance, self.rendered_field_name))
    rendered = property(_get_rendered)
    # Allows display via templates to work without safe filter
    def __str__(self):
        return self.raw
    # Return the length of the *raw* string so that bool tests work as
    # expected (the previous comment wrongly said "rendered").
    def __len__(self):
        return len(self.raw)
class MarkupTextDescriptor:
    """
    Acts as the Django's default attribute descriptor class, enabled via the SubfieldBase metaclass.

    The main difference is that it does not call to_python() on the MarkupTextField class. Instead,
    it stores the two different values of a markup content (the raw and the rendered data)
    separately. These values can be separately updated when something is assigned. When the field is
    accessed, a MarkupText instance will be returned ; this one is built with the current data.
    """
    def __init__(self, field):
        self.field = field
        self.rendered_field_name = _rendered_field_name(self.field.name)
    def __get__(self, instance, owner):
        # Class-level access (instance is None) yields None, not the
        # descriptor itself.
        if instance is None:
            return None
        raw = instance.__dict__[self.field.name]
        if raw is None:
            return None
        return MarkupText(instance, self.field.name, self.rendered_field_name)
    def __set__(self, instance, value):
        if isinstance(value, MarkupText):
            # Copy both the raw markup and its pre-rendered counterpart.
            instance.__dict__[self.field.name] = value.raw
            setattr(instance, self.rendered_field_name, value.rendered)
        else:
            # Set only the raw field
            instance.__dict__[self.field.name] = value
class MarkupTextField(models.TextField):
    """
    A MarkupTextField contributes two columns to the model instead of the standard single column.

    The initial column store any content written by using a given markup language and the other one
    keeps the rendered content returned by a specific render function.
    """
    def __init__(self, *args, **kwargs):
        # For Django 1.7 migration serializer compatibility: the frozen version of a
        # MarkupTextField can't try to add a '*_rendered' field, because the '*_rendered' field
        # itself is frozen / serialized as well.
        self.add_rendered_field = not kwargs.pop('no_rendered_field', False)
        super().__init__(*args, **kwargs)
    def deconstruct(self):  # pragma: no cover
        """
        As outlined in the Django 1.7 documentation, this method tells Django how to take an
        instance of a new field in order to reduce it to a serialized form. This can be used to
        configure what arguments need to be passed to the __init__() method of the field in order to
        re-create it. We use it in order to pass the 'no_rendered_field' to the __init__() method.
        This will allow the _rendered field to not be added to the model class twice.
        """
        name, import_path, args, kwargs = super().deconstruct()
        kwargs['no_rendered_field'] = True
        return name, import_path, args, kwargs
    def contribute_to_class(self, cls, name):
        # Only concrete models get the hidden '<name>_rendered' column.
        if self.add_rendered_field and not cls._meta.abstract:
            rendered_field = models.TextField(editable=False, blank=True, null=True)
            cls.add_to_class(_rendered_field_name(name), rendered_field)
            # The data will be rendered before each save
            signals.pre_save.connect(self.render_data, sender=cls)
        # Add the default text field
        super().contribute_to_class(cls, name)
        # Associates the name of this field to a special descriptor that will return
        # an appropriate Markup object each time the field is accessed
        setattr(cls, name, MarkupTextDescriptor(self))
    def value_to_string(self, obj):
        # Serialize using the raw markup, not the rendered output.
        value = self.value_from_object(obj)
        return value.raw
    def get_db_prep_value(self, value, connection=None, prepared=False):
        try:
            return value.raw
        except AttributeError:
            # Plain strings (or None) are stored as-is.
            return value
    def render_data(self, signal, sender, instance=None, **kwargs):
        # pre_save receiver: refresh the companion rendered column.
        value = getattr(instance, self.attname)
        rendered = None
        if hasattr(value, 'raw'):
            rendered = render_func(value.raw)
        setattr(instance, _rendered_field_name(self.attname), rendered)
    def formfield(self, **kwargs):
        # Caller-provided kwargs override the configured widget defaults.
        widget = _get_markup_widget()
        defaults = {'widget': widget(**machina_settings.MARKUP_WIDGET_KWARGS)}
        defaults.update(kwargs)
        field = super().formfield(**defaults)
        return field
class ExtendedImageField(models.ImageField):
    """
    An ExtendedImageField is an ImageField whose image can be resized before being saved.

    This field also add the capability of checking the image size, width and height a user may send.
    """
    def __init__(self, *args, **kwargs):
        # Target dimensions used to resize the uploaded image (both required).
        self.width = kwargs.pop('width', None)
        self.height = kwargs.pop('height', None)
        # Both min_width and max_width must be provided in order to be used
        self.min_width = kwargs.pop('min_width', None)
        self.max_width = kwargs.pop('max_width', None)
        # Both min_height and max_height must be provided in order to be used
        self.min_height = kwargs.pop('min_height', None)
        self.max_height = kwargs.pop('max_height', None)
        # Maximum upload size in bytes; 0 disables the check.
        self.max_upload_size = kwargs.pop('max_upload_size', 0)
        super().__init__(*args, **kwargs)
    def clean(self, *args, **kwargs):
        from django.core.files.images import get_image_dimensions
        data = super().clean(*args, **kwargs)
        image = data.file
        # Controls the file size
        if self.max_upload_size and hasattr(image, 'size'):
            if image.size > self.max_upload_size:
                raise ValidationError(
                    _('Files of size greater than {} are not allowed. Your file is {}').format(
                        filesizeformat(self.max_upload_size),
                        filesizeformat(image.size)
                    )
                )
        # Controls the image size
        image_width, image_height = get_image_dimensions(data)
        if self.min_width and self.max_width \
                and not self.min_width <= image_width <= self.max_width:
            raise ValidationError(
                _('Images of width lesser than {}px or greater than {}px or are not allowed. '
                  'The width of your image is {}px').format(
                    self.min_width, self.max_width, image_width
                )
            )
        if self.min_height and self.max_height \
                and not self.min_height <= image_height <= self.max_height:
            raise ValidationError(
                _('Images of height lesser than {}px or greater than {}px or are not allowed. '
                  'The height of your image is {}px').format(
                    self.min_height, self.max_height, image_height
                )
            )
        return data
    def save_form_data(self, instance, data):
        # Resize only when both target dimensions were configured.
        if data and self.width and self.height:
            content = self.resize_image(data.read(), (self.width, self.height))
            # Handle the filename because the image will be converted to PNG
            filename = path.splitext(path.split(data.name)[-1])[0]
            filename = '{}.png'.format(filename)
            # Regenerate a File object
            data = SimpleUploadedFile(filename, content)
        super().save_form_data(instance, data)
    def resize_image(self, data, size):
        """ Resizes the given image to fit inside a box of the given size. """
        from machina.core.compat import PILImage as Image
        image = Image.open(BytesIO(data))
        # Resize!
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
        # Pillow requires Image.LANCZOS here — confirm the pinned version.
        image.thumbnail(size, Image.ANTIALIAS)
        string = BytesIO()
        image.save(string, format='PNG')
        return string.getvalue()
| {
"repo_name": "ellmetha/django-machina",
"path": "machina/models/fields.py",
"copies": "1",
"size": "10513",
"license": "bsd-3-clause",
"hash": -481433791678441500,
"line_mean": 37.937037037,
"line_max": 100,
"alpha_frac": 0.638067155,
"autogenerated": false,
"ratio": 4.228881737731295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009715274367848441,
"num_lines": 270
} |
from functools import partial
from io import StringIO
import os
from os.path import join
from unittest.mock import call, mock_open, Mock
import psutil
from app.subcommands.stop_subcommand import Configuration, StopSubcommand
from test.framework.base_unit_test_case import BaseUnitTestCase
class TestStopSubcommand(BaseUnitTestCase):
    """Unit tests for the stop subcommand's PID-file / process termination logic."""
    def setUp(self):
        super().setUp()
        # Patch filesystem and psutil probes so no real process is touched.
        self._mock_os_path_exists = self.patch('os.path.exists')
        self._mock_psutil_pid_exists = self.patch('psutil.pid_exists')
        self._mock_psutil_process = self.patch('psutil.Process')
        self._fake_slave_pid_file_sys_path = join(os.getcwd(), 'slave_pid_file')
        self._fake_master_pid_file_sys_path = join(os.getcwd(), 'master_pid_file')
        Configuration['slave_pid_file'] = self._fake_slave_pid_file_sys_path
        Configuration['master_pid_file'] = self._fake_master_pid_file_sys_path
        self._fake_slave_pid = 1111
        self._fake_master_pid = 2222
        self._mock_open = mock_open()
        self._mock_open.side_effect = [
            StringIO(str(self._fake_slave_pid)),  # pretend to be the slave pid file object
            StringIO(str(self._fake_master_pid)),  # pretend to be the master pid file object
        ]
        self.patch('app.subcommands.stop_subcommand.open', new=self._mock_open, create=True)
        self._mock_os_remove = self.patch('os.remove')
        self._stop_subcommand = StopSubcommand()
        # setup the return value of time.time() and SIGTERM grace period so the test won't actually sleep
        self._stop_subcommand.SIGTERM_SIGKILL_GRACE_PERIOD_SEC = -1
        mock_time = self.patch('time.time')
        mock_time.return_value = 0
    def _setup_pid_file_does_not_exist(self):
        self._mock_os_path_exists.return_value = False
    def _setup_pid_file_exists_but_pid_does_not_exist(self):
        self._mock_os_path_exists.return_value = True
        self._mock_psutil_pid_exists.return_value = False
    def _setup_both_pid_file_and_pids_exist(self):
        self._mock_os_path_exists.return_value = True
        self._mock_psutil_pid_exists.return_value = True
    def test_stop_subcommand_does_not_call_terminate_if_pid_file_does_not_exist(self):
        # Arrange
        self._setup_pid_file_does_not_exist()
        # Act
        self._stop_subcommand.run(None)
        # Assert
        self.assertFalse(self._mock_psutil_process.terminate.called)
        # Both pid files (slave first, then master) should have been probed.
        self.assertEqual(
            [
                call(self._fake_slave_pid_file_sys_path),
                call(self._fake_master_pid_file_sys_path),
            ],
            self._mock_os_path_exists.call_args_list,
        )
    def test_stop_subcommand_does_not_call_terminate_if_pid_does_not_exist(self):
        # Arrange
        self._setup_pid_file_exists_but_pid_does_not_exist()
        # Act
        self._stop_subcommand.run(None)
        # Assert
        self.assertFalse(self._mock_psutil_process.terminate.called)
        # Stale pid files should be cleaned up.
        self.assertEqual(
            [
                call(self._fake_slave_pid_file_sys_path),
                call(self._fake_master_pid_file_sys_path),
            ],
            self._mock_os_remove.call_args_list,
        )
    def test_stop_subcommand_does_not_call_terminate_if_pid_file_and_pid_exist_but_command_isnt_whitelisted(self):
        # Arrange
        self._setup_both_pid_file_and_pids_exist()
        # Processes whose cmdline is not a ClusterRunner invocation must be left alone.
        self._setup_processes(
            master_cmdline=['python', './foo.py'],
            slave_cmdline=['python', './bar.py'],
        )
        # Act
        self._stop_subcommand.run(None)
        # Assert
        self.assertFalse(self._mock_psutil_process.terminate.called)
    def _create_mock_process(self, pid, child_processes=None, cmdline=None):
        # Builds a psutil.Process stand-in with the given pid/children/cmdline.
        proc = Mock(psutil.Process)
        proc.pid = pid
        proc.is_running.return_value = True
        if cmdline:
            proc.cmdline.return_value = cmdline
        proc.children.return_value = child_processes if child_processes else []
        return proc
    def _setup_processes(self, master_cmdline=None, slave_cmdline=None):
        # psutil.Process is consulted for the slave first, then the master.
        master_process = self._create_mock_process(
            self._fake_master_pid,
            cmdline=master_cmdline if master_cmdline else ['python', 'main.py', 'master'],
        )
        slave_child_process = self._create_mock_process(3333)
        slave_process = self._create_mock_process(
            self._fake_slave_pid,
            child_processes=[slave_child_process],
            cmdline=slave_cmdline if slave_cmdline else ['python', 'main.py', 'slave'],
        )
        self._mock_psutil_process.side_effect = [
            slave_process,
            master_process,
        ]
        return master_process, slave_process, slave_child_process
    def _assert_called_terminate(self, process_list):
        for proc in process_list:
            self.assertTrue(proc.terminate.called)
    @staticmethod
    def _successful_terminate_or_kill(proc):
        # Simulates a process that actually dies when signalled.
        proc.is_running.return_value = False
    def test_stop_subcommand_kills_pid_with_sigterm_if_pid_file_and_pid_exist_and_command_is_whitelisted(self):
        # Arrange
        self._setup_both_pid_file_and_pids_exist()
        master_process, slave_process, slave_child_process = self._setup_processes()
        master_process.terminate.side_effect = partial(self._successful_terminate_or_kill, master_process)
        slave_process.terminate.side_effect = partial(self._successful_terminate_or_kill, slave_process)
        slave_child_process.terminate.side_effect = partial(self._successful_terminate_or_kill, slave_child_process)
        # Act
        self._stop_subcommand.run(None)
        # Assert
        # SIGTERM sufficed, so SIGKILL must never be sent.
        self._assert_called_terminate([master_process, slave_process, slave_child_process])
        self.assertFalse(master_process.kill.called)
        self.assertFalse(slave_process.kill.called)
        self.assertFalse(slave_child_process.kill.called)
    def test_stop_subcommand_kills_proc_with_sigkill_if_still_running_after_sigterm(self):
        # Arrange
        self._setup_both_pid_file_and_pids_exist()
        master_process, slave_process, slave_child_process = self._setup_processes()
        # Only the master dies on SIGTERM; the slave tree survives it.
        master_process.terminate.side_effect = partial(self._successful_terminate_or_kill, master_process)
        # Act
        self._stop_subcommand.run(None)
        # Assert
        self._assert_called_terminate([master_process, slave_process, slave_child_process])
        self.assertFalse(master_process.kill.called)
        self.assertTrue(slave_process.kill.called)
        self.assertTrue(slave_child_process.kill.called)
| {
"repo_name": "josephharrington/ClusterRunner",
"path": "test/unit/subcommands/test_stop_subcommand.py",
"copies": "3",
"size": "6628",
"license": "apache-2.0",
"hash": 5100339790978438000,
"line_mean": 37.7602339181,
"line_max": 116,
"alpha_frac": 0.6430295715,
"autogenerated": false,
"ratio": 3.6740576496674056,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5817087221167405,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import accumulate as accumulate_, chain, islice, tee
# Typing-only imports, guarded so they are not required at runtime.
MYPY = False
if MYPY:
    from typing import Callable, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar
    T = TypeVar('T')
# ``filter`` preset that drops falsy entries from an iterable.
filter_ = partial(filter, None)  # type: Callable[[Iterable[Optional[T]]], Iterator[T]]
flatten = chain.from_iterable
def accumulate(iterable, initial=None):
    # type: (Iterable[int], int) -> Iterable[int]
    """Running totals of *iterable*, optionally seeded with *initial*."""
    seed = [] if initial is None else [initial]
    return accumulate_(chain(seed, iterable))
def pairwise(iterable):
    # type: (Iterable[T]) -> Iterable[Tuple[T, T]]
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    left, right = tee(iterable)
    return zip(left, islice(right, 1, None))
def unique(iterable):
    # type: (Iterable[T]) -> Iterator[T]
    """Yield items in first-seen order, dropping later duplicates."""
    seen = set()  # type: Set[T]
    remember = seen.add
    for item in iterable:
        if item not in seen:
            remember(item)
            yield item
# Below functions taken from https://github.com/erikrose/more-itertools
# Copyright (c) 2012 Erik Rose
def take(n, iterable):
    # type: (int, Iterable[T]) -> List[T]
    """Return first *n* items of the iterable as a list.

    >>> take(3, range(10))
    [0, 1, 2]

    If there are fewer than *n* items in the iterable, all of them are
    returned.

    >>> take(10, range(3))
    [0, 1, 2]

    """
    # islice stops early on short iterables, so no length check is needed.
    return list(islice(iterable, n))
def chunked(iterable, n):
    """Break *iterable* into lists of length *n*:

    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
    [[1, 2, 3], [4, 5, 6]]

    If the length of *iterable* is not evenly divisible by *n*, the last
    returned list will be shorter:

    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[1, 2, 3], [4, 5, 6], [7, 8]]

    To use a fill-in value instead, see the :func:`grouper` recipe.

    :func:`chunked` is useful for splitting up a computation on a large number
    of keys into batches, to be pickled and sent off to worker processes. One
    example is operations on rows in MySQL, which does not implement
    server-side cursors properly and would otherwise load the entire dataset
    into RAM on the client.

    """
    source = iter(iterable)

    def next_chunk():
        return list(islice(source, n))

    # ``iter`` with a sentinel stops on the first empty chunk.
    return iter(next_chunk, [])
def partition(pred, iterable):
    # type: (Optional[Callable[[T], bool]], Iterable[T]) -> Tuple[Iterator[T], Iterator[T]]
    """
    Returns a 2-tuple of iterables derived from the input iterable.
    The first yields the items that have ``pred(item) == False``.
    The second yields the items that have ``pred(item) == True``.

        >>> is_odd = lambda x: x % 2 != 0
        >>> iterable = range(10)
        >>> even_items, odd_items = partition(is_odd, iterable)
        >>> list(even_items), list(odd_items)
        ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])

    If *pred* is None, :func:`bool` is used.

        >>> iterable = [0, 1, False, True, '', ' ']
        >>> false_items, true_items = partition(None, iterable)
        >>> list(false_items), list(true_items)
        ([0, False, ''], [1, True, ' '])

    """
    if pred is None:
        pred = bool
    flagged = ((pred(item), item) for item in iterable)
    left, right = tee(flagged)
    falsy = (item for flag, item in left if not flag)
    truthy = (item for flag, item in right if flag)
    return falsy, truthy
| {
"repo_name": "divmain/GitSavvy",
"path": "core/fns.py",
"copies": "1",
"size": "3300",
"license": "mit",
"hash": -1071184683616708000,
"line_mean": 29,
"line_max": 91,
"alpha_frac": 0.5875757576,
"autogenerated": false,
"ratio": 3.3880903490759753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9474144781374284,
"avg_score": 0.00030426506033824944,
"num_lines": 110
} |
from functools import partial
from itertools import chain, izip
from pycparser import c_ast
from pyc_fmtstr_parser.printf_parse import printf_parse, Arg_type as p_Arg_type
from pyc_fmtstr_parser.scanf_parse import scanf_parse, Arg_type as s_Arg_type
from decomp import ida, utils
from decomp.c import decl as cdecl, types as ep_ct
from decomp.cpu import ida as cpu_ida, regs
flatten = chain.from_iterable
# MIPS N32
# $v0,$v1, $a0..$a7 for return/args
# $f0,$f2 and $f12..f19 for fp return/args
# Error types for unsupported calling-convention cases. Named after the
# conditions they signal; the raise sites are outside this chunk.
class RegSpillError(Exception): pass
class StructByValueError(Exception): pass
# WARNING: this must be in the same order as c.types.ep_ctypes!
# (pointer is intentionally missing)
# Pairs each exposed C type with the decompiler slot typename it maps to.
c_type_to_slot = zip(
    iter(ep_ct.ep_ctypes),
    [ep_ct.slot_to_typename[x] for x in
        [ep_ct.slot_types[y] for y in
            ['i8', # signed char
             'u8', # unsigned char
             'i16', # short
             'u16', # unsigned short
             'i32', # int
             'u32', # unsigned int
             'i32', # long
             'u32', # unsigned long
             'i64', # long long
             'u64', # unsigned long long
             's', # float
             'd', # double
             'i8' # char
             ]]])
def make_stdio_sw(fmt_type, types, pointerize):
    '''enum -> [str] -> bool -> dict'''
    # Pair each format-type enum member (looked up by name) with the
    # corresponding slot typename from c_type_to_slot; the pairing is
    # positional, so `types` must follow c_type_to_slot's order.
    pairs = izip((fmt_type[name] for name in types), c_type_to_slot)
    # scanf-style callers receive pointers to their outputs, hence
    # the optional ptr() wrapping.
    return {fmt: (ep_ct.ptr(slot)
                  if pointerize is True
                  else slot)
            for (fmt, (_, slot)) in pairs}
# Parser Arg_type member names handled here, listed in the same order as
# c_type_to_slot so make_stdio_sw can pair them positionally.
printf_types = [
    'TYPE_SCHAR', 'TYPE_UCHAR', 'TYPE_SHORT', 'TYPE_USHORT',
    'TYPE_INT', 'TYPE_UINT', 'TYPE_LONGINT', 'TYPE_ULONGINT',
    'TYPE_LONGLONGINT', 'TYPE_ULONGLONGINT', 'TYPE_DOUBLE',
    'TYPE_DOUBLE', 'TYPE_CHAR'
]
scanf_types = [
    'TYPE_SCHAR', 'TYPE_UCHAR', 'TYPE_SHORT', 'TYPE_USHORT',
    'TYPE_INT', 'TYPE_UINT', 'TYPE_LONGINT', 'TYPE_ULONGINT',
    'TYPE_LONGLONGINT', 'TYPE_ULONGLONGINT', 'TYPE_FLOAT',
    'TYPE_DOUBLE', 'TYPE_CHAR'
]
# printf arguments are passed by value ...
printf_sw = make_stdio_sw(p_Arg_type, printf_types, pointerize=False)
# ... except %p and %s, which need explicit pointer types.
printf_sw[p_Arg_type.TYPE_POINTER] = ep_ct.ptr(ep_ct.simple_typename(['void']))
printf_sw[p_Arg_type.TYPE_STRING] = ep_ct.ptr(ep_ct.simple_typename(['char']))
# scanf receives pointers to its outputs, hence pointerize=True; %p then
# becomes a pointer-to-pointer.
scanf_sw = make_stdio_sw(s_Arg_type, scanf_types, pointerize=True)
scanf_sw[s_Arg_type.TYPE_POINTER] = ep_ct.ptr(ep_ct.ptr(ep_ct.simple_typename(['void'])))
scanf_sw[s_Arg_type.TYPE_STRING] = ep_ct.ptr(ep_ct.simple_typename(['char']))
scanf_sw[s_Arg_type.TYPE_CHARSEQ] = scanf_sw[s_Arg_type.TYPE_STRING]
def n32ify_regs(regs):
    '''[str] -> [str]'''
    # Under the N32 calling convention the o32 temporaries $t0..$t3 are
    # renamed to the extra argument registers $a4..$a7; all other names
    # pass through unchanged.
    renames = {'$t0': '$a4', '$t1': '$a5', '$t2': '$a6', '$t3': '$a7'}
    return [renames.get(reg, reg) for reg in regs]
# fix up IDA's register list for N32
reg_list = n32ify_regs(cpu_ida.ida_reg_list())
# offset of beginning of FPR regs in IDA's list
fpr_off = reg_list.index('$f0')
# callee-saved registers (indices into reg_list)
saveregs = frozenset(flatten([
    xrange(16, 24),  # $s0..$s7
    xrange(28, 31),  # $gp, $sp, $fp
    xrange(fpr_off + 20, fpr_off + 32, 2)  # $f20..$f31, evens
]))
# gpr and fpr argument and return registers
arg_regs = list(xrange(4, 12))  # $a0..$a7
fp_arg_regs = list(xrange(fpr_off + 12, fpr_off + 20))  # $f12..$f19
ret_regs = list([2, 3])  # $v0..$v1
fp_ret_regs = list([fpr_off, fpr_off + 2])  # $f0,$f2
# registers that we pass via the ARGS struct
regs_by_reference = frozenset(arg_regs + fp_arg_regs + ret_regs + fp_ret_regs)
def type_to_reg_and_slot(node, chooser, i):
    '''c_ast -> fn -> int -> (reg_type, slot_type) | None

    Map the C type of argument/return-value `node` at position `i` to the
    register (chosen by `chooser`) and slot type that hold it under N32.
    Raises RegSpillError past the 8th register and StructByValueError for
    by-value structs.
    '''
    def yield_void():
        # return an empty list for (void) arglists
        # NOTE: deliberate exception-based control flow -- the StopIteration
        # propagates to utils.items_or_default in the callers, which turns
        # it into the supplied default.
        raise StopIteration
    def maybe_fail(node):
        # Fallback for AST node kinds not in `sw`: reject by-value structs,
        # otherwise recurse into the wrapped type.
        if type(node) is c_ast.Struct:
            raise StructByValueError('structs by value not yet supported')
        return type_to_reg_and_slot(node.type, chooser, i)
    def get(names):
        '''[str] -> (gpr|fpr, slot_ty)'''
        if 'void' in names:
            return yield_void()
        ti = ida.parse_decl(' '.join(names))
        (ty, base, slot) = chooser(ti)
        return (ty(base + i), slot)
    # Dispatch table: unwrap declaration wrappers until we reach the
    # identifier naming the concrete type.
    sw = {
        c_ast.Decl: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.TypeDecl: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.Typename: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.IdentifierType: lambda x: get(x.names)
    }
    if i > 7:
        raise RegSpillError('spilling registers to stack not yet supported')
    # in order to use chooser we need a tinfo_t--make one suitable for an
    # an int (which is also suitable for a pointer on N32)
    dummy_ti = ida.parse_decl('int')
    (_, base, _) = chooser(dummy_ti)
    node_ty = type(node)
    # Arrays and pointers always travel as 64-bit GPR values on N32.
    if node_ty in [c_ast.ArrayDecl, c_ast.PtrDecl]:
        return (regs.gpr(base + i), ep_ct.slot_types.u64)
    elif node_ty is c_ast.Enum:
        return (regs.gpr(base + i), ep_ct.slot_types.i64)
    else:
        return utils.dictswitch(node_ty, sw, node, maybe_fail, node)
def get_info_for_types(nodes, caster, chooser, pos=0, handle_va=False):
    '''[c_ast] -> fn -> fn -> int -> bool ->
    (reg_type, slot_type) | c_ast.EllipsisParam | None'''
    # nodes: a list of Decls from a FuncDecl
    # caster: use this function to produce casts to arg types (see 'castify'
    #   in c.decl)
    # chooser: a function that will determine the register number for a
    #   return value or argument
    # pos: number of a positional argument (0-indexed); variadic tails start
    #   past the fixed arguments
    # handle_va: pass False when building the initial list of function
    #   signatures; pass True when variable arguments should be expanded
    # Each element is either the EllipsisParam sentinel (left as-is until
    # varargs are processed) or a (reg, slot, cast) triple.
    return [c_ast.EllipsisParam
            if (handle_va is False and
                type(node) is c_ast.EllipsisParam)
            else
            type_to_reg_and_slot(node, chooser, i) + (caster(node),)
            for (i, node) in enumerate(nodes, pos)]
def va_chooser(gpr_base, _, ti):
    '''int -> int -> tinfo_t -> (type, int, slot_ty)'''
    # Variadic arguments always travel in GPRs on N32 (the fpr_base
    # parameter is accepted only for signature parity with pos_chooser);
    # only the slot type depends on the C type.
    slots = ep_ct.slot_types
    slot = (slots.s if ti.is_float()
            else slots.d if ti.is_double()
            else slots.u64)
    return (regs.gpr, gpr_base, slot)
def pos_chooser(gpr_base, fpr_base, ti):
    '''int -> int -> tinfo_t -> (type, int, slot_ty)'''
    # Positional floating-point arguments go in FPRs; everything else
    # (integers, pointers) goes in GPRs as a 64-bit slot.
    slots = ep_ct.slot_types
    if ti.is_float():
        return (regs.fpr, fpr_base, slots.s)
    if ti.is_double():
        return (regs.fpr, fpr_base, slots.d)
    return (regs.gpr, gpr_base, slots.u64)
# positional args: GPRs start at $4 ($a0), FPRs at $f12
pos_wrap = partial(pos_chooser, 4, 12)
# varargs of *any* type are passed in $a0..$a7 on N32 (the second argument to
# the partially-applied va_chooser is unused)
va_wrap = partial(va_chooser, 4, 12)
def get_abi_fn_arg_map(node):
    '''c_ast -> fn_sig

    Build the N32 register/slot signature for the function declared by
    `node`: the return-value register plus one entry per fixed argument.
    '''
    # non-fp args are returned in $2..$3, fp-args in $f0,$f2
    # XXX multi-reg returns are not supported, but this is rare
    ret_chooser = partial(pos_chooser, 2, 0)
    caster = lambda x: cdecl.castify(x.type)
    rtype = utils.items_or_default(
        lambda: get_info_for_types([node], caster, ret_chooser)[0],
        None)  # if void return type, return None
    args = [x for (_, x) in node.args.children()]
    arg_types = utils.items_or_default(
        lambda: list(get_info_for_types(args, caster, pos_wrap)),
        [])  # if function takes no args, return empty list
    return ep_ct.fn_sig(rtype, arg_types)
def get_args_for_va_function(callee, pos_arg):
    '''str -> str -> [(reg_type, slot_type) | None]

    Parse the format string `pos_arg` of a recognized stdio-style callee
    and return register/slot info for its variadic arguments.
    Raises utils.BugError for an unrecognized callee.
    '''
    # Map a known callee to: the 0-indexed position after which varargs
    # start, the format-type -> C-type switch table, the format-string
    # parser, and the register chooser for the variadic tail.
    pos_sw = {'printf': (1, printf_sw, printf_parse, va_wrap),
              'scanf': (1, scanf_sw, scanf_parse, pos_wrap),
              'sscanf': (2, scanf_sw, scanf_parse, pos_wrap)}
    try:
        (pos, sw, fn, chooser) = pos_sw[callee]
    except KeyError:
        raise utils.BugError('unrecognized callee %s' % callee)
    (_, args) = fn(pos_arg)
    # A plain comprehension replaces the previous reduce()-with-list-concat,
    # which rebuilt the accumulator list on every step (quadratic) and read
    # `sw` from the enclosing scope before it was bound.
    convs = [sw[va_arg.type] for va_arg in args.arg]
    return list(get_info_for_types(
        convs, lambda x: x, chooser, pos=pos, handle_va=True))
| {
"repo_name": "nihilus/epanos",
"path": "decomp/cpu/mips/abi/n32.py",
"copies": "1",
"size": "8197",
"license": "mit",
"hash": 1934216864747196000,
"line_mean": 33.7330508475,
"line_max": 89,
"alpha_frac": 0.6086373063,
"autogenerated": false,
"ratio": 2.866083916083916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39747212223839157,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import chain
from math import ceil
from multiprocessing.dummy import Pool
from .request import request
def parallel(worker):
    """Decorator for methods taking a list: run ``worker(self, item)`` once
    per element on a thread pool (one thread per element) and return the
    flattened concatenation of the per-item result lists, in input order.
    """
    def wrapper(self, args):
        # Guard: multiprocessing Pool(0) raises ValueError, so short-circuit
        # on empty input instead of crashing.
        if not args:
            return []
        pool = Pool(len(args))
        try:
            # Pool.map preserves input order in its results.
            results = pool.map(partial(worker, self), args)
        finally:
            # Always release the pool's threads, even if a worker raised.
            pool.close()
            pool.join()
        return list(chain.from_iterable(results))
    return wrapper
class Collector(object):
    """
    Collect every page of a paginated API resource.

    ``count_url`` reports how many results match ``params``; ``find_url``
    returns one page at a time.  Pages are fetched concurrently via the
    ``@parallel`` decorator on :meth:`find`.
    """

    # Maximum page size the remote API accepts per request.
    MAT_APT_MAX_LIMIT = 2000

    def __init__(self, params, count_url, find_url):
        self.params = params
        self.params['limit'] = self.MAT_APT_MAX_LIMIT
        self.count_url = count_url
        self.find_url = find_url

    def collect(self):
        """Fetch all pages and return the combined list of results."""
        # NOTE(review): params is read with attribute access here but item
        # access elsewhere -- presumably a dict subclass with a `filter`
        # attribute; confirm against the caller.
        self.params['filter'] = str(self.params.filter)
        n_pages = self.count()
        if not n_pages:
            # Previously this fell off the end and returned None when there
            # were no pages; an empty list keeps the return type consistent.
            return []
        pages = range(1, n_pages + 1)
        params = [self.get_find_params(n) for n in pages]
        return self.find(params)

    def count(self):
        """Return the number of pages needed to cover all matching results."""
        params = self.get_count_params()
        response_json = request(self.count_url, params=params)
        n_results = float(response_json['data'])
        n_pages = ceil(n_results / self.params['limit'])
        return int(n_pages)

    @parallel
    def find(self, params):
        """Fetch one page; ``@parallel`` fans this out over all pages and
        flattens the per-page data lists."""
        response_json = request(self.find_url, params)
        return response_json['data']

    def get_count_params(self):
        """Params for the count endpoint (pagination keys removed)."""
        params = self.params.copy()
        params.pop('limit', None)
        params.pop('page', None)
        return params

    def get_find_params(self, thread_n):
        """Params for fetching page number ``thread_n``."""
        params = self.params.copy()
        params['page'] = thread_n
        return params
| {
"repo_name": "AntonSever/umat",
"path": "endpoints/service/collect.py",
"copies": "1",
"size": "1668",
"license": "mit",
"hash": -3699188848073569000,
"line_mean": 27.7586206897,
"line_max": 62,
"alpha_frac": 0.5995203837,
"autogenerated": false,
"ratio": 3.7823129251700682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881833308870068,
"avg_score": 0,
"num_lines": 58
} |
import collections
import collections.abc
import copy
import inspect
import logging
import warnings
from functools import partial
from itertools import chain
from operator import attrgetter

from django.apps import apps
from django.conf import settings
from django.core import validators
from django.db.models import options
from django.db.models.base import ModelBase
from django.utils.translation import gettext_lazy as _

import six
from ..compat import (
BaseModel,
ColumnDescriptor,
ModelDefinitionException,
ModelException,
ModelMetaClass,
OrderedDict,
columns,
query,
)
from . import django_field_methods, django_model_methods
from .constants import (
ORDER_BY_ERROR_HELP,
ORDER_BY_WARN,
PK_META_MISSING_HELP,
)
log = logging.getLogger(__name__)

# Attribute names under which Django expects to find a model manager; the
# metaclass points all of them at the cqlengine `objects` manager when a
# model class is built.
_django_manager_attr_names = ('objects', 'default_manager', '_default_manager',
                              'base_manager', '_base_manager')
class DjangoCassandraOptions(options.Options):
    """
    Django ``Options`` (``_meta``) implementation backed by cqlengine columns.

    Receives the model class via the ``cls`` keyword, decorates each defined
    cqlengine Column with Django-Field attributes/methods, then registers the
    columns as the model's (private) fields so Django machinery can
    introspect them as if they were Django fields.
    """

    # Error messages copied from Django's Field defaults so decorated
    # columns behave like Fields during validation.
    default_field_error_messages = {
        'invalid_choice': _('Value %(value)r is not a valid choice.'),
        'null': _('This field cannot be null.'),
        'blank': _('This field cannot be blank.'),
        'unique': _('%(model_name)s with this %(field_label)s '
                    'already exists.'),
        'unique_for_date': _("%(field_label)s must be unique for "
                             "%(date_field_label)s %(lookup_type)s."),
    }

    def __init__(self, *args, **kwargs):
        # The model class is passed explicitly because Options itself is
        # created before the model attribute exists.
        self.model_inst = kwargs.pop('cls')
        self._defined_columns = self.model_inst._defined_columns
        # Add Django attributes to Columns
        self._give_columns_django_field_attributes()
        # Call Django to create _meta object
        super(DjangoCassandraOptions, self).__init__(*args, **kwargs)
        self._private_fields_name = 'private_fields'
        if hasattr(self, 'virtual_fields'):
            # Django < 1.10 used the name 'virtual_fields'
            self._private_fields_name = 'virtual_fields'
        # Add Columns as Django Fields
        for column in six.itervalues(self._defined_columns):
            self.add_field(column)
        self.setup_pk()
        # Set further _meta attributes explicitly
        self.proxy_for_model = self.concrete_model = self.model_inst
        self.managed = False
        self.swappable = False

    def can_migrate(self, *args, **kwargs):
        # Cassandra models are never handled by Django migrations.
        return False

    def get_all_related_objects_with_model(self, *args, **kwargs):
        # No relational fields exist, so there are never related objects.
        return []

    @property
    def related_objects(self):
        return []

    def setup_pk(self):
        self.pk = self.model_inst._get_explicit_pk_column()

    def add_field(self, field, **kwargs):
        """Add each field as a private field."""
        getattr(self, self._private_fields_name).append(field)
        self._expire_cache(reverse=True)
        self._expire_cache(reverse=False)

    def _get_fields(self, *args, **kwargs):
        # Bypass Django's relational field collection entirely; the defined
        # cqlengine columns are the complete field set.
        fields = six.itervalues(self._defined_columns)
        return options.make_immutable_fields_list('get_fields()', fields)

    def _set_column_django_attributes(self, cql_column, name):
        """Stamp the Django-Field attributes Django code expects onto a
        cqlengine Column instance."""
        # `or` binds looser than `and`: a column may be null either when it
        # is optional and not part of the key, or when it has a default and
        # is not required.
        allow_null = (
            (not cql_column.required and
             not cql_column.is_primary_key and
             not cql_column.partition_key) or cql_column.has_default and not cql_column.required
        )
        cql_column.error_messages = self.default_field_error_messages
        cql_column.empty_values = list(validators.EMPTY_VALUES)
        cql_column.db_index = cql_column.index
        cql_column.serialize = True
        cql_column.unique = cql_column.is_primary_key
        cql_column.hidden = False
        cql_column.auto_created = False
        cql_column.help_text = ''
        cql_column.blank = allow_null
        cql_column.null = allow_null
        cql_column.choices = []
        cql_column.flatchoices = []
        cql_column.validators = []
        cql_column.editable = True
        cql_column.concrete = True
        # Columns never model relations.
        cql_column.many_to_many = False
        cql_column.many_to_one = False
        cql_column.one_to_many = False
        cql_column.one_to_one = False
        cql_column.is_relation = False
        cql_column.remote_field = None
        cql_column.unique_for_date = None
        cql_column.unique_for_month = None
        cql_column.unique_for_year = None
        cql_column.db_column = None
        cql_column.rel = None
        cql_column.attname = name
        cql_column.field = cql_column
        cql_column.model = self.model_inst
        cql_column.name = cql_column.db_field_name
        cql_column.verbose_name = cql_column.db_field_name
        cql_column._verbose_name = cql_column.db_field_name
        cql_column.field.related_query_name = lambda: None

    def _give_columns_django_field_attributes(self):
        """
        Add Django Field attributes to each cqlengine.Column instance.
        So that the Django Options class may interact with it as if it were
        a Django Field.
        """
        methods_to_add = (
            django_field_methods.value_from_object,
            django_field_methods.value_to_string,
            django_field_methods.get_attname,
            django_field_methods.get_cache_name,
            django_field_methods.pre_save,
            django_field_methods.get_prep_value,
            django_field_methods.get_choices,
            django_field_methods.get_choices_default,
            django_field_methods.save_form_data,
            django_field_methods.formfield,
            django_field_methods.get_db_prep_value,
            django_field_methods.get_db_prep_save,
            django_field_methods.db_type_suffix,
            django_field_methods.select_format,
            django_field_methods.get_internal_type,
            django_field_methods.get_attname_column,
            django_field_methods.check,
            django_field_methods._check_field_name,
            django_field_methods._check_db_index,
            django_field_methods.deconstruct,
            django_field_methods.run_validators,
            django_field_methods.clean,
            django_field_methods.get_db_converters,
            django_field_methods.get_prep_lookup,
            django_field_methods.get_db_prep_lookup,
            django_field_methods.get_filter_kwargs_for_object,
            django_field_methods.set_attributes_from_name,
            django_field_methods.db_parameters,
            django_field_methods.get_pk_value_on_save,
            django_field_methods.get_col,
        )
        for name, cql_column in six.iteritems(self._defined_columns):
            self._set_column_django_attributes(cql_column=cql_column, name=name)
            for method in methods_to_add:
                try:
                    method_name = method.func_name
                except AttributeError:
                    # python 3: functions expose __name__, not func_name
                    method_name = method.__name__
                # Bind each loose Field method to this column instance.
                new_method = six.create_bound_method(method, cql_column)
                setattr(cql_column, method_name, new_method)
class DjangoCassandraModelMetaClass(ModelMetaClass, ModelBase):
    """
    Metaclass combining cqlengine's ModelMetaClass with Django's ModelBase.

    The bulk of ``__new__`` is copied from cassandra-driver 3.3.0 (see the
    banner comments); after the cqlengine class is built, a Django
    ``_meta`` (DjangoCassandraOptions) is attached, managers are aliased,
    and the model is registered with Django's app registry.
    """

    def __new__(cls, name, bases, attrs):
        parents = [b for b in bases if isinstance(b, DjangoCassandraModelMetaClass)]
        if not parents:
            # Creating the base class itself: no column processing needed.
            return super(ModelBase, cls).__new__(cls, name, bases, attrs)

        for attr in _django_manager_attr_names:
            setattr(cls, attr, None)

        # ################################################################
        # start code taken from python-driver 3.3.0 ModelMetaClass.__new__
        # ################################################################
        column_dict = OrderedDict()
        primary_keys = OrderedDict()
        pk_name = None

        # get inherited properties
        inherited_columns = OrderedDict()
        for base in bases:
            for k, v in getattr(base, '_defined_columns', {}).items():
                inherited_columns.setdefault(k, v)

        # short circuit __abstract__ inheritance
        is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)

        # short circuit __discriminator_value__ inheritance
        attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')

        # TODO __default__ttl__ should be removed in the next major release
        options = attrs.get('__options__') or {}
        attrs['__default_ttl__'] = options.get('default_time_to_live')

        column_definitions = [(k, v) for k, v in attrs.items() if
                              isinstance(v, columns.Column)]
        column_definitions = sorted(column_definitions,
                                    key=lambda x: x[1].position)

        is_polymorphic_base = any(
            [c[1].discriminator_column for c in column_definitions])

        # inherited columns come first so subclasses may override them
        column_definitions = [x for x in
                              inherited_columns.items()] + column_definitions

        discriminator_columns = [c for c in column_definitions if
                                 c[1].discriminator_column]
        is_polymorphic = len(discriminator_columns) > 0
        if len(discriminator_columns) > 1:
            raise ModelDefinitionException(
                'only one discriminator_column can be defined in a model, {0} found'.format(
                    len(discriminator_columns)))

        if attrs['__discriminator_value__'] and not is_polymorphic:
            raise ModelDefinitionException(
                '__discriminator_value__ specified, but no base columns defined with discriminator_column=True')

        discriminator_column_name, discriminator_column = \
            discriminator_columns[0] if discriminator_columns else (None, None)

        if isinstance(discriminator_column,
                      (columns.BaseContainerColumn, columns.Counter)):
            raise ModelDefinitionException(
                'counter and container columns cannot be used as discriminator columns')

        # find polymorphic base class
        polymorphic_base = None
        if is_polymorphic and not is_polymorphic_base:
            def _get_polymorphic_base(bases):
                # depth-first search of the MRO for the polymorphic base
                for base in bases:
                    if getattr(base, '_is_polymorphic_base', False):
                        return base
                    klass = _get_polymorphic_base(base.__bases__)
                    if klass:
                        return klass
            polymorphic_base = _get_polymorphic_base(bases)

        defined_columns = OrderedDict(column_definitions)

        # check for primary key
        if not is_abstract and not any(
                [v.primary_key for k, v in column_definitions]):
            raise ModelDefinitionException(
                "At least 1 primary key is required.")

        counter_columns = [c for c in defined_columns.values() if
                           isinstance(c, columns.Counter)]
        data_columns = [c for c in defined_columns.values() if
                        not c.primary_key and not isinstance(c,
                                                             columns.Counter)]
        if counter_columns and data_columns:
            raise ModelDefinitionException(
                'counter models may not have data columns')

        has_partition_keys = any(
            v.partition_key for (k, v) in column_definitions)

        def _transform_column(col_name, col_obj):
            # record the column, index primary keys, and install the
            # descriptor so instances get attribute access
            column_dict[col_name] = col_obj
            if col_obj.primary_key:
                primary_keys[col_name] = col_obj
            col_obj.set_column_name(col_name)
            # set properties
            attrs[col_name] = ColumnDescriptor(col_obj)

        partition_key_index = 0
        # transform column definitions
        for k, v in column_definitions:
            # don't allow a column with the same name as a built-in attribute or method
            if k in BaseModel.__dict__:
                raise ModelDefinitionException(
                    "column '{0}' conflicts with built-in attribute/method".format(
                        k))

            # counter column primary keys are not allowed
            if (v.primary_key or v.partition_key) and isinstance(v,
                                                                 columns.Counter):
                raise ModelDefinitionException(
                    'counter columns cannot be used as primary keys')

            # this will mark the first primary key column as a partition
            # key, if one hasn't been set already
            if not has_partition_keys and v.primary_key:
                v.partition_key = True
                has_partition_keys = True
            if v.partition_key:
                v._partition_key_index = partition_key_index
                partition_key_index += 1

            overriding = column_dict.get(k)
            if overriding:
                # an overriding column keeps the original's position and
                # partition-key role
                v.position = overriding.position
                v.partition_key = overriding.partition_key
                v._partition_key_index = overriding._partition_key_index
            _transform_column(k, v)

        partition_keys = OrderedDict(
            k for k in primary_keys.items() if k[1].partition_key)
        clustering_keys = OrderedDict(
            k for k in primary_keys.items() if not k[1].partition_key)

        if attrs.get('__compute_routing_key__', True):
            key_cols = [c for c in partition_keys.values()]
            partition_key_index = dict(
                (col.db_field_name, col._partition_key_index) for col in
                key_cols)
            key_cql_types = [c.cql_type for c in key_cols]
            key_serializer = staticmethod(
                lambda parts, proto_version: [t.to_binary(p, proto_version) for
                                              t, p in
                                              zip(key_cql_types, parts)])
        else:
            partition_key_index = {}
            key_serializer = staticmethod(lambda parts, proto_version: None)

        # setup partition key shortcut
        if len(partition_keys) == 0:
            if not is_abstract:
                raise ModelException(
                    "at least one partition key must be defined")
        if len(partition_keys) == 1:
            pk_name = [x for x in partition_keys.keys()][0]
            attrs['pk'] = attrs[pk_name]
        else:
            # composite partition key case, get/set a tuple of values
            _get = lambda self: tuple(
                self._values[c].getval() for c in partition_keys.keys())
            _set = lambda self, val: tuple(
                self._values[c].setval(v) for (c, v) in
                zip(partition_keys.keys(), val))
            attrs['pk'] = property(_get, _set)

        # some validation
        col_names = set()
        for v in column_dict.values():
            # check for duplicate column names
            if v.db_field_name in col_names:
                raise ModelException(
                    "{0} defines the column '{1}' more than once".format(name,
                                                                         v.db_field_name))
            if v.clustering_order and not (
                    v.primary_key and not v.partition_key):
                raise ModelException(
                    "clustering_order may be specified only for clustering primary keys")
            if v.clustering_order and v.clustering_order.lower() not in (
                    'asc', 'desc'):
                raise ModelException(
                    "invalid clustering order '{0}' for column '{1}'".format(
                        repr(v.clustering_order), v.db_field_name))
            col_names.add(v.db_field_name)

        # create db_name -> model name map for loading
        db_map = {}
        for col_name, field in column_dict.items():
            db_field = field.db_field_name
            if db_field != col_name:
                db_map[db_field] = col_name

        # add management members to the class
        attrs['_columns'] = column_dict
        attrs['_primary_keys'] = primary_keys
        attrs['_defined_columns'] = defined_columns

        # maps the database field to the models key
        attrs['_db_map'] = db_map

        attrs['_pk_name'] = pk_name
        attrs['_dynamic_columns'] = {}

        attrs['_partition_keys'] = partition_keys
        attrs['_partition_key_index'] = partition_key_index
        attrs['_key_serializer'] = key_serializer
        attrs['_clustering_keys'] = clustering_keys
        attrs['_has_counter'] = len(counter_columns) > 0

        # add polymorphic management attributes
        attrs['_is_polymorphic_base'] = is_polymorphic_base
        attrs['_is_polymorphic'] = is_polymorphic
        attrs['_polymorphic_base'] = polymorphic_base
        attrs['_discriminator_column'] = discriminator_column
        attrs['_discriminator_column_name'] = discriminator_column_name
        attrs['_discriminator_map'] = {} if is_polymorphic_base else None

        # setup class exceptions: inherit DoesNotExist from the nearest base
        # that defines one, else subclass the cqlengine default
        DoesNotExistBase = None
        for base in bases:
            DoesNotExistBase = getattr(base, 'DoesNotExist', None)
            if DoesNotExistBase is not None:
                break

        DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist',
                                                         BaseModel.DoesNotExist)
        attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})

        MultipleObjectsReturnedBase = None
        for base in bases:
            MultipleObjectsReturnedBase = getattr(base,
                                                  'MultipleObjectsReturned',
                                                  None)
            if MultipleObjectsReturnedBase is not None:
                break

        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop(
            'MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
        attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned',
                                                (MultipleObjectsReturnedBase,),
                                                {})

        # create the class and add a QuerySet to it
        klass = super(ModelBase, cls).__new__(cls, name, bases, attrs)

        udts = []
        for col in column_dict.values():
            columns.resolve_udts(col, udts)

        # for user_type in set(udts):
        #     user_type.register_for_keyspace(klass._get_keyspace())

        # ################################################################
        # end code taken from python-driver 3.3.0 ModelMetaClass.__new__
        # ################################################################

        klass._deferred = False

        if not is_abstract:
            klass = cls._add_django_meta_and_register_model(
                klass=klass,
                attrs=attrs,
                name=name
            )

        return klass

    def add_to_class(cls, name, value):
        """Django-style attribute installation, with ``options.DEFAULT_NAMES``
        temporarily patched so ``Meta.get_pk_field`` is accepted."""
        django_meta_default_names = options.DEFAULT_NAMES
        # patch django so Meta.get_pk_field can be specified these models
        options.DEFAULT_NAMES = django_meta_default_names + ('get_pk_field',)
        # We should call the contribute_to_class method only if it's bound
        if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            try:
                setattr(cls, name, value)
            except AttributeError:
                raise AttributeError('failed to set attribute {}'.format(name))
        options.DEFAULT_NAMES = django_meta_default_names

    @classmethod
    def _add_django_meta_and_register_model(cls, klass, attrs, name):
        """Attach a DjangoCassandraOptions ``_meta``, alias the managers,
        and register the model with Django's app registry."""
        # Create the class.
        module = attrs.get('__module__')
        if not module:
            return klass

        new_class = klass
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta

        if meta:
            # Cassandra models must never be picked up by migrations.
            meta.managed = False

        app_label = None

        # Look for an application configuration to attach the model to.
        app_config = apps.get_containing_app_config(module)

        if getattr(meta, 'app_label', None) is None:
            if app_config is None:
                if not abstract:
                    raise RuntimeError(
                        "Model class %s.%s doesn't declare an explicit "
                        "app_label and isn't in an application in "
                        "INSTALLED_APPS." % (module, name)
                    )
            else:
                app_label = app_config.label

        # Add _meta/Options attribute to the model
        new_class.add_to_class(
            '_meta', DjangoCassandraOptions(meta, app_label, cls=new_class))

        # Add manager to the model
        for manager_attr in _django_manager_attr_names:
            new_class.add_to_class(manager_attr, new_class.objects)

        # Register the model
        new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
        return new_class

    @classmethod
    def check(cls, **kwargs):
        # Django system checks: nothing to validate for Cassandra models.
        errors = []
        return errors
def convert_pk_field_names_to_real(model, field_names):
    """
    Convert field names including 'pk' to the real field names:
    >>> convert_pk_field_names_to_real(['pk', 'another_field'])
    ['real_pk_field', 'another_field']

    '-pk' expands to the negated ('-'-prefixed) primary-key names.
    Duplicates are dropped while preserving first-seen order.
    """
    pk_names = [col.name for col in model._get_primary_key_columns()]
    expansion = {'pk': pk_names, '-pk': ['-' + n for n in pk_names]}
    expanded = chain.from_iterable(
        expansion.get(name, [name]) for name in field_names)
    result = []
    for name in expanded:
        if name not in result:
            result.append(name)
    return result
class ReadOnlyDjangoCassandraQuerySet(list):
    """
    A fully-materialized, read-only queryset.

    Returned once results have been ordered in Python (see
    ``DjangoCassandraQuerySet.python_order_by``): further database-level
    operations (filter/get/limit/...) are no longer possible and raise
    ``NotImplementedError``.
    """
    name = 'objects'
    use_in_migrations = False

    def __init__(self, data, model_class):
        # ``collections.Iterable`` was removed in Python 3.10; the ABC
        # lives in collections.abc.
        if not isinstance(data, collections.abc.Iterable):
            raise TypeError(
                'ReadOnlyDjangoCassandraQuerySet requires iterable data')
        super(ReadOnlyDjangoCassandraQuerySet, self).__init__(data)
        self.model = model_class
        self.query = StubQuery(model=self.model)

    @property
    def objects(self):
        return self

    def first(self):
        """Return the first item, or None when the queryset is empty."""
        return next(iter(self), None)

    def _clone(self):
        return copy.deepcopy(self)

    def all(self):
        return self

    def get_queryset(self):
        return self

    def count(self):
        return len(self)

    def exists(self):
        return len(self) > 0

    def values_list(self, *fields, **kwargs):
        """Mirror Django's ``values_list()``; supports ``flat=True``."""
        fields = convert_pk_field_names_to_real(model=self.model,
                                                field_names=fields)
        values_list = []
        for model_record in self:
            values_list_item = []
            for field_name in fields:
                values_list_item.append(model_record[field_name])
            values_list.append(values_list_item)
        if kwargs.get('flat') is True:
            values_list = list(chain.from_iterable(values_list))
        return values_list

    def _raise_not_implemented(self, method_name):
        raise NotImplementedError(
            'You cannot .{}() on a DjangoCassandraQuerySet which '
            'has been ordered using python'.format(method_name)
        )

    def filter(self, **kwargs):
        self._raise_not_implemented(method_name='filter')

    def get(self, **kwargs):
        self._raise_not_implemented(method_name='get')

    def distinct(self, *args, **kwargs):
        self._raise_not_implemented(method_name='distinct')

    def limit(self, *args, **kwargs):
        self._raise_not_implemented(method_name='limit')

    def only(self, *args, **kwargs):
        self._raise_not_implemented(method_name='only')

    def create(self, *args, **kwargs):
        self._raise_not_implemented(method_name='create')

    def delete(self, *args, **kwargs):
        self._raise_not_implemented(method_name='delete')

    def defer(self, *args, **kwargs):
        self._raise_not_implemented(method_name='defer')

    def exclude(self, *args, **kwargs):
        # Bug fix: previously reported 'defer' in the error message.
        self._raise_not_implemented(method_name='exclude')
class StubQuery(object):
    """Minimal stand-in for Django's SQL ``Query`` object, exposing just the
    attributes Django machinery pokes at (``order_by``, ``select_related``,
    context hooks, ``get_meta``)."""

    def __init__(self, model):
        self.model = model
        self.order_by = ['pk']

    @property
    def select_related(self):
        """Relations are unsupported, so select_related is always off."""
        return False

    def add_context(self, *args, **kwargs):
        # Intentionally a no-op.
        return None

    def get_context(self, *args, **kwargs):
        # No context is ever stored.
        return dict()

    def get_meta(self):
        """Return the model's ``_meta`` options object."""
        return self.model._meta

    def _prepare(self, field):
        # Django expects a query back from _prepare; hand back ourselves.
        return self
class DjangoCassandraQuerySet(query.ModelQuerySet):
    """
    cqlengine ModelQuerySet with Django-compatible behavior: a stub
    ``query`` attribute, 'pk' field-name handling, and an ``order_by``
    that can fall back to sorting in Python.

    NOTE(review): the repeated ``super(query.ModelQuerySet, self)`` calls
    deliberately skip ModelQuerySet's own overrides and dispatch to its
    parent class -- confirm against the cqlengine version in use before
    changing them.
    """
    name = 'objects'
    use_in_migrations = False

    def __init__(self, *args, **kwargs):
        super(query.ModelQuerySet, self).__init__(*args, **kwargs)
        # ALLOW FILTERING is always requested so arbitrary Django-style
        # filters work.
        self._allow_filtering = True
        self.query = StubQuery(model=self.model)

    def _select_fields(self):
        """Columns to SELECT, honoring defer()/only() restrictions."""
        if self._defer_fields or self._only_fields:
            fields = self.model._columns.keys()
            if self._defer_fields:
                fields = [f for f in fields if f not in self._defer_fields]
            elif self._only_fields:
                fields = self._only_fields
            return [self.model._columns[f].db_field_name for f in fields]
        return super(query.ModelQuerySet, self)._select_fields()

    def count(self):
        # Cache the count for the lifetime of this queryset instance.
        if self._count is None:
            self._count = super(query.ModelQuerySet, self).count()
        return self._count

    def get_queryset(self):
        if len(self._where) > 0:
            return super(query.ModelQuerySet, self).filter()
        else:
            return super(query.ModelQuerySet, self).all()

    def exclude(self, *args, **kwargs):
        """Negative filtering done client-side (CQL has no NOT EQUAL);
        returns a read-only, materialized queryset."""
        new_queryset = []
        for model in self.get_queryset():
            should_exclude_model = False
            for field_name, field_value in six.iteritems(kwargs):
                if getattr(model, field_name) == field_value:
                    should_exclude_model = True
                    break
            if not should_exclude_model:
                new_queryset.append(model)
        return ReadOnlyDjangoCassandraQuerySet(
            new_queryset, model_class=self.model)

    def python_order_by(self, qset, colnames):
        """Sort the materialized ``qset`` in Python by ``colnames``
        ('-' prefix reverses) and wrap it read-only."""
        if not isinstance(qset, list):
            raise TypeError('qset must be a list')
        colnames = convert_pk_field_names_to_real(model=self.model,
                                                  field_names=colnames)
        any_cols_revesed = any(c.startswith('-') for c in colnames)
        if any_cols_revesed:
            # Mixed directions: one stable sort per column (list.sort is
            # stable, so later keys dominate earlier ones).
            for col in colnames:
                should_reverse = col.startswith('-')
                if should_reverse:
                    col = col[1:]
                qset.sort(key=attrgetter(col), reverse=should_reverse)
        else:
            # NOTE(review): colnames were already expanded above, so this
            # 'pk' branch appears unreachable -- confirm before removing.
            new_colnames = []
            for col in colnames:
                if col == 'pk':
                    pk_cols = self.model._get_primary_key_column_names()
                    for pk_name in pk_cols:
                        new_colnames.append(pk_name)
                else:
                    new_colnames.append(col)
            try:
                qset.sort(key=attrgetter(*new_colnames))
            except AttributeError:
                msg = 'Can\'t resolve one of column names: {}'.format(
                    *new_colnames)
                raise query.QueryException(msg)
        return ReadOnlyDjangoCassandraQuerySet(qset, model_class=self.model)

    def exists(self):
        return self.count() > 0

    def get(self, *args, **kwargs):
        obj = super(DjangoCassandraQuerySet, self).get(*args, **kwargs)
        # Expose Django's conventional .pk alias on the instance.
        obj.pk = getattr(obj, obj._get_explicit_pk_column().name)
        return obj

    def order_by(self, *colnames):
        """Database-side ordering where CQL allows it; optional fallback to
        Python sorting (settings.CASSANDRA_FALLBACK_ORDER_BY_PYTHON)."""
        if len(colnames) == 0:
            # order_by() with no args clears any existing ordering.
            clone = copy.deepcopy(self)
            clone._order = []
            return clone

        order_using_python = False
        conditions = []
        for col in colnames:
            try:
                if hasattr(col, 'resolve_expression'):
                    warnings.warn('Sorting by Django DB Expressions is not supported')
                    continue
                conditions.append('"{0}" {1}'.format(
                    *self._get_ordering_condition(col))
                )
            except query.QueryException as exc:
                # Only CQL ordering restrictions trigger the Python
                # fallback; anything else propagates.
                order_by_exception = (
                    'Can\'t order' in str(exc) or
                    'Can\'t resolve the column name' in str(exc)
                )
                if order_by_exception:
                    order_using_python = settings.CASSANDRA_FALLBACK_ORDER_BY_PYTHON
                    if order_using_python:
                        log.debug('ordering in python column "%s"', col)
                        msg = ORDER_BY_WARN.format(col=col, exc=exc)
                        warnings.warn(msg)
                    else:
                        raise query.QueryException(
                            '{exc}\n\n'
                            '{help}'.format(exc=exc, help=ORDER_BY_ERROR_HELP))
                else:
                    raise exc

        clone = copy.deepcopy(self)
        if order_using_python is True:
            return self.python_order_by(qset=list(clone), colnames=colnames)
        else:
            clone._order.extend(conditions)
            return clone

    def values_list(self, *fields, **kwargs):
        if 'pk' in fields:
            fields = convert_pk_field_names_to_real(
                model=self.model, field_names=fields)
        super_values_list = super(DjangoCassandraQuerySet, self).values_list
        return super_values_list(*fields, **kwargs)

    def _clone(self):
        return copy.deepcopy(self)

    def iterator(self, *args, **kwargs):
        return super(query.ModelQuerySet, self).all()
class DjangoCassandraModel(
    six.with_metaclass(DjangoCassandraModelMetaClass, BaseModel)
):
    """
    Abstract base model: a cqlengine BaseModel built through
    DjangoCassandraModelMetaClass so it carries a Django ``_meta`` and
    Django-style managers.  Query entry points live on ``objects``; the
    class-level cqlengine shortcuts (get/filter/all) are disabled.
    """
    # QuerySet class used by the manager.
    __queryset__ = DjangoCassandraQuerySet

    __abstract__ = True
    __table_name__ = None
    __table_name_case_sensitive__ = False
    __keyspace__ = None
    __options__ = None
    __discriminator_value__ = None
    __compute_routing_key__ = True

    def __init__(self, *args, **kwargs):
        super(DjangoCassandraModel, self).__init__(*args, **kwargs)
        # Bind every function from django_model_methods onto this instance
        # so it quacks like a Django model (save/delete/validation hooks...).
        methods = inspect.getmembers(django_model_methods, inspect.isfunction)
        for method_name, method in methods:
            new_method = partial(method, self)
            setattr(self, method_name, new_method)

    def __hash__(self):
        # Mirror Django: unsaved instances (no pk) are unhashable.
        if self._get_pk_val() is None:
            raise TypeError("Model instances without primary key value are unhashable")
        return hash(self._get_pk_val())

    @classmethod
    def get(cls, *args, **kwargs):
        # Disabled on the class -- use Model.objects.get() instead.
        raise AttributeError('model has no attribute \'get\'')

    @classmethod
    def filter(cls, *args, **kwargs):
        # Disabled on the class -- use Model.objects.filter() instead.
        raise AttributeError('model has no attribute \'filter\'')

    @classmethod
    def all(cls, *args, **kwargs):
        # Disabled on the class -- use Model.objects.all() instead.
        raise AttributeError('model has no attribute \'all\'')

    @classmethod
    def _get_primary_key_columns(cls):
        return tuple(c for c in six.itervalues(cls._columns)
                     if c.is_primary_key is True)

    @classmethod
    def _get_primary_key_column_names(cls):
        return tuple(c.name for c in cls._get_primary_key_columns())

    @classmethod
    def _get_column(cls, name):
        """
        Based on cqlengine.models.BaseModel._get_column.
        But to work with 'pk'
        """
        if name == 'pk':
            return cls._meta.get_field(cls._meta.pk.name)
        return cls._columns[name]

    @classmethod
    def _get_explicit_pk_column(cls):
        """Return the single primary-key column, the one named by
        ``Meta.get_pk_field`` for composite keys, or None when the model
        defines no primary keys."""
        try:
            if len(cls._primary_keys) > 1:
                try:
                    pk_field = cls.Meta.get_pk_field
                except AttributeError:
                    raise RuntimeError(PK_META_MISSING_HELP.format(cls))
                return cls._primary_keys[pk_field]
            else:
                return list(six.itervalues(cls._primary_keys))[0]
        except IndexError:
            return None
| {
"repo_name": "r4fek/django-cassandra-engine",
"path": "django_cassandra_engine/models/__init__.py",
"copies": "1",
"size": "32499",
"license": "bsd-2-clause",
"hash": -1024666503662155800,
"line_mean": 36.3551724138,
"line_max": 112,
"alpha_frac": 0.5651250808,
"autogenerated": false,
"ratio": 4.293130779392338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5358255860192338,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import chain
import operator as op
import random
import numpy as np
def sanitize_table_name(table_name):
    """Escape embedded single quotes by doubling them (SQL-style quoting)."""
    return table_name.replace("'", "''")
# benchmark => "'benchmark'"
# 100 => "100"
def quote(x):
    """Single-quote x if it is a string, otherwise stringify it.

    Fix: referencing the bare name ``unicode`` raised NameError on Python 3;
    it is now probed inside a try/except so the function works on both 2 and 3.
    """
    try:
        string_types = (str, unicode)  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = (str,)
    if type(x) in string_types:
        return "\'{}\'".format(x)
    else:
        return str(x)
# "\infty" => "\infty"
# 3.141592654 => "3.14"
def formatter(x, d=0):
if type(x) is str: return x
else: return "{0:,.{1}f}".format(x, d)
# initialize dic[k] only if key k is not bound
# i.e., preserve the previous one if mappings for k already exist
def init_k(dic, k):
    """Bind dic[k] to a fresh {} unless k already has a value."""
    dic.setdefault(k, {})
# make a new entry of list type or append the given item
# e.g., {x: [1]}, x, 2 => {x: [1,2]}
#       {x: [1]}, y, 2 => {x: [1], y: [2]}
def mk_or_append(dic, k, v, uniq=False):
    """Append v to the list at dic[k], creating the list if k is unbound.

    If ``uniq`` is True, v is appended only when not already present.

    Fix: the original disabled the uniqueness check (it was commented out),
    so ``uniq=True`` silently dropped EVERY value for an existing key.
    """
    if k not in dic:  # new occurrence of key k
        dic[k] = [v]
    elif not uniq or v not in dic[k]:  # uniq => skip values already recorded
        dic[k].append(v)
# ~ List.split in OCaml
# transform a list of pairs into a pair of lists
# e.g., [ (1, 'a'), (2, 'b'), (3, 'c') ] -> ([1, 2, 3], ['a', 'b', 'c'])
def split(lst):
    """Transpose a list of pairs into a pair of column lists.

    Degenerate inputs keep the original (Python 2) behaviour:
    - []                  -> ([], [])
    - [(1,), (2,)]        -> [1, 2]        (single column returned flat)
    - [(1,2,3), (4,5,6)]  -> [1, 4]        (first column only)
    - [1, 2, 3]           -> [1, 2, 3]     (already 1-d, returned unchanged)

    Fix: on Python 3 the old ``zip(*lst)[0]`` inside the except-clause raised
    an uncaught TypeError (zip objects are not subscriptable).
    """
    if not lst:
        return ([], [])
    try:
        columns = list(zip(*lst))  # zip doesn't handle an empty list
    except TypeError:  # elements not iterable: already a 1d list
        return lst
    if len(columns) == 2:
        return (list(columns[0]), list(columns[1]))
    # one column (or more than two): return the first column as a flat list
    return list(columns[0])
# ~ List.flatten in OCaml
# e.g., [ [1], [2, 3], [4] ] -> [1, 2, 3, 4]
def flatten(lstlst):
    """Concatenate one level of nesting into a single flat list."""
    return [item for sub in lstlst for item in sub]
# remove anything that is evaluated to False, such as None, 0, or empty string
# e.g., [0, 1, 2, None, 3] -> [1, 2, 3]
#       ["a", "", "z"] => ["a", "z"]
def ffilter(lst):
    """Drop falsy items, always returning a list.

    Fix: ``filter(None, lst)`` returns a lazy iterator on Python 3, which
    contradicts the list examples above; a comprehension is eager on both.
    """
    return [item for item in lst if item]
# transform a list of pairs into a dict
def to_dict(lst):
    """Build a dict from (key, value) pairs; empty or None input gives {}."""
    if lst:
        return dict(lst)
    return {}
# http://stackoverflow.com/questions/9807634/find-all-occurences-of-a-key-in-nested-python-dictionaries-and-lists
def gen_dict_extract(key, var):
    """Recursively yield every value stored under ``key`` in nested dicts/lists.

    Fix: the original only probed ``iteritems`` and therefore yielded nothing
    on Python 3; it now falls back to ``items`` (dicts on both versions).
    """
    if hasattr(var, 'iteritems'):   # Python 2 dict
        items = var.iteritems()
    elif hasattr(var, 'items'):     # Python 3 dict
        items = var.items()
    else:                           # not a mapping: nothing to extract
        return
    for k, v in items:
        if k == key:
            yield v
        if isinstance(v, dict):
            for result in gen_dict_extract(key, v):
                yield result
        elif isinstance(v, list):
            for d in v:
                for result in gen_dict_extract(key, d):
                    yield result
def find_all(dic, k):
    """Return a flat list of every value stored under key k anywhere in dic."""
    matches = gen_dict_extract(k, dic)
    return flatten(list(matches))
# calculate percentiles
# default: quartiles: 0%, 25%, 50%, 75%, 100%
def calc_percentile(lst, ps=[0, 25, 50, 75, 100]):
    # NOTE: the mutable default `ps` is safe only because it is never mutated.
    # `split` normalizes the input; for a flat number list it is returned
    # unchanged -- assumes callers pass data `split` can flatten; TODO confirm
    # the (flag, time) pair case against the callers.
    _lst = split(lst)
    a = np.array(_lst)
    f = partial(np.percentile, a)
    # Python 2 `map` returns a list; calc_siqr unpacks exactly 5 values from it.
    return map(f, ps)
# calculate semi-interquartile range
def calc_siqr(lst):
    """Return (median, semi-interquartile range) of lst."""
    _, lower_q, median, upper_q, _ = calc_percentile(lst)
    return median, (upper_q - lower_q) / 2
# sort both lists according to the order of the 1st list
def sort_both(l1, l2):
    """Sort the paired lists by l1's values, keeping pairs aligned."""
    paired = sorted(zip(l1, l2), key=op.itemgetter(0))
    return zip(*paired)
# merge Succeed/Failed data
def merge_succ_fail(data, succ_weight=-1):
    """Merge per-benchmark "Succeed"/"Failed" timing lists into one shuffled
    "ttime" list of (succeeded, time) pairs; other keys are copied through.

    If a config has no "Succeed" entry and succ_weight > 0, a synthetic
    success at max_time * succ_weight is appended (avoids zero division in
    downstream success-rate math, per the original comment).

    Fix: the original called random.shuffle on the enclosing one-key dict,
    which was a silent no-op; the "ttime" list itself is now shuffled.
    """
    merged = {}
    for b in data:
        merged[b] = {}
        for d in data[b]:
            merged[b][d] = {"ttime": []}
            max_time = 0
            if "Succeed" in data[b][d]:
                for t in data[b][d]["Succeed"]:
                    merged[b][d]["ttime"].append((True, t))
                    if t > max_time:
                        max_time = t
            if "Failed" in data[b][d]:
                for t in data[b][d]["Failed"]:
                    merged[b][d]["ttime"].append((False, t))
                    if t > max_time:
                        max_time = t
            random.shuffle(merged[b][d]["ttime"])
            # add a default succ case (to avoid zero division)
            if "Succeed" not in data[b][d] and succ_weight > 0:
                merged[b][d]["ttime"].append((True, max_time * succ_weight))
            # copy through every non-timing key untouched
            for k in data[b][d]:
                if k in ("Succeed", "Failed"):
                    continue
                merged[b][d][k] = data[b][d][k]
    return merged
| {
"repo_name": "plum-umd/adaptive-concretization",
"path": "util.py",
"copies": "1",
"size": "3856",
"license": "mit",
"hash": 8051133744522881000,
"line_mean": 25.4109589041,
"line_max": 113,
"alpha_frac": 0.5692427386,
"autogenerated": false,
"ratio": 2.820775420629115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38900181592291144,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import chain
import factory
from pycroft.model.user import Membership, PropertyGroup
from .base import BaseFactory
from .user import UserFactory
class MembershipFactory(BaseFactory):
    """Factory for Membership rows.

    NOTE(review): `group` is a non-nullable column but defaults to None here,
    so callers must always pass `group=` explicitly -- confirm this is the
    intended usage contract.
    """
    class Meta:
        model = Membership
    # open-ended membership interval by default
    begins_at = None
    ends_at = None
    user = factory.SubFactory(UserFactory)
    # note: group is non-nullable!
    group = None
def _maybe_append_seq(n, prefix):
"""Append a sequence value to a prefix if non-zero"""
if not n:
return prefix
return "{} {}".format(prefix, n)
class PropertyGroupFactory(BaseFactory):
    """Factory for PropertyGroup.

    `granted`/`denied` are factory-only inputs (excluded from model kwargs)
    that are collapsed into the model's `property_grants` dict.
    """
    class Meta:
        model = PropertyGroup
        exclude = ('granted', 'denied')
    granted = frozenset()
    denied = frozenset()
    name = factory.Sequence(lambda n: "Property group %s" % n)

    @factory.lazy_attribute
    def property_grants(self):
        # granted names map to True, denied names to False
        return dict(chain(((k, True) for k in self.granted),
                          ((k, False) for k in self.denied)))
class AdminPropertyGroupFactory(PropertyGroupFactory):
    """Property group granting the admin permission set (show/change across
    users, finance, infrastructure, facilities and groups)."""
    name = factory.Sequence(partial(_maybe_append_seq, prefix="Admin-Gruppe"))
    granted = frozenset((
        'user_show', 'user_change', 'user_mac_change',
        'finance_show', 'finance_change',
        'infrastructure_show', 'infrastructure_change',
        'facilities_show', 'facilities_change',
        'groups_show', 'groups_change_membership', 'groups_change',
    ))
class MemberPropertyGroupFactory(PropertyGroupFactory):
    """Property group granting the regular-member permission set (ldap, mail,
    network access, userdb/userwww, membership fee)."""
    name = factory.Sequence(partial(_maybe_append_seq, prefix="Mitglied-Gruppe"))
    granted = frozenset((
        'ldap', 'ldap_login_enabled', 'mail', 'member', 'membership_fee',
        'network_access', 'userdb', 'userwww'
    ))
| {
"repo_name": "lukasjuhrich/pycroft",
"path": "tests/factories/property.py",
"copies": "1",
"size": "1737",
"license": "apache-2.0",
"hash": 4139817203823906000,
"line_mean": 27.4754098361,
"line_max": 81,
"alpha_frac": 0.6586067933,
"autogenerated": false,
"ratio": 3.8859060402684564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5044512833568456,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import chain, tee
import logging
import numpy as np
log = logging.getLogger(__name__)
PI2 = 2 * np.pi
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0, s1), (s1, s2), ..."""
    first, second = tee(iterable)
    next(second, None)  # advance the trailing copy by one
    return zip(first, second)
def grouper(iterable, n):
    """Chunk into non-overlapping n-tuples, dropping any short leftover."""
    chunks = [iter(iterable)] * n  # n references to ONE iterator
    return zip(*chunks)
def test_split_vertical():
    left, right = split_vertical([[1, 2], [3, 4]])
    assert left.tolist() == [[1], [3]]
    assert right.tolist() == [[2], [4]]
def split_vertical(mat):
    """Split a 2-d array into (left, right) halves along axis 1.

    Fix: ``shape[1] / 2`` produces a float on Python 3 and float slice
    indices raise TypeError; integer division restores Python 2 behaviour.
    For odd widths the left half gets the smaller share (floor).
    """
    mat = np.asarray(mat)
    half = mat.shape[1] // 2
    return mat[:, :half], mat[:, half:]
def test_iconcatenate_pairs():
    triples = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    rows = [list(row) for row in iconcatenate_pairs(triples)]
    assert rows == [
        [1, 2, 3, 4, 5, 6],
        [4, 5, 6, 7, 8, 9],
    ]
def iconcatenate_pairs(items):
    """Yield each consecutive overlapping pair of arrays, concatenated."""
    for left, right in pairwise(items):
        yield np.concatenate((left, right))
def is_power_of_two(val):
    """True iff val is a positive power of two (falsy val is returned as-is)."""
    if not val:
        return val
    return (val & (val - 1)) == 0
def gen_halfs(arrays, size):
    """Split each array at size//2 and yield the non-empty pieces in order."""
    halfsize = size // 2
    for array in arrays:
        for piece in split_array(array, halfsize):
            if len(piece):
                yield piece
def test_gen_halfs():
    data = [[1, 2, 3, 4], [5, 6, 7]]
    assert list(gen_halfs(data, 4)) == [[1, 2], [3, 4], [5, 6], [7]]
def split_array(array, where):
    """Split a sequence into (head, tail) at index `where`."""
    head = array[:where]
    tail = array[where:]
    return head, tail
def map_only_last(fn, iterable):
    """Yield items unchanged, applying `fn` only to the final one.

    Fix: on an empty iterable the bare ``next()`` leaked StopIteration out of
    the generator, which is a RuntimeError under PEP 479 (Python 3.7+); it is
    now caught so an empty input simply yields nothing (the py2 behaviour).
    """
    items = iter(iterable)
    try:
        last = next(items)
    except StopIteration:
        return
    for elem in items:
        yield last
        last = elem
    yield fn(last)
def test_map_only_last():
    result = list(map_only_last(lambda v: v + 1, range(3)))
    assert result == [0, 1, 3]
class NumpyPadder(object):
    """Callable that right-pads 1-d arrays with zeros up to a fixed size.

    After each call, ``original_size`` and ``pad_size`` describe the most
    recently processed array (callers use them to trim padding later).

    Fix: the original ended with ``assert False`` followed by an unreachable
    ``raise``; under ``python -O`` the assert vanishes entirely. An input
    longer than ``size`` now raises ValueError explicitly.
    """
    def __init__(self, size):
        self.size = size

    def __call__(self, array):
        self.original_size = len(array)
        self.pad_size = self.size - self.original_size
        if self.pad_size == 0:
            return array
        elif self.pad_size > 0:
            return np.pad(array, (0, self.pad_size), 'constant')
        raise ValueError('Pad size < 0: array of length {} exceeds {}'.format(
            self.original_size, self.size))
class BaseWaveletBox(object):
    """Base class for CWT (continuous wavelet transform) computation over a
    sound, processed in overlapping power-of-two sample blocks.

    Subclasses must provide ``self.cwt(windowed_piece, decimate, **kwargs)``.
    """
    def __init__(self, nsamples, samplerate, scale_resolution, omega0):
        # Block length (samples per processing window); must be 2**k so the
        # half-overlap arithmetic below stays exact.
        if not is_power_of_two(nsamples):
            raise Exception(u'nsamples must be power of two')
        self.nsamples = nsamples
        self.omega0 = omega0
        self.scales = autoscales(nsamples, samplerate,
                                 scale_resolution, omega0)
        self.angular_frequencies = angularfreq(nsamples, samplerate)

    @property
    def frequencies(self):
        # Set coefficient in accordance with wavelet type
        # NOTE(review): the constants 11 and 70 are unexplained Morlet-specific
        # calibration factors -- confirm against the wavelet derivation.
        return 11 * (self.omega0 / 70) / self.scales

    def sound_apply_cwt(self, sound, progressbar, **kwargs):
        """Run the CWT over a sound object, reporting progress per block."""
        blocks = sound.get_blocks(self.nsamples)
        # blocks = sound.get_blocks(self.nsamples//2)
        with progressbar(blocks) as blocks_:
            return self._apply_cwt(blocks_, progressbar, **kwargs)

    def _apply_cwt(self, blocks, progressbar, decimate, **kwargs):
        """Overlap-add pipeline: split blocks into half-windows, zero-pad the
        edges, Hann-window each full-size overlapped block, CWT it, then sum
        the overlapping image halves back together."""
        half_nsamples = self.nsamples // 2
        chunks = gen_halfs(blocks, self.nsamples)
        # Pad the final (possibly short) half-chunk up to half_nsamples.
        padder = NumpyPadder(half_nsamples)
        equal_sized_pieces = map_only_last(padder, chunks)
        # Zero half-blocks at both ends give every real half two partners.
        zero_pad = np.zeros(half_nsamples)
        overlapped_blocks = iconcatenate_pairs(
            chain([zero_pad], equal_sized_pieces, [zero_pad])
        )
        # NOTE(review): overlapped_blocks is a generator multiplied by an
        # ndarray -- confirm the intended element-wise windowing semantics.
        windowed_pieces = overlapped_blocks * np.hanning(self.nsamples)
        complex_images = [
            self.cwt(windowed_piece, decimate, **kwargs)
            for windowed_piece in windowed_pieces
        ]
        # Split every image into left/right halves, drop the leading zero-pad
        # half, and sum adjacent halves to undo the 50% overlap.
        halfs = chain.from_iterable(map(split_vertical, complex_images))
        next(halfs)
        overlapped_halfs = [left + right for left, right in grouper(halfs, 2)]
        # Cut pad size from last
        last_image_size = padder.original_size // decimate
        overlapped_halfs[-1] = overlapped_halfs[-1][:, :last_image_size]
        return np.concatenate(overlapped_halfs, axis=1)
def angularfreq(nsamples, samplerate):
    """Compute angular frequencies (rad/s) for an FFT of length nsamples.

    The tail of the index array is shifted down by nsamples so it carries the
    negative frequencies, then everything is scaled by samplerate * 2*pi / N.

    Fix: ``-nsamples/2`` is a float on Python 3 and float slice indices raise
    TypeError; floor division matches the Python 2 result exactly.
    """
    angfreq = np.arange(nsamples, dtype=np.float32)
    angfreq[-nsamples // 2 + 1:] -= nsamples
    angfreq *= samplerate * PI2 / nsamples
    return angfreq
# The larger this coefficient, the more of the lowest octaves are discarded.
LOWER_FQ_LIMIT_COEFF = 0.5
def autoscales(samples_count, samplerate, scale_resolution, omega0):
    """ Compute scales as fractional power of two """
    # morle_samples: number of samples spanned by the base (Morlet) wavelet
    morle_samples = (omega0 + np.sqrt(2 + omega0 ** 2)) / PI2
    # a scale is measured in seconds
    minimal_scale = morle_samples / samplerate
    # how many base wavelets fit into the signal (the frequency range)
    freq_interval = samples_count / morle_samples
    # skip the lowest octaves, controlled by LOWER_FQ_LIMIT_COEFF
    skip_n_lower_octaves = LOWER_FQ_LIMIT_COEFF * samples_count / samplerate
    skipped_low_freq_interval = max(1, 2**skip_n_lower_octaves)
    visible_freq_interval = freq_interval / skipped_low_freq_interval
    maximal_scale = np.log2(visible_freq_interval)
    # geometric ladder of scales: 2**(i * scale_resolution) steps
    indexes_count = int(np.floor(maximal_scale / scale_resolution))
    indexes = np.arange(indexes_count + 1, dtype=np.float32)
    logarithmic_indexes = 2 ** (indexes * scale_resolution)
    return minimal_scale * logarithmic_indexes
| {
"repo_name": "ivanovwaltz/wavelet_sound_microscope",
"path": "analyze/wavelet/base.py",
"copies": "1",
"size": "5440",
"license": "mit",
"hash": 4357567612382354400,
"line_mean": 25.475,
"line_max": 78,
"alpha_frac": 0.6171860246,
"autogenerated": false,
"ratio": 3.1820913461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9294589870753847,
"avg_score": 0.0009375,
"num_lines": 200
} |
from functools import partial
from itertools import count
class MemoizedGenerator(object):
    """Memoize a generator to avoid computing any term more than once.

    All iterators created by calling this object share one cache and one
    underlying generator instance, so each term is computed exactly once no
    matter how many concurrent iterations are in flight.
    """
    def __init__(self, gen):
        # The underlying generator
        self.__gen = gen
        # Memoization fields
        self.__cache = []       # terms produced so far, in order
        self.__iter = None      # the single shared generator instance
        self.__empty = False    # True once the generator is exhausted
    def __call__(self, *args, **kwargs):
        """Make instances of this class callable.

        This method must be present, and must be a generator
        function, so that class instances work the same as their
        underlying generators.
        """
        # Lazily start the underlying generator on first call; the args of
        # only the FIRST call are used.
        if not (self.__empty or self.__iter):
            self.__iter = self.__gen(*args, **kwargs)
        for n in count():
            # First check the cache
            if n < len(self.__cache):
                yield self.__cache[n]
            # See if another copy of the generator emptied it
            # since our last iteration
            elif self.__empty:
                break
            # If none of the above, advance the generator
            # (which may empty it)
            else:
                try:
                    term = next(self.__iter)
                except StopIteration:
                    self.__empty = True
                    break
                else:
                    self.__cache.append(term)
                    yield term
# This creates a decorator that works if applied to a method
# (the above will only work on an ordinary generator function)
# -- requires the Delayed Decorator recipe at
# http://code.activestate.com/recipes/577993-delayed-decorator/
# NOTE(review): DelayedDecorator is NOT defined in this file; importing this
# module raises NameError unless that recipe is pasted in or imported first.
memoize_generator = partial(DelayedDecorator, MemoizedGenerator)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577992_Memoize_Generator/recipe-577992.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": -2767489896494934500,
"line_mean": 33.46,
"line_max": 70,
"alpha_frac": 0.5548461985,
"autogenerated": false,
"ratio": 4.894886363636363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01082828282828283,
"num_lines": 50
} |
from functools import partial
from itertools import cycle
import logging
import numpy as np
import os
import pickle
from scipy import sparse as sp
import tensorflow as tf
from .errors import (
ModelNotBiasedException, ModelNotFitException, ModelWithoutAttentionException, BatchNonSparseInputException,
TfVersionException
)
from .input_utils import create_tensorrec_iterator, get_dimensions_from_tensorrec_dataset
from .loss_graphs import AbstractLossGraph, RMSELossGraph
from .prediction_graphs import AbstractPredictionGraph, DotProductPredictionGraph
from .recommendation_graphs import (
project_biases, split_sparse_tensor_indices, bias_prediction_dense, bias_prediction_serial, rank_predictions,
densify_sampled_item_predictions, collapse_mixture_of_tastes, predict_similar_items
)
from .representation_graphs import AbstractRepresentationGraph, LinearRepresentationGraph
from .session_management import get_session
from .util import sample_items, calculate_batched_alpha, datasets_from_raw_input
class TensorRec(object):
def __init__(self,
             n_components=100,
             n_tastes=1,
             user_repr_graph=LinearRepresentationGraph(),
             item_repr_graph=LinearRepresentationGraph(),
             attention_graph=None,
             prediction_graph=DotProductPredictionGraph(),
             loss_graph=RMSELossGraph(),
             biased=True,):
    """
    A TensorRec recommendation model.
    :param n_components: Integer
    The dimension of a single output of the representation function. Must be >= 1.
    :param n_tastes: Integer
    The number of tastes/reprs to be calculated for each user. Must be >= 1.
    :param user_repr_graph: AbstractRepresentationGraph
    An object which inherits AbstractRepresentationGraph that contains a method to calculate user representations.
    See tensorrec.representation_graphs for examples.
    :param item_repr_graph: AbstractRepresentationGraph
    An object which inherits AbstractRepresentationGraph that contains a method to calculate item representations.
    See tensorrec.representation_graphs for examples.
    :param attention_graph: AbstractRepresentationGraph or None
    Optional. An object which inherits AbstractRepresentationGraph that contains a method to calculate user
    attention. Any valid repr_graph is also a valid attention graph. If None, no attention process will be applied.
    :param prediction_graph: AbstractPredictionGraph
    An object which inherits AbstractPredictionGraph that contains a method to calculate predictions from a pair of
    user/item reprs.
    See tensorrec.prediction_graphs for examples.
    :param loss_graph: AbstractLossGraph
    An object which inherits AbstractLossGraph that contains a method to calculate the loss function.
    See tensorrec.loss_graphs for examples.
    :param biased: bool
    If True, a bias value will be calculated for every user feature and item feature.
    """
    # NOTE(review): the repr/prediction/loss graph defaults are mutable
    # instances evaluated once at import time and therefore SHARED across
    # TensorRec objects -- confirm these factories are stateless.

    # Check TensorFlow version
    major, minor, patch = tf.__version__.split(".")
    if int(major) < 1 or int(major) == 1 and int(minor) < 7:
        raise TfVersionException(tf_version=tf.__version__)

    # Arg Check
    if (n_components is None) or (n_tastes is None) or (user_repr_graph is None) or (item_repr_graph is None) \
            or (prediction_graph is None) or (loss_graph is None):
        raise ValueError("All arguments to TensorRec() must be non-None")
    if n_components < 1:
        raise ValueError("n_components must be >= 1")
    if n_tastes < 1:
        raise ValueError("n_tastes must be >= 1")
    if not isinstance(user_repr_graph, AbstractRepresentationGraph):
        raise ValueError("user_repr_graph must inherit AbstractRepresentationGraph")
    if not isinstance(item_repr_graph, AbstractRepresentationGraph):
        raise ValueError("item_repr_graph must inherit AbstractRepresentationGraph")
    if not isinstance(prediction_graph, AbstractPredictionGraph):
        raise ValueError("prediction_graph must inherit AbstractPredictionGraph")
    if not isinstance(loss_graph, AbstractLossGraph):
        raise ValueError("loss_graph must inherit AbstractLossGraph")
    if attention_graph is not None:
        if not isinstance(attention_graph, AbstractRepresentationGraph):
            raise ValueError("attention_graph must be None or inherit AbstractRepresentationGraph")
        if n_tastes == 1:
            raise ValueError("attention_graph must be None if n_tastes == 1")

    self.n_components = n_components
    self.n_tastes = n_tastes
    self.user_repr_graph_factory = user_repr_graph
    self.item_repr_graph_factory = item_repr_graph
    self.attention_graph_factory = attention_graph
    self.prediction_graph_factory = prediction_graph
    self.loss_graph_factory = loss_graph
    self.biased = biased

    # A list of the attr names of every graph hook attr
    self.graph_tensor_hook_attr_names = [
        # Top-level API nodes
        'tf_user_representation', 'tf_item_representation', 'tf_prediction_serial', 'tf_prediction', 'tf_rankings',
        'tf_predict_similar_items', 'tf_rank_similar_items',
        # Training nodes
        'tf_basic_loss', 'tf_weight_reg_loss', 'tf_loss',
        # Feed placeholders
        'tf_learning_rate', 'tf_alpha', 'tf_sample_indices', 'tf_n_sampled_items', 'tf_similar_items_ids',
    ]
    if self.biased:
        self.graph_tensor_hook_attr_names += ['tf_projected_user_biases', 'tf_projected_item_biases']
    if self.attention_graph_factory is not None:
        self.graph_tensor_hook_attr_names += ['tf_user_attention_representation']
    self.graph_operation_hook_attr_names = [
        # AdamOptimizer
        'tf_optimizer',
    ]
    self.graph_iterator_hook_attr_names = [
        # Input data iterators
        'tf_user_feature_iterator', 'tf_item_feature_iterator', 'tf_interaction_iterator',
    ]

    # Calling the break routine during __init__ creates all the attrs on the TensorRec object with an initial value
    # of None
    self._break_graph_hooks()

    # A map of every graph hook attr name to the node name after construction
    # Tensors and operations are stored separated because they are handled differently by TensorFlow
    self.graph_tensor_hook_node_names = {}
    self.graph_operation_hook_node_names = {}
    self.graph_iterator_hook_node_names = {}
def _break_graph_hooks(self):
    """Reset every graph hook attribute on this object to None."""
    all_hook_names = (self.graph_tensor_hook_attr_names
                      + self.graph_operation_hook_attr_names
                      + self.graph_iterator_hook_attr_names)
    for attr_name in all_hook_names:
        setattr(self, attr_name, None)
def _attach_graph_hooks(self):
    """Re-bind every graph hook attribute to its node in the current
    session's graph, using the names recorded by _record_graph_hook_names."""
    session = get_session()
    graph = session.graph
    for attr_name in self.graph_tensor_hook_attr_names:
        tensor_name = self.graph_tensor_hook_node_names[attr_name]
        setattr(self, attr_name, graph.get_tensor_by_name(name=tensor_name))
    for attr_name in self.graph_operation_hook_attr_names:
        operation_name = self.graph_operation_hook_node_names[attr_name]
        setattr(self, attr_name, graph.get_operation_by_name(name=operation_name))
    for attr_name in self.graph_iterator_hook_attr_names:
        (resource_name, output_types, output_shapes, output_classes) = \
            self.graph_iterator_hook_node_names[attr_name]
        resource = graph.get_tensor_by_name(name=resource_name)
        # Rebuild the Dataset iterator around its (now re-loaded) resource.
        rebuilt = tf.data.Iterator(resource, None, output_types, output_shapes,
                                   output_classes)
        setattr(self, attr_name, rebuilt)
def _record_graph_hook_names(self):
    """Store serializable node names/metadata for every graph hook attribute
    so the graph can be re-attached after save/load."""
    for attr_name in self.graph_tensor_hook_attr_names:
        self.graph_tensor_hook_node_names[attr_name] = \
            getattr(self, attr_name).name
    for attr_name in self.graph_operation_hook_attr_names:
        self.graph_operation_hook_node_names[attr_name] = \
            getattr(self, attr_name).name
    for attr_name in self.graph_iterator_hook_attr_names:
        iterator = getattr(self, attr_name)
        # Iterators need their resource name plus the type/shape/class specs
        # to be reconstructed later (see _attach_graph_hooks).
        self.graph_iterator_hook_node_names[attr_name] = (
            iterator._iterator_resource.name,
            iterator.output_types,
            iterator.output_shapes,
            iterator.output_classes,
        )
def _create_batched_dataset_initializers(self, interactions, user_features, item_features, user_batch_size=None):
    """Build per-batch (interactions, user_features, item_features) dataset
    and initializer tuples, slicing users into row batches if requested.

    Item features are not batched; the single item dataset is cycled so each
    user batch pairs with it.
    """
    if user_batch_size is not None:
        # Raise exception if interactions and user_features aren't sparse matrices
        if (not sp.issparse(interactions)) or (not sp.issparse(user_features)):
            raise BatchNonSparseInputException()

        # Coerce to CSR for fast batching
        if not isinstance(interactions, sp.csr_matrix):
            interactions = sp.csr_matrix(interactions)
        if not isinstance(user_features, sp.csr_matrix):
            user_features = sp.csr_matrix(user_features)

        n_users = user_features.shape[0]
        interactions_batched = []
        user_features_batched = []
        start_batch = 0
        while start_batch < n_users:
            # min() ensures that the batch bounds doesn't go past the end of the index
            end_batch = min(start_batch + user_batch_size, n_users)
            interactions_batched.append(interactions[start_batch:end_batch])
            user_features_batched.append(user_features[start_batch:end_batch])
            start_batch = end_batch

        # Overwrite the input with the new, batched input
        interactions = interactions_batched
        user_features = user_features_batched

    # TODO this is hand-wavy and begging for a cleaner refactor
    # (relies on all three inputs being non-None so exactly 3 lists come back)
    (int_ds, uf_ds, if_ds), (int_init, uf_init, if_init) = self._create_datasets_and_initializers(
        interactions=interactions, user_features=user_features, item_features=item_features
    )

    # Ensure that lengths make sense
    if len(int_init) != len(uf_init):
        raise ValueError('Number of batches in user_features and interactions must be equal.')
    if (len(if_init) > 1) and (len(if_init) != len(uf_init)):
        raise ValueError('Number of batches in item_features must be 1 or equal to the number of batches in '
                         'user_features.')

    # Cycle item features when zipping because there should only be one
    datasets = [ds_set for ds_set in zip(int_ds, uf_ds, cycle(if_ds))]
    initializers = [init_set for init_set in zip(int_init, uf_init, cycle(if_init))]

    return datasets, initializers
def _create_datasets_and_initializers(self, interactions=None, user_features=None, item_features=None):
    """Build TF Datasets and iterator initializers for each non-None input.

    :return: (datasets, initializers) -- parallel lists with one entry per
    provided input, in the fixed order interactions, user_features,
    item_features (matching the iterators each input feeds).

    Refactor: the three copy-pasted branches are collapsed into one loop;
    order and results are unchanged.
    """
    datasets = []
    initializers = []

    # Pair each raw input with the iterator it initializes, in output order.
    input_iterator_pairs = (
        (interactions, self.tf_interaction_iterator),
        (user_features, self.tf_user_feature_iterator),
        (item_features, self.tf_item_feature_iterator),
    )
    for raw_input, iterator in input_iterator_pairs:
        if raw_input is None:
            continue
        input_datasets = datasets_from_raw_input(raw_input=raw_input)
        input_initializers = [iterator.make_initializer(dataset)
                              for dataset in input_datasets]
        datasets.append(input_datasets)
        initializers.append(input_initializers)

    return datasets, initializers
def _build_input_iterators(self):
    """Create the three named TensorRec input iterators and attach each as an
    attribute of the same name."""
    for iterator_attr in ('tf_user_feature_iterator',
                          'tf_item_feature_iterator',
                          'tf_interaction_iterator'):
        setattr(self, iterator_attr,
                create_tensorrec_iterator(name=iterator_attr))
def _build_tf_graph(self, n_user_features, n_item_features):
    """Construct the entire TensorFlow graph for this model: placeholders,
    sparse inputs, per-taste user/item representations (with optional
    attention), bias terms, prediction/ranking API nodes, the loss, and the
    Adam training op. Finishes by recording node names for serialization.
    """
    # Build placeholders
    self.tf_n_sampled_items = tf.placeholder('int64')
    self.tf_similar_items_ids = tf.placeholder('int64', [None])
    self.tf_learning_rate = tf.placeholder('float', None)
    self.tf_alpha = tf.placeholder('float', None)

    # Each iterator yields COO-style (rows, cols, values, n_rows, n_cols).
    tf_user_feature_rows, tf_user_feature_cols, tf_user_feature_values, tf_n_users, _ = \
        self.tf_user_feature_iterator.get_next()
    tf_item_feature_rows, tf_item_feature_cols, tf_item_feature_values, tf_n_items, _ = \
        self.tf_item_feature_iterator.get_next()
    tf_interaction_rows, tf_interaction_cols, tf_interaction_values, _, _ = \
        self.tf_interaction_iterator.get_next()

    tf_user_feature_indices = tf.stack([tf_user_feature_rows, tf_user_feature_cols], axis=1)
    tf_item_feature_indices = tf.stack([tf_item_feature_rows, tf_item_feature_cols], axis=1)
    tf_interaction_indices = tf.stack([tf_interaction_rows, tf_interaction_cols], axis=1)

    # Construct the features and interactions as sparse matrices
    tf_user_features = tf.SparseTensor(tf_user_feature_indices, tf_user_feature_values,
                                       [tf_n_users, n_user_features])
    tf_item_features = tf.SparseTensor(tf_item_feature_indices, tf_item_feature_values,
                                       [tf_n_items, n_item_features])
    tf_interactions = tf.SparseTensor(tf_interaction_indices, tf_interaction_values,
                                      [tf_n_users, tf_n_items])

    # Construct the sampling py_func
    sample_items_partial = partial(sample_items, replace=self.loss_graph_factory.is_sampled_with_replacement)
    self.tf_sample_indices = tf.py_func(func=sample_items_partial,
                                        inp=[tf_n_items, tf_n_users, self.tf_n_sampled_items],
                                        Tout=tf.int64)
    self.tf_sample_indices.set_shape([None, None])

    # Collect the weights for regularization
    tf_weights = []

    # Build the item representations
    self.tf_item_representation, item_weights = \
        self.item_repr_graph_factory.connect_representation_graph(tf_features=tf_item_features,
                                                                  n_components=self.n_components,
                                                                  n_features=n_item_features,
                                                                  node_name_ending='item')
    tf_weights.extend(item_weights)

    tf_x_user, tf_x_item = split_sparse_tensor_indices(tf_sparse_tensor=tf_interactions, n_dimensions=2)
    tf_transposed_sample_indices = tf.transpose(self.tf_sample_indices)
    tf_x_user_sample = tf_transposed_sample_indices[0]
    tf_x_item_sample = tf_transposed_sample_indices[1]

    # These lists will hold the reprs and predictions for each taste
    tastes_tf_user_representations = []
    tastes_tf_predictions = []
    tastes_tf_prediction_serials = []
    tastes_tf_sample_prediction_serials = []

    # If this model does not use attention, Nones are used as sentinels in place of the attentions
    if self.attention_graph_factory is not None:
        tastes_tf_attentions = []
        tastes_tf_attention_serials = []
        tastes_tf_sample_attention_serials = []
        tastes_tf_attention_representations = []
    else:
        tastes_tf_attentions = None
        tastes_tf_attention_serials = None
        tastes_tf_sample_attention_serials = None
        tastes_tf_attention_representations = None

    # Build n_tastes user representations and predictions
    for taste in range(self.n_tastes):
        tf_user_representation, user_weights = \
            self.user_repr_graph_factory.connect_representation_graph(tf_features=tf_user_features,
                                                                      n_components=self.n_components,
                                                                      n_features=n_user_features,
                                                                      node_name_ending='user_{}'.format(taste))
        tastes_tf_user_representations.append(tf_user_representation)
        tf_weights.extend(user_weights)

        # Connect attention, if applicable
        if self.attention_graph_factory is not None:
            tf_attention_representation, attention_weights = \
                self.attention_graph_factory.connect_representation_graph(tf_features=tf_user_features,
                                                                          n_components=self.n_components,
                                                                          n_features=n_user_features,
                                                                          node_name_ending='attn_{}'.format(taste))
            tf_weights.extend(attention_weights)
            tf_attention = self.prediction_graph_factory.connect_dense_prediction_graph(
                tf_user_representation=tf_attention_representation,
                tf_item_representation=self.tf_item_representation
            )
            tf_attention_serial = self.prediction_graph_factory.connect_serial_prediction_graph(
                tf_user_representation=tf_attention_representation,
                tf_item_representation=self.tf_item_representation,
                tf_x_user=tf_x_user,
                tf_x_item=tf_x_item,
            )
            # NOTE(review): this uses tf_user_representation rather than
            # tf_attention_representation, unlike the two calls above --
            # confirm whether that asymmetry is intentional.
            tf_sample_attention_serial = self.prediction_graph_factory.connect_serial_prediction_graph(
                tf_user_representation=tf_user_representation,
                tf_item_representation=self.tf_item_representation,
                tf_x_user=tf_x_user_sample,
                tf_x_item=tf_x_item_sample,
            )
            tastes_tf_attentions.append(tf_attention)
            tastes_tf_attention_serials.append(tf_attention_serial)
            tastes_tf_sample_attention_serials.append(tf_sample_attention_serial)
            tastes_tf_attention_representations.append(tf_attention_representation)

        # Connect the configurable prediction graphs for each taste
        tf_prediction = self.prediction_graph_factory.connect_dense_prediction_graph(
            tf_user_representation=tf_user_representation,
            tf_item_representation=self.tf_item_representation
        )
        tf_prediction_serial = self.prediction_graph_factory.connect_serial_prediction_graph(
            tf_user_representation=tf_user_representation,
            tf_item_representation=self.tf_item_representation,
            tf_x_user=tf_x_user,
            tf_x_item=tf_x_item,
        )
        tf_sample_predictions_serial = self.prediction_graph_factory.connect_serial_prediction_graph(
            tf_user_representation=tf_user_representation,
            tf_item_representation=self.tf_item_representation,
            tf_x_user=tf_x_user_sample,
            tf_x_item=tf_x_item_sample,
        )

        # Append to tastes
        tastes_tf_predictions.append(tf_prediction)
        tastes_tf_prediction_serials.append(tf_prediction_serial)
        tastes_tf_sample_prediction_serials.append(tf_sample_predictions_serial)

    # If attention is in the graph, build the API node
    if self.attention_graph_factory is not None:
        self.tf_user_attention_representation = tf.stack(tastes_tf_attention_representations)

    self.tf_user_representation = tf.stack(tastes_tf_user_representations)
    self.tf_prediction = collapse_mixture_of_tastes(
        tastes_predictions=tastes_tf_predictions,
        tastes_attentions=tastes_tf_attentions
    )
    self.tf_prediction_serial = collapse_mixture_of_tastes(
        tastes_predictions=tastes_tf_prediction_serials,
        tastes_attentions=tastes_tf_attention_serials
    )
    tf_sample_predictions_serial = collapse_mixture_of_tastes(
        tastes_predictions=tastes_tf_sample_prediction_serials,
        tastes_attentions=tastes_tf_sample_attention_serials
    )

    # Add biases, if this is a biased estimator
    if self.biased:
        tf_user_feature_biases, self.tf_projected_user_biases = project_biases(
            tf_features=tf_user_features, n_features=n_user_features
        )
        tf_item_feature_biases, self.tf_projected_item_biases = project_biases(
            tf_features=tf_item_features, n_features=n_item_features
        )
        tf_weights.append(tf_user_feature_biases)
        tf_weights.append(tf_item_feature_biases)
        self.tf_prediction = bias_prediction_dense(
            tf_prediction=self.tf_prediction,
            tf_projected_user_biases=self.tf_projected_user_biases,
            tf_projected_item_biases=self.tf_projected_item_biases)
        self.tf_prediction_serial = bias_prediction_serial(
            tf_prediction_serial=self.tf_prediction_serial,
            tf_projected_user_biases=self.tf_projected_user_biases,
            tf_projected_item_biases=self.tf_projected_item_biases,
            tf_x_user=tf_x_user,
            tf_x_item=tf_x_item)
        tf_sample_predictions_serial = bias_prediction_serial(
            tf_prediction_serial=tf_sample_predictions_serial,
            tf_projected_user_biases=self.tf_projected_user_biases,
            tf_projected_item_biases=self.tf_projected_item_biases,
            tf_x_user=tf_x_user_sample,
            tf_x_item=tf_x_item_sample)

    tf_interactions_serial = tf_interactions.values

    # Construct API nodes
    self.tf_rankings = rank_predictions(tf_prediction=self.tf_prediction)
    self.tf_predict_similar_items = predict_similar_items(prediction_graph_factory=self.prediction_graph_factory,
                                                          tf_item_representation=self.tf_item_representation,
                                                          tf_similar_items_ids=self.tf_similar_items_ids)
    self.tf_rank_similar_items = rank_predictions(tf_prediction=self.tf_predict_similar_items)

    # Compose loss function args
    # This composition is for execution safety: it prevents loss functions that are incorrectly configured from
    # having visibility of certain nodes.
    loss_graph_kwargs = {
        'tf_prediction_serial': self.tf_prediction_serial,
        'tf_interactions_serial': tf_interactions_serial,
        'tf_interactions': tf_interactions,
        'tf_n_users': tf_n_users,
        'tf_n_items': tf_n_items,
    }
    if self.loss_graph_factory.is_dense:
        loss_graph_kwargs.update({
            'tf_prediction': self.tf_prediction,
            'tf_rankings': self.tf_rankings,
        })
    if self.loss_graph_factory.is_sample_based:
        tf_sample_predictions = densify_sampled_item_predictions(
            tf_sample_predictions_serial=tf_sample_predictions_serial,
            tf_n_sampled_items=self.tf_n_sampled_items,
            tf_n_users=tf_n_users,
        )
        loss_graph_kwargs.update({'tf_sample_predictions': tf_sample_predictions,
                                  'tf_n_sampled_items': self.tf_n_sampled_items})

    # Build loss graph
    self.tf_basic_loss = self.loss_graph_factory.connect_loss_graph(**loss_graph_kwargs)
    self.tf_weight_reg_loss = sum(tf.nn.l2_loss(weights) for weights in tf_weights)
    self.tf_loss = self.tf_basic_loss + (self.tf_alpha * self.tf_weight_reg_loss)
    self.tf_optimizer = tf.train.AdamOptimizer(learning_rate=self.tf_learning_rate).minimize(self.tf_loss)

    # Record the new node names
    self._record_graph_hook_names()
def fit(self, interactions, user_features, item_features, epochs=100, learning_rate=0.1, alpha=0.00001,
        verbose=False, user_batch_size=None, n_sampled_items=None):
    """
    Constructs the TensorRec graph and fits the model.

    This is a pure pass-through to :meth:`fit_partial` with the same
    arguments; it exists as the conventional "train from scratch" entry point.

    :param interactions: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of interactions of shape [n_users, n_items]. A Dataset must
        follow the tensorrec.input_utils format; a str must be a TFRecord
        path; a list may mix any of the above.
    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features] (same
        accepted formats as `interactions`).
    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features] (same
        accepted formats as `interactions`).
    :param epochs: Integer
        The number of epochs to fit the model.
    :param learning_rate: Float
        The learning rate of the model.
    :param alpha:
        The weight regularization loss coefficient.
    :param verbose: boolean
        If true, the model will print a number of status statements during fitting.
    :param user_batch_size: int or None
        The maximum number of users per batch, or None for all users.
    :param n_sampled_items: int or None
        The number of items to sample per user for use in loss functions. Must
        be non-None if self.loss_graph_factory.is_sample_based is True.
    """
    self.fit_partial(interactions=interactions,
                     user_features=user_features,
                     item_features=item_features,
                     epochs=epochs,
                     learning_rate=learning_rate,
                     alpha=alpha,
                     verbose=verbose,
                     user_batch_size=user_batch_size,
                     n_sampled_items=n_sampled_items)
def fit_partial(self, interactions, user_features, item_features, epochs=1, learning_rate=0.1,
                alpha=0.00001, verbose=False, user_batch_size=None, n_sampled_items=None):
    """
    Constructs the TensorRec graph and fits the model.

    Unlike :meth:`fit`, this defaults to a single epoch and may be called
    repeatedly to continue training an already-fit model.

    :param interactions: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of interactions of shape [n_users, n_items].
        If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
        If a str, the string must be the path to a TFRecord file.
        If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features].
        If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
        If a str, the string must be the path to a TFRecord file.
        If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features].
        If a Dataset, the Dataset must follow the format used in tensorrec.input_utils.
        If a str, the string must be the path to a TFRecord file.
        If a list, the list must contain scipy.sparse matrices, tensorflow.data.Datasets, or strs.
    :param epochs: Integer
        The number of epochs to fit the model.
    :param learning_rate: Float
        The learning rate of the model.
    :param alpha:
        The weight regularization loss coefficient.
    :param verbose: boolean
        If true, the model will print a number of status statements during fitting.
    :param user_batch_size: int or None
        The maximum number of users per batch, or None for all users.
    :param n_sampled_items: int or None
        The number of items to sample per user for use in loss functions. Must be non-None if
        self.loss_graph_factory.is_sample_based is True.
    """
    session = get_session()

    # Arg checking: sample-based losses require a positive sample count;
    # conversely, a sample count given to a non-sample-based loss is ignored.
    if self.loss_graph_factory.is_sample_based:
        if (n_sampled_items is None) or (n_sampled_items <= 0):
            raise ValueError("n_sampled_items must be an integer >0")
    if (n_sampled_items is not None) and (not self.loss_graph_factory.is_sample_based):
        logging.warning('n_sampled_items was specified, but the loss graph is not sample-based')

    # Check if the iterators have been constructed. If not, build them.
    if self.tf_interaction_iterator is None:
        self._build_input_iterators()

    if verbose:
        logging.info('Processing interaction and feature data')

    dataset_sets, initializer_sets = self._create_batched_dataset_initializers(interactions=interactions,
                                                                               user_features=user_features,
                                                                               item_features=item_features,
                                                                               user_batch_size=user_batch_size)

    # Check if the graph has been constructed by checking the dense prediction node
    # If it hasn't been constructed, initialize it
    if self.tf_prediction is None:
        # Check input dimensions using the first batch's feature datasets
        first_batch = dataset_sets[0]
        _, n_user_features = get_dimensions_from_tensorrec_dataset(first_batch[1])
        _, n_item_features = get_dimensions_from_tensorrec_dataset(first_batch[2])

        # Numbers of features are either learned at fit time from the shape of these two matrices or specified at
        # TensorRec construction and cannot be changed.
        self._build_tf_graph(n_user_features=n_user_features, n_item_features=n_item_features)
        session.run(tf.global_variables_initializer())

    # Build the shared feed dict; alpha is rescaled for the number of batches
    # so regularization strength is independent of the batching scheme.
    feed_dict = {self.tf_learning_rate: learning_rate,
                 self.tf_alpha: calculate_batched_alpha(num_batches=len(initializer_sets), alpha=alpha)}
    if self.loss_graph_factory.is_sample_based:
        feed_dict[self.tf_n_sampled_items] = n_sampled_items

    if verbose:
        logging.info('Beginning fitting')

    for epoch in range(epochs):
        for batch, initializers in enumerate(initializer_sets):
            # Re-initialize the input iterators for this batch's data
            session.run(initializers)
            if not verbose:
                session.run(self.tf_optimizer, feed_dict=feed_dict)
            else:
                # Verbose path fetches diagnostics in the same run call so
                # the reported values match the optimized step exactly.
                _, loss, serial_predictions, wr_loss = session.run(
                    [self.tf_optimizer, self.tf_basic_loss, self.tf_prediction_serial, self.tf_weight_reg_loss],
                    feed_dict=feed_dict
                )
                mean_loss = np.mean(loss)
                mean_pred = np.mean(serial_predictions)
                weight_reg_l2_loss = alpha * wr_loss
                logging.info('EPOCH {} BATCH {} loss = {}, weight_reg_l2_loss = {}, mean_pred = {}'.format(
                    epoch, batch, mean_loss, weight_reg_l2_loss, mean_pred
                ))
def predict(self, user_features, item_features):
    """
    Predict recommendation scores for the given users and items.

    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features]
        (same accepted formats as `user_features`).
    :return: np.ndarray
        The predictions in an ndarray of shape [n_users, n_items]
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Guard: the prediction node only exists after fitting
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=user_features,
                                                         item_features=item_features)
    session = get_session()
    session.run(init_ops)
    return self.tf_prediction.eval(session=session)
def predict_similar_items(self, item_features, item_ids, n_similar):
    """
    Predicts the most similar items to the given item_ids.

    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :param item_ids: list or np.array
        The ids of the items of interest, e.g. [4, 8, 12].
    :param n_similar: int
        The number of similar items to get per item of interest.
    :return: list of lists of tuples
        One inner list per entry of `item_ids`; each inner list holds
        n_similar (item_id, score) tuples sorted by descending score.
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Guard: the similarity nodes only exist after fitting
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_similar_items')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=None,
                                                         item_features=item_features)
    session = get_session()
    session.run(init_ops)
    sims = self.tf_predict_similar_items.eval(
        session=session,
        feed_dict={self.tf_similar_items_ids: np.array(item_ids)}
    )

    def top_n(scores):
        # argpartition selects the n_similar best in O(n); only those few
        # are then fully sorted by descending score.
        top_idx = np.argpartition(scores, -n_similar)[-n_similar:]
        return sorted(zip(top_idx, scores[top_idx]), key=lambda pair: -pair[1])

    return [top_n(sims[i]) for i in range(len(item_ids))]
def predict_rank(self, user_features, item_features):
    """
    Predict recommendation ranks for the given users and items.

    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features]
        (same accepted formats as `user_features`).
    :return: np.ndarray
        The ranks in an ndarray of shape [n_users, n_items]
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Guard: the ranking node only exists after fitting
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_rank')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=user_features,
                                                         item_features=item_features)
    session = get_session()
    session.run(init_ops)
    return self.tf_rankings.eval(session=session)
def predict_user_representation(self, user_features):
    """
    Predict latent representation vectors for the given users.

    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :return: np.ndarray
        The latent user representations in an ndarray of shape
        [n_users, n_components]
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Guard: representation nodes only exist after fitting
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_user_representation')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=user_features,
                                                         item_features=None)
    session = get_session()
    session.run(init_ops)
    user_repr = self.tf_user_representation.eval(session=session)

    # With a single taste the result carries a redundant taste axis --
    # collapse from rank 3 to the documented rank-2 shape.
    return np.sum(user_repr, axis=0) if self.n_tastes == 1 else user_repr
def predict_user_attention_representation(self, user_features):
    """
    Predict latent attention representation vectors for the given users.

    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :return: np.ndarray
        The latent user attention representations in an ndarray of shape
        [n_users, n_components]
    :raises ModelNotFitException: if called before the model has been fit.
    :raises ModelWithoutAttentionException: if the model was built without an
        attention graph.
    """
    # Guards: the model must be fit AND configured with attention
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_user_attention_representation')
    if self.attention_graph_factory is None:
        raise ModelWithoutAttentionException()

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=user_features,
                                                         item_features=None)
    session = get_session()
    session.run(init_ops)
    user_attn_repr = self.tf_user_attention_representation.eval(session=session)

    # With a single taste the result carries a redundant taste axis --
    # collapse from rank 3 to the documented rank-2 shape.
    return np.sum(user_attn_repr, axis=0) if self.n_tastes == 1 else user_attn_repr
def predict_item_representation(self, item_features):
    """
    Predict representation vectors for the given items.

    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :return: np.ndarray
        The latent item representations in an ndarray of shape
        [n_items, n_components]
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Guard: representation nodes only exist after fitting
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_item_representation')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=None,
                                                         item_features=item_features)
    session = get_session()
    session.run(init_ops)
    return self.tf_item_representation.eval(session=session)
def predict_user_bias(self, user_features):
    """
    Predict bias values for the given users.

    :param user_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of user features of shape [n_users, n_user_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :return: np.ndarray
        The user biases in an ndarray of shape [n_users]
    :raises ModelNotFitException: if called before the model has been fit.
    :raises ModelNotBiasedException: if the model was built without biases.
    """
    # Guards: the model must be fit AND built with bias terms
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_user_bias')
    if not self.biased:
        raise ModelNotBiasedException(actor='user')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=user_features,
                                                         item_features=None)
    session = get_session()
    session.run(init_ops)
    return self.tf_projected_user_biases.eval(session=session)
def predict_item_bias(self, item_features):
    """
    Predict bias values for the given items.

    :param item_features: scipy.sparse matrix, tensorflow.data.Dataset, str, or list
        A matrix of item features of shape [n_items, n_item_features]
        (Dataset in tensorrec.input_utils format, TFRecord path, or a list of those).
    :return: np.ndarray
        The item biases in an ndarray of shape [n_items]
    :raises ModelNotFitException: if called before the model has been fit.
    :raises ModelNotBiasedException: if the model was built without biases.
    """
    # Guards: the model must be fit AND built with bias terms
    if self.tf_prediction is None:
        raise ModelNotFitException(method='predict_item_bias')
    if not self.biased:
        raise ModelNotBiasedException(actor='item')

    _, init_ops = self._create_datasets_and_initializers(interactions=None,
                                                         user_features=None,
                                                         item_features=item_features)
    session = get_session()
    session.run(init_ops)
    return self.tf_projected_item_biases.eval(session=session)
def save_model(self, directory_path):
    """
    Saves the model to files in the given directory.

    Writes two artifacts: the TensorFlow session checkpoint
    ('tensorrec_session.cpkt*') and the pickled TensorRec object
    ('tensorrec.pkl'). Use :meth:`load_model` to restore both.

    :param directory_path: str
        The path to the directory in which to save the model. Created
        (including parents) if it does not exist.
    :return:
    :raises ModelNotFitException: if called before the model has been fit.
    """
    # Ensure that the model has been fit
    if self.tf_prediction is None:
        raise ModelNotFitException(method='save_model')

    # exist_ok avoids the check-then-create race of the former
    # `if not os.path.exists(...): os.makedirs(...)` idiom.
    os.makedirs(directory_path, exist_ok=True)

    # Save the TensorFlow session variables/graph
    saver = tf.train.Saver()
    session_path = os.path.join(directory_path, 'tensorrec_session.cpkt')
    saver.save(sess=get_session(), save_path=session_path)

    # Break connections to the graph before saving the python object --
    # TensorFlow nodes are not picklable.
    self._break_graph_hooks()
    tensorrec_path = os.path.join(directory_path, 'tensorrec.pkl')
    with open(tensorrec_path, 'wb') as file:
        pickle.dump(file=file, obj=self)
    # Reconnect to the graph after saving
    self._attach_graph_hooks()
@classmethod
def load_model(cls, directory_path):
    """
    Loads the TensorRec model and TensorFlow session saved in the given directory.

    :param directory_path: str
        The path to the directory previously populated by :meth:`save_model`.
    :return:
        The unpickled model with its graph hooks re-attached.
    """
    graph_path = os.path.join(directory_path, 'tensorrec_session.cpkt.meta')
    session_path = os.path.join(directory_path, 'tensorrec_session.cpkt')
    tensorrec_path = os.path.join(directory_path, 'tensorrec.pkl')

    # Rebuild the saved graph, then restore variable values into the session
    saver = tf.train.import_meta_graph(graph_path)
    saver.restore(sess=get_session(), save_path=session_path)

    # Unpickle the Python object and re-bind it to the restored graph nodes
    with open(tensorrec_path, 'rb') as file:
        model = pickle.load(file=file)
    model._attach_graph_hooks()
    return model
| {
"repo_name": "jfkirk/tensorrec",
"path": "tensorrec/tensorrec.py",
"copies": "1",
"size": "47774",
"license": "apache-2.0",
"hash": 1213120234606242300,
"line_mean": 51.0981461287,
"line_max": 119,
"alpha_frac": 0.6222422238,
"autogenerated": false,
"ratio": 4.231157559117881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353399782917881,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import groupby
from operator import attrgetter
from django.forms.models import (
ModelChoiceIterator, ModelChoiceField, ModelMultipleChoiceField
)
class GroupedModelChoiceIterator(ModelChoiceIterator):
    """
    ModelChoiceIterator that yields (group, [choices]) pairs, grouping
    consecutive queryset rows by the `groupby` callable.
    """

    def __init__(self, field, groupby):
        self.groupby = groupby
        super().__init__(field)

    def __iter__(self):
        if self.field.empty_label is not None:
            yield ('', self.field.empty_label)
        qs = self.queryset
        # Can't use iterator() when queryset uses prefetch_related()
        if not qs._prefetch_related_lookups:
            qs = qs.iterator()
        for group_key, members in groupby(qs, self.groupby):
            yield (group_key, [self.choice(member) for member in members])
class BaseGroupedModelChoiceField:
    """
    Mixin for model choice fields that groups choices by `group_by_field`,
    which may be an attribute name (str) or a one-argument callable.
    """

    def __init__(self, *args, group_by_field, **kwargs):
        # Accept a callable as-is; promote a str to an attrgetter
        if callable(group_by_field):
            key_func = group_by_field
        elif isinstance(group_by_field, str):
            key_func = attrgetter(group_by_field)
        else:
            raise TypeError(
                'group_by_field must either be a str or a callable accepting '
                'a single argument'
            )
        self.iterator = partial(
            GroupedModelChoiceIterator,
            groupby=key_func
        )
        super().__init__(*args, **kwargs)
class GroupedModelChoiceField(BaseGroupedModelChoiceField, ModelChoiceField):
    """Single-select ModelChoiceField whose choices are grouped by ``group_by_field``."""
    pass
class GroupedModelMultiChoiceField(BaseGroupedModelChoiceField, ModelMultipleChoiceField):
    """Multi-select ModelMultipleChoiceField whose choices are grouped by ``group_by_field``."""
    pass
| {
"repo_name": "gamernetwork/gn-django",
"path": "gn_django/form/grouped_model_choice_field.py",
"copies": "1",
"size": "1541",
"license": "mit",
"hash": -6905279933611327000,
"line_mean": 31.1041666667,
"line_max": 90,
"alpha_frac": 0.6567164179,
"autogenerated": false,
"ratio": 4.479651162790698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022893772893772896,
"num_lines": 48
} |
from functools import partial
from itertools import groupby
from operator import attrgetter
try:
from itertools import ifilter
from itertools import imap
except ImportError:
imap = map
ifilter = filter
import logging
import heapq
from karld import is_py3
# Generator that lazily merges already-sorted iterators (heapq.merge with `key`).
def merge(*iterables, **kwargs):
    """Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(\*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    Accepts an optional ``key`` keyword argument, e.g.::

        merge(seq_a, seq_b, seq_c, key=itemgetter(-1))

    yields the items of all sequences interleaved in ascending order of
    ``key(item)`` (or of the items themselves when ``key`` is None).
    """
    key = kwargs.get('key')

    # Bind frequently-used names locally for speed inside the loops
    _heappop, _heapreplace, _StopIteration = heapq.heappop, heapq.heapreplace, StopIteration

    # Py2/Py3 compatibility: the advance method is `next` vs `__next__`
    if is_py3():
        next_method = attrgetter('__next__')
    else:
        next_method = attrgetter('next')

    # Heap entries are [sort_key, input_index, value, advance_callable];
    # input_index breaks ties so values themselves never get compared.
    h = []
    h_append = h.append
    key_is_None = key is None
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            nnext = next_method(it)
            v = nnext()
            h_append([v if key_is_None else key(v), itnum, v, nnext])
        except _StopIteration:
            pass
    heapq.heapify(h)

    while 1:
        try:
            while 1:
                # raises IndexError when h is empty
                k, itnum, v, nnext = s = h[0]
                yield v
                v = nnext()  # raises StopIteration when exhausted
                s[0] = v if key_is_None else key(v)
                s[2] = v
                _heapreplace(h, s)  # restore heap condition
        except _StopIteration:
            _heappop(h)  # remove empty iterator
        except IndexError:
            return
def sorted_by(key, items):
    """Return a new list with *items* ordered by the *key* function."""
    return sorted(items, key=key)
def sort_iterables(iterables, key=None):
    """Sort every iterable in *iterables* by *key*; return a list of sorted lists."""
    assert key is not None
    return [sorted(iterable, key=key) for iterable in iterables]
def i_merge_group_sorted(iterables, key=None):
    """
    Lazily merge the pre-sorted *iterables* and group the merged stream by
    *key*, yielding (key_value, [items]) pairs.
    """
    assert key is not None
    merged = merge(*iterables, key=key)
    return ((key_value, list(members))
            for key_value, members in groupby(merged, key=key))
def sort_merge_group(iterables, key=None):
    """
    Sort each input iterable by *key*, merge them, and return the merged
    stream grouped by *key* as a list of (key_value, [items]) pairs.
    """
    assert key is not None
    presorted = sort_iterables(iterables, key=key)
    return list(i_merge_group_sorted(presorted, key=key))
def get_first_if_any(values):
    """Return the first element of *values*, or None when *values* is falsy."""
    return values[0] if values else None
def get_first_type_instance_of_group(instance_type, group):
    """
    Given a (key_value, items) *group*, return the first item that is an
    instance of *instance_type*, or None when no item matches.
    Logs (and returns None) if filtering raises a ValueError.
    """
    key_value, items = group
    try:
        matches = [item for item in items if isinstance(item, instance_type)]
        return matches[0] if matches else None
    except ValueError:
        logging.exception("couldn't unpack {0}".format(group))
def i_get_multi_groups(iterables, key=None):
    """
    Sort, merge, and group *iterables* by *key*, keeping only the groups
    that contain more than one item (lazy iterator of (key_value, items)).
    """
    assert key is not None

    def has_duplicates(group):
        return len(group[1]) > 1

    return ifilter(has_duplicates, sort_merge_group(iterables, key=key))
| {
"repo_name": "johnwlockwood/karl_data",
"path": "karld/merger.py",
"copies": "1",
"size": "3172",
"license": "apache-2.0",
"hash": 7613787576726257000,
"line_mean": 27.0707964602,
"line_max": 92,
"alpha_frac": 0.5952080706,
"autogenerated": false,
"ratio": 3.5963718820861676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9688792970265505,
"avg_score": 0.0005573964841327123,
"num_lines": 113
} |
from functools import partial
from itertools import groupby
import numpy as np
import sympy
from devito.exceptions import InvalidOperator
from devito.ir.support import Any, Backward, Forward, IterationSpace
from devito.ir.clusters.analysis import analyze
from devito.ir.clusters.cluster import Cluster, ClusterGroup
from devito.ir.clusters.queue import Queue, QueueStateful
from devito.symbolics import uxreplace, xreplace_indices
from devito.tools import DefaultOrderedDict, as_mapper, flatten, is_integer, timed_pass
from devito.types import ModuloDimension
__all__ = ['clusterize']
def clusterize(exprs):
    """
    Turn a sequence of LoweredEqs into a sequence of Clusters.

    The lowering is a fixed pipeline of passes: dependence-driven scheduling,
    SteppingDimension handling, ConditionalDimension guarding, and finally
    analysis of computational properties (e.g., parallelism).
    """
    # Initialization: one Cluster per expression
    clusters = [Cluster(e, e.ispace, e.dspace) for e in exprs]

    # Setup the IterationSpaces based on data dependence analysis
    clusters = Schedule().process(clusters)

    # Handle SteppingDimensions
    clusters = Stepper().process(clusters)

    # Handle ConditionalDimensions
    clusters = guard(clusters)

    # Determine relevant computational properties (e.g., parallelism)
    clusters = analyze(clusters)

    return ClusterGroup(clusters)
class Schedule(QueueStateful):

    """
    This special Queue produces a new sequence of "scheduled" Clusters, which
    means that:

        * The iteration direction along each Dimension of each Cluster is such
          that the information "naturally flows from one iteration to another".
          For example, in `u[t+1, x] = u[t, x]`, the iteration Dimension `t`
          gets assigned the `Forward` direction, to honor the flow-dependence
          along `t`. Instead, in `u[t-1, x] = u[t, x]`, `t` gets assigned the
          `Backward` direction. This simple rule ensures that when we evaluate
          the LHS, the information on the RHS is up-to-date.

        * If a Cluster has both a flow- and an anti-dependence along a given
          Dimension `x`, then `x` is assigned the `Forward` direction but its
          IterationSpace is _lifted_ such that it cannot be fused with any
          other Clusters within the same iteration Dimension `x`. For example,
          consider the following coupled statements:

            - `u[t+1, x] = f(u[t, x])`
            - `v[t+1, x] = g(v[t, x], u[t, x], u[t+1, x], u[t+2, x]`

          The first statement has a flow-dependence along `t`, while the second
          one has both a flow- and an anti-dependence along `t`, hence the two
          statements will ultimately be kept in separate Clusters and then
          scheduled to different loop nests.

        * If *all* dependences across two Clusters along a given Dimension are
          backward carried depedences, then the IterationSpaces are _lifted_
          such that the two Clusters cannot be fused. This is to maximize
          the number of parallel Dimensions. Essentially, this is what low-level
          compilers call "loop fission" -- only that here it occurs at a much
          higher level of abstraction. For example:

            - `u[x+1] = w[x] + v[x]`
            - `v[x] = u[x] + w[x]

          Here, the two statements will ultimately be kept in separate Clusters
          and then scheduled to different loops; this way, `x` will be a parallel
          Dimension in both Clusters.
    """

    @timed_pass(name='schedule')
    def process(self, clusters):
        """Entry point: schedule `clusters` via the inherited fdta traversal."""
        return self._process_fdta(clusters, 1)

    def callback(self, clusters, prefix, backlog=None, known_break=None):
        """
        Schedule the Clusters sharing the innermost Dimension of `prefix`:
        assign iteration directions and, where conflicting dependences make a
        single schedule impossible, split the sequence via `backlog` so that
        conflicting Clusters end up in lifted, unfusable IterationSpaces.
        """
        if not prefix:
            return clusters

        known_break = known_break or set()
        backlog = backlog or []

        # Take the innermost Dimension -- no other Clusters other than those in
        # `clusters` are supposed to share it
        candidates = prefix[-1].dim._defines

        scope = self._fetch_scope(clusters)

        # Handle the nastiest case -- ambiguity due to the presence of both a
        # flow- and an anti-dependence.
        #
        # Note: in most cases, `scope.d_anti.cause == {}` -- either because
        # `scope.d_anti == {}` or because the few anti dependences are not carried
        # in any Dimension. We exploit this observation so that we only compute
        # `d_flow`, which instead may be expensive, when strictly necessary
        maybe_break = scope.d_anti.cause & candidates
        if len(clusters) > 1 and maybe_break:
            require_break = scope.d_flow.cause & maybe_break
            if require_break:
                backlog = [clusters[-1]] + backlog
                # Try with increasingly smaller ClusterGroups until the ambiguity is gone
                return self.callback(clusters[:-1], prefix, backlog, require_break)

        # Schedule Clusters over different IterationSpaces if this increases parallelism
        for i in range(1, len(clusters)):
            if self._break_for_parallelism(scope, candidates, i):
                return self.callback(clusters[:i], prefix, clusters[i:] + backlog,
                                     candidates | known_break)

        # Compute iteration direction
        idir = {d: Backward for d in candidates if d.root in scope.d_anti.cause}
        if maybe_break:
            idir.update({d: Forward for d in candidates if d.root in scope.d_flow.cause})
        idir.update({d: Forward for d in candidates if d not in idir})

        # Enforce iteration direction on each Cluster
        processed = []
        for c in clusters:
            ispace = IterationSpace(c.ispace.intervals, c.ispace.sub_iterators,
                                    {**c.ispace.directions, **idir})
            processed.append(c.rebuild(ispace=ispace))

        if not backlog:
            return processed

        # Handle the backlog -- the Clusters characterized by flow- and anti-dependences
        # along one or more Dimensions
        idir = {d: Any for d in known_break}
        for i, c in enumerate(list(backlog)):
            ispace = IterationSpace(c.ispace.intervals.lift(known_break),
                                    c.ispace.sub_iterators,
                                    {**c.ispace.directions, **idir})
            dspace = c.dspace.lift(known_break)
            backlog[i] = c.rebuild(ispace=ispace, dspace=dspace)

        return processed + self.callback(backlog, prefix)

    def _break_for_parallelism(self, scope, candidates, i):
        """
        Return True if splitting the Cluster sequence before index `i` would
        expose more parallelism without breaking any data dependence.
        """
        # `test` will be True if there's at least one data-dependence that would
        # break parallelism
        test = False
        for d in scope.d_from_access_gen(scope.a_query(i)):
            if d.is_local or d.is_storage_related(candidates):
                # Would break a dependence on storage
                return False
            if any(d.is_carried(i) for i in candidates):
                if (d.is_flow and d.is_lex_negative) or (d.is_anti and d.is_lex_positive):
                    # Would break a data dependence
                    return False
            test = test or (bool(d.cause & candidates) and not d.is_lex_equal)
        return test
@timed_pass()
def guard(clusters):
    """
    Split Clusters containing conditional expressions into separate Clusters.

    Consecutive expressions with identical ConditionalDimensions stay together;
    each resulting Cluster carries its conditions as `guards`, one conjunction
    per parent Dimension.
    """
    processed = []
    for c in clusters:
        # Group together consecutive expressions with same ConditionalDimensions
        for cds, g in groupby(c.exprs, key=lambda e: tuple(e.conditionals)):
            exprs = list(g)

            if not cds:
                # Unconditional expressions pass through untouched
                processed.append(c.rebuild(exprs=exprs))
                continue

            # Chain together all conditions from all expressions in `c`
            guards = {}
            for cd in cds:
                condition = guards.setdefault(cd.parent, [])
                for e in exprs:
                    try:
                        # Use the first expression in the group that carries
                        # a condition for `cd`
                        condition.append(e.conditionals[cd])
                        break
                    except KeyError:
                        pass
            # One And-conjunction per parent Dimension; evaluate=False keeps
            # SymPy from simplifying/reordering the conditions
            guards = {d: sympy.And(*v, evaluate=False) for d, v in guards.items()}

            # Construct a guarded Cluster
            processed.append(c.rebuild(exprs=exprs, guards=guards))

    return ClusterGroup(processed)
class Stepper(Queue):

    """
    Produce a new sequence of Clusters in which the IterationSpaces carry the
    sub-iterators induced by a SteppingDimension.
    """

    def process(self, clusters):
        """Entry point: process `clusters` via the inherited fdta traversal."""
        return self._process_fdta(clusters, 1)

    def callback(self, clusters, prefix):
        """
        For the innermost Dimension `d` of `prefix`, replace every stepping
        index access function (e.g. `t+1`) with a ModuloDimension, and attach
        the created ModuloDimensions to the Clusters' IterationSpaces.
        """
        if not prefix:
            return clusters

        d = prefix[-1].dim

        # Nothing to do unless some Cluster sub-iterates `d` via a
        # SteppingDimension
        subiters = flatten([c.ispace.sub_iterators.get(d, []) for c in clusters])
        subiters = {i for i in subiters if i.is_Stepping}
        if not subiters:
            return clusters

        # Collect the index access functions along `d`, e.g., `t + 1` where `t` is
        # a SteppingDimension for `d = time`
        # Nested mapping: buffer size -> SteppingDimension -> access functions
        mapper = DefaultOrderedDict(lambda: DefaultOrderedDict(set))
        for c in clusters:
            indexeds = [a.indexed for a in c.scope.accesses if a.function.is_Tensor]

            for i in indexeds:
                try:
                    iaf = i.indices[d]
                except KeyError:
                    continue

                # Sanity checks
                sis = iaf.free_symbols & subiters
                if len(sis) == 0:
                    continue
                elif len(sis) == 1:
                    si = sis.pop()
                else:
                    raise InvalidOperator("Cannot use multiple SteppingDimensions "
                                          "to index into a Function")
                size = i.function.shape_allocated[d]
                assert is_integer(size)

                mapper[size][si].add(iaf)

        # Construct the ModuloDimensions
        mds = []
        for size, v in mapper.items():
            for si, iafs in list(v.items()):
                # Offsets are sorted so that the semantic order (t0, t1, t2) follows
                # SymPy's index ordering (t, t-1, t+1) after modulo replacement so
                # that associativity errors are consistent. This corresponds to
                # sorting offsets {-1, 0, 1} as {0, -1, 1} assigning -inf to 0
                siafs = sorted(iafs, key=lambda i: -np.inf if i - si == 0 else (i - si))

                for iaf in siafs:
                    name = '%s%d' % (si.name, len(mds))
                    offset = uxreplace(iaf, {si: d.root})
                    mds.append(ModuloDimension(name, si, offset, size, origin=iaf))

        # Replacement rule for ModuloDimensions: only apply a substitution to
        # expressions whose Function has the matching allocated size along `d`
        def rule(size, e):
            try:
                return e.function.shape_allocated[d] == size
            except (AttributeError, KeyError):
                return False

        # Reconstruct the Clusters
        processed = []
        for c in clusters:
            # Apply substitutions to expressions
            # Note: In an expression, there could be `u[t+1, ...]` and `v[t+1,
            # ...]`, where `u` and `v` are TimeFunction with circular time
            # buffers (save=None) *but* different modulo extent. The `t+1`
            # indices above are therefore conceptually different, so they will
            # be replaced with the proper ModuloDimension through two different
            # calls to `xreplace_indices`
            exprs = c.exprs
            groups = as_mapper(mds, lambda d: d.modulo)
            for size, v in groups.items():
                mapper = {md.origin: md for md in v}
                func = partial(xreplace_indices, mapper=mapper, key=partial(rule, size))
                exprs = [e.apply(func) for e in exprs]

            # Augment IterationSpace with the new ModuloDimensions along `d`
            ispace = IterationSpace(c.ispace.intervals,
                                    {**c.ispace.sub_iterators, **{d: tuple(mds)}},
                                    c.ispace.directions)

            processed.append(c.rebuild(exprs=exprs, ispace=ispace))

        return processed
| {
"repo_name": "opesci/devito",
"path": "devito/ir/clusters/algorithms.py",
"copies": "1",
"size": "11936",
"license": "mit",
"hash": 6383802306931843000,
"line_mean": 39.7372013652,
"line_max": 90,
"alpha_frac": 0.591655496,
"autogenerated": false,
"ratio": 4.151652173913043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5243307669913043,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import imap, ifilter, islice, takewhile, dropwhile
import operator
from pipetools.debug import set_name, repr_args, get_name
from pipetools.decorators import data_structure_builder, regex_condition
from pipetools.decorators import pipe_util, auto_string_formatter
from pipetools.main import pipe, X, _iterable
KEY, VALUE = X[0], X[1]
@pipe_util
@auto_string_formatter
@data_structure_builder
def foreach(function):
    """
    Returns a function that takes an iterable and returns an iterator over the
    results of calling `function` on each item of the iterable.
    >>> xrange(5) > foreach(factorial) | list
    [1, 1, 2, 6, 24]
    """
    # Lazy: results are computed only as the returned iterator is consumed.
    return partial(imap, function)
@pipe_util
def foreach_do(function):
    """
    Like :func:`foreach` but is evaluated immediately and doesn't return
    anything.
    Useful when only the side-effects of `function` matter::
        open('addresses.txt') > foreach(geocode) | foreach_do(launch_missile)
    (With :func:`foreach` nothing would happen apart from an iterator being
    created.)
    """
    def _consume(iterable):
        # Eagerly walk the whole iterable, calling `function` purely for its
        # side effects and discarding the results.
        for element in iterable:
            function(element)
    return _consume
@pipe_util
@regex_condition
def where(condition):
    """
    Pipe-able lazy filter.
    >>> odd_range = xrange | where(X % 2) | list
    >>> odd_range(10)
    [1, 3, 5, 7, 9]
    """
    def _lazy_filter(iterable):
        # ifilter evaluates `condition` only as items are pulled.
        return ifilter(condition, iterable)
    return _lazy_filter
@pipe_util
@regex_condition
def where_not(condition):
    """
    Inverted :func:`where` -- keeps only items that do *not* satisfy
    `condition`.
    """
    negated = pipe | condition | operator.not_
    return partial(ifilter, negated)
@pipe_util
@data_structure_builder
def sort_by(function):
    """
    Sorts an incoming sequence by using the given `function` as key.
    >>> xrange(10) > sort_by(-X)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    Supports automatic data-structure creation::
        users > sort_by([X.last_name, X.first_name])
    There is also a shortcut for ``sort_by(X)`` called ``sort``:
    >>> [4, 5, 8, -3, 0] > sort
    [-3, 0, 4, 5, 8]
    And (as of ``0.2.3``) a shortcut for reversing the sort:
    >>> 'asdfaSfa' > sort_by(X.lower()).descending
    ['s', 'S', 'f', 'f', 'd', 'a', 'a', 'a']
    """
    ascending = partial(sorted, key=function)
    # Expose the reverse-order variant through the `.descending` attribute.
    ascending.attrs = {'descending': _descending_sort_by(function)}
    return ascending
@pipe_util
def _descending_sort_by(function):
    # Reverse-order counterpart backing `sort_by(...).descending`.
    return partial(sorted, key=function, reverse=True)
sort = sort_by(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def debug_print(function):
    """
    Prints function applied on input and returns the input.
    ::
        foo = (pipe
            | something
            | debug_print(X.get_status())
            | something_else
            | foreach(debug_print("attr is: {0.attr}"))
            | etc)
    """
    def debug(thing):
        # Python 2 print statement; the value itself passes through untouched.
        print function(thing)
        return thing
    return debug
@pipe_util
def as_args(function):
    """
    Applies the sequence in the input as positional arguments to `function`.
    ::
        some_lists > as_args(izip)
    """
    def _splat(sequence):
        return function(*sequence)
    return _splat
@pipe_util
def as_kwargs(function):
    """
    Applies the dictionary in the input as keyword arguments to `function`.
    """
    def _splat_keywords(mapping):
        return function(**mapping)
    return _splat_keywords
def take_first(count):
    """
    Assumes an iterable on the input, returns an iterable with first `count`
    items from the input (or possibly less, if there isn't that many).
    >>> xrange(9000) > where(X % 100 == 0) | take_first(5) | tuple
    (0, 100, 200, 300, 400)
    """
    # islice stays lazy and tolerates iterables shorter than `count`.
    head = lambda iterable: islice(iterable, count)
    return pipe | set_name('take_first(%s)' % count, head)
def drop_first(count):
    """
    Assumes an iterable on the input, returns an iterable with identical items
    except for the first `count`.
    >>> xrange(10) > drop_first(5) | tuple
    (5, 6, 7, 8, 9)
    """
    def _drop_first(iterable):
        # `g` yields `count` truthy integers; `unless` converts the
        # StopIteration raised after exhaustion into a falsy None, so
        # dropwhile drops exactly `count` leading items regardless of the
        # items' own truthiness.
        g = (x for x in xrange(1, count + 1))
        return dropwhile(lambda i: unless(StopIteration, g.next)(), iterable)
    return pipe | set_name('drop_first(%s)' % count, _drop_first)
def unless(exception_class_or_tuple, func, *args, **kwargs):
    """
    When `exception_class_or_tuple` occurs while executing `func`, it will
    be caught and ``None`` will be returned.
    >>> f = where(X > 10) | list | unless(IndexError, X[0])
    >>> f([5, 8, 12, 4])
    12
    >>> f([1, 2, 3])
    None
    """
    @pipe_util
    @auto_string_formatter
    @data_structure_builder
    def construct_unless(function):
        # a wrapper so we can re-use the decorators
        def _unless(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except exception_class_or_tuple:
                # Swallow only the requested exception(s); implicit None.
                pass
        return _unless
    # Lazily built display name -- repr work happens only when requested.
    name = lambda: 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(
        filter(None, (get_name(func), repr_args(*args, **kwargs)))))
    return set_name(name, construct_unless(func, *args, **kwargs))
@pipe_util
@regex_condition
def select_first(condition):
    """
    Returns first item from input sequence that satisfies `condition`. Or
    ``None`` if none does.
    >>> ['py', 'pie', 'pi'] > select_first(X.startswith('pi'))
    'pie'
    As of ``0.2.1`` you can also
    :ref:`directly use regular expressions <auto-regex>` and write the above
    as:
    >>> ['py', 'pie', 'pi'] > select_first('^pi')
    'pie'
    There is also a shortcut for ``select_first(X)`` called ``first_of``:
    >>> first_of(['', None, 0, 3, 'something'])
    3
    >>> first_of([])
    None
    """
    # Filter lazily, then pull the first match; `unless` maps an exhausted
    # iterator's StopIteration to None.
    return where(condition) | unless(StopIteration, X.next())
first_of = select_first(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def group_by(function):
    """
    Groups input sequence by `function`.
    Returns an iterator over a sequence of tuples where the first item is a
    result of `function` and the second one a list of items matching this
    result.
    Ordering of the resulting iterator is undefined, but ordering of the items
    in the groups is preserved.
    >>> [1, 2, 3, 4, 5, 6] > group_by(X % 2) | list
    [(0, [2, 4, 6]), (1, [1, 3, 5])]
    """
    def _group_by(seq):
        groups = {}
        for item in seq:
            # Appends preserve the original relative order within a group.
            groups.setdefault(function(item), []).append(item)
        return groups.iteritems()
    return _group_by
def _flatten(x):
    # Depth-first recursive flattening; non-iterables are yielded as-is.
    if not _iterable(x):
        yield x
        return
    for item in x:
        for flattened in _flatten(item):
            yield flattened
def flatten(*args):
    """
    Flattens an arbitrarily deep nested iterable(s).
    """
    return _flatten(args)
flatten = pipe | flatten
def count(iterable):
    """
    Returns the number of items in `iterable`.
    """
    # Works for any iterable (including generators); consumes the input.
    return sum(1 for whatever in iterable)
count = pipe | count
@pipe_util
@regex_condition
def take_until(condition):
    """
    >>> [1, 4, 6, 4, 1] > take_until(X > 5) | list
    [1, 4]
    """
    # Yield items while the condition has NOT yet become true.
    not_yet = pipe | condition | operator.not_
    return partial(takewhile, not_yet)
| {
"repo_name": "jD0T/pipetools",
"path": "pipetools/utils.py",
"copies": "3",
"size": "7002",
"license": "mit",
"hash": -1276415535051169300,
"line_mean": 22.8976109215,
"line_max": 78,
"alpha_frac": 0.6033990288,
"autogenerated": false,
"ratio": 3.4957563654518222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5599155394251822,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import imap, ifilter, islice, takewhile
import operator
from pipetools.debug import set_name, repr_args, get_name
from pipetools.decorators import data_structure_builder
from pipetools.decorators import pipe_util, auto_string_formatter
from pipetools.main import pipe, X, _iterable
# Shorthand getters for (key, value) pairs: KEY -> item 0, VALUE -> item 1.
KEY, VALUE = X[0], X[1]
@pipe_util
@auto_string_formatter
@data_structure_builder
def foreach(function):
    """
    Returns a function that takes an iterable and returns an iterator over the
    results of calling `function` on each item of the iterable.
    >>> xrange(5) > foreach(factorial) | list
    [1, 1, 2, 6, 24]
    """
    # Lazy: results are computed only as the returned iterator is consumed.
    return partial(imap, function)
@pipe_util
def foreach_do(function):
    """
    Like :func:`foreach` but is evaluated immediately and doesn't return
    anything.
    For the occasion that you just want to do some side-effects::
        open('addresses.txt') > foreach(geocode) | foreach_do(launch_missile)
    -- With :func:`foreach` nothing would happen (except an iterator being
    created)
    """
    def f(iterable):
        # Eagerly exhaust the iterable purely for `function`'s side effects.
        for item in iterable:
            function(item)
    return f
@pipe_util
def where(function):
    """
    Pipe-able lazy filter.
    >>> odd_range = xrange | where(X % 2) | list
    >>> odd_range(10)
    [1, 3, 5, 7, 9]
    """
    return partial(ifilter, function)
@pipe_util
def where_not(function):
    """
    Inverted :func:`where` -- keeps items that do *not* satisfy `function`.
    """
    # Negate by piping the condition's result through operator.not_.
    return partial(ifilter, pipe | function | operator.not_)
@pipe_util
@data_structure_builder
def sort_by(function):
    """
    Sorts an incoming sequence by using the given `function` as key.
    >>> xrange(10) > sort_by(-X)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    Supports automatic data-structure creation::
        users > sort_by([X.last_name, X.first_name])
    There is also a shortcut for ``sort_by(X)`` called ``sort``:
    >>> [4, 5, 8, -3, 0] > sort
    [-3, 0, 4, 5, 8]
    """
    return partial(sorted, key=function)
sort = sort_by(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def debug_print(function):
    """
    Prints function applied on input and returns the input.
    ::
        foo = (pipe
            | something
            | debug_print(X.get_status())
            | something_else
            | foreach(debug_print("attr is: {0.attr}"))
            | etc)
    """
    def debug(thing):
        # Python 2 print statement; the value itself passes through untouched.
        print function(thing)
        return thing
    return debug
@pipe_util
def as_args(function):
    """
    Applies the sequence in the input as positional arguments to `function`.
    ::
        some_lists > as_args(izip)
    """
    return lambda x: function(*x)
@pipe_util
def as_kwargs(function):
    """
    Applies the dictionary in the input as keyword arguments to `function`.
    """
    return lambda x: function(**x)
def take_first(count):
    """
    Assumes an iterable on the input, returns an iterable with first `count`
    items from the input (or possibly less, if there isn't that many).
    >>> xrange(9000) > where(X % 100 == 0) | take_first(5) | tuple
    (0, 100, 200, 300, 400)
    """
    def _take_first(iterable):
        # islice stays lazy and tolerates short inputs.
        return islice(iterable, count)
    return pipe | set_name('take_first(%s)' % count, _take_first)
def unless(exception_class_or_tuple, func, *args, **kwargs):
    """
    When `exception_class_or_tuple` occurs while executing `func`, it will
    be caught and ``None`` will be returned.
    >>> f = where(X > 10) | list | unless(IndexError, X[0])
    >>> f([5, 8, 12, 4])
    12
    >>> f([1, 2, 3])
    None
    """
    @pipe_util
    @auto_string_formatter
    @data_structure_builder
    def construct_unless(function):
        # a wrapper so we can re-use the decorators
        def _unless(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except exception_class_or_tuple:
                # Swallow only the requested exception(s); implicit None.
                pass
        return _unless
    # Display name is built eagerly here (newer versions build it lazily).
    name = 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(
        filter(None, (get_name(func), repr_args(*args, **kwargs)))))
    return set_name(name, construct_unless(func, *args, **kwargs))
@pipe_util
def select_first(condition):
    """
    Returns first item from input sequence that satisfies `condition`. Or
    ``None`` if none does.
    >>> ['py', 'pie', 'pi'] > select_first(X.startswith('pi'))
    'pie'
    There is also a shortcut for ``select_first(X)`` called ``first_of``:
    >>> first_of(['', None, 0, 3, 'something'])
    3
    >>> first_of([])
    None
    """
    # Filter lazily, then pull the first match; `unless` maps an exhausted
    # iterator's StopIteration to None.
    return where(condition) | unless(StopIteration, X.next())
first_of = select_first(X)
@pipe_util
@auto_string_formatter
@data_structure_builder
def group_by(function):
    """
    Returns a dictionary of input sequence items grouped by `function`.
    """
    def _group_by(seq):
        result = {}
        for item in seq:
            # Appends preserve the original relative order within a group.
            result.setdefault(function(item), []).append(item)
        return result
    return _group_by
def _flatten(x):
    # Depth-first recursive flattening; non-iterables are yielded as-is.
    if not _iterable(x):
        yield x
    else:
        for y in x:
            for z in _flatten(y):
                yield z
def flatten(*args):
    """
    Flattens an arbitrarily deep nested iterable(s).
    """
    return _flatten(args)
flatten = pipe | flatten
def count(iterable):
    """
    Returns the number of items in `iterable`.
    """
    # Works for any iterable (including generators); consumes the input.
    return sum(1 for whatever in iterable)
count = pipe | count
@pipe_util
def take_until(function):
    """
    >>> [1, 4, 6, 4, 1] > take_until(X > 5) | list
    [1, 4]
    """
    # Yield items while the condition has NOT yet become true.
    return partial(takewhile, pipe | function | operator.not_)
| {
"repo_name": "starenka/pipetools",
"path": "pipetools/utils.py",
"copies": "1",
"size": "5594",
"license": "mit",
"hash": -4169721544889463300,
"line_mean": 21.8326530612,
"line_max": 78,
"alpha_frac": 0.6011798355,
"autogenerated": false,
"ratio": 3.563057324840764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9660559371765016,
"avg_score": 0.0007355577151495518,
"num_lines": 245
} |
from functools import partial
from itertools import imap
import string
# Higher order
from standardize_address import expand_standardize_abbr
from standardize_address import lookup_standardize_abbr
from standardize_address import title_case_string
def lower_getter(getter, data):
    """
    Lower cases the result of the getter given data.
    """
    return string.lower(str(getter(data)))
def lower_list_getter(getter, data):
    """
    Lower cases the items in the result of the getter given data.
    """
    # Tuples are lowered element-wise; any other value is treated as scalar.
    value = getter(data)
    if isinstance(value, tuple):
        return map(string.lower, map(str, value))
    return string.lower(str(value))
def title_getter(getter, data):
    """
    Title cases the result of the getter given data.
    """
    return title_case_string(str(getter(data)))
def title_list_getter(getter, data):
    """
    Title cases the items in the result of the getter given data.
    """
    # NOTE(review): unlike lower_list_getter this assumes the gotten value is
    # iterable -- confirm callers never pass a scalar here.
    return map(title_case_string, map(str, getter(data)))
def number_getter(getter, data):
    """
    Gets the leading digits from the result of the getter given data.
    """
    return get_number_prefix(getter(data))
def join_stripped_gotten_value(sep, getters, data):
    """
    Join the values, coerced to str and stripped of whitespace padding,
    from entity, gotten with collection of getters,
    with the separator.
    :param sep: :class: `str` Separator of values.
    :param getters: collection of callables takes that data and returns value.
    :param data: argument for the getters
    """
    raw_values = [getter(data) for getter in getters]
    # Drop falsy raw values, stringify and strip, then drop empty strings.
    stripped = imap(string.strip, imap(str, filter(None, raw_values)))
    return sep.join(filter(None, stripped))
def join_stripped_values(sep, collection_getter, data):
    """
    Join the values, coerced to str and stripped of whitespace padding,
    from entity, gotten with collection_getter,
    with the separator.
    :param sep: :class: `str` Separator of values.
    :param collection_getter: callable takes that data and returns collection.
    :param data: argument for the collection_getter
    """
    value = collection_getter(data)
    if not isinstance(value, tuple):
        # Normalize a scalar result into a one-item tuple.
        value = (value,)
    stripped = imap(string.strip, imap(str, filter(None, value)))
    return sep.join(filter(None, stripped))
# High order
def get_full_name(name_parts_getter, data):
    """
    Space join the non-empty values from data with the name parts getter.
    """
    return join_stripped_values(' ', name_parts_getter, data)
def get_phone(phone_parts_getter, data):
    """
    Dash join the non-empty values from data with the phone parts getter.
    The phone_parts_getter should return
    the area code, exchange and last four.
    """
    return join_stripped_values('-', phone_parts_getter, data)
def get_zip(zip_parts_getter, data):
    """
    Dash join non-empty values from data with the zip parts getter.
    """
    # Joins ZIP5 with the +4 extension when present (e.g. '12345-6789').
    return join_stripped_values('-', zip_parts_getter, data)
# Addresses
def get_number_prefix(number):
    """
    Return the leading digits of `number` as a string ('' when there are
    none). Purely numeric values are normalized through int() first.
    """
    number = str(number)
    if not number:
        return ""
    try:
        number = str(int(number))
    except (ValueError, TypeError), e:
        # Not a pure integer (e.g. '12A'): keep only the leading digit run.
        digits = []
        for digit in number:
            if digit in string.digits:
                digits.append(digit)
            else:
                break
        number = "".join(digits)
    return number
def get_raw_address_label(address_parts_getter, data):
    """
    Get the address label for use with in the geocoder.
    Space join non-empty parts of the address label
    from the data.
    """
    return join_stripped_values(' ', address_parts_getter, data)
def get_geocodable_address_label(house_number_getter,
                                 street_name_getter,
                                 data):
    """
    Get the address label for use with the geocoder
    using separate getters. Space join non-empty parts
    """
    # Expand abbreviations and title-case before geocoding; apostrophes are
    # stripped -- presumably the geocoder rejects them (TODO confirm).
    value = title_case_string(
        expand_standardize_abbr(join_stripped_gotten_value(
            ' ', (house_number_getter,
                  street_name_getter), data)))
    if "'" in value:
        return value.replace("'", "")
    return value
def get_address_label(address_parts_getter, data):
    # Standardize abbreviations in the raw space-joined label.
    return lookup_standardize_abbr(
        get_raw_address_label(address_parts_getter, data))
def get_address(address_label_getter,
                city_getter,
                state_getter,
                zip_parts_getter,
                data):
    """
    Get the address for use in the geocoder.
    Comma-space join non-empty parts of the address
    from the data.
    """
    # Returned as a one-element list -- presumably the geocoder API expects
    # a list of candidate strings (TODO confirm against callers).
    return [join_stripped_gotten_value(
        ', ', (
            address_label_getter,
            city_getter,
            state_getter,
            partial(get_zip, zip_parts_getter)
        ),
        data)]
def get_separated_address(address_label_getter,
                          city_getter,
                          state_getter,
                          zip_parts_getter,
                          data):
    """
    Get the address parts for use in the geocoder as a
    (label, city, state, zip) tuple.
    """
    return (address_label_getter(data),
            city_getter(data),
            state_getter(data),
            get_zip(zip_parts_getter, data))
# get_geocoder_address just passes in the get_raw_address_label
# get_full_address passes in address label
# getter that runs lookup_standardize_abbr on the value.
def get_unit(unit_parts_getter, data):
    # Space join the unit parts (e.g. 'Apt 4B').
    return join_stripped_values(' ', unit_parts_getter, data)
def get_zip_road(zip5_getter, road_getter, data):
    # Pipe-join ZIP5 and road, e.g. for use as a composite lookup key.
    return join_stripped_gotten_value('|', (zip5_getter, road_getter), data)
| {
"repo_name": "johnwlockwood/txt2vote",
"path": "txttovote/info_extractors/__init__.py",
"copies": "1",
"size": "5782",
"license": "apache-2.0",
"hash": 4299744699552664600,
"line_mean": 26.2735849057,
"line_max": 78,
"alpha_frac": 0.6191629194,
"autogenerated": false,
"ratio": 4.02086230876217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.514002522816217,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import izip
import csv
# n k
# a b c r
## {{{ http://code.activestate.com/recipes/164740/ (r1)
import sys
def iterslice(sequence, start=None, stop=None, step=1):
    """
    Extended-slice generator that also works on unsized / unindexable
    iterables (ActiveState recipe 164740). Negative start/stop require a
    sized sequence; a negative step requires an indexable one.
    """
    if step == 0:
        raise ValueError, "Attempt to use 0 as step value"
    if stop is None:
        # Unbounded in the direction of travel (Python 2 sys.maxint).
        stop = sys.maxint*cmp(step,0)
    elif stop<0:
        try:
            stop = max(len(sequence)+stop,0)
        except TypeError:
            raise TypeError, "Negative slice index on unsized sequence"
    if start is None:
        if step>0:
            start = 0
        else:
            try:
                start = len(sequence)-1
            except TypeError:
                raise TypeError, ("Unable to start from the end of an "
                                  "unsized sequence")
    elif start<0:
        try:
            start = max(len(sequence)+start,0)
        except TypeError:
            raise TypeError, "Negative slice index on unsized sequence"
    try:
        # Fast path: the sequence supports indexing.
        for i in xrange(start, stop, step):
            yield sequence[i]
    except IndexError:
        return
    except TypeError:
        if step<0:
            raise TypeError, ("Attempt to use negative step on an "
                              "unindexable sequence")
        #check if the sequence support iterator protocol
        itr = iter(sequence)
        try:
            for i in xrange(start):
                itr.next()
            while start<stop:
                yield itr.next()
                for i in xrange(step-1):
                    itr.next()
                start+=step
        except StopIteration:
            return
def general_m(i, a, b, c, r):
    """
    Return the i-th term of the recurrence m(0) = a, m(i) = (b*m(i-1) + c) % r.

    Computed iteratively: the original recursive version was O(i) deep (it
    hit the recursion limit for large i) and made get_k quadratic overall.
    """
    value = a
    for _ in range(i):
        value = (b * value + c) % r
    return value
def get_k(k, a, b, c, r):
    """
    Return the first k terms [m(0), ..., m(k-1)] of the recurrence in O(k)
    by extending from the previous term instead of recomputing each one.
    Returns [] for k <= 0.
    """
    terms = []
    if k > 0:
        terms.append(a)
        for _ in range(k - 1):
            terms.append((b * terms[-1] + c) % r)
    return terms
##class mm(object):
## def __init__(self, a, b, c, r):
## self.
def get_min_nonneg_int(sofar):
    """
    Return the smallest non-negative integer not present in `sofar`
    (the "mex" of the window).

    Fixes the original gap-scanning version, which could never return 0
    (it only looked at gaps above existing elements) and raised an
    unhandled IndexError on empty input.
    """
    present = set(sofar)
    candidate = 0
    while candidate in present:
        candidate += 1
    return candidate
def get_min_loop(sofar):
    """
    Infinite generator: yield the minimal excluded non-negative integer
    ("mex") of the sliding window `sofar`, then slide the window forward by
    appending the yielded value and dropping the oldest element.
    NOTE(review): each yielded item adds a level of generator recursion, so
    very long runs will exhaust the interpreter recursion limit.
    """
    x = get_min_nonneg_int(sofar)
    yield x
    for y in get_min_loop(sofar[1:] + [x]):
        yield y
##    s = sorted(sofar)
##    for index, item in enumerate(s):
##        if index == len(s) - 1:
##            while True:
##                yield item
##                item += 1
##        elif index == 0:
##            for subitem in range(1, item):
##                yield subitem
##        else:
##            diff = range(item+1, s[index+1])
##            for subitem in diff:
##                yield subitem
def get_nth_value(n, k, sofar):
    """
    Return a term of the extended sequence, where `sofar` holds the first k
    terms and the rest come from the mex generator. The first loop only
    prints debugging info; the generator is restarted for the answer.
    NOTE(review): the first loop counts to n-k but the second counts to n --
    one of these is likely off by k; verify against expected output.
    """
    a=0
    newn = n - k
    e = []
    for x in get_min_loop(sofar):
        a+=1
        e.append(x)
        if a == newn:
            print 'minlist:' +str(e)
            print 'len k+minlist:' + str(len(sofar)+len(e)) + ', n:' + str(n)
            break
    a=0
    for x in get_min_loop(sofar):
        a+=1
        if a == n:
            return x
def doit(n, k, a, b, c, r):
    """
    Solve one case: build the first k recurrence terms, then extend with the
    mex rule and return the requested term. Prints debugging output.
    """
##    m = partial(general_m, a=a, b=b, c=c, r=r)
##    global thelist
    thelist = get_k(k, a, b, c, r)
    print thelist
    print len(thelist)
    print 1 in thelist
##    global x
##    x = get_min_nonneg_int(thelist)
##    for x in range(1, n+1):
##        print get_nth_value(x, thelist)
    return get_nth_value(n, k, thelist)
def main():
    """
    Read cases from a hard-coded input file and write 'Case #i: answer'
    lines to a hard-coded output file (Facebook Hacker Cup format).
    """
    #infile = open(r'C:/Users/JJ Fliegelman/Downloads/find_the_mintxt.txt', 'r')
    infile = open(r'C:/FB/input.txt', 'r')
    lines = list( csv.reader(infile, delimiter=' ') )
    with open(r'C:/FB/out.txt', 'w') as outfile:
        # Case lines come in pairs after the header: "n k" then "a b c r".
        for index, (line1, line2) in enumerate(izip(lines[1::2], lines[2::2])):
            #if index == 1:
            n, k = map(int, line1)
            a, b, c, r = map(int, line2)
            outfile.write('Case #{}: {}\n'.format(index+1, doit(n,k,a,b,c,r)))
if __name__ == '__main__':
    main()
| {
"repo_name": "jdotjdot/Coding-Fun",
"path": "Facebook Hacker Cup/findthemin.py",
"copies": "1",
"size": "4040",
"license": "mit",
"hash": 3013382388439178000,
"line_mean": 27.6524822695,
"line_max": 82,
"alpha_frac": 0.495049505,
"autogenerated": false,
"ratio": 3.3087633087633086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4303812813763308,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import (
dropwhile,
takewhile,
islice,
count,
product,
chain,
starmap,
filterfalse,
)
import collections
import types
from functional.execution import ExecutionStrategies
#: Defines a Transformation from a name, function, and execution_strategies.
#: `function` maps an input sequence to the transformed sequence;
#: `execution_strategies` is a set of ExecutionStrategies flags (or None).
Transformation = collections.namedtuple(
    "Transformation", ["name", "function", "execution_strategies"]
)
#: Sentinel transformation marking a cache point in a lineage.
CACHE_T = Transformation("cache", None, None)
def name(function):
    """
    Retrieve a pretty name for the function

    :param function: function to get name from
    :return: pretty name
    """
    if isinstance(function, types.FunctionType):
        return function.__name__
    # Builtins, partials and other callables fall back to their str() form.
    return str(function)
def map_t(func):
    """
    Transformation for Sequence.map

    :param func: map function
    :return: transformation
    """
    label = "map({0})".format(name(func))
    return Transformation(label, partial(map, func), {ExecutionStrategies.PARALLEL})


def select_t(func):
    """
    Transformation for Sequence.select

    :param func: select function
    :return: transformation
    """
    label = "select({0})".format(name(func))
    return Transformation(label, partial(map, func), {ExecutionStrategies.PARALLEL})


def starmap_t(func):
    """
    Transformation for Sequence.starmap and Sequence.smap

    :param func: starmap function
    :return: transformation
    """
    label = "starmap({})".format(name(func))
    return Transformation(label, partial(starmap, func), {ExecutionStrategies.PARALLEL})
def filter_t(func):
    """
    Transformation for Sequence.filter

    :param func: filter function
    :return: transformation
    """
    label = "filter({0})".format(name(func))
    return Transformation(label, partial(filter, func), {ExecutionStrategies.PARALLEL})


def where_t(func):
    """
    Transformation for Sequence.where

    :param func: where function
    :return: transformation
    """
    label = "where({0})".format(name(func))
    return Transformation(label, partial(filter, func), {ExecutionStrategies.PARALLEL})


def filter_not_t(func):
    """
    Transformation for Sequence.filter_not

    :param func: filter_not function
    :return: transformation
    """
    label = "filter_not({0})".format(name(func))
    return Transformation(label, partial(filterfalse, func), {ExecutionStrategies.PARALLEL})
def reversed_t():
    """
    Transformation for Sequence.reverse

    :return: transformation
    """
    # Use a set for execution_strategies, consistent with every other
    # transformation in this module (this one alone used a list).
    return Transformation("reversed", reversed, {ExecutionStrategies.PRE_COMPUTE})
def slice_t(start, until):
    """
    Transformation for Sequence.slice

    :param start: start index
    :param until: until index (does not include element at until)
    :return: transformation
    """
    def _slice(sequence):
        return islice(sequence, start, until)
    return Transformation("slice({0}, {1})".format(start, until), _slice, None)


def distinct_t():
    """
    Transformation for Sequence.distinct

    :return: transformation
    """
    # Set construction removes duplicates; order is not preserved.
    return Transformation("distinct", lambda sequence: iter(set(sequence)), None)
def distinct_by_t(func):
    """
    Transformation for Sequence.distinct_by

    :param func: distinct_by function
    :return: transformation
    """
    def distinct_by(sequence):
        # Keep only the first element seen for each key.
        seen = {}
        for element in sequence:
            seen.setdefault(func(element), element)
        return seen.values()
    return Transformation("distinct_by({0})".format(name(func)), distinct_by, None)
def sorted_t(key=None, reverse=False):
    """
    Transformation for Sequence.sorted

    :param key: key to sort by
    :param reverse: reverse or not
    :return: transformation
    """
    # NOTE: the display name omits the key, unlike order_by_t's.
    return Transformation(
        "sorted", lambda sequence: sorted(sequence, key=key, reverse=reverse), None
    )
def order_by_t(func):
    """
    Transformation for Sequence.order_by

    :param func: order_by function
    :return: transformation
    """
    return Transformation(
        "order_by({0})".format(name(func)),
        lambda sequence: sorted(sequence, key=func),
        None,
    )
def drop_right_t(n):
    """
    Transformation for Sequence.drop_right

    :param n: number to drop from right
    :return: transformation
    """
    # Dropping <= 0 elements keeps everything (slice end of None).
    end_index = -n if n > 0 else None
    return Transformation(
        "drop_right({0})".format(n), lambda sequence: sequence[:end_index], None
    )


def drop_t(n):
    """
    Transformation for Sequence.drop

    :param n: number to drop from left
    :return: transformation
    """
    def _drop(sequence):
        return islice(sequence, n, None)
    return Transformation("drop({0})".format(n), _drop, None)


def drop_while_t(func):
    """
    Transformation for Sequence.drop_while

    :param func: drops while func is true
    :return: transformation
    """
    label = "drop_while({0})".format(name(func))
    return Transformation(label, partial(dropwhile, func), None)


def take_t(n):
    """
    Transformation for Sequence.take

    :param n: number to take
    :return: transformation
    """
    def _take(sequence):
        return islice(sequence, 0, n)
    return Transformation("take({0})".format(n), _take, None)


def take_while_t(func):
    """
    Transformation for Sequence.take_while

    :param func: takes while func is True
    :return: transformation
    """
    label = "take_while({0})".format(name(func))
    return Transformation(label, partial(takewhile, func), None)
def flat_map_impl(func, sequence):
    """
    Implementation for flat_map_t

    :param func: function to map
    :param sequence: sequence to flat_map over
    :return: flat_map generator
    """
    for element in sequence:
        # Each element expands into zero or more output values.
        yield from func(element)


def flat_map_t(func):
    """
    Transformation for Sequence.flat_map

    :param func: function to flat_map
    :return: transformation
    """
    label = "flat_map({0})".format(name(func))
    return Transformation(label, partial(flat_map_impl, func), {ExecutionStrategies.PARALLEL})
def flatten_t():
    """
    Transformation for Sequence.flatten

    :return: transformation
    """
    # Flattening one level is flat_map with the identity function.
    identity = lambda x: x
    return Transformation(
        "flatten", partial(flat_map_impl, identity), {ExecutionStrategies.PARALLEL}
    )


def zip_t(zip_sequence):
    """
    Transformation for Sequence.zip

    :param zip_sequence: sequence to zip with
    :return: transformation
    """
    def _zip(sequence):
        return zip(sequence, zip_sequence)
    return Transformation("zip(<sequence>)", _zip, None)


def zip_with_index_t(start):
    """
    Transformation for Sequence.zip_with_index

    :return: transformation
    """
    def _zip_with_index(sequence):
        return zip(sequence, count(start=start))
    return Transformation("zip_with_index", _zip_with_index, None)


def enumerate_t(start):
    """
    Transformation for Sequence.enumerate

    :param start: start index for enumerate
    :return: transformation
    """
    def _enumerate(sequence):
        return enumerate(sequence, start=start)
    return Transformation("enumerate", _enumerate, None)
def cartesian_t(iterables, repeat):
    """
    Transformation for Sequence.cartesian

    :param iterables: elements for cartesian product
    :param repeat: how many times to repeat iterables
    :return: transformation
    """
    return Transformation(
        "cartesian", lambda sequence: product(sequence, *iterables, repeat=repeat), None
    )
def init_t():
    """
    Transformation for Sequence.init (all elements except the last)

    :return: transformation
    """
    # Negative-index slicing needs a concrete sequence, hence PRE_COMPUTE.
    return Transformation(
        "init", lambda sequence: sequence[:-1], {ExecutionStrategies.PRE_COMPUTE}
    )
def tail_t():
    """
    Transformation for Sequence.tail (all elements except the first)

    :return: transformation
    """
    # islice keeps this lazy; no precompute needed.
    return Transformation("tail", lambda sequence: islice(sequence, 1, None), None)
def inits_t(wrap):
    """
    Transformation for Sequence.inits

    :param wrap: wrap children values with this
    :return: transformation
    """
    def _inits(sequence):
        # Longest prefix first, down to the empty prefix.
        return [wrap(sequence[:i]) for i in range(len(sequence), -1, -1)]
    return Transformation("inits", _inits, {ExecutionStrategies.PRE_COMPUTE})


def tails_t(wrap):
    """
    Transformation for Sequence.tails

    :param wrap: wrap children values with this
    :return: transformation
    """
    def _tails(sequence):
        # Full sequence first, down to the empty suffix.
        return [wrap(sequence[i:]) for i in range(len(sequence) + 1)]
    return Transformation("tails", _tails, {ExecutionStrategies.PRE_COMPUTE})
def union_t(other):
    """
    Transformation for Sequence.union

    :param other: sequence to union with
    :return: transformation
    """
    # Set semantics: duplicates removed, input order not preserved.
    return Transformation("union", lambda sequence: set(sequence).union(other), None)
def intersection_t(other):
    """
    Transformation for Sequence.intersection

    :param other: sequence to intersect with
    :return: transformation
    """
    return Transformation(
        "intersection", lambda sequence: set(sequence).intersection(other), None
    )
def difference_t(other):
    """
    Transformation for Sequence.difference

    :param other: sequence to different with
    :return: transformation
    """
    # Elements of the input sequence that are not in `other`.
    return Transformation(
        "difference", lambda sequence: set(sequence).difference(other), None
    )
def symmetric_difference_t(other):
    """
    Transformation for Sequence.symmetric_difference

    :param other: sequence to symmetric_difference with
    :return: transformation
    """
    return Transformation(
        "symmetric_difference",
        lambda sequence: set(sequence).symmetric_difference(other),
        None,
    )
def group_by_key_impl(sequence):
    """
    Implementation for group_by_key_t

    Groups (key, value) pairs into (key, [values]) pairs, preserving the
    order of values within each group.

    :param sequence: sequence of (key, value) pairs to group
    :return: items view of (key, [values]) pairs
    """
    result = {}
    for element in sequence:
        # setdefault does a single lookup per element; the original
        # get()-then-append pattern looked each key up twice.
        result.setdefault(element[0], []).append(element[1])
    return result.items()


def group_by_key_t():
    """
    Transformation for Sequence.group_by_key

    :return: transformation
    """
    return Transformation("group_by_key", group_by_key_impl, None)
def reduce_by_key_impl(func, sequence):
"""
Implementation for reduce_by_key_t
:param func: reduce function
:param sequence: sequence to reduce
:return: reduced sequence
"""
result = {}
for key, value in sequence:
if key in result:
result[key] = func(result[key], value)
else:
result[key] = value
return result.items()
def reduce_by_key_t(func):
"""
Transformation for Sequence.reduce_by_key
:param func: reduce function
:return: transformation
"""
return Transformation(
"reduce_by_key({0})".format(name(func)), partial(reduce_by_key_impl, func), None
)
def accumulate_impl(func, sequence):
    # pylint: disable=no-name-in-module
    """
    Implementation for accumulate

    :param sequence: sequence to accumulate
    :param func: accumulate function
    """
    from itertools import accumulate as _accumulate
    return _accumulate(sequence, func)


def accumulate_t(func):
    """
    Transformation for Sequence.accumulate
    """
    label = "accumulate({0})".format(name(func))
    return Transformation(label, partial(accumulate_impl, func), None)
def count_by_key_impl(sequence):
    """
    Implementation for count_by_key_t

    :param sequence: sequence of (key, value) pairs
    :return: counts by key
    """
    # Counter consumes the keys directly instead of incrementing by hand.
    return collections.Counter(key for key, _ in sequence).items()


def count_by_key_t():
    """
    Transformation for Sequence.count_by_key

    :return: transformation
    """
    return Transformation("count_by_key", count_by_key_impl, None)


def count_by_value_impl(sequence):
    """
    Implementation for count_by_value_t

    :param sequence: sequence of values
    :return: counts by value
    """
    return collections.Counter(sequence).items()


def count_by_value_t():
    """
    Transformation for Sequence.count_by_value

    :return: transformation
    """
    return Transformation("count_by_value", count_by_value_impl, None)
def group_by_impl(func, sequence):
    """
    Implementation for group_by_t

    Groups elements by func(element), preserving element order within each
    group.

    :param func: grouping function
    :param sequence: sequence to group
    :return: items view of (key, [elements]) pairs
    """
    result = {}
    for element in sequence:
        # Call func exactly once per element -- the original evaluated it up
        # to three times (condition, lookup and assignment).
        result.setdefault(func(element), []).append(element)
    return result.items()


def group_by_t(func):
    """
    Transformation for Sequence.group_by

    :param func: grouping function
    :return: transformation
    """
    return Transformation(
        "group_by({0})".format(name(func)), partial(group_by_impl, func), None
    )
def grouped_impl(wrap, size, sequence):
    """
    Implementation for grouped_t

    :param wrap: wrap children values with this
    :param size: size of groups
    :param sequence: sequence to group
    :return: grouped sequence
    """
    iterator = iter(sequence)
    try:
        while True:
            batch = islice(iterator, size)
            # NOTE(review): only the first element of each batch is passed
            # through `wrap`; the remaining size-1 elements are yielded raw.
            # Presumably wrap normalizes nested sequences -- confirm against
            # Sequence.grouped callers. next() on an empty batch raises
            # StopIteration, caught below to terminate (PEP 479 safe because
            # the except keeps it from escaping the generator).
            yield list(chain((wrap(next(batch)),), batch))
    except StopIteration:
        return
def grouped_t(wrap, size):
    """
    Transformation for Sequence.grouped

    :param wrap: wrap children values with this
    :param size: size of groups
    :return: transformation
    """
    return Transformation(
        "grouped({0})".format(size), partial(grouped_impl, wrap, size), None
    )
def sliding_impl(wrap, size, step, sequence):
    """
    Implementation for sliding_t
    :param wrap: wrap children values with this
    :param size: size of window
    :param step: step size
    :param sequence: sequence to create sliding windows from
    :return: sequence of sliding windows
    """
    total = len(sequence)
    start = 0
    # full windows are always emitted; when step != 1 a trailing partial
    # window is emitted as well (see the second half of the condition)
    while start + size <= total or (step != 1 and start < total):
        yield wrap(sequence[start:start + size])
        start += step
def sliding_t(wrap, size, step):
    """
    Transformation for Sequence.sliding
    :param wrap: wrap children values with this
    :param size: size of window
    :param step: step size
    :return: transformation
    """
    description = "sliding({0}, {1})".format(size, step)
    implementation = partial(sliding_impl, wrap, size, step)
    return Transformation(description, implementation,
                          {ExecutionStrategies.PRE_COMPUTE})
def partition_impl(wrap, predicate, sequence):
    """
    Implementation for partition_t
    :param wrap: wrap children values with this
    :param predicate: predicate deciding which partition an element joins
    :param sequence: sequence to partition
    :return: wrapped pair of (matching, non-matching) elements
    """
    matching = []
    non_matching = []
    for element in sequence:
        # single pass; predicate is evaluated exactly once per element
        bucket = matching if predicate(element) else non_matching
        bucket.append(element)
    return wrap((wrap(matching), wrap(non_matching)))
def partition_t(wrap, func):
    """
    Transformation for Sequence.partition
    :param wrap: wrap children values with this
    :param func: partition function
    :return: transformation
    """
    description = "partition({0})".format(name(func))
    return Transformation(description, partial(partition_impl, wrap, func), None)
def inner_join_impl(other, sequence):
    """
    Implementation for part of join_impl
    :param other: other sequence to join with
    :param sequence: first sequence to join with
    :return: joined sequence
    """
    # dict comprehension replaces the manual build loop and the pointless
    # `seq_kv = seq_dict` alias of the original
    seq_kv = {element[0]: element[1] for element in sequence}
    other_kv = dict(other)
    # probe from the smaller side to minimise lookups
    keys = seq_kv.keys() if len(seq_kv) < len(other_kv) else other_kv.keys()
    result = {}
    for k in keys:
        if k in seq_kv and k in other_kv:
            result[k] = (seq_kv[k], other_kv[k])
    return result.items()
def join_impl(other, join_type, sequence):
    """
    Implementation for join_t
    :param other: other sequence to join with
    :param join_type: join type (inner, outer, left, right)
    :param sequence: first sequence to join with
    :return: joined sequence
    :raises TypeError: if join_type is not one of the four supported kinds
    """
    if join_type == "inner":
        return inner_join_impl(other, sequence)
    # dict comprehension replaces the manual build loop and the pointless
    # `seq_kv = seq_dict` alias of the original
    seq_kv = {element[0]: element[1] for element in sequence}
    other_kv = dict(other)
    if join_type == "left":
        keys = seq_kv.keys()
    elif join_type == "right":
        keys = other_kv.keys()
    elif join_type == "outer":
        keys = set(seq_kv.keys()) | set(other_kv.keys())
    else:
        raise TypeError("Wrong type of join specified")
    result = {}
    for k in keys:
        # a key missing on either side contributes None for that side
        result[k] = (seq_kv.get(k), other_kv.get(k))
    return result.items()
def join_t(other, join_type):
    """
    Transformation for Sequence.join, Sequence.inner_join, Sequence.outer_join, Sequence.right_join,
    and Sequence.left_join
    :param other: other sequence to join with
    :param join_type: join type from left, right, inner, and outer
    :return: transformation
    """
    description = "{0}_join".format(join_type)
    return Transformation(description, partial(join_impl, other, join_type), None)
| {
"repo_name": "EntilZha/ScalaFunctional",
"path": "functional/transformations.py",
"copies": "1",
"size": "17277",
"license": "mit",
"hash": -454844308131215900,
"line_mean": 23.5761024182,
"line_max": 100,
"alpha_frac": 0.6305492852,
"autogenerated": false,
"ratio": 3.9993055555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003181818334521613,
"num_lines": 703
} |
from functools import partial
from itertools import combinations, permutations
from typing import (Iterable,
                    List)

from hypothesis import strategies
from hypothesis.searchstrategy import SearchStrategy
from sqlalchemy import Column

from cetus.types import RecordType

from .utils import values_strategies_by_python_types
MAX_RECORDS_COUNT = 100
def generate_records(columns: List[Column]) -> List[RecordType]:
    # draw one concrete example list from the records-lists strategy
    return records_lists_strategy(columns).example()
def generate_similar_records(columns: List[Column]
                             ) -> List[RecordType]:
    # draw one example list of records that differ only in unique columns
    return similar_records_lists_strategy(columns).example()
def records_lists_strategy(columns: List[Column]
                           ) -> SearchStrategy:
    # lists of 1..MAX_RECORDS_COUNT records, filtered so unique and
    # primary-key columns stay duplicate-free
    record_strategy = records_strategy(columns)
    constraints_filter = partial(records_satisfy_constraints,
                                 columns=columns)
    lists_strategy = strategies.lists(record_strategy,
                                      min_size=1,
                                      max_size=MAX_RECORDS_COUNT)
    return lists_strategy.filter(constraints_filter)
def similar_records_lists_strategy(columns: List[Column]
                                   ) -> SearchStrategy:
    # Build lists of records that share every non-unique field with one
    # template record, while unique/primary-key fields are drawn freshly.
    strategy = records_strategy(columns)
    # NOTE(review): calling .example() while *building* a strategy freezes
    # the shared field values for this strategy's lifetime; hypothesis
    # discourages .example() outside interactive use -- confirm intended.
    record = strategy.example()
    similar_records_strategy = strategies.tuples(*[
        # non-unique column: reuse the template record's value verbatim
        strategies.just(record[ind])
        if not (column.primary_key or column.unique)
        # unique/PK + nullable column: fresh value or None
        else strategies.one_of(
            values_strategies_by_python_types[column.type.python_type],
            strategies.none())
        if column.nullable
        # unique/PK + non-nullable column: fresh value only
        else values_strategies_by_python_types[column.type.python_type]
        for ind, column in enumerate(columns)
    ])
    # at least two records so "similar" is meaningful; uniqueness filter
    # mirrors records_lists_strategy
    return (strategies.lists(similar_records_strategy,
                             min_size=2,
                             max_size=MAX_RECORDS_COUNT)
            .filter(partial(records_satisfy_constraints,
                            columns=columns)))
def records_strategy(columns: Iterable[Column]
                     ) -> SearchStrategy:
    # one value strategy per column; nullable columns may also yield None
    per_column = []
    for column in columns:
        value_strategy = values_strategies_by_python_types[
            column.type.python_type]
        if column.nullable:
            value_strategy = strategies.one_of(value_strategy,
                                               strategies.none())
        per_column.append(value_strategy)
    return strategies.tuples(*per_column)
def records_satisfy_constraints(records: List[RecordType],
                                columns: List[Column]
                                ) -> bool:
    # True iff no unique/primary-key column holds the same value twice.
    for column_ind, column in enumerate(columns):
        if not (column.unique or column.primary_key):
            continue
        values = [record[column_ind] for record in records]
        # equality is symmetric, so unordered pairs suffice; the original
        # scanned every ordered pair via permutations, doing the work twice
        if any(first == second
               for first, second in combinations(values, 2)):
            return False
    return True
| {
"repo_name": "lycantropos/cetus",
"path": "tests/strategies/records.py",
"copies": "1",
"size": "2896",
"license": "mit",
"hash": -1413063946963916300,
"line_mean": 35.2,
"line_max": 71,
"alpha_frac": 0.6149861878,
"autogenerated": false,
"ratio": 4.4829721362229105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 80
} |
from functools import partial
from itertools import permutations
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba import jit, njit, from_dtype, typeof
from numba.core.errors import TypingError
from numba.core import types, errors
from numba.tests.support import (TestCase, MemoryLeakMixin, CompilationCache,
tag)
# Compilation flag presets used throughout this module: one permitting
# object-mode (pyobject) fallback, and one forcing nopython mode with
# NRT (Numba runtime memory management) enabled.
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
no_pyobj_flags = Flags()
no_pyobj_flags.nrt = True
def from_generic(pyfuncs_to_use):
    """Decorator for generic check functions.
    Iterates over 'pyfuncs_to_use', calling 'func' with the iterated
    item as first argument. Example:
    @from_generic(numpy_array_reshape, array_reshape)
    def check_only_shape(pyfunc, arr, shape, expected_shape):
        # Only check Numba result to avoid Numpy bugs
        self.memory_leak_setup()
        got = generic_run(pyfunc, arr, shape)
        self.assertEqual(got.shape, expected_shape)
        self.assertEqual(got.size, arr.size)
        del got
        self.memory_leak_teardown()
    """
    def decorate(check):
        # run the check once per candidate pyfunc, collecting the results
        def run_over_all(*args, **kwargs):
            return [check(candidate, *args, **kwargs)
                    for candidate in pyfuncs_to_use]
        return run_over_all
    return decorate
# The tiny wrappers below are the Python functions under test: each is
# compiled with Numba and its result compared with the CPython result.
# Method-call forms (arr.reshape) and free-function forms (np.reshape)
# are kept as separate wrappers on purpose -- they exercise different
# typing/lowering paths in the compiler, so do not merge them.
def array_reshape(arr, newshape):
    return arr.reshape(newshape)
def numpy_array_reshape(arr, newshape):
    return np.reshape(arr, newshape)
def flatten_array(a):
    return a.flatten()
def ravel_array(a):
    return a.ravel()
def ravel_array_size(a):
    return a.ravel().size
def numpy_ravel_array(a):
    return np.ravel(a)
def transpose_array(a):
    return a.transpose()
def numpy_transpose_array(a):
    return np.transpose(a)
def numpy_transpose_array_axes_kwarg(arr, axes):
    return np.transpose(arr, axes=axes)
def numpy_transpose_array_axes_kwarg_copy(arr, axes):
    return np.transpose(arr, axes=axes).copy()
def array_transpose_axes(arr, axes):
    return arr.transpose(axes)
def array_transpose_axes_copy(arr, axes):
    return arr.transpose(axes).copy()
def transpose_issue_4708(m, n):
    # Regression reproducer for numba issue #4708: a broadcasted
    # subtraction followed by transposes; keep the exact operation order.
    r1 = np.reshape(np.arange(m * n * 3), (m, 3, n))
    r2 = np.reshape(np.arange(n * 3), (n, 3))
    r_dif = (r1 - r2.T).T
    r_dif = np.transpose(r_dif, (2, 0, 1))
    z = r_dif + 1
    return z
def squeeze_array(a):
    return a.squeeze()
def expand_dims(a, axis):
    return np.expand_dims(a, axis)
def atleast_1d(*args):
    return np.atleast_1d(*args)
def atleast_2d(*args):
    return np.atleast_2d(*args)
def atleast_3d(*args):
    return np.atleast_3d(*args)
def as_strided1(a):
    # as_strided() with implicit shape
    strides = (a.strides[0] // 2,) + a.strides[1:]
    return np.lib.stride_tricks.as_strided(a, strides=strides)
def as_strided2(a):
    # Rolling window example as in https://github.com/numba/numba/issues/1884
    window = 3
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def add_axis2(a):
    return a[np.newaxis, :]
# The two "bad_*" wrappers are intentionally invalid indexing patterns;
# the tests compile them expecting typing errors.
def bad_index(arr, arr2d):
    x = arr.x,
    y = arr.y
    # note that `x` is a tuple, which causes a new axis to be created.
    arr2d[x, y] = 1.0
def bad_float_index(arr):
    # 2D index required for this function because 1D index
    # fails typing
    return arr[1, 2.0]
def numpy_fill_diagonal(arr, val, wrap=False):
    return np.fill_diagonal(arr, val, wrap)
def numpy_shape(arr):
    return np.shape(arr)
def numpy_flatnonzero(a):
    return np.flatnonzero(a)
def numpy_argwhere(a):
    return np.argwhere(a)
class TestArrayManipulation(MemoryLeakMixin, TestCase):
    """
    Check shape-changing operations on arrays.
    """
    def setUp(self):
        super(TestArrayManipulation, self).setUp()
        # per-test compilation cache so identical signatures compile once
        self.ccache = CompilationCache()
    def test_array_reshape(self):
        pyfuncs_to_use = [array_reshape, numpy_array_reshape]
        def generic_run(pyfunc, arr, shape):
            cres = compile_isolated(pyfunc, (typeof(arr), typeof(shape)))
            return cres.entry_point(arr, shape)
        @from_generic(pyfuncs_to_use)
        def check(pyfunc, arr, shape):
            expected = pyfunc(arr, shape)
            self.memory_leak_setup()
            got = generic_run(pyfunc, arr, shape)
            self.assertPreciseEqual(got, expected)
            del got
            self.memory_leak_teardown()
        @from_generic(pyfuncs_to_use)
        def check_only_shape(pyfunc, arr, shape, expected_shape):
            # Only check Numba result to avoid Numpy bugs
            self.memory_leak_setup()
            got = generic_run(pyfunc, arr, shape)
            self.assertEqual(got.shape, expected_shape)
            self.assertEqual(got.size, arr.size)
            del got
            self.memory_leak_teardown()
        @from_generic(pyfuncs_to_use)
        def check_err_shape(pyfunc, arr, shape):
            with self.assertRaises(NotImplementedError) as raises:
                generic_run(pyfunc, arr, shape)
            self.assertEqual(str(raises.exception),
                             "incompatible shape for array")
        @from_generic(pyfuncs_to_use)
        def check_err_size(pyfunc, arr, shape):
            with self.assertRaises(ValueError) as raises:
                generic_run(pyfunc, arr, shape)
            self.assertEqual(str(raises.exception),
                             "total size of new array must be unchanged")
        @from_generic(pyfuncs_to_use)
        def check_err_multiple_negative(pyfunc, arr, shape):
            with self.assertRaises(ValueError) as raises:
                generic_run(pyfunc, arr, shape)
            self.assertEqual(str(raises.exception),
                             "multiple negative shape values")
        # C-contiguous
        arr = np.arange(24)
        check(arr, (24,))
        check(arr, (4, 6))
        check(arr, (8, 3))
        check(arr, (8, 1, 3))
        check(arr, (1, 8, 1, 1, 3, 1))
        arr = np.arange(24).reshape((2, 3, 4))
        check(arr, (24,))
        check(arr, (4, 6))
        check(arr, (8, 3))
        check(arr, (8, 1, 3))
        check(arr, (1, 8, 1, 1, 3, 1))
        check_err_size(arr, ())
        check_err_size(arr, (25,))
        check_err_size(arr, (8, 4))
        arr = np.arange(24).reshape((1, 8, 1, 1, 3, 1))
        check(arr, (24,))
        check(arr, (4, 6))
        check(arr, (8, 3))
        check(arr, (8, 1, 3))
        # F-contiguous
        arr = np.arange(24).reshape((2, 3, 4)).T
        check(arr, (4, 3, 2))
        check(arr, (1, 4, 1, 3, 1, 2, 1))
        check_err_shape(arr, (2, 3, 4))
        check_err_shape(arr, (6, 4))
        check_err_shape(arr, (2, 12))
        # Test negative shape value
        arr = np.arange(25).reshape(5,5)
        check(arr, -1)
        check(arr, (-1,))
        check(arr, (-1, 5))
        check(arr, (5, -1, 5))
        check(arr, (5, 5, -1))
        check_err_size(arr, (-1, 4))
        check_err_multiple_negative(arr, (-1, -2, 5, 5))
        check_err_multiple_negative(arr, (5, 5, -1, -1))
        # 0-sized arrays
        def check_empty(arr):
            check(arr, 0)
            check(arr, (0,))
            check(arr, (1, 0, 2))
            check(arr, (0, 55, 1, 0, 2))
            # -1 is buggy in Numpy with 0-sized arrays
            check_only_shape(arr, -1, (0,))
            check_only_shape(arr, (-1,), (0,))
            check_only_shape(arr, (0, -1), (0, 0))
            check_only_shape(arr, (4, -1), (4, 0))
            check_only_shape(arr, (-1, 0, 4), (0, 0, 4))
            check_err_size(arr, ())
            check_err_size(arr, 1)
            check_err_size(arr, (1, 2))
        arr = np.array([])
        check_empty(arr)
        check_empty(arr.reshape((3, 2, 0)))
        # Exceptions leak references
        self.disable_leak_check()
    def test_array_transpose_axes(self):
        pyfuncs_to_use = [numpy_transpose_array_axes_kwarg,
                          numpy_transpose_array_axes_kwarg_copy,
                          array_transpose_axes,
                          array_transpose_axes_copy]
        def run(pyfunc, arr, axes):
            cres = self.ccache.compile(pyfunc, (typeof(arr), typeof(axes)))
            return cres.entry_point(arr, axes)
        @from_generic(pyfuncs_to_use)
        def check(pyfunc, arr, axes):
            expected = pyfunc(arr, axes)
            got = run(pyfunc, arr, axes)
            self.assertPreciseEqual(got, expected)
            self.assertEqual(got.flags.f_contiguous,
                             expected.flags.f_contiguous)
            self.assertEqual(got.flags.c_contiguous,
                             expected.flags.c_contiguous)
        @from_generic(pyfuncs_to_use)
        def check_err_axis_repeated(pyfunc, arr, axes):
            with self.assertRaises(ValueError) as raises:
                run(pyfunc, arr, axes)
            self.assertEqual(str(raises.exception),
                             "repeated axis in transpose")
        @from_generic(pyfuncs_to_use)
        def check_err_axis_oob(pyfunc, arr, axes):
            with self.assertRaises(ValueError) as raises:
                run(pyfunc, arr, axes)
            self.assertEqual(str(raises.exception),
                             "axis is out of bounds for array of given dimension")
        @from_generic(pyfuncs_to_use)
        def check_err_invalid_args(pyfunc, arr, axes):
            with self.assertRaises((TypeError, TypingError)):
                run(pyfunc, arr, axes)
        arrs = [np.arange(24),
                np.arange(24).reshape(4, 6),
                np.arange(24).reshape(2, 3, 4),
                np.arange(24).reshape(1, 2, 3, 4),
                np.arange(64).reshape(8, 4, 2)[::3,::2,:]]
        for i in range(len(arrs)):
            # First check `None`, the default, which is to reverse dims
            check(arrs[i], None)
            # Check supplied axis permutations
            for axes in permutations(tuple(range(arrs[i].ndim))):
                ndim = len(axes)
                neg_axes = tuple([x - ndim for x in axes])
                check(arrs[i], axes)
                check(arrs[i], neg_axes)
        @from_generic([transpose_issue_4708])
        def check_issue_4708(pyfunc, m, n):
            expected = pyfunc(m, n)
            got = njit(pyfunc)(m, n)
            # values in arrays are equals,
            # but stronger assertions not hold (layout and strides equality)
            np.testing.assert_equal(got, expected)
        check_issue_4708(3, 2)
        check_issue_4708(2, 3)
        check_issue_4708(5, 4)
        # Exceptions leak references
        self.disable_leak_check()
        check_err_invalid_args(arrs[1], "foo")
        check_err_invalid_args(arrs[1], ("foo",))
        check_err_invalid_args(arrs[1], 5.3)
        check_err_invalid_args(arrs[2], (1.2, 5))
        check_err_axis_repeated(arrs[1], (0, 0))
        check_err_axis_repeated(arrs[2], (2, 0, 0))
        check_err_axis_repeated(arrs[3], (3, 2, 1, 1))
        check_err_axis_oob(arrs[0], (1,))
        check_err_axis_oob(arrs[0], (-2,))
        check_err_axis_oob(arrs[1], (0, 2))
        check_err_axis_oob(arrs[1], (-3, 2))
        check_err_axis_oob(arrs[1], (0, -3))
        check_err_axis_oob(arrs[2], (3, 1, 2))
        check_err_axis_oob(arrs[2], (-4, 1, 2))
        check_err_axis_oob(arrs[3], (3, 1, 2, 5))
        check_err_axis_oob(arrs[3], (3, 1, 2, -5))
        with self.assertRaises(TypingError) as e:
            jit(nopython=True)(numpy_transpose_array)((np.array([0, 1]),))
        self.assertIn("np.transpose does not accept tuples",
                      str(e.exception))
    def test_expand_dims(self):
        pyfunc = expand_dims
        def run(arr, axis):
            cres = self.ccache.compile(pyfunc, (typeof(arr), typeof(axis)))
            return cres.entry_point(arr, axis)
        def check(arr, axis):
            expected = pyfunc(arr, axis)
            self.memory_leak_setup()
            got = run(arr, axis)
            self.assertPreciseEqual(got, expected)
            del got
            self.memory_leak_teardown()
        def check_all_axes(arr):
            # every valid axis, negative and positive, inclusive of ndim
            for axis in range(-arr.ndim - 1, arr.ndim + 1):
                check(arr, axis)
        # 1d
        arr = np.arange(5)
        check_all_axes(arr)
        # 3d (C, F, A)
        arr = np.arange(24).reshape((2, 3, 4))
        check_all_axes(arr)
        check_all_axes(arr.T)
        check_all_axes(arr[::-1])
        # 0d
        arr = np.array(42)
        check_all_axes(arr)
    def check_atleast_nd(self, pyfunc, cfunc):
        # shared driver for the three test_atleast_*d tests below
        def check_result(got, expected):
            # We would like to check the result has the same contiguity,
            # but we can't rely on the "flags" attribute when there are
            # 1-sized dimensions.
            self.assertStridesEqual(got, expected)
            self.assertPreciseEqual(got.flatten(), expected.flatten())
        def check_single(arg):
            check_result(cfunc(arg), pyfunc(arg))
        def check_tuple(*args):
            expected_tuple = pyfunc(*args)
            got_tuple = cfunc(*args)
            self.assertEqual(len(got_tuple), len(expected_tuple))
            for got, expected in zip(got_tuple, expected_tuple):
                check_result(got, expected)
        # 0d
        a1 = np.array(42)
        a2 = np.array(5j)
        check_single(a1)
        check_tuple(a1, a2)
        # 1d
        b1 = np.arange(5)
        b2 = np.arange(6) + 1j
        b3 = b1[::-1]
        check_single(b1)
        check_tuple(b1, b2, b3)
        # 2d
        c1 = np.arange(6).reshape((2, 3))
        c2 = c1.T
        c3 = c1[::-1]
        check_single(c1)
        check_tuple(c1, c2, c3)
        # 3d
        d1 = np.arange(24).reshape((2, 3, 4))
        d2 = d1.T
        d3 = d1[::-1]
        check_single(d1)
        check_tuple(d1, d2, d3)
        # 4d
        e = np.arange(16).reshape((2, 2, 2, 2))
        check_single(e)
        # mixed dimensions
        check_tuple(a1, b2, c3, d2)
    def test_atleast_1d(self):
        pyfunc = atleast_1d
        cfunc = jit(nopython=True)(pyfunc)
        self.check_atleast_nd(pyfunc, cfunc)
    def test_atleast_2d(self):
        pyfunc = atleast_2d
        cfunc = jit(nopython=True)(pyfunc)
        self.check_atleast_nd(pyfunc, cfunc)
    def test_atleast_3d(self):
        pyfunc = atleast_3d
        cfunc = jit(nopython=True)(pyfunc)
        self.check_atleast_nd(pyfunc, cfunc)
    def check_as_strided(self, pyfunc):
        # shared driver for test_as_strided
        def run(arr):
            cres = self.ccache.compile(pyfunc, (typeof(arr),))
            return cres.entry_point(arr)
        def check(arr):
            expected = pyfunc(arr)
            got = run(arr)
            self.assertPreciseEqual(got, expected)
        arr = np.arange(24)
        check(arr)
        check(arr.reshape((6, 4)))
        check(arr.reshape((4, 1, 6)))
    def test_as_strided(self):
        self.check_as_strided(as_strided1)
        self.check_as_strided(as_strided2)
    def test_flatten_array(self, flags=enable_pyobj_flags, layout='C'):
        a = np.arange(9).reshape(3, 3)
        if layout == 'F':
            a = a.T
        pyfunc = flatten_array
        arraytype1 = typeof(a)
        if layout == 'A':
            # Force A layout
            arraytype1 = arraytype1.copy(layout='A')
        self.assertEqual(arraytype1.layout, layout)
        cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc(a)
        got = cfunc(a)
        np.testing.assert_equal(expected, got)
    def test_flatten_array_npm(self):
        self.test_flatten_array(flags=no_pyobj_flags)
        self.test_flatten_array(flags=no_pyobj_flags, layout='F')
        self.test_flatten_array(flags=no_pyobj_flags, layout='A')
    def test_ravel_array(self, flags=enable_pyobj_flags):
        def generic_check(pyfunc, a, assume_layout):
            # compile
            arraytype1 = typeof(a)
            self.assertEqual(arraytype1.layout, assume_layout)
            cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
            cfunc = cr.entry_point
            expected = pyfunc(a)
            got = cfunc(a)
            # Check result matches
            np.testing.assert_equal(expected, got)
            # Check copying behavior
            py_copied = (a.ctypes.data != expected.ctypes.data)
            nb_copied = (a.ctypes.data != got.ctypes.data)
            self.assertEqual(py_copied, assume_layout != 'C')
            self.assertEqual(py_copied, nb_copied)
        check_method = partial(generic_check, ravel_array)
        check_function = partial(generic_check, numpy_ravel_array)
        def check(*args, **kwargs):
            check_method(*args, **kwargs)
            check_function(*args, **kwargs)
        # Check 2D
        check(np.arange(9).reshape(3, 3), assume_layout='C')
        check(np.arange(9).reshape(3, 3, order='F'), assume_layout='F')
        check(np.arange(18).reshape(3, 3, 2)[:, :, 0], assume_layout='A')
        # Check 3D
        check(np.arange(18).reshape(2, 3, 3), assume_layout='C')
        check(np.arange(18).reshape(2, 3, 3, order='F'), assume_layout='F')
        check(np.arange(36).reshape(2, 3, 3, 2)[:, :, :, 0], assume_layout='A')
    def test_ravel_array_size(self, flags=enable_pyobj_flags):
        a = np.arange(9).reshape(3, 3)
        pyfunc = ravel_array_size
        arraytype1 = typeof(a)
        cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc(a)
        got = cfunc(a)
        np.testing.assert_equal(expected, got)
    def test_ravel_array_npm(self):
        self.test_ravel_array(flags=no_pyobj_flags)
    def test_ravel_array_size_npm(self):
        self.test_ravel_array_size(flags=no_pyobj_flags)
    def test_transpose_array(self, flags=enable_pyobj_flags):
        @from_generic([transpose_array, numpy_transpose_array])
        def check(pyfunc):
            a = np.arange(9).reshape(3, 3)
            arraytype1 = typeof(a)
            cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
            cfunc = cr.entry_point
            expected = pyfunc(a)
            got = cfunc(a)
            np.testing.assert_equal(expected, got)
        check()
    def test_transpose_array_npm(self):
        self.test_transpose_array(flags=no_pyobj_flags)
    def test_squeeze_array(self, flags=enable_pyobj_flags):
        a = np.arange(2 * 1 * 3 * 1 * 4).reshape(2, 1, 3, 1, 4)
        pyfunc = squeeze_array
        arraytype1 = typeof(a)
        cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc(a)
        got = cfunc(a)
        np.testing.assert_equal(expected, got)
    def test_squeeze_array_npm(self):
        # squeeze is unsupported in nopython mode; a TypingError is expected
        with self.assertRaises(errors.TypingError) as raises:
            self.test_squeeze_array(flags=no_pyobj_flags)
        self.assertIn("squeeze", str(raises.exception))
    def test_add_axis2(self, flags=enable_pyobj_flags):
        a = np.arange(9).reshape(3, 3)
        pyfunc = add_axis2
        arraytype1 = typeof(a)
        cr = compile_isolated(pyfunc, (arraytype1,), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc(a)
        got = cfunc(a)
        np.testing.assert_equal(expected, got)
    def test_add_axis2_npm(self):
        with self.assertTypingError() as raises:
            self.test_add_axis2(flags=no_pyobj_flags)
        self.assertIn("unsupported array index type none in",
                      str(raises.exception))
    def test_bad_index_npm(self):
        with self.assertTypingError() as raises:
            arraytype1 = from_dtype(np.dtype([('x', np.int32),
                                              ('y', np.int32)]))
            arraytype2 = types.Array(types.int32, 2, 'C')
            compile_isolated(bad_index, (arraytype1, arraytype2),
                             flags=no_pyobj_flags)
        self.assertIn('unsupported array index type', str(raises.exception))
    def test_bad_float_index_npm(self):
        with self.assertTypingError() as raises:
            compile_isolated(bad_float_index,
                             (types.Array(types.float64, 2, 'C'),))
        self.assertIn('unsupported array index type float64',
                      str(raises.exception))
    def test_fill_diagonal_basic(self):
        pyfunc = numpy_fill_diagonal
        cfunc = jit(nopython=True)(pyfunc)
        def _shape_variations(n):
            # square
            yield (n, n)
            # tall and thin
            yield (2 * n, n)
            # short and fat
            yield (n, 2 * n)
            # a bit taller than wide; odd numbers of rows and cols
            yield ((2 * n + 1), (2 * n - 1))
            # 4d, all dimensions same
            yield (n, n, n, n)
            # weird edge case
            yield (1, 1, 1)
        def _val_variations():
            yield 1
            yield 3.142
            yield np.nan
            yield -np.inf
            yield True
            yield np.arange(4)
            yield (4,)
            yield [8, 9]
            yield np.arange(54).reshape(9, 3, 2, 1)  # contiguous C
            yield np.asfortranarray(np.arange(9).reshape(3, 3))  # contiguous F
            yield np.arange(9).reshape(3, 3)[::-1]  # non-contiguous
        # contiguous arrays
        def _multi_dimensional_array_variations(n):
            for shape in _shape_variations(n):
                yield np.zeros(shape, dtype=np.float64)
                yield np.asfortranarray(np.ones(shape, dtype=np.float64))
        # non-contiguous arrays
        def _multi_dimensional_array_variations_strided(n):
            for shape in _shape_variations(n):
                tmp = np.zeros(tuple([x * 2 for x in shape]), dtype=np.float64)
                slicer = tuple(slice(0, x * 2, 2) for x in shape)
                yield tmp[slicer]
        def _check_fill_diagonal(arr, val):
            # exercise the default, wrap=True and wrap=False paths
            for wrap in None, True, False:
                a = arr.copy()
                b = arr.copy()
                if wrap is None:
                    params = {}
                else:
                    params = {'wrap': wrap}
                pyfunc(a, val, **params)
                cfunc(b, val, **params)
                self.assertPreciseEqual(a, b)
        for arr in _multi_dimensional_array_variations(3):
            for val in _val_variations():
                _check_fill_diagonal(arr, val)
        for arr in _multi_dimensional_array_variations_strided(3):
            for val in _val_variations():
                _check_fill_diagonal(arr, val)
        # non-numeric input arrays
        arr = np.array([True] * 9).reshape(3, 3)
        _check_fill_diagonal(arr, False)
        _check_fill_diagonal(arr, [False, True, False])
        _check_fill_diagonal(arr, np.array([True, False, True]))
    def test_fill_diagonal_exception_cases(self):
        pyfunc = numpy_fill_diagonal
        cfunc = jit(nopython=True)(pyfunc)
        val = 1
        # Exceptions leak references
        self.disable_leak_check()
        # first argument unsupported number of dimensions
        for a in np.array([]), np.ones(5):
            with self.assertRaises(TypingError) as raises:
                cfunc(a, val)
            assert "The first argument must be at least 2-D" in str(raises.exception)
        # multi-dimensional input where dimensions are not all equal
        with self.assertRaises(ValueError) as raises:
            a = np.zeros((3, 3, 4))
            cfunc(a, val)
        self.assertEqual("All dimensions of input must be of equal length", str(raises.exception))
        # cases where val has incompatible type / value
        def _assert_raises(arr, val):
            with self.assertRaises(ValueError) as raises:
                cfunc(arr, val)
            self.assertEqual("Unable to safely conform val to a.dtype", str(raises.exception))
        arr = np.zeros((3, 3), dtype=np.int32)
        val = np.nan
        _assert_raises(arr, val)
        val = [3.3, np.inf]
        _assert_raises(arr, val)
        val = np.array([1, 2, 1e10], dtype=np.int64)
        _assert_raises(arr, val)
        arr = np.zeros((3, 3), dtype=np.float32)
        val = [1.4, 2.6, -1e100]
        _assert_raises(arr, val)
        val = 1.1e100
        _assert_raises(arr, val)
        val = np.array([-1e100])
        _assert_raises(arr, val)
    def test_shape(self):
        pyfunc = numpy_shape
        cfunc = jit(nopython=True)(pyfunc)
        def check(x):
            expected = pyfunc(x)
            got = cfunc(x)
            self.assertPreciseEqual(got, expected)
        # check arrays
        for t in [(), (1,), (2, 3,), (4, 5, 6)]:
            arr = np.empty(t)
            check(arr)
        # check some types that go via asarray
        for t in [1, False, [1,], [[1, 2,],[3, 4]], (1,), (1, 2, 3)]:
            # NOTE(review): loop variable `t` is unused here -- check(arr)
            # re-tests the last array from the loop above; this probably
            # should be check(t). Left as-is (documentation-only pass).
            check(arr)
        with self.assertRaises(TypingError) as raises:
            cfunc('a')
        self.assertIn("The argument to np.shape must be array-like",
                      str(raises.exception))
    def test_flatnonzero_basic(self):
        pyfunc = numpy_flatnonzero
        cfunc = jit(nopython=True)(pyfunc)
        def a_variations():
            # NOTE(review): self.random appears to be the RNG supplied by
            # numba's TestCase support class -- confirm against base class.
            yield np.arange(-5, 5)
            yield np.full(5, fill_value=0)
            yield np.array([])
            a = self.random.randn(100)
            a[np.abs(a) > 0.2] = 0.0
            yield a
            yield a.reshape(5, 5, 4)
            yield a.reshape(50, 2, order='F')
            yield a.reshape(25, 4)[1::2]
            yield a * 1j
        for a in a_variations():
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
    def test_argwhere_basic(self):
        pyfunc = numpy_argwhere
        cfunc = jit(nopython=True)(pyfunc)
        def a_variations():
            yield np.arange(-5, 5) > 2
            yield np.full(5, fill_value=0)
            yield np.full(5, fill_value=1)
            yield np.array([])
            yield np.array([-1.0, 0.0, 1.0])
            a = self.random.randn(100)
            yield a > 0.2
            yield a.reshape(5, 5, 4) > 0.5
            yield a.reshape(50, 2, order='F') > 0.5
            yield a.reshape(25, 4)[1::2] > 0.5
            yield a == a - 1
            yield a > -a
        for a in a_variations():
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
    @staticmethod
    def array_like_variations():
        # inputs shared by the two *_array_like tests below
        yield ((1.1, 2.2), (3.3, 4.4), (5.5, 6.6))
        yield (0.0, 1.0, 0.0, -6.0)
        yield ([0, 1], [2, 3])
        yield ()
        yield np.nan
        yield 0
        yield 1
        yield False
        yield True
        yield (True, False, True)
        yield 2 + 1j
        # the following are not array-like, but NumPy does not raise
        yield None
        yield 'a_string'
        yield ''
    def test_flatnonzero_array_like(self):
        pyfunc = numpy_flatnonzero
        cfunc = jit(nopython=True)(pyfunc)
        for a in self.array_like_variations():
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
    def test_argwhere_array_like(self):
        pyfunc = numpy_argwhere
        cfunc = jit(nopython=True)(pyfunc)
        for a in self.array_like_variations():
            expected = pyfunc(a)
            got = cfunc(a)
            self.assertPreciseEqual(expected, got)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "stuartarchibald/numba",
"path": "numba/tests/test_array_manipulation.py",
"copies": "5",
"size": "27786",
"license": "bsd-2-clause",
"hash": 6846337517345113000,
"line_mean": 31.1597222222,
"line_max": 102,
"alpha_frac": 0.5493413949,
"autogenerated": false,
"ratio": 3.4375850550538165,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038904723642569,
"num_lines": 864
} |
from functools import partial
from itertools import product
from .core import make_vjp, make_jvp, vspace
from .util import subvals
from .wrap_util import unary_to_nary, get_name
TOL = 1e-6
RTOL = 1e-6
def scalar_close(a, b):
return abs(a - b) < TOL or abs(a - b) / abs(a + b) < RTOL
EPS = 1e-6
def make_numerical_jvp(f, x):
y = f(x)
x_vs, y_vs = vspace(x), vspace(y)
def jvp(v):
# (f(x + v*eps/2) - f(x - v*eps/2)) / eps
f_x_plus = f(x_vs.add(x, x_vs.scalar_mul(v, EPS/2)))
f_x_minus = f(x_vs.add(x, x_vs.scalar_mul(v, -EPS/2)))
neg_f_x_minus = y_vs.scalar_mul(f_x_minus, -1.0)
return y_vs.scalar_mul(y_vs.add(f_x_plus, neg_f_x_minus), 1.0 / EPS)
return jvp
def check_vjp(f, x):
vjp, y = make_vjp(f, x)
jvp = make_numerical_jvp(f, x)
x_vs, y_vs = vspace(x), vspace(y)
x_v, y_v = x_vs.randn(), y_vs.randn()
vjp_y = x_vs.covector(vjp(y_vs.covector(y_v)))
assert vspace(vjp_y) == x_vs
vjv_exact = x_vs.inner_prod(x_v, vjp_y)
vjv_numeric = y_vs.inner_prod(y_v, jvp(x_v))
assert scalar_close(vjv_numeric, vjv_exact), \
("Derivative (VJP) check of {} failed with arg {}:\n"
"analytic: {}\nnumeric: {}".format(
get_name(f), x, vjv_exact, vjv_numeric))
def check_jvp(f, x):
jvp = make_jvp(f, x)
jvp_numeric = make_numerical_jvp(f, x)
x_v = vspace(x).randn()
check_equivalent(jvp(x_v)[1], jvp_numeric(x_v))
def check_equivalent(x, y):
x_vs, y_vs = vspace(x), vspace(y)
assert x_vs == y_vs, "VSpace mismatch:\nx: {}\ny: {}".format(x_vs, y_vs)
v = x_vs.randn()
assert scalar_close(x_vs.inner_prod(x, v), x_vs.inner_prod(y, v)), \
"Value mismatch:\nx: {}\ny: {}".format(x, y)
@unary_to_nary
def check_grads(f, x, modes=['fwd', 'rev'], order=2):
assert all(m in ['fwd', 'rev'] for m in modes)
if 'fwd' in modes:
check_jvp(f, x)
if order > 1:
grad_f = lambda x, v: make_jvp(f, x)(v)[1]
grad_f.__name__ = 'jvp_{}'.format(get_name(f))
v = vspace(x).randn()
check_grads(grad_f, (0, 1), modes, order=order-1)(x, v)
if 'rev' in modes:
check_vjp(f, x)
if order > 1:
grad_f = lambda x, v: make_vjp(f, x)[0](v)
grad_f.__name__ = 'vjp_{}'.format(get_name(f))
v = vspace(f(x)).randn()
check_grads(grad_f, (0, 1), modes, order=order-1)(x, v)
def combo_check(fun, *args, **kwargs):
# Tests all combinations of args and kwargs given.
_check_grads = lambda f: check_grads(f, *args, **kwargs)
def _combo_check(*args, **kwargs):
kwarg_key_vals = [[(k, x) for x in xs] for k, xs in kwargs.items()]
for _args in product(*args):
for _kwargs in product(*kwarg_key_vals):
_check_grads(fun)(*_args, **dict(_kwargs))
return _combo_check
| {
"repo_name": "HIPS/autograd",
"path": "autograd/test_util.py",
"copies": "3",
"size": "2881",
"license": "mit",
"hash": -5422958327604389000,
"line_mean": 35.9358974359,
"line_max": 76,
"alpha_frac": 0.5501561958,
"autogenerated": false,
"ratio": 2.5117698343504795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45619260301504794,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
from django import forms
from django.core.exceptions import ValidationError
from django.db import transaction
from django.forms import BaseInlineFormSet
from django.forms.widgets import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from pretix.base.forms import VersionedModelForm
from pretix.base.models import ItemVariation, Item
class I18nInlineFormSet(BaseInlineFormSet):
"""
This is equivalent to a normal BaseInlineFormset, but cares for the special needs
of I18nForms (see there for more information).
"""
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event', None)
super().__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
kwargs['event'] = self.event
return super()._construct_form(i, **kwargs)
class TolerantFormsetModelForm(VersionedModelForm):
    """
    This is equivalent to a normal VersionedModelForm, but works around a problem that
    arises when the form is used inside a FormSet with can_order=True and django-formset-js
    enabled. In this configuration, even empty "extra" forms might have an ORDER value
    sent and Django marks the form as empty and raises validation errors because the other
    fields have not been filled.
    """

    def has_changed(self) -> bool:
        """
        Returns True if data differs from initial. Contrary to the default
        implementation, the ORDER field is being ignored.
        """
        for name, field in self.fields.items():
            # Skip the ordering and primary-key fields: a submitted ORDER
            # value alone must not make an otherwise empty form count as
            # filled in.
            if name == 'ORDER' or name == 'id':
                continue
            prefixed_name = self.add_prefix(name)
            data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
            if not field.show_hidden_initial:
                initial_value = self.initial.get(name, field.initial)
                if callable(initial_value):
                    initial_value = initial_value()
            else:
                # The initial value was rendered into a hidden widget; read
                # it back from the submitted data instead of self.initial.
                initial_prefixed_name = self.add_initial_prefix(name)
                hidden_widget = field.hidden_widget()
                try:
                    initial_value = field.to_python(hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name))
                except forms.ValidationError:
                    # Always assume data has changed if validation fails.
                    self._changed_data.append(name)
                    continue
            # We're using a private API of Django here. This is not nice, but no problem as it seems
            # like this will become a public API in future Django.
            if field._has_changed(initial_value, data_value):
                return True
        return False
class RestrictionForm(TolerantFormsetModelForm):
    """
    Base form for all forms representing a restriction instance.

    It consumes the extra ``item`` keyword argument and wires the item into
    a ``variations`` field (if one exists), so that VariationsField
    instances work correctly and render nicely for the item being edited.
    """

    def __init__(self, *args, **kwargs):
        if 'item' in kwargs:
            self.item = kwargs.pop('item')
        super().__init__(*args, **kwargs)
        variations_field = self.fields.get('variations')
        if isinstance(variations_field, VariationsField):
            variations_field.set_item(self.item)
class RestrictionInlineFormset(forms.BaseInlineFormSet):
    """
    Base class for any formset returned from a ``restriction_formset``
    signal receiver whose forms are RestrictionForm objects: it correctly
    passes the ``item`` parameter RestrictionForm needs. While most of this
    could be achieved with a regular formset, this class additionally
    provides ``initialized_empty_form``, which is the only way to correctly
    render a working empty form for a JavaScript-enabled restriction
    formset.
    """

    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        super().__init__(
            data, files, instance, save_as_new, prefix, queryset, **kwargs
        )
        if isinstance(self.instance, Item):
            self.queryset = self.queryset.as_of().prefetch_related("variations")

    def initialized_empty_form(self):
        """Return an empty form that already knows about the current item."""
        empty = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
            item=self.instance
        )
        self.add_fields(empty, None)
        return empty

    def _construct_form(self, i, **kwargs):
        # Every RestrictionForm needs to know which item it belongs to.
        kwargs['item'] = self.instance
        return super()._construct_form(i, **kwargs)

    class Meta:
        exclude = ['item']
def selector(values, prop):
    """
    Normalize an iterable of PropertyValue objects into a comparable key.

    Returns the identities of the given values, ordered by the identity of
    the property each value belongs to, leaving out the value belonging to
    the property ``prop``. Two variations that agree on every property
    except ``prop`` therefore produce the same selector, which is why this
    is used to group grid rows.
    """
    ordered = sorted(values, key=lambda value: value.prop.identity)
    return [
        value.identity for value in ordered
        if value.prop.identity != prop.identity
    ]
def sort(v, prop):
    """
    Sort key for ordering variations along the x-axis: the sort key of the
    variation's value for the property ``prop``.
    """
    value_for_prop = v[prop.identity]
    return value_for_prop.sortkey
class VariationsFieldRenderer(forms.widgets.CheckboxFieldRenderer):
    """
    This is the default renderer for a VariationsField. Based on the choice input class
    this renders a list or a matrix of checkboxes/radio buttons/...

    NOTE(review): ``self.choice_input_class`` is referenced below but not
    defined on this class; subclasses (e.g. VariationsCheckboxRenderer) are
    expected to provide it.
    """

    def __init__(self, name, value, attrs, choices):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choices = choices

    def render(self):
        """
        Outputs a grid for this set of choice fields.
        """
        if len(self.choices) == 0:
            raise ValueError("Can't handle empty lists")
        variations = []
        for key, value in self.choices:
            # Stash the choice key on the variation dict so the per-cell
            # rendering below can build the input's value attribute from it.
            value['key'] = key
            variations.append(value)
        # All variations share the same set of relevant properties, so the
        # first one is representative.
        properties = [v.prop for v in variations[0].relevant_values()]
        dimension = len(properties)
        id_ = self.attrs.get('id', None)
        start_tag = format_html('<div class="variations" id="{0}">', id_) if id_ else '<div class="variations">'
        output = [start_tag]
        # TODO: This is very duplicate to pretixcontrol.views.item.ItemVariations.get_forms()
        # Find a common abstraction to avoid the repetition.
        if dimension == 0:
            output.append(format_html('<em>{0}</em>', _("not applicable")))
        elif dimension == 1:
            output = self.render_1d(output, variations, properties)
        else:
            output = self.render_nd(output, variations, properties)
        output.append(
            ('<div class="help-block"><a href="#" class="variations-select-all">{0}</a> · '
             '<a href="#" class="variations-select-none">{1}</a></div></div>').format(
                _("Select all"),
                _("Deselect all")
            )
        )
        return mark_safe('\n'.join(output))

    def render_1d(self, output, variations, properties):
        # Flat list rendering for items with exactly one property.
        output.append('<ul>')
        for i, variation in enumerate(variations):
            # NOTE(review): final_attrs is built here but never used; the
            # choice input below derives its own attributes. Looks like dead
            # code — confirm before removing.
            final_attrs = dict(
                self.attrs.copy(), type=self.choice_input_class.input_type,
                name=self.name, value=variation['key']
            )
            if variation['key'] in self.value:
                final_attrs['checked'] = 'checked'
            w = self.choice_input_class(
                self.name, self.value, self.attrs.copy(),
                (variation['key'], variation[properties[0].identity].value),
                i
            )
            output.append(format_html('<li>{0}</li>', force_text(w)))
        output.append('</ul>')
        return output

    def render_nd(self, output, variations, properties):
        # Grid rendering: one table per combination of all properties
        # beyond the first two.
        # prop1 is the property on all the grid's y-axes
        prop1 = properties[0]
        prop1v = list(prop1.values.current.all())
        # prop2 is the property on all the grid's x-axes
        prop2 = properties[1]
        prop2v = list(prop2.values.current.all())
        # We now iterate over the cartesian product of all the other
        # properties which are NOT on the axes of the grid because we
        # create one grid for any combination of them.
        for gridrow in product(*[prop.values.current.all() for prop in properties[2:]]):
            if len(gridrow) > 0:
                output.append('<strong>')
                output.append(", ".join([value.value for value in gridrow]))
                output.append('</strong>')
            output.append('<table class="table"><thead><tr><th></th>')
            for val2 in prop2v:
                output.append(format_html('<th>{0}</th>', val2.value))
            output.append('</thead><tbody>')
            for val1 in prop1v:
                output.append(format_html('<tr><th>{0}</th>', val1.value))
                # We are now inside one of the rows of the grid and have to
                # select the variations to display in this row. In order to
                # achieve this, we use the 'selector' lambda defined above.
                # It gives us a normalized, comparable version of a set of
                # PropertyValue objects. In this case, we compute the
                # selector of our row as the selector of the sum of the
                # values defining our grid and the value defining our row.
                selection = selector(gridrow + (val1,), prop2)
                # We now iterate over all variations who generate the same
                # selector as 'selection'.
                filtered = [v for v in variations if selector(v.relevant_values(), prop2) == selection]
                for variation in sorted(filtered, key=partial(sort, prop=prop2)):
                    final_attrs = dict(
                        self.attrs.copy(), type=self.choice_input_class.input_type,
                        name=self.name, value=variation['key']
                    )
                    if variation['key'] in self.value:
                        final_attrs['checked'] = 'checked'
                    output.append(format_html('<td><label><input{0} /></label></td>', flatatt(final_attrs)))
                # NOTE(review): this appends '</td>' although each cell above
                # already closes its own <td>, and the row opened with <tr>
                # is never closed with </tr>. Browsers tolerate this, but it
                # looks like a markup bug — confirm and fix upstream.
                output.append('</td>')
            output.append('</tbody></table>')
        return output
class VariationsCheckboxRenderer(VariationsFieldRenderer):
    """
    Variant of VariationsFieldRenderer that always renders checkboxes,
    regardless of the widget it is attached to.
    """
    choice_input_class = forms.widgets.CheckboxChoiceInput
class VariationsSelectMultiple(forms.CheckboxSelectMultiple):
    """
    Default widget for a VariationsField: a multiple-select rendered as a
    grid of checkboxes via VariationsCheckboxRenderer.
    """
    renderer = VariationsCheckboxRenderer
    _empty_value = []
class VariationsField(forms.ModelMultipleChoiceField):
    """
    This form field is intended to be used to let the user select a
    variation of a certain item, for example in a restriction plugin.

    As this field expects the non-standard keyword parameter ``item``
    at initialization time, this field is normally named ``variations``
    and lives inside a ``pretixcontrol.views.forms.RestrictionForm``, which
    does some magic to provide this parameter.
    """

    def __init__(self, *args, item=None, **kwargs):
        self.item = item
        # BUG FIX: this previously read
        #     if 'widget' not in args or kwargs['widget'] is None:
        # i.e. it looked for the *string* 'widget' among the positional
        # arguments. That test is virtually always true, so the default
        # widget unconditionally overwrote any caller-supplied ``widget=``
        # keyword (and could even raise KeyError). Only install the default
        # widget when the caller did not provide one.
        if kwargs.get('widget') is None:
            kwargs['widget'] = VariationsSelectMultiple
        super().__init__(*args, **kwargs)

    def set_item(self, item: Item):
        """Attach the item whose variations should be offered as choices."""
        assert isinstance(item, Item)
        self.item = item
        self._set_choices(self._get_choices())

    def _get_choices(self) -> "list[(str, VariationDict)]":
        """
        We can't use a normal QuerySet as there theoretically might be
        two types of variations: Some who already have a ItemVariation
        object associated with them and some who don't. We therefore use
        the item's ``get_all_variations`` method. In the first case, we
        use the ItemVariation object's primary key as our choice key,
        in the latter case we use a string constructed from the values
        (see VariationDict.key() for implementation details).
        """
        if self.item is None:
            return ()
        variations = self.item.get_all_variations(use_cache=True)
        return (
            (
                v['variation'].identity if 'variation' in v else v.key(),
                v
            ) for v in variations
        )

    def clean(self, value: "list[int]"):
        """
        At cleaning time, we have to clean up the mess we produced with our
        _get_choices implementation. In the case of ItemVariation object ids
        we don't do anything to them, but if one of the selected items is a
        list of PropertyValue objects (see _get_choices), we need to create
        a new ItemVariation object for this combination and then add this to
        our list of selected items.

        :raises ValueError: if no item has been attached to this field
        :raises ValidationError: for missing or inconsistent selections
        """
        if self.item is None:
            # BUG FIX: the implicitly concatenated message parts were missing
            # separating spaces ("Pleaseuse", "ofa").
            raise ValueError(
                "VariationsField object was not properly initialized. Please "
                "use a pretixcontrol.views.forms.RestrictionForm form instead of "
                "a plain Django ModelForm"
            )
        # Standard validation foo
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')

        cleaned_value = self._clean_value(value)
        qs = self.item.variations.current.filter(identity__in=cleaned_value)

        # Re-check for consistency: every cleaned id must actually match a
        # variation present in the database.
        pks = set(force_text(o.identity) for o in qs)
        for val in cleaned_value:
            if force_text(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )

        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(cleaned_value)
        return qs

    def _clean_value(self, value):
        # Build up a cache of variations having an ItemVariation object.
        # For implementation details, see ItemVariation.get_all_variations()
        # which uses a very similar method
        all_variations = self.item.variations.all().prefetch_related("values")
        variations_cache = {
            var.to_variation_dict().identify(): var.identity for var in all_variations
        }

        cleaned_value = []

        # Wrap this in a transaction to prevent strange database state if we
        # get a ValidationError half-way through
        with transaction.atomic():
            for pk in value:
                if ":" in pk:
                    # A combination of PropertyValues was given.
                    # Hash the combination in the same way as in our cache above
                    key = ",".join([pair.split(":")[1] for pair in sorted(pk.split(","))])
                    if key in variations_cache:
                        # An ItemVariation object already exists for this variation,
                        # so use this. (This might occur if the variation object was
                        # created _after_ the user loaded the form but _before_ he
                        # submitted it.)
                        cleaned_value.append(str(variations_cache[key]))
                        continue
                    # No ItemVariation present, create one!
                    var = ItemVariation()
                    var.item_id = self.item.identity
                    var.save()
                    # Add the values to the ItemVariation object
                    try:
                        var.add_values_from_string(pk)
                    except Exception:
                        # Narrowed from a bare ``except:`` so that
                        # KeyboardInterrupt/SystemExit are not swallowed.
                        raise ValidationError(
                            self.error_messages['invalid_pk_value'],
                            code='invalid_pk_value',
                            params={'pk': value},
                        )
                    variations_cache[key] = var.identity
                    cleaned_value.append(str(var.identity))
                else:
                    # An ItemVariation id was given
                    cleaned_value.append(pk)

        return cleaned_value

    choices = property(_get_choices, forms.ChoiceField._set_choices)
| {
"repo_name": "Unicorn-rzl/pretix",
"path": "src/pretix/control/forms/__init__.py",
"copies": "1",
"size": "16922",
"license": "apache-2.0",
"hash": 6316455510951900000,
"line_mean": 40.8836633663,
"line_max": 112,
"alpha_frac": 0.597009633,
"autogenerated": false,
"ratio": 4.472905101771081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569914734771081,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
from operator import eq
class StreamMapper:
"""The stream mapper can be used to simplify the process of creating
stream objects from data.
:param cmp: This callable is used to compare each mapping's key
with a value.
"""
def __init__(self, cmp=eq):
self._map = []
self._cmp = cmp
def map(self, key, func, *args, **kwargs):
"""Creates a key-function mapping.
The return value from the function should be either
- A tuple containing a name and stream
- A iterator of tuples containing a name and stream
Any extra arguments will be passed to the function.
"""
self._map.append((key, partial(func, *args, **kwargs)))
def _cmp_filter(self, args):
value, (key, func) = args
return self._cmp(key, value)
def _mapped_func(self, args):
value, (key, func) = args
return func(value)
def __call__(self, values):
"""Runs through each value and transform it with a mapped function."""
values = product(values, self._map)
for value in map(self._mapped_func, filter(self._cmp_filter, values)):
if isinstance(value, tuple) and len(value) == 2:
yield value
else:
try:
if isinstance(value, dict):
yield from value.items()
else:
yield from value
except TypeError:
# Non-iterable returned
continue
| {
"repo_name": "streamlink/streamlink",
"path": "src/streamlink/plugin/api/mapper.py",
"copies": "3",
"size": "1634",
"license": "bsd-2-clause",
"hash": 8431959147377303000,
"line_mean": 31.0392156863,
"line_max": 78,
"alpha_frac": 0.5575275398,
"autogenerated": false,
"ratio": 4.722543352601156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6780070892401157,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
from operator import eq
class StreamMapper(object):
"""The stream mapper can be used to simplify the process of creating
stream objects from data.
:param cmp: This callable is used to compare each mapping's key
with a value.
"""
def __init__(self, cmp=eq):
self._map = []
self._cmp = cmp
def map(self, key, func, *args, **kwargs):
"""Creates a key-function mapping.
The return value from the function should be either
- A tuple containing a name and stream
- A iterator of tuples containing a name and stream
Any extra arguments will be passed to the function.
"""
self._map.append((key, partial(func, *args, **kwargs)))
def _cmp_filter(self, args):
value, (key, func) = args
return self._cmp(key, value)
def _mapped_func(self, args):
value, (key, func) = args
return func(value)
def __call__(self, values):
"""Runs through each value and transform it with a mapped function."""
values = product(values, self._map)
for value in map(self._mapped_func, filter(self._cmp_filter, values)):
if isinstance(value, tuple) and len(value) == 2:
yield value
else:
try:
# TODO: Replace with "yield from" when dropping Python 2.
for __ in value:
yield __
except TypeError:
# Non-iterable returned
continue
| {
"repo_name": "intact/livestreamer",
"path": "src/livestreamer/plugin/api/mapper.py",
"copies": "37",
"size": "1625",
"license": "bsd-2-clause",
"hash": -1954877691183692500,
"line_mean": 32.1632653061,
"line_max": 78,
"alpha_frac": 0.5643076923,
"autogenerated": false,
"ratio": 4.62962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 49
} |
from functools import partial
from itertools import product
from pgcli.packages.parseutils.meta import FunctionMetadata, ForeignKey
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
from mock import Mock
import pytest
# Shortcut for pytest's parametrize decorator.
parametrize = pytest.mark.parametrize

# Common column-qualification settings combinations used by the tests:
# with ('always') and without ('never') qualification for the
# multiple-table case.
qual = ['if_more_than_one_table', 'always']
no_qual = ['if_more_than_one_table', 'never']
def escape(name):
    """Quote *name* the way the completer quotes identifiers that need it.

    Mixed- or upper-case names, as well as the keywords 'select' and
    'localtimestamp', are wrapped in double quotes; everything else is
    returned unchanged.
    """
    needs_quoting = (not name.islower()) or name in ('select', 'localtimestamp')
    return '"{0}"'.format(name) if needs_quoting else name
def completion(display_meta, text, pos=0):
    """Build a prompt_toolkit Completion carrying the given display meta."""
    return Completion(text, display_meta=display_meta, start_position=pos)
def function(text, pos=0, display=None):
    """Build a Completion representing a function suggestion.

    *display* defaults to *text* when not given.
    """
    return Completion(
        text,
        start_position=pos,
        display=display or text,
        display_meta='function'
    )
def get_result(completer, text, position=None):
    """Ask *completer* for completions of *text* at *position* (default: end of text)."""
    if position is None:
        position = len(text)
    doc = Document(text=text, cursor_position=position)
    return completer.get_completions(doc, Mock())
def result_set(completer, text, position=None):
    """Like get_result, but returned as a set for order-insensitive checks."""
    completions = get_result(completer, text, position)
    return set(completions)
# The code below is equivalent to
# def schema(text, pos=0):
#     return completion('schema', text, pos)
# and so on, one factory per display-meta category.
schema = partial(completion, 'schema')
table = partial(completion, 'table')
view = partial(completion, 'view')
column = partial(completion, 'column')
keyword = partial(completion, 'keyword')
datatype = partial(completion, 'datatype')
alias = partial(completion, 'table alias')
name_join = partial(completion, 'name join')
fk_join = partial(completion, 'fk join')
join = partial(completion, 'join')
def wildcard_expansion(cols, pos=-1):
    """Completion produced when '*' is expanded into an explicit column list."""
    return Completion(cols, display='*', display_meta='columns', start_position=pos)
class MetaData(object):
    """
    Test helper wrapping a dict that describes schemas, tables, columns,
    views, functions, datatypes and foreign keys. It can both build a
    configured PGCompleter from that dict and produce the Completion
    objects the tests expect the completer to emit.
    """

    def __init__(self, metadata):
        self.metadata = metadata

    def builtin_functions(self, pos=0):
        return [function(f, pos) for f in self.completer.functions]

    def builtin_datatypes(self, pos=0):
        return [datatype(dt, pos) for dt in self.completer.datatypes]

    def keywords(self, pos=0):
        return [keyword(kw, pos) for kw in self.completer.keywords_tree.keys()]

    def columns(self, tbl, parent='public', typ='tables', pos=0):
        # For functions, a "column" is a field of the function's result;
        # look the function tuple up by name and use its field list.
        if typ == 'functions':
            fun = [x for x in self.metadata[typ][parent] if x[0] == tbl][0]
            cols = fun[1]
        else:
            cols = self.metadata[typ][parent][tbl]
        return [column(escape(col), pos) for col in cols]

    def datatypes(self, parent='public', pos=0):
        return [
            datatype(escape(x), pos)
            for x in self.metadata.get('datatypes', {}).get(parent, [])]

    def tables(self, parent='public', pos=0):
        return [
            table(escape(x), pos)
            for x in self.metadata.get('tables', {}).get(parent, [])]

    def views(self, parent='public', pos=0):
        return [
            view(escape(x), pos)
            for x in self.metadata.get('views', {}).get(parent, [])]

    def functions(self, parent='public', pos=0):
        # Function completions insert named-argument placeholders
        # ('arg := ') for all input ('i') and bidirectional ('b') args;
        # the display string shows just the argument names.
        return [
            function(
                escape(x[0]) + '(' + ', '.join(
                    arg_name + ' := '
                    for (arg_name, arg_mode) in zip(x[1], x[3])
                    if arg_mode in ('b', 'i')
                ) + ')',
                pos,
                escape(x[0]) + '(' + ', '.join(
                    arg_name
                    for (arg_name, arg_mode) in zip(x[1], x[3])
                    if arg_mode in ('b', 'i')
                ) + ')'
            )
            for x in self.metadata.get('functions', {}).get(parent, [])
        ]

    def schemas(self, pos=0):
        # Every top-level key of every metadata section is a schema name.
        schemas = set(sch for schs in self.metadata.values() for sch in schs)
        return [schema(escape(s), pos=pos) for s in schemas]

    def functions_and_keywords(self, parent='public', pos=0):
        return (
            self.functions(parent, pos) + self.builtin_functions(pos) +
            self.keywords(pos)
        )

    # Note that the filtering parameters here only apply to the columns
    def columns_functions_and_keywords(
        self, tbl, parent='public', typ='tables', pos=0
    ):
        return (
            self.functions_and_keywords(pos=pos) +
            self.columns(tbl, parent, typ, pos)
        )

    def from_clause_items(self, parent='public', pos=0):
        return (
            self.functions(parent, pos) + self.views(parent, pos) +
            self.tables(parent, pos)
        )

    def schemas_and_from_clause_items(self, parent='public', pos=0):
        return self.from_clause_items(parent, pos) + self.schemas(pos)

    def types(self, parent='public', pos=0):
        return self.datatypes(parent, pos) + self.tables(parent, pos)

    @property
    def completer(self):
        # Built fresh on every access so tests don't share completer state.
        return self.get_completer()

    def get_completers(self, casing):
        """
        Returns a function taking three bools `casing`, `filtr`, `aliasing` and
        the list `qualify`, all defaulting to None.

        Returns a list of completers.

        These parameters specify the allowed values for the corresponding
        completer parameters, `None` meaning any, i.e. (None, None, None, None)
        results in all 24 possible completers, whereas e.g.
        (True, False, True, ['never']) results in the one completer with
        casing, without `search_path` filtering of objects, with table
        aliasing, and without column qualification.
        """

        def _cfg(_casing, filtr, aliasing, qualify):
            # Build the keyword arguments for one completer configuration.
            cfg = {'settings': {}}
            if _casing:
                cfg['casing'] = casing
            cfg['settings']['search_path_filter'] = filtr
            cfg['settings']['generate_aliases'] = aliasing
            cfg['settings']['qualify_columns'] = qualify
            return cfg

        def _cfgs(casing, filtr, aliasing, qualify):
            # Expand each None parameter into all of its possible values.
            casings = [True, False] if casing is None else [casing]
            filtrs = [True, False] if filtr is None else [filtr]
            aliases = [True, False] if aliasing is None else [aliasing]
            qualifys = qualify or ['always', 'if_more_than_one_table', 'never']
            return [
                _cfg(*p) for p in product(casings, filtrs, aliases, qualifys)
            ]

        def completers(casing=None, filtr=None, aliasing=None, qualify=None):
            get_comp = self.get_completer
            return [
                get_comp(**c) for c in _cfgs(casing, filtr, aliasing, qualify)
            ]

        return completers

    def _make_col(self, sch, tbl, col):
        # Every column is a 'text' column; the default value (if any) comes
        # from an optional 'defaults' section keyed by (table, column).
        defaults = self.metadata.get('defaults', {}).get(sch, {})
        return (sch, tbl, col, 'text', (tbl, col) in defaults, defaults.get((tbl, col)))

    def get_completer(self, settings=None, casing=None):
        """Build a PGCompleter populated from self.metadata."""
        metadata = self.metadata
        from pgcli.pgcompleter import PGCompleter
        comp = PGCompleter(smart_completion=True, settings=settings)

        schemata, tables, tbl_cols, views, view_cols = [], [], [], [], []

        for sch, tbls in metadata['tables'].items():
            schemata.append(sch)
            for tbl, cols in tbls.items():
                tables.append((sch, tbl))
                # Let all columns be text columns
                tbl_cols.extend([self._make_col(sch, tbl, col)
                                 for col in cols])

        for sch, tbls in metadata.get('views', {}).items():
            for tbl, cols in tbls.items():
                views.append((sch, tbl))
                # Let all columns be text columns
                view_cols.extend([self._make_col(sch, tbl, col)
                                  for col in cols])

        functions = [
            FunctionMetadata(sch, *func_meta, arg_defaults=None)
            for sch, funcs in metadata['functions'].items()
            for func_meta in funcs]

        datatypes = [
            (sch, typ)
            for sch, datatypes in metadata['datatypes'].items()
            for typ in datatypes]

        foreignkeys = [
            ForeignKey(*fk) for fks in metadata['foreignkeys'].values()
            for fk in fks]

        comp.extend_schemata(schemata)
        comp.extend_relations(tables, kind='tables')
        comp.extend_relations(views, kind='views')
        comp.extend_columns(tbl_cols, kind='tables')
        comp.extend_columns(view_cols, kind='views')
        comp.extend_functions(functions)
        comp.extend_datatypes(datatypes)
        comp.extend_foreignkeys(foreignkeys)
        comp.set_search_path(['public'])
        comp.extend_casing(casing or [])

        return comp
| {
"repo_name": "koljonen/pgcli",
"path": "tests/metadata.py",
"copies": "1",
"size": "8649",
"license": "bsd-3-clause",
"hash": -1002618050899870700,
"line_mean": 34.1585365854,
"line_max": 88,
"alpha_frac": 0.5826107064,
"autogenerated": false,
"ratio": 3.8525612472160358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4935171953616036,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
from string import ascii_letters
import warnings
import numpy as np
from pandas import (
Categorical, DataFrame, MultiIndex, Series, TimeGrouper, Timestamp,
date_range, period_range)
import pandas.util.testing as tm
# Groupby methods that are unsupported (or not meaningful) for the given
# dtype; GroupByMethods.setup skips these (dtype, method) combinations.
method_blacklist = {
    'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean',
               'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min',
               'var', 'mad', 'describe', 'std'},
    'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew',
                 'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe',
                 'std'}
}
class ApplyDictReturn(object):
    # ASV benchmark: groupby().apply with a UDF that returns a dict per group.

    def setup(self):
        self.labels = np.arange(1000).repeat(10)
        self.data = Series(np.random.randn(len(self.labels)))

    def time_groupby_apply_dict_return(self):
        self.data.groupby(self.labels).apply(lambda x: {'first': x.values[0],
                                                        'last': x.values[-1]})
class Apply(object):
    # ASV benchmark: groupby().apply overhead, with a trivial scalar UDF and
    # with a UDF that copies each group.

    def setup_cache(self):
        N = 10**4
        labels = np.random.randint(0, 2000, size=N)
        labels2 = np.random.randint(0, 3, size=N)
        df = DataFrame({'key': labels,
                        'key2': labels2,
                        'value1': np.random.randn(N),
                        'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)
                        })
        return df

    def time_scalar_function_multi_col(self, df):
        df.groupby(['key', 'key2']).apply(lambda x: 1)

    def time_scalar_function_single_col(self, df):
        df.groupby('key').apply(lambda x: 1)

    @staticmethod
    def df_copy_function(g):
        # ensure that the group name is available (see GH #15062)
        g.name
        return g.copy()

    def time_copy_function_multi_col(self, df):
        df.groupby(['key', 'key2']).apply(self.df_copy_function)

    def time_copy_overhead_single_col(self, df):
        df.groupby('key').apply(self.df_copy_function)
class Groups(object):
    # ASV benchmark: cost of materializing the .groups mapping for small and
    # large key cardinalities, with int64 and object keys.

    param_names = ['key']
    params = ['int64_small', 'int64_large', 'object_small', 'object_large']

    def setup_cache(self):
        size = 10**6
        data = {'int64_small': Series(np.random.randint(0, 100, size=size)),
                'int64_large': Series(np.random.randint(0, 10000, size=size)),
                'object_small': Series(
                    tm.makeStringIndex(100).take(
                        np.random.randint(0, 100, size=size))),
                'object_large': Series(
                    tm.makeStringIndex(10000).take(
                        np.random.randint(0, 10000, size=size)))}
        return data

    def setup(self, data, key):
        self.ser = data[key]

    def time_series_groups(self, data, key):
        self.ser.groupby(self.ser).groups
class GroupManyLabels(object):
    # ASV benchmark: groupby sum over a frame with 1 vs. 1000 columns.

    params = [1, 1000]
    param_names = ['ncols']

    def setup(self, ncols):
        N = 1000
        data = np.random.randn(N, ncols)
        self.labels = np.random.randint(0, 100, size=N)
        self.df = DataFrame(data)

    def time_sum(self, ncols):
        self.df.groupby(self.labels).sum()
class Nth(object):
    # ASV benchmark: groupby().nth with and without NA dropping, across
    # dtypes. One missing value is inserted to exercise the dropna paths.

    param_names = ['dtype']
    params = ['float32', 'float64', 'datetime', 'object']

    def setup(self, dtype):
        N = 10**5
        # with datetimes (GH7555)
        if dtype == 'datetime':
            values = date_range('1/1/2011', periods=N, freq='s')
        elif dtype == 'object':
            values = ['foo'] * N
        else:
            values = np.arange(N).astype(dtype)

        key = np.arange(N)
        self.df = DataFrame({'key': key, 'values': values})
        self.df.iloc[1, 1] = np.nan  # insert missing data

    def time_frame_nth_any(self, dtype):
        self.df.groupby('key').nth(0, dropna='any')

    def time_groupby_nth_all(self, dtype):
        self.df.groupby('key').nth(0, dropna='all')

    def time_frame_nth(self, dtype):
        self.df.groupby('key').nth(0)

    def time_series_nth_any(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0, dropna='any')

    def time_series_nth_all(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0, dropna='all')

    def time_series_nth(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0)
class DateAttributes(object):
    # ASV benchmark: len() of a groupby keyed on year/month/day arrays.

    def setup(self):
        rng = date_range('1/1/2000', '12/31/2005', freq='H')
        self.year, self.month, self.day = rng.year, rng.month, rng.day
        self.ts = Series(np.random.randn(len(rng)), index=rng)

    def time_len_groupby_object(self):
        len(self.ts.groupby([self.year, self.month, self.day]))
class Int64(object):
    # ASV benchmark: multi-key groupby max on int64 keys large enough to
    # exercise the int64-overflow grouping path.

    def setup(self):
        arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
        i = np.random.choice(len(arr), len(arr) * 5)
        arr = np.vstack((arr, arr[i]))
        i = np.random.permutation(len(arr))
        arr = arr[i]
        self.cols = list('abcde')
        self.df = DataFrame(arr, columns=self.cols)
        self.df['jim'], self.df['joe'] = np.random.randn(2, len(self.df)) * 10

    def time_overflow(self):
        self.df.groupby(self.cols).max()
class CountMultiDtype(object):
    # ASV benchmark: groupby().count() over a frame mixing datetimes,
    # timedeltas, floats, ints and objects, each with missing values.

    def setup_cache(self):
        n = 10000
        offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
        dates = np.datetime64('now') + offsets
        dates[np.random.rand(n) > 0.5] = np.datetime64('nat')
        offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat')
        value2 = np.random.randn(n)
        value2[np.random.rand(n) > 0.5] = np.nan
        obj = np.random.choice(list('ab'), size=n).astype(object)
        # NOTE(review): this uses randn (> 0.5 ≈ 31% of rows) while the other
        # columns use rand (≈ 50%); possibly unintentional — confirm.
        obj[np.random.randn(n) > 0.5] = np.nan
        df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                        'key2': np.random.randint(0, 100, size=n),
                        'dates': dates,
                        'value2': value2,
                        'value3': np.random.randn(n),
                        'ints': np.random.randint(0, 1000, size=n),
                        'obj': obj,
                        'offsets': offsets})
        return df

    def time_multi_count(self, df):
        df.groupby(['key1', 'key2']).count()
class CountMultiInt(object):
    # ASV benchmark: groupby().count() and .nunique() over all-int frames.

    def setup_cache(self):
        n = 10000
        df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                        'key2': np.random.randint(0, 100, size=n),
                        'ints': np.random.randint(0, 1000, size=n),
                        'ints2': np.random.randint(0, 1000, size=n)})
        return df

    def time_multi_int_count(self, df):
        df.groupby(['key1', 'key2']).count()

    def time_multi_int_nunique(self, df):
        df.groupby(['key1', 'key2']).nunique()
class AggFunctions(object):
    # ASV benchmark: .agg() with string names, numpy callables, and plain
    # Python builtins, per column and across columns.

    def setup_cache(self):
        N = 10**5
        fac1 = np.array(['A', 'B', 'C'], dtype='O')
        fac2 = np.array(['one', 'two'], dtype='O')
        df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=N)),
                        'key2': fac2.take(np.random.randint(0, 2, size=N)),
                        'value1': np.random.randn(N),
                        'value2': np.random.randn(N),
                        'value3': np.random.randn(N)})
        return df

    def time_different_str_functions(self, df):
        df.groupby(['key1', 'key2']).agg({'value1': 'mean',
                                          'value2': 'var',
                                          'value3': 'sum'})

    def time_different_numpy_functions(self, df):
        df.groupby(['key1', 'key2']).agg({'value1': np.mean,
                                          'value2': np.var,
                                          'value3': np.sum})

    def time_different_python_functions_multicol(self, df):
        df.groupby(['key1', 'key2']).agg([sum, min, max])

    def time_different_python_functions_singlecol(self, df):
        df.groupby('key1').agg([sum, min, max])
class GroupStrings(object):
    # ASV benchmark: groupby over four string key columns with duplicates,
    # shuffled so groups are not contiguous.

    def setup(self):
        n = 2 * 10**5
        alpha = list(map(''.join, product(ascii_letters, repeat=4)))
        data = np.random.choice(alpha, (n // 5, 4), replace=False)
        data = np.repeat(data, 5, axis=0)
        self.df = DataFrame(data, columns=list('abcd'))
        self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3)
        self.df = self.df.sample(frac=1).reset_index(drop=True)

    def time_multi_columns(self):
        self.df.groupby(list('abcd')).max()
class MultiColumn(object):
    # ASV benchmark: cythonized sum vs. lambda-based agg on a two-key
    # groupby, with and without selecting a single column first.

    def setup_cache(self):
        N = 10**5
        key1 = np.tile(np.arange(100, dtype=object), 1000)
        key2 = key1.copy()
        np.random.shuffle(key1)
        np.random.shuffle(key2)
        df = DataFrame({'key1': key1,
                        'key2': key2,
                        'data1': np.random.randn(N),
                        'data2': np.random.randn(N)})
        return df

    def time_lambda_sum(self, df):
        df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum())

    def time_cython_sum(self, df):
        df.groupby(['key1', 'key2']).sum()

    def time_col_select_lambda_sum(self, df):
        df.groupby(['key1', 'key2'])['data1'].agg(lambda x: x.values.sum())

    def time_col_select_numpy_sum(self, df):
        df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
class Size(object):
    # ASV benchmark: .size() for multi-key, TimeGrouper-based, and
    # categorical groupbys.

    def setup(self):
        n = 10**5
        offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
        dates = np.datetime64('now') + offsets
        self.df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                             'key2': np.random.randint(0, 100, size=n),
                             'value1': np.random.randn(n),
                             'value2': np.random.randn(n),
                             'value3': np.random.randn(n),
                             'dates': dates})
        self.draws = Series(np.random.randn(n))
        labels = Series(['foo', 'bar', 'baz', 'qux'] * (n // 4))
        self.cats = labels.astype('category')

    def time_multi_size(self):
        self.df.groupby(['key1', 'key2']).size()

    def time_dt_timegrouper_size(self):
        # TimeGrouper is deprecated; silence the FutureWarning so the
        # benchmark keeps measuring the grouping itself.
        with warnings.catch_warnings(record=True):
            self.df.groupby(TimeGrouper(key='dates', freq='M')).size()

    def time_category_size(self):
        self.draws.groupby(self.cats).size()
class GroupByMethods(object):
    # ASV benchmark: every groupby method across dtypes, called both
    # directly on the grouped column and via .transform().

    param_names = ['dtype', 'method', 'application']
    params = [['int', 'float', 'object', 'datetime'],
              ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin',
               'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head',
               'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique',
               'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew',
               'std', 'sum', 'tail', 'unique', 'value_counts', 'var'],
              ['direct', 'transformation']]

    def setup(self, dtype, method, application):
        # Skip combinations the given dtype does not support.
        if method in method_blacklist.get(dtype, {}):
            raise NotImplementedError  # skip benchmark
        ngroups = 1000
        size = ngroups * 2
        rng = np.arange(ngroups)
        values = rng.take(np.random.randint(0, ngroups, size=size))
        if dtype == 'int':
            key = np.random.randint(0, size, size=size)
        elif dtype == 'float':
            key = np.concatenate([np.random.random(ngroups) * 0.1,
                                  np.random.random(ngroups) * 10.0])
        elif dtype == 'object':
            key = ['foo'] * size
        elif dtype == 'datetime':
            key = date_range('1/1/2011', periods=size, freq='s')

        df = DataFrame({'values': values, 'key': key})

        # BUG FIX: this previously compared against 'transform', which is
        # not one of the declared params ('direct' / 'transformation'), so
        # the transformation variant silently benchmarked the direct call
        # path instead of .transform().
        if application == 'transformation':
            if method == 'describe':
                # transform('describe') is not supported
                raise NotImplementedError
            self.as_group_method = lambda: df.groupby(
                'key')['values'].transform(method)
            self.as_field_method = lambda: df.groupby(
                'values')['key'].transform(method)
        else:
            self.as_group_method = getattr(df.groupby('key')['values'], method)
            self.as_field_method = getattr(df.groupby('values')['key'], method)

    def time_dtype_as_group(self, dtype, method, application):
        self.as_group_method()

    def time_dtype_as_field(self, dtype, method, application):
        self.as_field_method()
class RankWithTies(object):
# GH 21237
param_names = ['dtype', 'tie_method']
params = [['float64', 'float32', 'int64', 'datetime64'],
['first', 'average', 'dense', 'min', 'max']]
def setup(self, dtype, tie_method):
N = 10**4
if dtype == 'datetime64':
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({'values': data, 'key': ['foo'] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby('key').rank(method=tie_method)
class Float32(object):
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
def time_sum(self):
self.df.groupby(['a'])['b'].sum()
class Categories(object):
def setup(self):
N = 10**5
arr = np.random.random(N)
data = {'a': Categorical(np.random.randint(10000, size=N)),
'b': arr}
self.df = DataFrame(data)
data = {'a': Categorical(np.random.randint(10000, size=N),
ordered=True),
'b': arr}
self.df_ordered = DataFrame(data)
data = {'a': Categorical(np.random.randint(100, size=N),
categories=np.arange(10000)),
'b': arr}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby('a')['b'].count()
def time_groupby_nosort(self):
self.df.groupby('a', sort=False)['b'].count()
def time_groupby_ordered_sort(self):
self.df_ordered.groupby('a')['b'].count()
def time_groupby_ordered_nosort(self):
self.df_ordered.groupby('a', sort=False)['b'].count()
def time_groupby_extra_cat_sort(self):
self.df_extra_cat.groupby('a')['b'].count()
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby('a', sort=False)['b'].count()
class Datelike(object):
# GH 14338
params = ['period_range', 'date_range', 'date_range_tz']
param_names = ['grouper']
def setup(self, grouper):
N = 10**4
rng_map = {'period_range': period_range,
'date_range': date_range,
'date_range_tz': partial(date_range, tz='US/Central')}
self.grouper = rng_map[grouper]('1900-01-01', freq='D', periods=N)
self.df = DataFrame(np.random.randn(10**4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
class SumBools(object):
# GH 2692
def setup(self):
N = 500
self.df = DataFrame({'ii': range(N),
'bb': [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
class SumMultiLevel(object):
# GH 9049
timeout = 120.0
def setup(self):
N = 50
self.df = DataFrame({'A': list(range(N)) * 2,
'B': range(N * 2),
'C': 1}).set_index(['A', 'B'])
def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
class Transform(object):
def setup(self):
n1 = 400
n2 = 250
index = MultiIndex(levels=[np.arange(n1), tm.makeStringIndex(n2)],
codes=[np.repeat(range(n1), n2).tolist(),
list(range(n2)) * n1],
names=['lev1', 'lev2'])
arr = np.random.randn(n1 * n2, 3)
arr[::10000, 0] = np.nan
arr[1::10000, 1] = np.nan
arr[2::10000, 2] = np.nan
data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3'])
self.df = data
n = 20000
self.df1 = DataFrame(np.random.randint(1, n, (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df2 = self.df1.copy()
self.df2['jim'] = self.df2['joe']
self.df3 = DataFrame(np.random.randint(1, (n / 10), (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df4 = self.df3.copy()
self.df4['jim'] = self.df4['joe']
def time_transform_lambda_max(self):
self.df.groupby(level='lev1').transform(lambda x: max(x))
def time_transform_ufunc_max(self):
self.df.groupby(level='lev1').transform(np.max)
def time_transform_multi_key1(self):
self.df1.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key2(self):
self.df2.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key3(self):
self.df3.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key4(self):
self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
class TransformBools(object):
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
transitions = np.zeros(N, dtype=np.bool)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({'signal': np.random.rand(N)})
def time_transform_mean(self):
self.df['signal'].groupby(self.g).transform(np.mean)
class TransformNaN(object):
# GH 12737
def setup(self):
self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
'B': np.nan,
'C': np.nan})
self.df_nans.loc[4::10, 'B':'C'] = 5
def time_first(self):
self.df_nans.groupby('key').transform('first')
from .pandas_vb_common import setup # noqa: F401
| {
"repo_name": "GuessWhoSamFoo/pandas",
"path": "asv_bench/benchmarks/groupby.py",
"copies": "1",
"size": "18257",
"license": "bsd-3-clause",
"hash": 3655812322681993700,
"line_mean": 32.684501845,
"line_max": 79,
"alpha_frac": 0.5313030618,
"autogenerated": false,
"ratio": 3.425971101519985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9457274163319985,
"avg_score": 0,
"num_lines": 542
} |
from functools import partial
from itertools import product
from string import ascii_letters
import numpy as np
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp,
date_range, period_range)
import pandas.util.testing as tm
method_blacklist = {
'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean',
'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min',
'var', 'mad', 'describe', 'std', 'quantile'},
'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew',
'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe',
'std'}
}
class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(lambda x: {'first': x.values[0],
'last': x.values[-1]})
class Apply:
def setup_cache(self):
N = 10**4
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)
})
return df
def time_scalar_function_multi_col(self, df):
df.groupby(['key', 'key2']).apply(lambda x: 1)
def time_scalar_function_single_col(self, df):
df.groupby('key').apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, df):
df.groupby(['key', 'key2']).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, df):
df.groupby('key').apply(self.df_copy_function)
class Groups:
param_names = ['key']
params = ['int64_small', 'int64_large', 'object_small', 'object_large']
def setup_cache(self):
size = 10**6
data = {'int64_small': Series(np.random.randint(0, 100, size=size)),
'int64_large': Series(np.random.randint(0, 10000, size=size)),
'object_small': Series(
tm.makeStringIndex(100).take(
np.random.randint(0, 100, size=size))),
'object_large': Series(
tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=size)))}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
class GroupManyLabels:
params = [1, 1000]
param_names = ['ncols']
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth:
param_names = ['dtype']
params = ['float32', 'float64', 'datetime', 'object']
def setup(self, dtype):
N = 10**5
# with datetimes (GH7555)
if dtype == 'datetime':
values = date_range('1/1/2011', periods=N, freq='s')
elif dtype == 'object':
values = ['foo'] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({'key': key, 'values': values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby('key').nth(0, dropna='any')
def time_groupby_nth_all(self, dtype):
self.df.groupby('key').nth(0, dropna='all')
def time_frame_nth(self, dtype):
self.df.groupby('key').nth(0)
def time_series_nth_any(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='any')
def time_series_nth_all(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='all')
def time_series_nth(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0)
class DateAttributes:
def setup(self):
rng = date_range('1/1/2000', '12/31/2005', freq='H')
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list('abcde')
self.df = DataFrame(arr, columns=self.cols)
self.df['jim'], self.df['joe'] = np.random.randn(2, len(self.df)) * 10
def time_overflow(self):
self.df.groupby(self.cols).max()
class CountMultiDtype:
def setup_cache(self):
n = 10000
offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
dates = np.datetime64('now') + offsets
dates[np.random.rand(n) > 0.5] = np.datetime64('nat')
offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat')
value2 = np.random.randn(n)
value2[np.random.rand(n) > 0.5] = np.nan
obj = np.random.choice(list('ab'), size=n).astype(object)
obj[np.random.randn(n) > 0.5] = np.nan
df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'dates': dates,
'value2': value2,
'value3': np.random.randn(n),
'ints': np.random.randint(0, 1000, size=n),
'obj': obj,
'offsets': offsets})
return df
def time_multi_count(self, df):
df.groupby(['key1', 'key2']).count()
class CountMultiInt:
def setup_cache(self):
n = 10000
df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'ints': np.random.randint(0, 1000, size=n),
'ints2': np.random.randint(0, 1000, size=n)})
return df
def time_multi_int_count(self, df):
df.groupby(['key1', 'key2']).count()
def time_multi_int_nunique(self, df):
df.groupby(['key1', 'key2']).nunique()
class AggFunctions:
def setup_cache(self):
N = 10**5
fac1 = np.array(['A', 'B', 'C'], dtype='O')
fac2 = np.array(['one', 'two'], dtype='O')
df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=N)),
'key2': fac2.take(np.random.randint(0, 2, size=N)),
'value1': np.random.randn(N),
'value2': np.random.randn(N),
'value3': np.random.randn(N)})
return df
def time_different_str_functions(self, df):
df.groupby(['key1', 'key2']).agg({'value1': 'mean',
'value2': 'var',
'value3': 'sum'})
def time_different_numpy_functions(self, df):
df.groupby(['key1', 'key2']).agg({'value1': np.mean,
'value2': np.var,
'value3': np.sum})
def time_different_python_functions_multicol(self, df):
df.groupby(['key1', 'key2']).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
df.groupby('key1').agg([sum, min, max])
class GroupStrings:
def setup(self):
n = 2 * 10**5
alpha = list(map(''.join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list('abcd'))
self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3)
self.df = self.df.sample(frac=1).reset_index(drop=True)
def time_multi_columns(self):
self.df.groupby(list('abcd')).max()
class MultiColumn:
def setup_cache(self):
N = 10**5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame({'key1': key1,
'key2': key2,
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
return df
def time_lambda_sum(self, df):
df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum())
def time_cython_sum(self, df):
df.groupby(['key1', 'key2']).sum()
def time_col_select_lambda_sum(self, df):
df.groupby(['key1', 'key2'])['data1'].agg(lambda x: x.values.sum())
def time_col_select_numpy_sum(self, df):
df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
class Size:
def setup(self):
n = 10**5
offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
dates = np.datetime64('now') + offsets
self.df = DataFrame({'key1': np.random.randint(0, 500, size=n),
'key2': np.random.randint(0, 100, size=n),
'value1': np.random.randn(n),
'value2': np.random.randn(n),
'value3': np.random.randn(n),
'dates': dates})
self.draws = Series(np.random.randn(n))
labels = Series(['foo', 'bar', 'baz', 'qux'] * (n // 4))
self.cats = labels.astype('category')
def time_multi_size(self):
self.df.groupby(['key1', 'key2']).size()
def time_category_size(self):
self.draws.groupby(self.cats).size()
class GroupByMethods:
param_names = ['dtype', 'method', 'application']
params = [['int', 'float', 'object', 'datetime'],
['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin',
'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head',
'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique',
'pct_change', 'prod', 'quantile', 'rank', 'sem', 'shift',
'size', 'skew', 'std', 'sum', 'tail', 'unique', 'value_counts',
'var'],
['direct', 'transformation']]
def setup(self, dtype, method, application):
if method in method_blacklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups)
values = rng.take(np.random.randint(0, ngroups, size=size))
if dtype == 'int':
key = np.random.randint(0, size, size=size)
elif dtype == 'float':
key = np.concatenate([np.random.random(ngroups) * 0.1,
np.random.random(ngroups) * 10.0])
elif dtype == 'object':
key = ['foo'] * size
elif dtype == 'datetime':
key = date_range('1/1/2011', periods=size, freq='s')
df = DataFrame({'values': values, 'key': key})
if application == 'transform':
if method == 'describe':
raise NotImplementedError
self.as_group_method = lambda: df.groupby(
'key')['values'].transform(method)
self.as_field_method = lambda: df.groupby(
'values')['key'].transform(method)
else:
self.as_group_method = getattr(df.groupby('key')['values'], method)
self.as_field_method = getattr(df.groupby('values')['key'], method)
def time_dtype_as_group(self, dtype, method, application):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
class RankWithTies:
# GH 21237
param_names = ['dtype', 'tie_method']
params = [['float64', 'float32', 'int64', 'datetime64'],
['first', 'average', 'dense', 'min', 'max']]
def setup(self, dtype, tie_method):
N = 10**4
if dtype == 'datetime64':
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({'values': data, 'key': ['foo'] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby('key').rank(method=tie_method)
class Float32:
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
def time_sum(self):
self.df.groupby(['a'])['b'].sum()
class Categories:
def setup(self):
N = 10**5
arr = np.random.random(N)
data = {'a': Categorical(np.random.randint(10000, size=N)),
'b': arr}
self.df = DataFrame(data)
data = {'a': Categorical(np.random.randint(10000, size=N),
ordered=True),
'b': arr}
self.df_ordered = DataFrame(data)
data = {'a': Categorical(np.random.randint(100, size=N),
categories=np.arange(10000)),
'b': arr}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby('a')['b'].count()
def time_groupby_nosort(self):
self.df.groupby('a', sort=False)['b'].count()
def time_groupby_ordered_sort(self):
self.df_ordered.groupby('a')['b'].count()
def time_groupby_ordered_nosort(self):
self.df_ordered.groupby('a', sort=False)['b'].count()
def time_groupby_extra_cat_sort(self):
self.df_extra_cat.groupby('a')['b'].count()
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby('a', sort=False)['b'].count()
class Datelike:
# GH 14338
params = ['period_range', 'date_range', 'date_range_tz']
param_names = ['grouper']
def setup(self, grouper):
N = 10**4
rng_map = {'period_range': period_range,
'date_range': date_range,
'date_range_tz': partial(date_range, tz='US/Central')}
self.grouper = rng_map[grouper]('1900-01-01', freq='D', periods=N)
self.df = DataFrame(np.random.randn(10**4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
class SumBools:
# GH 2692
def setup(self):
N = 500
self.df = DataFrame({'ii': range(N),
'bb': [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
class SumMultiLevel:
# GH 9049
timeout = 120.0
def setup(self):
N = 50
self.df = DataFrame({'A': list(range(N)) * 2,
'B': range(N * 2),
'C': 1}).set_index(['A', 'B'])
def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
class Transform:
def setup(self):
n1 = 400
n2 = 250
index = MultiIndex(levels=[np.arange(n1), tm.makeStringIndex(n2)],
codes=[np.repeat(range(n1), n2).tolist(),
list(range(n2)) * n1],
names=['lev1', 'lev2'])
arr = np.random.randn(n1 * n2, 3)
arr[::10000, 0] = np.nan
arr[1::10000, 1] = np.nan
arr[2::10000, 2] = np.nan
data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3'])
self.df = data
n = 20000
self.df1 = DataFrame(np.random.randint(1, n, (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df2 = self.df1.copy()
self.df2['jim'] = self.df2['joe']
self.df3 = DataFrame(np.random.randint(1, (n / 10), (n, 3)),
columns=['jim', 'joe', 'jolie'])
self.df4 = self.df3.copy()
self.df4['jim'] = self.df4['joe']
def time_transform_lambda_max(self):
self.df.groupby(level='lev1').transform(lambda x: max(x))
def time_transform_ufunc_max(self):
self.df.groupby(level='lev1').transform(np.max)
def time_transform_multi_key1(self):
self.df1.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key2(self):
self.df2.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key3(self):
self.df3.groupby(['jim', 'joe'])['jolie'].transform('max')
def time_transform_multi_key4(self):
self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
transitions = np.zeros(N, dtype=np.bool)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({'signal': np.random.rand(N)})
def time_transform_mean(self):
self.df['signal'].groupby(self.g).transform(np.mean)
class TransformNaN:
# GH 12737
def setup(self):
self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
'B': np.nan,
'C': np.nan})
self.df_nans.loc[4::10, 'B':'C'] = 5
def time_first(self):
self.df_nans.groupby('key').transform('first')
from .pandas_vb_common import setup # noqa: F401
| {
"repo_name": "cbertinato/pandas",
"path": "asv_bench/benchmarks/groupby.py",
"copies": "1",
"size": "17920",
"license": "bsd-3-clause",
"hash": -1634311501073507000,
"line_mean": 32.3085501859,
"line_max": 79,
"alpha_frac": 0.5272879464,
"autogenerated": false,
"ratio": 3.4224598930481283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9449747839448128,
"avg_score": 0,
"num_lines": 538
} |
from functools import partial
from itertools import product
from string import ascii_letters
import numpy as np
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
period_range,
)
import pandas.util.testing as tm
method_blacklist = {
"object": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"cummin",
"mean",
"max",
"skew",
"cumprod",
"cummax",
"rank",
"pct_change",
"min",
"var",
"mad",
"describe",
"std",
"quantile",
},
"datetime": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"mean",
"skew",
"cumprod",
"cummax",
"pct_change",
"var",
"mad",
"describe",
"std",
},
}
class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(
lambda x: {"first": x.values[0], "last": x.values[-1]}
)
class Apply:
def setup_cache(self):
N = 10 ** 4
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
return df
def time_scalar_function_multi_col(self, df):
df.groupby(["key", "key2"]).apply(lambda x: 1)
def time_scalar_function_single_col(self, df):
df.groupby("key").apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, df):
df.groupby(["key", "key2"]).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, df):
df.groupby("key").apply(self.df_copy_function)
class Groups:
param_names = ["key"]
params = ["int64_small", "int64_large", "object_small", "object_large"]
def setup_cache(self):
size = 10 ** 6
data = {
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
),
"object_large": Series(
tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size))
),
}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
class GroupManyLabels:
params = [1, 1000]
param_names = ["ncols"]
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth:
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
N = 10 ** 5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({"key": key, "values": values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby("key").nth(0, dropna="any")
def time_groupby_nth_all(self, dtype):
self.df.groupby("key").nth(0, dropna="all")
def time_frame_nth(self, dtype):
self.df.groupby("key").nth(0)
def time_series_nth_any(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
def time_series_nth_all(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
def time_series_nth(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0)
class DateAttributes:
def setup(self):
rng = date_range("1/1/2000", "12/31/2005", freq="H")
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list("abcde")
self.df = DataFrame(arr, columns=self.cols)
self.df["jim"], self.df["joe"] = np.random.randn(2, len(self.df)) * 10
def time_overflow(self):
self.df.groupby(self.cols).max()
class CountMultiDtype:
def setup_cache(self):
n = 10000
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
dates[np.random.rand(n) > 0.5] = np.datetime64("nat")
offsets[np.random.rand(n) > 0.5] = np.timedelta64("nat")
value2 = np.random.randn(n)
value2[np.random.rand(n) > 0.5] = np.nan
obj = np.random.choice(list("ab"), size=n).astype(object)
obj[np.random.randn(n) > 0.5] = np.nan
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"dates": dates,
"value2": value2,
"value3": np.random.randn(n),
"ints": np.random.randint(0, 1000, size=n),
"obj": obj,
"offsets": offsets,
}
)
return df
def time_multi_count(self, df):
df.groupby(["key1", "key2"]).count()
class CountMultiInt:
def setup_cache(self):
n = 10000
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"ints": np.random.randint(0, 1000, size=n),
"ints2": np.random.randint(0, 1000, size=n),
}
)
return df
def time_multi_int_count(self, df):
df.groupby(["key1", "key2"]).count()
def time_multi_int_nunique(self, df):
df.groupby(["key1", "key2"]).nunique()
class AggFunctions:
def setup_cache(self):
N = 10 ** 5
fac1 = np.array(["A", "B", "C"], dtype="O")
fac2 = np.array(["one", "two"], dtype="O")
df = DataFrame(
{
"key1": fac1.take(np.random.randint(0, 3, size=N)),
"key2": fac2.take(np.random.randint(0, 2, size=N)),
"value1": np.random.randn(N),
"value2": np.random.randn(N),
"value3": np.random.randn(N),
}
)
return df
def time_different_str_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": "mean", "value2": "var", "value3": "sum"}
)
def time_different_numpy_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": np.mean, "value2": np.var, "value3": np.sum}
)
def time_different_python_functions_multicol(self, df):
df.groupby(["key1", "key2"]).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
df.groupby("key1").agg([sum, min, max])
class GroupStrings:
def setup(self):
n = 2 * 10 ** 5
alpha = list(map("".join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list("abcd"))
self.df["joe"] = (np.random.randn(len(self.df)) * 10).round(3)
self.df = self.df.sample(frac=1).reset_index(drop=True)
def time_multi_columns(self):
self.df.groupby(list("abcd")).max()
class MultiColumn:
def setup_cache(self):
N = 10 ** 5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame(
{
"key1": key1,
"key2": key2,
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
return df
def time_lambda_sum(self, df):
df.groupby(["key1", "key2"]).agg(lambda x: x.values.sum())
def time_cython_sum(self, df):
df.groupby(["key1", "key2"]).sum()
def time_col_select_lambda_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(lambda x: x.values.sum())
def time_col_select_numpy_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(np.sum)
class Size:
def setup(self):
n = 10 ** 5
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
self.df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"value1": np.random.randn(n),
"value2": np.random.randn(n),
"value3": np.random.randn(n),
"dates": dates,
}
)
self.draws = Series(np.random.randn(n))
labels = Series(["foo", "bar", "baz", "qux"] * (n // 4))
self.cats = labels.astype("category")
def time_multi_size(self):
self.df.groupby(["key1", "key2"]).size()
def time_category_size(self):
self.draws.groupby(self.cats).size()
class GroupByMethods:
param_names = ["dtype", "method", "application"]
params = [
["int", "float", "object", "datetime"],
[
"all",
"any",
"bfill",
"count",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"describe",
"ffill",
"first",
"head",
"last",
"mad",
"max",
"min",
"median",
"mean",
"nunique",
"pct_change",
"prod",
"quantile",
"rank",
"sem",
"shift",
"size",
"skew",
"std",
"sum",
"tail",
"unique",
"value_counts",
"var",
],
["direct", "transformation"],
]
def setup(self, dtype, method, application):
if method in method_blacklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups)
values = rng.take(np.random.randint(0, ngroups, size=size))
if dtype == "int":
key = np.random.randint(0, size, size=size)
elif dtype == "float":
key = np.concatenate(
[np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]
)
elif dtype == "object":
key = ["foo"] * size
elif dtype == "datetime":
key = date_range("1/1/2011", periods=size, freq="s")
df = DataFrame({"values": values, "key": key})
if application == "transform":
if method == "describe":
raise NotImplementedError
self.as_group_method = lambda: df.groupby("key")["values"].transform(method)
self.as_field_method = lambda: df.groupby("values")["key"].transform(method)
else:
self.as_group_method = getattr(df.groupby("key")["values"], method)
self.as_field_method = getattr(df.groupby("values")["key"], method)
def time_dtype_as_group(self, dtype, method, application):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
class RankWithTies:
# GH 21237
param_names = ["dtype", "tie_method"]
params = [
["float64", "float32", "int64", "datetime64"],
["first", "average", "dense", "min", "max"],
]
def setup(self, dtype, tie_method):
N = 10 ** 4
if dtype == "datetime64":
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({"values": data, "key": ["foo"] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby("key").rank(method=tie_method)
class Float32:
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
def time_sum(self):
self.df.groupby(["a"])["b"].sum()
class Categories:
def setup(self):
N = 10 ** 5
arr = np.random.random(N)
data = {"a": Categorical(np.random.randint(10000, size=N)), "b": arr}
self.df = DataFrame(data)
data = {
"a": Categorical(np.random.randint(10000, size=N), ordered=True),
"b": arr,
}
self.df_ordered = DataFrame(data)
data = {
"a": Categorical(
np.random.randint(100, size=N), categories=np.arange(10000)
),
"b": arr,
}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby("a")["b"].count()
def time_groupby_nosort(self):
self.df.groupby("a", sort=False)["b"].count()
def time_groupby_ordered_sort(self):
self.df_ordered.groupby("a")["b"].count()
def time_groupby_ordered_nosort(self):
self.df_ordered.groupby("a", sort=False)["b"].count()
def time_groupby_extra_cat_sort(self):
self.df_extra_cat.groupby("a")["b"].count()
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby("a", sort=False)["b"].count()
class Datelike:
    """Benchmark summing with date-like groupers (GH 14338)."""

    params = ["period_range", "date_range", "date_range_tz"]
    param_names = ["grouper"]

    def setup(self, grouper):
        N = 10 ** 4
        factories = {
            "period_range": period_range,
            "date_range": date_range,
            "date_range_tz": partial(date_range, tz="US/Central"),
        }
        self.grouper = factories[grouper]("1900-01-01", freq="D", periods=N)
        self.df = DataFrame(np.random.randn(10 ** 4, 2))

    def time_sum(self, grouper):
        self.df.groupby(self.grouper).sum()
class SumBools:
    """Benchmark summing a boolean column after a groupby (GH 2692)."""

    def setup(self):
        n = 500
        self.df = DataFrame({"ii": range(n), "bb": [True] * n})

    def time_groupby_sum_booleans(self):
        self.df.groupby("ii").sum()
class SumMultiLevel:
    """Benchmark level-wise sum over a two-level MultiIndex (GH 9049)."""

    timeout = 120.0

    def setup(self):
        n = 50
        frame = DataFrame({"A": list(range(n)) * 2, "B": range(n * 2), "C": 1})
        self.df = frame.set_index(["A", "B"])

    def time_groupby_sum_multiindex(self):
        self.df.groupby(level=[0, 1]).sum()
class Transform:
    """Benchmark GroupBy.transform on MultiIndex and multi-key frames."""

    def setup(self):
        # Hierarchical frame: 400 outer labels x 250 inner string labels.
        n1 = 400
        n2 = 250
        # NOTE(review): ``tm.makeStringIndex`` is a pandas testing helper
        # (removed in pandas 2.0) -- confirm availability in the target env.
        index = MultiIndex(
            levels=[np.arange(n1), tm.makeStringIndex(n2)],
            codes=[np.repeat(range(n1), n2).tolist(), list(range(n2)) * n1],
            names=["lev1", "lev2"],
        )
        arr = np.random.randn(n1 * n2, 3)
        # Sprinkle NaNs: one per 10000 rows, offset per column, so the
        # transform has to cope with missing data in every column.
        arr[::10000, 0] = np.nan
        arr[1::10000, 1] = np.nan
        arr[2::10000, 2] = np.nan
        data = DataFrame(arr, index=index, columns=["col1", "col20", "col3"])
        self.df = data
        # Multi-key frames: df1/df2 have many groups, df3/df4 few (keys
        # drawn from a 10x smaller range); df2/df4 duplicate "joe" into
        # "jim" so the two grouping keys are perfectly correlated.
        n = 20000
        self.df1 = DataFrame(
            np.random.randint(1, n, (n, 3)), columns=["jim", "joe", "jolie"]
        )
        self.df2 = self.df1.copy()
        self.df2["jim"] = self.df2["joe"]
        self.df3 = DataFrame(
            np.random.randint(1, (n / 10), (n, 3)), columns=["jim", "joe", "jolie"]
        )
        self.df4 = self.df3.copy()
        self.df4["jim"] = self.df4["joe"]

    def time_transform_lambda_max(self):
        # Python-level callable: no cythonized fast path.
        self.df.groupby(level="lev1").transform(lambda x: max(x))

    def time_transform_ufunc_max(self):
        # NumPy ufunc path.
        self.df.groupby(level="lev1").transform(np.max)

    def time_transform_multi_key1(self):
        self.df1.groupby(["jim", "joe"])["jolie"].transform("max")

    def time_transform_multi_key2(self):
        self.df2.groupby(["jim", "joe"])["jolie"].transform("max")

    def time_transform_multi_key3(self):
        self.df3.groupby(["jim", "joe"])["jolie"].transform("max")

    def time_transform_multi_key4(self):
        self.df4.groupby(["jim", "joe"])["jolie"].transform("max")
class TransformBools:
    """Benchmark a mean-transform grouped by a precomputed id array."""

    def setup(self):
        N = 120000
        # ~1400 random positions where a new group starts.
        transition_points = np.sort(np.random.choice(np.arange(N), 1400))
        # Bug fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in
        # 1.24, so this setup crashed on modern NumPy.  Use the canonical
        # scalar type ``np.bool_`` instead (same dtype, same behavior).
        transitions = np.zeros(N, dtype=np.bool_)
        transitions[transition_points] = True
        # Cumulative sum of transition flags yields a step-like group id.
        self.g = transitions.cumsum()
        self.df = DataFrame({"signal": np.random.rand(N)})

    def time_transform_mean(self):
        self.df["signal"].groupby(self.g).transform(np.mean)
class TransformNaN:
    """Benchmark ``transform('first')`` on mostly-NaN columns (GH 12737)."""

    def setup(self):
        keys = np.repeat(np.arange(1000), 10)
        self.df_nans = DataFrame({"key": keys, "B": np.nan, "C": np.nan})
        # Give exactly one row out of every ten a real value, so each
        # group has a single non-missing entry for 'first' to find.
        self.df_nans.loc[4::10, "B":"C"] = 5

    def time_first(self):
        self.df_nans.groupby("key").transform("first")
from .pandas_vb_common import setup # noqa: F401
| {
"repo_name": "toobaz/pandas",
"path": "asv_bench/benchmarks/groupby.py",
"copies": "1",
"size": "18203",
"license": "bsd-3-clause",
"hash": -4320618054754957300,
"line_mean": 27.9395866455,
"line_max": 88,
"alpha_frac": 0.5190902598,
"autogenerated": false,
"ratio": 3.3696778970751575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.938821311808687,
"avg_score": 0.00011100775765744183,
"num_lines": 629
} |
from functools import partial
from itertools import product
from django import forms
from django.core.exceptions import ValidationError
from django.db import transaction
from django.forms import BaseInlineFormSet
from django.forms.widgets import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from pretix.base.forms import VersionedModelForm
from pretix.base.models import Item, ItemVariation
class I18nInlineFormSet(BaseInlineFormSet):
    """
    Behaves exactly like ``BaseInlineFormSet`` but accepts an ``event``
    keyword argument and forwards it to every child form, as required by
    I18nForms (see there for more information).
    """

    def __init__(self, *args, **kwargs):
        event = kwargs.pop('event', None)
        self.event = event
        super().__init__(*args, **kwargs)

    def _construct_form(self, i, **kwargs):
        # Inject the event into each form's constructor kwargs.
        kwargs['event'] = self.event
        return super()._construct_form(i, **kwargs)
class TolerantFormsetModelForm(VersionedModelForm):
    """
    This is equivalent to a normal VersionedModelForm, but works around a problem that
    arises when the form is used inside a FormSet with can_order=True and django-formset-js
    enabled. In this configuration, even empty "extra" forms might have an ORDER value
    sent and Django marks the form as empty and raises validation errors because the other
    fields have not been filled.
    """
    def has_changed(self) -> bool:
        """
        Returns True if data differs from initial. Contrary to the default
        implementation, the ORDER field is being ignored.
        """
        for name, field in self.fields.items():
            # Skip the ordering widget and the hidden pk field -- a value in
            # either does not mean the user actually filled in the form.
            if name == 'ORDER' or name == 'id':
                continue
            prefixed_name = self.add_prefix(name)
            # What the browser submitted for this field (may be None).
            data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
            if not field.show_hidden_initial:
                # Initial value comes from the form/field defaults.
                initial_value = self.initial.get(name, field.initial)
                if callable(initial_value):
                    initial_value = initial_value()
            else:
                # Initial value was round-tripped through a hidden widget.
                initial_prefixed_name = self.add_initial_prefix(name)
                hidden_widget = field.hidden_widget()
                try:
                    initial_value = field.to_python(hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name))
                except forms.ValidationError:
                    # Always assume data has changed if validation fails.
                    self._changed_data.append(name)
                    continue
            # We're using a private API of Django here. This is not nice, but no problem as it seems
            # like this will become a public API in future Django.
            if field._has_changed(initial_value, data_value):
                return True
        return False
class RestrictionForm(TolerantFormsetModelForm):
    """
    Base form for all forms representing a restriction instance.  It pops
    the non-standard ``item`` keyword argument and hands it to the form's
    ``variations`` field so that the field can build its choice grid.
    """

    def __init__(self, *args, **kwargs):
        if 'item' in kwargs:
            self.item = kwargs.pop('item')
        super().__init__(*args, **kwargs)
        variations = self.fields.get('variations')
        if isinstance(variations, VariationsField):
            variations.set_item(self.item)
class RestrictionInlineFormset(forms.BaseInlineFormSet):
    """
    Base class for any formset returned from a ``restriction_formset``
    signal receiver that contains :class:`RestrictionForm` forms.  It
    correctly passes the parent item down to every form and provides
    ``initialized_empty_form``, which is the only way to render a working
    JavaScript "empty form" template for such a formset.
    """

    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        super().__init__(
            data, files, instance, save_as_new, prefix, queryset, **kwargs
        )
        if isinstance(self.instance, Item):
            # Pin the queryset to the current version and prefetch the
            # variations the forms will need.
            self.queryset = self.queryset.as_of().prefetch_related("variations")

    def initialized_empty_form(self):
        empty = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
            item=self.instance
        )
        self.add_fields(empty, None)
        return empty

    def _construct_form(self, i, **kwargs):
        # Hand the parent item to every RestrictionForm.
        kwargs['item'] = self.instance
        return super()._construct_form(i, **kwargs)

    class Meta:
        exclude = ['item']
def selector(values, prop):
    """
    Normalize an iterable of PropertyValue-like objects into a comparable
    list: the ``identity`` of every value, ordered by the identity of the
    property it belongs to, skipping values owned by ``prop`` itself.
    Used to match grid rows against variations.
    """
    ordered = sorted(values, key=lambda value: value.prop.identity)
    return [
        value.identity
        for value in ordered
        if value.prop.identity != prop.identity
    ]
def sort(v, prop):
    """
    Key function for ordering variations along one grid axis: given a
    variation ``v`` (a mapping from property identity to PropertyValue),
    return the sort position of its value for ``prop``.
    """
    return v[prop.identity].sortkey
class VariationsFieldRenderer(forms.widgets.CheckboxFieldRenderer):
    """
    This is the default renderer for a VariationsField. Based on the choice input class
    this renders a list or a matrix of checkboxes/radio buttons/...

    ``choice_input_class`` is expected to be supplied by a subclass (see
    :class:`VariationsCheckboxRenderer`); it is not defined here.
    """
    def __init__(self, name, value, attrs, choices):
        # ``choices``: iterable of (key, VariationDict) pairs as built by
        # VariationsField._get_choices(); ``value``: selected keys.
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choices = choices
    def render(self):
        """
        Outputs a grid for this set of choice fields.
        """
        if len(self.choices) == 0:
            raise ValueError("Can't handle empty lists")
        # Fold each choice's key into its VariationDict so the dicts are
        # self-contained below (note: mutates the choice dicts in place).
        variations = []
        for key, value in self.choices:
            value['key'] = key
            variations.append(value)
        # Every variation carries the same properties, so the first one
        # determines how many axes the rendered grid needs.
        properties = [v.prop for v in variations[0].relevant_values()]
        dimension = len(properties)
        id_ = self.attrs.get('id', None)
        start_tag = format_html('<div class="variations" id="{0}">', id_) if id_ else '<div class="variations">'
        output = [start_tag]
        # TODO: This is very duplicate to pretixcontrol.views.item.ItemVariations.get_forms()
        # Find a common abstraction to avoid the repetition.
        if dimension == 0:
            output.append(format_html('<em>{0}</em>', _("not applicable")))
        elif dimension == 1:
            output = self.render_1d(output, variations, properties)
        else:
            output = self.render_nd(output, variations, properties)
        output.append(
            ('<div class="help-block"><a href="#" class="variations-select-all">{0}</a> · '
             '<a href="#" class="variations-select-none">{1}</a></div></div>').format(
                _("Select all"),
                _("Deselect all")
            )
        )
        return mark_safe('\n'.join(output))
    def render_1d(self, output, variations, properties):
        # Single property: render a flat <ul>, one input per variation.
        output.append('<ul>')
        for i, variation in enumerate(variations):
            # NOTE(review): ``final_attrs`` is built but never used in this
            # method -- the widget below receives ``self.attrs`` instead.
            final_attrs = dict(
                self.attrs.copy(), type=self.choice_input_class.input_type,
                name=self.name, value=variation['key']
            )
            if variation['key'] in self.value:
                final_attrs['checked'] = 'checked'
            w = self.choice_input_class(
                self.name, self.value, self.attrs.copy(),
                (variation['key'], variation[properties[0].identity].value),
                i
            )
            output.append(format_html('<li>{0}</li>', force_text(w)))
        output.append('</ul>')
        return output
    def render_nd(self, output, variations, properties):
        # prop1 is the property on all the grid's y-axes
        prop1 = properties[0]
        prop1v = list(prop1.values.current.all())
        # prop2 is the property on all the grid's x-axes
        prop2 = properties[1]
        prop2v = list(prop2.values.current.all())
        # We now iterate over the cartesian product of all the other
        # properties which are NOT on the axes of the grid because we
        # create one grid for any combination of them.
        for gridrow in product(*[prop.values.current.all() for prop in properties[2:]]):
            if len(gridrow) > 0:
                # Caption naming the fixed property values of this grid.
                output.append('<strong>')
                output.append(", ".join([value.value for value in gridrow]))
                output.append('</strong>')
            output.append('<table class="table"><thead><tr><th></th>')
            for val2 in prop2v:
                output.append(format_html('<th>{0}</th>', val2.value))
            output.append('</thead><tbody>')
            for val1 in prop1v:
                output.append(format_html('<tr><th>{0}</th>', val1.value))
                # We are now inside one of the rows of the grid and have to
                # select the variations to display in this row. In order to
                # achieve this, we use the 'selector' function defined above.
                # It gives us a normalized, comparable version of a set of
                # PropertyValue objects. In this case, we compute the
                # selector of our row as the selector of the sum of the
                # values defining our grid and the value defining our row.
                selection = selector(gridrow + (val1,), prop2)
                # We now iterate over all variations who generate the same
                # selector as 'selection'.
                filtered = [v for v in variations if selector(v.relevant_values(), prop2) == selection]
                for variation in sorted(filtered, key=partial(sort, prop=prop2)):
                    final_attrs = dict(
                        self.attrs.copy(), type=self.choice_input_class.input_type,
                        name=self.name, value=variation['key']
                    )
                    if variation['key'] in self.value:
                        final_attrs['checked'] = 'checked'
                    output.append(format_html('<td><label><input{0} /></label></td>', flatatt(final_attrs)))
                # NOTE(review): this '</td>' looks stray -- every cell above
                # already closes itself, and no '</tr>' is emitted for the
                # row. Confirm against the markup actually rendered.
                output.append('</td>')
            output.append('</tbody></table>')
        return output
class VariationsCheckboxRenderer(VariationsFieldRenderer):
    """
    A :class:`VariationsFieldRenderer` with the choice input class fixed to
    checkboxes, so multiple variations can be selected at once.
    """
    choice_input_class = forms.widgets.CheckboxChoiceInput
class VariationsSelectMultiple(forms.CheckboxSelectMultiple):
    """
    This is the default widget for a VariationsField: a checkbox multi-select
    rendered through :class:`VariationsCheckboxRenderer`.
    """
    renderer = VariationsCheckboxRenderer
    # Value reported when no checkbox is ticked at all.
    _empty_value = []
class VariationsField(forms.ModelMultipleChoiceField):
    """
    This form field is intended to be used to let the user select a
    variation of a certain item, for example in a restriction plugin.
    As this field expects the non-standard keyword parameter ``item``
    at initialization time, this field is normally named ``variations``
    and lives inside a ``pretixcontrol.views.forms.RestrictionForm``, which
    does some magic to provide this parameter.
    """

    def __init__(self, *args, item=None, **kwargs):
        self.item = item
        # Bug fix: the previous check was ``'widget' not in args or
        # kwargs['widget'] is None``. It tested the *positional* argument
        # tuple for the string 'widget' (almost always absent), so any
        # widget passed as a keyword was silently replaced -- and if the
        # string had been present, ``kwargs['widget']`` would have raised
        # KeyError. Only install the default widget when none was supplied.
        if kwargs.get('widget') is None:
            kwargs['widget'] = VariationsSelectMultiple
        super().__init__(*args, **kwargs)

    def set_item(self, item: Item):
        """Late-bind the item (done by RestrictionForm) and rebuild choices."""
        assert isinstance(item, Item)
        self.item = item
        self._set_choices(self._get_choices())

    def _get_choices(self) -> "list[(str, VariationDict)]":
        """
        We can't use a normal QuerySet as there theoretically might be
        two types of variations: Some who already have a ItemVariation
        object associated with them and some who don't. We therefore use
        the item's ``get_all_variations`` method. In the first case, we
        use the ItemVariation object's primary key as our choice key,
        in the latter case we use a string constructed from the values
        (see VariationDict.key() for implementation details).
        """
        if self.item is None:
            return ()
        variations = self.item.get_all_variations(use_cache=True)
        return (
            (
                v['variation'].identity if 'variation' in v else v.key(),
                v
            ) for v in variations
        )

    def clean(self, value: "list[int]"):
        """
        At cleaning time, we have to clean up the mess we produced with our
        _get_choices implementation. In the case of ItemVariation object ids
        we don't do anything to them, but if one of the selected items is a
        list of PropertyValue objects (see _get_choices), we need to create
        a new ItemVariation object for this combination and then add this to
        our list of selected items.

        :raises ValidationError: if the selection is empty but required, is
            not a list/tuple, or references unknown variations.
        """
        if self.item is None:
            # Bug fix: the concatenated string literals were missing their
            # separating spaces ("Pleaseuse", "instead ofa").
            raise ValueError(
                "VariationsField object was not properly initialized. Please "
                "use a pretixcontrol.views.forms.RestrictionForm form instead of "
                "a plain Django ModelForm"
            )
        # Standard validation foo
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        cleaned_value = self._clean_value(value)
        qs = self.item.variations.current.filter(identity__in=cleaned_value)
        # Re-check for consistency
        pks = set(force_text(getattr(o, "identity")) for o in qs)
        for val in cleaned_value:
            if force_text(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(cleaned_value)
        return qs

    def _clean_value(self, value):
        # Build up a cache of variations having an ItemVariation object
        # For implementation details, see ItemVariation.get_all_variations()
        # which uses a very similar method
        all_variations = self.item.variations.all().prefetch_related("values")
        variations_cache = {
            var.to_variation_dict().identify(): var.identity for var in all_variations
        }
        cleaned_value = []
        # Wrap this in a transaction to prevent strange database state if we
        # get a ValidationError half-way through
        with transaction.atomic():
            for pk in value:
                if ":" in pk:
                    # A combination of PropertyValues was given
                    # Hash the combination in the same way as in our cache above
                    key = ",".join([pair.split(":")[1] for pair in sorted(pk.split(","))])
                    if key in variations_cache:
                        # An ItemVariation object already exists for this variation,
                        # so use this. (This might occur if the variation object was
                        # created _after_ the user loaded the form but _before_ he
                        # submitted it.)
                        cleaned_value.append(str(variations_cache[key]))
                        continue
                    # No ItemVariation present, create one!
                    var = ItemVariation()
                    var.item_id = self.item.identity
                    var.save()
                    # Add the values to the ItemVariation object
                    try:
                        var.add_values_from_string(pk)
                    except Exception:
                        # Bug fix: this was a bare ``except:``, which also
                        # swallowed SystemExit/KeyboardInterrupt, and it
                        # reported the whole submitted list instead of the
                        # single pk that failed.
                        raise ValidationError(
                            self.error_messages['invalid_pk_value'],
                            code='invalid_pk_value',
                            params={'pk': pk},
                        )
                    variations_cache[key] = var.identity
                    cleaned_value.append(str(var.identity))
                else:
                    # An ItemVariation id was given
                    cleaned_value.append(pk)
        return cleaned_value

    choices = property(_get_choices, forms.ChoiceField._set_choices)
| {
"repo_name": "lab2112/pretix",
"path": "src/pretix/control/forms/__init__.py",
"copies": "1",
"size": "16923",
"license": "apache-2.0",
"hash": 8853770675807326000,
"line_mean": 40.7827160494,
"line_max": 112,
"alpha_frac": 0.5969743529,
"autogenerated": false,
"ratio": 4.471987315010571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5568961667910571,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
import numpy as np
from scipy.optimize import OptimizeResult
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from skopt import dummy_minimize
from skopt import gp_minimize
from skopt import forest_minimize
from skopt import gbrt_minimize
from skopt.benchmarks import branin
from skopt.benchmarks import bench4
from skopt.space import Space
# dummy_minimize does not support same parameters so
# treated separately
MINIMIZERS = [gp_minimize]
ACQUISITION = ["LCB", "PI", "EI"]
# One forest_minimize variant per (base estimator, acquisition function)
# combination: extra-trees and random-forest surrogates.
for est, acq in product(["et", "rf"], ACQUISITION):
    MINIMIZERS.append(
        partial(forest_minimize, base_estimator=est, acq=acq))
# gbrt_minimize only varies by acquisition function.
for acq in ACQUISITION:
    MINIMIZERS.append(partial(gbrt_minimize, acq=acq))
def check_minimizer_api(result, n_models=None):
    """
    Validate the public structure of an ``OptimizeResult`` produced by a
    minimizer run on the 2-D branin problem with ``n_calls=7``.
    """
    assert isinstance(result.space, Space)
    if n_models is not None:
        assert_equal(len(result.models), n_models)
    assert_equal(len(result.x_iters), 7)
    assert_array_equal(result.func_vals.shape, (7,))

    assert isinstance(result.x, list)
    assert_equal(len(result.x), 2)

    # Every evaluated point is a 2-element list with a float objective.
    assert isinstance(result.x_iters, list)
    for point, objective in zip(result.x_iters, result.func_vals):
        assert isinstance(point, list)
        assert_equal(len(point), 2)
        assert isinstance(objective, float)

    # The reported optimum must be consistent with the evaluation history.
    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))

    # The call specification must be recorded for reproducibility.
    assert isinstance(result.specs, dict)
    assert "args" in result.specs
    assert "function" in result.specs
def check_minimizer_bounds(result):
    """All 7 evaluated points must lie inside (or on) the branin bounds."""
    eps = 10e-9  # tolerance: assert_array_less is strict, so pad the bounds
    upper = np.tile([10 + eps, 15 + eps], (7, 1))
    lower = np.tile([-5 - eps, 0 - eps], (7, 1))
    assert_array_less(result.x_iters, upper)
    assert_array_less(lower, result.x_iters)
def check_result_callable(res):
    """
    Check that the result instance handed to a callback is internally
    consistent at every call.
    """
    assert isinstance(res, OptimizeResult)
    assert_equal(len(res.x_iters), len(res.func_vals))
    assert_equal(np.min(res.func_vals), res.fun)
def test_minimizer_api():
    """Yield-style test generator covering the shared minimizer API."""
    # dummy_minimize is special as it does not support all parameters
    # and does not fit any models
    call_single = lambda res: res.x
    call_list = [call_single, check_result_callable]
    for verbose, call in product([True, False], [call_single, call_list]):
        result = dummy_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)],
                                n_calls=7, random_state=1,
                                verbose=verbose, callback=call)
        assert(result.models is None)
        yield (check_minimizer_api, result)
        yield (check_minimizer_bounds, result)
        # A non-scalar objective must be rejected.
        assert_raise_message(ValueError,
                             "return a scalar",
                             dummy_minimize, lambda x: x, [[-5, 10]])
    n_calls = 7
    n_random_starts = 3
    n_models = n_calls - n_random_starts
    # NOTE(review): ``verbose`` and ``call`` below deliberately(?) reuse the
    # last values left over from the loop above -- confirm this is intended.
    for minimizer in MINIMIZERS:
        result = minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)],
                           n_random_starts=n_random_starts,
                           n_calls=n_calls,
                           random_state=1,
                           verbose=verbose, callback=call)
        # Model-based minimizers fit one surrogate per non-random call.
        yield (check_minimizer_api, result, n_models)
        yield (check_minimizer_bounds, result)
        assert_raise_message(ValueError,
                             "return a scalar",
                             minimizer, lambda x: x, [[-5, 10]])
def test_init_vals():
    """Yield checks that user-provided x0/y0 points are honored."""
    # Continuous 2-D space with three seed points.
    space = [(-5.0, 10.0), (0.0, 15.0)]
    x0 = [[1, 2], [3, 4], [5, 6]]
    n_calls = 10
    for n_random_starts in [0, 5]:
        optimizers = [
            dummy_minimize,
            partial(gp_minimize, n_random_starts=n_random_starts),
            partial(forest_minimize, n_random_starts=n_random_starts),
            partial(gbrt_minimize, n_random_starts=n_random_starts)
        ]
        for optimizer in optimizers:
            yield (check_init_vals, optimizer, branin, space, x0, n_calls)
    # Categorical space (bench4 takes string-coded inputs).
    space = [("-2", "-1", "0", "1", "2")]
    x0 = [["0"], ["1"], ["2"]]
    n_calls = 10
    # NOTE(review): this loop reuses ``optimizers`` from the *last*
    # iteration above (n_random_starts=5) -- confirm that is intended.
    for optimizer in optimizers:
        yield (check_init_vals, optimizer, bench4, space, x0, n_calls)
def check_init_vals(optimizer, func, space, x0, n_calls):
    """
    Verify that seed points (``x0``) and optional seed evaluations (``y0``)
    are correctly incorporated into the optimization history.
    """
    y0 = list(map(func, x0))
    # testing whether the provided points with their evaluations
    # are taken into account: they are prepended, so the history grows
    # to len(x0) + n_calls.
    res = optimizer(
        func, space, x0=x0, y0=y0,
        random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0:len(x0)], x0)
    assert_array_equal(res.func_vals[0:len(y0)], y0)
    assert_equal(len(res.x_iters), len(x0) + n_calls)
    assert_equal(len(res.func_vals), len(x0) + n_calls)
    # testing whether the provided points are taken into account; without
    # y0 the seed points are evaluated as part of the n_calls budget.
    res = optimizer(
        func, space, x0=x0,
        random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0:len(x0)], x0)
    assert_array_equal(res.func_vals[0:len(y0)], y0)
    assert_equal(len(res.x_iters), n_calls)
    assert_equal(len(res.func_vals), n_calls)
    # testing whether providing a single point instead of a list
    # of points works correctly
    res = optimizer(
        func, space, x0=x0[0],
        random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0], x0[0])
    assert_array_equal(res.func_vals[0], y0[0])
    assert_equal(len(res.x_iters), n_calls)
    assert_equal(len(res.func_vals), n_calls)
    # testing whether providing a single point and its evaluation
    # instead of a list of points and their evaluations works correctly
    res = optimizer(
        func, space, x0=x0[0], y0=y0[0],
        random_state=0, n_calls=n_calls)
    assert_array_equal(res.x_iters[0], x0[0])
    assert_array_equal(res.func_vals[0], y0[0])
    assert_equal(len(res.x_iters), 1 + n_calls)
    assert_equal(len(res.func_vals), 1 + n_calls)
    # testing whether it correctly raises an exception when
    # the number of input points and the number of evaluations differ
    assert_raises(ValueError, dummy_minimize, func,
                  space, x0=x0, y0=[1])
def test_invalid_n_calls_arguments():
    """Every minimizer must reject inconsistent call-budget arguments."""
    for minimizer in MINIMIZERS:
        # A non-positive budget is meaningless.
        assert_raise_message(ValueError,
                             "Expected `n_calls` > 0",
                             minimizer,
                             branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=0,
                             random_state=1)
        # With no random starts and no seed points there is nothing to
        # initialize the surrogate model with.
        assert_raise_message(ValueError,
                             "set `n_random_starts` > 0, or provide `x0`",
                             minimizer,
                             branin, [(-5.0, 10.0), (0.0, 15.0)],
                             n_random_starts=0,
                             random_state=1)
        # n_calls >= n_random_starts
        assert_raise_message(ValueError,
                             "Expected `n_calls` >= 10",
                             minimizer, branin, [(-5.0, 10.0), (0.0, 15.0)],
                             n_calls=1, n_random_starts=10, random_state=1)
        # n_calls >= n_random_starts + len(x0)
        assert_raise_message(ValueError,
                             "Expected `n_calls` >= 10",
                             minimizer, branin, [(-5.0, 10.0), (0.0, 15.0)],
                             n_calls=1, x0=[[-1, 2], [-3, 3], [2, 5]],
                             random_state=1, n_random_starts=7)
        # n_calls >= n_random_starts when x0 and y0 are provided.
        assert_raise_message(ValueError,
                             "Expected `n_calls` >= 7",
                             minimizer, branin, [(-5.0, 10.0), (0.0, 15.0)],
                             n_calls=1, x0=[[-1, 2], [-3, 3], [2, 5]],
                             y0=[2.0, 3.0, 5.0],
                             random_state=1, n_random_starts=7)
| {
"repo_name": "glouppe/scikit-optimize",
"path": "skopt/tests/test_common.py",
"copies": "1",
"size": "8198",
"license": "bsd-3-clause",
"hash": -9048855812515792000,
"line_mean": 36.6055045872,
"line_max": 77,
"alpha_frac": 0.5783117834,
"autogenerated": false,
"ratio": 3.403071814030718,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4481383597430718,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .core import Array, normalize_chunks
from .utils import meta_from_array
def _parse_wrap_args(func, args, kwargs, shape):
    """
    Normalize the shape/chunks/dtype/name arguments shared by all blocked
    creation functions.

    Returns a dict with keys ``shape``, ``dtype``, ``kwargs``, ``chunks``
    and ``name`` ready for task-graph construction.  ``name``, ``chunks``
    and ``dtype`` are popped from ``kwargs``.
    """
    if isinstance(shape, np.ndarray):
        shape = shape.tolist()
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)

    name = kwargs.pop("name", None)
    chunks = kwargs.pop("chunks", "auto")
    dtype = kwargs.pop("dtype", None)
    if dtype is None:
        # Probe the wrapped function for the dtype it would produce.
        dtype = func(shape, *args, **kwargs).dtype
    dtype = np.dtype(dtype)

    chunks = normalize_chunks(chunks, shape, dtype=dtype)

    # Derive a deterministic graph-layer name when none was supplied.
    name = name or funcname(func) + "-" + tokenize(
        func, shape, chunks, dtype, args, kwargs
    )

    return {
        "shape": shape,
        "dtype": dtype,
        "kwargs": kwargs,
        "chunks": chunks,
        "name": name,
    }
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
    """
    Transform np creation function into blocked version.

    ``func`` must take the output shape as its first argument (like
    ``np.ones``/``np.zeros``); one task is generated per chunk, each
    calling ``func`` with that chunk's shape.
    """
    # The shape may come positionally (first arg) or as a keyword.
    if "shape" not in kwargs:
        shape, args = args[0], args[1:]
    else:
        shape = kwargs.pop("shape")
    if isinstance(shape, Array):
        raise TypeError(
            "Dask array input not supported. "
            "Please use tuple, list, or a 1D numpy array instead."
        )
    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]
    # One key per chunk: (name, i, j, ...) over the chunk grid.
    keys = product([name], *[range(len(bd)) for bd in chunks])
    # Matching per-chunk shapes, in the same iteration order as the keys.
    shapes = product(*chunks)
    func = partial(func, dtype=dtype, **kwargs)
    # Each task is the tuple (func, chunk_shape, *args).
    vals = ((func,) + (s,) + args for s in shapes)
    dsk = dict(zip(keys, vals))
    return Array(dsk, name, chunks, dtype=dtype)
def wrap_func_like(func, *args, **kwargs):
    """
    Transform an np creation function of the ``*_like`` family into a
    blocked version.

    The first positional argument is the template array; one task is
    generated per output chunk, each calling ``func`` with that chunk's
    own shape.
    """
    x = args[0]
    meta = meta_from_array(x)
    shape = kwargs.get("shape", x.shape)
    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]
    keys = product([name], *[range(len(bd)) for bd in chunks])
    shapes = list(product(*chunks))
    # Bug fix: the previous code built ``kw = [kwargs for _ in shapes]`` --
    # a list of references to ONE shared dict -- and then mutated
    # ``kw[i]["shape"]`` in a loop.  Because ``vals`` below is a lazy
    # generator, every task ended up bound to the *last* chunk's shape.
    # Give each task its own kwargs copy with its own shape instead.
    kw = [dict(kwargs, shape=s) for s in shapes]
    vals = ((partial(func, dtype=dtype, **k),) + args for k in kw)
    dsk = dict(zip(keys, vals))
    return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
    """
    Prefer ``func_like(*args, **kwargs)``; if it rejects the given keyword
    arguments with a TypeError (e.g. older NumPy ``*_like`` functions
    without the ``shape`` keyword), fall back to ``func(*args, **kwargs)``.
    """
    try:
        return func_like(*args, **kwargs)
    except TypeError:
        return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
    """
    Bind ``func`` (or ``func_like`` if given) to ``wrap_func``, producing a
    blocked variant that carries over the original's name and docstring
    with a note about the extra ``chunks``/``name`` keyword arguments.
    """
    func_like = kwargs.pop("func_like", None)
    target = func if func_like is None else func_like
    f = partial(wrap_func, target, **kwargs)
    template = """
    Blocked variant of %(name)s
    Follows the signature of %(name)s exactly except that it also features
    optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
    Original signature follows below.
    """
    # Only prepend the note when there is a docstring to extend.
    if func.__doc__ is not None:
        f.__doc__ = template % {"name": func.__name__} + func.__doc__
    f.__name__ = "blocked_" + func.__name__
    return f
# Blocked creation functions taking the shape as first argument.
w = wrap(wrap_func_shape_as_first_arg)
ones = w(np.ones, dtype="f8")
zeros = w(np.zeros, dtype="f8")
empty = w(np.empty, dtype="f8")
# Blocked *_like variants, falling back to the plain function when the
# installed NumPy does not support the `shape` keyword.
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(np.full)
_full_like = w_like(np.full, func_like=np.full_like)
def full(shape, fill_value, *args, **kwargs):
    """
    Blocked variant of ``np.full``; rejects non-scalar ``fill_value`` up
    front instead of letting it broadcast per chunk.
    """
    # np.isscalar has somewhat strange behavior, so detect non-scalars via
    # np.ndim instead:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
    if np.ndim(fill_value) > 0:
        raise ValueError(
            f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
        )
    return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
    """
    Blocked variant of ``np.full_like``; ``fill_value`` must be a scalar
    (see :func:`full`).
    """
    if np.ndim(fill_value) > 0:
        raise ValueError(
            f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
        )
    return _full_like(a=a, fill_value=fill_value, *args, **kwargs)
# Re-export the wrapped functions' docstrings so the scalar-checking
# wrappers are documented like the blocked functions they delegate to.
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
| {
"repo_name": "ContinuumIO/dask",
"path": "dask/array/wrap.py",
"copies": "2",
"size": "4804",
"license": "bsd-3-clause",
"hash": -4105232491476505000,
"line_mean": 26.7687861272,
"line_max": 87,
"alpha_frac": 0.6117818485,
"autogenerated": false,
"ratio": 3.4461979913916787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057979839891679,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from itertools import zip_longest
from math import floor
from operator import is_not
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db import models
from . import base
class RuneObjectBase(base.Stats, base.Quality, base.Stars):
    """
    Mixin of rune-related constants: rune set ("type") identifiers, their
    display choices, and the mapping from the Com2uS API's set identifiers
    to the values used here.  Stat/quality/star constants are inherited
    from the ``base`` module.
    """
    # Rune set identifiers used throughout this model layer.
    TYPE_ENERGY = 1
    TYPE_FATAL = 2
    TYPE_BLADE = 3
    TYPE_RAGE = 4
    TYPE_SWIFT = 5
    TYPE_FOCUS = 6
    TYPE_GUARD = 7
    TYPE_ENDURE = 8
    TYPE_VIOLENT = 9
    TYPE_WILL = 10
    TYPE_NEMESIS = 11
    TYPE_SHIELD = 12
    TYPE_REVENGE = 13
    TYPE_DESPAIR = 14
    TYPE_VAMPIRE = 15
    TYPE_DESTROY = 16
    TYPE_FIGHT = 17
    TYPE_DETERMINATION = 18
    TYPE_ENHANCE = 19
    TYPE_ACCURACY = 20
    TYPE_TOLERANCE = 21
    # Django-style choices tuple for form/model fields.
    TYPE_CHOICES = (
        (TYPE_ENERGY, 'Energy'),
        (TYPE_FATAL, 'Fatal'),
        (TYPE_BLADE, 'Blade'),
        (TYPE_RAGE, 'Rage'),
        (TYPE_SWIFT, 'Swift'),
        (TYPE_FOCUS, 'Focus'),
        (TYPE_GUARD, 'Guard'),
        (TYPE_ENDURE, 'Endure'),
        (TYPE_VIOLENT, 'Violent'),
        (TYPE_WILL, 'Will'),
        (TYPE_NEMESIS, 'Nemesis'),
        (TYPE_SHIELD, 'Shield'),
        (TYPE_REVENGE, 'Revenge'),
        (TYPE_DESPAIR, 'Despair'),
        (TYPE_VAMPIRE, 'Vampire'),
        (TYPE_DESTROY, 'Destroy'),
        (TYPE_FIGHT, 'Fight'),
        (TYPE_DETERMINATION, 'Determination'),
        (TYPE_ENHANCE, 'Enhance'),
        (TYPE_ACCURACY, 'Accuracy'),
        (TYPE_TOLERANCE, 'Tolerance'),
    )
    # Mappings from com2us' API data to model defined values.
    # Note the two numbering schemes differ (e.g. API 2 -> Guard) and the
    # API skips ids 9 and 12.
    COM2US_TYPE_MAP = {
        1: TYPE_ENERGY,
        2: TYPE_GUARD,
        3: TYPE_SWIFT,
        4: TYPE_BLADE,
        5: TYPE_RAGE,
        6: TYPE_FOCUS,
        7: TYPE_ENDURE,
        8: TYPE_FATAL,
        10: TYPE_DESPAIR,
        11: TYPE_VAMPIRE,
        13: TYPE_VIOLENT,
        14: TYPE_NEMESIS,
        15: TYPE_WILL,
        16: TYPE_SHIELD,
        17: TYPE_REVENGE,
        18: TYPE_DESTROY,
        19: TYPE_FIGHT,
        20: TYPE_DETERMINATION,
        21: TYPE_ENHANCE,
        22: TYPE_ACCURACY,
        23: TYPE_TOLERANCE,
    }
class Rune(models.Model, RuneObjectBase):
    """Abstract Django model describing a single rune.

    Carries the stat tables used to cap/validate stat values and to compute
    rune efficiency, plus denormalized boolean fields for fast filtering.
    Concrete rune models inherit from this class (``Meta.abstract = True``).
    """

    # Main stat value by [stat][stars][level]. Each list holds 16 entries,
    # one per level 0-15 inclusive.
    MAIN_STAT_VALUES = {
        # [stat][stars][level]: value
        RuneObjectBase.STAT_HP: {
            1: [40, 85, 130, 175, 220, 265, 310, 355, 400, 445, 490, 535, 580, 625, 670, 804],
            2: [70, 130, 190, 250, 310, 370, 430, 490, 550, 610, 670, 730, 790, 850, 910, 1092],
            3: [100, 175, 250, 325, 400, 475, 550, 625, 700, 775, 850, 925, 1000, 1075, 1150, 1380],
            4: [160, 250, 340, 430, 520, 610, 700, 790, 880, 970, 1060, 1150, 1240, 1330, 1420, 1704],
            5: [270, 375, 480, 585, 690, 795, 900, 1005, 1110, 1215, 1320, 1425, 1530, 1635, 1740, 2088],
            6: [360, 480, 600, 720, 840, 960, 1080, 1200, 1320, 1440, 1560, 1680, 1800, 1920, 2040, 2448],
        },
        RuneObjectBase.STAT_HP_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
            4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
            5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
            6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
        },
        RuneObjectBase.STAT_ATK: {
            1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
            2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
            3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
            4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
            5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
            6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
        },
        RuneObjectBase.STAT_ATK_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
            4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
            5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
            6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
        },
        RuneObjectBase.STAT_DEF: {
            1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
            2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
            3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
            4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
            5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
            6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
        },
        RuneObjectBase.STAT_DEF_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
            4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
            5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
            6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
        },
        RuneObjectBase.STAT_SPD: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 19, 21, 25],
            4: [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, 25, 30],
            5: [5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 39],
            6: [7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 42],
        },
        RuneObjectBase.STAT_CRIT_RATE_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
            4: [4, 6, 8, 11, 13, 15, 17, 19, 22, 24, 26, 28, 30, 33, 35, 41],
            5: [5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 31, 34, 36, 39, 47],
            6: [7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, 58],
        },
        RuneObjectBase.STAT_CRIT_DMG_PCT: {
            1: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            2: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
            3: [4, 6, 9, 11, 13, 16, 18, 20, 22, 25, 27, 29, 32, 34, 36, 43],
            4: [6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 57],
            5: [8, 11, 15, 18, 21, 25, 28, 31, 34, 38, 41, 44, 48, 51, 54, 65],
            6: [11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63, 67, 80],
        },
        RuneObjectBase.STAT_RESIST_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
            4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
            5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
            6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
        },
        RuneObjectBase.STAT_ACCURACY_PCT: {
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
            2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
            3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
            4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
            5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
            6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
        },
    }

    # Main stats permitted in each rune slot (1-6); used by clean().
    MAIN_STATS_BY_SLOT = {
        1: [
            RuneObjectBase.STAT_ATK,
        ],
        2: [
            RuneObjectBase.STAT_ATK,
            RuneObjectBase.STAT_ATK_PCT,
            RuneObjectBase.STAT_DEF,
            RuneObjectBase.STAT_DEF_PCT,
            RuneObjectBase.STAT_HP,
            RuneObjectBase.STAT_HP_PCT,
            RuneObjectBase.STAT_SPD,
        ],
        3: [
            RuneObjectBase.STAT_DEF,
        ],
        4: [
            RuneObjectBase.STAT_ATK,
            RuneObjectBase.STAT_ATK_PCT,
            RuneObjectBase.STAT_DEF,
            RuneObjectBase.STAT_DEF_PCT,
            RuneObjectBase.STAT_HP,
            RuneObjectBase.STAT_HP_PCT,
            RuneObjectBase.STAT_CRIT_RATE_PCT,
            RuneObjectBase.STAT_CRIT_DMG_PCT,
        ],
        5: [
            RuneObjectBase.STAT_HP,
        ],
        6: [
            RuneObjectBase.STAT_ATK,
            RuneObjectBase.STAT_ATK_PCT,
            RuneObjectBase.STAT_DEF,
            RuneObjectBase.STAT_DEF_PCT,
            RuneObjectBase.STAT_HP,
            RuneObjectBase.STAT_HP_PCT,
            RuneObjectBase.STAT_RESIST_PCT,
            RuneObjectBase.STAT_ACCURACY_PCT,
        ]
    }

    # Maximum value a single substat upgrade roll can add, per star level.
    SUBSTAT_INCREMENTS = {
        # [stat][stars]: value
        RuneObjectBase.STAT_HP: {
            1: 60,
            2: 105,
            3: 165,
            4: 225,
            5: 300,
            6: 375,
        },
        RuneObjectBase.STAT_HP_PCT: {
            1: 2,
            2: 3,
            3: 5,
            4: 6,
            5: 7,
            6: 8,
        },
        RuneObjectBase.STAT_ATK: {
            1: 4,
            2: 5,
            3: 8,
            4: 10,
            5: 15,
            6: 20,
        },
        RuneObjectBase.STAT_ATK_PCT: {
            1: 2,
            2: 3,
            3: 5,
            4: 6,
            5: 7,
            6: 8,
        },
        RuneObjectBase.STAT_DEF: {
            1: 4,
            2: 5,
            3: 8,
            4: 10,
            5: 15,
            6: 20,
        },
        RuneObjectBase.STAT_DEF_PCT: {
            1: 2,
            2: 3,
            3: 5,
            4: 6,
            5: 7,
            6: 8,
        },
        RuneObjectBase.STAT_SPD: {
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 6,
        },
        RuneObjectBase.STAT_CRIT_RATE_PCT: {
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 6,
        },
        RuneObjectBase.STAT_CRIT_DMG_PCT: {
            1: 2,
            2: 3,
            3: 4,
            4: 5,
            5: 6,
            6: 7,
        },
        RuneObjectBase.STAT_RESIST_PCT: {
            1: 2,
            2: 3,
            3: 5,
            4: 6,
            5: 7,
            6: 8,
        },
        RuneObjectBase.STAT_ACCURACY_PCT: {
            1: 2,
            2: 3,
            3: 5,
            4: 6,
            5: 7,
            6: 8,
        },
    }

    # Upgrade roll value per star level expressed as a fraction of the 6-star
    # roll for the same stat; used to rank potential upgrades in
    # get_max_efficiency().
    UPGRADE_VALUES = {
        rune_type: {
            stars: value/level_data[6]
            for stars, value in level_data.items()
        }
        for rune_type, level_data in SUBSTAT_INCREMENTS.items()
    }

    # Display prefix for a rune based on its innate (prefix) stat.
    INNATE_STAT_TITLES = {
        RuneObjectBase.STAT_HP: 'Strong',
        RuneObjectBase.STAT_HP_PCT: 'Tenacious',
        RuneObjectBase.STAT_ATK: 'Ferocious',
        RuneObjectBase.STAT_ATK_PCT: 'Powerful',
        RuneObjectBase.STAT_DEF: 'Sturdy',
        RuneObjectBase.STAT_DEF_PCT: 'Durable',
        RuneObjectBase.STAT_SPD: 'Quick',
        RuneObjectBase.STAT_CRIT_RATE_PCT: 'Mortal',
        RuneObjectBase.STAT_CRIT_DMG_PCT: 'Cruel',
        RuneObjectBase.STAT_RESIST_PCT: 'Resistant',
        RuneObjectBase.STAT_ACCURACY_PCT: 'Intricate',
    }

    # Number of equipped runes of a set required to activate its bonus.
    RUNE_SET_COUNT_REQUIREMENTS = {
        RuneObjectBase.TYPE_ENERGY: 2,
        RuneObjectBase.TYPE_FATAL: 4,
        RuneObjectBase.TYPE_BLADE: 2,
        RuneObjectBase.TYPE_RAGE: 4,
        RuneObjectBase.TYPE_SWIFT: 4,
        RuneObjectBase.TYPE_FOCUS: 2,
        RuneObjectBase.TYPE_GUARD: 2,
        RuneObjectBase.TYPE_ENDURE: 2,
        RuneObjectBase.TYPE_VIOLENT: 4,
        RuneObjectBase.TYPE_WILL: 2,
        RuneObjectBase.TYPE_NEMESIS: 2,
        RuneObjectBase.TYPE_SHIELD: 2,
        RuneObjectBase.TYPE_REVENGE: 2,
        RuneObjectBase.TYPE_DESPAIR: 4,
        RuneObjectBase.TYPE_VAMPIRE: 4,
        RuneObjectBase.TYPE_DESTROY: 2,
        RuneObjectBase.TYPE_FIGHT: 2,
        RuneObjectBase.TYPE_DETERMINATION: 2,
        RuneObjectBase.TYPE_ENHANCE: 2,
        RuneObjectBase.TYPE_ACCURACY: 2,
        RuneObjectBase.TYPE_TOLERANCE: 2,
    }

    # Set bonus details per rune type. 'stat'/'value' are None when the bonus
    # is an effect with no flat stat contribution; 'team' marks bonuses that
    # apply to all allies rather than only the wearer.
    RUNE_SET_BONUSES = {
        RuneObjectBase.TYPE_ENERGY: {
            'count': 2,
            'stat': RuneObjectBase.STAT_HP_PCT,
            'value': 15.0,
            'team': False,
            'description': 'Energy 2 Set: HP +15%',
        },
        RuneObjectBase.TYPE_FATAL: {
            'count': 4,
            'stat': RuneObjectBase.STAT_ATK_PCT,
            'value': 35.0,
            'team': False,
            'description': 'Fatal 4 Set: Attack Power +35%',
        },
        RuneObjectBase.TYPE_BLADE: {
            'count': 2,
            'stat': RuneObjectBase.STAT_CRIT_RATE_PCT,
            'value': 12.0,
            'team': False,
            'description': 'Blade 2 Set: Critical Rate +12%',
        },
        RuneObjectBase.TYPE_RAGE: {
            'count': 4,
            'stat': RuneObjectBase.STAT_CRIT_DMG_PCT,
            'value': 40.0,
            'team': False,
            'description': 'Rage 4 Set: Critical Damage +40%',
        },
        RuneObjectBase.TYPE_SWIFT: {
            'count': 4,
            # NOTE(review): STAT_SPD_PCT is not defined in this file — it is
            # presumably declared on the base.Stats mixin; confirm.
            'stat': RuneObjectBase.STAT_SPD_PCT,
            'value': 25.0,
            'team': False,
            'description': 'Swift 4 Set: Attack Speed +25%',
        },
        RuneObjectBase.TYPE_FOCUS: {
            'count': 2,
            'stat': RuneObjectBase.STAT_ACCURACY_PCT,
            'value': 20.0,
            'team': False,
            'description': 'Focus 2 Set: Accuracy +20%',
        },
        RuneObjectBase.TYPE_GUARD: {
            'count': 2,
            'stat': RuneObjectBase.STAT_DEF_PCT,
            'value': 15.0,
            'team': False,
            'description': 'Guard 2 Set: Defense +15%',
        },
        RuneObjectBase.TYPE_ENDURE: {
            'count': 2,
            'stat': RuneObjectBase.STAT_RESIST_PCT,
            'value': 20.0,
            'team': False,
            'description': 'Endure 2 Set: Resistance +20%',
        },
        RuneObjectBase.TYPE_VIOLENT: {
            'count': 4,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Violent 4 Set: Get Extra Turn +22%',
        },
        RuneObjectBase.TYPE_WILL: {
            'count': 2,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Will 2 Set: Immunity +1 turn',
        },
        RuneObjectBase.TYPE_NEMESIS: {
            'count': 2,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Nemesis 2 Set: ATK Gauge +4% (for every 7% HP lost)',
        },
        RuneObjectBase.TYPE_SHIELD: {
            'count': 2,
            'stat': None,
            'value': None,
            'team': True,
            'description': 'Shield 2 Set: Ally Shield 3 turns (15% of HP)',
        },
        RuneObjectBase.TYPE_REVENGE: {
            'count': 2,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Revenge 2 Set: Counterattack +15%',
        },
        RuneObjectBase.TYPE_DESPAIR: {
            'count': 4,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Despair 4 Set: Stun Rate +25%',
        },
        RuneObjectBase.TYPE_VAMPIRE: {
            'count': 4,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Vampire 4 Set: Life Drain +35%',
        },
        RuneObjectBase.TYPE_DESTROY: {
            'count': 2,
            'stat': None,
            'value': None,
            'team': False,
            'description': 'Destroy 2 Set: 30% of the damage dealt will reduce up to 4% of the enemy\'s Max HP',
        },
        RuneObjectBase.TYPE_FIGHT: {
            'count': 2,
            'stat': RuneObjectBase.STAT_ATK,
            'value': 8.0,
            'team': True,
            'description': 'Fight 2 Set: Increase the Attack Power of all allies by 8%',
        },
        RuneObjectBase.TYPE_DETERMINATION: {
            'count': 2,
            'stat': RuneObjectBase.STAT_DEF,
            'value': 8.0,
            'team': True,
            'description': 'Determination 2 Set: Increase the Defense of all allies by 8%',
        },
        RuneObjectBase.TYPE_ENHANCE: {
            'count': 2,
            'stat': RuneObjectBase.STAT_HP,
            'value': 8.0,
            'team': True,
            'description': 'Enhance 2 Set: Increase the HP of all allies by 8%',
        },
        RuneObjectBase.TYPE_ACCURACY: {
            'count': 2,
            'stat': RuneObjectBase.STAT_ACCURACY_PCT,
            'value': 10.0,
            'team': True,
            'description': 'Accuracy 2 Set: Increase the Accuracy of all allies by 10%',
        },
        RuneObjectBase.TYPE_TOLERANCE: {
            'count': 2,
            'stat': RuneObjectBase.STAT_RESIST_PCT,
            'value': 10.0,
            'team': True,
            'description': 'Tolerance 2 Set: Increase the Resistance of all allies by 10%',
        },
    }

    # Core rune attributes
    type = models.IntegerField(choices=RuneObjectBase.TYPE_CHOICES)
    stars = models.IntegerField()
    level = models.IntegerField()
    slot = models.IntegerField()
    # `quality` is recomputed from substat count in update_fields(); the
    # original (drop) quality is stored separately.
    quality = models.IntegerField(default=0, choices=RuneObjectBase.QUALITY_CHOICES)
    original_quality = models.IntegerField(choices=RuneObjectBase.QUALITY_CHOICES, blank=True, null=True)
    ancient = models.BooleanField(default=False)
    value = models.IntegerField(blank=True, null=True)
    main_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES)
    main_stat_value = models.IntegerField()
    innate_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True)
    innate_stat_value = models.IntegerField(null=True, blank=True)
    # Parallel arrays (index-aligned, up to 4 entries): substat type, rolled
    # value, whether an enchant gem replaced it, and applied grindstone value.
    substats = ArrayField(
        models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True),
        size=4,
        default=list,
    )
    substat_values = ArrayField(
        models.IntegerField(blank=True, null=True),
        size=4,
        default=list,
    )
    substats_enchanted = ArrayField(
        models.BooleanField(default=False, blank=True),
        size=4,
        default=list,
    )
    substats_grind_value = ArrayField(
        models.IntegerField(default=0, blank=True),
        size=4,
        default=list,
    )

    # The following fields exist purely to allow easier filtering and are updated on model save
    has_hp = models.BooleanField(default=False)
    has_atk = models.BooleanField(default=False)
    has_def = models.BooleanField(default=False)
    has_crit_rate = models.BooleanField(default=False)
    has_crit_dmg = models.BooleanField(default=False)
    has_speed = models.BooleanField(default=False)
    has_resist = models.BooleanField(default=False)
    has_accuracy = models.BooleanField(default=False)
    efficiency = models.FloatField(blank=True, null=True)
    max_efficiency = models.FloatField(blank=True, null=True)
    substat_upgrades_remaining = models.IntegerField(blank=True, null=True)
    has_grind = models.IntegerField(default=0, help_text='Number of grindstones applied')
    has_gem = models.BooleanField(default=False, help_text='Has had an enchant gem applied')

    class Meta:
        abstract = True

    def get_main_stat_rune_display(self):
        """Return the display name of the main stat ('' if unknown)."""
        return RuneObjectBase.STAT_DISPLAY.get(self.main_stat, '')

    def get_innate_stat_rune_display(self):
        """Return the display name of the innate stat ('' if unset/unknown)."""
        return RuneObjectBase.STAT_DISPLAY.get(self.innate_stat, '')

    def get_innate_stat_title(self):
        """Return the rune's display prefix for its innate stat, or ''."""
        if self.innate_stat is not None:
            return self.INNATE_STAT_TITLES[self.innate_stat]
        else:
            return ''

    def get_substat_rune_display(self, idx):
        """Return the display name of substat at index ``idx`` ('' if absent)."""
        if len(self.substats) > idx:
            return RuneObjectBase.STAT_DISPLAY.get(self.substats[idx], '')
        else:
            return ''

    # For template rendering
    @property
    def substat_rune_display(self):
        # List of display names for all present substats.
        return [self.get_substat_rune_display(x) for x in range(len(self.substats))]

    def get_stat(self, stat_type, sub_stats_only=False):
        """Return the total value this rune provides for ``stat_type``.

        Checks main stat, then innate stat (unless ``sub_stats_only``), then
        substats. Substat values include any applied grindstone value; a
        missing grind entry is treated as 0. Returns 0 when the stat is not
        present on the rune.
        """
        if self.main_stat == stat_type and not sub_stats_only:
            return self.main_stat_value
        elif self.innate_stat == stat_type and not sub_stats_only:
            return self.innate_stat_value
        else:
            for idx, substat in enumerate(self.substats):
                if substat == stat_type:
                    # Grind value array may be shorter than substats array.
                    if len(self.substats_grind_value) < idx + 1:
                        sub_grind_val = 0
                    else:
                        sub_grind_val = self.substats_grind_value[idx]
                    return self.substat_values[idx] + sub_grind_val
        return 0

    # Individual functions for each stat to use within templates
    def get_hp_pct(self):
        return self.get_stat(Rune.STAT_HP_PCT, False)

    def get_hp(self):
        return self.get_stat(Rune.STAT_HP, False)

    def get_def_pct(self):
        return self.get_stat(Rune.STAT_DEF_PCT, False)

    def get_def(self):
        return self.get_stat(Rune.STAT_DEF, False)

    def get_atk_pct(self):
        return self.get_stat(Rune.STAT_ATK_PCT, False)

    def get_atk(self):
        return self.get_stat(Rune.STAT_ATK, False)

    def get_spd(self):
        return self.get_stat(Rune.STAT_SPD, False)

    def get_cri_rate(self):
        return self.get_stat(Rune.STAT_CRIT_RATE_PCT, False)

    def get_cri_dmg(self):
        return self.get_stat(Rune.STAT_CRIT_DMG_PCT, False)

    def get_res(self):
        return self.get_stat(Rune.STAT_RESIST_PCT, False)

    def get_acc(self):
        return self.get_stat(Rune.STAT_ACCURACY_PCT, False)

    @property
    def substat_upgrades_received(self):
        # One substat upgrade roll is granted every 3 levels, up to level 12
        # (max 4 rolls).
        return int(floor(min(self.level, 12) / 3))

    def get_efficiency(self):
        """Return this rune's efficiency as a percentage of a perfect rune.

        NOTE(review): zip() truncates to the shortest list, so trailing
        substats are skipped if substat_values/substats_grind_value are
        shorter; clean() pads these arrays to equal length beforehand.
        """
        # https://www.youtube.com/watch?v=SBWeptNNbYc
        # All runes are compared against max stat values for perfect 6* runes.

        # Main stat efficiency (max 100%)
        running_sum = float(self.MAIN_STAT_VALUES[self.main_stat][self.stars][15]) / float(
            self.MAIN_STAT_VALUES[self.main_stat][6][15])

        # Substat efficiencies (max 20% per; 1 innate, max 4 initial, 4 upgrades)
        if self.innate_stat is not None:
            running_sum += self.innate_stat_value / float(self.SUBSTAT_INCREMENTS[self.innate_stat][6] * 5)

        for substat, value, grind_value in zip(self.substats, self.substat_values, self.substats_grind_value):
            running_sum += (value + grind_value) / float(self.SUBSTAT_INCREMENTS[substat][6] * 5)

        return running_sum / 2.8 * 100

    def get_max_efficiency(self):
        """Return the best efficiency reachable via remaining substat rolls.

        Assumes new substat slots receive the top-valued stats not yet on the
        rune, and repeat rolls land on the rune's most valuable existing stat.
        """
        # Max efficiency does not include grinds
        efficiency = self.get_efficiency()
        new_stats = min(4 - len(self.substats), self.substat_upgrades_remaining)
        old_stats = self.substat_upgrades_remaining - new_stats

        if old_stats > 0:
            # we can repeatedly upgrade the most value of the existing stats
            best_stat = max(
                0,  # ensure max() doesn't error if we only have one stat
                *[self.UPGRADE_VALUES[stat][self.stars] for stat in self.substats]
            )
            efficiency += best_stat * old_stats * 0.2 / 2.8 * 100

        if new_stats:
            # add the top N stats
            available_upgrades = sorted(
                [
                    upgrade_value[self.stars]
                    for stat, upgrade_value in self.UPGRADE_VALUES.items()
                    if stat not in self.substats
                ],
                reverse=True
            )
            efficiency += sum(available_upgrades[:new_stats]) * 0.2 / 2.8 * 100

        return efficiency

    def update_fields(self):
        """Recompute denormalized/filterable fields and cap stat values.

        Called from save(); does not persist anything itself.
        """
        # Set filterable fields
        rune_stat_types = [self.main_stat, self.innate_stat] + self.substats
        self.has_hp = any([i for i in rune_stat_types if i in [self.STAT_HP, self.STAT_HP_PCT]])
        self.has_atk = any([i for i in rune_stat_types if i in [self.STAT_ATK, self.STAT_ATK_PCT]])
        self.has_def = any([i for i in rune_stat_types if i in [self.STAT_DEF, self.STAT_DEF_PCT]])
        self.has_crit_rate = self.STAT_CRIT_RATE_PCT in rune_stat_types
        self.has_crit_dmg = self.STAT_CRIT_DMG_PCT in rune_stat_types
        self.has_speed = self.STAT_SPD in rune_stat_types
        self.has_resist = self.STAT_RESIST_PCT in rune_stat_types
        self.has_accuracy = self.STAT_ACCURACY_PCT in rune_stat_types

        # Quality is derived from the number of non-empty substats; the
        # resulting count lines up with the QUALITY_* constant values.
        self.quality = len([substat for substat in self.substats if substat])
        self.substat_upgrades_remaining = 4 - self.substat_upgrades_received
        self.efficiency = self.get_efficiency()
        self.max_efficiency = self.get_max_efficiency()
        self.has_grind = sum([bool(x) for x in self.substats_grind_value])
        self.has_gem = any(self.substats_enchanted)

        # Cap stat values to appropriate value
        # Very old runes can have different values, but never higher than the cap
        if self.main_stat_value:
            self.main_stat_value = min(self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level], self.main_stat_value)
        else:
            # Missing/zero main stat value: fill in the expected value.
            self.main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]

        if self.innate_stat and self.innate_stat_value and self.innate_stat_value > self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]:
            self.innate_stat_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]

        for idx, substat in enumerate(self.substats):
            # A substat can never exceed its per-roll increment times the
            # number of rolls it could have received (initial + upgrades).
            max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * (self.substat_upgrades_received + 1)
            if self.substat_values[idx] > max_sub_value:
                self.substat_values[idx] = max_sub_value

    def clean(self):
        """Validate star/level/slot ranges, stat assignments, and stat values.

        Also normalizes the substat parallel arrays (trims/pads them to the
        number of declared substats). Raises django.core.exceptions.
        ValidationError with field-keyed messages on any violation.
        """
        # Check slot, level, etc for valid ranges
        stars_message = 'Must be between 1 and 6'
        if self.stars is None:
            raise ValidationError({'stars': ValidationError(stars_message, code='stars_missing')})
        elif self.stars < 1 or self.stars > 6:
            raise ValidationError({'stars': ValidationError(stars_message, code='stars_invalid')})

        level_message = 'Must be between 0 and 15'
        if self.level is None:
            raise ValidationError({'level': ValidationError(level_message, code='level_missing')})
        elif self.level < 0 or self.level > 15:
            raise ValidationError({'level': ValidationError(level_message, code='level_invalid')})

        slot_message = 'Must be between 1 and 6'
        if self.slot is None:
            raise ValidationError({'slot': ValidationError(slot_message, code='slot_missing')})
        elif self.slot < 1 or self.slot > 6:
            raise ValidationError({'slot': ValidationError(slot_message, code='slot_invalid')})

        # Check main stat is appropriate for this slot
        if self.slot and self.main_stat not in self.MAIN_STATS_BY_SLOT[self.slot]:
            raise ValidationError({
                'main_stat': ValidationError(
                    'Unacceptable stat for slot %(slot)s. Must be %(valid_stats)s.',
                    params={
                        'slot': self.slot,
                        # STAT_CHOICES is indexed by stat value - 1 to get the label.
                        'valid_stats': ', '.join([RuneObjectBase.STAT_CHOICES[stat - 1][1] for stat in self.MAIN_STATS_BY_SLOT[self.slot]])
                    },
                    code='invalid_main_stat_for_slot'
                ),
            })

        # Check that the same stat type was not used multiple times
        stat_list = list(filter(
            partial(is_not, None),
            [self.main_stat, self.innate_stat] + self.substats
        ))
        if len(stat_list) != len(set(stat_list)):
            raise ValidationError(
                'All stats and substats must be unique.',
                code='duplicate_stats'
            )

        # Check if stat type was specified that it has value > 0
        if self.main_stat_value is None:
            raise ValidationError({
                'main_stat_value': ValidationError(
                    'Missing main stat value.',
                    code='main_stat_missing',
                )
            })

        max_main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]
        if self.main_stat_value > max_main_stat_value:
            raise ValidationError({
                'main_stat_value': ValidationError(
                    f'Main stat value for {self.get_main_stat_display()} at {self.stars}* lv. {self.level} must be less than {max_main_stat_value}',
                    code='main_stat_too_high',
                )
            })

        if self.innate_stat is not None:
            if self.innate_stat_value is None:
                raise ValidationError({
                    'innate_stat_value': ValidationError(
                        'Missing value',
                        code='innate_stat_missing'
                    )
                })
            if self.innate_stat_value <= 0:
                raise ValidationError({
                    'innate_stat_value': ValidationError(
                        'Must be greater than 0',
                        code='innate_stat_too_low'
                    )
                })
            max_sub_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]
            # TODO: Remove not ancient check once ancient max substat values are found
            if self.innate_stat_value > max_sub_value and not self.ancient:
                raise ValidationError({
                    'innate_stat_value': ValidationError(
                        'Must be less than or equal to %(max)s',
                        params={'max': max_sub_value},
                        code='innate_stat_too_high'
                    )
                })
        else:
            # No innate stat declared: discard any stray value.
            self.innate_stat_value = None

        # Check that a minimum number of substats are present based on the level
        if len(self.substats) < self.substat_upgrades_received:
            raise ValidationError({
                'substats': ValidationError(
                    'A lv. %(level)s rune requires at least %(upgrades)s substat(s)',
                    params={
                        'level': self.level,
                        'upgrades': self.substat_upgrades_received,
                    },
                    code='not_enough_substats'
                )
            })

        # Trim substat values to match length of defined substats
        num_substats = len(self.substats)
        self.substat_values = self.substat_values[0:num_substats]
        self.substats_enchanted = self.substats_enchanted[0:num_substats]
        self.substats_grind_value = self.substats_grind_value[0:num_substats]

        # Pad with 0 if too short
        self.substat_values += [0] * (num_substats - len(self.substat_values))
        self.substats_enchanted += [0] * (num_substats - len(self.substats_enchanted))
        self.substats_grind_value += [0] * (num_substats - len(self.substats_grind_value))

        for index, (substat, value, grind_value) in enumerate(zip(
                self.substats,
                self.substat_values,
                self.substats_grind_value
        )):
            if value is None or value <= 0:
                raise ValidationError({
                    'substat_values': ValidationError(
                        'Substat %(nth)s: Must be greater than 0.',
                        params={'nth': index + 1},
                        code=f'substat_too_low'
                    )
                })

            max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * (self.substat_upgrades_received + 1)
            # TODO: Remove not ancient check once ancient max substat values are found
            if value > max_sub_value and not self.ancient:
                raise ValidationError({
                    'substat_values': ValidationError(
                        'Substat %(nth)s: Must be less than or equal to %(max_val)s.',
                        params={
                            'nth': index + 1,
                            'max_val': max_sub_value,
                        },
                        code=f'substat_too_high'
                    )
                })

            # Validate grind value
            # Grinds are capped at the legend-quality ancient grindstone max
            # for the substat's stat type.
            max_grind_value = RuneCraft.CRAFT_VALUE_RANGES[RuneCraft.CRAFT_ANCIENT_GRINDSTONE][substat][RuneCraft.QUALITY_LEGEND]['max']
            if grind_value > max_grind_value:
                raise ValidationError({
                    'substats_grind_value': ValidationError(
                        f'Substat Grind %(nth)s: Must be less than or equal to {max_grind_value}.',
                        params={'nth': index + 1},
                        code=f'grind_too_high'
                    )
                })

        # Validate minimum level if enchant gem is applied
        if self.level < 12 and any(self.substats_enchanted):
            raise ValidationError({
                'level': ValidationError(
                    'Level must be 12 or higher when Enchant Gem is applied',
                    code='level_invalid'
                )
            })

        # Validate number of gems applied
        if sum(self.substats_enchanted) > 1:
            raise ValidationError({
                'substats_enchanted': ValidationError(
                    'Only one substat may have an enchant gem applied.',
                    code='too_many_enchants',
                ),
            })

    def save(self, *args, **kwargs):
        # Refresh denormalized fields before every save.
        self.update_fields()
        super(Rune, self).save(*args, **kwargs)

    def __str__(self):
        # e.g. 'Quick Swift Rune'; yields a leading space when there is no
        # innate stat title.
        return self.get_innate_stat_title() + ' ' + self.get_type_display() + ' ' + 'Rune'
class RuneCraft(models.Model, RuneObjectBase):
CRAFT_GRINDSTONE = 0
CRAFT_ENCHANT_GEM = 1
CRAFT_IMMEMORIAL_GRINDSTONE = 2
CRAFT_IMMEMORIAL_GEM = 3
CRAFT_ANCIENT_GRINDSTONE = 4
CRAFT_ANCIENT_GEM = 5
CRAFT_CHOICES = (
(CRAFT_GRINDSTONE, 'Grindstone'),
(CRAFT_ENCHANT_GEM, 'Enchant Gem'),
(CRAFT_IMMEMORIAL_GRINDSTONE, 'Immemorial Grindstone'),
(CRAFT_IMMEMORIAL_GEM, 'Immemorial Gem'),
(CRAFT_ANCIENT_GRINDSTONE, 'Ancient Grindstone'),
(CRAFT_ANCIENT_GEM, 'Ancient Gem'),
)
CRAFT_ENCHANT_GEMS = [
CRAFT_ENCHANT_GEM,
CRAFT_IMMEMORIAL_GEM,
CRAFT_ANCIENT_GEM,
]
CRAFT_GRINDSTONES = [
CRAFT_GRINDSTONE,
CRAFT_IMMEMORIAL_GRINDSTONE,
CRAFT_ANCIENT_GRINDSTONE,
]
# Type > Stat > Quality > Min/Max
CRAFT_VALUE_RANGES = {
CRAFT_GRINDSTONE: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 80, 'max': 120},
RuneObjectBase.QUALITY_MAGIC: {'min': 100, 'max': 200},
RuneObjectBase.QUALITY_RARE: {'min': 180, 'max': 250},
RuneObjectBase.QUALITY_HERO: {'min': 230, 'max': 450},
RuneObjectBase.QUALITY_LEGEND: {'min': 430, 'max': 550},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 3},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 4},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 5},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 6},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 7},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
},
CRAFT_ENCHANT_GEM: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 100, 'max': 150},
RuneObjectBase.QUALITY_MAGIC: {'min': 130, 'max': 220},
RuneObjectBase.QUALITY_RARE: {'min': 200, 'max': 310},
RuneObjectBase.QUALITY_HERO: {'min': 290, 'max': 420},
RuneObjectBase.QUALITY_LEGEND: {'min': 400, 'max': 580},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 6, 'max': 9},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 4, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
},
CRAFT_ANCIENT_GRINDSTONE: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 80, 'max': 180},
RuneObjectBase.QUALITY_MAGIC: {'min': 100, 'max': 260},
RuneObjectBase.QUALITY_RARE: {'min': 180, 'max': 310},
RuneObjectBase.QUALITY_HERO: {'min': 230, 'max': 510},
RuneObjectBase.QUALITY_LEGEND: {'min': 430, 'max': 610},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 5},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 12},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 22},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 26},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 34},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 5},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 12},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 22},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 26},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 34},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 5},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 12},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 6},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 6},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 7},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
},
CRAFT_ANCIENT_GEM: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 100, 'max': 210},
RuneObjectBase.QUALITY_MAGIC: {'min': 130, 'max': 280},
RuneObjectBase.QUALITY_RARE: {'min': 200, 'max': 370},
RuneObjectBase.QUALITY_HERO: {'min': 290, 'max': 480},
RuneObjectBase.QUALITY_LEGEND: {'min': 400, 'max': 640},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 9},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 11},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 13},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 15},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 16},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 20},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 27},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 34},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 44},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 9},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 11},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 13},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 15},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 16},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 20},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 27},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 34},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 44},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 9},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 11},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 13},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 15},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 11},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 6, 'max': 10},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 10},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 12},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 8},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 10},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 13},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 6},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 8},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 10},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 13},
},
},
}
# Immemorial crafts roll the same value ranges as their ordinary
# counterparts, so alias rather than duplicate the tables.
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GEM] = CRAFT_VALUE_RANGES[CRAFT_ENCHANT_GEM]
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GRINDSTONE] = CRAFT_VALUE_RANGES[CRAFT_GRINDSTONE]
# Mappings from com2us' API data to model defined values
COM2US_CRAFT_TYPE_MAP = {
    1: CRAFT_ENCHANT_GEM,
    2: CRAFT_GRINDSTONE,
    3: CRAFT_IMMEMORIAL_GEM,
    4: CRAFT_IMMEMORIAL_GRINDSTONE,
    5: CRAFT_ANCIENT_GEM,
    6: CRAFT_ANCIENT_GRINDSTONE,
}
# Craft category (gem/grindstone variant); keys CRAFT_VALUE_RANGES.
type = models.IntegerField(choices=CRAFT_CHOICES)
# Rune set restriction; nullable — presumably "no restriction", TODO confirm.
rune = models.IntegerField(choices=RuneObjectBase.TYPE_CHOICES, blank=True, null=True)
# Stat this craft modifies; second key into CRAFT_VALUE_RANGES.
stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES)
# Rarity tier; third key into CRAFT_VALUE_RANGES.
quality = models.IntegerField(choices=RuneObjectBase.QUALITY_CHOICES)
# Concrete rolled value, when known; nullable for un-rolled crafts.
value = models.IntegerField(blank=True, null=True)
class Meta:
    # Abstract base: Django creates no table for this model; concrete
    # craft models inherit these fields.
    abstract = True
def get_min_value(self):
    """Return the minimum roll for this craft's type/stat/quality.

    Returns None when the combination has no entry in
    CRAFT_VALUE_RANGES (or a field is unset, which raises
    KeyError/TypeError on lookup).
    """
    try:
        return self.CRAFT_VALUE_RANGES[self.type][self.stat][self.quality]['min']
    except (KeyError, TypeError):
        # Previously the TypeError branch printed the exception — debug
        # leftover; both failure modes now just mean "unknown range".
        return None
def get_max_value(self):
    """Return the maximum roll for this craft's type/stat/quality.

    Returns None when the combination has no entry in
    CRAFT_VALUE_RANGES.  Catches TypeError as well for consistency
    with get_min_value (e.g. unset/None lookup keys).
    """
    try:
        return self.CRAFT_VALUE_RANGES[self.type][self.stat][self.quality]['max']
    except (KeyError, TypeError):
        return None
@staticmethod
def get_valid_stats_for_type(craft_type):
    """Map each stat rollable on *craft_type* to its display name.

    Returns None for an unknown craft type.
    """
    value_ranges = RuneCraft.CRAFT_VALUE_RANGES.get(craft_type)
    if value_ranges is None:
        return None
    # STAT_CHOICES is 1-indexed by stat constant; entry[1] is the label.
    return {
        stat: RuneObjectBase.STAT_CHOICES[stat - 1][1]
        for stat in value_ranges.keys()
    }
def __str__(self):
    """Render e.g. ``ATK +2% - 6%``: stat name plus the roll range."""
    suffix = '%' if self.stat in RuneObjectBase.PERCENT_STATS else ''
    # Concatenation (not format) preserves the original behavior of
    # raising TypeError if STAT_DISPLAY has no entry for this stat.
    return (RuneCraft.STAT_DISPLAY.get(self.stat) + ' +'
            + str(self.get_min_value()) + suffix
            + ' - ' + str(self.get_max_value()) + suffix)
| {
"repo_name": "PeteAndersen/swarfarm",
"path": "bestiary/models/runes.py",
"copies": "1",
"size": "55214",
"license": "apache-2.0",
"hash": -4914527458287914000,
"line_mean": 41.4723076923,
"line_max": 148,
"alpha_frac": 0.5129858369,
"autogenerated": false,
"ratio": 3.3054358237547894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9311310616421408,
"avg_score": 0.001422208846676324,
"num_lines": 1300
} |
from functools import partial
from kivent_cython import GameSystem
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import StringProperty
from kivy.uix.widget import Widget
from math import radians
from random import randint
import kivent_cython # import needed for kv to work!
class DebugPanel(Widget):
    """Widget that samples the Kivy frame rate ten times per second and
    exposes it as a string (rendered by the kv rule for this class)."""
    # Latest frames-per-second reading as display text.
    fps = StringProperty(None)

    def __init__(self, *args, **kwargs):
        super(DebugPanel, self).__init__(*args, **kwargs)
        # Refresh the reading every 0.1 s.
        Clock.schedule_interval(self.update_fps, .1)

    def update_fps(self, dt):
        # Clock.get_fps() returns a float; show it truncated to an int.
        self.fps = str(int(Clock.get_fps()))
class AsteroidSystem(GameSystem):
    """GameSystem that spawns asteroid entities with randomized motion
    and resolves their collisions via a per-asteroid health counter."""

    def create_asteroid(self, pos):
        """Spawn a single asteroid entity at *pos*.

        Velocity, angle and spin are randomized; the physics body is a
        single circle shape with 2 health points tracked by this system.
        """
        pos_x, pos_y = pos
        # randint calls happen in the same order as before, so behavior
        # under a seeded RNG is unchanged.
        velocity = (randint(-100, 100), randint(-100, 100))
        collision_shape = {
            'shape_type': 'circle',
            'elasticity': .5,
            'collision_type': 1,
            'shape_info': {
                'inner_radius': 0,
                'outer_radius': 32,
                'mass': 50,
                'offset': (0, 0),
            },
            'friction': 1.0,
        }
        physics = {
            'main_shape': 'circle',
            'velocity': velocity,
            'position': (pos_x, pos_y),
            'angle': radians(randint(0, 360)),
            'angular_velocity': radians(randint(-150, 150)),
            'vel_limit': 250,
            'ang_vel_limit': radians(200),
            'mass': 50,
            'col_shapes': [collision_shape],
        }
        components = {
            'cymunk-physics': physics,
            'physics_renderer': {'texture': 'asteroid.png'},
            'asteroids': {'health': 2},
        }
        self.gameworld.init_entity(
            components, ['cymunk-physics', 'physics_renderer', 'asteroids'])

    def asteroids_collide(self, space, arbiter):
        """Collision handler: both asteroids lose 1 health; any that
        reach 0 are scheduled for removal on the next frame."""
        gameworld = self.gameworld
        entities = gameworld.entities
        first_id = arbiter.shapes[0].body.data
        second_id = arbiter.shapes[1].body.data
        # Decrement both healths first, then schedule removals, keeping
        # the original ordering of side effects.
        for asteroid_id in (first_id, second_id):
            entities[asteroid_id]['asteroids']['health'] -= 1
        for asteroid_id in (first_id, second_id):
            if entities[asteroid_id]['asteroids']['health'] <= 0:
                Clock.schedule_once(partial(
                    gameworld.timed_remove_entity,
                    asteroid_id
                ))
        print('{0} hit {1}'.format(first_id, second_id))
class TestGame(Widget):
    """Root game widget: wires up gameworld state, map, collision
    handlers and entities, retrying init until the kv tree is ready."""

    def __init__(self, *args, **kwargs):
        super(TestGame, self).__init__(*args, **kwargs)
        Clock.schedule_once(self._init_game)

    def init_game(self, dt):
        """One-shot setup of states, map, callbacks and entities."""
        self.setup_states()
        self.set_state()
        self.setup_map()
        self.setup_collision_callbacks()
        self.load_stars()
        self.load_asteroids()
        Clock.schedule_interval(self.update, 1./60.)

    def _init_game(self, dt):
        # Retry until the gameworld/systems exist.  Was a bare `except:`,
        # which also swallowed SystemExit/KeyboardInterrupt and could
        # loop forever on a real bug; narrowed to Exception.
        try:
            self.init_game(0)
        except Exception:
            print("Rescheduling init")
            Clock.schedule_once(self._init_game)

    def update(self, dt):
        self.gameworld.update(dt)

    def setup_states(self):
        """Register the single 'main' state with both renderers active."""
        self.gameworld.add_state(
            state_name='main',
            systems_added=['quadtree_renderer', 'physics_renderer'],
            systems_removed=[],
            systems_paused=[],
            systems_unpaused=['quadtree_renderer', 'physics_renderer'],
            screenmanager_screen='main'
        )

    def set_state(self):
        self.gameworld.state = 'main'

    def setup_map(self):
        self.gameworld.currentmap = self.gameworld.systems['map']

    def load_stars(self):
        """Scatter 50 decorative star entities over the map."""
        star_graphic = 'star.png'
        star_size = (28, 28)
        for i in range(50):
            rand_x = randint(0, self.gameworld.currentmap.map_size[0])
            rand_y = randint(0, self.gameworld.currentmap.map_size[1])
            create_component_dict = {
                'position': {'position': (rand_x, rand_y)},
                'quadtree_renderer': {
                    'texture': star_graphic,
                    'size': star_size,
                }
            }
            component_order = ['position', 'quadtree_renderer']
            self.gameworld.init_entity(create_component_dict, component_order)

    def load_asteroids(self):
        """Scatter 50 asteroid entities over the map."""
        asteroid_system = self.gameworld.systems['asteroids']
        for i in range(50):
            rand_x = randint(0, self.gameworld.currentmap.map_size[0])
            rand_y = randint(0, self.gameworld.currentmap.map_size[1])
            asteroid_system.create_asteroid((rand_x, rand_y))

    def setup_collision_callbacks(self):
        """Route asteroid/asteroid (type 1/1) separations to the
        asteroid system's handler."""
        systems = self.gameworld.systems
        physics = systems['cymunk-physics']
        asteroid_system = systems['asteroids']
        physics.add_collision_handler(
            1, 1,
            separate_func=asteroid_system.asteroids_collide
        )
class BasicApp(App):
    """Minimal Kivy application shell for the demo."""
    def build(self):
        # Returning None makes Kivy build the root widget from the
        # matching kv file instead.
        pass
# Run the demo when executed as a script.
if __name__ == '__main__':
    BasicApp().run()
| {
"repo_name": "nightmarebadger/kivy-trying-out",
"path": "KivEnt/tutorials/4-physics-callback/main.py",
"copies": "1",
"size": "5529",
"license": "mit",
"hash": 1371928492169876200,
"line_mean": 28.8864864865,
"line_max": 78,
"alpha_frac": 0.5604991861,
"autogenerated": false,
"ratio": 3.710738255033557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9768037307037196,
"avg_score": 0.0006400268192721022,
"num_lines": 185
} |
from functools import partial
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.utils import get_color_from_hex as to_rgba
from kivy.properties import (
BooleanProperty, DictProperty,
StringProperty, NumericProperty
)
from kivy.logger import Logger
from kivy.uix.bubble import Bubble
from kivy.uix.bubble import BubbleButton
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
try:
    # Plain imports work when running from the src/ directory directly.
    from border_behavior import BorderBehavior
    from hover_behavior import HoverBehavior
except ModuleNotFoundError:
    # Fall back to package-relative imports when imported as a module.
    from .border_behavior import BorderBehavior
    from .hover_behavior import HoverBehavior
try:
    # The kv path depends on the working directory; try both layouts.
    Builder.load_file('src/player.kv')
except FileNotFoundError:
    Builder.load_file('../src/player.kv')
class Player(BoxLayout, BorderBehavior):
    """One participant in the mafia game.

    Combines display state (icon, borders from BorderBehavior) with game
    state (alive/mafia/trial flags, queued actions).  ``dict(player)``
    yields a serializable snapshot via ``__iter__``.
    """
    name = StringProperty("player")
    # Index of this player in the stage's player list (see die/ready_action).
    number = NumericProperty(0)
    icon = StringProperty()
    alive = BooleanProperty(True)
    mafia = BooleanProperty(False)
    agent = BooleanProperty(False)
    is_on_trial = BooleanProperty(False)
    strategic_value = NumericProperty(0)

    # Holds a reference to the action that is to be taken on another player.
    current_action = StringProperty()
    actions = DictProperty()

    def __iter__(self):
        """Yield (key, value) pairs for dict()/JSON serialization,
        flattening Player references inside ``actions`` to their
        ``number`` so the result contains only plain values."""
        flattened_actions = self.actions.copy()
        for action, decision in self.actions.items():
            flattened_actions[action] = decision.copy()
            for key, value in decision.items():
                # Anything carrying a `number` attribute is treated as a
                # Player reference and replaced by that number.
                if hasattr(value, "number"):
                    flattened_actions[action][key] = value.number
        return iter([('name', self.name),
                     ('agent', self.agent),
                     ('number', self.number),
                     ('is_on_trial', self.is_on_trial),
                     ('icon', self.icon),
                     ('mafia', self.mafia),
                     ('alive', self.alive),
                     ('strategic_value', self.strategic_value),
                     ('current_action', self.current_action),
                     ('actions', flattened_actions)])

    # suspect/accuse/kill are currently empty hooks; the actual action
    # bookkeeping happens in ready_action/act_on below.
    def suspect(self, other):
        pass

    def accuse(self, other):
        pass

    def kill(self, other):
        pass

    def die(self):
        """Mark this player dead on both the stage's record and this
        widget, swapping in the dead-player icon."""
        stage = App.get_running_app().root.current_screen
        stage.players[self.number].alive = self.alive = False
        stage.players[self.number].icon = self.icon = "data/icons/player_dead.png"

    def set_strategic_value(self, strategic_value):
        # NOTE(review): all three branches apply the identical border
        # color, so the comparison against `tolerance` has no visible
        # effect, and self.strategic_value is never updated here — this
        # looks like placeholder logic; confirm the intended colors.
        tolerance = 1.5
        if strategic_value > tolerance:
            self.borders = (2, "solid", to_rgba("05F5F5"))
        elif strategic_value < tolerance:
            self.borders = (2, "solid", to_rgba("05F5F5"))
        else:
            self.borders = (2, "solid", to_rgba("05F5F5"))
        self.update_borders()

    def ready_action(self, action) -> None:
        """
        Designate the current player as the one who will be performing actions.
        This is done by setting the player instance as the selected player.
        """
        stage = App.get_running_app().root.current_screen
        self.current_action = action.lower()
        stage.selected_player = self
        Logger.info(f"Player: {self.name} readies {self.current_action}")
        if self.current_action == "die":
            self.die()
        if self.current_action == "guilty" or self.current_action == "innocent":
            # Trial votes are recorded directly on the stage's copy.
            stage.players[self.number].actions["vote"]["decision"] = self.current_action
        if self.current_action == "abstain":
            # Fix Issue #17
            # Abstaining clears any pending accuse/suspect targets and
            # deselects this player.
            stage.players[self.number].actions["accuse"]["player"] = None
            stage.players[self.number].actions["suspect"]["player"] = None
            stage.selected_player = None

    def act_on(self, player) -> None:
        """Apply this player's readied action to *player*.

        A player cannot act on themselves, and cannot suspect someone
        they already accuse (or vice versa).
        """
        assert self.actions is not None
        assert player is not None
        assert issubclass(type(self), Player)
        assert self.actions != {}
        self.current_action = self.current_action.lower()
        if self == player:
            Logger.warning(f"Player: {self.name} tried to act on themselves.")
            return
        if self.current_action == 'suspect' and self.actions["accuse"]["player"] != player:
            self.actions["suspect"]['player'] = player
        elif self.current_action == 'accuse' and self.actions["suspect"]["player"] != player:
            self.actions["accuse"]['player'] = player
        Logger.info(f"Player: {self.name} {self.current_action} {player.name}")

    def show_bubble(self) -> None:
        """Pop an Accuse/Suspect bubble above this player; each button
        dismisses the bubble when pressed."""
        self.bubb = Bubble(size_hint=(None, None),
                           size=(160, 30),
                           pos_hint={'center_x': .5, 'y': .6})
        accuse = BubbleButton(text='Accuse')
        suspect = BubbleButton(text='Suspect')
        accuse.bind(on_press=partial(self.hide_bubble, accuse))
        suspect.bind(on_press=partial(self.hide_bubble, suspect))
        self.bubb.add_widget(accuse)
        self.bubb.add_widget(suspect)
        self.ids.empty.add_widget(self.bubb)

    def hide_bubble(self, instance, *args):
        # Remove the bubble created by show_bubble from the placeholder.
        self.ids.empty.remove_widget(self.bubb)
class PlayerIcon(Player):
    """
    Used for dead players and other unclickable player icons.
    """
    pass


# The following subclasses add no Python-level behavior; presumably
# their differing looks/interactions are defined by kv rules keyed on
# the class name in player.kv — verify there.
class DiscussionPlayer(Player):
    pass


class NightMafiaPlayer(Player):
    pass


class NightSleepingPlayer(Player):
    pass


class TrialPlayer(Player):
    pass


class TrialAgent(Player):
    pass


class ImageButton(BorderBehavior, ButtonBehavior, Image, HoverBehavior):
    # An Image that behaves like a button, with border drawing and
    # mouse-hover feedback mixed in.
    pass
| {
"repo_name": "Zenohm/mafiademonstration",
"path": "src/player.py",
"copies": "1",
"size": "5582",
"license": "mit",
"hash": -1711779030140454700,
"line_mean": 31.0804597701,
"line_max": 93,
"alpha_frac": 0.6184163382,
"autogenerated": false,
"ratio": 3.884481558803062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002897897003062,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.uix.accordion import AccordionItem
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.togglebutton import ToggleButton
class PlaygroundSizeSelector(Button):
    '''Button to open playground size selection view
    '''

    view = ObjectProperty()
    '''This property refers to the
       :class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
       instance.
       :data:`view` is an :class:`~kivy.properties.ObjectProperty`
    '''

    playground = ObjectProperty()
    '''This property holds a reference to the
       :class:`~designer.playground.Playground` instance.
       :data:`playground` is an :class:`~kivy.properties.ObjectProperty`
    '''

    def on_playground(self, *_):
        '''Create a
           :class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
           for the current playground.
        '''
        self.view = PlaygroundSizeView(selected_size=self.playground.size)
        # Keep the playground and this button's caption in sync with
        # whatever the dialog selects.
        self.view.bind(selected_size=self._update_playground)
        self.view.bind(selected_size_name=self.setter('text'))
        self.text = self.view.selected_size_name

    def _update_playground(self, _, size):
        '''Callback to update the playground size on :data:`selected_size`
           changes
        '''
        if self.playground:
            self.playground.size = size
            # The root widget mirrors the playground size when present.
            if self.playground.root:
                self.playground.root.size = size

    def on_press(self):
        '''Open the
           :class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
        '''
        # Size the dialog to half the window before showing it.
        self.view.size_hint = None, None
        self.view.width = self.get_root_window().width / 2.
        self.view.height = self.get_root_window().height / 2.
        self.view.attach_to = self
        self.view.open()
class PlaygroundSizeView(ModalView):
    '''Dialog for playground size selection
    '''

    accordion = ObjectProperty()
    '''This property holds a reference to the
       :class:`~kivy.uix.accordion.Accordion` inside the dialog.
       :data:`accordion` is an :class:`~kivy.properties.ObjectProperty`
    '''

    selected_size = ObjectProperty()
    '''This property contains the currently selected playground size.
       :data:`selected_size` is an :class:`~kivy.properties.ObjectProperty`
    '''

    selected_size_name = StringProperty('')
    '''This property contains the name associated with :data:`selected_size`.
       :data:`selected_size_name` is a :class:`~kivy.properties.StringProperty`
    '''

    selected_orientation = OptionProperty(
        'landscape', options=('portrait', 'landscape')
    )
    '''This property contains the screen orientation for :data:`selected_size`.
       :data:`selected_orientation` is an
       :class:`~kivy.properties.OptionProperty`
    '''

    default_sizes = (
        ('Desktop - SD', (
            ('Default', (550, 350)),
            ('Small', (800, 600)),
            ('Medium', (1024, 768)),
            ('Large', (1280, 1024)),
            ('XLarge', (1600, 1200))
        )),
        ('Desktop - HD', (
            ('720p', (1280, 720)),
            ('LVDS', (1366, 768)),
            ('1080p', (1920, 1080)),
            ('4K', (3840, 2160)),
            ('4K Cinema', (4096, 2160))
        )),
        ('Generic', (
            ('QVGA', (320, 240)),
            ('WQVGA400', (400, 240)),
            ('WQVGA432', (432, 240)),
            ('HVGA', (480, 320)),
            ('WVGA800', (800, 480)),
            ('WVGA854', (854, 480)),
            ('1024x600', (1024, 600)),
            ('1024x768', (1024, 768)),
            ('1280x768', (1280, 768)),
            ('WXGA', (1280, 800)),
            ('640x480', (640, 480)),
            ('1536x1152', (1536, 1152)),
            ('1920x1152', (1920, 1152)),
            ('1920x1200', (1920, 1200)),
            ('960x640', (960, 640)),
            ('2048x1536', (2048, 1536)),
            ('2560x1536', (2560, 1536)),
            ('2560x1600', (2560, 1600)),
        )),
        ('Android', (
            ('HTC One', (1920, 1080)),
            ('HTC One X', (1920, 720)),
            ('HTC One SV', (800, 480)),
            ('Galaxy S3', (1280, 720)),
            ('Galaxy Note 2', (1280, 720)),
            ('Motorola Droid 2', (854, 480)),
            ('Motorola Xoom', (1280, 800)),
            ('Xperia E', (480, 320)),
            ('Nexus 4', (1280, 768)),
            ('Nexus 7 (2012)', (1280, 800)),
            ('Nexus 7 (2013)', (1920, 1200)),
        )),
        ('iOS', (
            ('iPad 1/2', (1024, 768)),
            ('iPad 3', (2048, 1536)),
            ('iPhone 4', (960, 640)),
            ('iPhone 5', (1136, 640)),
        )),
    )
    '''Ordered map of default selectable sizes.
    '''

    def __init__(self, **kwargs):
        self._buttons = {}
        super(PlaygroundSizeView, self).__init__(**kwargs)
        for title, values in self.default_sizes:
            grid = GridLayout(rows=4)
            # Sort largest first, by height then width.  A key function
            # replaces the old cmp-style comparator: sorted() on
            # Python 3 no longer accepts a comparison function as its
            # second positional argument.  reverse=True keeps the sort
            # stable for equal sizes, like the cmp returning 0 did.
            values = sorted(
                values,
                key=lambda entry: (entry[1][1], entry[1][0]),
                reverse=True
            )
            for name, size in values:
                btn = ToggleButton(text='', markup=True)
                btntext = ('%s\n[color=777777][size=%d]%dx%d[/size][/color]' %
                           (name, btn.font_size * 0.8, size[0], size[1]))
                btn.text = btntext
                btn.bind(on_press=partial(self.set_size, size))
                grid.add_widget(btn)
                self._buttons[name] = btn
            item = AccordionItem(title=title)
            item.add_widget(grid)
            self.accordion.add_widget(item)
        self.accordion.select(self.accordion.children[-1])
        self.update_buttons()

    def find_size(self):
        '''Find the size name and orientation for the current size.
        '''
        orientation = self.check_orientation(self.selected_size)
        # Sizes are stored landscape (width >= height); normalize the
        # current selection the same way to compare orientation-agnostically.
        check_size = tuple(sorted(self.selected_size, reverse=True)).__eq__
        for _, values in self.default_sizes:
            for name, size in values:
                if check_size(size):
                    return name, size, orientation
        return 'Custom', self.selected_size, orientation

    def check_orientation(self, size):
        '''Determine if the provided size is portrait or landscape.
        '''
        return 'portrait' if size[1] > size[0] else 'landscape'

    def update_buttons(self, size_name=None):
        '''Update the toggle state of the size buttons and open the
           appropriate accordion section.
        '''
        if not size_name:
            size_name = self.find_size()[0]
        # items() replaces iteritems(), which was removed in Python 3.
        for name, btn in list(self._buttons.items()):
            if name == size_name:
                btn.state = 'down'
                # Climb from the button up to its enclosing AccordionItem
                # (5 widget levels) — fragile; verify if the layout changes.
                self.accordion.select(btn.parent.parent.parent.parent.parent)
            else:
                btn.state = 'normal'

    def on_selected_size(self, *_):
        '''Callback to update properties on changes to :data:`selected_size`.
        '''
        size_info = self.find_size()
        self.selected_size_name = ('%s\n[color=777777](%s, %dx%d)[/color]' %
                                   (size_info[0], size_info[2],
                                    size_info[1][0], size_info[1][1]))
        self.selected_orientation = size_info[2]
        self.update_buttons(size_info[0])

    def update_size(self, size):
        '''Set :data:`selected_size` while taking orientation into account.
        '''
        size = sorted(size, reverse=self.selected_orientation == 'landscape')
        self.selected_size = size

    def set_size(self, size, *_):
        '''Set :data:`selected_size` and close the dialog.
        '''
        self.update_size(size)
        self.dismiss()

    def on_selected_orientation(self, _, value):
        '''Callback to update size on changes to :data:`selected_orientation`.
        '''
        self.update_size(self.selected_size)
| {
"repo_name": "kived/kivy-designer",
"path": "designer/uix/playground_size_selector.py",
"copies": "1",
"size": "8430",
"license": "mit",
"hash": 6181774279963239000,
"line_mean": 34.4201680672,
"line_max": 79,
"alpha_frac": 0.534282325,
"autogenerated": false,
"ratio": 3.82486388384755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985914620884755,
"avg_score": 0,
"num_lines": 238
} |
from functools import partial
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.uix.accordion import AccordionItem
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.togglebutton import ToggleButton
class PlaygroundSizeSelector(Button):
    '''Toolbar button that opens the playground size selection dialog
       and mirrors the dialog's current selection in its caption.
    '''

    view = ObjectProperty()
    '''The :class:`PlaygroundSizeView` dialog instance owned by this
       button.  :data:`view` is an :class:`~kivy.properties.ObjectProperty`
    '''

    playground = ObjectProperty()
    '''The :class:`~designer.playground.Playground` whose size is edited.
       :data:`playground` is an :class:`~kivy.properties.ObjectProperty`
    '''

    def on_playground(self, *_):
        '''Build the size-selection dialog once a playground is attached
           and wire its selection back to the playground and caption.
        '''
        self.view = PlaygroundSizeView(selected_size=self.playground.size)
        self.view.bind(
            selected_size=self._update_playground,
            selected_size_name=self.setter('text'),
        )
        self.text = self.view.selected_size_name

    def _update_playground(self, _, size):
        '''Push a newly selected size onto the playground (and its root
           widget, when present).
        '''
        playground = self.playground
        if not playground:
            return
        playground.size = size
        if playground.root:
            playground.root.size = size

    def on_press(self):
        '''Size the dialog to half the window and show it.
        '''
        window = self.get_root_window()
        self.view.size_hint = (None, None)
        self.view.width = window.width / 2.
        self.view.height = window.height / 2.
        self.view.attach_to = self
        self.view.open()
class PlaygroundSizeView(ModalView):
    '''Dialog for playground size selection
    '''
    accordion = ObjectProperty()
    '''This property holds a reference to the
    :class:`~kivy.uix.accordion.Accordion` inside the dialog.
    :data:`accordion` is an :class:`~kivy.properties.ObjectProperty`
    '''
    selected_size = ObjectProperty()
    '''This property contains the currently selected playground size.
    :data:`selected_size` is an :class:`~kivy.properties.ObjectProperty`
    '''
    selected_size_name = StringProperty('')
    '''This property contains the name associated with :data:`selected_size`.
    :data:`selected_size_name` is a :class:`~kivy.properties.StringProperty`
    '''
    selected_orientation = OptionProperty(
        'landscape', options=('portrait', 'landscape')
    )
    '''This property contains the screen orientation for :data:`selected_size`.
    :data:`selected_orientation` is an
    :class:`~kivy.properties.OptionProperty`
    '''
    # Sizes are listed width-first and stored in landscape form
    # (width >= height); find_size relies on that.
    default_sizes = (
        ('Desktop - SD', (
            ('Default', (550, 350)),
            ('Small', (800, 600)),
            ('Medium', (1024, 768)),
            ('Large', (1280, 1024)),
            ('XLarge', (1600, 1200))
        )),
        ('Desktop - HD', (
            ('720p', (1280, 720)),
            ('LVDS', (1366, 768)),
            ('1080p', (1920, 1080)),
            ('4K', (3840, 2160)),
            ('4K Cinema', (4096, 2160))
        )),
        ('Generic', (
            ('QVGA', (320, 240)),
            ('WQVGA400', (400, 240)),
            ('WQVGA432', (432, 240)),
            ('HVGA', (480, 320)),
            ('WVGA800', (800, 480)),
            ('WVGA854', (854, 480)),
            ('1024x600', (1024, 600)),
            ('1024x768', (1024, 768)),
            ('1280x768', (1280, 768)),
            ('WXGA', (1280, 800)),
            ('640x480', (640, 480)),
            ('1536x1152', (1536, 1152)),
            ('1920x1152', (1920, 1152)),
            ('1920x1200', (1920, 1200)),
            ('960x640', (960, 640)),
            ('2048x1536', (2048, 1536)),
            ('2560x1536', (2560, 1536)),
            ('2560x1600', (2560, 1600)),
        )),
        ('Android', (
            ('HTC One', (1920, 1080)),
            ('HTC One X', (1920, 720)),
            ('HTC One SV', (800, 480)),
            ('Galaxy S3', (1280, 720)),
            ('Galaxy Note 2', (1280, 720)),
            ('Motorola Droid 2', (854, 480)),
            ('Motorola Xoom', (1280, 800)),
            ('Xperia E', (480, 320)),
            ('Nexus 4', (1280, 768)),
            ('Nexus 7 (2012)', (1280, 800)),
            ('Nexus 7 (2013)', (1920, 1200)),
        )),
        ('iOS', (
            ('iPad 1/2', (1024, 768)),
            ('iPad 3', (2048, 1536)),
            ('iPhone 4', (960, 640)),
            ('iPhone 5', (1136, 640)),
        )),
    )
    '''Ordered map of default selectable sizes.
    '''

    def __init__(self, **kwargs):
        self._buttons = {}
        super(PlaygroundSizeView, self).__init__(**kwargs)
        # Build one accordion section per size category, each holding a
        # grid of toggle buttons.
        for title, values in self.default_sizes:
            grid = GridLayout(rows=4)

            # Order by pixel area, largest first.
            def sort_sizes(item):
                return item[1][1] * item[1][0]

            values = sorted(values, key=sort_sizes, reverse=True)
            for name, size in values:
                btn = ToggleButton(text='', markup=True)
                btntext = ('%s\n[color=777777][size=%d]%dx%d[/size][/color]' %
                           (name, btn.font_size * 0.8, size[0], size[1]))
                btn.text = btntext
                btn.bind(on_press=partial(self.set_size, size))
                grid.add_widget(btn)
                self._buttons[name] = btn
            item = AccordionItem(title=title)
            item.add_widget(grid)
            self.accordion.add_widget(item)
        self.accordion.select(self.accordion.children[-1])
        self.update_buttons()

    def find_size(self):
        '''Find the size name and orientation for the current size.
        '''
        orientation = self.check_orientation(self.selected_size)
        # Normalize to landscape (width first) so the comparison against
        # default_sizes ignores orientation.
        check_size = tuple(sorted(self.selected_size, reverse=True)).__eq__
        for _, values in self.default_sizes:
            for name, size in values:
                if check_size(size):
                    return name, size, orientation
        return 'Custom', self.selected_size, orientation

    def check_orientation(self, size):
        '''Determine if the provided size is portrait or landscape.
        '''
        return 'portrait' if size[1] > size[0] else 'landscape'

    def update_buttons(self, size_name=None):
        '''Update the toggle state of the size buttons and open the
        appropriate accordion section.
        '''
        if not size_name:
            size_name = self.find_size()[0]
        for name, btn in list(self._buttons.items()):
            if name == size_name:
                btn.state = 'down'
                # Climb from the button up to its enclosing AccordionItem
                # (5 widget levels) — fragile; verify if the layout changes.
                self.accordion.select(btn.parent.parent.parent.parent.parent)
            else:
                btn.state = 'normal'

    def on_selected_size(self, *_):
        '''Callback to update properties on changes to :data:`selected_size`.
        '''
        size_info = self.find_size()
        self.selected_size_name = ('%s\n[color=777777](%s, %dx%d)[/color]' %
                                   (size_info[0], size_info[2],
                                    size_info[1][0], size_info[1][1]))
        self.selected_orientation = size_info[2]
        self.update_buttons(size_info[0])

    def update_size(self, size):
        '''Set :data:`selected_size` while taking orientation into account.
        '''
        size = sorted(size, reverse=self.selected_orientation == 'landscape')
        self.selected_size = size

    def set_size(self, size, *_):
        '''Set :data:`selected_size` and close the dialog.
        '''
        self.update_size(size)
        self.dismiss()

    def on_selected_orientation(self, _, value):
        '''Callback to update size on changes to :data:`selected_orientation`.
        '''
        self.update_size(self.selected_size)
| {
"repo_name": "mohammadj22/kivy-designer",
"path": "designer/uix/playground_size_selector.py",
"copies": "4",
"size": "8095",
"license": "mit",
"hash": -59340151062976990,
"line_mean": 34.6607929515,
"line_max": 79,
"alpha_frac": 0.5460160593,
"autogenerated": false,
"ratio": 3.7880205896116053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6334036648911605,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from .lexerconstants import HAS_ARGS, NUM_CONST, NUM_SIGN_CONST
from .stack import Stack
from .heap import Heap
from .ws_io import IO as ws_io
import sys
class Parser(object):
    """Interpreter for a list of lexed whitespace tokens.

    Each token is a tuple of (instruction-class token, instruction token,
    optional argument token).
    """

    def __init__(self, tokens):
        self.token_list = tokens
        self.stack = Stack()
        self.heap = Heap(self.stack)
        self.io = ws_io(self.stack)
        self.labels = self.create_labels()
        self.num_of_tokens = len(tokens)
        self.instruction_ptr = 0
        # Return-address stack for subroutine calls.
        self.call_ptr = []
        # Dispatch table: instruction class -> instruction -> callable.
        self.method_map = {
            'STACK_MANIPULATION': {
                'PUSH': partial(self.stack.push),
                'DUP': self.stack.dup,
                'SWAP': self.stack.swap,
                'POP': self.stack.pop
            },
            'IO': {
                'OUTPUT_CHAR': self.io.o_chr,
                'OUTPUT_NUM': self.io.o_int,
                'READ_CHAR': partial(self.io.i_chr, self.heap),
                'READ_NUM': partial(self.io.i_int, self.heap)
            },
            'FLOW_CONTROL': {
                # Labels were already collected in create_labels(), so
                # MARK is a no-op at execution time.
                'MARK': (lambda x: None),
                'CALL': partial(self.call_sub),
                'JUMP': partial(self.jump_loc),
                'JUMP_IF_ZERO': partial(self.jump_zero),
                'JUMP_IF_NEG': partial(self.jump_neg),
                'END_SUB': partial(self.end_sub),
                'END': self.end
            },
            'HEAP_ACCESS': {
                'STORE': partial(self.heap.set),
                'RETR': partial(self.heap.get)
            },
            'ARITHMETIC': {
                '+': self.stack.math.add,
                '-': self.stack.math.subtract,
                '*': self.stack.math.multiply,
                '/': self.stack.math.divide,
                '%': self.stack.math.modulo
            }
        }

    def _get_value(self, ws_int, signed=False):
        """Decode a whitespace-encoded binary number.

        When *signed* is true, the first character encodes the sign and
        the rest the magnitude.
        """
        if signed:
            sign = '-' if NUM_SIGN_CONST[ws_int[0]] == 'NEGATIVE' else ''
            ws_int = ws_int[1:]
        number = int(''.join([NUM_CONST[i] for i in ws_int]), 2)
        return int('{}{}'.format(sign, number)) if signed else number

    def create_labels(self):
        """Scan the token list and map each MARK label value to its index."""
        return dict(
            (self._get_value(t[2].value), idx)
            for idx, t in enumerate(self.token_list)
            if t[0].type == 'FLOW_CONTROL' and t[1].type == 'MARK'
        )

    def call_sub(self, lbl):
        """Jump to subroutine *lbl*, remembering the return address."""
        self.call_ptr.append(self.instruction_ptr)
        self.instruction_ptr = self.labels[lbl]

    def jump_loc(self, lbl):
        """Unconditional jump to label *lbl*."""
        self.instruction_ptr = self.labels[lbl]

    def jump_zero(self, lbl):
        """Pop the stack; jump to *lbl* if the popped value is zero."""
        if self.stack.pop() == 0:
            self.instruction_ptr = self.labels[lbl]

    def jump_neg(self, lbl):
        """Pop the stack; jump to *lbl* if the popped value is negative."""
        if self.stack.pop() < 0:
            self.instruction_ptr = self.labels[lbl]

    def end_sub(self):
        """Return from the current subroutine."""
        self.instruction_ptr = self.call_ptr.pop()

    def end(self):
        """Terminate the program."""
        sys.exit(0)

    def parse(self):
        """Execute the token list until the instruction pointer runs off
        the end (or END exits the process).
        """
        while self.instruction_ptr < self.num_of_tokens:
            token = self.token_list[self.instruction_ptr]
            if token[1].type in HAS_ARGS:
                # BUGFIX: use '==' instead of 'is' -- identity of string
                # literals is a CPython interning detail and emits a
                # SyntaxWarning on 3.8+.
                signed = token[1].type == 'PUSH'
                int_value = self._get_value(token[2].value, signed=signed)
                self.method_map[token[0].type][token[1].type](int_value)
            else:
                self.method_map[token[0].type][token[1].type]()
            self.instruction_ptr += 1
| {
"repo_name": "yasn77/whitepy",
"path": "whitepy/parser.py",
"copies": "1",
"size": "3610",
"license": "apache-2.0",
"hash": -4993445505638697000,
"line_mean": 32.4259259259,
"line_max": 74,
"alpha_frac": 0.491966759,
"autogenerated": false,
"ratio": 3.717816683831102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4709783442831102,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from locust import events
from statsd.timer import Timer as BaseTimer
from stuf import stuf
import statsd
class Timer(BaseTimer):
    """statsd timer that can report an externally-measured duration."""

    def send_raw(self, subname, ms):
        '''Send the data to statsd via self.connection

        :keyword subname: The subname to report the data to (appended to the
            client name)
        :keyword ms: milleseconds to report
        '''
        full_name = self._get_name(self.name, subname)
        self.logger.info('%s: %dms', full_name, ms)
        return self._send({full_name: '%d|ms' % ms})
class LoadStats(stuf):
    """Bundle of statsd reporters (counters, timer, gauge) used to publish
    locust load-test events.
    """

    default_prefix = 'lw'

    def __init__(self, **kw):
        stuf.__init__(self, **kw)
        self.prefix = self.get('prefix', self.default_prefix)
        self.statsd_cxn = statsd.Connection(self.host, self.port,
                                            self.sample_rate)
        self.rescount = statsd.Counter(self.prefix, self.statsd_cxn)
        self.response_timer = Timer(self.prefix, self.statsd_cxn)
        self.loci_error = statsd.Counter(self.prefix, self.statsd_cxn)
        self.users_g = statsd.Gauge(self.prefix, self.statsd_cxn)

    def report_request(self, method, path, res_time, response, slug=''):
        """Count one request under *slug* and record its response time."""
        self.rescount.increment(slug)
        if response is not None:
            # Also count per status code, e.g. "success.200".
            self.rescount.increment(slug + ".%s" % response.status)
        self.response_timer.send_raw(slug, res_time)

    def report_failure(self, method, path, res_time, exc, response):
        """Record a failed request under the "failure" slug."""
        self.report_request(method, path, res_time, response, "failure")

    def locust_error(self, loc, exc, tb):
        """Count a locust-level error keyed by the exception."""
        self.loci_error.increment("error.%s" % exc)

    def users(self, user_count):
        """Publish the current simulated user count."""
        self.users_g.send("count", user_count)
def register_statsd_emitters(port, host, sample_rate=0.5, loader=LoadStats, hooks=events):
    """
    Hook up our statsd reporting

    Builds a stats object via *loader* and attaches its reporters to the
    locust event *hooks*; returns the stats object.
    """
    stats = loader(port=port, host=host, sample_rate=sample_rate)
    hooks.request_success += partial(stats.report_request, slug="success")
    hooks.request_failure += stats.report_failure
    hooks.locust_error += stats.locust_error
    # Reset the user gauge to zero on shutdown.
    hooks.quitting += partial(stats.users, 0)
    hooks.hatch_complete += stats.users
    return stats
| {
"repo_name": "whitmo/loadwarrior",
"path": "loadwarrior/statsdext.py",
"copies": "1",
"size": "2223",
"license": "bsd-2-clause",
"hash": 5790581802225346000,
"line_mean": 33.2,
"line_max": 90,
"alpha_frac": 0.6401259559,
"autogenerated": false,
"ratio": 3.4680187207488298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9522836475478593,
"avg_score": 0.01706164023404719,
"num_lines": 65
} |
from functools import partial
from logging import getLogger
from threading import Thread
import os
import zlib
import signal
from zmq.eventloop.ioloop import IOLoop, install
from engine.utils.mathutils import random_id
from engine.utils.timeutils import milliseconds
class ProcessException(Exception):
    """Exception that records the id of the process it originated from."""

    PATTERN = '{process_id} - {description}'

    def __init__(self, process, description):
        Exception.__init__(self, description)
        self.process_id = str(process.info)
        self.description = description

    def __str__(self):
        return self.PATTERN.format(process_id=self.process_id,
                                   description=self.description)
class ProcessInfo(object):
    """Identity of a running process: type, index, machine, pid and a
    12-bit crc that disambiguates restarts of the same process.
    """

    _process_name_pattern = '{process_type}@{machine_id}.{index:03d}'
    _process_crc_pattern = '{name}.{time:.0f}'

    # Lazily computed caches (see the corresponding properties).
    _name = None
    _crc = None
    _id = None

    def __init__(self, process_type='', process_index=0, machine_id=None,
                 ports=None, sockets=None, external_address=None, pid=None, crc=None):
        self.process_type = process_type
        self.process_index = process_index
        self.machine_id = machine_id
        self.pid = pid or os.getpid()
        self.ports = ports or {}
        self.sockets = sockets or {}
        self._crc = crc
        if external_address:
            self.external_address = external_address

    @classmethod
    def get_process_name(cls, process_type, machine_id, index):
        """Build the canonical "type@machine.NNN" process name."""
        return cls._process_name_pattern.format(
            process_type=process_type, machine_id=machine_id,
            index=int(index))

    @classmethod
    def get_process_crc(cls, process_name):
        """Derive a 12-bit checksum from the name and the current time."""
        raw = cls._process_crc_pattern.format(
            name=process_name, time=milliseconds())
        return zlib.crc32(bytes(raw, 'utf-8')) & 0xfff

    @property
    def name(self):
        if self._name is None:
            self._name = self.get_process_name(
                self.process_type, self.machine_id, self.process_index)
        return self._name

    def init_crc(self):
        """(Re)compute and store the crc for this process."""
        self._crc = self.get_process_crc(self.name)
        return self._crc

    @property
    def crc(self):
        if self._crc is None:
            self.init_crc()
        return self._crc

    @property
    def id(self):
        if self._id is None:
            self._id = '{name}.{hash}'.format(name=self.name, hash=self.crc)
        return self._id

    def __str__(self):
        return self.id

    def __repr__(self):
        return '<ProcessInfo: {}>'.format(self.id)
# Machine identifier used when the caller does not supply one.
DEFAULT_MACHINE_ID = 'cherry'

# Module-level singleton slot managed by BasicProcess.get_instance().
_process_instance = None
class BasicProcess(object):
    """Base class describing one application process.

    The application is expected to hold a single instance, obtained via
    :meth:`get_instance`.
    """

    @classmethod
    def get_instance(cls, *args, **kwargs):
        """Just singleton implementation.

        :return: Process instance for current application.
        :rtype: BasicProcess
        """
        global _process_instance
        if _process_instance is None:
            _process_instance = cls.__new__(cls)
            _process_instance.__init__(*args, **kwargs)
        return _process_instance

    @classmethod
    def _init_instance(cls, instance):
        cls._instance = instance

    def __init__(self, process_type, process_index=0,
                 machine_id=DEFAULT_MACHINE_ID, crc=None, log=True,
                 ports=None, sockets=None, external_address=None):
        global _process_instance
        _process_instance = self
        self.ports = ports or {}
        self.sockets = sockets or {}
        self.info = ProcessInfo(process_type, process_index=process_index,
                                machine_id=machine_id, ports=self.ports,
                                sockets=self.sockets,
                                external_address=external_address, crc=crc)
        if not crc:
            # No crc supplied by the caller: derive one now.
            self.info.init_crc()
        if log:
            self.configure_logger()

    def configure_logger(self):
        self.logger = getLogger('process')

    @property
    def crc(self):
        return self.info.crc

    @property
    def name(self):
        return self.info.name

    @property
    def external_address(self):
        return self.info.external_address

    @property
    def process_index(self):
        return self.info.process_index

    def start(self):
        self.logger.info('Process started: {!s}'.format(self.info))

    def stop(self):
        self.logger.info('Process stopped: {!s}'.format(self.info))
class IOLoopProcess(BasicProcess):
    """
    Run process with zmq eventloop. Please, implement loop initialization in
    the start method if you are about to run this process as a daemon.
    """

    def __init__(self, process_type, process_index=0,
                 machine_id=DEFAULT_MACHINE_ID, crc=None, log=True,
                 ports=None, sockets=None, external_address=None, loop=None):
        super(IOLoopProcess, self).__init__(
            process_type, process_index=process_index, machine_id=machine_id,
            crc=crc, log=log, ports=ports, sockets=sockets,
            external_address=external_address)
        self._loop = loop

    @property
    def loop(self):
        # Lazily install the zmq poller and grab the singleton IOLoop.
        if self._loop is None:
            install()
            self._loop = IOLoop.instance()
        return self._loop

    def start(self):
        super(IOLoopProcess, self).start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            self.stop()
            self.logger.error('Exited via Ctrl-C: {}'.format(self.name))

    def stop(self):
        self.loop.stop()
        super(IOLoopProcess, self).stop()
class IOLoopMixin():
    """Add IOLoop getter to object. Use it if you need separate instance
    of zmq eventloop in subprocess or thread.
    """
    _loop = None
    # Default so signal_handler() is safe even if init_signal_handler()
    # was never called (the original raised AttributeError in that case).
    _handle_signals = False

    @property
    def loop(self):
        if self._loop is None:
            install()
            self._loop = IOLoop()
        return self._loop

    def loop_start(self):
        """Run the loop until stopped; Ctrl-C stops it cleanly."""
        try:
            self.loop.start()
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        self.loop.stop()

    def init_signal_handler(self, signals=(signal.SIGINT, signal.SIGTERM)):
        """Install handlers for *signals* routing to :meth:`signal_handler`."""
        self._handle_signals = True
        try:
            for s in signals:
                signal.signal(s, self.signal_handler)
        except (ValueError, AttributeError):
            # Windows, or not in the main thread.
            pass

    def signal_handler(self, signum, frame):
        """Stop the loop on the first termination signal; ignore repeats."""
        if not self._handle_signals:
            return
        # BUGFIX: SIGKILL can never be delivered to a Python handler, and
        # signal.SIGKILL does not exist on Windows (the original membership
        # test raised AttributeError there), so it is not checked here.
        if signum in (signal.SIGTERM, signal.SIGINT):
            self._handle_signals = False
            self.stop()
class IOLoopThread(Thread, IOLoopMixin):
    # Thread that owns its own zmq IOLoop instance (see IOLoopMixin).
    pass
class CallbackWrapper(object):
    """Something similar to functools.partial, but also exposes a flag
    (:attr:`done`) marking whether the callback has been executed.

    Arguments given at construction time are bound first; arguments given
    at call time are appended after them.
    """

    def __init__(self, handler, *args, **kwargs):
        self.done = False
        self.args = args
        self.kwargs = kwargs
        self.handler = handler
        self._callee = partial(handler, *args, **kwargs)
        # NOTE(review): __hash__ must return an int; random_id()'s return
        # type is not visible here -- confirm it is integer-valued.
        self._hash = random_id()

    def __call__(self, *args, **kwargs):
        self.done = True
        self._callee(*args, **kwargs)

    def __hash__(self):
        return self._hash
def wrap_callback(handler, *args, **kwargs):
    """Decorate handler with this function to turn a function or method
    into a CallbackWrapper.
    """
    return CallbackWrapper(handler, *args, **kwargs)
def make_lazy(fun, *args, **kwargs):
    """Return a zero-argument callable that computes fun(*args, **kwargs)
    on first use and returns the cached result on every later call.
    """
    _unset = object()
    cache = [_unset]

    def evaluate():
        if cache[0] is _unset:
            cache[0] = fun(*args, **kwargs)
        return cache[0]

    return evaluate
| {
"repo_name": "kollad/turbo-ninja",
"path": "utils/process.py",
"copies": "1",
"size": "7587",
"license": "mit",
"hash": 7623930601043482000,
"line_mean": 27.8479087452,
"line_max": 119,
"alpha_frac": 0.602873336,
"autogenerated": false,
"ratio": 4.079032258064516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004360598950595241,
"num_lines": 263
} |
from functools import partial
from logging import getLogger
from contextlib import suppress
from multiprocessing.pool import ThreadPool
from raptiformica.actions.slave import slave_machine
from raptiformica.shell.raptiformica import clean
from raptiformica.utils import load_json
log = getLogger(__name__)
def read_inventory_file(inventory):
    """
    Read an inventory file, return the list of dicts

    :param str inventory: The inventory file
    :return list[dict, ..]: List of hostname and IP definitions
    """
    log.info("Reading and validating inventory file")
    hosts = load_json(inventory)
    if not all('dst' in entry for entry in hosts):
        raise ValueError(
            "Not all inventory items specified a "
            "destination like {'dst': '1.2.3.4'}"
        )
    destinations = [entry['dst'] for entry in hosts]
    hops = {entry.get('via') for entry in hosts if entry.get('via')}
    if not hops.issubset(destinations):
        raise ValueError(
            "Specified a 'via' item that "
            "is not defined as a 'dst' item."
        )
    # Note that there is no cycle detection here
    if any(entry['dst'] == entry.get('via') for entry in hosts):
        raise ValueError(
            "You can not specify the "
            "same 'via' as 'dst' for one item"
        )
    return hosts
# TODO: add unit tests for this function in the file
# tests/unit/raptiformica/actions/deploy/test_deploy_to_host.py
def _deploy_to_host(
        host, server_type, provision=False,
        assimilate=False, after_assimilate=False,
        after_mesh=False
):
    """
    Remove any previously existing raptiformica
    state from a remote host and (re)deploy to it.

    :param dict host: The host to deploy to
    :param str server_type: The server type
    :param bool provision: whether or not we should assimilate the remote machine
    :param bool assimilate: whether or not we should assimilate the remote machine
    :param bool after_assimilate: whether or not we should perform the after
    assimilation hooks
    :param bool after_mesh: Whether or not to perform the after_mesh hooks
    :return None:
    """
    log.info(
        "Attempting to clean up any local state on {} if "
        "any".format(host['dst'])
    )
    # Broad exception clauses because host might be down.
    # If so, we'll get it next time.
    with suppress(Exception):
        # SSH port defaults to 22 when the inventory entry has none.
        clean(host['dst'], port=host.get('port', 22))
    log.info("Slaving {}".format(host['dst']))
    # Best-effort as well: a failed slave attempt must not abort the
    # deployment of the remaining hosts in the pool.
    with suppress(Exception):
        slave_machine(
            host['dst'],
            port=host.get('port', 22),
            server_type=server_type,
            provision=provision,
            assimilate=assimilate,
            after_assimilate=after_assimilate,
            after_mesh=after_mesh
        )
def deploy_network(inventory, server_type=None, concurrent=5,
                   provision=False, assimilate=False,
                   after_assimilate=False, after_mesh=False):
    """
    Deploy or re-create the raptiformica network to the hostnames or IPs
    from the passed inventory file. Will wipe any existing raptiformica
    configuration on those machines and deploy a new network.

    :param str inventory: The inventory file
    :param str server_type: name of the server type to provision the machine as
    :param int concurrent: The amount of hosts to deploy to concurrently
    :param bool provision: whether or not we should assimilate the remote machine
    :param bool assimilate: whether or not we should assimilate the remote machine
    :param bool after_assimilate: whether or not we should perform the after
    assimilation hooks
    :param bool after_mesh: Whether or not to perform the after_mesh hooks
    :return None:
    """
    hosts = read_inventory_file(inventory)
    if any(entry.get('via') for entry in hosts):
        raise RuntimeError(
            "Via hosts are not supported yet. Please set up "
            "ProxyCommand tunnels in your .ssh/config "
            "instead for now."
        )
    log.info(
        "Will deploy a new network on {} "
        "hosts".format(len(hosts))
    )
    deploy = partial(
        _deploy_to_host,
        provision=provision,
        assimilate=assimilate,
        after_assimilate=after_assimilate,
        after_mesh=after_mesh
    )
    pool = ThreadPool(processes=concurrent)
    # Fan out over the pool: each host is paired with the same server_type.
    pool.starmap(deploy, zip(hosts, [server_type] * len(hosts)))
| {
"repo_name": "vdloo/raptiformica",
"path": "raptiformica/actions/deploy.py",
"copies": "1",
"size": "4583",
"license": "mit",
"hash": 3403237994523287000,
"line_mean": 34.5271317829,
"line_max": 82,
"alpha_frac": 0.6500109099,
"autogenerated": false,
"ratio": 3.9238013698630136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037586534453184264,
"num_lines": 129
} |
from functools import partial
from logging import getLogger
from django.conf import settings
from django.core.paginator import Paginator
from django.http import HttpResponseNotFound
from django.shortcuts import render
from redis.exceptions import ResponseError
from .utils import PY3
from .utils import LazySlicingIterable
try:
from django.utils.datastructures import SortedDict as OrderedDict
except ImportError:
from collections import OrderedDict
logger = getLogger(__name__)

# Page size for paginated key values; override via Django settings.
REDISBOARD_ITEMS_PER_PAGE = getattr(settings, 'REDISBOARD_ITEMS_PER_PAGE', 100)
def safeint(value):
try:
return int(value)
except ValueError:
return value
def _fixup_pair(pair):
a, b = pair
return a, safeint(b)
# Map a redis type name (bytes, as returned by TYPE) to a callable
# computing the "length" of a key of that type.
LENGTH_GETTERS = {
    b'list': lambda conn, key: conn.llen(key),
    b'string': lambda conn, key: conn.strlen(key),
    b'set': lambda conn, key: conn.scard(key),
    b'zset': lambda conn, key: conn.zcount(key, '-inf', '+inf'),
    b'hash': lambda conn, key: conn.hlen(key),
}
def _decode_bytes(value):
if isinstance(value, bytes):
return value.decode('utf8')
else:
return value
def _get_key_info(conn, key):
    """Collect type, length, ttl and OBJECT metadata for a single key.

    On redis errors (or a missing key) a placeholder dict carrying an
    'error' entry is returned instead of raising.
    """
    def fallback(type_, error):
        # Placeholder used when the key is missing or redis errors out.
        return {
            'type': type_,
            'name': key,
            'length': "n/a",
            'error': error,
            'ttl': "n/a",
            'refcount': "n/a",
            'encoding': "n/a",
            'idletime': "n/a",
        }

    try:
        obj_type = conn.type(key)
        length_getter = LENGTH_GETTERS.get(obj_type)
        if not length_getter:
            return fallback('none', "The key does not exist")
        pipe = conn.pipeline()
        try:
            pipe.object('REFCOUNT', key)
            pipe.object('ENCODING', key)
            pipe.object('IDLETIME', key)
            length_getter(pipe, key)
            pipe.ttl(key)
            refcount, encoding, idletime, obj_length, obj_ttl = pipe.execute()
        except ResponseError as exc:
            logger.exception(
                "Failed to get object info for key %r: %s", key, exc)
            return fallback(obj_type, str(exc))
        return {
            'type': _decode_bytes(obj_type),
            'name': key,
            'length': obj_length,
            'ttl': obj_ttl,
            'refcount': refcount,
            'encoding': _decode_bytes(encoding),
            'idletime': idletime,
        }
    except ResponseError as exc:
        logger.exception("Failed to get details for key %r: %s", key, exc)
        return fallback("n/a", str(exc))
# Map a (decoded) redis type name to a callable producing the key's value
# as a list of (position, value) pairs; 'list' and 'zset' support ranged
# (paginated) access via start/end.
VALUE_GETTERS = {
    'list': lambda conn, key, start=0, end=-1: [
        (pos + start, val)
        for (pos, val) in enumerate(conn.lrange(key, start, end))
    ],
    'string': lambda conn, key, *args: [('string', conn.get(key))],
    'set': lambda conn, key, *args: list(enumerate(conn.smembers(key))),
    'zset': lambda conn, key, start=0, end=-1: [
        (pos + start, val)
        for (pos, val) in enumerate(conn.zrange(key, start, end))
    ],
    'hash': lambda conn, key, *args: conn.hgetall(key).items(),
    'n/a': lambda conn, key, *args: (),
    'none': lambda conn, key, *args: (),
}
def _get_key_details(conn, db, key, page):
    """Return the info dict for *key* in database *db*, with its value
    (paginated for ranged types) stored under 'data'.
    """
    conn.execute_command('SELECT', db)
    details = _get_key_info(conn, key)
    details['db'] = db
    if details['type'] in ('list', 'zset'):
        # Ranged types are sliced lazily so huge values are never fetched
        # in one go.
        details['data'] = Paginator(
            LazySlicingIterable(
                lambda: details['length'],
                partial(VALUE_GETTERS[details['type']], conn, key)
            ),
            REDISBOARD_ITEMS_PER_PAGE
        ).page(page)
    else:
        details['data'] = VALUE_GETTERS[details['type']](conn, key)
    return details
def _raw_get_db_summary(server, db):
    """Estimate the key count and memory usage of database *db* by
    sampling random keys and extrapolating to the whole keyspace.
    """
    server.connection.execute_command('SELECT', db)
    pipe = server.connection.pipeline()
    pipe.dbsize()
    for _ in range(server.sampling_threshold):
        pipe.randomkey()
    results = pipe.execute()
    size = results.pop(0)
    keys = sorted(set(results))

    pipe = server.connection.pipeline()
    for key in keys:
        pipe.execute_command('DEBUG', 'OBJECT', key)
        pipe.ttl(key)

    total_memory = volatile_memory = persistent_memory = 0
    total_keys = volatile_keys = persistent_keys = 0
    results = pipe.execute()
    for key, details, ttl in zip(keys, results[::2], results[1::2]):
        if not isinstance(details, dict):
            # Some client/server combinations return DEBUG OBJECT output
            # as "k:v k:v ..." text rather than a parsed dict.
            details = dict(_fixup_pair(item.split(b':'))
                           for item in details.split() if b':' in item)
        length = details[b'serializedlength'] + len(key)
        # NOTE(review): keys WITH a ttl are counted as "persistent" here,
        # which looks inverted (a ttl'd key is volatile) -- preserved
        # as-is; confirm the intended semantics.
        if ttl:
            persistent_memory += length
            persistent_keys += 1
        else:
            volatile_memory += length
            volatile_keys += 1
        total_memory += length
        total_keys += 1

    def extrapolate(sampled_memory, sampled_keys):
        # Scale the sampled average up to the full keyspace size.
        if sampled_keys:
            return (sampled_memory / sampled_keys) * size
        return 0

    return dict(
        size=size,
        total_memory=extrapolate(total_memory, total_keys),
        volatile_memory=extrapolate(volatile_memory, volatile_keys),
        persistent_memory=extrapolate(persistent_memory, persistent_keys),
    )
def _get_db_summary(server, db):
    """Like :func:`_raw_get_db_summary`, but degrades to all-zero figures
    when redis refuses the sampling commands.
    """
    try:
        return _raw_get_db_summary(server, db)
    except ResponseError as exc:
        logger.exception("Failed to get summary for db %r: %s", db, exc)
        return dict(
            size=0,
            total_memory=0,
            volatile_memory=0,
            persistent_memory=0,
        )
def _get_db_details(server, db):
    """Return per-key info for database *db*.

    Databases larger than the sampling threshold are randomly sampled
    rather than fully enumerated ('sampling' flags which happened).
    """
    conn = server.connection
    conn.execute_command('SELECT', db)
    size = conn.dbsize()
    key_details = {}
    if size > server.sampling_threshold:
        sampling = True
        pipe = conn.pipeline()
        for _ in (range if PY3 else xrange)(server.sampling_size):  # noqa
            pipe.randomkey()
        # set() dedupes keys RANDOMKEY may have returned more than once.
        for key in set(pipe.execute()):
            key_details[key] = _get_key_info(conn, key)
    else:
        sampling = False
        for key in conn.keys():
            key_details[key] = _get_key_info(conn, key)
    return dict(
        keys=key_details,
        sampling=sampling,
    )
def inspect(request, server):
    """Render the inspection page for *server*.

    Depending on the querystring this shows a single key
    (?key=...&db=...), a single expanded database (?db=...) or a summary
    of every database.
    """
    stats = server.stats
    conn = server.connection
    database_details = OrderedDict()
    key_details = None
    if stats['status'] == 'UP':
        if 'key' in request.GET:
            key = request.GET['key']
            db = request.GET.get('db', 0)
            page = request.GET.get('page', 1)
            key_details = _get_key_details(conn, db, key, page)
        else:
            databases = sorted(name[2:] for name in conn.info()
                               if name.startswith('db'))
            total_size = 0
            for db in databases:
                database_details[db] = summary = _get_db_summary(server, db)
                total_size += summary['size']
            if total_size < server.sampling_threshold:
                # Small server: expand every database up front.
                for db in databases:
                    database_details[db].update(
                        _get_db_details(server, db),
                        active=True,
                    )
            elif 'db' in request.GET:
                # Large server: only expand the explicitly requested db.
                db = request.GET['db']
                if db in database_details:
                    database_details[db].update(
                        _get_db_details(server, db),
                        active=True,
                    )
                else:
                    return HttpResponseNotFound("Unknown database.")
    return render(request, "redisboard/inspect.html", {
        'databases': database_details,
        'key_details': key_details,
        'original': server,
        'stats': stats,
        'app_label': 'redisboard',
    })
| {
"repo_name": "ionelmc/django-redisboard",
"path": "src/redisboard/views.py",
"copies": "1",
"size": "8372",
"license": "bsd-2-clause",
"hash": 1191330748191314000,
"line_mean": 28.0694444444,
"line_max": 82,
"alpha_frac": 0.5341614907,
"autogenerated": false,
"ratio": 3.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9867285654019947,
"avg_score": 0.000041834002677376177,
"num_lines": 288
} |
from functools import partial
from logging import getLogger
from django.contrib import messages
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.urls import NoReverseMatch
from django.utils.functional import cached_property
from django.utils.module_loading import autodiscover_modules
from django.utils.translation import get_language_from_request, gettext_lazy as _
from cms.utils import get_current_site
from cms.utils.i18n import (get_default_language_for_site,
is_language_prefix_patterns_used)
from cms.utils.conf import get_cms_setting
from cms.utils.moderator import use_draft
from menus.base import Menu
from menus.exceptions import NamespaceAlreadyRegistered
from menus.models import CacheKey
# Module-level logger shared by the menu building/rendering code.
logger = getLogger('menus')
def _build_nodes_inner_for_one_menu(nodes, menu_class_name):
'''
This is an easier to test "inner loop" building the menu tree structure
for one menu (one language, one site)
'''
done_nodes = {} # Dict of node.id:Node
final_nodes = []
# This is to prevent infinite loops - we need to compare the number of
# times we see a specific node to "something", and for the time being,
# it's the total number of nodes
list_total_length = len(nodes)
while nodes:
# For when the node has a parent_id but we haven't seen it yet.
# We must not append it to the final list in this case!
should_add_to_final_list = True
node = nodes.pop(0)
# Increment the "seen" counter for this specific node.
node._counter = getattr(node, '_counter', 0) + 1
# Implicit namespacing by menu.__name__
if not node.namespace:
node.namespace = menu_class_name
if node.namespace not in done_nodes:
# We need to create the namespace dict to avoid KeyErrors
done_nodes[node.namespace] = {}
# If we have seen the parent_id already...
if node.parent_id in done_nodes[node.namespace]:
# Implicit parent namespace by menu.__name__
if not node.parent_namespace:
node.parent_namespace = menu_class_name
parent = done_nodes[node.namespace][node.parent_id]
parent.children.append(node)
node.parent = parent
# If it has a parent_id but we haven't seen it yet...
elif node.parent_id:
# We check for infinite loops here, by comparing the number of
# times we "saw" this node to the number of nodes in the list
if node._counter < list_total_length:
nodes.append(node)
# Never add this node to the final list until it has a real
# parent (node.parent)
should_add_to_final_list = False
if should_add_to_final_list:
final_nodes.append(node)
# add it to the "seen" list
done_nodes[node.namespace][node.id] = node
return final_nodes
def _get_menu_class_for_instance(menu_class, instance):
"""
Returns a new menu class that subclasses
menu_class but is bound to instance.
This means it sets the "instance" attribute of the class.
"""
attrs = {'instance': instance}
class_name = menu_class.__name__
meta_class = type(menu_class)
return meta_class(class_name, (menu_class,), attrs)
class MenuRenderer:
    """Per-request menu building/rendering engine.

    Decouples the singleton menu pool from rendering so every request
    gets a private instance with stable attributes.
    """
    # The main logic behind this class is to decouple
    # the singleton menu pool from the menu rendering logic.
    # By doing this we can be sure that each request has it's
    # private instance that will always have the same attributes.

    def __init__(self, pool, request):
        self.pool = pool
        # It's important this happens on init
        # because we need to make sure that a menu renderer
        # points to the same registered menus as long as the
        # instance lives.
        self.menus = pool.get_registered_menus(for_rendering=True)
        self.request = request
        if is_language_prefix_patterns_used():
            self.request_language = get_language_from_request(request, check_path=True)
        else:
            self.request_language = get_default_language_for_site(get_current_site().pk)
        self.site = Site.objects.get_current(request)

    @property
    def cache_key(self):
        """Cache key scoped by language, site, user and draft/public state."""
        prefix = get_cms_setting('CACHE_PREFIX')
        key = '%smenu_nodes_%s_%s' % (prefix, self.request_language, self.site.pk)
        if self.request.user.is_authenticated:
            key += '_%s_user' % self.request.user.pk
        if self.draft_mode_active:
            key += ':draft'
        else:
            key += ':public'
        return key

    @cached_property
    def draft_mode_active(self):
        """Whether the request is being served draft content."""
        try:
            # Under certain conditions, the request page won't match
            # the requested state.
            # For example, user requests draft page but gets public.
            _use_draft = self.request.current_page.publisher_is_draft
        except AttributeError:
            _use_draft = use_draft(self.request)
        return _use_draft

    @cached_property
    def is_cached(self):
        # The cache is only trusted if its key is also present in the
        # database (see _build_nodes for why).
        db_cache_key_lookup = CacheKey.objects.filter(
            key=self.cache_key,
            language=self.request_language,
            site=self.site.pk,
        )
        return db_cache_key_lookup.exists()

    def _build_nodes(self):
        """
        This is slow. Caching must be used.
        One menu is built per language and per site.

        Namespaces: they are ID prefixes to avoid node ID clashes when plugging
        multiple trees together.

        - We iterate on the list of nodes.
        - We store encountered nodes in a dict (with namespaces):
            done_nodes[<namespace>][<node's id>] = node
        - When a node has a parent defined, we lookup that parent in done_nodes
            if it's found:
                set the node as the node's parent's child (re-read this)
            else:
                the node is put at the bottom of the list
        """
        key = self.cache_key
        cached_nodes = cache.get(key, None)
        if cached_nodes and self.is_cached:
            # Only use the cache if the key is present in the database.
            # This prevents a condition where keys which have been removed
            # from the database due to a change in content, are still used.
            return cached_nodes
        final_nodes = []
        toolbar = getattr(self.request, 'toolbar', None)
        for menu_class_name in self.menus:
            menu = self.get_menu(menu_class_name)
            try:
                nodes = menu.get_nodes(self.request)
            except NoReverseMatch:
                # Apps might raise NoReverseMatch if an apphook does not yet
                # exist, skip them instead of crashing
                nodes = []
                if toolbar and toolbar.is_staff:
                    messages.error(self.request,
                                   _('Menu %s cannot be loaded. Please, make sure all '
                                     'its urls exist and can be resolved.') %
                                   menu_class_name)
                logger.error("Menu %s could not be loaded." %
                             menu_class_name, exc_info=True)
            # nodes is a list of navigation nodes (page tree in cms + others)
            final_nodes += _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        cache.set(key, final_nodes, get_cms_setting('CACHE_DURATIONS')['menus'])
        if not self.is_cached:
            # No need to invalidate the internal lookup cache,
            # just set the value directly.
            self.__dict__['is_cached'] = True
            # We need to have a list of the cache keys for languages and sites that
            # span several processes - so we follow the Django way and share through
            # the database. It's still cheaper than recomputing every time!
            # This way we can selectively invalidate per-site and per-language,
            # since the cache is shared but the keys aren't
            CacheKey.objects.create(key=key, language=self.request_language, site=self.site.pk)
        return final_nodes

    def _mark_selected(self, nodes):
        # Flag the node matching the current request URL as selected.
        for node in nodes:
            node.selected = node.is_selected(self.request)
        return nodes

    def apply_modifiers(self, nodes, namespace=None, root_id=None,
                        post_cut=False, breadcrumb=False):
        """Run every registered Modifier over *nodes* and return the result."""
        if not post_cut:
            nodes = self._mark_selected(nodes)
        # Only fetch modifiers when they're needed.
        # We can do this because unlike menu classes,
        # modifiers can't change on a request basis.
        for cls in self.pool.get_registered_modifiers():
            inst = cls(renderer=self)
            nodes = inst.modify(
                self.request, nodes, namespace, root_id, post_cut, breadcrumb)
        return nodes

    def get_nodes(self, namespace=None, root_id=None, breadcrumb=False):
        """Build (or fetch cached) nodes and apply the pre-cut modifiers."""
        nodes = self._build_nodes()
        nodes = self.apply_modifiers(
            nodes=nodes,
            namespace=namespace,
            root_id=root_id,
            post_cut=False,
            breadcrumb=breadcrumb,
        )
        return nodes

    def get_menu(self, menu_name):
        """Instantiate the registered menu class *menu_name*, bound to self."""
        MenuClass = self.menus[menu_name]
        return MenuClass(renderer=self)
class MenuPool:
def __init__(self):
    # name -> registered Menu class (or, legacy, a Menu instance).
    self.menus = {}
    # Registered Modifier classes, in registration order.
    self.modifiers = []
    # Set once autodiscovery of cms_menus modules has run.
    self.discovered = False
def get_renderer(self, request):
    """Return a MenuRenderer bound to *request*."""
    self.discover_menus()
    # Returns a menu pool wrapper that is bound
    # to the given request and can perform
    # operations based on the given request.
    return MenuRenderer(pool=self, request=request)
def discover_menus(self):
    """Load every installed app's cms_menus module (idempotent)."""
    if self.discovered:
        return
    autodiscover_modules('cms_menus')
    # Imported lazily to avoid an import cycle with menus.base.
    from menus.modifiers import register
    register()
    self.discovered = True
def get_registered_menus(self, for_rendering=False):
    """
    Returns all registered menu classes.

    :param for_rendering: Flag that when True forces us to include
        all CMSAttachMenu subclasses, even if they're not attached.
    """
    self.discover_menus()
    registered_menus = {}

    for menu_class_name, menu_cls in self.menus.items():
        if isinstance(menu_cls, Menu):
            # A Menu **instance** was registered,
            # this is non-standard, but acceptable.
            menu_cls = menu_cls.__class__
        if hasattr(menu_cls, "get_instances"):
            # It quacks like a CMSAttachMenu.
            # Expand the one CMSAttachMenu into multiple classes.
            # Each class is bound to the instance the menu is attached to.
            _get_menu_class = partial(_get_menu_class_for_instance, menu_cls)

            instances = menu_cls.get_instances() or []
            for instance in instances:
                # For each instance, we create a unique class
                # that is bound to that instance.
                # Doing this allows us to delay the instantiation
                # of the menu class until it's needed.
                # Plus we keep the menus consistent by always
                # pointing to a class instead of an instance.
                namespace = "{0}:{1}".format(
                    menu_class_name, instance.pk)
                registered_menus[namespace] = _get_menu_class(instance)

            if not instances and not for_rendering:
                # The menu is a CMSAttachMenu but has no instances,
                # normally we'd just ignore it but it's been
                # explicitly set that we are not rendering these menus
                # via the (for_rendering) flag.
                registered_menus[menu_class_name] = menu_cls
        elif hasattr(menu_cls, "get_nodes"):
            # This is another type of Menu, cannot be expanded, but must be
            # instantiated, none-the-less.
            registered_menus[menu_class_name] = menu_cls
        else:
            raise ValidationError(
                "Something was registered as a menu, but isn't.")
    return registered_menus
def get_registered_modifiers(self):
return self.modifiers
def clear(self, site_id=None, language=None, all=False):
'''
This invalidates the cache for a given menu (site_id and language)
'''
if all:
cache_keys = CacheKey.objects.get_keys()
else:
cache_keys = CacheKey.objects.get_keys(site_id, language)
to_be_deleted = cache_keys.distinct().values_list('key', flat=True)
if to_be_deleted:
cache.delete_many(to_be_deleted)
cache_keys.delete()
def register_menu(self, menu_cls):
from menus.base import Menu
assert issubclass(menu_cls, Menu)
if menu_cls.__name__ in self.menus:
raise NamespaceAlreadyRegistered(
"[{0}] a menu with this name is already registered".format(
menu_cls.__name__))
# Note: menu_cls should still be the menu CLASS at this point.
self.menus[menu_cls.__name__] = menu_cls
def register_modifier(self, modifier_class):
from menus.base import Modifier
assert issubclass(modifier_class, Modifier)
if modifier_class not in self.modifiers:
self.modifiers.append(modifier_class)
def get_menus_by_attribute(self, name, value):
"""
Returns the list of menus that match the name/value criteria provided.
"""
# Note that we are limiting the output to only single instances of any
# specific menu class. This is to address issue (#4041) which has
# cropped-up in 3.0.13/3.0.0.
# By setting for_rendering to False
# we're limiting the output to menus
# that are registered and have instances
# (in case of attached menus).
menus = self.get_registered_menus(for_rendering=False)
return sorted(list(set([(menu.__name__, menu.name)
for menu_class_name, menu in menus.items()
if getattr(menu, name, None) == value])))
def get_nodes_by_attribute(self, nodes, name, value):
return [node for node in nodes if node.attr.get(name, None) == value]
menu_pool = MenuPool()
| {
"repo_name": "rsalmaso/django-cms",
"path": "menus/menu_pool.py",
"copies": "1",
"size": "14606",
"license": "bsd-3-clause",
"hash": 7428805208692991000,
"line_mean": 38.5826558266,
"line_max": 95,
"alpha_frac": 0.6021498015,
"autogenerated": false,
"ratio": 4.313644418192558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5415794219692558,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from logging import getLogger
from os.path import isdir, join
from raptiformica.settings import conf, Config
from raptiformica.shell.execute import run_command_print_ready, raise_failure_factory, log_success_factory
from raptiformica.shell.raptiformica import create_remote_raptiformica_cache
from raptiformica.utils import retry
log = getLogger(__name__)
def download(source, destination, host, port=22, timeout=1800):
    """
    Download the source path from the remote host to the destination
    path on the local host.

    (The previous docstring was a copy-paste of :func:`upload`; this
    function rsyncs ``root@host:source`` to the local *destination*.)

    :param str source: path to sync from on the remote host
    :param str destination: path to sync to on the local machine
    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :param int timeout: The amount of time the command is allowed to run before terminating it.
    :return int exit_code: the rsync exit code (0 on success); failures
        are also reported through the failure callback.
    """
    download_command = [
        '/usr/bin/env', 'rsync', '-q', '--force', '-avz',
        'root@{}:{}'.format(host, source), destination,
        '--exclude=.venv', '--exclude=*.pyc',
        # Skip host-key verification: target machines are ephemeral.
        '-e', 'ssh -p {} '
              '-oStrictHostKeyChecking=no '
              '-oUserKnownHostsFile=/dev/null'.format(port)
    ]
    exit_code, _, _ = run_command_print_ready(
        download_command,
        success_callback=log_success_factory(
            "Successfully downloaded files from the remote host"
        ),
        failure_callback=raise_failure_factory(
            "Something went wrong downloading files from the remote host"
        ),
        buffered=False,
        timeout=timeout
    )
    return exit_code
def upload(source, destination, host, port=22, timeout=1800):
    """
    Upload the source path from the local host to the destination path
    on the remote host with rsync over ssh.

    :param str source: path to sync from on the local host
    :param str destination: path to sync to on the remote machine
    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :param int timeout: The amount of time the command is allowed to run before terminating it.
    :return int exit_code: the rsync exit code (0 on success)
    """
    # Skip host-key checks: we deal with ephemeral machines.
    ssh_transport = ('ssh -p {} '
                     '-oStrictHostKeyChecking=no '
                     '-oUserKnownHostsFile=/dev/null'.format(port))
    command = [
        '/usr/bin/env', 'rsync', '-q', '--force', '-avz',
        source, 'root@{}:{}'.format(host, destination),
        '--exclude=.venv', '--exclude=*.pyc',
        '--exclude=modules/compute/vagrant/.vagrant',
        '--exclude=var', '--exclude=last_advertised',
        '-e', ssh_transport
    ]
    log.info("Uploading local {} to remote {}".format(source, destination))
    exit_code, _, _ = run_command_print_ready(
        command,
        success_callback=log_success_factory(
            "Successfully uploaded files to the remote host"
        ),
        failure_callback=raise_failure_factory(
            "Something went wrong uploading files to the remote host"
        ),
        buffered=False,
        timeout=timeout
    )
    return exit_code
@retry(attempts=3, expect=(TimeoutError, RuntimeError))
def upload_self(host, port=22):
    """
    Upload the source code of the current raptiformica checkout to the remote host.
    Excludes non-transferable var files like Virtual Machines (these should be ephemeral by nature)

    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :return bool status: True if success, False if failure
    """
    log.info("Uploading raptiformica to the remote host")
    push = partial(upload, host=host, port=port, timeout=60)
    exit_codes = [push(conf().PROJECT_DIR, conf().INSTALL_DIR)]
    exit_codes.append(create_remote_raptiformica_cache(host, port=port))
    if isdir(conf().ABS_CACHE_DIR):
        # Only sync the cache when one exists locally.
        exit_codes.append(
            push(conf().ABS_CACHE_DIR + '/', "$HOME/{}".format(Config.CACHE_DIR))
        )
    else:
        exit_codes.append(0)
    # Success means every step exited with code 0.
    return not any(exit_codes)
@retry(attempts=3, expect=(TimeoutError,))
def download_artifacts(host, port=22):
    """
    If new artifacts were created copy those back to this machine so that
    if we ever need to do something on a similar machine again later we don't
    have to do double work

    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :return bool status: True if success, False if failure
    """
    log.info("Downloading artifacts from the remote host")
    fetch = partial(download, host=host, port=port, timeout=180)
    exit_code = fetch(
        join(Config.CACHE_DIR, 'artifacts'), conf().ABS_CACHE_DIR
    )
    return not exit_code
| {
"repo_name": "vdloo/raptiformica",
"path": "raptiformica/shell/rsync.py",
"copies": "1",
"size": "5032",
"license": "mit",
"hash": -3857957405777932300,
"line_mean": 39.9105691057,
"line_max": 106,
"alpha_frac": 0.6641494436,
"autogenerated": false,
"ratio": 4.032051282051282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196200725651282,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from lxml.etree import ParseError
from nose.tools import eq_
from requests import RequestException
from _utils import RequestStub, ResponseStub, FunctionStub
from _utils import assert_http_redirect, assert_http_not_found
from ...server_app.handler.pypi_index_handler import PyPIIndexHandler
def typical_usage_as_index_test():
    """Happy path: an index request is handled without redirecting."""
    _check_main_index_path(path=['python', 'nose'], is_index=True)
def typical_usage_not_index_test():
    """A non-directory request must be 301-redirected to the directory form."""
    run_handler = partial(_check_main_index_path,
                          path=['python', 'nose'],
                          is_index=False)
    assert_http_redirect(
        run_handler_fn=run_handler,
        expected_url='nose/',
        expected_status=301,
        failure_description='Index handler did not redirect to directory')
def http_get_fn_exception_test():
    """A RequestException while fetching the upstream index must yield a 404."""
    run_handler = partial(_check_main_index_path,
                          path=['python', 'nose'],
                          is_index=True,
                          http_get_exception=RequestException())
    assert_http_not_found(
        run_handler_fn=run_handler,
        failure_description='Failed to return 404 on failure to get index')
def parse_index_fn_exception_test():
    """A ParseError while parsing the upstream index must yield a 404."""
    run_handler = partial(_check_main_index_path,
                          path=['python', 'nose'],
                          is_index=True,
                          parse_index_exception=ParseError(None, None, None, None))
    assert_http_not_found(
        run_handler_fn=run_handler,
        failure_description='Failed to return 404 on failure to parse index')
def non_python_root_path_test():
    """Any root path other than 'python' must yield a 404."""
    run_handler = partial(_check_main_index_path,
                          path=['not_python', 'nose'],
                          is_index=True)
    assert_http_not_found(
        run_handler_fn=run_handler,
        failure_description='Failed to return 404 on non-"/python/" path')
def _check_main_index_path(
        path,
        is_index,
        http_get_exception=None,
        parse_index_exception=None):
    """Exercise PyPIIndexHandler.handle with stubbed collaborators.

    Builds function stubs for HTTP GET, index parsing and index building
    (optionally raising the supplied exceptions), runs the handler over
    *path*, and verifies the returned body, the untouched response
    headers, and the keyword arguments each stub received.
    """
    base_url = 'http://dumb_url.com'
    built_html = 'be dumb builder'
    parsed_rows = 'be dumb parser'
    fetched_html = 'be dumb html'
    _, package_path = path
    http_get_stub = FunctionStub(
        name='HTML Get',
        dummy_result=fetched_html,
        dummy_exception=http_get_exception)
    parse_stub = FunctionStub(
        name='Parser',
        dummy_result=parsed_rows,
        dummy_exception=parse_index_exception)
    build_stub = FunctionStub(
        name='Builder',
        dummy_result=built_html)
    handler = PyPIIndexHandler(
        pypi_base_url=base_url,
        http_get_fn=http_get_stub,
        parse_index_fn=parse_stub,
        build_index_fn=build_stub)
    response = ResponseStub()
    response_str = handler.handle(
        path=path,
        request=RequestStub(is_index=is_index),
        response=response)
    eq_(response.headers, {},
        msg='Headers are expected to be unaffected')
    eq_(response_str, built_html,
        msg='Handler did not return builder result')
    build_stub.assert_single_kw_call(expected_kwargs={
        'index_rows': parsed_rows})
    parse_stub.assert_single_kw_call(expected_kwargs={
        'base_url': base_url,
        'package_path': package_path,
        'html_str': fetched_html})
| {
"repo_name": "teamfruit/defend_against_fruit",
"path": "pypi_redirect/pypi_redirect/test/unit_tests/pypi_index_handler_test.py",
"copies": "1",
"size": "3321",
"license": "apache-2.0",
"hash": 6697571824741685000,
"line_mean": 27.6293103448,
"line_max": 77,
"alpha_frac": 0.6440831075,
"autogenerated": false,
"ratio": 3.7738636363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4917946743863636,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from lxml import etree
from kivy import Logger
from kivy.animation import Animation
from kivy.properties import StringProperty, BooleanProperty
from kivy.clock import Clock
from tests.GUI.tools import execution_step, without_schedule_seconds
class Simulator(object):
    """Simulates user interaction with a running Kivy app for GUI tests.

    The live widget tree is mirrored into an lxml etree (``rebuild_tree``)
    so widgets can be addressed with XPath selectors. Actions and
    assertions decorated with ``execution_step`` are queued and later
    replayed through Kivy's Clock (``execute``/``_execute_step``).
    Triggered widgets and checked widgets are highlighted on the canvas.
    """
    # NOTE(review): unused within this class — presumably consumed by
    # tests.GUI.tools; confirm before removing.
    TO_WAIT = 1
    # Kivy property classes serialized into XML attributes, with the
    # converter applied to each value.
    PROPERTY_CLASSES_TO_ATTRIBUTES = {
        StringProperty: lambda x: x if x else str(x),
        BooleanProperty: lambda x: str(x).lower(),
    }
    # Seconds between app start and execution of the first queued step.
    START_DELAY = 2
    # Default delay in seconds between consecutive queued steps.
    EXECUTE_STEP_DELAY = .5
    def __init__(self, app):
        # The Kivy App under test.
        self.app = app
        # Flat widget list; a widget's index == its '__element_id' attribute.
        self.collected_widgets = None
        # lxml mirror of the widget tree; rebuilt by rebuild_tree().
        self.tree = None
        # Queued (function, args, kwargs) tuples added by @execution_step.
        self.execution_queue = []
    def _get_prop_values(self, widget):
        """Collect the widget's serializable property values as strings.

        NOTE(review): a property whose name starts with "__" but whose
        class is not in PROPERTY_CLASSES_TO_ATTRIBUTES would raise
        KeyError on the lookup below — confirm this cannot occur.
        """
        result = {}
        for prop_name, property_object in widget.properties().items():
            if prop_name.startswith("__") or property_object.__class__ in self.PROPERTY_CLASSES_TO_ATTRIBUTES:
                result[prop_name.replace("__", "")] = self.PROPERTY_CLASSES_TO_ATTRIBUTES[
                    property_object.__class__](
                    getattr(widget, prop_name)
                )
        return result
    def build_deep(self, widget):
        """Recursively mirror *widget* and its children into etree elements.

        Each element records the widget's index in collected_widgets as
        '__element_id' so XPath hits can be mapped back to widgets.
        """
        props = self._get_prop_values(widget)
        try:
            element = etree.Element(widget.__class__.__name__, __element_id=str(len(self.collected_widgets)), **props)
        except TypeError as ex:
            print("Error while creating element. %s, %s" % (props, ex))
            raise ex
        self.collected_widgets.append(widget)
        # children is iterated reversed to preserve on-screen order.
        for child in widget.children[::-1]:
            element.append(self.build_deep(child))
        return element
    def rebuild_tree(self):
        """Refresh the etree mirror from the app's current root widget."""
        self.collected_widgets = []
        self.tree = self.build_deep(self.app.root)
    def xml_tree(self):
        """Return the mirrored widget tree as pretty-printed XML bytes."""
        return etree.tostring(self.tree, pretty_print=True)
    def trigger_event(self, event, selector):
        """Dispatch *event* on every widget matched by *selector*."""
        nodes = self.tree.xpath(selector)
        assert nodes, "Attempt to trigger %s for %s, but no nodes were found" % (event, selector)
        for node in nodes:
            widget = self.node_to_widget(node)
            widget.dispatch(event)
            self._mark_event(widget)
    @execution_step
    def press(self, selector, release=False):
        """Queue an 'on_press' (optionally followed by 'on_release')."""
        Logger.info("Simulation: Press %s" % selector)
        self.rebuild_tree()
        self.trigger_event('on_press', selector)
        if release:
            self.trigger_event('on_release', selector)
    def tap(self, selector):
        """Press and release in one step."""
        self.press(selector, release=True)
    def _set_attr(self, obj, attr, value):
        setattr(obj, attr, value)
    @execution_step
    def set_attribute(self, selector, attr, value):
        """Queue setting *attr* = *value* on the single matched widget."""
        self._set_attr(self.assert_exists(selector, False), attr, value)
    def set_text_to(self, selector, text):
        """Queue setting the 'text' property of the matched widget."""
        Logger.info("Simulation: Setting text %s to widget %s" % (text, selector))
        self.set_attribute(selector, "text", text)
    @execution_step
    def wait(self, seconds):
        """Queue a pause; the returned value is used as the step delay
        by _execute_step."""
        Logger.info("Simulation: wait %s seconds..." % seconds)
        return seconds
    def start(self, callback, params):
        """Let *callback* queue steps, schedule execution, run the app."""
        callback(self, **params)
        Clock.schedule_once(self.execute, self.START_DELAY)
        if not self.app.built:
            self.app.run()
    def stop(self, _):
        self.app.stop()
    def _execute_step(self, index, _):
        """Run queued step *index*, then schedule the next one (or stop)."""
        if index >= len(self.execution_queue):
            Clock.schedule_once(self.stop, .1)
            return
        function, args, kwargs = self.execution_queue[index]
        function(self, *args, **kwargs)
        if function.__name__ == 'wait':
            # A wait step's argument doubles as the delay before the
            # next step.
            delay = kwargs.get('seconds', args[0])
        else:
            delay = self.EXECUTE_STEP_DELAY
        Clock.schedule_once(partial(self._execute_step, index + 1), delay)
    def clean_queue(self):
        """Unschedule and drop all queued steps."""
        Logger.info("Simulation: drop execution queue")
        for function, args, kwargs in self.execution_queue:
            Clock.unschedule(function)
        self.execution_queue = []
    def execute(self, *args):
        """Kick off execution of the queue from step 0."""
        Clock.schedule_once(partial(self._execute_step, 0))
    def node_to_widget(self, node):
        """Map an etree node (or a 1-element node list) back to its widget."""
        if isinstance(node, list) and len(node) == 1:
            node = node[0]
        widget = self.collected_widgets[int(node.get('__element_id'))]
        return widget
    def get_multiple_elements_with_selector(self, selector):
        """Return all etree nodes matching *selector*; assert at least one."""
        self.rebuild_tree()
        nodes = self.tree.xpath(selector)
        assert nodes, ("%s not found in widgets tree" % selector)
        return nodes
    # ASSERTIONS
    def assert_exists(self, selector, to_mark=True):
        """Assert exactly one widget matches *selector*; return it."""
        self.rebuild_tree()
        nodes = self.tree.xpath(selector)
        assert nodes, ("%s not found in widgets tree" % selector)
        if len(nodes) > 1:
            raise RuntimeError("Multiple nodes found for selector %s" % selector)
        widget = self.node_to_widget(nodes)
        if to_mark:
            self._mark_assertion(widget)
        return widget
    @execution_step
    def assert_count(self, selector, count):
        """Queue asserting that *selector* matches exactly *count* widgets."""
        self.rebuild_tree()
        nodes = self.tree.xpath(selector)
        for node in nodes:
            self._mark_assertion(self.node_to_widget(node))
        assert len(nodes) == count, "%s selects %s nodes, not %s" % (selector, len(nodes), count)
    @execution_step
    def assert_not_exists(self, selector):
        """Queue asserting that *selector* matches nothing."""
        self.rebuild_tree()
        assert not self.tree.xpath(selector), \
            ("%s found in widgets tree, but shouldn't be there" % selector)
    @execution_step
    def assert_disabled(self, selector):
        Logger.info("Simulation: assert %s is disabled" % selector)
        self.assert_attr(selector, "disabled", True)
    @execution_step
    def assert_enabled(self, selector):
        Logger.info("Simulation: assert %s is enabled" % selector)
        self.assert_attr(selector, "disabled", False)
    def _get_attr_with_period(self, obj, attrs):
        """Resolve a dotted attribute path like 'ids.label.text'."""
        attrs = attrs.split(".")
        res = obj
        for attr in attrs:
            res = getattr(res, attr)
        return res
    @execution_step
    def assert_attr(self, selector, attr, value):
        """Queue asserting the matched widget's (dotted) *attr* equals *value*."""
        node_attr_value = self._get_attr_with_period(self.assert_exists(selector), attr)
        assert node_attr_value == value, "%s.%s is not %s but %s" % (selector, attr, value, node_attr_value)
    def assert_text(self, selector, value):
        Logger.info("Simulation: assert %s has text %s" % (selector, value))
        self.assert_attr(selector, 'text', value)
    def _mark_assertion(self, widget):
        """Flash a fading green rectangle over *widget* for visual feedback."""
        from kivy.graphics import Color, Rectangle
        with widget.canvas.after:
            color = Color(0, 1, 0, .8)
            rect_pos = (widget.x + widget.width / 2, widget.y + widget.height / 2)
            rectangle = Rectangle(pos=rect_pos, size=(0, 0))
        Animation(a=0, duration=self.EXECUTE_STEP_DELAY / 2, t='in_out_cubic').start(color)
        Animation(size=widget.size,
                  pos=widget.pos,
                  duration=self.EXECUTE_STEP_DELAY / 2,
                  t='in_out_cubic').start(rectangle)
    def _mark_event(self, widget):
        """Flash a fading magenta circle over *widget* for visual feedback."""
        from kivy.graphics import Ellipse, Color
        with widget.canvas.after:
            color = Color(1, 0, 1, .8)
            d = min(widget.size)
            circle_pos = (widget.x + widget.width / 2, widget.y + widget.height / 2)
            circle = Ellipse(pos=circle_pos, size=(0, 0))
        Animation(a=0, duration=self.EXECUTE_STEP_DELAY / 2, t='in_out_cubic').start(color)
        # Center the circle along the widget's longer axis.
        point_pos = (widget.x + widget.width/2 - d / 2, widget.y) if widget.width > widget.height\
            else (widget.x, widget.y + widget.height/2 - d / 2)
        Animation(size=(d, d),
                  pos=point_pos,
                  duration=self.EXECUTE_STEP_DELAY / 2,
                  t='in_out_cubic').start(circle)
| {
"repo_name": "RemuTeam/Remu",
"path": "project/tests/GUI/simulation.py",
"copies": "1",
"size": "7877",
"license": "mit",
"hash": -4706380895906011000,
"line_mean": 34.9680365297,
"line_max": 118,
"alpha_frac": 0.5961660531,
"autogenerated": false,
"ratio": 3.8330900243309003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49292560774309,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from math import log, exp, sqrt
class memoize_inst_meth(object):
    """cache the return value of a method

    This class is meant to be used as a decorator of methods. The return
    value of a call is cached in a dict stored on the instance whose
    method was invoked, keyed by (function, positional args, keyword
    items) — so every argument must be hashable.

    If a memoized method is invoked directly on its class the result
    will not be cached; the raw function is returned instead:

        class Obj(object):
            @memoize_inst_meth
            def add_to(self, arg):
                return self + arg

        Obj.add_to(1) # not enough arguments
        Obj.add_to(1, 2) # returns 3, result is not cached
    """
    def __init__(self, func):
        self.func = func
    def __get__(self, obj, objtype=None):
        # Class-level access: behave exactly like the undecorated function.
        if obj is None:
            return self.func
        # Instance access: bind the instance as the first argument.
        return partial(self, obj)
    def __call__(self, *args, **kw):
        instance = args[0]
        try:
            cache = instance.__cache
        except AttributeError:
            # First memoized call on this instance: create its cache.
            cache = instance.__cache = {}
        key = (self.func, args[1:], frozenset(kw.items()))
        if key not in cache:
            cache[key] = self.func(*args, **kw)
        return cache[key]
def normalize(d):
    """Scale the values of *d* in place so they sum to 1; return *d*."""
    total = float(sum(d.values()))
    for key in d:
        d[key] /= total
    return d
def log_normalize(d):
    """Shift the log-values of *d* in place so exp(values) sum to 1;
    return *d*."""
    log_total = log(sum(exp(value) for value in d.values()))
    for key in d:
        d[key] -= log_total
    return d
def mul(a, b):
    """Return the product of *a* and *b*."""
    product = a * b
    return product
| {
"repo_name": "SsnL/amcmc",
"path": "utils/utils.py",
"copies": "1",
"size": "1573",
"license": "mit",
"hash": 3167870545995790300,
"line_mean": 28.679245283,
"line_max": 78,
"alpha_frac": 0.5880483153,
"autogenerated": false,
"ratio": 3.7995169082125604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864091983058187,
"avg_score": 0.004694648090874505,
"num_lines": 53
} |
from functools import partial
from math import radians
from typing import (Callable,
Tuple)
# TODO: sort out the mess with numpy and/or pandas
import numpy as np
import pandas as pd
SOLAR_ABSOLUTE_BOLOMETRIC_MAGNITUDE = 4.75
DEC_GPOLE = radians(27.128336)
RA_GPOLE = radians(192.859508)
AUX_ANGLE = radians(122.932)
nan_array = partial(np.full,
fill_value=np.nan)
def bolometric_indexer(*,
                       min_magnitude: float,
                       stars_bin_size: float) -> Callable[[np.ndarray],
                                                          np.ndarray]:
    """Build a function mapping bolometric magnitudes to int32 bin indices.

    The returned callable subtracts *min_magnitude* from each magnitude
    and floors the result divided by *stars_bin_size*.
    """
    def bolometric_index(magnitudes: np.ndarray) -> np.ndarray:
        offsets = magnitudes - min_magnitude
        bins = np.floor(offsets / stars_bin_size)
        return bins.astype(np.int32)
    return bolometric_index
def bolometric_magnitude(luminosities: pd.Series) -> pd.Series:
    """Convert (log-)luminosities to absolute bolometric magnitudes.

    More info at
    https://en.wikipedia.org/wiki/Absolute_magnitude#Bolometric_magnitude
    """
    scaled = 2.5 * luminosities
    return scaled + SOLAR_ABSOLUTE_BOLOMETRIC_MAGNITUDE
def to_cartesian_from_equatorial(stars: pd.DataFrame) -> Tuple[pd.Series,
                                                               pd.Series,
                                                               pd.Series]:
    """Convert equatorial coordinates plus distance to Cartesian x, y, z.

    Expects *stars* to contain 'declination', 'right_ascension' and
    'distance' columns (angles in radians, to match the radians-based
    DEC_GPOLE/RA_GPOLE constants — TODO confirm against callers). The
    rotation uses the galactic-pole constants defined at module level;
    NOTE(review): presumably an equatorial-to-galactic frame change —
    verify.
    """
    latitudes = (np.arcsin(np.cos(stars['declination']) * np.cos(DEC_GPOLE)
                           * np.cos(stars['right_ascension'] - RA_GPOLE)
                           + np.sin(stars['declination']) * np.sin(DEC_GPOLE)))
    x = np.sin(stars['declination']) - np.sin(latitudes) * np.sin(DEC_GPOLE)
    y = (np.cos(stars['declination'])
         * np.sin(stars['right_ascension'] - RA_GPOLE) * np.cos(DEC_GPOLE))
    longitudes = np.arctan(x / y) + AUX_ANGLE - np.pi / 2.
    # Quadrant correction: arctan only covers (-pi/2, pi/2), so shift
    # the angles whose (x, y) fall in the opposite half-plane by pi.
    longitudes[((x > 0.) & (y < 0.)) | ((x <= 0.) & (y <= 0.))] += np.pi
    # Spherical (distance, latitude, longitude) -> Cartesian.
    x_coordinates = stars['distance'] * np.cos(latitudes) * np.cos(longitudes)
    y_coordinates = stars['distance'] * np.cos(latitudes) * np.sin(longitudes)
    z_coordinates = stars['distance'] * np.sin(latitudes)
    return x_coordinates, y_coordinates, z_coordinates
| {
"repo_name": "wolvespack/alcor",
"path": "alcor/services/plots/utils.py",
"copies": "2",
"size": "2132",
"license": "mit",
"hash": -3468727625067533000,
"line_mean": 40.8039215686,
"line_max": 79,
"alpha_frac": 0.5844277674,
"autogenerated": false,
"ratio": 3.2699386503067487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9854366417706748,
"avg_score": 0,
"num_lines": 51
} |
from functools import partial
from maya import cmds
from maya import mel
import os
import miUvUtils
import commands
reload(commands)
reload(miUvUtils)
class MiUV(object):
def __init__(self):
self.windowName = "CustomUvEditor"
if cmds.window(self.windowName, q=True, exists=True) is True:
cmds.deleteUI(self.windowName)
else:
pass
self.currentDir = os.path.dirname(__file__)
self.iconDir = os.path.join(self.currentDir, "icons")
self.homeDir = os.path.expanduser("~")
self.uvTexturePath = os.path.normpath(os.path.join(
self.currentDir, "images/uvTexture.jpg"))
def createUI(self):
myWindow = cmds.window(
self.windowName,
h=800,
w=1000,
menuBar=True,
s=True)
mainLayout = cmds.formLayout(numberOfDivisions=100)
selectionLayout = cmds.columnLayout(adj=True, h=45)
cmds.text(label="Selection Constraint")
self.selectionModeRadioButton = cmds.radioButtonGrp(
labelArray4=['None', 'Shell', 'Border', 'Internal'],
numberOfRadioButtons=4,
select=1,
cw4=[100, 100, 100, 100],
on1=partial(self.changeSelectionConstraint, 1),
on2=partial(self.changeSelectionConstraint, 2),
on3=partial(self.changeSelectionConstraint, 3),
on4=partial(self.changeSelectionConstraint, 4))
cmds.setParent('..') # selectionLayout
shaderLayout = cmds.formLayout()
uvRepeatSlider = cmds.attrFieldSliderGrp(
label="Repeat",
min=0.0,
max=20.0,
parent=shaderLayout)
if cmds.objExists("uvShaderPlace2dTexture"):
cmds.attrFieldSliderGrp(
uvRepeatSlider,
edit=True,
at='uvShaderPlace2dTexture.repeatU')
else:
pass
uvShaderButton = cmds.button(
label="Create UV shader",
command=partial(
commands.createUVshader,
self.uvTexturePath,
uvRepeatSlider))
uvShaderAssignButton = cmds.button(
label="Assgin UV shader",
command=partial(
commands.assignUVshader))
cmds.setParent('..')
cmds.formLayout(
shaderLayout,
edit=True,
attachForm=[
(uvShaderButton, 'top', 2),
(uvShaderButton, 'left', 2),
(uvShaderAssignButton, 'top', 2),
(uvRepeatSlider, 'bottom', 2),
(uvRepeatSlider, 'left', 2),
(uvRepeatSlider, 'right', 2)
],
attachControl=[(uvShaderAssignButton, 'left', 2, uvShaderButton)])
# ### LEFT LAYOUT ####
leftLayout = cmds.columnLayout(adj=True, w=120)
cmds.gridLayout(numberOfColumns=3, cellWidthHeight=(40, 40))
cmds.iconTextButton(
image="%s/upLeft.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u -1 -v 1;')")
cmds.iconTextButton(
image="%s/up.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u 0 -v 1;')")
cmds.iconTextButton(
image="%s/upRight.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u 1 -v 1;')")
cmds.iconTextButton(
image="%s/left.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u -1 -v 0;')")
cmds.iconTextButton()
cmds.iconTextButton(
image="%s/right.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u 1 -v 0;')")
cmds.iconTextButton(
image="%s/bottomLeft.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u -1 -v -1;')")
cmds.iconTextButton(
image="%s/bottom.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u 0 -v -1;')")
cmds.iconTextButton(
image="%s/bottomRight.png" % self.iconDir,
commandRepeatable=True,
command="mel.eval('polyEditUV -u 1 -v -1;')")
cmds.setParent('..') # gridLayout
cmds.button(
label="UV Centric",
command="cmds.UVCentricUVLinkingEditor()")
cmds.button(label="UV Set Editor", command="cmds.UVSetEditor()")
cmds.button(
label="Transfer Attr",
command=miUvUtils.transferAttributesOptions)
cmds.separator(h=20)
cmds.text(label="Transfer UVs")
uvSpaceTransferRadioButton = cmds.radioButtonGrp(
labelArray2=["World", "Comp"],
numberOfRadioButtons=2,
select=2,
cw2=[60, 60])
cmds.button(
label="TRANSFER UVs",
command=partial(
self.radioButtonCommand,
uvSpaceTransferRadioButton,
commands.transferUVs))
cmds.text(label="*Select source mesh \nfirst, then targets")
cmds.separator(h=20)
cmds.text(label="Symmetrize UVs")
uvSymRadioButton = cmds.radioButtonGrp(
labelArray3=["X", "Y", "Z"],
numberOfRadioButtons=3,
select=1,
cw3=[40, 40, 40])
cmds.text(label="*Select target \nvertices")
cmds.button(
label="MIRROR UVs",
command=partial(
self.radioButtonCommand,
uvSymRadioButton,
commands.mirroringUVs))
cmds.separator(h=20)
cmds.button(
label="Fix UV radio",
command=partial(commands.fixUVscale))
cmds.button(
label="Copy UVs to map1",
command=partial(commands.copyUVsToMap1))
cmds.button(
label="Flip UVs by world",
command=partial(commands.flipUVsByWorld))
cmds.button(
label="Flip selected UVs",
command=miUvUtils.flipUVs)
cmds.button(
label="Grid UVs",
enable=False)
cmds.button(
label="Delete History",
command="cmds.DeleteHistory()")
cmds.separator(h=20)
cmds.text(label="Repeat UVs")
self.uMax = cmds.textField(h=25, text="type number of U here")
cmds.button(
label="UV horizontal repeat",
enable=True,
command=self.repeatUVs)
cmds.separator(h=20)
cmds.gridLayout(numberOfColumns=3, cellWidthHeight=(40, 40))
cmds.iconTextButton(
image="polyPlanProj.png",
imageOverlayLabel="___X",
olc=[1, 1, 0],
command="cmds.polyPlanarProjection(md='x', ibd=True, kir=True)",
commandRepeatable=True)
cmds.iconTextButton(
image="polyPlanProj.png",
imageOverlayLabel="___Y",
olc=[1, 1, 0],
command="cmds.polyPlanarProjection(md='y', ibd=True, kir=True)",
commandRepeatable=True)
cmds.iconTextButton(
image="polyPlanProj.png",
imageOverlayLabel="___Z",
olc=[1, 1, 0],
command="cmds.polyPlanarProjection(md='z', ibd=True, kir=True)",
commandRepeatable=True)
cmds.iconTextButton(
image="polyCylProj.png",
imageOverlayLabel="___X",
olc=[1, 1, 0],
command=(
"cmds.polyProjection(ch=1, kir=True, "
"type='cylindrical', ibd=True, sf=True, rx=90)"),
commandRepeatable=True)
cmds.iconTextButton(
image="polyCylProj.png",
imageOverlayLabel="___Y",
olc=[1, 1, 0],
command=(
"cmds.polyProjection(ch=1, kir=True, "
"type='cylindrical', ibd=True, sf=True, ry=90)"),
commandRepeatable=True)
cmds.iconTextButton(
image="polyCylProj.png",
imageOverlayLabel="___Z",
olc=[1, 1, 0],
command=(
"cmds.polyProjection(ch=1, kir=True, "
"type='cylindrical', ibd=True, sf=True, rz=90)"),
commandRepeatable=True)
cmds.iconTextButton(
image="polyAutoProj.png",
imageOverlayLabel="Auto",
olc=[1, 1, 0],
commandRepeatable=True,
command="mel.eval('performPolyAutoProj 0;')")
cmds.iconTextButton(
image="polyAutoProj.png",
imageOverlayLabel="Cam",
olc=[1, 1, 0],
commandRepeatable=True,
command="cmds.polyProjection(type='planar', md='p')")
cmds.setParent('..') # gridLayout
cmds.setParent('..') # leftLayout
# #### BOTTOM LAYOUT #####
bottomLayout = cmds.rowColumnLayout(
numberOfColumns=6,
h=45,
columnWidth=[
(1, 500),
(2, 40),
(3, 60),
(4, 60),
(5, 60),
(6, 60)])
cmds.text(label="QuickSnapShot", align="left")
cmds.text(label="Browes", align="left")
cmds.text(label="Size", align="left")
cmds.text(label="Tile Num", align="left")
cmds.text(label="format", align="left")
cmds.text(label="")
self.uvImagePath = cmds.textField(h=2)
initialPath = os.path.join(self.homeDir, "testImage.tif")
cmds.textField(self.uvImagePath, edit=True, text=initialPath)
cmds.symbolButton(
w=25,
h=25,
image="menuIconFile.png",
command=self.browseDirectoryPath)
self.uvImageResolution = cmds.optionMenuGrp(enable=True)
cmds.menuItem(label="8k")
cmds.menuItem(label="4k")
cmds.menuItem(label="2k")
cmds.menuItem(label="1k")
cmds.menuItem(label="512")
self.uvImageTileNumber = cmds.optionMenuGrp(h=25, enable=True)
for i in range(100):
tileNumber = "1" + str(i + 1).zfill(3)
cmds.menuItem(label=tileNumber)
self.uvImageFormat = cmds.optionMenuGrp(enable=True)
cmds.menuItem(label="tif")
cmds.menuItem(label="jpg")
cmds.menuItem(label="iff")
cmds.menuItem(label="sgi")
cmds.menuItem(label="pic")
cmds.menuItem(label="als")
cmds.menuItem(label="gif")
cmds.menuItem(label="rla")
cmds.button(
label="Export",
h=30,
command=self.takeUvSnapshot)
cmds.setParent('..')
# #### TEXTURE WINDOW LAYOUT #####
textureLayout = cmds.paneLayout(configuration='single')
pane = cmds.paneLayout(configuration="vertical2")
cmds.paneLayout(pane, e=True, paneSize=(1, 0, 0))
cmds.modelPanel(cam="perspShape", tearOff=False)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..') # mainLayout
# ## UNPARENT CURRENT UV TEXTURE EDITOR AND RE-PARENT TO MY EIDTOR ###
texturePanel = cmds.getPanel(scriptType='polyTexturePlacementPanel')[0]
cmds.scriptedPanel(texturePanel, edit=True, unParent=True)
mel.eval("fillEmptyPanes;")
cmds.scriptedPanel(texturePanel, edit=True, parent=pane)
# FORM LAYOUT
cmds.formLayout(
mainLayout,
e=True,
attachForm=[
(selectionLayout, 'top', 2),
(shaderLayout, 'top', 2),
(shaderLayout, 'right', 2),
(shaderLayout, 'right', 2),
(leftLayout, 'left', 2),
(leftLayout, 'top', 2),
(leftLayout, 'bottom', 2),
(textureLayout, 'left', 2),
(textureLayout, 'top', 2),
(textureLayout, 'right', 2),
(textureLayout, 'bottom', 2),
(bottomLayout, 'bottom', 2),
(bottomLayout, 'left', 2)],
attachControl=[
(selectionLayout, 'left', 2, leftLayout),
(leftLayout, 'bottom', 2, bottomLayout),
(shaderLayout, 'left', 2, selectionLayout),
(textureLayout, 'top', 2, selectionLayout),
(textureLayout, 'bottom', 2, bottomLayout),
(textureLayout, 'left', 2, leftLayout)])
# SHOW WINDOW
cmds.showWindow(myWindow)
def radioButtonCommand(self, radioButton, function, *args):
value = cmds.radioButtonGrp(
radioButton,
q=True,
select=True)
function(value)
def repeatUVs(self, *args):
value = cmds.textField(self.uMax, q=True, text=True)
commands.uvHorizontalRepeat(value)
def selectUVshellBorder(self):
mode = cmds.radioButtonGrp(
self.selectionModeRadioButton,
q=True,
select=True)
try:
sel = cmds.ls(sl=True, fl=True)[0]
nodeType = cmds.nodeType(sel)
if nodeType == "mesh":
if mode == 3:
cmds.SelectUVBorder()
elif mode == 4:
cmds.SelectUVShell()
shellUVs = cmds.ls(sl=True, fl=True)
cmds.SelectUVBorder()
border = cmds.ls(sl=True, fl=True)
cmds.select(border, toggle=True)
internalUVs = list(set(shellUVs) - set(border))
cmds.select(internalUVs, r=True)
except:
pass
def changeSelectionConstraint(self, *args):
mode = cmds.radioButtonGrp(
self.selectionModeRadioButton,
q=True,
select=True)
if mode == 1:
cmds.polySelectConstraint(disable=True)
elif mode == 2:
cmds.polySelectConstraint(disable=True)
cmds.polySelectConstraint(shell=True)
elif mode == 3:
cmds.polySelectConstraint(disable=True)
elif mode == 4:
cmds.polySelectConstraint(disable=True)
def takeUvSnapshot(self, *args):
    """Write a white-wireframe UV snapshot of the chosen UDIM tile to disk.

    Reads the output path, resolution, image format and UDIM tile from the
    UI widgets, converts the tile name into a UV unit square, and calls
    cmds.uvSnapshot.
    """
    fileName = cmds.textField(self.uvImagePath, q=True, text=True)
    resolution = cmds.optionMenuGrp(
        self.uvImageResolution,
        q=True,
        value=True)
    imageFormat = cmds.optionMenuGrp(
        self.uvImageFormat,
        q=True, value=True)
    tileNumber = cmds.optionMenuGrp(
        self.uvImageTileNumber,
        q=True,
        value=True)
    # Decode a 4-character UDIM tile name (e.g. "1012") into a UV square.
    # The u index cycles 0-9, so a trailing "0" (1010, 1020, ...) means
    # u == 9 on the *previous* v row — hence the special case.
    # (Dead `tileNumber[3] == 0` int comparison removed: indexing a str
    # always yields a str.)
    if tileNumber[3] == "0":
        uMin = 9.0
        vMin = float(int(tileNumber[2]) - 1)
    else:
        uMin = float(int(tileNumber[3]) - 1)
        vMin = float(tileNumber[2])
    uMax = uMin + 1.0
    vMax = vMin + 1.0
    # Menu label -> square pixel size. A lookup table replaces the if/elif
    # chain, which silently fell through to a NameError on unknown values.
    resolutions = {'8k': 8192, '4k': 4096, '2k': 2048, '1k': 1024, '512': 512}
    try:
        xRes = yRes = resolutions[resolution]
    except KeyError:
        raise ValueError('Unknown snapshot resolution: {}'.format(resolution))
    cmds.uvSnapshot(
        name=fileName,
        aa=True,
        fileFormat=imageFormat,
        xResolution=xRes,
        yResolution=yRes,
        overwrite=True,
        redColor=255,
        greenColor=255,
        blueColor=255,
        uMax=uMax,
        uMin=uMin,
        vMax=vMax,
        vMin=vMin)
def browseDirectoryPath(self, *args):
    """Open a file dialog and store the chosen path in the path text field.

    Fix: cmds.fileDialog2 returns None when the user cancels the dialog;
    the original indexed `[0]` unconditionally and raised TypeError.
    """
    basicFilter = "*All(*.*);;tif(*.tif);;jpg(*.jpg);;exr(*.exr);;tx(*.tx)"
    result = cmds.fileDialog2(
        fileFilter=basicFilter,
        ds=2,
        startingDirectory=self.homeDir)
    if not result:
        return  # dialog cancelled — leave the current path untouched
    self.returnPath = result[0]
    cmds.textField(self.uvImagePath, e=True, text=self.returnPath)
def main():
    """Build the MiUV window and wire a scriptJob to track selection changes."""
    uvwin = MiUV()
    uvwin.createUI()
    # Re-run the shell/border selection helper whenever the Maya selection
    # changes; parenting the job to the window kills it on window close.
    cmds.scriptJob(
        event=["SelectionChanged", uvwin.selectUVshellBorder],
        parent=uvwin.windowName)
    # Switch the embedded UV editor to solid-map display.
    # NOTE(review): hard-codes the panel name polyTexturePlacementPanel1
    # (Maya's default first UV editor panel) — verify for multi-panel
    # sessions.
    mel.eval("""textureWindow -edit -displaySolidMap 1 polyTexturePlacementPanel1; txtWndUpdateEditor("polyTexturePlacementPanel1", "textureWindow", "null", 101);""")


if __name__ == '__main__':
    main()
| {
"repo_name": "minoue/miUV",
"path": "miUV.py",
"copies": "1",
"size": "16517",
"license": "mit",
"hash": 309318118403127940,
"line_mean": 34.5969827586,
"line_max": 166,
"alpha_frac": 0.5352061512,
"autogenerated": false,
"ratio": 3.9223462360484445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9956814369748961,
"avg_score": 0.00014760349989675821,
"num_lines": 464
} |
from functools import partial
from menpobench.config import resolve_cache_dir
from menpobench.managed import WebSource, MENPO_CDN_URL, managed_asset
from menpobench.utils import create_path, extract_archive
MENPO_CDN_DATASET_URL = MENPO_CDN_URL + 'datasets/'
MENPO_GITHUB_URL_TEMPLATE = 'https://github.com/menpo/{name}/releases/download/{version}/{name}.tar.gz'
# ----------- Cache path management ---------- #
@create_path
def dataset_dir():
    """Return (and, via @create_path, create) the root dataset cache dir."""
    return resolve_cache_dir() / 'datasets'
@create_path
def download_dataset_dir():
    """Return (and create) the subdir where dataset archives are downloaded."""
    return dataset_dir() / 'dlcache'
# ----------- DatasetSource Classes ---------- #
class DatasetSource(WebSource):
    """A WebSource whose archives are cached in the dataset download dir."""

    def _download_cache_dir(self):
        # Hook consumed by WebSource to decide where archives are stored.
        return download_dataset_dir()
class CDNDatasetSource(DatasetSource):
    """A dataset hosted on the Team Menpo CDN as ``<name>.tar.gz``."""

    def __init__(self, name, sha1):
        archive_url = '{}{}.tar.gz'.format(MENPO_CDN_DATASET_URL, name)
        super(CDNDatasetSource, self).__init__(name, archive_url, sha1)

    def unpack(self):
        # The archive is ASSUMED to contain a single top-level folder named
        # after the asset, so extracting straight into the unpacked cache
        # dir leaves the asset exactly at unpacked_path().
        extract_archive(self.archive_path(), self._unpacked_cache_dir())
class GithubDatasetSource(DatasetSource):
    """A dataset published as a tar.gz release asset on menpo's GitHub."""

    def __init__(self, name, version, sha1):
        release_url = MENPO_GITHUB_URL_TEMPLATE.format(name=name,
                                                       version=version)
        super(GithubDatasetSource, self).__init__(name, release_url, sha1)

    def unpack(self):
        # The archive is ASSUMED to contain a single top-level folder named
        # after the asset, so extracting straight into the unpacked cache
        # dir leaves the asset exactly at unpacked_path().
        extract_archive(self.archive_path(), self._unpacked_cache_dir())
# --------------------------- MANAGED DATASETS ------------------------------ #
#
# Managed datasets that menpobench is aware of. These datasets will ideally be
# downloaded from the Team Menpo CDN dynamically and used for evaluations.
#
# To prepare a dataset for inclusion in menpobench via the CDN:
#
# 1. Prepare the folder for the dataset on disk as normal. Ensure only
# pertinent files are in the dataset folder. The name of the entire dataset
# folder should follow Python variable naming conventions - lower case words
# separated by underscores (e.g. `./dataset_name/`). Note that this name
# needs to be unique among all managed datasets.
#
# 2. tar.gz the entire folder:
# > tar -zcvf dataset_name.tar.gz ./dataset_name/
#
# 3. Record the SHA-1 checksum of the dataset archive:
# > shasum dataset_name.tar.gz
#
# 4. Upload the dataset archive to the Team Menpo CDN (contact github/jabooth
#    for details)
#
# 5. Add the dataset source to the _MANAGED_DATASET_LIST below as a
# CDNDatasetSource.
#
# Note that menpo-managed datasets are now stored on github, e.g.
# menpo/lfpw-train. See https://gist.github.com/jabooth/990a4c3eae4e188a13e2
# for a helpful gist for preparing releases of these datasets.
#
#
# Thunks rather than instances, so importing this module touches no network
# or filesystem state; each source is built on demand.
_MANAGED_DATASET_LIST = [
    lambda: GithubDatasetSource('lfpw-test', 'v2', 'a90afa0b8997dfe50d89bf361a1a2f7285c5e17a'),
    lambda: GithubDatasetSource('lfpw-train', 'v2', 'ba1a6f63b432f3e7ef0805943c699522a3029642')
]
# On import convert the list of datasets into a dict for easy access. Use this
# opportunity to verify the uniqueness of each dataset name.
MANAGED_DATASETS = {}
for dataset in _MANAGED_DATASET_LIST:
    # Invoking the thunk yields a source object carrying the canonical name.
    name = dataset().name
    if name in MANAGED_DATASETS:
        raise ValueError("Error - two managed datasets with name "
                         "'{}'".format(name))
    else:
        MANAGED_DATASETS[name] = dataset


# ----------- Magic dataset contextmanager ---------- #

# Usage: `with managed_dataset('lfpw-train') as p:` — downloads/unpacks the
# asset on demand and cleans up afterwards (cleanup=True).
managed_dataset = partial(managed_asset, MANAGED_DATASETS, cleanup=True)
| {
"repo_name": "nontas/menpobench",
"path": "menpobench/dataset/managed.py",
"copies": "2",
"size": "3967",
"license": "bsd-3-clause",
"hash": 5874648983486375000,
"line_mean": 35.7314814815,
"line_max": 103,
"alpha_frac": 0.690446181,
"autogenerated": false,
"ratio": 3.5419642857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009941832858499525,
"num_lines": 108
} |
from functools import partial
from menpo.shape.pointcloud import PointCloud
from menpofit.builder import compute_reference_shape
from menpofit.builder import rescale_images_to_reference_shape
from menpofit.fitter import (noisy_shape_from_bounding_box,
align_shape_with_bounding_box)
from pathlib import Path
import joblib
import menpo.feature
import menpo.image
import menpo.io as mio
import numpy as np
import tensorflow as tf
import detect
import utils
FLAGS = tf.app.flags.FLAGS
def build_reference_shape(paths, diagonal=200):
    """Builds the reference shape.

    Args:
        paths: paths that contain the ground truth landmark files.
        diagonal: the diagonal of the reference shape in pixels.
    Returns:
        the reference shape as a float32 numpy array [num_landmarks, 2].
    """
    landmarks = []
    for path in paths:
        parent = Path(path).parent.as_posix()
        # Keep only the 68-point annotations; other groups are discarded.
        for group in mio.import_landmark_files(parent, verbose=True):
            if group.lms.n_points == 68:
                landmarks.append(group.lms)
    reference = compute_reference_shape(landmarks, diagonal=diagonal)
    return reference.points.astype(np.float32)
def grey_to_rgb(im):
    """Converts a greyscale menpo Image to rgb (no-op if already rgb).

    Args:
        im: menpo Image with 1 or 3 channels.
    Returns:
        The same image object, with its pixels tripled when greyscale.
    """
    assert im.n_channels in [1, 3]
    if im.n_channels == 1:
        # Replicate the single channel three times along the channel axis.
        im.pixels = np.vstack([im.pixels] * 3)
    return im
def align_reference_shape(reference_shape, bb):
    """Scale and translate the reference shape to match bounding box *bb*."""
    lo = tf.reduce_min(reference_shape, 0)
    hi = tf.reduce_max(reference_shape, 0)
    # Axis-aligned bounding box of the reference shape as four corners.
    ref_bb = tf.pack([[lo[0], lo[1]], [hi[0], lo[1]],
                      [hi[0], hi[1]], [lo[0], hi[1]]])

    def norm(x):
        # RMS spread of the points around their centroid (times sqrt(n)).
        return tf.sqrt(tf.reduce_sum(tf.square(x - tf.reduce_mean(x, 0))))

    scale = norm(bb) / norm(ref_bb)
    return tf.add(
        (reference_shape - tf.reduce_mean(ref_bb, 0)) * scale,
        tf.reduce_mean(bb, 0),
        name='initial_shape')
def random_shape(gts, reference_shape, pca_model):
    """Generates a new shape estimate given the ground truth shape.

    Args:
        gts: a numpy array [num_landmarks, 2]
        reference_shape: a Tensor of dimensions [num_landmarks, 2]
        pca_model: A PCAModel that generates shapes.
    Returns:
        The aligned shape, as a Tensor [num_landmarks, 2].
    """
    def _sample_bb(lms):
        # numpy-side: sample a plausible detection box around the landmarks
        # from the PCA detection model.
        box = menpo.shape.PointCloud(lms).bounding_box()
        return detect.synthesize_detection(pca_model, box).points.astype(
            np.float32)

    sampled_bb, = tf.py_func(_sample_bb, [gts], [tf.float32])
    shape = align_reference_shape(reference_shape, sampled_bb)
    shape.set_shape(reference_shape.get_shape())
    return shape
def get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02):
    """Roughly aligns a reference shape to a bounding box.

    Uniform noise is applied to the translation and scale of the aligned
    shape (the middle entry — rotation noise — is kept at zero).

    Args:
        reference_shape: a numpy array [num_landmarks, 2]
        bb: bounding box, a numpy array [4, ]
        noise_percentage: noise percentage to add.
    Returns:
        The aligned shape, as a numpy array [num_landmarks, 2]
    """
    ref = PointCloud(reference_shape)
    noisy_bb = noisy_shape_from_bounding_box(
        ref,
        PointCloud(bb),
        noise_percentage=[noise_percentage, 0, noise_percentage]
    ).bounding_box()
    return align_shape_with_bounding_box(ref, noisy_bb).points
def load_images(paths, group=None, verbose=True):
    """Loads and rescales input images to the diagonal of the reference shape.

    Also exports the computed reference shape to
    ``FLAGS.train_dir/reference_shape.pkl`` as a side effect.

    Args:
        paths: a list of strings containing the data directories.
        group: landmark group containing the ground truth landmarks.
        verbose: boolean, print debugging info.
    Returns:
        images: a numpy array of zero-padded images [n, max_h, max_w, 3].
        shapes: a list of the ground truth landmarks.
        reference_shape: a numpy array [num_landmarks, 2].
        shape_gen: PCAModel, a shape generator.
    """
    images = []
    shapes = []
    bbs = []
    reference_shape = PointCloud(build_reference_shape(paths))
    for path in paths:
        if verbose:
            print('Importing data from {}'.format(path))
        for im in mio.import_images(path, verbose=verbose, as_generator=True):
            # NOTE(review): on the first image with group=None this indexes
            # im.landmarks[None] to discover the group label — presumably
            # menpo resolves None to the sole group; verify.
            group = group or im.landmarks[group]._group_label
            # Locate the matching bounding-box .pts under ./bbs, mirroring
            # the image's directory layout (with or without a 'set' level).
            bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
            if 'set' not in str(bb_root):
                bb_root = im.path.parent.relative_to(im.path.parent.parent)
            im.landmarks['bb'] = mio.import_landmark_file(str(Path(
                'bbs') / bb_root / (im.path.stem + '.pts')))
            im = im.crop_to_landmarks_proportion(0.3, group='bb')
            im = im.rescale_to_pointcloud(reference_shape, group=group)
            im = grey_to_rgb(im)
            # Channels-last layout for TensorFlow.
            images.append(im.pixels.transpose(1, 2, 0))
            shapes.append(im.landmarks[group].lms)
            bbs.append(im.landmarks['bb'].lms)
    train_dir = Path(FLAGS.train_dir)
    mio.export_pickle(reference_shape.points, train_dir / 'reference_shape.pkl', overwrite=True)
    print('created reference_shape.pkl using the {} group'.format(group))
    pca_model = detect.create_generator(shapes, bbs)
    # Pad images to max length
    max_shape = np.max([im.shape for im in images], axis=0)
    max_shape = [len(images)] + list(max_shape)
    # Random (not zero) padding around each centred image.
    padded_images = np.random.rand(*max_shape).astype(np.float32)
    print(padded_images.shape)
    for i, im in enumerate(images):
        height, width = im.shape[:2]
        dy = max(int((max_shape[1] - height - 1) / 2), 0)
        dx = max(int((max_shape[2] - width - 1) / 2), 0)
        lms = shapes[i]
        pts = lms.points
        # Shift landmarks by the padding offset. The += mutates lms.points
        # in place, so shapes[i] is already updated.
        pts[:, 0] += dy
        pts[:, 1] += dx
        # NOTE(review): the rebuilt lms is never stored back into shapes[i];
        # only the in-place pts mutation takes effect — confirm intended.
        lms = lms.from_vector(pts)
        padded_images[i, dy:(height+dy), dx:(width+dx)] = im
    return padded_images, shapes, reference_shape.points, pca_model
def load_image(path, reference_shape, is_training=False, group='PTS',
               mirror_image=False):
    """Load an annotated image.

    In the directory of the provided image file, there
    should exist a landmark file (.pts) with the same
    basename as the image file.

    Args:
        path: a path containing an image file.
        reference_shape: a numpy array [num_landmarks, 2]
        is_training: whether in training mode or not.
        group: landmark group containing the ground truth landmarks.
        mirror_image: flips horizontally the image's pixels and landmarks.
    Returns:
        pixels: a numpy array [width, height, 3].
        gt_truth: the ground truth landmarks, a numpy array [68, 2].
        estimate: an initial estimate a numpy array [68, 2].
    """
    im = mio.import_image(path)
    # Locate the matching bounding-box .pts under ./bbs, mirroring the
    # image's directory layout (with or without a 'set' level).
    bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
    if 'set' not in str(bb_root):
        bb_root = im.path.parent.relative_to(im.path.parent.parent)
    im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (
        im.path.stem + '.pts')))
    im = im.crop_to_landmarks_proportion(0.3, group='bb')
    reference_shape = PointCloud(reference_shape)
    bb = im.landmarks['bb'].lms.bounding_box()
    # Initial estimate: the reference shape aligned to the detection box.
    im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape,
                                                              bb)
    im = im.rescale_to_pointcloud(reference_shape, group='__initial')
    if mirror_image:
        im = utils.mirror_image(im)
    lms = im.landmarks[group].lms
    initial = im.landmarks['__initial'].lms
    # if the image is greyscale then convert to rgb.
    pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)
    gt_truth = lms.points.astype(np.float32)
    estimate = initial.points.astype(np.float32)
    return pixels.astype(np.float32).copy(), gt_truth, estimate
def distort_color(image, thread_id=0, stddev=0.1, scope=None):
    """Distort the color of the image.

    Each color distortion is non-commutative and thus ordering of the color ops
    matters. Ideally we would randomly permute the ordering of the color ops.
    Rather then adding that level of complication, we select a distinct ordering
    of color ops for each preprocessing thread.

    Args:
        image: Tensor containing single image, values in [0, 1].
        thread_id: preprocessing thread ID.
        stddev: standard deviation of the additive Gaussian noise.
        scope: Optional scope for op_scope.
    Returns:
        color-distorted image
    """
    with tf.op_scope([image], scope, 'distort_color'):
        # Two fixed op orderings, alternating by preprocessing thread.
        color_ordering = thread_id % 2

        if color_ordering == 0:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        elif color_ordering == 1:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)

        # Additive pixel-wise Gaussian noise (fixed seed for repeatability).
        image += tf.random_normal(
            tf.shape(image),
            stddev=stddev,
            dtype=tf.float32,
            seed=42,
            name='add_gaussian_noise')
        # The random_* ops do not necessarily clamp.
        image = tf.clip_by_value(image, 0.0, 1.0)
        return image
def batch_inputs(paths,
                 reference_shape,
                 batch_size=32,
                 is_training=False,
                 num_landmarks=68,
                 mirror_image=False):
    """Reads the files off the disk and produces batches.

    Args:
        paths: a list of directories that contain training images and
            the corresponding landmark files.
        reference_shape: a numpy array [num_landmarks, 2]
        batch_size: the batch size.
        is_training: whether in training mode.
        num_landmarks: the number of landmarks in the training images.
        mirror_image: mirrors the image and landmarks horizontally.
    Returns:
        images: a tf tensor of shape [batch_size, width, height, 3].
        lms: a tf tensor of shape [batch_size, 68, 2].
        lms_init: a tf tensor of shape [batch_size, 68, 2].
        shapes: per-image [h, w, c] shapes (for dynamic padding).
    """
    # Each entry of `paths` is treated as a glob pattern relative to its
    # parent directory; shuffle only while training.
    files = tf.concat(0, [map(str, sorted(Path(d).parent.glob(Path(d).name)))
                          for d in paths])

    filename_queue = tf.train.string_input_producer(files,
                                                    shuffle=is_training,
                                                    capacity=1000)

    filename = filename_queue.dequeue()

    # Delegate the actual decoding/augmentation to the numpy-side loader.
    image, lms, lms_init = tf.py_func(
        partial(load_image, is_training=is_training,
                mirror_image=mirror_image),
        [filename, reference_shape],  # input arguments
        [tf.float32, tf.float32, tf.float32],  # output types
        name='load_image'
    )

    # The image has always 3 channels.
    image.set_shape([None, None, 3])

    if is_training:
        image = distort_color(image)

    lms = tf.reshape(lms, [num_landmarks, 2])
    lms_init = tf.reshape(lms_init, [num_landmarks, 2])

    # dynamic_pad zero-pads variable-size images to the batch maximum.
    images, lms, inits, shapes = tf.train.batch(
        [image, lms, lms_init, tf.shape(image)],
        batch_size=batch_size,
        num_threads=4 if is_training else 1,
        capacity=1000,
        enqueue_many=False,
        dynamic_pad=True)

    return images, lms, inits, shapes
| {
"repo_name": "trigeorgis/mdm",
"path": "data_provider.py",
"copies": "1",
"size": "11900",
"license": "bsd-3-clause",
"hash": -7535956031189412000,
"line_mean": 34.5223880597,
"line_max": 96,
"alpha_frac": 0.6125210084,
"autogenerated": false,
"ratio": 3.644716692189893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975607257295555,
"avg_score": 0.0002330255268685477,
"num_lines": 335
} |
from functools import partial
from mimetypes import guess_type
from os import path
from urllib.parse import quote_plus
from aiofiles import open as open_async
from multidict import CIMultiDict
from sanic.cookies import CookieJar
from sanic.helpers import STATUS_CODES, has_message_body, remove_entity_headers
# Prefer the faster ujson serializer when available; otherwise fall back to
# the stdlib json module with compact separators.
try:
    from ujson import dumps as json_dumps
except BaseException:
    from json import dumps

    # This is done in order to ensure that the JSON response is
    # kept consistent across both ujson and inbuilt json usage.
    json_dumps = partial(dumps, separators=(",", ":"))
class BaseHTTPResponse:
    """Shared helpers for plain and streaming HTTP responses."""

    def _encode_body(self, data):
        """Return *data* as bytes, stringifying values without .encode()."""
        try:
            # Try to encode it regularly
            return data.encode()
        except AttributeError:
            # Convert it to a str if you can't
            return str(data).encode()

    def _parse_headers(self):
        """Serialize self.headers into raw ``name: value\\r\\n`` bytes."""
        parts = []
        for name, value in self.headers.items():
            try:
                parts.append(
                    b"%b: %b\r\n" % (name.encode(), value.encode("utf-8"))
                )
            except AttributeError:
                # Non-string header names/values are stringified first.
                parts.append(
                    b"%b: %b\r\n"
                    % (str(name).encode(), str(value).encode("utf-8"))
                )
        return b"".join(parts)

    @property
    def cookies(self):
        # Lazily build the CookieJar over our headers on first access.
        if self._cookies is None:
            self._cookies = CookieJar(self.headers)
        return self._cookies
class StreamingHTTPResponse(BaseHTTPResponse):
    """A chunked (Transfer-Encoding: chunked) response whose body is
    produced incrementally by a user-supplied coroutine ``streaming_fn``."""

    __slots__ = (
        "protocol",
        "streaming_fn",
        "status",
        "content_type",
        "headers",
        "_cookies",
    )

    def __init__(
        self, streaming_fn, status=200, headers=None, content_type="text/plain"
    ):
        self.content_type = content_type
        self.streaming_fn = streaming_fn
        self.status = status
        self.headers = CIMultiDict(headers or {})
        self._cookies = None

    async def write(self, data):
        """Writes a chunk of data to the streaming response.

        :param data: bytes-ish data to be written.
        """
        if type(data) != bytes:
            data = self._encode_body(data)

        # Chunked transfer framing: "<hex size>\r\n<payload>\r\n".
        self.protocol.push_data(b"%x\r\n%b\r\n" % (len(data), data))
        await self.protocol.drain()

    async def stream(
        self, version="1.1", keep_alive=False, keep_alive_timeout=None
    ):
        """Streams headers, runs the `streaming_fn` callback that writes
        content to the response body, then finalizes the response body.
        """
        headers = self.get_headers(
            version,
            keep_alive=keep_alive,
            keep_alive_timeout=keep_alive_timeout,
        )
        self.protocol.push_data(headers)
        await self.protocol.drain()
        await self.streaming_fn(self)
        # A zero-length chunk terminates a chunked body.
        self.protocol.push_data(b"0\r\n\r\n")
        # no need to await drain here after this write, because it is the
        # very last thing we write and nothing needs to wait for it.

    def get_headers(
        self, version="1.1", keep_alive=False, keep_alive_timeout=None
    ):
        """Serialize the status line and headers for a chunked response."""
        # This is all returned in a kind-of funky way
        # We tried to make this as fast as possible in pure python
        timeout_header = b""
        if keep_alive and keep_alive_timeout is not None:
            timeout_header = b"Keep-Alive: %d\r\n" % keep_alive_timeout

        self.headers["Transfer-Encoding"] = "chunked"
        self.headers.pop("Content-Length", None)
        self.headers["Content-Type"] = self.headers.get(
            "Content-Type", self.content_type
        )

        headers = self._parse_headers()

        if self.status == 200:
            status = b"OK"
        else:
            # Fix: supply a default reason phrase for unknown status codes,
            # matching HTTPResponse.output — without it, .get() returned
            # None and the %b formatting below raised TypeError.
            status = STATUS_CODES.get(self.status, b"UNKNOWN RESPONSE")

        return (b"HTTP/%b %d %b\r\n" b"%b" b"%b\r\n") % (
            version.encode(),
            self.status,
            status,
            timeout_header,
            headers,
        )
class HTTPResponse(BaseHTTPResponse):
    """A fully-buffered HTTP response with a fixed-length body."""

    __slots__ = ("body", "status", "content_type", "headers", "_cookies")

    def __init__(
        self,
        body=None,
        status=200,
        headers=None,
        content_type="text/plain",
        body_bytes=b"",
    ):
        self.content_type = content_type

        # `body` (str-ish, gets encoded) takes precedence over the
        # already-encoded `body_bytes`.
        if body is not None:
            self.body = self._encode_body(body)
        else:
            self.body = body_bytes

        self.status = status
        self.headers = CIMultiDict(headers or {})
        self._cookies = None

    def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None):
        """Serialize the whole response (status line, headers, body) to bytes."""
        # This is all returned in a kind-of funky way
        # We tried to make this as fast as possible in pure python
        timeout_header = b""
        if keep_alive and keep_alive_timeout is not None:
            timeout_header = b"Keep-Alive: %d\r\n" % keep_alive_timeout

        body = b""
        # Only emit a body (and Content-Length) for statuses that allow one
        # (e.g. not 204/304).
        if has_message_body(self.status):
            body = self.body
            self.headers["Content-Length"] = self.headers.get(
                "Content-Length", len(self.body)
            )

        self.headers["Content-Type"] = self.headers.get(
            "Content-Type", self.content_type
        )

        # 304 Not Modified / 412 Precondition Failed must not carry
        # entity headers.
        if self.status in (304, 412):
            self.headers = remove_entity_headers(self.headers)

        headers = self._parse_headers()

        if self.status == 200:
            status = b"OK"
        else:
            status = STATUS_CODES.get(self.status, b"UNKNOWN RESPONSE")

        return (
            b"HTTP/%b %d %b\r\n" b"Connection: %b\r\n" b"%b" b"%b\r\n" b"%b"
        ) % (
            version.encode(),
            self.status,
            status,
            b"keep-alive" if keep_alive else b"close",
            timeout_header,
            headers,
            body,
        )

    @property
    def cookies(self):
        # Lazily build the CookieJar over our headers on first access.
        if self._cookies is None:
            self._cookies = CookieJar(self.headers)
        return self._cookies
def json(
    body,
    status=200,
    headers=None,
    content_type="application/json",
    dumps=json_dumps,
    **kwargs
):
    """
    Returns response object with body in json format.

    :param body: Response data to be serialized.
    :param status: Response code.
    :param headers: Custom Headers.
    :param content_type: the content type (string) of the response.
    :param dumps: serializer to use (defaults to ujson/stdlib dumps).
    :param kwargs: Remaining arguments that are passed to the json encoder.
    """
    serialized = dumps(body, **kwargs)
    return HTTPResponse(serialized, headers=headers, status=status,
                        content_type=content_type)
def text(
    body, status=200, headers=None, content_type="text/plain; charset=utf-8"
):
    """
    Returns response object with body in text format.

    :param body: Response data to be encoded.
    :param status: Response code.
    :param headers: Custom Headers.
    :param content_type: the content type (string) of the response
    """
    return HTTPResponse(body, status=status, headers=headers,
                        content_type=content_type)
def raw(
    body, status=200, headers=None, content_type="application/octet-stream"
):
    """
    Returns response object without encoding the body.

    :param body: Response data (bytes, passed through verbatim).
    :param status: Response code.
    :param headers: Custom Headers.
    :param content_type: the content type (string) of the response.
    """
    return HTTPResponse(status=status, headers=headers,
                        content_type=content_type, body_bytes=body)
def html(body, status=200, headers=None):
    """
    Returns response object with body in html format.

    :param body: Response data to be encoded.
    :param status: Response code.
    :param headers: Custom Headers.
    """
    return HTTPResponse(body, status=status, headers=headers,
                        content_type="text/html; charset=utf-8")
async def file(
    location,
    status=200,
    mime_type=None,
    headers=None,
    filename=None,
    _range=None,
):
    """Return a response object with file data.

    :param location: Location of file on system.
    :param mime_type: Specific mime_type (guessed from filename if omitted).
    :param headers: Custom Headers.
    :param filename: Override filename (also sets Content-Disposition).
    :param _range: optional object with start/end/size/total attributes;
        when given, a 206 partial response is produced.
    """
    headers = headers or {}
    if filename:
        headers.setdefault(
            "Content-Disposition", 'attachment; filename="{}"'.format(filename)
        )
    filename = filename or path.split(location)[-1]

    async with open_async(location, mode="rb") as handle:
        if _range is None:
            body = await handle.read()
        else:
            # Serve only the requested byte range.
            await handle.seek(_range.start)
            body = await handle.read(_range.size)
            headers["Content-Range"] = "bytes %s-%s/%s" % (
                _range.start,
                _range.end,
                _range.total,
            )
            status = 206

    return HTTPResponse(
        status=status,
        headers=headers,
        content_type=mime_type or guess_type(filename)[0] or "text/plain",
        body_bytes=body,
    )
async def file_stream(
    location,
    status=200,
    chunk_size=4096,
    mime_type=None,
    headers=None,
    filename=None,
    _range=None,
):
    """Return a streaming response object with file data.

    :param location: Location of file on system.
    :param chunk_size: The size of each chunk in the stream (in bytes)
    :param mime_type: Specific mime_type.
    :param headers: Custom Headers.
    :param filename: Override filename.
    :param _range: optional object with start/end/size/total attributes;
        when given, a 206 partial response is produced.
    """
    headers = headers or {}
    if filename:
        headers.setdefault(
            "Content-Disposition", 'attachment; filename="{}"'.format(filename)
        )
    filename = filename or path.split(location)[-1]

    # Opened eagerly; closed by the streaming callback's finally block.
    # NOTE(review): if the response is never streamed, the handle is not
    # closed here — verify callers always stream the response.
    _file = await open_async(location, mode="rb")

    async def _streaming_fn(response):
        nonlocal _file, chunk_size
        try:
            if _range:
                # Never read past the requested range.
                chunk_size = min((_range.size, chunk_size))
                await _file.seek(_range.start)
                to_send = _range.size
                while to_send > 0:
                    content = await _file.read(chunk_size)
                    if len(content) < 1:
                        break
                    to_send -= len(content)
                    await response.write(content)
            else:
                while True:
                    content = await _file.read(chunk_size)
                    if len(content) < 1:
                        break
                    await response.write(content)
        finally:
            await _file.close()
        return  # Returning from this fn closes the stream

    mime_type = mime_type or guess_type(filename)[0] or "text/plain"
    if _range:
        headers["Content-Range"] = "bytes %s-%s/%s" % (
            _range.start,
            _range.end,
            _range.total,
        )
        status = 206
    return StreamingHTTPResponse(
        streaming_fn=_streaming_fn,
        status=status,
        headers=headers,
        content_type=mime_type,
    )
def stream(
    streaming_fn,
    status=200,
    headers=None,
    content_type="text/plain; charset=utf-8",
):
    """Accepts an coroutine `streaming_fn` which can be used to
    write chunks to a streaming response. Returns a `StreamingHTTPResponse`.

    Example usage::

        @app.route("/")
        async def index(request):
            async def streaming_fn(response):
                await response.write('foo')
                await response.write('bar')

            return stream(streaming_fn, content_type='text/plain')

    :param streaming_fn: A coroutine accepts a response and
        writes content to that response.
    :param content_type: the content type (string) of the response.
    :param headers: Custom Headers.
    """
    return StreamingHTTPResponse(streaming_fn, headers=headers,
                                 content_type=content_type, status=status)
def redirect(
    to, headers=None, status=302, content_type="text/html; charset=utf-8"
):
    """Abort execution and cause a 302 redirect (by default).

    :param to: path or fully qualified URL to redirect to
    :param headers: optional dict of headers to include in the new request
    :param status: status code (int) of the new request, defaults to 302
    :param content_type: the content type (string) of the response
    :returns: the redirecting Response
    """
    response_headers = headers or {}
    # Quote the target but leave URL-structural characters intact.
    # According to RFC 7231, a relative URI is now permitted.
    response_headers["Location"] = quote_plus(to, safe=":/%#?&=@[]!$&'()*+,;")
    return HTTPResponse(status=status, headers=response_headers,
                        content_type=content_type)
| {
"repo_name": "lixxu/sanic",
"path": "sanic/response.py",
"copies": "1",
"size": "12676",
"license": "mit",
"hash": -8688221657318452000,
"line_mean": 27.874715262,
"line_max": 79,
"alpha_frac": 0.5775481224,
"autogenerated": false,
"ratio": 4.0602178090967325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 439
} |
from functools import partial
from mkcrowbar import zypper
from fake import LocalCommand, LocalCommands, expect_args, return_ok
def stub_cmd(required, *args):
    """Build a fake 'zypper' LocalCommand that asserts *required* args."""
    fake = LocalCommand('zypper', expect_args(required, return_ok()))
    # Exercise argument binding the same way plumbum commands do.
    fake[args]
    return fake
def test_cmd(monkeypatch):
    """zypper.cmd() always prepends the non-interactive / no-gpg flags."""
    fake_local = LocalCommands()
    monkeypatch.setattr('mkcrowbar.zypper.local', fake_local)
    fake_local.has('zypper', expect_args(
        ['--non-interactive', '--no-gpg-checks'], return_ok()))

    (code, _, _) = zypper.cmd('update').run(retcode=0)
    assert code is 0
def test_repo_exists():
    # NOTE(review): this test performs real HTTP requests against
    # download.opensuse.org, so it is slow and fails offline — consider
    # stubbing like the other tests in this module.
    ret = zypper.repo_exists('http://download.opensuse.org/tumbleweed/repo/oss/')
    assert ret is True
    ret = zypper.repo_exists('http://download.opensuse.org/xxxxxxxxxxxxxxxxxxxxxxxx/')
    assert ret is False
    # Not a URL at all.
    ret = zypper.repo_exists('asidjsad')
    assert ret is False
def test_repo_enable(monkeypatch):
    """repo_enable builds the right 'ar' invocation for every call style."""
    url = 'http://download.opensuse.org/tumbleweed/repo/oss/'
    alias = 'tumbleweed-oss'
    repo_file = 'http://..../openSUSE_Tumbleweed/devel:languages:python3.repo'

    # simple call: a .repo file only
    monkeypatch.setattr('mkcrowbar.zypper.cmd', partial(stub_cmd, [repo_file]))
    (code, _, _) = zypper.repo_enable(repo_file)
    assert code is 0

    # url / alias call
    monkeypatch.setattr('mkcrowbar.zypper.cmd',
                        partial(stub_cmd, ['ar', url, alias]))
    (code, _, _) = zypper.repo_enable(url, alias)
    assert code is 0

    # with extra settings
    monkeypatch.setattr('mkcrowbar.zypper.cmd',
                        partial(stub_cmd, ['ar', repo_file, '--some-settings']))
    (code, _, _) = zypper.repo_enable(repo_file,
                                      settings={'some-settings': 'foo'})
    assert code is 0
def test_refresh(monkeypatch):
    """refresh() maps onto 'zypper ref'."""
    monkeypatch.setattr('mkcrowbar.zypper.cmd', partial(stub_cmd, ['ref']))
    code = zypper.refresh()[0]
    assert code is 0
def test_install(monkeypatch):
    """install() maps onto 'zypper in <pkgs>'."""
    monkeypatch.setattr('mkcrowbar.zypper.cmd', partial(stub_cmd, ['in', 'foo']))
    code = zypper.install(['foo'])[0]
    assert code is 0
def test_remove(monkeypatch):
    """remove() maps onto 'zypper rm <pkgs>'."""
    monkeypatch.setattr('mkcrowbar.zypper.cmd', partial(stub_cmd, ['rm', 'foo']))
    code = zypper.remove(['foo'])[0]
    assert code is 0
| {
"repo_name": "felixsch/mkcrowbar",
"path": "tests/test_zypper.py",
"copies": "1",
"size": "2234",
"license": "apache-2.0",
"hash": -6181761813278891000,
"line_mean": 28.3947368421,
"line_max": 104,
"alpha_frac": 0.6629364369,
"autogenerated": false,
"ratio": 3.1464788732394364,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43094153101394367,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from mongoengine.queryset.queryset import QuerySet
__all__ = ('queryset_manager', 'QuerySetManager')
class QuerySetManager(object):
    """
    The default QuerySet Manager.

    Custom QuerySet Manager functions can extend this class and users can
    add extra queryset functionality.  Any custom manager methods must accept a
    :class:`~mongoengine.Document` class as its first argument, and a
    :class:`~mongoengine.queryset.QuerySet` as its second argument.

    The method function should return a :class:`~mongoengine.queryset.QuerySet`
    , probably the same one that was passed in, but modified in some way.
    """

    get_queryset = None
    default = QuerySet

    def __init__(self, queryset_func=None):
        # Optional override for building the queryset (see queryset_manager).
        if queryset_func:
            self.get_queryset = queryset_func

    def __get__(self, instance, owner):
        """Descriptor for instantiating a new QuerySet object when
        Document.objects is accessed.
        """
        if instance is not None:
            # Document class being used rather than a document object
            return self

        # owner is the document that contains the QuerySetManager
        queryset_class = owner._meta.get('queryset_class', self.default)
        queryset = queryset_class(owner, owner._get_collection())
        if self.get_queryset:
            # Use __code__ (available since Python 2.6) instead of the
            # Python-2-only func_code alias, so this also works on Python 3.
            arg_count = self.get_queryset.__code__.co_argcount
            if arg_count == 1:
                # Custom manager that only takes the queryset.
                queryset = self.get_queryset(queryset)
            elif arg_count == 2:
                # Standard signature: (document_class, queryset).
                queryset = self.get_queryset(owner, queryset)
            else:
                # Extra params expected — return a partially-bound callable.
                queryset = partial(self.get_queryset, owner, queryset)
        return queryset
def queryset_manager(func):
    """Decorator that allows you to define custom QuerySet managers on
    :class:`~mongoengine.Document` classes. The manager must be a function that
    accepts a :class:`~mongoengine.Document` class as its first argument, and a
    :class:`~mongoengine.queryset.QuerySet` as its second argument. The method
    function should return a :class:`~mongoengine.queryset.QuerySet`, probably
    the same one that was passed in, but modified in some way.
    """
    # Simply wrap the function in a descriptor-based manager.
    return QuerySetManager(queryset_func=func)
| {
"repo_name": "9nix00/mongoengine",
"path": "mongoengine/queryset/manager.py",
"copies": "44",
"size": "2230",
"license": "mit",
"hash": 1915849601356725000,
"line_mean": 38.1228070175,
"line_max": 79,
"alpha_frac": 0.6730941704,
"autogenerated": false,
"ratio": 4.645833333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import requests
import bs4
BS = partial(bs4.BeautifulSoup, features='html.parser')
class UrlGetter(object):
    """Singleton-style access to a thread pool that fetches URLs in parallel."""

    instance = None
    single_page_instance = None

    @staticmethod
    def get_instance(thread_count: int = 8):
        """Return the cached multi-threaded getter, creating it on demand."""
        if not UrlGetter.instance:
            UrlGetter.instance = UrlGetter.UrlGetterSubClass(thread_count=thread_count)
        return UrlGetter.instance

    @staticmethod
    def get_single_page_instance():
        """Return the cached single-threaded getter, creating it on demand."""
        if not UrlGetter.single_page_instance:
            UrlGetter.single_page_instance = UrlGetter.UrlGetterSubClass(thread_count=1)
        return UrlGetter.single_page_instance

    class UrlGetterSubClass(object):
        def __init__(self, thread_count: int = 8):
            self.pool = ThreadPool(thread_count)

        def get_urls(self, urls: list = None):
            """Fetch all *urls* in parallel; returns None for empty input.

            The pool is single-use: it is closed after the batch, so the
            cached singleton pointing at this object is cleared.
            """
            if not urls:
                return None
            results = self.pool.map(requests.get, urls)
            self.pool.close()
            self.pool.join()
            # Fix: clear whichever singleton caches *this* (now unusable)
            # object. The original unconditionally cleared only
            # UrlGetter.instance, leaving single_page_instance cached with
            # a closed pool (subsequent calls would crash).
            if UrlGetter.instance is self:
                UrlGetter.instance = None
            if UrlGetter.single_page_instance is self:
                UrlGetter.single_page_instance = None
            return results
if __name__ == '__main__':
    # Demo run: fetch a handful of pages concurrently and dump the responses.
    url_list = [
        'http://omz-software.com/pythonista/docs/ios/beautifulsoup_guide.html',
        'http://www.crummy.com/software/BeautifulSoup/bs4/doc/',
        'https://www.google.sk/search?client=ubuntu&channel=fs&q=bs4+tutorial&ie=utf-8&oe=utf-8&gws_rd=cr&ei=bfvtVduWFcussgHU8b3YCw',
        'http://www.byty.sk/bratislava/prenajom?p[param10]=18&p[distance]=50&p[param1][to]=400&p[foto]=1&p[page]=1',
        'http://chriskiehl.com/article/parallelism-in-one-line/',
    ]
    fetched = UrlGetter.get_instance().get_urls(url_list)
    print(fetched)
| {
"repo_name": "hronecviktor/reality_parse",
"path": "rparse.py",
"copies": "1",
"size": "1760",
"license": "mit",
"hash": -6608139471875193000,
"line_mean": 34.2,
"line_max": 120,
"alpha_frac": 0.65,
"autogenerated": false,
"ratio": 3.52,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46699999999999997,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from multiprocessing import Pool
import numpy as np
import cv2
try:
import cv_gpu
use_gpu = True
except ImportError:
use_gpu = False
# From Programming Computer Vision in Python
# some constants and default parameters
# Lucas-Kanade optical-flow parameters: search window, pyramid depth, and
# iteration termination criteria (max 10 iterations or epsilon 0.03).
lk_params = dict(winSize=(15, 15), maxLevel=2,
                criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Sub-pixel corner refinement parameters (max 20 iterations or epsilon 0.03).
subpix_params = dict(zeroZone=(-1, -1), winSize=(10, 10),
                    criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 20, 0.03))
# goodFeaturesToTrack parameters: at most 500 corners of quality >= 0.4,
# at least 7 px apart.
feature_params = dict(maxCorners=500, qualityLevel=0.4, minDistance=7, blockSize=7)
def hulls_from_features(tr, hulls):
    """Return the index of the hull in *hulls* nearest to (or containing)
    the first point of track *tr*.

    *tr* is a track (sequence of (x, y) points); *hulls* is a sequence of
    convex hulls as produced by ``cv2.convexHull``. Uses the signed
    distance from ``cv2.pointPolygonTest`` (positive = inside), so the
    hull with the largest value is the best match.
    """
    point = tr[0]
    x, y = point
    distances_gen = (cv2.pointPolygonTest(h, (x, y), True) for h in hulls)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin float (the same type the alias referred to).
    distances = np.fromiter(distances_gen, float, count=len(hulls))
    max_hull_index = np.argmax(distances)
    return max_hull_index
def test_in_hull(h, x, y):
    """Signed distance from point (x, y) to hull *h* (positive = inside)."""
    distance = cv2.pointPolygonTest(h, (x, y), True)
    return distance
class LKTracker(object):
    """Class for Lucas-Kanade tracking with
    pyramidal optical flow."""
    def __init__(self, image):
        """Initialize parameters, and store the first image. """
        self.image = image
        # Most recently detected corner features (array from cv2).
        self.features = []
        # List of tracks; each track is a list of (x, y) positions.
        self.tracks = []
        # Maximum number of positions kept per track.
        self.track_len = 10
        self.current_frame = 0
        # Replenish lost feature points every `interval` frames.
        self.interval = 5
        self.mser = cv2.MSER()
        # Use the GPU-backed helper when cv_gpu imported successfully,
        # otherwise fall back to plain cv2 for the accelerated calls.
        self.cvh = cv_gpu.GPU() if use_gpu else cv2
        # Process pool used to parallelize hull assignment in draw().
        self.pool = Pool()
    def step(self, next_image):
        """Step to another frame."""
        self.current_frame += 1
        self.image = next_image
    def detect_points(self):
        """Detect 'good features to track' (corners) in the current frame
        using sub-pixel accuracy. """
        # load the image and create grayscale
        self.gray = self.cvh.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        # search for good points
        features = self.cvh.goodFeaturesToTrack(self.gray, **feature_params)
        # refine the corner locations
        cv2.cornerSubPix(self.gray, features, **subpix_params)
        self.features = features
        # Start a fresh one-point track for every detected feature.
        self.tracks = [[p] for p in features.reshape((-1, 2))]
        self.prev_gray = self.gray
    def track_points(self):
        """Track the detected features."""
        if self.features != []:
            # use the newly loaded image and create grayscale
            self.gray = self.cvh.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
            # reshape to fit input format
            tmp = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            # calculate optical flow using forwards-backwards algorithm
            features, status, track_error = cv2.calcOpticalFlowPyrLK(self.prev_gray,
                self.gray, tmp,
                None, **lk_params)
            features_r, status1, track_error = cv2.calcOpticalFlowPyrLK(self.gray,
                self.prev_gray,
                features, None,
                **lk_params)
            # A point is "good" if tracking it forward then backward
            # returns (within 1 px of) the original position.
            d = abs(tmp - features_r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, features.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                # Cap the track history at track_len positions.
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv2.circle(self.image, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv2.polylines(self.image, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            # replenish lost points every self.interval steps
            if self.current_frame % self.interval == 0:
                # Mask out neighborhoods of existing track endpoints so new
                # features are only sought in uncovered regions.
                mask = np.zeros_like(self.gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = self.cvh.goodFeaturesToTrack(self.gray, mask=mask, **feature_params)
                # Refine the features using cornerSubPix.
                # Takes time to compute, and makes the video choppy, so only enable if you need it.
                cv2.cornerSubPix(self.gray, p, **subpix_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])
            self.prev_gray = self.gray
    def track(self):
        """Generator for stepping through a sequence."""
        # Detect on the first call, track on subsequent calls.
        if self.features == []:
            self.detect_points()
        else:
            self.track_points()
        # create a copy in RGB
        f = np.array(self.features).reshape(-1, 2)
        im = self.cvh.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        yield im, f
    def draw(self):
        """Draw the current image with points using
        OpenCV's own drawing functions."""
        # Exclude neighborhoods of tracked points from blob detection.
        mask = np.zeros_like(self.gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
            cv2.circle(mask, (x, y), 5, 0, -1)
        # do blob detection
        regions = self.mser.detect(self.gray, mask=mask)
        hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
        # Assign each track to its best-matching hull in parallel.
        hull_test = partial(hulls_from_features, hulls=hulls)
        hulls1 = self.pool.map(hull_test, self.tracks)
        hulls1 = [hulls[i] for i in hulls1]
        # for tr in self.tracks:
        #     point = tr[0]
        #     x, y = point
        #     # hull_test = partial(test_in_hull, x=x, y=y)
        #     # distances = self.pool.map(hull_test, hulls, chunksize=20)
        #     # distances = np.float64(distances)
        #     distances_gen = (cv2.pointPolygonTest(h, (x, y), True) for h in hulls)
        #     distances = np.fromiter(distances_gen, np.float)
        #     max_hull_index = np.argmax(distances)
        #     max_hull = hulls[max_hull_index]
        #     hulls1.append(max_hull)
        cv2.polylines(self.image, hulls1, 1, (0, 255, 255))
        return self.image
| {
"repo_name": "vrdabomb5717/COMS_W4731_Final_Project",
"path": "lktrack.py",
"copies": "1",
"size": "6263",
"license": "mit",
"hash": -9178224089871675000,
"line_mean": 33.6022099448,
"line_max": 95,
"alpha_frac": 0.5422321571,
"autogenerated": false,
"ratio": 3.576813249571673,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4619045406671673,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from multiprocessing.pool import ThreadPool
from skimage import measure
import multiprocessing
import itertools
import numpy as np
import time
from . import progress, stl
WORKERS = multiprocessing.cpu_count()
SAMPLES = 2 ** 22
BATCH_SIZE = 32
def _marching_cubes(volume, level=0):
    """Run marching cubes on *volume* at iso-level *level*, returning the
    triangle vertices as a flat (num_triangles * 3, 3) array."""
    verts, faces, _, _ = measure.marching_cubes(volume, level)
    triangles = verts[faces]
    return triangles.reshape((-1, 3))
def _cartesian_product(*arrays):
la = len(arrays)
dtype = np.result_type(*arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(np.ix_(*arrays)):
arr[...,i] = a
return arr.reshape(-1, la)
def _skip(sdf, job):
X, Y, Z = job
x0, x1 = X[0], X[-1]
y0, y1 = Y[0], Y[-1]
z0, z1 = Z[0], Z[-1]
x = (x0 + x1) / 2
y = (y0 + y1) / 2
z = (z0 + z1) / 2
r = abs(sdf(np.array([(x, y, z)])).reshape(-1)[0])
d = np.linalg.norm(np.array((x-x0, y-y0, z-z0)))
if r <= d:
return False
corners = np.array(list(itertools.product((x0, x1), (y0, y1), (z0, z1))))
values = sdf(corners).reshape(-1)
same = np.all(values > 0) if values[0] > 0 else np.all(values < 0)
return same
def _worker(sdf, job, sparse):
    """Evaluate one batch of the sampling grid.

    Returns None when the batch was skipped (sparse mode), an empty list
    when it contained no surface, or an array of triangle vertices in
    world coordinates.
    """
    X, Y, Z = job
    if sparse and _skip(sdf, job):
        return None
    grid = _cartesian_product(X, Y, Z)
    volume = sdf(grid).reshape((len(X), len(Y), len(Z)))
    try:
        verts = _marching_cubes(volume)
    except Exception:
        # marching_cubes raises when the volume contains no iso-surface.
        return []
    # Map from grid-index space back to world coordinates.
    step = np.array([X[1] - X[0], Y[1] - Y[0], Z[1] - Z[0]])
    origin = np.array([X[0], Y[0], Z[0]])
    return verts * step + origin
def _estimate_bounds(sdf):
    """Estimate an axis-aligned bounding box around the SDF's surface.

    Starts from an enormous box and repeatedly shrinks it to the region
    where |sdf| is small (i.e. near the zero iso-surface), sampling on a
    coarse 16^3 grid each round. Returns ((x0, y0, z0), (x1, y1, z1)).
    """
    # TODO: raise exception if bound estimation fails
    s = 16
    x0 = y0 = z0 = -1e9
    x1 = y1 = z1 = 1e9
    prev = None
    for i in range(32):
        X = np.linspace(x0, x1, s)
        Y = np.linspace(y0, y1, s)
        Z = np.linspace(z0, z1, s)
        # Cell size and the "near the surface" threshold (half the cell
        # diagonal): cells whose sample lies within it may hold surface.
        d = np.array([X[1] - X[0], Y[1] - Y[0], Z[1] - Z[0]])
        threshold = np.linalg.norm(d) / 2
        # Converged: the box stopped shrinking.
        if threshold == prev:
            break
        prev = threshold
        P = _cartesian_product(X, Y, Z)
        volume = sdf(P).reshape((len(X), len(Y), len(Z)))
        where = np.argwhere(np.abs(volume) <= threshold)
        # Tighten the box to the occupied cells, padded by half a cell.
        x1, y1, z1 = (x0, y0, z0) + where.max(axis=0) * d + d / 2
        x0, y0, z0 = (x0, y0, z0) + where.min(axis=0) * d - d / 2
    return ((x0, y0, z0), (x1, y1, z1))
def generate(
        sdf,
        step=None, bounds=None, samples=SAMPLES,
        workers=WORKERS, batch_size=BATCH_SIZE,
        verbose=True, sparse=True):
    """Sample *sdf* on a 3-D grid and triangulate its zero iso-surface.

    Parameters:
        sdf: callable mapping an (N, 3) array of points to N values.
        step: grid spacing (scalar or (dx, dy, dz)); derived from
            *samples* when None.
        bounds: ((x0, y0, z0), (x1, y1, z1)); estimated when None.
        samples: approximate total sample count used to derive *step*.
        workers, batch_size: thread count and per-axis batch size.
        verbose: print progress and statistics.
        sparse: skip batches that provably contain no surface.

    Returns a flat list of triangle vertices (3 consecutive points per
    triangle).
    """
    start = time.time()
    if bounds is None:
        bounds = _estimate_bounds(sdf)
    (x0, y0, z0), (x1, y1, z1) = bounds
    if step is None and samples is not None:
        # Choose an isotropic step so the box holds ~`samples` samples.
        volume = (x1 - x0) * (y1 - y0) * (z1 - z0)
        step = (volume / samples) ** (1 / 3)
    try:
        dx, dy, dz = step
    except TypeError:
        dx = dy = dz = step
    if verbose:
        print('min %g, %g, %g' % (x0, y0, z0))
        print('max %g, %g, %g' % (x1, y1, z1))
        print('step %g, %g, %g' % (dx, dy, dz))
    X = np.arange(x0, x1, dx)
    Y = np.arange(y0, y1, dy)
    Z = np.arange(z0, z1, dz)
    s = batch_size
    # Batches overlap by one sample (i:i+s+1) so adjacent batches share a
    # boundary plane and the mesh has no seams.
    Xs = [X[i:i+s+1] for i in range(0, len(X), s)]
    Ys = [Y[i:i+s+1] for i in range(0, len(Y), s)]
    Zs = [Z[i:i+s+1] for i in range(0, len(Z), s)]
    batches = list(itertools.product(Xs, Ys, Zs))
    num_batches = len(batches)
    num_samples = sum(len(xs) * len(ys) * len(zs)
        for xs, ys, zs in batches)
    if verbose:
        print('%d samples in %d batches with %d workers' %
            (num_samples, num_batches, workers))
    points = []
    skipped = empty = nonempty = 0
    bar = progress.Bar(num_batches, enabled=verbose)
    pool = ThreadPool(workers)
    f = partial(_worker, sdf, sparse=sparse)
    for result in pool.imap(f, batches):
        bar.increment(1)
        if result is None:
            skipped += 1
        elif len(result) == 0:
            empty += 1
        else:
            nonempty += 1
            points.extend(result)
    # BUG FIX: release the worker threads (the pool was never closed).
    pool.close()
    pool.join()
    bar.done()
    if verbose:
        print('%d skipped, %d empty, %d nonempty' % (skipped, empty, nonempty))
        # BUG FIX: the timing summary was printed unconditionally, which
        # contradicted verbose=False; guard it like every other print.
        triangles = len(points) // 3
        seconds = time.time() - start
        print('%d triangles in %g seconds' % (triangles, seconds))
    return points
def save(path, *args, **kwargs):
    """Generate a mesh (see ``generate``) and write it to *path*.

    ``.stl`` paths are written as binary STL; any other extension is
    delegated to meshio via ``_mesh``.
    """
    points = generate(*args, **kwargs)
    if path.lower().endswith('.stl'):
        stl.write_binary_stl(path, points)
        return
    _mesh(points).write(path)
def _mesh(points):
    """Deduplicate vertices and build a meshio triangle mesh."""
    import meshio
    unique_points, inverse = np.unique(points, axis=0, return_inverse=True)
    # Every 3 consecutive indices form one triangle.
    cells = [('triangle', inverse.reshape((-1, 3)))]
    return meshio.Mesh(unique_points, cells)
def _debug_triangles(X, Y, Z):
x0, x1 = X[0], X[-1]
y0, y1 = Y[0], Y[-1]
z0, z1 = Z[0], Z[-1]
p = 0.25
x0, x1 = x0 + (x1 - x0) * p, x1 - (x1 - x0) * p
y0, y1 = y0 + (y1 - y0) * p, y1 - (y1 - y0) * p
z0, z1 = z0 + (z1 - z0) * p, z1 - (z1 - z0) * p
v = [
(x0, y0, z0),
(x0, y0, z1),
(x0, y1, z0),
(x0, y1, z1),
(x1, y0, z0),
(x1, y0, z1),
(x1, y1, z0),
(x1, y1, z1),
]
return [
v[3], v[5], v[7],
v[5], v[3], v[1],
v[0], v[6], v[4],
v[6], v[0], v[2],
v[0], v[5], v[1],
v[5], v[0], v[4],
v[5], v[6], v[7],
v[6], v[5], v[4],
v[6], v[3], v[7],
v[3], v[6], v[2],
v[0], v[3], v[2],
v[3], v[0], v[1],
]
def sample_slice(
        sdf, w=1024, h=1024,
        x=None, y=None, z=None, bounds=None):
    """Sample *sdf* on an axis-aligned plane.

    Exactly one of *x*, *y*, *z* fixes the slicing plane. Returns
    (values, extent, axes) where values is a (w, h) array, extent is
    suitable for matplotlib's imshow, and axes names the plot axes.
    """
    if bounds is None:
        bounds = _estimate_bounds(sdf)
    (x0, y0, z0), (x1, y1, z1) = bounds
    if x is not None:
        xs = np.array([x])
        ys = np.linspace(y0, y1, w)
        zs = np.linspace(z0, z1, h)
        extent = (zs[0], zs[-1], ys[0], ys[-1])
        axes = 'ZY'
    elif y is not None:
        ys = np.array([y])
        xs = np.linspace(x0, x1, w)
        zs = np.linspace(z0, z1, h)
        extent = (zs[0], zs[-1], xs[0], xs[-1])
        axes = 'ZX'
    elif z is not None:
        zs = np.array([z])
        xs = np.linspace(x0, x1, w)
        ys = np.linspace(y0, y1, h)
        extent = (ys[0], ys[-1], xs[0], xs[-1])
        axes = 'YX'
    else:
        raise Exception('x, y, or z position must be specified')
    samples = sdf(_cartesian_product(xs, ys, zs))
    return samples.reshape((w, h)), extent, axes
def show_slice(*args, **kwargs):
    """Render a 2-D slice of the SDF with matplotlib.

    Accepts the same arguments as ``sample_slice`` plus ``abs=True`` to
    plot |sdf| instead of the signed values.
    """
    import matplotlib.pyplot as plt
    show_abs = kwargs.pop('abs', False)
    values, extent, axes = sample_slice(*args, **kwargs)
    if show_abs:
        values = np.abs(values)
    image = plt.imshow(values, extent=extent, origin='lower')
    plt.xlabel(axes[0])
    plt.ylabel(axes[1])
    plt.colorbar(image)
    plt.show()
| {
"repo_name": "fogleman/sdf",
"path": "sdf/mesh.py",
"copies": "1",
"size": "6893",
"license": "mit",
"hash": -2529376324084737000,
"line_mean": 27.366255144,
"line_max": 79,
"alpha_frac": 0.5066008995,
"autogenerated": false,
"ratio": 2.6665377176015475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36731386171015473,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from nltk import word_tokenize, tokenize
from pprint import pprint
import operator
import json
import random
from docopt import docopt
from goalexp import Goal
version = "0.0.1"
cmdline = """
Generates useful info from raw text or from previous session.
Usage:
structure [<filename>]
structure -h | --help
structure --version
Options:
-h --help Show this screen.
--version Show version.
Version:{0}
""".format(version)
def count_occurances(text, occ = None):
    """Tokenize *text* with nltk and feed every upper-cased token into the
    given Occurance counter *occ* (Python 2 code).

    Non-ASCII characters are dropped before tokenization.
    """
    ascii_text = text.encode('ascii', 'ignore')
    tokens = word_tokenize(ascii_text)
    for token in tokens:
        occ.feed(token.upper())
class Occurance(object):
    """Simple word-frequency accumulator."""

    def __init__(self):
        # word -> number of times feed() saw it
        self._occ = {}

    def feed(self, word):
        """Record one occurrence of *word*."""
        self._occ[word] = self._occ.get(word, 0) + 1

    def get_occurances(self):
        """Return the word -> count mapping (the live internal dict)."""
        return self._occ
class TitleGenerator(object):
    """Generates candidate titles from a word-score map (Python 2 code:
    note xrange and dict.keys() returning a list)."""
    # Score delta applied by like_word / unlike_word.
    like_acc = 100
    # Max attempts for the random-search loops below.
    tries = 50
    # NOTE(review): appears unused within this class -- candidate for removal.
    non_word_symbols = list("./\|;()[]{}!@#$%^&*+")
    def __init__(self, words, title_limit=10):
        # words: mapping of word -> score; mutated by (un)like_word.
        self._words = words
        self._k = self._words.keys()
        self._title_limit = title_limit
        # Titles the user has rejected; never suggested again.
        self._rejects = []
    def shuffle_choose(self):
        """Pick a random word with a positive score; after `tries`
        attempts, fall back to the last word drawn."""
        tr = self.tries
        while (tr > 0):
            choice = random.choice(self._k)
            if (self._words[choice] > 0):
                return choice
            tr -= 1
        return choice
    def shuffle_generate_title(self):
        """Build a random title (list of words) not previously rejected;
        gives up and returns the last candidate after `tries` attempts."""
        tr = self.tries
        while (tr > 0):
            title_len = random.randint(1, self._title_limit)
            title = [self.shuffle_choose() for i in xrange(title_len)]
            if title not in self._rejects:
                return title
            tr -= 1
        return title
    def struct_generate_title(self, goal=""):
        """
        Attempt to produce title
        with valid structure defined by a goal.
        """
        goal = Goal(goal)
        goal.generate_placeholder_words()
        return goal.compile()
    def reject_title(self, title):
        # Remember the rejection so the title is not proposed again.
        if title not in self._rejects:
            self._rejects.append(title)
    def like_word(self, word):
        # Boost a word's score so it is chosen more often.
        self._words[word] += self.like_acc
    def unlike_word(self, word):
        # Penalize a word's score so it is chosen less often.
        self._words[word] -= self.like_acc
class _Getch:
    """Gets a single character from standard input. Does not echo to the
    screen."""

    def __init__(self):
        # Try the Windows implementation first; its msvcrt import raises
        # ImportError elsewhere, in which case fall back to the
        # termios-based Unix implementation.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
class _GetchUnix:
    # Unix single-character reader using termios raw mode.
    def __init__(self):
        # Probe availability of the required modules at construction time.
        import tty, sys
    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        # Save terminal settings so they can be restored afterwards.
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each keypress immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    # Windows single-character reader using msvcrt.
    def __init__(self):
        # Raises ImportError off Windows, which _Getch uses for dispatch.
        import msvcrt
    def __call__(self):
        import msvcrt
        return msvcrt.getch()
# Module-level reader instance used by the interactive loops below.
getch = _Getch()
def like_words(title):
    """Interactively ask the user, word by word, which words of *title*
    they like; return the liked words (Python 2 code)."""
    result = []
    for word in title:
        print "like word[1] or not[0] :", word
        # Single keypress: '1' keeps the word, anything else skips it.
        res = getch()
        if res == '1':
            result.append(word)
    return result
def generate_title(words):
    """Interactive loop: propose titles built from *words* until the user
    accepts one, adjusting word scores from their feedback (Python 2 code)."""
    t = TitleGenerator(words)
    while True:
        title = t.struct_generate_title()
        print title
        print "Is it ok[1] or not[0]:"
        user = getch()
        if user == '1':
            print 'yay!'
            break
        else:
            print 'rejected!'
            t.reject_title(title)
            # Boost the words the user liked ...
            likes = like_words(title)
            map(t.like_word, likes)
            # ... and penalize the rest of the rejected title.
            def un_like(x):
                return t.unlike_word(x) if x not in likes else None
            map(un_like, title)
def main():
    """Begin with raw text"""
    # Load the corpus: a JSON list of raw text documents.
    with open('texts') as data_file:
        texts = json.load(data_file)
    print "text count:", len(texts)
    # Count word occurrences across the whole corpus.
    occ = Occurance()
    c_count_occurances = partial(count_occurances, occ=occ)
    map(c_count_occurances, texts)
    # Interactive title-generation session using the frequency map.
    generate_title(occ.get_occurances())
    # Persist the (word, count) pairs, most frequent first.
    with open('frequencies.txt', 'w') as f:
        results = sorted(occ.get_occurances().items(),
                key=operator.itemgetter(1))
        results.reverse()
        f.write(json.dumps(results))
    print 'done!'
if __name__ == '__main__':
    arguments = docopt(cmdline)
    # NOTE(review): 'goal' appears unused here -- TODO confirm and remove.
    goal = ""
    if arguments["<filename>"]:
        # Resume from a previously saved frequency file instead of raw text.
        with open(arguments["<filename>"], "r") as f:
            data = dict(json.loads(f.read()))
        generate_title(data)
        # Save the (possibly adjusted) frequencies back out, sorted
        # by count descending.
        with open('frequencies.txt', 'w') as f:
            results = sorted(data.items(),
                    key=operator.itemgetter(1))
            results.reverse()
            f.write(json.dumps(results))
    else:
        main()
    exit(0)
| {
"repo_name": "marto1/sdre",
"path": "structure.py",
"copies": "1",
"size": "4961",
"license": "isc",
"hash": -1000164860428973200,
"line_mean": 23.5594059406,
"line_max": 73,
"alpha_frac": 0.5537190083,
"autogenerated": false,
"ratio": 3.7356927710843375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755038019703862,
"avg_score": 0.006874751936095027,
"num_lines": 202
} |
from functools import partial
from nose.tools import assert_raises
from syn.base_utils import compose
from syn.python.b import Statement, from_source, from_ast, Pass, Num, Name, \
Assign, Return, ProgN, Module, Alias, Import, Break, Continue
# Parse a string of Python source in 'exec' mode into the syn AST.
mparse = compose(partial(from_source, mode='exec'), str)
#-------------------------------------------------------------------------------
# Utilities
def examine(s, s2=None):
    """Round-trip check: parse *s*, assert it emits *s2* (default: *s*),
    convert to a Python ast and back, and assert the emission again."""
    expected = s if s2 is None else s2
    tree = mparse(s)
    assert tree.emit() == expected
    rebuilt = from_ast(tree.to_ast(), mode='exec')
    assert rebuilt.emit() == expected
#-------------------------------------------------------------------------------
# Statement
def test_statement():
    # Smoke test: referencing the class verifies it imports cleanly.
    Statement
#-------------------------------------------------------------------------------
# Assign
def test_assign():
    """Emission, progn resolution, and validation of assignments."""
    # Source round-trips (tuple targets gain parentheses).
    examine('a = 1')
    examine('a = b = 1')
    examine('a, b = 1', '(a, b) = 1')
    # Direct construction: single and chained targets, with indentation.
    a = Assign([Name('x')], Num(1))
    assert a.emit() == 'x = 1'
    assert a.emit(indent_level=1) == ' x = 1'
    a = Assign([Name('x'), Name('y')], Num(1))
    assert a.emit() == 'x = y = 1'
    assert a.emit(indent_level=1) == ' x = y = 1'
    # A ProgN value is flattened into sequential statements.
    a = Assign([Name('x')],
        ProgN(Assign([Name('y')],
            Num(2))))
    assert a.resolve_progn() == ProgN(Assign([Name('y')], Num(2)),
        Assign([Name('x')], Name('y')))
    assert Module(a).resolve_progn().emit() == 'y = 2\nx = y'
    # Nested Assign values are invalid until expressified.
    a = Assign([Name('x')],
        Assign([Name('y')],
            Num(2)))
    assert a.emit() == 'x = y = 2'
    assert_raises(TypeError, a.validate)
    assert Module(a).expressify_statements().resolve_progn().emit() == \
        'y = 2\nx = y'
    # Doubly-nested Assign values expand to three statements.
    a = Assign([Name('x')],
        Assign([Name('y')],
            Assign([Name('z')],
                Num(2))))
    assert a.emit() == 'x = y = z = 2'
    assert_raises(TypeError, a.validate)
    assert Module(a).expressify_statements().resolve_progn().emit() == \
        'z = 2\ny = z\nx = y'
#-------------------------------------------------------------------------------
# Return
def test_return():
    """Emission of bare and valued return statements."""
    examine('return')
    examine('return 1')
    bare = Return()
    assert bare.emit() == 'return'
    assert bare.emit(indent_level=1) == ' return'
    valued = Return(Num(1))
    assert valued.emit() == 'return 1'
    assert valued.emit(indent_level=1) == ' return 1'
#-------------------------------------------------------------------------------
# Import
def test_import():
    """Round-trip and direct emission of imports and aliases."""
    for src in ('import foo',
                'import foo, bar, baz',
                'import foo, bar as baz',
                'import foo as bar, baz'):
        examine(src)
    assert Alias('foo').emit() == 'foo'
    assert Alias('foo', 'bar').emit() == 'foo as bar'
    imp = Import([Alias('foo'), Alias('bar', 'baz')])
    assert imp.emit() == 'import foo, bar as baz'
    assert imp.emit(indent_level=1) == ' import foo, bar as baz'
#-------------------------------------------------------------------------------
# Empty Statements
def test_empty_statements():
    """Emission of break/continue/pass and Pass-to-Return conversion."""
    for src in ('break', 'continue', 'pass'):
        examine(src)
    pass_node = Pass()
    assert pass_node.emit() == 'pass'
    assert pass_node.as_return().emit() == 'return'
    break_node = Break()
    assert break_node.emit() == 'break'
    assert break_node.emit(indent_level=1) == ' break'
    continue_node = Continue()
    assert continue_node.emit() == 'continue'
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
    # Run every test_* function in this module when executed directly.
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
| {
"repo_name": "mbodenhamer/syn",
"path": "syn/python/b/tests/test_statements.py",
"copies": "1",
"size": "3662",
"license": "mit",
"hash": -7742579217029413000,
"line_mean": 28.5322580645,
"line_max": 80,
"alpha_frac": 0.4486619334,
"autogenerated": false,
"ratio": 3.7405515832482124,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4689213516648212,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from nose.tools import eq_
from os import path
from threading import Thread
from time import sleep, time
from unittest import main, TestCase
from spacq.devices.config import DeviceConfig
from spacq.interface.pulse.program import Program
from spacq.interface.resources import Resource
from spacq.interface.units import Quantity
from spacq.tool.box import flatten
from ..variables import sort_variables, InputVariable, OutputVariable, LinSpaceConfig
from .. import sweep
resource_dir = path.join(path.dirname(__file__), 'resources')
class SweepControllerTest(TestCase):
	"""Exercises sweep.SweepController: variable iteration, smoothing,
	measurements, continuous mode, error handling, and pulse programs
	(Python 2 code: note xrange)."""
	def testSingle(self):
		"""
		Iterate over a single thing without any measurements.
		"""
		res_buf = []
		def setter(value):
			res_buf.append(value)
		res = Resource(setter=setter)
		var = OutputVariable(name='Var', order=1, enabled=True, const=-1.0)
		var.config = LinSpaceConfig(1.0, 4.0, 4)
		var.smooth_steps = 3
		var.smooth_from, var.smooth_to = [True] * 2
		vars, num_items = sort_variables([var])
		ctrl = sweep.SweepController([(('Res', res),)], vars, num_items, [], [])
		# Callback verification buffers.
		actual_values = []
		actual_measurement_values = []
		actual_writes = []
		actual_reads = []
		closed = [0]
		# Callbacks.
		def data_callback(cur_time, values, measurement_values):
			actual_values.append(values)
			actual_measurement_values.append(measurement_values)
		ctrl.data_callback = data_callback
		def close_callback():
			closed[0] += 1
		ctrl.close_callback = close_callback
		def write_callback(pos, i, value):
			actual_writes.append((pos, i, value))
		ctrl.write_callback = write_callback
		def read_callback(i, value):
			actual_reads.append((i, value))
		ctrl.read_callback = read_callback
		# Let it run.
		ctrl.run()
		# Smoothing prepends a ramp from const (-1.0) to the first value
		# and appends a ramp back to const after the last value.
		eq_(res_buf, [-1.0, 0.0, 1.0, 1.0, 2.0, 3.0, 4.0, 4.0, 1.5, -1.0])
		eq_(actual_values, [(1.0,), (2.0,), (3.0,), (4.0,)])
		eq_(actual_measurement_values, [()] * 4)
		eq_(actual_writes, [(0, 0, x) for x in [1.0, 2.0, 3.0, 4.0]])
		eq_(actual_reads, [])
		eq_(closed, [1])
	def testProper(self):
		"""
		Testing everything that there is to test along the happy path:
			nested and parallel variables
			measurements
			dwell time
		"""
		res_bufs = [[], [], [], []]
		measurement_counts = [0] * 2
		def setter(i, value):
			res_bufs[i].append(value)
		def getter(i):
			# Getter 0 counts up, getter 1 counts down.
			measurement_counts[i] += (-1) ** i
			return measurement_counts[i]
		dwell_time = Quantity(50, 'ms')
		# Output.
		res0 = Resource(setter=partial(setter, 0))
		res0.units = 'cm-1'
		res1 = Resource(setter=partial(setter, 1))
		res2 = Resource(setter=partial(setter, 2))
		res3 = Resource(setter=partial(setter, 3))
		var0 = OutputVariable(name='Var 0', order=2, enabled=True, const=0.0)
		var0.config = LinSpaceConfig(-1.0, -2.0, 2)
		var0.smooth_steps = 2
		var0.smooth_from, var0.smooth_to, var0.smooth_transition = [True] * 3
		var0.type = 'quantity'
		var0.units = 'cm-1'
		var1 = OutputVariable(name='Var 1', order=1, enabled=True, const=-1.0)
		var1.config = LinSpaceConfig(1.0, 4.0, 4)
		var1.smooth_steps = 3
		var1.smooth_from, var1.smooth_to, var1.smooth_transition = [True] * 3
		var2 = OutputVariable(name='Var 2', order=1, enabled=True, const=1.23, use_const=True)
		var3 = OutputVariable(name='Var 3', order=1, enabled=True, const=-9.0, wait=str(dwell_time))
		var3.config = LinSpaceConfig(-1.0, 2.0, 4)
		var3.smooth_steps = 2
		var3.smooth_from, var3.smooth_to, var3.smooth_transition = True, True, False
		var4 = OutputVariable(name='Var 4', order=3, enabled=True, const=-20.0)
		var4.config = LinSpaceConfig(-10.0, 20, 1)
		var4.smooth_steps = 2
		var4.smooth_from = True
		# Input.
		meas_res0 = Resource(getter=partial(getter, 0))
		meas_res1 = Resource(getter=partial(getter, 1))
		meas0 = InputVariable(name='Meas 0')
		meas1 = InputVariable(name='Meas 1')
		vars, num_items = sort_variables([var0, var1, var2, var3, var4])
		ctrl = sweep.SweepController([(('Res 2', res2),), (('Something', None),), (('Res 0', res0),),
				(('Res 1', res1), ('Res 3', res3))], vars, num_items,
				[('Meas res 0', meas_res0), ('Meas res 1', meas_res1)], [meas0, meas1])
		# Callback verification buffers.
		actual_values = []
		actual_measurement_values = []
		actual_writes = []
		actual_reads = []
		closed = [0]
		# Callbacks.
		def data_callback(cur_time, values, measurement_values):
			actual_values.append(values)
			actual_measurement_values.append(measurement_values)
		ctrl.data_callback = data_callback
		def close_callback():
			closed[0] += 1
		ctrl.close_callback = close_callback
		def write_callback(pos, i, value):
			actual_writes.append((pos, i, value))
		ctrl.write_callback = write_callback
		def read_callback(i, value):
			actual_reads.append((i, value))
		ctrl.read_callback = read_callback
		# Let it run.
		start_time = time()
		ctrl.run()
		elapsed_time = time() - start_time
		# The dwell time on var3 imposes a lower bound on the run time.
		expected_time = num_items * dwell_time.value
		assert expected_time < elapsed_time, 'Took {0} s, expected at least {1} s.'.format(elapsed_time, expected_time)
		expected_res1 = [1.0, 2.0, 3.0, 4.0]
		expected_res2 = [-1.0, 0.0, 1.0, 2.0]
		expected_inner_writes = list(flatten(((3, 0, x), (3, 1, x - 2.0)) for x in [1.0, 2.0, 3.0, 4.0]))
		expected_writes = [(0, 0, 1.23), (1, 0, -10.0)] + list(flatten([(2, 0, x)] + expected_inner_writes
				for x in [Quantity(x, 'cm-1') for x in [-1.0, -2.0]]))
		eq_(res_bufs, [
			[Quantity(x, 'cm-1') for x in [0.0, -1.0, -1.0, -2.0, -2.0, 0.0]],
			[-1.0, 0.0, 1.0] + expected_res1 + [4.0, 2.5, 1.0] + expected_res1 + [4.0, 1.5, -1.0],
			[1.23],
			[-9.0, -1.0] + expected_res2 + expected_res2 + [2.0, -9.0],
		])
		eq_(measurement_counts, [8, -8])
		eq_(actual_values, [(1.23, -10.0, x, y, y - 2.0)
				for x in [Quantity(x, 'cm-1') for x in [-1.0, -2.0]]
				for y in [1.0, 2.0, 3.0, 4.0]])
		eq_(actual_measurement_values, [(x, -x) for x in xrange(1, 9)])
		eq_(actual_writes, expected_writes)
		eq_(actual_reads, list(flatten(((0, x), (1, -x)) for x in xrange(1, 9))))
		eq_(closed, [1])
	def testContinuous(self):
		"""
		Keep going, and then eventually stop.
		"""
		res_buf = []
		def setter(value):
			res_buf.append(value)
		res = Resource(setter=setter)
		var = OutputVariable(name='Var', order=1, enabled=True)
		var.config = LinSpaceConfig(1.0, 4.0, 4)
		vars, num_items = sort_variables([var])
		ctrl = sweep.SweepController([(('Res', res),)], vars, num_items, [], [], continuous=True)
		# Run in a background thread so we can pause/unpause from here.
		thr = Thread(target=ctrl.run)
		thr.daemon = True
		thr.start()
		sleep(0.5)
		ctrl.pause()
		sleep(0.5)
		ctrl.unpause()
		sleep(0.5)
		# Request the current pass be the last one, then wait for it.
		ctrl.last_continuous = True
		thr.join()
		# The sweep values should repeat verbatim; check the first 50 cycles.
		expected_buf = [1.0, 2.0, 3.0, 4.0]
		eq_(res_buf[:len(expected_buf) * 50], expected_buf * 50)
	def testWriteException(self):
		"""
		Fail to write.
		"""
		exceptions = []
		e = ValueError()
		def setter(value):
			raise e
		res = Resource(setter=setter)
		var = OutputVariable(name='Var', order=1, enabled=True, const=0.0)
		var.config = LinSpaceConfig(1.0, 4.0, 4)
		vars, num_items = sort_variables([var])
		ctrl = sweep.SweepController([(('Res', res),)], vars, num_items, [], [])
		def resource_exception_handler(name, e, write):
			exceptions.append((name, e))
			# Abort fatally, so the exception is reported exactly once.
			ctrl.abort(fatal=True)
			assert write
		ctrl.resource_exception_handler = resource_exception_handler
		ctrl.run()
		eq_(exceptions, [('Res', e)])
	def testReadException(self):
		"""
		Fail to read.
		"""
		exceptions = []
		e = ValueError()
		def getter():
			raise e
		res = Resource(setter=lambda x: x)
		var = OutputVariable(name='Var', order=1, enabled=True)
		var.config = LinSpaceConfig(1.0, 4.0, 4)
		meas_res = Resource(getter=getter)
		meas_var = InputVariable(name='Meas var')
		vars, num_items = sort_variables([var])
		ctrl = sweep.SweepController([(('Res', res),)], vars, num_items, [('Meas res', meas_res)], [meas_var])
		def resource_exception_handler(name, e, write):
			exceptions.append((name, e))
			assert not write
		ctrl.resource_exception_handler = resource_exception_handler
		ctrl.run()
		# Non-fatal: the read failure is reported on each of the 4 steps.
		eq_(exceptions, [('Meas res', e)] * 4)
	def testPulseProgram(self):
		"""
		Iterate with a pulse program.
		"""
		res_buf = []
		def setter(value):
			res_buf.append(value)
		res = Resource(setter=setter)
		var1 = OutputVariable(name='Var 1', order=1, enabled=True)
		var1.config = LinSpaceConfig(1.0, 4.0, 4)
		p = Program.from_file(path.join(resource_dir, '01.pulse'))
		p.frequency = Quantity(1, 'GHz')
		p.set_value(('_acq_marker', 'marker_num'), 1)
		p.set_value(('_acq_marker', 'output'), 'f1')
		eq_(p.all_values, set([('_acq_marker', 'marker_num'), ('_acq_marker', 'output'), ('d',), ('i',),
			('p', 'amplitude'), ('p', 'length'), ('p', 'shape')]))
		# Attach resources to the sweepable pulse-program parameters.
		parameters = [('i',), ('d',), ('p', 'amplitude'), ('p', 'length')]
		for parameter in parameters:
			p.resource_labels[parameter] = 'res_' + '.'.join(parameter)
			p.resources[parameter] = Resource()
		var2 = OutputVariable(name='Var 2', order=1, enabled=True)
		var2.config = LinSpaceConfig(1, 4, 4)
		var2.type = 'integer'
		# Mock AWG and oscilloscope devices.
		awg_cfg = DeviceConfig('awg')
		awg_cfg.address_mode = awg_cfg.address_modes.gpib
		awg_cfg.manufacturer, awg_cfg.model = 'Tektronix', 'AWG5014B'
		awg_cfg.mock = True
		awg_cfg.connect()
		osc_cfg = DeviceConfig('osc')
		osc_cfg.address_mode = awg_cfg.address_modes.gpib
		osc_cfg.manufacturer, osc_cfg.model = 'Tektronix', 'DPO7104'
		osc_cfg.mock = True
		osc_cfg.connect()
		pulse_config = sweep.PulseConfiguration(p.with_resources, {'f1': 1}, awg_cfg.device, osc_cfg.device)
		vars, num_items = sort_variables([var1, var2])
		ress = [(('Res 1', res), ('Res 2', p.resources[('i',)]))]
		ctrl = sweep.SweepController(ress, vars, num_items, [], [], pulse_config)
		ctrl.run()
		eq_(res_buf, [1.0, 2.0, 3.0, 4.0])
if __name__ == '__main__':
	# Run the unittest test runner when executed directly.
	main()
| {
"repo_name": "0/SpanishAcquisition",
"path": "spacq/iteration/tests/test_sweep.py",
"copies": "1",
"size": "9768",
"license": "bsd-2-clause",
"hash": -3811004133289965600,
"line_mean": 27.4781341108,
"line_max": 113,
"alpha_frac": 0.6401515152,
"autogenerated": false,
"ratio": 2.7163515016685205,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8706239195936466,
"avg_score": 0.030052764186410758,
"num_lines": 343
} |
from functools import partial
from nose.tools import eq_
def _validate_content_type(expected_type, result):
    # Assert the response advertises the expected Content-Type header.
    eq_(result.headers['Content-Type'], expected_type)
def _validate_content_length(expected_length, result):
    # Assert the response body length matches (header compared as int).
    eq_(int(result.headers['content-length']), expected_length)
def _validate_md5(expected_md5, result):
    # Assert the Artifactory-specific MD5 checksum header matches.
    eq_(result.headers['x-checksum-md5'], expected_md5)
def _validate_text(expected_text, result):
    # Assert the response body text matches exactly.
    eq_(result.text, expected_text)
def _validate_404(result):
    # Assert the request was answered with HTTP 404 Not Found.
    eq_(result.status_code, 404)
class SphinxHelper(object):
    """Drives requests against the Sphinx package paths and applies
    validator callables to each response.

    *get_path_fn* fetches a path and returns the response object;
    *lowercase* selects the lower-cased package directory name.
    """

    def __init__(self, get_path_fn, lowercase=False):
        package_name = 'sphinx' if lowercase else 'Sphinx'
        self._package_prefix = 'python/{}/'.format(package_name)
        self.get_path_fn = get_path_fn
        # Known-good MD5 of Sphinx-1.1.3.tar.gz, used by callers' asserts.
        self.expected_md5_checksum = '8f55a6d4f87fc6d528120c5d1f983e98'

    def __get_path_and_perform_validations(self, path, validate_fn_list):
        # Fetch once, then run every validator against the same response.
        response = self.get_path_fn(path)
        for validate in validate_fn_list:
            validate(response)

    def perform_md5_validations(self, validators):
        path = self._package_prefix + '/Sphinx-1.1.3.tar.gz.md5'
        self.__get_path_and_perform_validations(path, validators)

    def perform_sha1_validations(self, validators):
        path = self._package_prefix + '/Sphinx-1.1.3.tar.gz.sha1'
        self.__get_path_and_perform_validations(path, validators)

    def perform_primary_artifact_validations(self, validators):
        path = self._package_prefix + '/Sphinx-1.1.3.tar.gz'
        self.__get_path_and_perform_validations(path, validators)
def perform_package_not_cached_assertions(sphinx_helper):
    """Before caching: both checksum files 404, but the primary artifact
    is served with the expected length and MD5 header."""
    sphinx_helper.perform_md5_validations([_validate_404])
    sphinx_helper.perform_sha1_validations([_validate_404])
    artifact_validators = [
        partial(_validate_content_length, 2632059),
        partial(_validate_md5, '8f55a6d4f87fc6d528120c5d1f983e98'),
    ]
    sphinx_helper.perform_primary_artifact_validations(artifact_validators)
def perform_package_cached_assertions(
        sphinx_helper,
        expect_artifactory_specific_headers):
    """The .md5 file is served from cache, .sha1 is absent, and the
    tarball is served -- optionally with Artifactory's checksum header."""
    md5_checks = (
        partial(_validate_text, sphinx_helper.expected_md5_checksum),
        partial(_validate_content_type, 'application/x-checksum'),
    )
    sphinx_helper.perform_md5_validations(md5_checks)
    sphinx_helper.perform_sha1_validations((_validate_404,))
    tarball_checks = [partial(_validate_content_length, 2632059)]
    if expect_artifactory_specific_headers:
        tarball_checks.append(
            partial(_validate_md5, sphinx_helper.expected_md5_checksum))
    sphinx_helper.perform_primary_artifact_validations(tarball_checks)
def perform_package_unavailable_assertions(sphinx_helper):
    """Every artifact path (md5, sha1 and tarball) must respond 404."""
    for perform in (sphinx_helper.perform_md5_validations,
                    sphinx_helper.perform_sha1_validations,
                    sphinx_helper.perform_primary_artifact_validations):
        perform((_validate_404,))
| {
"repo_name": "teamfruit/defend_against_fruit",
"path": "pypi_redirect/pypi_redirect_integration/tests/_assertion_helper.py",
"copies": "1",
"size": "2900",
"license": "apache-2.0",
"hash": -3818745949007112000,
"line_mean": 31.5842696629,
"line_max": 73,
"alpha_frac": 0.684137931,
"autogenerated": false,
"ratio": 3.4647550776583036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9648893008658304,
"avg_score": 0,
"num_lines": 89
} |
from functools import partial
from nose.tools import *
import unittest
import pandas as pd
import numpy as np
import six
import py_entitymatching.feature.tokenizers as tok
class TokenizerTestCases(unittest.TestCase):
    """Unit tests for py_entitymatching's tokenizer helpers.

    Fixes over the previous revision: the local `input` no longer shadows
    the builtin; `np.NaN` (removed in numpy 2.0) is replaced by the
    canonical `np.nan`; isinstance checks use the dedicated unittest
    assertions for clearer failure messages.
    """
    def test_get_global_tokenizers(self):
        # Accessing the module-level registry must not raise.
        x = tok._global_tokenizers
    def test_get_tokenizers_for_blocking(self):
        tokenizers = tok.get_tokenizers_for_blocking()
        self.assertIsInstance(tokenizers, dict)
        text = 'data science'
        for name, tokenize in six.iteritems(tokenizers):
            self.assertIsInstance(tokenize(text), list)
    @raises(AssertionError)
    def test_get_tokenizers_for_blocking_invalid(self):
        tok.get_tokenizers_for_blocking(None, None)
    def test_get_tokenizers_for_matching(self):
        tokenizers = tok.get_tokenizers_for_matching()
        self.assertIsInstance(tokenizers, dict)
        text = 'data science'
        for name, tokenize in six.iteritems(tokenizers):
            self.assertIsInstance(tokenize(text), list)
    @raises(AssertionError)
    def test_get_tokenizers_for_matching_invalid(self):
        tok.get_tokenizers_for_matching(None, None)
    @raises(AssertionError)
    def test_get_single_arg_tokenizers_invalid_1(self):
        tok._get_single_arg_tokenizers(None, None)
    def test_get_single_arg_tokenizers_valid_2(self):
        tok._get_single_arg_tokenizers(q=3, dlm_char=' ')
    def test_get_single_arg_tokenizers_valid_3(self):
        tok._get_single_arg_tokenizers(q=[], dlm_char=[])
    def test_get_single_arg_tokenizers_valid_4(self):
        tok._get_single_arg_tokenizers(q=None, dlm_char=[' '])
    def test_get_single_arg_tokenizers_valid_5(self):
        tok._get_single_arg_tokenizers(q=3, dlm_char=None)
    def test_qgram_invalid(self):
        # Tokenizers built by the factories must propagate NaN inputs.
        tokenize = tok._make_tok_qgram(3)
        self.assertTrue(pd.isnull(tokenize(np.nan)))
    def test_qgram_delim(self):
        tokenize = tok._make_tok_delim(' ')
        self.assertTrue(pd.isnull(tokenize(np.nan)))
    def test_tokqgram_valid(self):
        self.assertIsInstance(tok.tok_qgram('data science', 3), list)
    def test_tokdelim_valid(self):
        tokens = tok.tok_delim('data science', ' ')
        self.assertIsInstance(tokens, list)
        self.assertEqual(len(tokens), 2)
    def test_tokqgram_invalid(self):
        self.assertTrue(pd.isnull(tok.tok_qgram(np.nan, 3)))
    def test_tokdelim_invalid(self):
        self.assertTrue(pd.isnull(tok.tok_delim(np.nan, ' ')))
| {
"repo_name": "anhaidgroup/py_entitymatching",
"path": "py_entitymatching/tests/test_feature_tokenizers.py",
"copies": "1",
"size": "2542",
"license": "bsd-3-clause",
"hash": -6761037949009006000,
"line_mean": 32.012987013,
"line_max": 66,
"alpha_frac": 0.6506687648,
"autogenerated": false,
"ratio": 3.318537859007833,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4469206623807833,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import add, methodcaller
import numpy
import FIAT
import gem
from gem.utils import cached_property
from finat.finiteelementbase import FiniteElementBase
class EnrichedElement(FiniteElementBase):
    """A finite element whose basis functions are the union of the
    basis functions of several other finite elements."""
    def __new__(cls, elements):
        # Enriching a single element is a no-op: return it directly
        # instead of wrapping (so __init__ is intentionally absent).
        elements = tuple(elements)
        if len(elements) == 1:
            return elements[0]
        else:
            self = super().__new__(cls)
            self.elements = elements
            return self
    @cached_property
    def cell(self):
        # Single-element unpacking asserts all subelements share a cell.
        result, = set(elem.cell for elem in self.elements)
        return result
    @cached_property
    def degree(self):
        # Elementwise maximum over the (possibly tuple-tree) degrees.
        return tree_map(max, *[elem.degree for elem in self.elements])
    @cached_property
    def formdegree(self):
        # The enriched form degree is the max, unless any subelement has
        # no form degree (None), which poisons the result.
        ks = set(elem.formdegree for elem in self.elements)
        if None in ks:
            return None
        else:
            return max(ks)
    def entity_dofs(self):
        '''Return the map of topological entities to degrees of
        freedom for the finite element.'''
        return concatenate_entity_dofs(self.cell, self.elements,
                                       methodcaller("entity_dofs"))
    @cached_property
    def _entity_support_dofs(self):
        # Same concatenation as entity_dofs, but for support DoFs.
        return concatenate_entity_dofs(self.cell, self.elements,
                                       methodcaller("entity_support_dofs"))
    def space_dimension(self):
        '''Return the dimension of the finite element space.'''
        return sum(elem.space_dimension() for elem in self.elements)
    @cached_property
    def index_shape(self):
        return (self.space_dimension(),)
    @cached_property
    def value_shape(self):
        '''A tuple indicating the shape of the element.'''
        # Unpacking asserts all subelements agree on the value shape.
        shape, = set(elem.value_shape for elem in self.elements)
        return shape
    @cached_property
    def fiat_equivalent(self):
        # Avoid circular import dependency
        from finat.mixed import MixedSubElement
        if all(isinstance(e, MixedSubElement) for e in self.elements):
            # EnrichedElement is actually a MixedElement
            return FIAT.MixedElement([e.element.fiat_equivalent
                                      for e in self.elements], ref_el=self.cell)
        else:
            return FIAT.EnrichedElement(*(e.fiat_equivalent
                                          for e in self.elements))
    def _compose_evaluations(self, results):
        # All subelement evaluations must cover the same derivative keys;
        # unpacking the set of key-sets asserts that.
        keys, = set(map(frozenset, results))
        def merge(tables):
            # Concatenate the subelements' basis tables along the basis
            # function index, keeping the value indices (zeta) shared.
            tables = tuple(tables)
            zeta = self.get_value_indices()
            tensors = []
            for elem, table in zip(self.elements, tables):
                beta_i = elem.get_indices()
                tensors.append(gem.ComponentTensor(
                    gem.Indexed(table, beta_i + zeta),
                    beta_i
                ))
            beta = self.get_indices()
            return gem.ComponentTensor(
                gem.Indexed(gem.Concatenate(*tensors), beta),
                beta + zeta
            )
        return {key: merge(result[key] for result in results)
                for key in keys}
    def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
        '''Return code for evaluating the element at known points on the
        reference element.
        :param order: return derivatives up to this order.
        :param ps: the point set object.
        :param entity: the cell entity on which to tabulate.
        '''
        results = [element.basis_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping)
                   for element in self.elements]
        return self._compose_evaluations(results)
    def point_evaluation(self, order, refcoords, entity=None):
        '''Return code for evaluating the element at an arbitrary points on
        the reference element.
        :param order: return derivatives up to this order.
        :param refcoords: GEM expression representing the coordinates
                          on the reference entity. Its shape must be
                          a vector with the correct dimension, its
                          free indices are arbitrary.
        :param entity: the cell entity on which to tabulate.
        '''
        results = [element.point_evaluation(order, refcoords, entity)
                   for element in self.elements]
        return self._compose_evaluations(results)
    @property
    def mapping(self):
        # A single common mapping is reported; mixed mappings yield None.
        mappings = set(elem.mapping for elem in self.elements)
        if len(mappings) != 1:
            return None
        else:
            result, = mappings
            return result
def tree_map(f, *args):
    """Like the built-in :py:func:`map`, but applied over a tuple tree.

    All *args* must share the same tree structure; *f* is applied to
    corresponding leaves and the tuple structure rebuilt around results.
    """
    is_tuple = {isinstance(arg, tuple) for arg in args}
    branching, = is_tuple  # all args must agree on leaf vs. node
    if not branching:
        return f(*args)
    arity, = {len(arg) for arg in args}  # asserts equal arity of all args
    return tuple(tree_map(f, *children) for children in zip(*args))
def concatenate_entity_dofs(ref_el, elements, method):
    """Combine the entity DoFs from a list of elements into a combined
    dict containing the information for the concatenated DoFs of all
    the elements.

    :arg ref_el: the reference cell
    :arg elements: subelements whose DoFs are concatenated
    :arg method: callable producing an element's entity DoFs dict
    :returns: concatenated entity DoFs dict
    """
    combined = {dim: {ent: [] for ent in ents}
                for dim, ents in ref_el.get_topology().items()}
    # Running offset of each subelement's DoFs within the concatenation.
    sizes = [e.space_dimension() for e in elements]
    offsets = numpy.cumsum([0] + sizes, dtype=int)
    for i, dofs_by_dim in enumerate(map(method, elements)):
        for dim, by_entity in dofs_by_dim.items():
            for ent, dofs in by_entity.items():
                combined[dim][ent].extend(offsets[i] + dof for dof in dofs)
    return combined
| {
"repo_name": "FInAT/FInAT",
"path": "finat/enriched.py",
"copies": "1",
"size": "6044",
"license": "mit",
"hash": 2784746841456592400,
"line_mean": 34.9761904762,
"line_max": 101,
"alpha_frac": 0.600099272,
"autogenerated": false,
"ratio": 4.398835516739447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027791905681272814,
"num_lines": 168
} |
from functools import partial
from operator import attrgetter, itemgetter
from pathlib import Path
from random import randint
import jinja2
import ujson
from aiohttp.web import Response, json_response
from sqlalchemy import select
from .models import sa_fortunes, sa_worlds, Fortune, World
# Serialize all JSON responses with ujson (faster than stdlib json).
json_response = partial(json_response, dumps=ujson.dumps)
# Load and compile the fortunes Jinja template once at import time.
template_path = Path(__file__).parent / 'templates' / 'fortune.jinja'
template = jinja2.Template(template_path.read_text())
def get_num_queries(request):
    """Parse the ``queries`` path parameter and clamp it to [1, 500].

    Missing or malformed values fall back to 1, per the benchmark spec.
    """
    raw = request.match_info.get('queries', 1)
    try:
        count = int(raw)
    except ValueError:
        return 1
    return min(max(count, 1), 500)
async def json(request):
    """
    Test 1: JSON serialization of a fixed message.
    """
    payload = {'message': 'Hello, World!'}
    return json_response(payload)
async def single_database_query_orm(request):
    """
    Test 2 ORM: fetch one random World row through the SQLAlchemy session.
    """
    # World ids span 1..10000 in the standard benchmark fixture data.
    id_ = randint(1, 10000)
    async with request.app['db_session']() as sess:
        num = await sess.scalar(select(World.randomnumber).filter_by(id=id_))
    return json_response({'id': id_, 'randomNumber': num})
# RAW variant: the same lookup issued directly on the asyncpg pool.
async def single_database_query_raw(request):
    """
    Test 2 RAW: fetch one random World row via asyncpg.
    """
    id_ = randint(1, 10000)
    async with request.app['pg'].acquire() as conn:
        # $1 placeholder keeps the query parameterized.
        r = await conn.fetchval('SELECT id,randomnumber FROM world WHERE id = $1', id_)
    return json_response({'id': id_, 'randomNumber': r})
async def multiple_database_queries_orm(request):
    """
    Test 3 ORM: N independent single-row queries on one session.
    """
    num_queries = get_num_queries(request)
    ids = [randint(1, 10000) for _ in range(num_queries)]
    # Sorted ids give a deterministic access order across requests.
    ids.sort()
    result = []
    async with request.app['db_session']() as sess:
        for id_ in ids:
            num = await sess.scalar(select(World.randomnumber).filter_by(id=id_))
            result.append({'id': id_, 'randomNumber': num})
    return json_response(result)
async def multiple_database_queries_raw(request):
    """
    Test 3 RAW: N single-row queries via one asyncpg prepared statement.
    """
    num_queries = get_num_queries(request)
    ids = [randint(1, 10000) for _ in range(num_queries)]
    ids.sort()
    result = []
    async with request.app['pg'].acquire() as conn:
        # Prepare once, execute N times on the same connection.
        stmt = await conn.prepare('SELECT id,randomnumber FROM world WHERE id = $1')
        for id_ in ids:
            result.append({
                'id': id_,
                'randomNumber': await stmt.fetchval(id_),
            })
    return json_response(result)
async def fortunes(request):
    """
    Test 4 ORM: render all fortunes plus one added at request time,
    sorted by message, as HTML.
    """
    async with request.app['db_session']() as sess:
        ret = await sess.execute(select(Fortune.id, Fortune.message))
        fortunes = ret.all()
    # The extra fortune must be added before sorting, per the spec.
    fortunes.append(Fortune(id=0, message='Additional fortune added at request time.'))
    fortunes.sort(key=attrgetter('message'))
    content = template.render(fortunes=fortunes)
    return Response(text=content, content_type='text/html')
async def fortunes_raw(request):
    """
    Test 4 RAW: same as above but fetched via asyncpg records.
    """
    async with request.app['pg'].acquire() as conn:
        fortunes = await conn.fetch('SELECT * FROM Fortune')
    # asyncpg records support item access, so a plain dict fits in.
    fortunes.append(dict(id=0, message='Additional fortune added at request time.'))
    fortunes.sort(key=itemgetter('message'))
    content = template.render(fortunes=fortunes)
    return Response(text=content, content_type='text/html')
async def updates(request):
    """
    Test 5 ORM: read N random rows and write back new random numbers
    inside a single transaction.
    """
    num_queries = get_num_queries(request)
    result = []
    ids = [randint(1, 10000) for _ in range(num_queries)]
    # Sorted ids avoid deadlocks between concurrent update transactions.
    ids.sort()
    async with request.app['db_session'].begin() as sess:
        for id_ in ids:
            rand_new = randint(1, 10000)
            # populate_existing forces a fresh SELECT even if cached.
            world = await sess.get(World, id_, populate_existing=True)
            world.randomnumber = rand_new
            result.append({'id': id_, 'randomNumber': rand_new})
    return json_response(result)
async def updates_raw(request):
    """
    Test 5 RAW: same read-then-update cycle via asyncpg, with the
    updates batched through executemany.
    """
    num_queries = get_num_queries(request)
    ids = [randint(1, 10000) for _ in range(num_queries)]
    ids.sort()
    result = []
    updates = []
    async with request.app['pg'].acquire() as conn:
        stmt = await conn.prepare('SELECT id,randomnumber FROM world WHERE id = $1')
        for id_ in ids:
            # the result of this is the int previous random number which we don't actually use
            await stmt.fetchval(id_)
            rand_new = randint(1, 10000)
            result.append({'id': id_, 'randomNumber': rand_new})
            updates.append((rand_new, id_))
        await conn.executemany('UPDATE world SET randomnumber=$1 WHERE id=$2', updates)
    return json_response(result)
async def plaintext(request):
    """
    Test 6: static plain-text response.
    """
    body = b'Hello, World!'
    return Response(body=body, content_type='text/plain')
| {
"repo_name": "lneves/FrameworkBenchmarks",
"path": "frameworks/Python/aiohttp/app/views.py",
"copies": "3",
"size": "4758",
"license": "bsd-3-clause",
"hash": 6980382488958874000,
"line_mean": 27.4910179641,
"line_max": 94,
"alpha_frac": 0.6252627154,
"autogenerated": false,
"ratio": 3.6628175519630486,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007386614639440785,
"num_lines": 167
} |
from functools import partial
from operator import attrgetter, itemgetter
from random import randint
from email.utils import formatdate
import os
import sys
from bottle import Bottle, route, request, run, template, response
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine, Column, Integer, Unicode
from sqlalchemy.ext.declarative import declarative_base
# Prefer ujson for speed; fall back to the stdlib json module.
try:
    import ujson as json
except ImportError:
    import json
# Python 2/3 shim: the handlers below iterate with xrange.
if sys.version_info[0] == 3:
    xrange = range
# PyPy lacks the C MySQLdb driver, so use the pure-Python pymysql there.
_is_pypy = hasattr(sys, 'pypy_version_info')
DBDRIVER = 'mysql+pymysql' if _is_pypy else 'mysql'
DBHOSTNAME = 'tfb-database'
DATABASE_URI = '%s://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % (DBDRIVER, DBHOSTNAME)
app = Bottle()
Base = declarative_base()
# ORM engine, injected into handlers as the `db` keyword by the plugin.
db_engine = create_engine(DATABASE_URI)
plugin = sqlalchemy.Plugin(db_engine, keyword='db')
app.install(plugin)
# Engine for raw operation. Use autocommit.
raw_engine = create_engine(DATABASE_URI,
                           connect_args={'autocommit': True},
                           pool_reset_on_return=None)
class World(Base):
    """ORM mapping for the benchmark World table (id, randomNumber)."""
    __tablename__ = "World"
    id = Column(Integer, primary_key=True)
    randomNumber = Column(Integer)
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id': self.id,
            'randomNumber': self.randomNumber,
        }
class Fortune(Base):
    """ORM mapping for the benchmark Fortune table (id, message)."""
    __tablename__ = "Fortune"
    id = Column(Integer, primary_key=True)
    message = Column(Unicode)
@app.route("/json")
def hello():
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
response.content_type = 'application/json'
resp = {"message": "Hello, World!"}
return json.dumps(resp)
@app.route("/db")
def get_random_world_single(db):
"""Test Type 2: Single Database Query"""
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
wid = randint(1, 10000)
world = db.query(World).get(wid).serialize()
response.content_type = 'application/json'
return json.dumps(world)
@app.route("/raw-db")
def get_random_world_single_raw():
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
connection = raw_engine.connect()
wid = randint(1, 10000)
try:
result = connection.execute("SELECT id, randomNumber FROM world WHERE id = " + str(wid)).fetchone()
world = {'id': result[0], 'randomNumber': result[1]}
response.content_type = 'application/json'
return json.dumps(world)
finally:
connection.close()
@app.route("/queries")
def get_random_world(db):
"""Test Type 3: Multiple database queries"""
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
rp = partial(randint, 1, 10000)
get = db.query(World).get
worlds = [get(rp()).serialize() for _ in xrange(num_queries)]
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/raw-queries")
def get_random_world_raw():
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
worlds = []
rp = partial(randint, 1, 10000)
connection = raw_engine.connect()
try:
for i in xrange(num_queries):
result = connection.execute("SELECT id, randomNumber FROM world WHERE id = " + str(rp())).fetchone()
worlds.append({'id': result[0], 'randomNumber': result[1]})
finally:
connection.close()
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/fortune")
def fortune_orm(db):
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
fortunes=db.query(Fortune).all()
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return template('fortune-obj', fortunes=fortunes)
@app.route("/raw-fortune")
def fortune_raw():
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
connection = raw_engine.connect()
try:
fortunes=[(f.id, f.message) for f in connection.execute("SELECT * FROM Fortune")]
fortunes.append((0, u'Additional fortune added at request time.'))
fortunes=sorted(fortunes, key=itemgetter(1))
finally:
connection.close()
return template('fortune', fortunes=fortunes)
@app.route("/updates")
def updates(db):
"""Test 5: Database Updates"""
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
worlds = []
rp = partial(randint, 1, 10000)
ids = [rp() for _ in xrange(num_queries)]
ids.sort() # To avoid deadlock
for id in ids:
world = db.query(World).get(id)
world.randomNumber = rp()
worlds.append(world.serialize())
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/raw-updates")
def raw_updates():
"""Test 5: Database Updates"""
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
conn = raw_engine.connect()
worlds = []
rp = partial(randint, 1, 10000)
for i in xrange(num_queries):
world = conn.execute("SELECT * FROM World WHERE id=%s", (rp(),)).fetchone()
randomNumber = rp()
worlds.append({'id': world['id'], 'randomNumber': randomNumber})
conn.execute("UPDATE World SET randomNumber=%s WHERE id=%s",
(randomNumber, world['id']))
conn.close()
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route('/plaintext')
def plaintext():
"""Test 6: Plaintext"""
response.headers['Date'] = formatdate(timeval=None, localtime=False, usegmt=True)
response.content_type = 'text/plain'
return b'Hello, World!'
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False)
| {
"repo_name": "actframework/FrameworkBenchmarks",
"path": "frameworks/Python/bottle/app.py",
"copies": "22",
"size": "6537",
"license": "bsd-3-clause",
"hash": -8701602800693640000,
"line_mean": 30.7330097087,
"line_max": 112,
"alpha_frac": 0.6565702922,
"autogenerated": false,
"ratio": 3.438716465018411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import attrgetter, itemgetter
from random import randint
import os
import sys
from bottle import Bottle, route, request, run, template, response
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine, Column, Integer, Unicode
from sqlalchemy.ext.declarative import declarative_base
# Prefer ujson for speed; fall back to the stdlib json module.
try:
    import ujson as json
except ImportError:
    import json
# Python 2/3 shim: the handlers below iterate with xrange.
if sys.version_info[0] == 3:
    xrange = range
# PyPy lacks the C MySQLdb driver, so use the pure-Python pymysql there.
_is_pypy = hasattr(sys, 'pypy_version_info')
DBDRIVER = 'mysql+pymysql' if _is_pypy else 'mysql'
DBHOSTNAME = os.environ.get('DBHOST', 'localhost')
DATABASE_URI = '%s://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % (DBDRIVER, DBHOSTNAME)
app = Bottle()
Base = declarative_base()
# ORM engine, injected into handlers as the `db` keyword by the plugin.
db_engine = create_engine(DATABASE_URI)
plugin = sqlalchemy.Plugin(db_engine, keyword='db')
app.install(plugin)
# Engine for raw operation. Use autocommit.
raw_engine = create_engine(DATABASE_URI,
                           connect_args={'autocommit': True},
                           pool_reset_on_return=None)
class World(Base):
    """ORM mapping for the benchmark World table (id, randomNumber)."""
    __tablename__ = "World"
    id = Column(Integer, primary_key=True)
    randomNumber = Column(Integer)
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id': self.id,
            'randomNumber': self.randomNumber,
        }
class Fortune(Base):
    """ORM mapping for the benchmark Fortune table (id, message)."""
    __tablename__ = "Fortune"
    id = Column(Integer, primary_key=True)
    message = Column(Unicode)
@app.route("/json")
def hello():
response.content_type = 'application/json'
resp = {"message": "Hello, World!"}
return json.dumps(resp)
@app.route("/db")
def get_random_world_single(db):
"""Test Type 2: Single Database Query"""
wid = randint(1, 10000)
world = db.query(World).get(wid).serialize()
response.content_type = 'application/json'
return json.dumps(world)
@app.route("/raw-db")
def get_random_world_single_raw():
connection = raw_engine.connect()
wid = randint(1, 10000)
try:
result = connection.execute("SELECT id, randomNumber FROM world WHERE id = " + str(wid)).fetchone()
world = {'id': result[0], 'randomNumber': result[1]}
response.content_type = 'application/json'
return json.dumps(world)
finally:
connection.close()
@app.route("/queries")
def get_random_world(db):
"""Test Type 3: Multiple database queries"""
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
rp = partial(randint, 1, 10000)
get = db.query(World).get
worlds = [get(rp()).serialize() for _ in xrange(num_queries)]
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/raw-queries")
def get_random_world_raw():
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
worlds = []
rp = partial(randint, 1, 10000)
connection = raw_engine.connect()
try:
for i in xrange(num_queries):
result = connection.execute("SELECT id, randomNumber FROM world WHERE id = " + str(rp())).fetchone()
worlds.append({'id': result[0], 'randomNumber': result[1]})
finally:
connection.close()
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/fortune")
def fortune_orm(db):
fortunes=db.query(Fortune).all()
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return template('fortune-obj', fortunes=fortunes)
@app.route("/raw-fortune")
def fortune_raw():
connection = raw_engine.connect()
try:
fortunes=[(f.id, f.message) for f in connection.execute("SELECT * FROM Fortune")]
fortunes.append((0, u'Additional fortune added at request time.'))
fortunes=sorted(fortunes, key=itemgetter(1))
finally:
connection.close()
return template('fortune', fortunes=fortunes)
@app.route("/updates")
def updates(db):
"""Test 5: Database Updates"""
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
worlds = []
rp = partial(randint, 1, 10000)
ids = [rp() for _ in xrange(num_queries)]
ids.sort() # To avoid deadlock
for id in ids:
world = db.query(World).get(id)
world.randomNumber = rp()
worlds.append(world.serialize())
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route("/raw-updates")
def raw_updates():
"""Test 5: Database Updates"""
num_queries = request.query.get('queries', 1, type=int)
if num_queries < 1:
num_queries = 1
if num_queries > 500:
num_queries = 500
conn = raw_engine.connect()
worlds = []
rp = partial(randint, 1, 10000)
for i in xrange(num_queries):
world = conn.execute("SELECT * FROM World WHERE id=%s", (rp(),)).fetchone()
randomNumber = rp()
worlds.append({'id': world['id'], 'randomNumber': randomNumber})
conn.execute("UPDATE World SET randomNumber=%s WHERE id=%s",
(randomNumber, world['id']))
conn.close()
response.content_type = 'application/json'
return json.dumps(worlds)
@app.route('/plaintext')
def plaintext():
"""Test 6: Plaintext"""
response.content_type = 'text/plain'
return b'Hello, World!'
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False)
| {
"repo_name": "martin-g/FrameworkBenchmarks",
"path": "frameworks/Python/bottle/app.py",
"copies": "58",
"size": "5667",
"license": "bsd-3-clause",
"hash": -8013717442965961000,
"line_mean": 28.0615384615,
"line_max": 112,
"alpha_frac": 0.6419622375,
"autogenerated": false,
"ratio": 3.457596095179988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002829553244289118,
"num_lines": 195
} |
from functools import partial
from operator import attrgetter
import os
from random import randint
import sys
import json
from jinja2 import Environment, PackageLoader
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from tg import expose, TGController, AppConfig
from models.Fortune import Fortune
from models.World import World
# Database location; DBHOST lets the benchmark point at a remote DB.
DBDRIVER = 'mysql'
DBHOSTNAME = os.environ.get('DBHOST', 'localhost')
DATABASE_URI = '%s://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % (DBDRIVER, DBHOSTNAME)
db_engine = create_engine(DATABASE_URI)
Session = sessionmaker(bind=db_engine)
# One module-level session shared by all requests; the WSGI wrapper
# at the bottom of this file closes it after each request.
db_session = Session()
# Jinja environment for the fortunes template; auto_reload disabled
# for benchmark performance.
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True, auto_reload=False)
def getQueryNum(queryString):
    """Coerce a query-count parameter to an int clamped to [1, 500].

    Non-numeric input falls back to 1, per the benchmark spec.
    """
    try:
        requested = int(queryString)
    except ValueError:
        return 1
    return min(max(requested, 1), 500)
class RootController(TGController):
    """TurboGears controller exposing the TFB benchmark endpoints.

    NOTE(review): the query/update handlers use ``xrange`` but this
    module defines no Python 3 shim for it -- presumably Python 2 only;
    confirm the deployment interpreter.
    """
    @expose(content_type="text/plain")
    def plaintext(self):
        """Test 6: plaintext."""
        return "Hello, World!"
    @expose("json")
    def json(self):
        """Test 1: JSON serialization."""
        return {"message": "Hello, World!"}
    @expose("json")
    def db(self):
        """Test 2: single random World row."""
        wid = randint(1, 10000)
        world = db_session.query(World).get(wid).serialize()
        return world
    @expose("json")
    def updates(self, queries=1):
        """Test 5: read N random rows, write back new random numbers."""
        num_queries = getQueryNum(queries)
        worlds = []
        rp = partial(randint, 1, 10000)
        ids = [rp() for _ in xrange(num_queries)]
        # Sorted ids avoid deadlocks between concurrent update requests.
        ids.sort()
        for id in ids:
            world = db_session.query(World).get(id)
            world.randomNumber = rp()
            worlds.append(world.serialize())
        db_session.commit()
        return json.dumps(worlds)
    @expose("json")
    def queries(self, queries=1):
        """Test 3: N independent single-row queries."""
        num_queries = getQueryNum(queries)
        rp = partial(randint, 1, 10000)
        get = db_session.query(World).get
        worlds = [get(rp()).serialize() for _ in xrange(num_queries)]
        return json.dumps(worlds)
    @expose()
    def fortune(self):
        """Test 4: fortunes rendered as HTML, sorted by message."""
        fortunes = db_session.query(Fortune).all()
        # The extra fortune must be appended before sorting, per the spec.
        fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
        fortunes.sort(key=attrgetter("message"))
        template = env.get_template("fortunes.html")
        return template.render(fortunes=fortunes)
# Minimal TurboGears app wired to the controller above; the jinja
# renderer is registered for the fortunes template.
config = AppConfig(minimal=True, root_controller=RootController())
config.renderers.append("jinja")
tg_app = config.make_wsgi_app()
def app(env, start):
    """WSGI entry point: delegate to the TurboGears app, always closing
    the shared module-level session so its connection is released."""
    try:
        result = tg_app(env, start)
    finally:
        db_session.close()
    return result
| {
"repo_name": "jebbstewart/FrameworkBenchmarks",
"path": "frameworks/Python/turbogears/app.py",
"copies": "51",
"size": "2684",
"license": "bsd-3-clause",
"hash": 4396387257624442000,
"line_mean": 27.5531914894,
"line_max": 111,
"alpha_frac": 0.6516393443,
"autogenerated": false,
"ratio": 3.656675749318801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import eq, ne
IRRELEVANT = object()
class ChangeWatcher(object):
    """Context manager asserting how ``check(*args, **kwargs)`` changes
    across the ``with`` block.

    ``comparator(before, after)`` decides the postcondition: ``ne``
    demands the value changed, ``eq`` demands it did not. Optional
    ``before=``/``after=`` kwargs pin the exact values at entry/exit.
    """
    # Failure messages keyed by comparator (or 'invalid' for a wrong
    # after-value); formatted with this instance's attributes.
    POSTCONDITION_FAILURE_MESSAGE = {
        ne: 'Value did not change',
        eq: 'Value changed from {before} to {after}',
        'invalid': 'Value changed to {after}, not {expected_after}'
    }
    def __init__(self, comparator, check, *args, **kwargs):
        self.check = check
        self.comparator = comparator
        self.args = args
        self.kwargs = kwargs
        # pop() also removes these keys from self.kwargs (same dict), so
        # they are not forwarded to check().
        self.expected_before = kwargs.pop('before', IRRELEVANT)
        self.expected_after = kwargs.pop('after', IRRELEVANT)
    def __enter__(self):
        self.before = self.__apply()
        if not self.expected_before is IRRELEVANT:
            # NOTE(review): `assert not comparator(...)` reads correctly
            # for the ne comparator (requires before == expected_before),
            # but for eq it would reject a matching before= value --
            # confirm that an explicit before= is only meant for
            # assertChanges.
            check = self.comparator(self.before, self.expected_before)
            message = "Value before is {before}, not {expected_before}"
            assert not check, message.format(**vars(self))
    def __exit__(self, exec_type, exec_value, traceback):
        if exec_type is not None:
            return False  # reraises original exception
        self.after = self.__apply()
        met_precondition = self.comparator(self.before, self.after)
        after_value_matches = self.after == self.expected_after
        # Changed when it wasn't supposed to, or, didn't change when it was
        if not met_precondition:
            self.__raise_postcondition_error(self.comparator)
        # Do care about the after value, but it wasn't equal
        elif self.expected_after is not IRRELEVANT and not after_value_matches:
            self.__raise_postcondition_error('invalid')
    def __apply(self):
        # Re-evaluate the watched expression with the stored arguments.
        return self.check(*self.args, **self.kwargs)
    def __raise_postcondition_error(self, key):
        message = self.POSTCONDITION_FAILURE_MESSAGE[key]
        raise AssertionError(message.format(**vars(self)))
class AssertsMixin(object):
    """Mixin providing change-watching assertions for test cases, e.g.:

        with self.assertChanges(get_count, before=0, after=1):
            increment()
    """
    assertChanges = partial(ChangeWatcher, ne)
    assertDoesNotChange = partial(
        ChangeWatcher,
        eq,
        before=IRRELEVANT,
        after=IRRELEVANT
    )
| {
"repo_name": "gterzian/exam",
"path": "exam/asserts.py",
"copies": "3",
"size": "2077",
"license": "mit",
"hash": -5710107327512687000,
"line_mean": 30.4696969697,
"line_max": 79,
"alpha_frac": 0.6340876264,
"autogenerated": false,
"ratio": 3.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00029708853238265005,
"num_lines": 66
} |
from functools import partial
from operator import eq
from typing import Any
import pytest # type: ignore
from basic_utils.dict_helpers import (
filter_keys,
filter_values,
get_in_dict,
get_keys,
prune_dict,
set_in_dict
)
from basic_utils.primitives import even
@pytest.mark.parametrize("data, keys, expected", [
({'a': {'b': {'c': 3}}}, 'a', {'b': {'c': 3}}),
({'a': {'b': {'c': 3}}}, ('a', 'b', 'c'), 3)
])
def test_get_in_dict(data: Any, keys: Any, expected: Any) -> None:
assert get_in_dict(data, keys) == expected
@pytest.mark.parametrize("keys, expected, default", [
(("a", "b"), (1, 2), None),
(("a", "b", "x"), (1, 2, None), None),
(("a", "b", "x"), (1, 2, "Missing"), "Missing"),
])
def test_get_keys(keys: Any, expected: Any, default: Any) -> None:
assert get_keys({"a": 1, "b": 2}, keys, default) == expected
@pytest.mark.parametrize("args, expected", [
(({'Homer': 39, 'Bart': 10}, lambda x: x < 20), {'Bart': 10}),
(({'Homer': 39, 'Bart': 10}, partial(eq, 10)), {'Bart': 10}),
(({'a': 1, 'b': 2, 'c': 3, 'd': 4}, even), {'b': 2, 'd': 4}),
])
def test_filter_values(args: Any, expected: Any) -> None:
assert filter_values(*args) == expected
@pytest.mark.parametrize("args, expected", [
(({'Lisa': 8, 'Marge': 36}, lambda x: len(x) > 4), {'Marge': 36}),
(({'Homer': 39, 'Bart': 10}, partial(eq, 'Bart')), {'Bart': 10}),
])
def test_filter_keys(args: Any, expected: Any) -> None:
assert filter_keys(*args) == expected
@pytest.mark.parametrize("args, expected", [
(({'a': None, 'b': 2, 'c': False},), {'b': 2}),
(({'a': [], 'b': 2, 'c': {}},), {'b': 2})
])
def test_prune_dict(args: Any, expected: Any) -> None:
assert prune_dict(*args) == expected
@pytest.mark.parametrize("data, keys, value, expected", [
    ({'a': {'b': {'c': 3}}}, 'a', 20, {'a': 20}),
    ({'a': {'b': {'c': 3}}}, ('a', 'b'), 5, {'a': {'b': 5}}),
    ({'a': {'b': {'c': 3}}}, ('a', 'b', 'c'), 5, {'a': {'b': {'c': 5}}})
])
def test_set_in_dict(data: Any, keys: Any, value: Any, expected: Any) -> None:
    """set_in_dict mutates *data* in place at the given key or key-path."""
    set_in_dict(data, keys, value)
    assert data == expected
| {
"repo_name": "Jackevansevo/basic-utils",
"path": "tests/test_dict_helpers.py",
"copies": "1",
"size": "2153",
"license": "mit",
"hash": -2518121011230166000,
"line_mean": 31.1343283582,
"line_max": 78,
"alpha_frac": 0.5234556433,
"autogenerated": false,
"ratio": 2.7496807151979565,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3773136358497956,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import getitem, setitem
from typing import Any, Callable, NamedTuple, Optional
class Field(NamedTuple):
    """Describes how to read and write one value on a container.

    ``getter``/``setter`` access ``source`` on the container (e.g.
    ``operator.getitem``/``setitem`` for dicts, ``getattr``/``setattr``
    for objects).  ``loader``/``dumper`` convert the value on the way
    out/in, and a nested ``field`` delegates into a sub-container.
    """
    source: Optional[str] = None
    field: Optional['Field'] = None
    getter: Optional[Callable] = None
    setter: Optional[Callable] = None
    loader: Callable = lambda x: x
    dumper: Callable = lambda x: x

    def load(self, instance):
        """Fetch ``source`` from *instance*, recurse into the nested field, then convert."""
        raw = self.getter(instance, self.source)
        if self.field:
            raw = self.field.load(raw)
        return self.loader(raw)

    def dump(self, instance, value):
        """Convert *value* with the dumper and write it back onto *instance*."""
        converted = self.dumper(value)
        if self.field:
            container = self.getter(instance, self.source)
            self.field.dump(container, converted)
        else:
            self.setter(instance, self.source, converted)
# Convenience constructors: a field backed by mapping items vs. object attributes.
item = partial(Field, getter=getitem, setter=setitem)
attr = partial(Field, getter=getattr, setter=setattr)


class Map:
    """A named collection of Fields that loads/dumps a flat dict of values."""

    def __init__(self, _default=item, **fields):
        # Bare strings are shorthand: wrap them with the default constructor.
        normalized = {}
        for name, spec in fields.items():
            normalized[name] = _default(spec) if isinstance(spec, str) else spec
        self.fields = normalized

    def load(self, instance):
        """Return ``{name: field.load(instance)}`` for every configured field."""
        return {name: spec.load(instance) for name, spec in self.fields.items()}

    def dump(self, instance, value):
        """Write ``value[name]`` back through each field onto *instance*."""
        for name, spec in self.fields.items():
            spec.dump(instance, value[name])
| {
"repo_name": "c6401/python-blocks",
"path": "blocks/mapping.py",
"copies": "1",
"size": "1461",
"license": "mit",
"hash": -9164484323475303000,
"line_mean": 26.5660377358,
"line_max": 70,
"alpha_frac": 0.5920602327,
"autogenerated": false,
"ratio": 4.115492957746479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5207553190446479,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import itemgetter
from ubuntui.ev import EventLoop
from ubuntui.utils import Color, Padding
from ubuntui.widgets.buttons import menu_btn
from urwid import Columns, Filler, Frame, Pile, Text, WidgetWrap
class DestroyView(WidgetWrap):
    """Urwid view listing models per controller so one can be picked for destruction.

    ``models`` is a mapping of controller name -> {'models': [model dicts]};
    ``cb`` is invoked with (controller, model) when a model row is chosen.
    """
    def __init__(self, app, models, cb):
        self.app = app
        self.cb = cb
        self.controllers = models.keys()
        self.models = models
        self.config = self.app.config
        # Tracks whether keyboard focus is on the footer button pile.
        self.buttons_pile_selected = False
        self.frame = Frame(body=self._build_widget(),
                           footer=self._build_footer())
        super().__init__(self.frame)
    def keypress(self, size, key):
        """Toggle body/footer focus on (shift-)tab, then defer to urwid."""
        if key in ['tab', 'shift tab']:
            self._swap_focus()
        return super().keypress(size, key)
    def _swap_focus(self):
        # Flip focus between the model list (body) and the QUIT button (footer).
        if not self.buttons_pile_selected:
            self.buttons_pile_selected = True
            self.frame.focus_position = 'footer'
            self.buttons_pile.focus_position = 1
        else:
            self.buttons_pile_selected = False
            self.frame.focus_position = 'body'
    def _build_buttons(self):
        """Build (and remember) the footer button pile holding the QUIT button."""
        cancel = menu_btn(on_press=self.cancel,
                          label="\n QUIT\n")
        buttons = [
            Padding.line_break(""),
            Color.menu_button(cancel,
                              focus_map='button_primary focus'),
        ]
        self.buttons_pile = Pile(buttons)
        return self.buttons_pile
    def _build_footer(self):
        """Compose the footer row: a spacer column plus the button pile."""
        footer_pile = Pile([
            Padding.line_break(""),
            Color.frame_footer(
                Columns([
                    ('fixed', 2, Text("")),
                    ('fixed', 13, self._build_buttons())
                ]))
        ])
        return footer_pile
    def _total_machines(self, model):
        """ Returns total machines in model
        """
        machines = model.get('machines', None)
        if machines is None:
            return 0
        return len(machines.keys())
    def _build_widget(self):
        """Build the scrollable body: a header per controller, a button per model.

        Skips the 'controller' model itself and models already dying.
        """
        total_items = []
        for controller in sorted(self.controllers):
            models = self.models[controller]['models']
            if len(models) > 0:
                # Header line: controller name plus the cloud of its first model.
                total_items.append(Color.label(
                    Text("{} ({})".format(controller,
                                          models[0].get('cloud', "")))
                ))
            for model in sorted(models, key=itemgetter('name')):
                if model['name'] == "controller":
                    continue
                if model['life'] == 'dying':
                    continue
                label = "  {}, Machine Count: {}{}".format(
                    model['name'],
                    self._total_machines(model),
                    ", Running since: {}".format(
                        model['status'].get('since'))
                    if 'since' in model['status'] else '')
                total_items.append(
                    Color.body(
                        menu_btn(label=label,
                                 on_press=partial(self.submit,
                                                  controller,
                                                  model)),
                        focus_map='menu_button focus'
                    ))
            total_items.append(Padding.line_break(""))
            total_items.append(Padding.line_break(""))
        return Padding.center_80(Filler(Pile(total_items), valign='top'))
    def submit(self, controller, model, btn):
        """Button handler: hand the chosen (controller, model) to the callback."""
        self.cb(controller, model)
    def cancel(self, btn):
        """Button handler: quit the application event loop."""
        EventLoop.exit(0)
| {
"repo_name": "ubuntu/conjure-up",
"path": "conjureup/ui/views/destroy.py",
"copies": "3",
"size": "3733",
"license": "mit",
"hash": 7332558836088061000,
"line_mean": 34.8942307692,
"line_max": 73,
"alpha_frac": 0.4813822663,
"autogenerated": false,
"ratio": 4.61433868974042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 104
} |
from functools import partial
from operator import methodcaller
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.query import ModelIterable
# Lazily map a model iterator through each instance's ``type_cast()``; the
# *_prefetch_* variant also forwards prefetched related objects into the cast.
type_cast_iterator = partial(map, methodcaller('type_cast'))
type_cast_prefetch_iterator = partial(map, methodcaller('type_cast', with_prefetched_objects=True))
class PolymorphicModelIterable(ModelIterable):
    """ModelIterable that can type-cast each row to its concrete subclass.

    Casting is on by default; ``type_cast=False`` yields rows untouched.
    """

    def __init__(self, queryset, type_cast=True, **kwargs):
        self.type_cast = type_cast
        super().__init__(queryset, **kwargs)

    def __iter__(self):
        rows = super().__iter__()
        return type_cast_iterator(rows) if self.type_cast else rows
class PolymorphicQuerySet(models.query.QuerySet):
    """QuerySet whose rows can be type-cast to their concrete subclasses."""
    def select_subclasses(self, *models):
        """Return a queryset that yields instances of the given subclasses.

        With no arguments all subclasses are selected; otherwise results are
        restricted (via content-type filtering) to the listed subclasses.
        Required ``select_related`` lookups are collected so casting does
        not trigger extra queries.

        :raises TypeError: if a passed model is not a subclass of this one.
        """
        if issubclass(self._iterable_class, ModelIterable):
            self._iterable_class = PolymorphicModelIterable
        related_lookups = set()
        accessors = self.model.subclass_accessors
        if models:
            subclasses = set()
            for model in models:
                if not issubclass(model, self.model):
                    raise TypeError(
                        "%r is not a subclass of %r" % (model, self.model)
                    )
                subclasses.update(model.subclass_accessors)
            # Collect all `select_related` required lookups
            for subclass in subclasses:
                # Avoid collecting ourself and proxy subclasses
                related_lookup = accessors[subclass].related_lookup
                if related_lookup:
                    related_lookups.add(related_lookup)
            queryset = self.filter(
                **self.model.content_type_lookup(*tuple(subclasses))
            )
        else:
            # Collect all `select_related` required relateds
            for accessor in accessors.values():
                # Avoid collecting ourself and proxy subclasses
                related_lookup = accessor.related_lookup
                if related_lookup:
                    related_lookups.add(related_lookup)
            queryset = self
        if related_lookups:
            queryset = queryset.select_related(*related_lookups)
        return queryset
    def exclude_subclasses(self):
        """Return a queryset restricted to instances of this exact model."""
        return self.filter(**self.model.content_type_lookup())
    def _fetch_all(self):
        # Override _fetch_all in order to disable PolymorphicModelIterable's
        # type casting when prefetch_related is used because the latter might
        # crash or disfunction when dealing with a mixed set of objects.
        # When prefetching is pending, casting is deferred until after
        # _prefetch_related_objects() has run, then applied in one pass.
        prefetch_related_objects = self._prefetch_related_lookups and not self._prefetch_done
        type_cast = False
        if self._result_cache is None:
            iterable_class = self._iterable_class
            if issubclass(iterable_class, PolymorphicModelIterable):
                type_cast = bool(prefetch_related_objects)
                iterable_class = partial(iterable_class, type_cast=not type_cast)
            self._result_cache = list(iterable_class(self))
        if prefetch_related_objects:
            self._prefetch_related_objects()
        if type_cast:
            self._result_cache = list(type_cast_prefetch_iterator(self._result_cache))
class PolymorphicManager(models.Manager.from_queryset(PolymorphicQuerySet)):
    """Manager restricted to BasePolymorphicModel subclasses."""
    def contribute_to_class(self, model, name):
        """Attach to *model*, refusing non-polymorphic models.

        :raises ImproperlyConfigured: if *model* is not a
            ``BasePolymorphicModel`` subclass.
        """
        # Avoid circular reference
        from .models import BasePolymorphicModel
        if not issubclass(model, BasePolymorphicModel):
            raise ImproperlyConfigured(
                '`%s` can only be used on '
                '`BasePolymorphicModel` subclasses.' % self.__class__.__name__
            )
        return super().contribute_to_class(model, name)
    def get_queryset(self):
        """Return the base queryset; proxies are narrowed to their own subtree."""
        queryset = super().get_queryset()
        model = self.model
        if model._meta.proxy:
            # Select only associated model and its subclasses.
            queryset = queryset.filter(**self.model.subclasses_lookup())
        return queryset
| {
"repo_name": "charettes/django-polymodels",
"path": "polymodels/managers.py",
"copies": "1",
"size": "4084",
"license": "mit",
"hash": -5534211559819731000,
"line_mean": 41.1030927835,
"line_max": 99,
"alpha_frac": 0.624632713,
"autogenerated": false,
"ratio": 4.715935334872979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5840568047872979,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from operator import methodcaller
try:
    # Python 2: prefer the lazy itertools variants. Note `chain` imports
    # successfully on both versions before `ifilter` raises on Python 3.
    from itertools import chain
    from itertools import ifilter
    from itertools import imap
except ImportError:
    # Python 3: map/filter are already lazy; alias them to the py2 names.
    imap = map
    ifilter = filter
import os
from iter_karld_tools import i_batch
from iter_karld_tools import yield_nth_of
from karld.loadump import ensure_dir
from karld.loadump import i_get_csv_data
from karld.loadump import i_walk_dir_for_filepaths_names
from karld.loadump import i_read_buffered_binary_file
from karld.loadump import write_as_csv
def csv_file_consumer(csv_rows_consumer, file_path_name):
    """
    Read the csv file described by ``file_path_name`` and feed its rows
    through ``csv_rows_consumer``, returning whatever it produces.

    :param csv_rows_consumer: consumes data_items yielding collection for each
    :type csv_rows_consumer: callable
    :param file_path_name: (path, basename) tuple of the input csv file
    :type file_path_name: str, str
    """
    # Only the path component is needed here; the basename is ignored.
    path = file_path_name[0]
    return csv_rows_consumer(i_get_csv_data(path))
def csv_file_to_file(csv_rows_consumer, out_prefix, out_dir, file_path_name):
    """
    Run the csv file at ``file_path_name`` through ``csv_rows_consumer``
    and write the result as csv into ``out_dir``, named after the input
    file (lower-cased) with ``out_prefix`` prepended.

    :param csv_rows_consumer: consumes data_items yielding collection for each
    :type csv_rows_consumer: callable
    :param out_prefix: prefix for the output file name
    :type out_prefix: str
    :param out_dir: directory to write the output file to
    :type out_dir: str
    :param file_path_name: (path, basename) tuple of the input csv file
    :type file_path_name: str, str
    """
    in_path, in_name = file_path_name
    ensure_dir(out_dir)
    destination = os.path.join(
        out_dir, '{}{}'.format(out_prefix, in_name.lower()))
    rows = i_get_csv_data(in_path)
    write_as_csv(csv_rows_consumer(rows), destination)
def multi_in_single_out(rows_reader,
                        rows_writer,
                        rows_iter_consumer,
                        out_url,
                        in_urls_func):
    """
    Combine several inputs into a single output.

    Each url from ``in_urls_func()`` is opened lazily with ``rows_reader``;
    the resulting iterator-of-iterators is folded by ``rows_iter_consumer``
    and the combined rows handed to ``rows_writer``.

    :param rows_reader: function that takes a file path and returns an iterator
    :param rows_writer: function that writes values to ``out_url``
    :param rows_iter_consumer: function taking an iter. of iterators, returns iter.
    :param out_url: url for the rows_writer to write to.
    :param in_urls_func: function generating an iterator of input urls.
    """
    readers = map(rows_reader, in_urls_func())
    rows_writer(rows_iter_consumer(readers), out_url)
def csv_files_to_file(csv_rows_consumer,
                      out_prefix,
                      out_dir,
                      out_file_name,
                      file_path_names):
    """
    Combine several input csv files through ``csv_rows_consumer`` into one
    csv file in ``out_dir`` named ``out_prefix`` + lower-cased
    ``out_file_name``.

    :param csv_rows_consumer: consumes data_items yielding collection for each
    :param out_prefix: prefix for the output file name
    :type out_prefix: str
    :param out_dir: directory to write the output file to
    :type out_dir: str
    :param out_file_name: output file base name
    :type out_file_name: str
    :param file_path_names: tuples of (path, basename) of input csv files
    :type file_path_names: str, str
    """
    ensure_dir(out_dir)
    destination = os.path.join(
        out_dir, '{}{}'.format(out_prefix, out_file_name.lower()))
    # Lazily yield just the path element (index 0) of every tuple.
    paths_only = partial(yield_nth_of, 0, file_path_names)
    multi_in_single_out(i_get_csv_data,
                        write_as_csv,
                        csv_rows_consumer,
                        destination,
                        paths_only)
def pool_run_files_to_files(file_to_file, in_dir, filter_func=None):
    """
    Map every file found under ``in_dir`` over ``file_to_file`` using a
    multi-process pool.

    :param file_to_file: callable that takes (path, basename) tuples.
    :param in_dir: path to process all files from.
    :param filter_func: takes a (path, basename) tuple and returns a bool.
    :returns: a list of return values from the map.
    """
    from concurrent.futures import ProcessPoolExecutor

    candidates = i_walk_dir_for_filepaths_names(in_dir)
    if filter_func:
        candidates = ifilter(filter_func, candidates)
    with ProcessPoolExecutor() as pool:
        return list(pool.map(file_to_file, candidates))
def distribute_run_to_runners(items_func, in_url, reader=None, batch_size=1100):
    """
    Feed batches of items read from one url to ``items_func`` across a
    multi-process pool.

    The reader callable should be as fast as possible so the feeder
    process stays cheap; leave any decoding to ``items_func``.

    :param items_func: callable that takes multiple items of the data.
    :param reader: URL reader callable (defaults to buffered binary reads).
    :param in_url: url of the content.
    :param batch_size: size of each batch.
    """
    from concurrent.futures import ProcessPoolExecutor

    read = reader if reader else i_read_buffered_binary_file
    batches = i_batch(batch_size, read(in_url))
    with ProcessPoolExecutor() as pool:
        return list(pool.map(items_func, batches))
def distribute_multi_run_to_runners(items_func, in_dir,
                                    reader=None,
                                    walker=None,
                                    batch_size=1100,
                                    filter_func=None):
    """
    With a multi-process pool, map batches of items from
    multiple files to an items processing function.
    The reader callable should be as fast as possible to
    reduce data feeder cpu usage. It should do the minimal
    to produce discrete units of data, save any decoding
    for the items function.
    :param items_func: Callable that takes multiple items of the data.
    :param reader: URL reader callable.
    :param walker: A generator that takes the in_dir URL and emits
     url, name tuples.
    :param batch_size: size of batches.
    :param filter_func: a function that returns True for desired paths names.
    """
    from concurrent.futures import ProcessPoolExecutor
    from multiprocessing import cpu_count
    if not reader:
        reader = i_read_buffered_binary_file
    if not walker:
        walker = i_walk_dir_for_filepaths_names
    paths_names = walker(in_dir)
    if filter_func:
        paths_names_final = ifilter(filter_func, paths_names)
    else:
        paths_names_final = paths_names
    # Lazily concatenate the item streams of all selected files.
    stream = chain.from_iterable(
        (reader(in_url) for in_url, name in paths_names_final))
    batches = i_batch(batch_size, stream)
    n_cpus = cpu_count()
    max_workers = (n_cpus-1) or 1
    max_in_queue = int(n_cpus * 1.5)
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        futures = []
        while True:
            # NOTE(review): `_pending_work_items` is a private attribute of
            # ProcessPoolExecutor and may change between Python versions.
            # Also, while the queue is full this loop busy-spins at 100% CPU
            # (no sleep) until workers drain it — verify this is acceptable.
            if len(pool._pending_work_items) < max_in_queue:
                try:
                    batch = next(batches)
                    futures.append(pool.submit(items_func, batch))
                except StopIteration:
                    break
        def results():
            """Generator that yield results of futures
            that are done. If not done yet, it skips it.
            """
            # Repeatedly polls the futures list, yielding and removing each
            # completed future; results are therefore in completion order.
            while futures:
                for index, future in enumerate(futures):
                    if future.done():
                        yield future.result()
                        del futures[index]
                        break
        # Returning from inside the `with` runs pool shutdown first, so all
        # submitted work has completed by the time results() is consumed.
        return results()
def serial_run_files_to_files(file_to_file, in_dir, filter_func=None):
    """
    Map every file under ``in_dir`` over ``file_to_file`` serially.

    Handy for debugging ``file_to_file`` without multi-process noise.

    :param file_to_file: callable that takes (path, basename) tuples.
    :param in_dir: path to process all files from.
    :param filter_func: takes a (path, basename) tuple and returns a bool.
    :returns: a list of return values from the map.
    """
    found = i_walk_dir_for_filepaths_names(in_dir)
    if filter_func:
        found = ifilter(filter_func, found)
    return list(map(file_to_file, found))
| {
"repo_name": "johnwlockwood/karl_data",
"path": "karld/run_together.py",
"copies": "1",
"size": "8490",
"license": "apache-2.0",
"hash": 964248192798779400,
"line_mean": 32.557312253,
"line_max": 80,
"alpha_frac": 0.6389870436,
"autogenerated": false,
"ratio": 3.8277727682596936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49667598118596934,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from ophyd.sim import motor1, motor2
# NOTE(review): relies on the IPython profile's global RunEngine `RE` —
# confirm this script is only loaded inside the beamline profile.
RE.clear_suspenders()
# here is what we used to figure out how to prime the detector before we
# stripped it down
def hf2dxrf_test(*, xstart, xnumstep, xstepsize,
            ystart, ynumstep, ystepsize,
            #wait=None, simulate=False, checkbeam = False, checkcryo = False, #need to add these features
            shutter = True, align = False, xmotor = hf_stage.x, ymotor = hf_stage.y,
            acqtime, numrois=1, i0map_show=True, itmap_show=False, record_cryo = False,
            dpc = None, e_tomo=None, struck = True, srecord = None,
            setenergy=None, u_detune=None, echange_waittime=10,samplename=None):
    '''
    Bluesky plan (generator): configure the detector and run a 2-D XRF
    raster as an outer_product_scan. Depends on beamline profile globals
    (xs, sclr1, current_preamp, energy, bp, outer_product_scan, ...).

    input:
        xstart, xnumstep, xstepsize (float)
        ystart, ynumstep, ystepsize (float)
        acqtime (float): acqusition time to be set for both xspress3 and F460
        numrois (integer): number of ROIs set to display in the live raster scans. This is for display ONLY.
                           The actualy number of ROIs saved depend on how many are enabled and set in the read_attr
                           However noramlly one cares only the raw XRF spectra which are all saved and will be used for fitting.
        i0map_show (boolean): When set to True, map of the i0 will be displayed in live raster, default is True
        itmap_show (boolean): When set to True, map of the trasnmission diode will be displayed in the live raster, default is True
        energy (float): set energy, use with caution, hdcm might become misaligned
        u_detune (float): amount of undulator to detune in the unit of keV
    '''
    #record relevant meta data in the Start document, defined in 90-usersetup.py
    xs.external_trig.put(False)
    #setup the detector
    # TODO do this with configure
    # Clamp acquisition time to the hardware minimum of 1 ms.
    if acqtime < 0.001:
        acqtime = 0.001
    if struck == False:
        current_preamp.exp_time.put(acqtime)
    else:
        sclr1.preset_time.put(acqtime)
    xs.settings.acquire_time.put(acqtime)
    xs.total_points.put((xnumstep+1)*(ynumstep+1))
    xstop = xstart + xnumstep*xstepsize
    ystop = ystart + ynumstep*ystepsize
    #setup the plan
    #outer_product_scan(detectors, *args, pre_run=None, post_run=None)
    #outer_product_scan(detectors, motor1, start1, stop1, num1, motor2, start2, stop2, num2, snake2, pre_run=None, post_run=None)
    if setenergy is not None:
        if u_detune is not None:
            # TODO maybe do this with set
            energy.detune.put(u_detune)
        # TODO fix name shadowing
        print('changing energy to', setenergy)
        yield from bp.abs_set(energy, setenergy, wait=True)
        time.sleep(echange_waittime)
        print('waiting time (s)', echange_waittime)
    #TO-DO: implement fast shutter control (open)
    #TO-DO: implement suspender for all shutters in genral start up script
    # NOTE(review): finalize_scan is defined but never registered — presumably
    # meant to be wired up as a RunEngine subscription; confirm.
    def finalize_scan(name, doc):
        scanrecord.scanning.put(False)
    det = [xs]
    # Slow axis (ymotor) is outer, fast axis (xmotor) is inner, snaked.
    hf2dxrf_scanplan = outer_product_scan(det, ymotor, ystart, ystop, ynumstep+1, xmotor, xstart, xstop, xnumstep+1, True)
    scaninfo = yield from hf2dxrf_scanplan
    return scaninfo
def prime():
    '''
    Prime the xspress3 HDF5 plugin by taking one short internal-trigger
    acquisition, then restoring the previous detector settings.

    From : https://github.com/NSLS-II/ophyd/blob/master/ophyd/areadetector/plugins.py#L854
    Doesn't work for now
    '''
    set_and_wait(xs.hdf5.enable, 1)
    # Settings to apply for the priming exposure (ordered so they can be
    # restored in reverse).
    sigs = OrderedDict([(xs.settings.array_callbacks, 1),
                        (xs.settings.image_mode, 'Single'),
                        (xs.settings.trigger_mode, 'Internal'),
                        # just in case tha acquisition time is set very long...
                        (xs.settings.acquire_time , 1),
                        #(xs.settings.acquire_period, 1),
                        #(xs.settings.acquire, 1),
                        ])
    # Snapshot current values so they can be put back afterwards.
    original_vals = {sig: sig.get() for sig in sigs}
    RE(bp.count([xs]))
    for sig, val in sigs.items():
        ttime.sleep(0.1)  # abundance of caution
        set_and_wait(sig, val)
    ttime.sleep(2)  # wait for acquisition
    for sig, val in reversed(list(original_vals.items())):
        ttime.sleep(0.1)
        set_and_wait(sig, val)
# Simulated-motor test configuration for priming the detector.
xmotor = motor1
xmotor.velocity = Signal(name="xmotor_velocity")
# NOTE(review): ymotor is bound to motor1 although motor2 is imported and
# prime_plan below passes ymotor=motor2 — likely a typo; both velocity
# assignments therefore hit the same sim motor. Confirm intent.
ymotor = motor1
ymotor.velocity = Signal(name="ymotor_velocity")
# fast motor
xstart = 0
xnumstep= 1000
xstepsize = .1
# slow motor : dummy values
# ynum will be the number of times to move in slow axis
ystart = 0
ynumstep = 0
ystepsize = .1
# dwell time?
dwell = .01
acqtime = .001
# Pre-bound plan: a single fast-axis line (ynumstep=0) with 1001 x points.
prime_plan = partial(hf2dxrf_test, xstart=xstart, xnumstep=xnumstep, xstepsize=xstepsize,
            ystart=ystart, ynumstep=ynumstep, ystepsize=ystepsize,
            shutter = False, align = False, xmotor = motor1, ymotor = motor2,
            acqtime=acqtime, numrois=1, i0map_show=False, itmap_show=False, record_cryo = False,
            dpc = None, e_tomo=None, struck = True, srecord = None,
            setenergy=None, u_detune=None, echange_waittime=10,samplename=None)
| {
"repo_name": "NSLS-II-SRX/ipython_ophyd",
"path": "profile_xf05id1/tests/test_priming_old.py",
"copies": "1",
"size": "5070",
"license": "bsd-2-clause",
"hash": -7693613479316920000,
"line_mean": 36.8358208955,
"line_max": 134,
"alpha_frac": 0.63234714,
"autogenerated": false,
"ratio": 3.335526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4467873455789474,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import environ
from shlex import quote
from subprocess import Popen, PIPE
from logging import getLogger
from sys import stdout
from threading import Timer
from contextlib import contextmanager
from raptiformica.settings import conf
log = getLogger(__name__)
# Default ceiling (seconds) on how long a spawned command may run before
# the watchdog in terminate_on_timeout terminates and kills it.
COMMAND_TIMEOUT = 1200
def raise_failure_factory(message):
    """
    Build a failure callback that raises RuntimeError combining *message*
    with the command's standard error.

    :param str message: text to prefix the standard error with
    :return func raise_failure: callback taking a process output tuple
    """
    def raise_failure(process_output):
        standard_error = process_output[2]
        raise RuntimeError("{}\n{}".format(message, standard_error))
    return raise_failure
def log_failure_factory(message):
    """
    Build a failure callback that logs *message* plus the command's
    standard error at warning level.

    :param str message: text to log in combination with standard error
    :return func log_failure: callback taking a process output tuple
    """
    def log_failure(process_output):
        """
        Log a failure error message

        :param tuple process_output: printable process_output
        :return None:
        """
        standard_error = process_output[2]
        if not isinstance(standard_error, str):
            standard_error = standard_error.decode()
        log.warning("{}\n{}".format(message, standard_error))
    return log_failure
def log_success_factory(message):
    """
    Build a success callback that logs *message* together with the
    command's standard out at info level.

    :param str message: text to log in combination with standard out
    :return func log_success: callback taking a process output tuple
    """
    def log_success(process_output):
        """
        Log a success message

        :param tuple process_output: printable process_output
        :return None:
        """
        standard_out = process_output[1]
        log.info("{} {}".format(message, standard_out))
    return log_success
def write_real_time_output(process):
    """
    Stream the stdout of *process* to our own stdout in real time by
    doing blocking line reads on its pipe until EOF.

    :param obj process: the running process whose stdout pipe is read.
    :return None:
    """
    log.debug(
        "Doing blocking reads on the stdout pipe "
        "of the running process. Will output in real time."
    )
    while True:
        line = process.stdout.readline()
        if line == b'':
            break
        stdout.buffer.write(line)
        stdout.flush()
def terminate_and_kill(process, timeout, command):
    """
    Terminate, then kill, *process* and raise a TimeoutError naming the
    timeout and the command that overran it.

    :param obj process: the process object to terminate and kill
    :param int timeout: the amount of time it was allowed to run
    :param list | str command: the command, as a list or (shell) string
    :return None:
    :raises TimeoutError: always, after stopping the process
    """
    process.terminate()
    process.kill()
    message = (
        "Subprocess timed out after {} seconds. "
        "Command was: {}".format(timeout, command)
    )
    raise TimeoutError(message)
@contextmanager
def terminate_on_timeout(process, timeout, command):
    """
    Context manager that arms a watchdog Timer which terminates and kills
    *process* (raising TimeoutError in the timer thread) if the managed
    block runs longer than *timeout* seconds. The timer is cancelled once
    the block finishes in time.

    :param obj process: the running process to kill after the timeout
    :param int timeout: the amount of time to allow it to run
    :param list | str command: the command, as a list or (shell) string
    """
    watchdog = Timer(
        timeout, terminate_and_kill,
        args=[process, timeout, command]
    )
    watchdog.start()
    yield
    watchdog.cancel()
def execute_process(command, buffered=True, shell=False, timeout=COMMAND_TIMEOUT):
    """
    Execute a command locally in the shell and return the exit code, standard out and standard error as a tuple
    :param list | str command: The command as a list or as string (when shell).
    I.e. ['/bin/ls', '/root'] or "/bin/ls /root"
    :param bool buffered: Store output in a variable instead of printing it live
    :param bool shell: Run the command as in a shell and treat the command as a string instead of a list
    :param int timeout: The amount of time the command is allowed to run before terminating it.
    :return tuple (exit code, standard out, standard error):
    """
    log.debug("Running command: {}".format(command))
    # Copy the environment so the cache dir override does not leak into
    # this process's own environ.
    env = dict(**environ)
    env['RAPTIFORMICA_CACHE_DIR'] = conf().CACHE_DIR
    process = Popen(
        command, stdout=PIPE,
        universal_newlines=buffered,
        # Buffered mode: text pipes with default block buffering.
        # Unbuffered mode: binary pipes so output can be streamed live.
        stderr=PIPE, shell=shell,
        bufsize=-1 if buffered else 0,
        env=env
    )
    with terminate_on_timeout(process, timeout, command):
        if not buffered:
            # Stream stdout live; communicate() then drains what is left.
            write_real_time_output(process)
        standard_out, standard_error = process.communicate()
    exit_code = process.returncode
    return exit_code, standard_out, standard_error
def make_process_output_print_ready(process_output):
    """
    Un-escape the standard out/error of a process output tuple so it can
    be printed as if the process had written to the terminal directly.

    :param tuple process_output: (exit code, standard out, standard error)
    :return tuple: (exit code, un-escaped stdout, un-escaped stderr)
    """
    exit_code, standard_out, standard_error = process_output

    def readable(stream):
        # str passes through; bytes are decoded so literal escape
        # sequences like '\n' become real newlines.
        if isinstance(stream, str):
            return stream
        return stream.decode('unicode_escape')

    return exit_code, readable(standard_out), readable(standard_error)
def run_command_locally(command, success_callback=lambda ret: ret, failure_callback=lambda ret: ret,
                        buffered=True, shell=False, timeout=COMMAND_TIMEOUT):
    """
    Run a command locally and return (exit code, stdout, stderr), invoking
    the success or failure callback with that tuple depending on the exit
    code.

    :param list | str command: The command as a list or as string (when shell).
    :param func failure_callback: runs on nonzero exit, gets the output tuple
    :param func success_callback: runs on zero exit, gets the output tuple
    :param bool buffered: Store output in a variable instead of printing it live
    :param bool shell: Treat the command as a shell string instead of a list
    :param int timeout: Allowed runtime before the command is terminated.
    :return tuple process_output (exit code, standard out, standard error):
    """
    process_output = execute_process(
        command, buffered=buffered, shell=shell, timeout=timeout
    )
    callback = success_callback if process_output[0] == 0 else failure_callback
    callback(process_output)
    return process_output
def run_command_remotely(command, host, port=22,
                         success_callback=lambda ret: ret,
                         failure_callback=lambda ret: ret,
                         buffered=True, shell=False, timeout=COMMAND_TIMEOUT):
    """
    Run a command on *host* over SSH (as root) and return
    (exit code, stdout, stderr), invoking the matching callback.

    :param list command | str command: The command as a list or string.
    :param str host: hostname or ip of the remote machine
    :param int port: ssh port of the remote machine
    :param func failure_callback: runs on nonzero exit, gets the output tuple
    :param func success_callback: runs on zero exit, gets the output tuple
    :param bool buffered: Store output in a variable instead of printing it live
    :param bool shell: Treat the command as a shell string instead of a list
    :param int timeout: Allowed runtime before the command is terminated.
    :return tuple process_output (exit code, standard out, standard error):
    """
    ssh_prefix = ['/usr/bin/env', 'ssh', '-A']
    for option in ('ConnectTimeout=5',
                   'StrictHostKeyChecking=no',
                   'ServerAliveInterval=10',
                   'ServerAliveCountMax=3',
                   'UserKnownHostsFile=/dev/null',
                   'PasswordAuthentication=no'):
        ssh_prefix.extend(['-o', option])
    ssh_prefix.extend(['root@{}'.format(host), '-p', str(port)])
    if shell:
        # Quote the remote command so it survives the local shell.
        full_command = '{} {}'.format(' '.join(ssh_prefix), quote(command))
    else:
        full_command = ssh_prefix + command
    return run_command_locally(
        full_command,
        success_callback=success_callback,
        failure_callback=failure_callback,
        buffered=buffered,
        shell=shell,
        timeout=timeout
    )
def run_command(command, host=None, port=22,
                success_callback=lambda ret: ret,
                failure_callback=lambda ret: ret,
                buffered=True, shell=False,
                timeout=COMMAND_TIMEOUT):
    """
    Run a command and return (exit code, stdout, stderr).

    With no *host* the command runs locally; otherwise it runs over SSH
    on that host.

    :param list command | str command: The command as a list or string.
    :param str host: hostname or ip of the remote machine, or None for local
    :param int port: ssh port of the remote machine
    :param func failure_callback: runs on nonzero exit, gets the output tuple
    :param func success_callback: runs on zero exit, gets the output tuple
    :param bool buffered: Store output in a variable instead of printing it live
    :param bool shell: Treat the command as a shell string instead of a list
    :param int timeout: Allowed runtime before the command is terminated.
    :return tuple process_output (exit code, standard out, standard error):
    """
    if not host:
        return run_command_locally(
            command,
            success_callback=success_callback,
            failure_callback=failure_callback,
            buffered=buffered, shell=shell,
            timeout=timeout
        )
    return run_command_remotely(
        command, host=host, port=port,
        success_callback=success_callback,
        failure_callback=failure_callback,
        buffered=buffered, shell=shell,
        timeout=timeout
    )
def print_ready_callback_factory(callback):
    """
    Wrap a success/failure callback so it receives a print-ready
    (un-escaped) process output tuple instead of the raw one.

    :param func callback: success or failure callback
    :return func print_ready_callback: callback fed printable output
    """
    def print_ready_callback(process_output):
        printable = make_process_output_print_ready(process_output)
        callback(printable)
    return print_ready_callback
def create_in_directory_factory(directory, command_as_string, procedure):
    """
    Partially apply *procedure* with a command list that runs
    ``command_as_string`` inside *directory* (via ``sh -c 'cd dir; cmd'``).

    :param str directory: directory to cwd to before running the command
    :param str command_as_string: command executed as sh -c 'cd directory; command'
    :param func procedure: the callable to partially apply
    :return func: partial of *procedure* with the command filled in
    """
    shell_script = 'cd {}; {}'.format(directory, command_as_string)
    return partial(procedure, ['sh', '-c', shell_script])
def run_command_in_directory_factory(directory, command_as_string):
    """
    Build a partially applied ``run_command`` that executes
    ``command_as_string`` inside *directory*.

    :param str directory: directory to cwd to before running the command
    :param str command_as_string: command executed as sh -c 'cd directory; command'
    :return func partial_run_command: run_command with the command filled in
    """
    return create_in_directory_factory(directory, command_as_string, run_command)
def run_command_print_ready_in_directory_factory(directory, command_as_string):
    """
    Build a zero-argument run_command_print_ready bound to a
    directory-scoped command.

    :param str directory: directory to cd into before running the command
    :param str command_as_string: shell command executed as
    ``sh -c 'cd <directory>; <command>'``
    :return func: run_command_print_ready partial with the command filled in
    """
    return create_in_directory_factory(
        directory=directory,
        command_as_string=command_as_string,
        procedure=run_command_print_ready
    )
def run_command_print_ready(command, host=None, port=22,
                            success_callback=lambda ret: ret,
                            failure_callback=lambda ret: ret,
                            buffered=True, shell=False, timeout=COMMAND_TIMEOUT):
    """
    Run a command, handing the callbacks un-escaped (printable) output.

    Identical to run_command except that both callbacks receive the
    process output after make_process_output_print_ready has been
    applied to it.
    :param list command | str command: The command as a list or string.
    :param str host: hostname or ip of the remote machine, or None for local
    :param int port: port to use to connect to the remote machine over ssh
    :param func success_callback: runs on success with printable output
    :param func failure_callback: runs on failure with printable output
    :param bool buffered: store output in a variable instead of printing live
    :param bool shell: treat the command as a shell string instead of a list
    :param int timeout: seconds the command may run before being terminated
    :return tuple process_output (exit code, standard out, standard error):
    """
    wrapped_success = print_ready_callback_factory(success_callback)
    wrapped_failure = print_ready_callback_factory(failure_callback)
    return run_command(
        command, host=host, port=port,
        success_callback=wrapped_success,
        failure_callback=wrapped_failure,
        buffered=buffered, shell=shell, timeout=timeout
    )
def check_nonzero_exit(command):
    """
    Check whether a shell command exits successfully (exit code 0).

    NOTE: despite the name, this returns True when the command exits
    with code zero and False for any nonzero exit code.
    :param str command: shell command to test for a zero exit code
    :return bool exited_zero: True if exited with 0, False if anything else
    """
    exit_code, _, _ = run_command(command, shell=True)
    return exit_code == 0
def run_critical_command_print_ready(
        command, host=None, port=22, buffered=True,
        failure_message='Command failed', shell=False,
        timeout=COMMAND_TIMEOUT):
    """
    Run a command via run_command_print_ready, raising on nonzero exit.

    The failure callback is pre-set to one that raises with
    ``failure_message`` when the command exits nonzero.
    :param list command | str command: The command as a list or string.
    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :param bool buffered: store output in a variable instead of printing live
    :param str failure_message: message included in the raised failure
    :param bool shell: treat the command as a shell string instead of a list
    :param int timeout: seconds the command may run before being terminated
    :return tuple process_output (exit code, standard out, standard error):
    """
    raise_on_failure = raise_failure_factory(failure_message)
    return run_command_print_ready(
        command, host=host, port=port,
        failure_callback=raise_on_failure,
        buffered=buffered, shell=shell, timeout=timeout
    )
def run_critical_unbuffered_command_print_ready(
        command, host=None, port=22,
        failure_message='Command failed', shell=False, timeout=COMMAND_TIMEOUT):
    """
    Like run_critical_command_print_ready, but with output streamed to
    standard out live instead of being captured (buffered=False).

    :param list command | str command: The command as a list or string.
    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :param str failure_message: message included in the raised failure
    :param bool shell: treat the command as a shell string instead of a list
    :param int timeout: seconds the command may run before being terminated
    :return tuple process_output (exit code, standard out, standard error):
    """
    return run_critical_command_print_ready(
        command, host=host, port=port,
        failure_message=failure_message,
        buffered=False, shell=shell, timeout=timeout
    )
def run_multiple_labeled_commands(distro_command_iterable, host=None, port=22,
                                  failure_message='Command failed for label {}'):
    """
    Run a series of labeled shell commands, failing fast on the first error.

    Each (label, command) pair is executed remotely (or locally when host
    is None), unbuffered, and an error is raised if any command exits
    nonzero.
    :param iterable[iterable] distro_command_iterable: pairs of
    (label, command_as_string)
    :param str host: hostname or ip of the remote machine
    :param int port: port to use to connect to the remote machine over ssh
    :param str failure_message: message included in the raised failure; must
    contain a {} placeholder for the label
    :return None:
    """
    for label, labeled_command in distro_command_iterable:
        run_critical_unbuffered_command_print_ready(
            labeled_command,
            host=host,
            port=port,
            failure_message=failure_message.format(label),
            shell=True
        )
| {
"repo_name": "vdloo/raptiformica",
"path": "raptiformica/shell/execute.py",
"copies": "1",
"size": "18161",
"license": "mit",
"hash": -4824745419633049000,
"line_mean": 41.2348837209,
"line_max": 111,
"alpha_frac": 0.6745773911,
"autogenerated": false,
"ratio": 4.319933396764986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012669943040038933,
"num_lines": 430
} |
from functools import partial
from os import environ
from invocations import travis
from invocations.checks import blacken
from invocations.docs import docs, www, sites, watch_docs
from invocations.pytest import test, integration as integration_, coverage
from invocations.packaging import release
from invocations.util import tmpdir
from invoke import Collection, task
from invoke.util import LOG_FORMAT
# Neuter the normal release.publish task to prevent accidents, then reinstate
# it as a custom task that does dual fabric-xxx and fabric2-xxx releases.
# TODO: tweak this once release.all_ actually works right...sigh
# TODO: if possible, try phrasing as a custom build that builds x2, and then
# convince the vanilla publish() to use that custom build instead of its local
# build?
# NOTE: this skips the dual_wheels, alt_python bits the upstream task has,
# which are at the moment purely for Invoke's sake (as it must publish explicit
# py2 vs py3 wheels due to some vendored dependencies)
@task
def publish(
    c,
    sdist=True,
    wheel=False,
    index=None,
    sign=False,
    dry_run=False,
    directory=None,
    check_desc=False,
):
    # Publish both the 'fabric' and 'fabric2' flavors of the package:
    # build each, then upload everything from one shared staging directory.
    # TODO: better pattern for merging kwargs + config
    # NOTE: configured packaging.* values take precedence over the values
    # passed as arguments here.
    config = c.config.get("packaging", {})
    index = config.get("index", index)
    sign = config.get("sign", sign)
    check_desc = config.get("check_desc", check_desc)
    # Initial sanity check, if needed. Will die usefully.
    # TODO: this could also get factored out harder in invocations. shrug. it's
    # like 3 lines total...
    if check_desc:
        c.run("python setup.py check -r -s")
    # skip_cleanup on dry runs so the artifacts can be inspected afterwards;
    # 'directory' rebinds the parameter to the actual staging path.
    with tmpdir(skip_cleanup=dry_run, explicit=directory) as directory:
        # Doesn't reeeeally need to be a partial, but if we start having to add
        # a kwarg to one call or the other, it's nice
        builder = partial(
            release.build, c, sdist=sdist, wheel=wheel, directory=directory
        )
        # Vanilla build
        builder()
        # Fabric 2 build
        # (env var read by setup.py to rename the package; affects the
        # second build only because it is set between the two calls)
        environ["PACKAGE_AS_FABRIC2"] = "yes"
        builder()
        # Upload
        release.upload(c, directory, index, sign, dry_run)
# TODO: as usual, this just wants a good pattern for "that other task, with a
# tweaked default arg value"
@task
def integration(
    c,
    opts=None,
    pty=True,
    x=False,
    k=None,
    verbose=True,
    color=True,
    capture="no",
    module=None,
):
    """Run the upstream integration task, with capture defaulting to "no"."""
    forwarded = (c, opts, pty, x, k, verbose, color, capture, module)
    return integration_(*forwarded)
# Better than nothing, since we haven't solved "pretend I have some other
# task's signature" yet...
publish.__doc__ = release.publish.__doc__
# Release collection with our dual-package publish task swapped in for the
# stock one.
my_release = Collection(
    "release", release.build, release.status, publish, release.prepare
)
# Top-level task namespace exposed to `invoke` / `fab`.
ns = Collection(
    blacken,
    coverage,
    docs,
    integration,
    my_release,
    sites,
    test,
    travis,
    watch_docs,
    www,
)
# Project-wide default configuration for the tasks above.
ns.configure(
    {
        "tests": {
            # TODO: have pytest tasks honor these?
            "package": "fabric",
            "logformat": LOG_FORMAT,
        },
        "packaging": {
            # NOTE: this is currently for identifying the source directory.
            # Should it get used for actual releasing, needs changing.
            "package": "fabric",
            "sign": True,
            "wheel": True,
            "check_desc": True,
            "changelog_file": "sites/www/changelog.rst",
        },
        # TODO: perhaps move this into a tertiary, non automatically loaded,
        # conf file so that both this & the code under test can reference it?
        # Meh.
        "travis": {
            "sudo": {"user": "sudouser", "password": "mypass"},
            "black": {"version": "18.6b4"},
        },
    }
)
| {
"repo_name": "fabric/fabric",
"path": "tasks.py",
"copies": "1",
"size": "3716",
"license": "bsd-2-clause",
"hash": -589021143419219700,
"line_mean": 29.9666666667,
"line_max": 79,
"alpha_frac": 0.6412809473,
"autogenerated": false,
"ratio": 3.8388429752066116,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4980123922506612,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import getcwd
import pdb
import sys
from warnings import warn
from nose.plugins import Plugin
from noseprogressive.runner import ProgressiveRunner
from noseprogressive.tracebacks import DEFAULT_EDITOR_SHORTCUT_TEMPLATE
from noseprogressive.wrapping import cmdloop, set_trace, StreamWrapper
class ProgressivePlugin(Plugin):
    """A nose plugin which has a progress bar and formats tracebacks for humans"""
    name = 'progressive'
    # Cumulative count of tests discovered via the patched loader methods
    # (see prepareTestLoader); read later by prepareTestRunner.
    _totalTests = 0
    score = 10000 # Grab stdout and stderr before the capture plugin.
    def __init__(self, *args, **kwargs):
        super(ProgressivePlugin, self).__init__(*args, **kwargs)
        # Same wrapping pattern as the built-in capture plugin. The lists
        # shouldn't be necessary, but they don't cost much, and I have to
        # wonder why capture uses them.
        self._stderr, self._stdout, self._set_trace, self._cmdloop = \
            [], [], [], []
    def begin(self):
        """Make some monkeypatches to dodge progress bar.
        Wrap stderr and stdout to keep other users of them from smearing the
        progress bar. Wrap some pdb routines to stop showing the bar while in
        the debugger.
        """
        # The calls to begin/finalize end up like this: a call to begin() on
        # instance A of the plugin, then a paired begin/finalize for each test
        # on instance B, then a final call to finalize() on instance A.
        # TODO: Do only if isatty.
        self._stderr.append(sys.stderr)
        sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
        self._stdout.append(sys.stdout)
        sys.stdout = StreamWrapper(sys.stdout, self)
        # Stash and replace pdb's entry points; originals are restored
        # in finalize().
        self._set_trace.append(pdb.set_trace)
        pdb.set_trace = set_trace
        self._cmdloop.append(pdb.Pdb.cmdloop)
        pdb.Pdb.cmdloop = cmdloop
        # nosetests changes directories to the tests dir when run from a
        # distribution dir, so save the original cwd for relativizing paths.
        self._cwd = '' if self.conf.options.absolute_paths else getcwd()
    def finalize(self, result):
        """Put monkeypatches back as we found them."""
        sys.stderr = self._stderr.pop()
        sys.stdout = self._stdout.pop()
        pdb.set_trace = self._set_trace.pop()
        pdb.Pdb.cmdloop = self._cmdloop.pop()
    def options(self, parser, env):
        """Register command-line options; defaults are read from the
        environment variable named in each option's help text."""
        super(ProgressivePlugin, self).options(parser, env)
        parser.add_option('--progressive-editor',
                          type='string',
                          dest='editor',
                          default=env.get('NOSE_PROGRESSIVE_EDITOR',
                                          env.get('EDITOR', 'vi')),
                          help='The editor to use for the shortcuts in '
                               'tracebacks. Defaults to the value of $EDITOR '
                               'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
        parser.add_option('--progressive-abs',
                          action='store_true',
                          dest='absolute_paths',
                          default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
                          help='Display paths in traceback as absolute, '
                               'rather than relative to the current working '
                               'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
        parser.add_option('--progressive-advisories',
                          action='store_true',
                          dest='show_advisories',
                          default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
                          help='Show skips and deprecation exceptions in '
                               'addition to failures and errors. '
                               '[NOSE_PROGRESSIVE_ADVISORIES]')
        parser.add_option('--progressive-with-styling',
                          action='store_true',
                          dest='with_styling',
                          default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
                          help='nose-progressive automatically omits bold and '
                               'color formatting when its output is directed '
                               'to a non-terminal. Specifying '
                               '--progressive-with-styling forces such '
                               'styling to be output regardless. '
                               '[NOSE_PROGRESSIVE_WITH_STYLING]')
        parser.add_option('--progressive-with-bar',
                          action='store_true',
                          dest='with_bar',
                          default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
                          help='nose-progressive automatically omits the '
                               'progress bar when its output is directed to a '
                               'non-terminal. Specifying '
                               '--progressive-with-bar forces the bar to be '
                               'output regardless. This option implies '
                               '--progressive-with-styling. '
                               '[NOSE_PROGRESSIVE_WITH_BAR]')
        parser.add_option('--progressive-function-color',
                          type='int',
                          dest='function_color',
                          default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
                          help='Color of function names in tracebacks. An '
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
        parser.add_option('--progressive-dim-color',
                          type='int',
                          dest='dim_color',
                          default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
                          help='Color of de-emphasized text (like editor '
                               'shortcuts) in tracebacks. An ANSI color '
                               'expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_DIM_COLOR]')
        parser.add_option('--progressive-bar-filled-color',
                          type='int',
                          dest='bar_filled_color',
                          default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
                          help="Color of the progress bar's filled portion. An "
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
        parser.add_option('--progressive-bar-empty-color',
                          type='int',
                          dest='bar_empty_color',
                          default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
                          help="Color of the progress bar's empty portion. An "
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
        parser.add_option('--progressive-editor-shortcut-template',
                          type='string',
                          dest='editor_shortcut_template',
                          default=env.get(
                              'NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE',
                              DEFAULT_EDITOR_SHORTCUT_TEMPLATE),
                          help='A str.format() template for the non-code lines'
                               ' of the traceback. '
                               '[NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE]')
    def configure(self, options, conf):
        """Turn style-forcing on if bar-forcing is on.
        It'd be messy to position the bar but still have the rest of the
        terminal capabilities emit ''.
        """
        super(ProgressivePlugin, self).configure(options, conf)
        # The ID plugin's verbose output interleaves badly with the bar;
        # warn rather than silently misrender.
        if (getattr(options, 'verbosity', 0) > 1 and
            getattr(options, 'enable_plugin_id', False)):
            # TODO: Can we forcibly disable the ID plugin?
            print ('Using --with-id and --verbosity=2 or higher with '
                   'nose-progressive causes visualization errors. Remove one '
                   'or the other to avoid a mess.')
        if options.with_bar:
            options.with_styling = True
    def prepareTestLoader(self, loader):
        """Insert ourselves into loader calls to count tests.
        The top-level loader call often returns lazy results, like a LazySuite.
        This is a problem, as we would destroy the suite by iterating over it
        to count the tests. Consequently, we monkeypatch the top-level loader
        call to do the load twice: once for the actual test running and again
        to yield something we can iterate over to do the count.
        """
        def capture_suite(orig_method, *args, **kwargs):
            """Intercept calls to the loader before they get lazy.
            Re-execute them to grab a copy of the possibly lazy suite, and
            count the tests therein.
            """
            self._totalTests += orig_method(*args, **kwargs).countTestCases()
            # Clear out the loader's cache. Otherwise, it never finds any tests
            # for the actual test run:
            loader._visitedPaths = set()
            return orig_method(*args, **kwargs)
        # TODO: If there's ever a practical need, also patch loader.suiteClass
        # or even TestProgram.createTests. createTests seems to be main top-
        # level caller of loader methods, and nose.core.collector() (which
        # isn't even called in nose) is an alternate one.
        if hasattr(loader, 'loadTestsFromNames'):
            loader.loadTestsFromNames = partial(capture_suite,
                                                loader.loadTestsFromNames)
    def prepareTestRunner(self, runner):
        """Replace TextTestRunner with something that prints fewer dots."""
        return ProgressiveRunner(self._cwd,
                                 self._totalTests,
                                 runner.stream,
                                 verbosity=self.conf.verbosity,
                                 config=self.conf) # So we don't get a default
                                                   # NoPlugins manager
    def prepareTestResult(self, result):
        """Hang onto the progress bar so the StreamWrappers can grab it."""
        self.bar = result.bar
| {
"repo_name": "veo-labs/nose-progressive",
"path": "noseprogressive/plugin.py",
"copies": "4",
"size": "10478",
"license": "mit",
"hash": -8494470440132122000,
"line_mean": 49.375,
"line_max": 84,
"alpha_frac": 0.5355029586,
"autogenerated": false,
"ratio": 4.804218248509858,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7339721207109858,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import getcwd
import pdb
import sys
from nose.plugins import Plugin
from noseprogressive.runner import ProgressiveRunner
from noseprogressive.wrapping import cmdloop, set_trace, StreamWrapper
class ProgressivePlugin(Plugin):
    """A nose plugin which has a progress bar and formats tracebacks for humans"""
    name = 'progressive'
    # Cumulative count of tests discovered via the patched loader methods
    # (see prepareTestLoader); read later by prepareTestRunner.
    _totalTests = 0
    score = 10000 # Grab stdout and stderr before the capture plugin.
    def __init__(self, *args, **kwargs):
        super(ProgressivePlugin, self).__init__(*args, **kwargs)
        # Same wrapping pattern as the built-in capture plugin. The lists
        # shouldn't be necessary, but they don't cost much, and I have to
        # wonder why capture uses them.
        self._stderr, self._stdout, self._set_trace, self._cmdloop = \
            [], [], [], []
    def begin(self):
        """Make some monkeypatches to dodge progress bar.
        Wrap stderr and stdout to keep other users of them from smearing the
        progress bar. Wrap some pdb routines to stop showing the bar while in
        the debugger.
        """
        # The calls to begin/finalize end up like this: a call to begin() on
        # instance A of the plugin, then a paired begin/finalize for each test
        # on instance B, then a final call to finalize() on instance A.
        # TODO: Do only if isatty.
        self._stderr.append(sys.stderr)
        sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
        self._stdout.append(sys.stdout)
        sys.stdout = StreamWrapper(sys.stdout, self)
        # Stash and replace pdb's entry points; originals are restored
        # in finalize().
        self._set_trace.append(pdb.set_trace)
        pdb.set_trace = set_trace
        self._cmdloop.append(pdb.Pdb.cmdloop)
        pdb.Pdb.cmdloop = cmdloop
        # nosetests changes directories to the tests dir when run from a
        # distribution dir, so save the original cwd for relativizing paths.
        self._cwd = '' if self.conf.options.absolute_paths else getcwd()
    def finalize(self, result):
        """Put monkeypatches back as we found them."""
        sys.stderr = self._stderr.pop()
        sys.stdout = self._stdout.pop()
        pdb.set_trace = self._set_trace.pop()
        pdb.Pdb.cmdloop = self._cmdloop.pop()
    def options(self, parser, env):
        """Register command-line options; defaults are read from the
        environment variable named in each option's help text."""
        super(ProgressivePlugin, self).options(parser, env)
        parser.add_option('--progressive-editor',
                          type='string',
                          dest='editor',
                          default=env.get('NOSE_PROGRESSIVE_EDITOR',
                                          env.get('EDITOR', 'vi')),
                          help='The editor to use for the shortcuts in '
                               'tracebacks. Defaults to the value of $EDITOR '
                               'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
        parser.add_option('--progressive-abs',
                          action='store_true',
                          dest='absolute_paths',
                          default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
                          help='Display paths in traceback as absolute, '
                               'rather than relative to the current working '
                               'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
        parser.add_option('--progressive-advisories',
                          action='store_true',
                          dest='show_advisories',
                          default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
                          help='Show skips and deprecation exceptions in '
                               'addition to failures and errors. '
                               '[NOSE_PROGRESSIVE_ADVISORIES]')
        parser.add_option('--progressive-with-styling',
                          action='store_true',
                          dest='with_styling',
                          default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
                          help='nose-progressive automatically omits bold and '
                               'color formatting when its output is directed '
                               'to a non-terminal. Specifying '
                               '--progressive-with-styling forces such '
                               'styling to be output regardless. '
                               '[NOSE_PROGRESSIVE_WITH_STYLING]')
        parser.add_option('--progressive-with-bar',
                          action='store_true',
                          dest='with_bar',
                          default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
                          help='nose-progressive automatically omits the '
                               'progress bar when its output is directed to a '
                               'non-terminal. Specifying '
                               '--progressive-with-bar forces the bar to be '
                               'output regardless. This option implies '
                               '--progressive-with-styling. '
                               '[NOSE_PROGRESSIVE_WITH_BAR]')
        parser.add_option('--progressive-function-color',
                          type='int',
                          dest='function_color',
                          default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
                          help='Color of function names in tracebacks. An '
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
        parser.add_option('--progressive-dim-color',
                          type='int',
                          dest='dim_color',
                          default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
                          help='Color of de-emphasized text (like editor '
                               'shortcuts) in tracebacks. An ANSI color '
                               'expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_DIM_COLOR]')
        parser.add_option('--progressive-bar-filled-color',
                          type='int',
                          dest='bar_filled_color',
                          default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
                          help="Color of the progress bar's filled portion. An "
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
        parser.add_option('--progressive-bar-empty-color',
                          type='int',
                          dest='bar_empty_color',
                          default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
                          help="Color of the progress bar's empty portion. An "
                               'ANSI color expressed as a number 0-15. '
                               '[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
    def configure(self, options, conf):
        """Turn style-forcing on if bar-forcing is on.
        It'd be messy to position the bar but still have the rest of the
        terminal capabilities emit ''.
        """
        super(ProgressivePlugin, self).configure(options, conf)
        if options.with_bar:
            options.with_styling = True
    def prepareTestLoader(self, loader):
        """Insert ourselves into loader calls to count tests.
        The top-level loader call often returns lazy results, like a LazySuite.
        This is a problem, as we would destroy the suite by iterating over it
        to count the tests. Consequently, we monkeypatch the top-level loader
        call to do the load twice: once for the actual test running and again
        to yield something we can iterate over to do the count.
        """
        def capture_suite(orig_method, *args, **kwargs):
            """Intercept calls to the loader before they get lazy.
            Re-execute them to grab a copy of the possibly lazy suite, and
            count the tests therein.
            """
            self._totalTests += orig_method(*args, **kwargs).countTestCases()
            return orig_method(*args, **kwargs)
        # TODO: If there's ever a practical need, also patch loader.suiteClass
        # or even TestProgram.createTests. createTests seems to be main top-
        # level caller of loader methods, and nose.core.collector() (which
        # isn't even called in nose) is an alternate one.
        if hasattr(loader, 'loadTestsFromNames'):
            loader.loadTestsFromNames = partial(capture_suite,
                                                loader.loadTestsFromNames)
        return loader
    def prepareTestRunner(self, runner):
        """Replace TextTestRunner with something that prints fewer dots."""
        return ProgressiveRunner(self._cwd,
                                 self._totalTests,
                                 runner.stream,
                                 verbosity=self.conf.verbosity,
                                 config=self.conf) # So we don't get a default
                                                   # NoPlugins manager
    def prepareTestResult(self, result):
        """Hang onto the progress bar so the StreamWrappers can grab it."""
        self.bar = result.bar
| {
"repo_name": "enkiv2/popcorn_maker",
"path": "vendor-local/lib/python/noseprogressive/plugin.py",
"copies": "3",
"size": "9303",
"license": "bsd-3-clause",
"hash": 468756564662518700,
"line_mean": 48.4840425532,
"line_max": 84,
"alpha_frac": 0.5354186821,
"autogenerated": false,
"ratio": 4.80030959752322,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6835728279623219,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import makedirs
from os.path import dirname, join as pjoin, realpath
import numpy as np
from pyradigm import (ClassificationDataset as ClfDataset, MultiDatasetClassify,
MultiDatasetRegress, RegressionDataset as RegrDataset)
from pyradigm.utils import (dataset_with_new_features_same_everything_else,
make_random_ClfDataset, make_random_RegrDataset,
make_random_dataset)
test_dir = dirname(__file__)
# Scratch directory for datasets serialized during these tests.
out_dir = realpath(pjoin(test_dir, 'tmp'))
makedirs(out_dir, exist_ok=True)
# Bounds for the randomly chosen number of modalities, and the max feature
# dimensionality used when generating modality variants below.
min_num_modalities = 3
max_num_modalities = 10
max_feat_dim = 10
def make_fully_separable_classes(max_class_size=10, max_dim=22):
    """Create a two-class ClfDataset whose classes are cleanly separable.

    Two Gaussian blobs are generated, the second centred far enough from
    the first (6 standard deviations away on every axis) that the classes
    do not overlap.
    """
    from sklearn.datasets import make_blobs

    cluster_std = 1.5
    base_center = np.random.rand(max_dim)
    centers = [base_center, base_center + cluster_std * 6]
    blobs_X, blobs_y = make_blobs(n_samples=max_class_size, n_features=max_dim,
                                  centers=centers, cluster_std=cluster_std)

    # Map each numeric blob label to a string class id.
    label_names = {lbl: str(lbl) for lbl in np.unique(blobs_y)}
    dataset = ClfDataset()
    for idx, features in enumerate(blobs_X):
        dataset.add_samplet('sub{}'.format(idx),
                            features, label_names[blobs_y[idx]])
    return dataset
# Randomly pick how many feature modalities each MultiDataset carries in
# these tests (shared by all test functions below).
num_modalities = np.random.randint(min_num_modalities, max_num_modalities)
def test_holdout():
    """Smoke-test MultiDataset.holdout for classification and regression."""
    pairings = ((MultiDatasetClassify, ClfDataset),
                (MultiDatasetRegress, RegrDataset))
    for multi_class, ds_class in pairings:
        base_ds = make_random_dataset(5, 20, 50, 10, stratified=False,
                                      class_type=ds_class)
        multi = multi_class()
        # One feature-variant of the same samplets per modality.
        for modality_id in range(num_modalities):
            variant = dataset_with_new_features_same_everything_else(
                base_ds, max_feat_dim)
            multi.append(variant, identifier=modality_id)
        print(multi)

        return_ids_only = False
        for trn, tst in multi.holdout(num_rep=5, train_perc=0.51,
                                      return_ids_only=return_ids_only):
            if return_ids_only:
                print('train: {}\ttest: {}\n'.format(len(trn), len(tst)))
            else:
                # Train/test splits of each modality must agree in
                # dimensionality.
                for train_ds, test_ds in zip(trn, tst):
                    if train_ds.num_features != test_ds.num_features:
                        raise ValueError(
                            'train and test dimensionality do not match!')
                    print('train: {}\ntest : {}\n'.format(train_ds.shape,
                                                          test_ds.shape))
        print()
def test_init_list_of_paths():
    """Construct a MultiDataset from a list of on-disk paths.

    This is the main use case for neuropredict: each modality is saved to
    disk first and the MultiDataset is built from the file paths.
    """
    for multi_class, ds_class in zip((MultiDatasetClassify, MultiDatasetRegress),
                                     (ClfDataset, RegrDataset)):
        ds = make_random_dataset(5, 20, 50, 10, stratified=False,
                                 class_type=ds_class)
        paths = list()
        for ii in range(num_modalities):
            new_ds = dataset_with_new_features_same_everything_else(ds, max_feat_dim)
            path = pjoin(out_dir, 'ds{}.pkl'.format(ii))
            new_ds.save(path)
            paths.append(path)
        try:
            multi = multi_class(dataset_spec=paths)
        except Exception as exc:
            # Was a bare ``except:`` that swallowed the original traceback
            # (and even caught KeyboardInterrupt/SystemExit); narrow it and
            # chain the cause so the real failure is visible.
            raise ValueError('MultiDataset constructor via list of paths does not '
                             'work!') from exc
def test_attributes():
    """Ensure samplet attributes are carried into MultiDataset correctly."""
    clf_gen = partial(make_random_ClfDataset, 5, 20, 50, 10, stratified=False)
    regr_gen = partial(make_random_RegrDataset, 20, 100, 50)
    for multi_cls, ds_gen in ((MultiDatasetClassify, clf_gen),
                              (MultiDatasetRegress, regr_gen)):
        ds = ds_gen(attr_names=('age', 'gender'),
                    attr_types=('age', 'gender'))
        multi_ds = multi_cls()
        multi_ds.append(ds, 0)
        # Remaining modalities: same samplets/attrs, new random features.
        for modality_id in range(1, num_modalities):
            variant = dataset_with_new_features_same_everything_else(
                ds, max_feat_dim)
            multi_ds.append(variant, modality_id)
        if multi_ds.common_attr != ds.attr:
            raise ValueError('Attributes in indiv Dataset and MultiDataset differ!')
        print('!! --- write tests for ds.dataset_attr and mutli_ds.meta')
| {
"repo_name": "raamana/pyradigm",
"path": "pyradigm/tests/test_MultiDataset.py",
"copies": "1",
"size": "4507",
"license": "mit",
"hash": -7255523337245605000,
"line_mean": 35.9426229508,
"line_max": 90,
"alpha_frac": 0.5810960728,
"autogenerated": false,
"ratio": 3.6642276422764226,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47453237150764227,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import path
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from django.views import defaults, i18n, static
from . import views
base_dir = path.dirname(path.abspath(__file__))
media_dir = path.join(base_dir, 'media')
locale_dir = path.join(base_dir, 'locale')
# NOTE(review): the js_info_dict* dicts below are not referenced by the
# urlpatterns in this module (the JS i18n URLs use
# JavaScriptCatalog.as_view(packages=...) directly) -- confirm they are
# still imported elsewhere (e.g. by tests) before removing.
js_info_dict = {
    'domain': 'djangojs',
    'packages': ('view_tests',),
}
js_info_dict_english_translation = {
    'domain': 'djangojs',
    'packages': ('view_tests.app0',),
}
js_info_dict_multi_packages1 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app1', 'view_tests.app2'),
}
js_info_dict_multi_packages2 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app3', 'view_tests.app4'),
}
js_info_dict_admin = {
    'domain': 'djangojs',
    'packages': ('django.contrib.admin', 'view_tests'),
}
js_info_dict_app1 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app1',),
}
js_info_dict_app2 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app2',),
}
js_info_dict_app5 = {
    'domain': 'djangojs',
    'packages': ('view_tests.app5',),
}
urlpatterns = [
    url(r'^$', views.index_page),
    # Default views
    url(r'^nonexistent_url/', partial(defaults.page_not_found, exception=None)),
    url(r'^server_error/', defaults.server_error),
    # a view that raises an exception for the debug view
    url(r'raises/$', views.raises),
    url(r'raises400/$', views.raises400),
    url(r'raises403/$', views.raises403),
    url(r'raises404/$', views.raises404),
    url(r'raises500/$', views.raises500),
    url(r'technical404/$', views.technical404, name="my404"),
    url(r'classbased404/$', views.Http404View.as_view()),
    # i18n views
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^jsi18n/$', i18n.JavaScriptCatalog.as_view(packages=['view_tests'])),
    url(r'^jsi18n/app1/$', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app1'])),
    url(r'^jsi18n/app2/$', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app2'])),
    url(r'^jsi18n/app5/$', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app5'])),
    url(r'^jsi18n_english_translation/$', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app0'])),
    url(r'^jsi18n_multi_packages1/$',
        i18n.JavaScriptCatalog.as_view(packages=['view_tests.app1', 'view_tests.app2'])),
    url(r'^jsi18n_multi_packages2/$',
        i18n.JavaScriptCatalog.as_view(packages=['view_tests.app3', 'view_tests.app4'])),
    url(r'^jsi18n_admin/$',
        i18n.JavaScriptCatalog.as_view(packages=['django.contrib.admin', 'view_tests'])),
    url(r'^jsi18n_template/$', views.jsi18n),
    url(r'^jsi18n_multi_catalogs/$', views.jsi18n_multi_catalogs),
    url(r'^jsoni18n/$', i18n.JSONCatalog.as_view(packages=['view_tests'])),
    # Static views
    url(r'^site_media/(?P<path>.*)$', static.serve, {'document_root': media_dir, 'show_indexes': True}),
]
# Language-prefixed URL (pattern text is translatable via gettext_lazy).
urlpatterns += i18n_patterns(
    url(_(r'^translated/$'), views.index_page, name='i18n_prefixed'),
)
urlpatterns += [
    url(r'template_exception/$', views.template_exception, name='template_exception'),
    url(
        r'^raises_template_does_not_exist/(?P<path>.+)$',
        views.raises_template_does_not_exist,
        name='raises_template_does_not_exist'
    ),
    url(r'^render_no_template/$', views.render_no_template, name='render_no_template'),
    url(r'^test-setlang/(?P<parameter>[^/]+)/$', views.with_parameter, name='with_parameter'),
]
| {
"repo_name": "camilonova/django",
"path": "tests/view_tests/urls.py",
"copies": "7",
"size": "3556",
"license": "bsd-3-clause",
"hash": 1194942756119306500,
"line_mean": 32.2336448598,
"line_max": 104,
"alpha_frac": 0.6507311586,
"autogenerated": false,
"ratio": 3.03154305200341,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.718227421060341,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import path
from flask import abort, current_app
from notifications_utils.formatters import strip_whitespace
from notifications_utils.recipients import RecipientCSV
from notifications_utils.timezones import utc_string_to_aware_gmt_datetime
from werkzeug.utils import cached_property
from app.models import JSONModel, ModelList
from app.models.job import PaginatedJobsAndScheduledJobs
from app.notify_client.contact_list_api_client import contact_list_api_client
from app.s3_client.s3_csv_client import (
get_csv_metadata,
s3download,
s3upload,
set_metadata_on_csv_upload,
)
from app.utils.templates import get_sample_template
class ContactList(JSONModel):
    """A saved list of contacts, backed by a CSV file uploaded to S3.

    The JSON payload from the admin API is exposed as attributes via
    ``JSONModel``; the CSV contents themselves live in the contact-list
    S3 bucket and are fetched on demand.
    """

    ALLOWED_PROPERTIES = {
        'id',
        'created_by',
        'has_jobs',
        'recent_job_count',
        'service_id',
        'original_file_name',
        'row_count',
        'template_type',
    }

    # Distinguishes contact-list uploads from one-off send uploads.
    upload_type = 'contact_list'

    @classmethod
    def from_id(cls, contact_list_id, *, service_id):
        """Fetch a single contact list from the API by its ID."""
        return cls(contact_list_api_client.get_contact_list(
            service_id=service_id,
            contact_list_id=contact_list_id,
        ))

    @staticmethod
    def get_bucket_name():
        """Return the S3 bucket dedicated to contact-list uploads."""
        return current_app.config['CONTACT_LIST_UPLOAD_BUCKET_NAME']

    @staticmethod
    def upload(service_id, file_dict):
        """Upload CSV data to the contact-list bucket; returns the upload ID."""
        return s3upload(
            service_id,
            file_dict,
            current_app.config['AWS_REGION'],
            bucket=ContactList.get_bucket_name(),
        )

    @staticmethod
    def download(service_id, upload_id):
        """Download the CSV contents, with surrounding whitespace stripped."""
        return strip_whitespace(s3download(
            service_id,
            upload_id,
            bucket=ContactList.get_bucket_name(),
        ))

    @staticmethod
    def set_metadata(service_id, upload_id, **kwargs):
        """Attach metadata (e.g. validity, row count) to an uploaded CSV."""
        return set_metadata_on_csv_upload(
            service_id,
            upload_id,
            bucket=ContactList.get_bucket_name(),
            **kwargs,
        )

    @staticmethod
    def get_metadata(service_id, upload_id):
        """Read back the metadata stored with an uploaded CSV."""
        return get_csv_metadata(
            service_id,
            upload_id,
            bucket=ContactList.get_bucket_name(),
        )

    def copy_to_uploads(self):
        """Copy this list's CSV and metadata into the regular uploads bucket.

        Returns the new upload ID, ready to be used for a send.
        """
        metadata = self.get_metadata(self.service_id, self.id)
        # No ``bucket`` argument here: the copy goes to the default
        # (one-off send) upload bucket, not the contact-list bucket.
        new_upload_id = s3upload(
            self.service_id,
            {'data': self.contents},
            current_app.config['AWS_REGION'],
        )
        set_metadata_on_csv_upload(
            self.service_id,
            new_upload_id,
            **metadata,
        )
        return new_upload_id

    @classmethod
    def create(cls, service_id, upload_id):
        """Turn a validated upload into a saved contact list.

        Aborts with 403 if the upload's metadata does not mark it as valid.
        """
        metadata = cls.get_metadata(service_id, upload_id)
        if not metadata.get('valid'):
            abort(403)
        return cls(contact_list_api_client.create_contact_list(
            service_id=service_id,
            upload_id=upload_id,
            original_file_name=metadata['original_file_name'],
            row_count=int(metadata['row_count']),
            template_type=metadata['template_type'],
        ))

    def delete(self):
        """Delete this contact list via the API."""
        contact_list_api_client.delete_contact_list(
            service_id=self.service_id,
            contact_list_id=self.id,
        )

    @property
    def created_at(self):
        """Creation time as an aware, GMT-converted datetime."""
        return utc_string_to_aware_gmt_datetime(self._dict['created_at'])

    @property
    def contents(self):
        """The raw CSV contents of this list (downloaded from S3)."""
        return self.download(self.service_id, self.id)

    @cached_property
    def recipients(self):
        """Parsed CSV; cached because downloading and parsing is expensive."""
        return RecipientCSV(
            self.contents,
            template=get_sample_template(self.template_type),
            allow_international_sms=True,
            max_initial_rows_shown=50,
        )

    @property
    def saved_file_name(self):
        """The original file name, normalised to a ``.csv`` extension."""
        # The extension is discarded; only the stem is kept.
        file_name, _ = path.splitext(self.original_file_name)
        return f'{file_name}.csv'

    def get_jobs(self, *, page, limit_days=None):
        """Return a page of jobs (sent and scheduled) that used this list."""
        return PaginatedJobsAndScheduledJobs(
            self.service_id,
            contact_list_id=self.id,
            page=page,
            limit_days=limit_days,
        )
class ContactLists(ModelList):
    """Every contact list for a service, sorted newest-first.

    Pass ``template_type`` to keep only lists of that template type.
    """

    client_method = contact_list_api_client.get_contact_lists
    model = ContactList
    sort_function = partial(
        sorted,
        key=lambda contact_list: contact_list['created_at'],
        reverse=True,
    )

    def __init__(self, service_id, template_type=None):
        super().__init__(service_id)
        matching = [
            contact_list for contact_list in self.items
            if template_type is None
            or contact_list['template_type'] == template_type
        ]
        self.items = self.sort_function(matching)
class ContactListsAlphabetical(ContactLists):
    # Same data as ContactLists, but ordered A-Z by original file name
    # (case-insensitive) instead of newest-first.
    sort_function = partial(
        sorted,
        key=lambda item: item['original_file_name'].lower(),
    )
| {
"repo_name": "alphagov/notifications-admin",
"path": "app/models/contact_list.py",
"copies": "1",
"size": "4759",
"license": "mit",
"hash": -6741668659439992000,
"line_mean": 26.6686046512,
"line_max": 77,
"alpha_frac": 0.6024374869,
"autogenerated": false,
"ratio": 3.8691056910569106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.497154317795691,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os import path
class LinuxCpuTemperatureReader():
    """Factory for callables that read the CPU temperature (°C) on Linux.

    ``files`` lists known thermal-zone locations, in order of preference.
    The sysfs paths contain millidegrees (e.g. ``50000``); the legacy
    /proc ACPI paths contain a line such as ``temperature:   50 C``.
    """

    files = [
        '/sys/devices/LNXSYSTM:00/LNXTHERM:00/LNXTHERM:01/thermal_zone/temp',
        '/sys/bus/acpi/devices/LNXTHERM:00/thermal_zone/temp',
        '/proc/acpi/thermal_zone/THM0/temperature',
        '/proc/acpi/thermal_zone/THRM/temperature',
        '/proc/acpi/thermal_zone/THR1/temperature'
    ]

    @classmethod
    def get_reader(cls):
        """Return a zero-argument reader for the first existing file.

        Returns None if none of the known files exists on this system.
        """
        readers = {
            cls.files[0]: cls.reader1,
            cls.files[1]: cls.reader1,
            cls.files[2]: cls.reader2,
            cls.files[3]: cls.reader2,
            cls.files[4]: cls.reader2
        }
        for file in cls.files:
            if path.exists(file):
                reader = readers.get(file)
                if reader:
                    return reader(file)

    @classmethod
    def reader1(cls, file):
        """Reader for sysfs files containing millidegrees, e.g. ``50000``."""
        def reader(file):
            # Context manager ensures the file handle is closed; the
            # original left it to the garbage collector.
            with open(file) as f:
                millidegrees = f.read().strip()
            return int(millidegrees) // 1000
        return partial(reader, file)

    @classmethod
    def reader2(cls, file):
        """Reader for /proc ACPI files, e.g. ``temperature:   50 C``."""
        def reader(file):
            with open(file) as f:
                line = f.read().strip()
            # The original used str.lstrip('temperature :'), which strips a
            # *set* of characters (not the literal prefix) and so could eat
            # into unexpected content. Parse the value after the colon
            # instead.
            return int(line.split(':', 1)[1].split()[0])
        return partial(reader, file)
class WindowsCpuTemperatureReader():
    """Factory for a callable reading the CPU temperature (°C) via WMI."""

    @classmethod
    def get_reader(cls):
        """Return a zero-argument reader backed by MSAcpi_ThermalZoneTemperature."""
        # Imported lazily so this module stays importable on non-Windows hosts.
        import wmi
        import pythoncom

        def read_temperature():
            # Initialise COM for the calling thread before using WMI.
            pythoncom.CoInitialize()
            wmi_root = wmi.WMI(namespace='root\\wmi')
            zone = wmi_root.MSAcpi_ThermalZoneTemperature()[0]
            # CurrentTemperature is in tenths of Kelvin; convert to whole °C.
            return int(zone.CurrentTemperature / 10.0 - 273.15)
        return read_temperature
# Public names exported by ``from ... import *``.
__all__ = ['LinuxCpuTemperatureReader', 'WindowsCpuTemperatureReader']
| {
"repo_name": "uzumaxy/pyspectator",
"path": "pyspectator/temperature_reader.py",
"copies": "1",
"size": "1884",
"license": "bsd-3-clause",
"hash": -6962041354935885000,
"line_mean": 29.3870967742,
"line_max": 77,
"alpha_frac": 0.5891719745,
"autogenerated": false,
"ratio": 3.966315789473684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 62
} |
from functools import partial
from os.path import join, expanduser
from unittest.mock import MagicMock
import uuid
from genty import genty, genty_dataset, genty_args
from app.subcommands.deploy_subcommand import DeploySubcommand
from app.util.network import Network
from test.framework.base_unit_test_case import BaseUnitTestCase
@genty
class TestDeploySubcommand(BaseUnitTestCase):
    """Unit tests for DeploySubcommand's validation and deploy helpers."""

    def setUp(self):
        super().setUp()
        # Building the deploy tarball is slow and touches the filesystem;
        # stub it out for every test in this class.
        self.patch('app.subcommands.deploy_subcommand.fs.tar_directory')

    def test_binaries_tar_raises_exception_if_running_from_source(self):
        """Deploying from a source checkout (not a packaged binary) must abort."""
        deploy_subcommand = DeploySubcommand()
        with self.assertRaisesRegex(SystemExit, '1'):
            deploy_subcommand._binaries_tar('python -m app deploy', '~/.clusterrunner/dist')

    def test_binaries_doesnt_raise_exception_if_running_from_bin(self):
        """Deploying from a packaged binary should proceed without error."""
        self.patch('os.path.isfile').return_value = True
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._binaries_tar('clusterrunner', '~/.clusterrunner/dist')

    def test_deploy_binaries_and_conf_deploys_both_conf_and_binary_for_remote_host(self):
        """Remote hosts always get both the binary and the conf deployed."""
        mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
        mock_DeployTarget_instance = mock_DeployTarget.return_value
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._deploy_binaries_and_conf(
            'remote_host', 'username', 'exec', '/path/to/exec', '/path/to/conf')
        self.assertTrue(mock_DeployTarget_instance.deploy_binary.called)
        self.assertTrue(mock_DeployTarget_instance.deploy_conf.called)

    @genty_dataset(
        # expect to deploy the binary but not the conf when the current executable path is not the same
        # as the target executable path but the current conf path is the same as the target conf path
        same_conf_path_different_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
            expect_deploy_conf=False,
            expect_deploy_binary=True,
        ),
        # expect not to deploy the binary or the conf when the current executable path is the same
        # as the target executable path and the current conf path is the same as the target conf path
        same_conf_path_same_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
            expect_deploy_conf=False,
            expect_deploy_binary=False,
        ),
        # expect to deploy the conf but not the binary when the current conf path is not the same
        # as the target conf path but the current binary path is the same as the target binary path
        different_conf_path_same_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
            expect_deploy_conf=True,
            expect_deploy_binary=False,
        ),
        # expect to deploy the binary and the conf when the current executable path is not the same
        # as the target executable path and the current conf path is not the same as the target conf path
        different_conf_path_different_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
            expect_deploy_conf=True,
            expect_deploy_binary=True,
        ),
    )
    def test_deploy_binaries_and_conf_behaves_properly_if_conf_or_binary_is_in_use_on_localhost(
            self,
            current_executable,
            in_use_conf_path,
            expect_deploy_conf,
            expect_deploy_binary,
    ):
        mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
        mock_DeployTarget_instance = mock_DeployTarget.return_value
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._deploy_binaries_and_conf(
            'localhost',
            'username',
            current_executable,
            join(expanduser('~'), '.clusterrunner', 'clusterrunner.tgz'),
            in_use_conf_path
        )
        self.assertEqual(expect_deploy_binary, mock_DeployTarget_instance.deploy_binary.called)
        self.assertEqual(expect_deploy_conf, mock_DeployTarget_instance.deploy_conf.called)

    def test_non_registered_slaves_returns_empty_list_if_all_registered(self):
        registered_hosts = ['host_1', 'host_2']
        slaves_to_validate = ['host_1', 'host_2']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            else:
                return 'blah'

        # Patch via the test framework (consistent with the sibling tests)
        # rather than manually saving and restoring Network.get_host_id, so
        # the patch is undone even if an assertion fails.
        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        self.assertEqual(0, len(non_registered))

    def test_non_registered_slaves_returns_non_registered_slaves(self):
        registered_hosts = ['host_1', 'host_3']
        slaves_to_validate = ['host_1', 'host_2', 'host_3', 'host_4']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            elif args[0] == 'host_3':
                return 'host_id_3'
            elif args[0] == 'host_4':
                return 'host_id_4'
            else:
                return 'blah'

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        self.assertEqual(len(non_registered), 2)
        self.assertTrue('host_2' in non_registered)
        self.assertTrue('host_4' in non_registered)

    def test_non_registered_slaves_returns_empty_list_with_slaves_with_same_host_ids_but_different_names(self):
        registered_hosts = ['host_1_alias', 'host_2_alias']
        slaves_to_validate = ['host_1', 'host_2']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            elif args[0] == 'host_1_alias':
                return 'host_id_1'
            elif args[0] == 'host_2_alias':
                return 'host_id_2'
            else:
                return 'blah'

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        self.assertEqual(0, len(non_registered))

    @genty_dataset(
        valid_deployment=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_1', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
                'slave_host_2': 'host_2_id',
            },
            is_valid=True,
        ),
        host_mismatch=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_3', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_2': 'host_2_id',
            },
            is_valid=False,
        ),
        number_of_slaves_not_match=genty_args(
            slaves_to_validate=['slave_host_1'],
            connected_slaves=['slave_host_1', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
            },
            is_valid=False,
        ),
        valid_deployment_different_host_names_with_same_host_id=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_1_alias', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
                'slave_host_1_alias': 'host_1_id',
                'slave_host_2': 'host_2_id',
            },
            is_valid=True,
        ),
    )
    def test_validate_deployment_checks_each_slave_is_connected(
            self,
            slaves_to_validate,
            connected_slaves,
            host_name_to_uid,
            is_valid,
    ):
        def get_host_id(host):
            if host in host_name_to_uid:
                return host_name_to_uid[host]
            else:
                # Unknown hosts get a unique id so they never match.
                return str(uuid.uuid4())

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._registered_slave_hostnames = MagicMock(return_value=connected_slaves)
        deploy_subcommand._SLAVE_REGISTRY_TIMEOUT_SEC = 1
        deploy_subcommand._non_registered_slaves = MagicMock()
        validate = partial(deploy_subcommand._validate_successful_deployment, 'master_host_url', slaves_to_validate)
        if not is_valid:
            with self.assertRaises(SystemExit):
                validate()
        else:
            validate()
| {
"repo_name": "box/ClusterRunner",
"path": "test/unit/subcommands/test_deploy_subcommand.py",
"copies": "1",
"size": "9609",
"license": "apache-2.0",
"hash": 3232647974515858400,
"line_mean": 43.4861111111,
"line_max": 116,
"alpha_frac": 0.6032885836,
"autogenerated": false,
"ratio": 3.771193092621664,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4874481676221664,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os.path import join
from kivy.uix.scrollview import ScrollView
from designer.utils import constants
from designer.utils.utils import get_kd_data_dir
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
# Maps each template name shown in the "new project" dialog to the pair of
# (kv template, py template) resource names used to generate the project.
NEW_PROJECTS = {
    'FloatLayout': ('template_floatlayout_kv',
                    'template_floatlayout_py'),
    'BoxLayout': ('template_boxlayout_kv',
                  'template_boxlayout_py'),
    'ScreenManager': ('template_screen_manager_kv',
                      'template_screen_manager_py'),
    'ActionBar': ('template_actionbar_kv',
                  'template_actionbar_py'),
    'Carousel and ActionBar': ('template_actionbar_carousel_kv',
                               'template_actionbar_carousel_py'),
    'ScreenManager and ActionBar': ('template_screen_manager_actionbar_kv',
                                    'template_screen_manager_actionbar_py'),
    'TabbedPanel': ('template_tabbed_panel_kv',
                    'template_tabbed_panel_py'),
    'TextInput and ScrollView': ('template_textinput_scrollview_kv',
                                 'template_textinput_scrollview_py')}
class ProjectTemplateBox(ScrollView):
    '''Scrollable list of buttons, one per available project template
    in NEW_PROJECTS.
    '''

    grid = ObjectProperty(None)
    '''The grid layout consisting of all template buttons.
    This property is an instance of :class:`~kivy.uix.gridlayout`
    :data:`grid` is a :class:`~kivy.properties.ObjectProperty`
    '''

    text = ObjectProperty(None)
    '''Name of the currently selected template.
    :data:`text` is a :class:`~kivy.properties.ObjectProperty`
    '''

    def add_template(self):
        '''Add one button per entry in NEW_PROJECTS, in alphabetical
        order, and trigger selection of the first one.
        '''
        # sorted() replaces the original list(...).sort() two-step.
        for template_name in sorted(NEW_PROJECTS):
            template_item = Factory.DesignerListItemButton(text=template_name)
            self.grid.add_widget(template_item)
            template_item.bind(on_press=self.btn_release)
            self.grid.height += template_item.height
        # Make sure the grid fills the visible area even with few items.
        self.grid.height = max(self.grid.height, self.height)
        # Widget children are stored newest-first, so [-1] is the first
        # button that was added.
        self.grid.children[-1].trigger_action()

    def btn_release(self, instance):
        '''Event handler for 'on_press' of a template button: record the
        selection and update the preview image.
        '''
        self.text = instance.text
        self.parent.update_template_preview(instance)
class NewProjectDialog(BoxLayout):
    '''Dialog that lets the user pick a template and enter metadata for a
    new project. Dispatches 'on_select' and 'on_cancel'.
    '''

    select_button = ObjectProperty(None)
    ''':class:`~kivy.uix.button.Button` used to select the list item.
    :data:`select_button` is a :class:`~kivy.properties.ObjectProperty`
    '''
    cancel_button = ObjectProperty(None)
    ''':class:`~kivy.uix.button.Button` to cancel the dialog.
    :data:`cancel_button` is a :class:`~kivy.properties.ObjectProperty`
    '''
    template_preview = ObjectProperty(None)
    '''Type of :class:`~kivy.uix.image.Image` to display preview of selected
    new template.
    :data:`template_preview` is a :class:`~kivy.properties.ObjectProperty`
    '''
    template_list = ObjectProperty(None)
    '''Type of :class:`ProjectTemplateBox` used for showing template available.
    :data:`template_list` is a :class:`~kivy.properties.ObjectProperty`
    '''
    app_name = ObjectProperty(None)
    '''Text input holding the human-readable application name.
    :data:`app_name` is a :class:`~kivy.properties.ObjectProperty`
    '''
    package_name = ObjectProperty(None)
    '''Text input holding the package identifier, kept in sync with
    :data:`app_name`.
    :data:`package_name` is a :class:`~kivy.properties.ObjectProperty`
    '''
    package_version = ObjectProperty(None)
    '''Text input holding the initial package version string.
    :data:`package_version` is a :class:`~kivy.properties.ObjectProperty`
    '''
    __events__ = ('on_select', 'on_cancel')
    def __init__(self, **kwargs):
        super(NewProjectDialog, self).__init__(**kwargs)
        # Populate the template list and seed sensible defaults; setting
        # app_name.text also fills package_name via on_app_name_text.
        self.template_list.add_template()
        self.app_name.bind(text=self.on_app_name_text)
        self.app_name.text = "My Application"
        self.package_version.text = "0.1.dev0"
    def update_template_preview(self, instance):
        '''Show the preview image matching the selected template button,
        e.g. "Carousel and ActionBar" -> "carousel_actionbar.png".
        '''
        name = instance.text.lower() + '.png'
        name = name.replace(' and ', '_')
        image_source = join(get_kd_data_dir(),
                            constants.NEW_TEMPLATE_IMAGE_PATH, name)
        self.template_preview.source = image_source
    def on_app_name_text(self, instance, value):
        # Derive the package id from the app name, e.g.
        # "My Application" -> "org.test.my_application".
        self.package_name.text = 'org.test.' + value.lower().replace(' ', '_')
    def on_select(self, *args):
        '''Default Event Handler for 'on_select' event
        '''
        pass
    def on_cancel(self, *args):
        '''Default Event Handler for 'on_cancel' event
        '''
        pass
    def on_select_button(self, *args):
        '''Event Handler for 'on_release' of select button.
        '''
        # NOTE(review): bind() is called on each invocation; presumably this
        # property-change handler fires only once per dialog - confirm.
        self.select_button.bind(on_press=partial(self.dispatch, 'on_select'))
    def on_cancel_button(self, *args):
        '''Event Handler for 'on_release' of cancel button.
        '''
        self.cancel_button.bind(on_press=partial(self.dispatch, 'on_cancel'))
| {
"repo_name": "aron-bordin/kivy-designer",
"path": "designer/components/dialogs/new_project.py",
"copies": "2",
"size": "5582",
"license": "mit",
"hash": -916347915262550000,
"line_mean": 36.2133333333,
"line_max": 79,
"alpha_frac": 0.6313149409,
"autogenerated": false,
"ratio": 3.9560595322466336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587374473146633,
"avg_score": null,
"num_lines": null
} |
from functools import partial
from os.path import join
from pythonforandroid.toolchain import Recipe, shprint, current_directory
import sh
class OpenSSLRecipe(Recipe):
    """python-for-android recipe that builds OpenSSL's shared libraries
    (libssl / libcrypto) for a target Android architecture.
    """

    version = '1.0.2h'
    # {version} is interpolated by the Recipe download machinery.
    url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
    def should_build(self, arch):
        # Build only when the versioned .so files are not already installed.
        return not self.has_libs(arch, 'libssl' + self.version + '.so',
                                 'libcrypto' + self.version + '.so')
    def check_symbol(self, env, sofile, symbol):
        """Return True if `symbol` is among the global symbols that the
        toolchain's ``nm`` (env['NM'], defaulting to 'nm') reports for
        `sofile`; otherwise print a rebuild notice and return False.
        """
        nm = env.get('NM', 'nm')
        syms = sh.sh('-c', "{} -gp {} | cut -d' ' -f3".format(
            nm, sofile), _env=env).splitlines()
        if symbol in syms:
            return True
        print('{} missing symbol {}; rebuilding'.format(sofile, symbol))
        return False
    def get_recipe_env(self, arch=None):
        env = super(OpenSSLRecipe, self).get_recipe_env(arch)
        env['OPENSSL_VERSION'] = self.version
        # Fold the linker flags into CFLAGS/CC as well, since OpenSSL's
        # build does not pass LDFLAGS everywhere it links.
        env['CFLAGS'] += ' ' + env['LDFLAGS']
        env['CC'] += ' ' + env['LDFLAGS']
        env['MAKE'] = 'make'  # This removes the '-j5', which isn't safe
        return env
    def select_build_arch(self, arch):
        """Map a python-for-android arch name to an OpenSSL Configure target."""
        aname = arch.arch
        if 'arm64' in aname:
            return 'linux-aarch64'
        if 'v7a' in aname:
            return 'android-armv7'
        if 'arm' in aname:
            return 'android'
        if 'x86' in aname:
            return 'android-x86'
        return 'linux-armv4'
    def build_arch(self, arch):
        """Configure, build, and install libssl/libcrypto for `arch`."""
        env = self.get_recipe_env(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            # sh fails with code 255 trying to execute ./Configure
            # so instead we manually run perl passing in Configure
            perl = sh.Command('perl')
            buildarch = self.select_build_arch(arch)
            shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)
            self.apply_patch('disable-sover.patch', arch.arch)
            self.apply_patch('rename-shared-lib.patch', arch.arch)
            # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')
            check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')
            # NOTE(review): loops until nm reports these libcrypto symbols;
            # presumably guards against intermittently broken builds - confirm.
            while True:
                shprint(sh.make, 'build_libs', _env=env)
                if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):
                    break
                shprint(sh.make, 'clean', _env=env)
            libn = 'libssl', 'libcrypto'
            libs = [lib + self.version + '.so' for lib in libn]
            self.install_libs(arch, *libs)
    def get_include_dirs(self, arch):
        """Directories added to the include path of dependent recipes."""
        return [join(self.get_build_dir(arch.arch), 'include')]
# Module-level instance picked up by the python-for-android build system.
recipe = OpenSSLRecipe()
| {
"repo_name": "wexi/python-for-android",
"path": "pythonforandroid/recipes/openssl/__init__.py",
"copies": "1",
"size": "2738",
"license": "mit",
"hash": -6950994870275438000,
"line_mean": 37.5633802817,
"line_max": 94,
"alpha_frac": 0.5624543462,
"autogenerated": false,
"ratio": 3.5790849673202616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638411912134892,
"avg_score": 0.0006254802770740503,
"num_lines": 71
} |
from functools import partial
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn.apionly as sns
from ..analysis.csv_analysis import analyze_data, load_surveys
from ..data.survey_utils import ExperimentType
from .latexify import latexify, figure, fig_size
from .plot_tools import plot_detailed, plot_distribution, plot_overview
# Colours
# Keep a reference to matplotlib's default colour cycle before overriding it.
default_cycler = plt.rcParamsDefault["axes.prop_cycle"]
# Colour-blind-friendly palettes: main palette for plots, complement for
# overlaid swarm/strip plots.
colorblind_cmaps = ["Dark2", "Set2"]
cmap_main, cmap_complement = colorblind_cmaps
# cmap_main, cmap_complement = cmap_complement, cmap_main
colorblind_cyclers = {cmap: plt.cycler("color", plt.cm.get_cmap(cmap).colors)
                      for cmap in colorblind_cmaps}
plt.rcParams["axes.prop_cycle"] = colorblind_cyclers[cmap_main]
# All figures are written to the thesis image directory as .pdf and .pgf.
FIGURE_DIR = Path(__file__).parent.joinpath("../../reports/thesis/img/plots")
figure = partial(figure, folder=FIGURE_DIR, exts=["pdf", "pgf"])
def do_drone_dos():
    """Plot measured AR.Drone signal strength against distance, with a
    logarithmic best-fit curve on a secondary axis."""
    with figure("ardrone_dos", size=fig_size(0.45)):
        # Measured signal strength (%) at increasing distances (m).
        distance_m = np.array([0, 2, 8, 18, 23, 29, 34, 40,
                               45, 51, 56, 62, 67, 72, 78, 80])
        signal_pct = np.array([90, 90, 86, 60, 50, 62, 35, 26,
                               24, 12, 20, 22, 26, 22, 12, 5])
        fig, signal_ax = plt.subplots()
        signal_ax.step(distance_m, signal_pct, lw=0.5)
        signal_ax.set_xlabel("distance (m)")
        signal_ax.set_ylabel(r"signal (\%)")
        signal_ax.set_ylim(0, 100)
        # Fitted curve, drawn in dB on a twin y-axis.
        fit_x = np.arange(80)
        fit_db = 10 * np.log10(6 / (1e5 * fit_x**2.7))
        fit_ax = signal_ax.twinx()
        fit_ax.plot(fit_x, fit_db, c="C1", lw=0.5)
        fit_ax.set_ylim(-100, -50)
        fit_ax.yaxis.set_tick_params(which="both", labelright=False,
                                     right=False)
        plt.legend([axis.get_children()[0] for axis in (signal_ax, fit_ax)],
                   ["data", "fit"])
def do_paths():
    """Plot side-by-side overview and detailed path figures for the
    Onboard and Spirit experiment conditions."""
    with figure("paths_overview", size=fig_size(0.75, 0.8)):
        ax1 = plt.subplot("121")
        plot_overview(results, ExperimentType.Onboard, color="C0", size_point=2,
                      drone_width=0.5)
        # Share both axes so the two conditions are directly comparable.
        ax2 = plt.subplot("122", sharex=ax1, sharey=ax1)
        plot_overview(results, ExperimentType.Spirit, color="C1", size_point=2,
                      ylabel="", drone_width=0.5)
        plt.setp(ax2.get_yticklabels(), visible=False)
    with figure("paths_detailed", size=fig_size(0.75, 0.7)):
        ax1 = plt.subplot("121")
        plot_detailed(results, ExperimentType.Onboard, color="C0",
                      size_point=2, crosshair=True, drone_width=0.5)
        # Drop the per-axes legends; they would duplicate each other.
        ax1.legend_.remove()
        ax2 = plt.subplot("122", sharex=ax1, sharey=ax1)
        plot_detailed(results, ExperimentType.Spirit, color="C1", ylabel="",
                      size_point=2, crosshair=True, drone_width=0.5)
        ax2.legend_.remove()
        plt.setp(ax2.get_yticklabels(), visible=False)
def do_distributions():
    """Plot the position-distribution figure for each experiment type."""
    conditions = [
        ("distribution_onboard", ExperimentType.Onboard, "C0"),
        ("distribution_spirit", ExperimentType.Spirit, "C1"),
    ]
    for figure_name, experiment, colour in conditions:
        with figure(figure_name, size=fig_size(0.44, 1)):
            plot_distribution(results, experiment, color=colour,
                              crosshair=True, drone_width=0.5)
def do_durations():
    """Plot run durations: aggregated by experiment, and per run."""
    with figure("duration", size=fig_size(0.44, 1)):
        # Box plot with individual data points overlaid in the
        # complementary palette.
        sns.factorplot(x="experiment", y="duration", data=analyses, kind="box")
        sns.swarmplot(x="experiment", y="duration", split=True, data=analyses,
                      palette=cmap_complement)
        plt.ylim(0, plt.ylim()[1])
        plt.ylabel("duration (s)")
    with figure("duration_runs", size=fig_size(0.44, 1)):
        sns.factorplot(x="order", y="duration", hue="experiment", data=analyses,
                       capsize=0.2)
        plt.ylim(0, plt.ylim()[1])
        plt.ylabel("duration (s)")
        plt.xlabel("run")
def do_movement():
    """Plot drone-movement figures: total path length and per-axis
    movement, aggregated by experiment and broken down per run."""
    # Path length and net movement per axis, by experiment.
    with figure("movement", size=fig_size(0.9, 0.4)):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["path_length", "move_x", "move_y"])
        g = sns.factorplot(x="experiment", y="value", col="variable",
                           data=molten, kind="box")
        g.fig.axes[0].set_title("Path length")
        g.fig.axes[1].set_title("Movement in $x$")
        g.fig.axes[2].set_title("Movement in $y$")
        g.fig.axes[0].set_ylabel("distance (m)")
        plt.ylim(0, plt.ylim()[1])
    # Breakdown of x movement into left/right components.
    with figure("movement_x"):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["move_l", "move_r", "move_x"])
        g = sns.factorplot(x="experiment", y="value", col="variable",
                           data=molten, kind="box")
        g.fig.axes[0].set_title("Movement left")
        g.fig.axes[1].set_title("Movement right")
        g.fig.axes[2].set_title("Movement in $x$")
        g.fig.axes[0].set_ylabel("distance (m)")
        plt.ylim(0, plt.ylim()[1])
    # Breakdown of y movement into backwards/forwards components.
    with figure("movement_y"):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["move_b", "move_f", "move_y"])
        g = sns.factorplot(x="experiment", y="value", col="variable",
                           data=molten, kind="box")
        g.fig.axes[0].set_title("Movement backwards")
        g.fig.axes[1].set_title("Movement forwards")
        g.fig.axes[2].set_title("Movement in $y$")
        g.fig.axes[0].set_ylabel("distance (m)")
        plt.ylim(0, plt.ylim()[1])
    # Backwards movement alone, with individual points overlaid.
    with figure("movement_back"):
        sns.factorplot(x="experiment", y="move_b", data=analyses, kind="box")
        sns.swarmplot(x="experiment", y="move_b", split=True, data=analyses,
                      palette=cmap_complement)
        plt.ylabel("distance (m)")
        plt.title("Movement backwards")
    # Same three summaries as above, but per run (x="order").
    with figure("movement_runs", size=fig_size(0.9, 0.4)):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["path_length", "move_x", "move_y"])
        g = sns.factorplot(x="order", y="value", col="variable",
                           data=molten, hue="experiment", capsize=0.2)
        g.fig.axes[0].set_title("Path length")
        g.fig.axes[1].set_title("Movement in $x$")
        g.fig.axes[2].set_title("Movement in $y$")
        g.fig.axes[0].set_ylabel("distance (m)")
        g.fig.axes[0].set_xlabel("run")
        g.fig.axes[1].set_xlabel("run")
        g.fig.axes[2].set_xlabel("run")
        plt.ylim(0, plt.ylim()[1])
    with figure("movement_x_runs"):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["move_l", "move_r", "move_x"])
        g = sns.factorplot(x="order", y="value", col="variable",
                           data=molten, hue="experiment")
        g.fig.axes[0].set_title("Movement left")
        g.fig.axes[1].set_title("Movement right")
        g.fig.axes[2].set_title("Movement in $x$")
        g.fig.axes[0].set_ylabel("distance (m)")
        g.fig.axes[0].set_xlabel("run")
        g.fig.axes[1].set_xlabel("run")
        g.fig.axes[2].set_xlabel("run")
        plt.ylim(0, plt.ylim()[1])
    with figure("movement_y_runs"):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["move_b", "move_f", "move_y"])
        g = sns.factorplot(x="order", y="value", col="variable",
                           data=molten, hue="experiment")
        g.fig.axes[0].set_title("Movement backwards")
        g.fig.axes[1].set_title("Movement forwards")
        g.fig.axes[2].set_title("Movement in $y$")
        g.fig.axes[0].set_ylabel("distance (m)")
        g.fig.axes[0].set_xlabel("run")
        g.fig.axes[1].set_xlabel("run")
        g.fig.axes[2].set_xlabel("run")
        plt.ylim(0, plt.ylim()[1])
def do_errors():
    """Plot positioning-error figures: RMS error (overall and per run)
    and final distance from the target.

    Titles ending in "*" presumably mark statistically significant
    differences - confirm against the thesis text.
    """
    with figure("rms", size=fig_size(0.9, 0.4)):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["rms", "rms_x", "rms_y"])
        g = sns.factorplot(x="experiment", y="value", col="variable",
                           data=molten, kind="box")
        g.fig.axes[0].set_title("RMS Error*")
        g.fig.axes[1].set_title("RMS Error in $x$*")
        g.fig.axes[2].set_title("RMS Error in $y$*")
        g.fig.axes[0].set_ylabel("error (m)")
    # Same RMS metrics per run (x="order").
    with figure("rms_runs", size=fig_size(0.9, 0.4)):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=["rms", "rms_x", "rms_y"])
        g = sns.factorplot(x="order", y="value", col="variable",
                           hue="experiment", data=molten, capsize=0.2)
        g.fig.axes[0].set_title("RMS Error")
        g.fig.axes[1].set_title("RMS Error in $x$")
        g.fig.axes[2].set_title("RMS Error in $y$")
        g.fig.axes[0].set_ylabel("error (m)")
        g.fig.axes[0].set_xlabel("run")
        g.fig.axes[1].set_xlabel("run")
        g.fig.axes[2].set_xlabel("run")
    with figure("distance", size=fig_size(0.9, 0.4)):
        molten = pd.melt(analyses,
                         id_vars=["user", "experiment", "order", "group"],
                         value_vars=[r"dist_err", r"x_err", r"y_err"])
        g = sns.factorplot(x="experiment", y="value", col="variable",
                           data=molten, kind="box")
        g.fig.axes[0].set_title("Distance from target*")
        g.fig.axes[1].set_title("Distance from target in $x$")
        g.fig.axes[2].set_title("Distance from target in $y$*")
        g.fig.axes[0].set_ylabel("distance (m)")
        # Zero-error reference line on each panel.
        g.axes[0][0].axhline(0, color="black", linewidth=1, zorder=-1)
        g.axes[0][1].axhline(0, color="black", linewidth=1, zorder=-1)
        g.axes[0][2].axhline(0, color="black", linewidth=1, zorder=-1)
def do_surveys():
    """Plot the questionnaire figures: NASA-TLX totals and components,
    and the custom survey totals, per-question ratings, and overview.

    Reads the module-level ``tlx`` and ``surveys`` DataFrames plus the
    ``cmap_complement`` palette; produces figures as a side effect only.
    """
    # Identifier columns shared by every melt below.
    survey_id_vars = ["user", "experiment", "order"]
    # The six survey questions, reused for both component figures.
    questions = ["orientation_understanding", "orientation_control",
                 "position_understanding", "position_control",
                 "spacial_understanding", "spacial_control"]

    with figure("tlx_results", size=fig_size(0.44, 1)):
        sns.factorplot(x="experiment", y="tlx", data=tlx, kind="box")
        sns.swarmplot(x="experiment", y="tlx",
                      data=tlx, palette=cmap_complement, split=True)
        plt.ylim(0, plt.ylim()[1])
        plt.ylabel("NASA-TLX weighted score*")

    with figure("tlx_components", size=fig_size(0.44, 1)):
        tlx_components = ["mental", "physical", "temporal", "performance",
                          "effort", "frustration"]
        long_form = pd.melt(tlx, id_vars=survey_id_vars,
                            value_vars=tlx_components,
                            var_name="component", value_name="score")
        sns.barplot(x="component", y="score", hue="experiment",
                    data=long_form)
        plt.gca().set_xticklabels(
            ["MD", "PD", "TD", "P", "E", "F"])
        plt.xlabel("NASA-TLX component")
        plt.ylabel("score")

    with figure("survey_results", size=fig_size(0.44, 1)):
        sns.factorplot(x="experiment", y="total", data=surveys, kind="box")
        sns.swarmplot(x="experiment", y="total", data=surveys,
                      palette=cmap_complement, split=True)
        plt.ylim(0, plt.ylim()[1])
        plt.ylabel("survey score*")

    with figure("survey_components", size=fig_size(0.44, 1)):
        long_form = pd.melt(surveys, id_vars=survey_id_vars,
                            value_vars=questions,
                            var_name="question", value_name="rating")
        sns.barplot(x="question", y="rating", hue="experiment",
                    data=long_form)
        plt.gca().set_xticklabels(
            ["OA", "OC", "PA*", "PC*", "RA*", "RC*"])
        plt.xlabel("question")
        plt.ylabel("rating")

    with figure("survey_overview", size=fig_size(0.9, 0.5)):
        long_form = pd.melt(surveys, id_vars=survey_id_vars,
                            value_vars=questions,
                            var_name="question", value_name="rating")
        axes = sns.barplot(x="rating", y="question", hue="experiment",
                           data=long_form)
        sns.stripplot(x="rating", y="question", data=long_form,
                      hue="experiment", split=True,
                      palette=cmap_complement, jitter=0.6, size=3)
        plt.gca().set_yticklabels(
            ["angle aware", "angle control",
             "position aware*", "position control*",
             "rel. pos. aware*", "rel. pos. control*"])
        # Both barplot and stripplot add legend entries; keep only the
        # second set so each experiment appears once.
        handles, labels = axes.get_legend_handles_labels()
        plt.legend(handles[2:], labels[2:])
        plt.xlabel("rating")
        plt.title("Survey results")
if __name__ == "__main__":
    # Configure plot styling for thesis output before any figure is created.
    latexify()
    do_drone_dos()
    # Bind the module-level globals that the plotting helpers read:
    # do_errors() (and presumably do_paths/do_movement — confirm) read
    # `analyses`, so this assignment must precede those calls.
    results, analyses = analyze_data()
    do_paths()
    do_distributions()
    do_durations()
    do_movement()
    do_errors()
    # do_surveys() reads the `tlx` and `surveys` globals bound here.
    users, tlx, surveys = load_surveys()
    do_surveys()
| {
"repo_name": "masasin/spirit",
"path": "src/visualization/plot_thesis.py",
"copies": "1",
"size": "13531",
"license": "mit",
"hash": -8634962383404471000,
"line_mean": 42.0923566879,
"line_max": 80,
"alpha_frac": 0.5411277807,
"autogenerated": false,
"ratio": 3.2754780924715567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4316605873171556,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.